query: string (length 9 to 3.4k)
document: string (length 9 to 87.4k)
metadata: dict
negatives: list (4 to 101 items)
negative_scores: list (4 to 101 items)
document_score: string (length 3 to 10)
document_rank: string (102 distinct values)
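The fields above describe one retrieval-style record per row: a natural-language query, the code document it should retrieve, a metadata dict, a list of hard-negative documents with their scores, and the positive document's score and rank. The example row reproduced below follows that order (query, document, metadata, then the negatives list, which is truncated here). A minimal sketch of reading such records, assuming they are stored as JSON lines under a placeholder file name, could look like this:

import json

def iter_records(path="data.jsonl"):
    """Yield one dict per row with the fields listed above (the path is a placeholder)."""
    expected = {"query", "document", "metadata", "negatives",
                "negative_scores", "document_score", "document_rank"}
    with open(path, "r", encoding="utf-8") as fh:
        for line in fh:
            row = json.loads(line)
            # Each record pairs a short query with one positive code document
            # and a list of scored hard negatives for contrastive training.
            assert expected <= row.keys()
            yield row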
import data from file
asset = Only import for the asset that you want
searchAndReplace = Change any part of the object's name to another word
def importData(self, asset='', searchAndReplace=['', '']):
    pickleData = pickle.load(open(self.dataPath.path, "rb"))
    layers = [RenderLayerData(l, d) for l, d in pickleData.items() if ':' not in l]
    for l in layers:
        if not searchAndReplace[0] == '' or not searchAndReplace[1] == '':
            l.filterMe(asset, searchAndReplace)
        l.create()
        l.addObjects()
        l.makeOverrides()
        l.makeOverrideConnections()
        l.makeShaderOverride()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def replace(name, newobject):", "def renameAssetObjects(self):\n\t\tfor i,o in enumerate( self.objects ):\n\t\t\tmn.Node( o ).name = self.name + '%i'%i", "def loadAssetTypeSpecialCaseFromFile(file):\n\tstringToTuple = compose(\n\t\ttuple\n\t , partial(map, lambda s: s.strip())\n\t , lambda s: s.split(',')\n\t)\n\n\n\tupdatePosition = lambda position: mergeDict(\n\t\tposition\n\t , { 'Portfolio': str(int(position['Portfolio'])) \\\n\t \t\t\t\t\tif isinstance(position['Portfolio'], float) \\\n\t \t\t\t\t\telse position['Portfolio']\n\t \t, 'AssetType': stringToTuple(position['AssetType'])\n\t \t}\n\t)\n\n\n\treturn \\\n\tcompose(\n\t\tdict\n\t , partial(map, lambda p: (p['ID'], p))\n\t , partial(map, updatePosition)\n\t , getRawPositions\n\t , fileToLines\n\t , partial(join, getDataDirectory())\n\t)(file)", "def _update_object_content(name, input):\n content = input._content\n\n hrefs = re.compile(r'<\\s*[^\\>]*href\\s*=\\s*([\"\\'])(.*?)\\1')\n srcs = re.compile(r'<\\s*[^\\>]*src\\s*=\\s*([\"\\'])(.*?)\\1')\n\n matches = hrefs.findall(content)\n matches.extend(srcs.findall(content))\n relative_paths = []\n for found in matches:\n found = found[1]\n if found not in relative_paths:\n relative_paths.append(found)\n\n for relative_path in relative_paths:\n if not \"://\" in relative_path: # we don't want to rewrite protocols\n dest_path = os.sep.join((get_relative_path(name), \"static\",\n relative_path))\n content = content.replace(relative_path, dest_path)\n\n return content", "def importLightLinking(self, asset = '', searchAndReplace = ['',''] ):\n\t\tLayersInfo = pickle.load( open( self.lightLinkPath.path, \"rb\") )\n\t\tmc.refresh( su = 1 )\n\t\tif not asset == '':\n\t\t\tLayersInfo = self.filterLightLinksData( LayersInfo , asset, searchAndReplace )\n\t\tfor l in LayersInfo.keys():\n\t\t\tobjsToBreakLink = []\n\t\t\tfor link in LayersInfo[l]:\n\t\t\t\tif mc.objExists( link ):\n\t\t\t\t\tobjsToBreakLink.append( link )\n\t\t\tmc.lightlink( b = True, light = l, o = objsToBreakLink )\n\t\tmc.refresh( su = 0 )", "def replace_includes(self, file_name):\n\n indexBegin = 0\n indexEnd = 0\n text = self.dir_helper.read_file(file_name)\n while indexBegin != -1:\n indexBegin = text.find('\\input{', indexBegin+1)\n indexEnd = text.find('}', indexBegin+1)\n text_to_replace = text[indexBegin:indexEnd+1]\n if indexBegin != -1:\n # print 'text_to_replace : ' + text_to_replace\n new_path = self.construct_path(text_to_replace)\n new_text = self.replace_includes(file_name = new_path)\n text = text.replace(text_to_replace, new_text)\n\n return text", "def _mangle_petsc_intersphinx():\n\n if 'LOC' in os.environ and os.path.isfile(os.path.join(os.environ['LOC'],'objects.inv')):\n base_doc_url = os.environ['LOC']\n url=f\"file://\" + os.path.join(base_doc_url,'objects.inv')\n else:\n website = intersphinx_mapping['petsc'][0].partition('/release/')[0]\n branch = get_doc_branch()\n base_doc_url = f\"{website}/{branch}/\"\n url=f\"{base_doc_url}objects.inv\"\n print(\"Using PETSC inventory from \"+url)\n inventory = sphobjinv.Inventory(url=url)\n print(inventory)\n\n for obj in inventory.objects:\n if obj.name.startswith(\"manualpages\"):\n obj.name = \"petsc.\" + \"/\".join(obj.name.split(\"/\")[2:])\n obj.role = \"class\"\n obj.domain = \"py\"\n\n new_inventory_filename = \"petsc_objects.inv\"\n sphobjinv.writebytes(\n new_inventory_filename,\n sphobjinv.compress(inventory.data_file(contract=True))\n )\n intersphinx_mapping['petsc'] = (base_doc_url, new_inventory_filename)", "def _map_source(source):\n for 
pattern, replacement in \\\n settings.REFINERY_FILE_SOURCE_MAP.iteritems():\n translated_source = re.sub(pattern, replacement, source)\n if translated_source != source:\n return translated_source\n return source", "def updateCountryNames(self):\n try:\n with open('countryNameMapping.json', 'r') as file:\n name_mapping = json.loads(file.read())\n except:\n sys.exit('countryNameMapping.json file is unavailable in current directory.')\n \n for key, value in name_mapping.items():\n self.covid_df.replace(key, value, inplace=True)\n \n try:\n with open('countryNameISO2.json', 'r') as file:\n self.name_iso2_mapping = json.loads(file.read())\n except:\n print('countryNameISO2.json file is unavailable in current directory, creating file...')\n self.writeCountryCodeFile()\n print('Re-importing required JSONs...')\n self.updateCountryNames()", "def replace_sandesh_obj_name(self, obj, file):\n obj_class_name = obj.__class__.__name__\n if hasattr(obj, 'sreq_class'):\n try:\n subprocess.call(\"sed -i 's/\" + obj_class_name + \"/\" +\n obj.sreq_class + \"/g' \" + file, shell=True)\n except Exception as e:\n self.logger.error(\n \"Failed to replace sandesh obj name = \" +\n obj_class_name)\n self.logger.error(e)", "def read_and_clean_files(clueweb_file, ann_file, data_dir, ann_dir):\n annotation_input = fileinput.FileInput(os.path.join(ann_dir, ann_file), openhook=fileinput.hook_compressed)\n annotation_list = []\n for line in annotation_input:\n\tannotation_list.append(Annotation.parse_annotation(line))\n\n warc_path = os.path.join(data_dir, clueweb_file)\n warc_file = warc.open(warc_path)\n print \"Replacing entity mentions for \", clueweb_file, \":\", ann_file, \"...\"\n start = time.time()\n warc_entry = WarcEntry(warc_path, warc_file, annotation_list)\n cleaned_records = warc_entry.replace_entity_mentions()\n end = time.time()\n print \"Time used: \", end - start\n warc_file.close()\n return cleaned_records", "def parse_file_replace(path, args):\n try:\n fisier = open(path, 'r')\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n full_data = fisier.read()\n fisier.close()\n\n try:\n fisier = open(path, \"w+\")\n except IOError:\n print(\"Nu am putut deschide fisierul :\", path)\n return\n\n data = \"\"\n for line in full_data:\n data += line\n\n if args.ignore_case:\n pattern = re.compile(re.escape(args.pattern), re.IGNORECASE)\n pattern.sub(args.pattern, data)\n else:\n data = data.replace(args.pattern, args.replace)\n\n fisier.write(data)\n fisier.close()", "def reverse_update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"J\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"J\")[:1000]:\n source.name = (\n f\"ASKAP_{deg2hms(source.wavg_ra, precision=2)}\"\n f\"{deg2dms(source.wavg_dec, precision=2)}\"\n ).replace(\":\", \"\")\n source.save()", "def object_import(request, simulation, object_name):\n try:\n if object_name == 'function':\n parent = simulation.scenario.supply.functionset\n else:\n parent = simulation.scenario.supply.network\n query = get_query(object_name, simulation)\n user_id_set = set(query.values_list('user_id', flat=True))\n if object_name == 'link':\n # To import links, we retrieve the user ids of all centroids, crossings\n # and functions and we build mappings between ids and objects.\n centroids = get_query('centroid', 
simulation)\n centroid_ids = set(centroids.values_list('user_id', flat=True))\n crossings = get_query('crossing', simulation)\n crossing_ids = set(crossings.values_list('user_id', flat=True))\n node_ids = centroid_ids.union(crossing_ids)\n # Mapping between the user id and the id of the nodes.\n node_mapping = dict()\n for centroid in centroids:\n node_mapping[centroid.user_id] = centroid.id\n for crossing in crossings:\n node_mapping[crossing.user_id] = crossing.id\n functions = get_query('function', simulation)\n function_ids = set(functions.values_list('user_id', flat=True))\n # Mapping between the user id and the id of the functions.\n function_id_mapping = dict()\n # Mapping between the user id and the instance of the functions\n function_mapping = dict()\n for function in functions:\n function_id_mapping[function.user_id] = function.id\n function_mapping[function.user_id] = function\n # Convert imported file to a csv DictReader.\n encoded_file = request.FILES['import_file']\n tsv_file = StringIO(encoded_file.read().decode())\n reader = csv.DictReader(tsv_file, delimiter='\\t')\n to_be_updated = set()\n to_be_created = list()\n # Store the user_id of the imported instance to avoid two instances\n # with the same id.\n imported_ids = set()\n if object_name == 'centroid':\n # Do not import centroid with same id as a crossing.\n crossings = get_query('crossing', simulation)\n imported_ids = set(crossings.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Centroid(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'crossing':\n # Do not import crossing with same id as a centroid.\n centroids = get_query('centroid', simulation)\n imported_ids = set(centroids.values_list('user_id', flat=True))\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], float(row['x']),\n float(row['y']))\n )\n else:\n to_be_created.append(\n Crossing(user_id=id, name=row['name'],\n x=float(row['x']), y=float(row['y']))\n )\n elif object_name == 'function':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'], row['expression'])\n )\n else:\n to_be_created.append(\n Function(user_id=id, name=row['name'],\n expression=row['expression'])\n )\n elif object_name == 'link':\n for row in reader:\n id = int(row['id'])\n if not id in imported_ids:\n imported_ids.add(id)\n if id in user_id_set:\n to_be_updated.add(\n (id, row['name'],\n node_mapping[int(row['origin'])],\n node_mapping[int(row['destination'])],\n function_id_mapping[int(row['function'])],\n float(row['lanes']), float(row['length']),\n float(row['speed']), float(row['capacity']))\n )\n else:\n if int(row['origin']) in node_ids \\\n and int(row['destination']) in node_ids \\\n and int(row['function']) in function_ids:\n # Ignore the links with unidentified origin,\n # destination or function.\n to_be_created.append(\n Link(user_id=id, name=row['name'],\n origin=node_mapping[int(row['origin'])],\n destination=node_mapping[int(row['destination'])],\n vdf=function_mapping[int(row['function'])],\n lanes=float(row['lanes']),\n length=float(row['length']),\n speed=float(row['speed']),\n 
capacity=float(row['capacity']))\n )\n if to_be_updated:\n if object_name in ('centroid', 'crossing'):\n values = set(query.values_list('user_id', 'name', 'x', 'y'))\n elif object_name == 'function':\n values = set(query.values_list('user_id', 'name', 'expression'))\n elif object_name == 'link':\n values = set(query.values_list('user_id', 'name', 'origin',\n 'destination', 'vdf_id', 'lanes',\n 'length', 'speed', 'capacity'))\n # Find the instances that really need to be updated (the values have\n # changed).\n to_be_updated = to_be_updated.difference(values)\n if object_name in ('centroid', 'crossing', 'function'):\n # Update the objects (it would be faster to delete and re-create\n # them but this would require to also change the foreign keys of\n # the links).\n for values in to_be_updated:\n # Index 0 of values is the id column i.e. the user_id.\n instance = query.filter(user_id=values[0])\n if object_name in ('centroid', 'crossing'):\n instance.update(name=values[1], x=values[2], y=values[3])\n else: # Function\n instance.update(name=values[1], expression=values[2])\n elif object_name == 'link':\n # Delete the links and re-create them.\n ids = list(query.values_list('id', 'user_id'))\n # Create a mapping between the user ids and the ids.\n id_mapping = dict()\n for i in range(len(values)):\n id_mapping[ids[i][1]] = ids[i][0]\n # Retrieve the ids of the links to be updated with the mapping and\n # delete them.\n to_be_updated_ids = [id_mapping[values[0]]\n for values in to_be_updated]\n with connection.cursor() as cursor:\n chunk_size = 20000\n chunks = [\n to_be_updated_ids[x:x + chunk_size]\n for x in range(0, len(to_be_updated_ids), chunk_size)\n ]\n for chunk in chunks:\n # Delete the relations first.\n cursor.execute(\n \"DELETE FROM Network_Link \"\n \"WHERE link_id IN %s;\",\n [chunk]\n )\n cursor.execute(\n \"DELETE FROM Link \"\n \"WHERE id IN %s;\",\n [chunk]\n )\n # Create a mapping between the id and the instance of the\n # functions.\n function_mapping = dict()\n for function in functions:\n function_mapping[function.id] = function\n # Now, create the updated instances with the new values.\n to_be_created += [\n Link(user_id=values[0], name=values[1], origin=values[2],\n destination=values[3], vdf=function_mapping[values[4]],\n lanes=values[5], length=values[6], speed=values[7],\n capacity=values[8])\n for values in to_be_updated\n ]\n # Create the new objects in bulk.\n # The chunk size is limited by the MySQL engine (timeout if it is too big).\n chunk_size = 10000\n chunks = [to_be_created[x:x + chunk_size]\n for x in range(0, len(to_be_created), chunk_size)]\n # Remove the orphan instances.\n if object_name == 'function':\n query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all()) \\\n .delete()\n else:\n query.model.objects.exclude(network__in=Network.objects.all()).delete()\n for chunk in chunks:\n # Create the new instances.\n query.model.objects.bulk_create(chunk, chunk_size)\n # Retrieve the newly created instances and add the many-to-many\n # relation.\n # Add the many-to-many relation.\n if object_name == 'function':\n new_instances = query.model.objects \\\n .exclude(functionset__in=FunctionSet.objects.all())\n for instance in new_instances:\n instance.functionset.add(parent)\n else:\n new_instances = query.model.objects \\\n .exclude(network__in=Network.objects.all())\n for instance in new_instances:\n instance.network.add(parent)\n simulation.has_changed = True\n simulation.save()\n return HttpResponseRedirect(\n reverse('metro:object_list', 
args=(simulation.id, object_name,))\n )\n except Exception as e:\n print(e)\n context = {\n 'simulation': simulation,\n 'object': object_name,\n }\n return render(request, 'metro_app/import_error.html', context)", "def replace(self, filter, asset_dict): # client_dict provides the uuid\n mongo_core = MainDb.get_core_db_instance()\n replace_result = mongo_core.get_assets().find_one_and_replace(\n {\"uuid\": asset_dict[\"uuid\"]}, asset_dict, upsert=True, return_document=ReturnDocument.AFTER)\n if replace_result[\"uuid\"] == asset_dict[\"uuid\"]:\n return True, \"MongoAsset replaced\"\n else:\n return False, \"Failed to replace asset\"", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def update_source_names(apps, schema_editor):\n Source = apps.get_model(\"vast_pipeline\", \"Source\")\n while Source.objects.filter(name__startswith=\"ASKAP_\").exists():\n # do the updates in transaction batches of 1000 in case the source table is large\n with transaction.atomic():\n for source in Source.objects.filter(name__startswith=\"ASKAP_\")[:1000]:\n source.name = (\n f\"J{deg2hms(source.wavg_ra, precision=1)}\"\n f\"{deg2dms(source.wavg_dec, precision=0)}\"\n ).replace(\":\", \"\")\n source.save()", "def preprocessing_objects(img_data, hierarchy_mapping, object_file_name='objects.p'):\n\n object_path_token = \"{0}.{1}.{2}\".format(DATA, VISUAL_GENOME, get_name_from_file(object_file_name))\n\n # Check if pickles are already created\n objects_path = FilesManager().get_file_path(object_path_token)\n\n if os.path.isfile(objects_path):\n Logger().log('File is already exist {0}'.format(objects_path))\n objects = FilesManager().load_file(object_path_token)\n return objects\n\n # Bad urls which should be sorted out\n bad_urls = get_bad_urls()\n\n # Get the whole objects from entities\n objects_lst = []\n correct_labels = hierarchy_mapping.keys()\n idx = 0\n for img in img_data:\n\n # Get the url image\n url = img.image.url\n\n # Sorting bad urls\n if url in bad_urls:\n continue\n\n # Get the objects per image\n objects = img.objects\n for object in objects:\n\n # Get the lable of object\n label = object.names[0]\n\n # Check if it is a correct label\n if label not in correct_labels:\n continue\n\n new_object_mapping = ObjectMapping(object.id, object.x, object.y, object.width, object.height, object.names,\n object.synsets, url)\n # Append the new objectMapping to objects_lst\n objects_lst.append(new_object_mapping)\n\n idx += 1\n Logger().log(\"Finished img: {}\".format(idx))\n\n # Pickle objects_lst\n objects_array = np.array(objects_lst)\n # Save the objects files to the disk\n FilesManager().save_file(object_path_token, objects_array)\n return objects_array", "def replaceStringInFile():\n sel = nuke.selectedNodes()\n pane = nuke.Panel('replace string in file knob')\n pane.addSingleLineInput('replace this', '')\n pane.addSingleLineInput('by this', '')\n val = pane.show()\n\n if val and sel:\n for node in sel:\n try:\n str1 = pane.value('replace this')\n str2 = pane.value('by this')\n file = str(node['file'].value())\n newfile = file.replace(str1, str2)\n node['file'].setValue(newfile)\n print 'replacing string in', node.name()\n except:\n print 'failed on', node.name()", "def __replaceFiles(self):\n self.ui.showReplaceFilesDialog(self.textForFind())", "def 
removez_all(self,name):\n\t\tnew_name = string.replace(name,' ', '.')\n\t\tnew_name = self.remove_uploader(new_name)\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\n\t\t#new_name = string.replace(name,'\\&.', '.') BUG\n\t\t\n\t\tnew_name = string.replace(new_name,'-', '.')\n\t\tnew_name = string.replace(new_name,'_', '.')\t\t\n\t\tnew_name = string.replace(new_name,'(', '')\n\t\tnew_name = string.replace(new_name,')', '')\n\t\tnew_name = string.replace(new_name,'..', '.')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'X264', 'x264')\n\t\tnew_name = string.replace(new_name,'XVID', 'XviD')\n\t\tnew_name = string.replace(new_name,'TRUEHD', 'TrueHD')\n\t\t\t\t\t\n\t\tnew_name = string.replace(new_name,'multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'Multi', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTI', 'MULTi')\n\t\tnew_name = string.replace(new_name,'MULTiF', 'MULTi')\n\t\tnew_name = string.replace(new_name,'VO.VF','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VOSTFR','MULTi')\n\t\tnew_name = string.replace(new_name,'VF.VO+ST','MULTi')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'TRUE.HD', 'TRUEHD')\n\t\tnew_name = string.replace(new_name,'blueray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Bluray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BluraY', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.Ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu.ray', 'BluRay')\n\t\tnew_name = string.replace(new_name,'(Bluray-rip)', 'BluRay')\n\t\tnew_name = string.replace(new_name,'Blu-Ray Rip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRIP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRiP', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRDRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BRRip', 'BluRay')\n\t\tnew_name = string.replace(new_name,'BD', 'BluRay')\n\t\tnew_name = string.replace(new_name,'HD-DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HD.DVDRiP', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDVD', 'HDRiP')\n\t\tnew_name = string.replace(new_name,'HDDVD', 'HDRiP')\t\t\t\t\n\t\tnew_name = string.replace(new_name,'DVDrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVDriP','DVDRiP')\n\t\tnew_name = string.replace(new_name,'dvdrip','DVDRiP')\n\t\tnew_name = string.replace(new_name,'DVD5','DVDRiP')\n\t\tnew_name = string.replace(new_name,'.DVD.','DVDRiP')\n\t\t\n\t\t\n\t\tnew_name = string.replace(new_name,'.DD.5.1','DD5.1')\n\t\tnew_name = string.replace(new_name,'6.Canaux','5.1')\t\n\t\tnew_name = string.replace(new_name,'dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'Dts', 'DTS')\n\t\tnew_name = string.replace(new_name,'DtS', 'DTS')\n\t\tnew_name = string.replace(new_name,'DTS.DTS','DTS')\n\t\tnew_name = string.replace(new_name,'DTSHD.','DTS.')\n\t\tnew_name = string.replace(new_name,'.HD.','.')\n\t\t\n\t\tnew_name = string.replace(new_name,'hdma', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'HD.MA', 'HDMA')\n\t\tnew_name = string.replace(new_name,'.MA.', '.HDMA.')\n\t\tnew_name = string.replace(new_name,'ac3','AC3')\n\t\tnew_name = string.replace(new_name,'Ac3','AC3')\n\t\tnew_name = string.replace(new_name,'AC.3.','AC3.')\n\t\t\n\t\tnew_name = 
string.replace(new_name,'HD.HRA','HRA') #High resolution audio\n\t\t#new_name = string.replace(new_name,'.HRA.', '.')\n\t\t\n\t\tnew_name = string.replace(new_name,'.fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.Fr.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'.FR.', '.FRENCH.')\n\t\tnew_name = string.replace(new_name,'french', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'French', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'VF.', 'FRENCH.')\n\t\tnew_name = string.replace(new_name,'VFF', 'TRUEFRENCH')\t\t\n\t\tnew_name = string.replace(new_name,'truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'Truefrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFrench', 'TRUEFRENCH')\n\t\tnew_name = string.replace(new_name,'TrueFRENCH', 'TRUEFRENCH')\n\t\t\n\t\tnew_name = string.replace(new_name,'VF', 'FRENCH')\n\t\tnew_name = string.replace(new_name,'.PAL.', '.')\n\t\tnew_name = string.replace(new_name,'HD1080', '1080p')\n\t\tnew_name = string.replace(new_name,'1080P', '1080p')\n\t\tnew_name = string.replace(new_name,'720P', '720p')\n\t\t\n\t\tnew_name = string.replace(new_name,'VERSION.LONGUE','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Version.Longue','EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Cut', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Extended.Edition', 'EXTENDED')\n\t\tnew_name = string.replace(new_name,'Director\\'s.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'Directors.Cut', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'DC', 'DIRECTOR.CUT')\n\t\tnew_name = string.replace(new_name,'D/C', 'DIRECTOR.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'Remastered','REMASTERED')\n\t\tnew_name = string.replace(new_name,'Theatrical.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Theatricul.Cut','THEATRICAL.CUT')\n\t\tnew_name = string.replace(new_name,'Sunshine.Edition','SUNSHINE.EDITION')\n\t\tnew_name = string.replace(new_name,'Revisited.The.Final.Cut','REVISITED.FiNAL.CUT')\t\t\n\t\tnew_name = string.replace(new_name,'LIMITED','LiMiTED')\n\t\t\n\t\tnew_name = string.replace(new_name,'iNT','iNTERNAL')\n\t\tnew_name = string.replace(new_name,'JKF.3D', 'JFK3D')\n\t\tnew_name = string.replace(new_name,'GAIA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'Gaïa', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAÏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAϏA', 'GAÏA')\n\t\tnew_name = string.replace(new_name,'GAiA', 'GAÏA')\n\t\t\n\t\tnew_name = string.replace(new_name,'dxva', 'DXVA') #<harwdare decode\n\t\tnew_name = string.replace(new_name,'rip','')\n\t\tnew_name = string.replace(new_name,'Rip','')\n\t\tnew_name = string.replace(new_name,'Ripp','')\n\t\tnew_name = string.replace(new_name,'.mkv.mkv', '.mkv')\n\t\t#new_name = string.replace(new_name,'..', '.')\t#USELESS\n\t\treturn self.refactor_line(new_name)", "def preload(self):\n # load the objects\n for otype, fname in self.TYPE2NAME.items():\n if fname:\n path = os.path.join(self.anodir, fname + \".gz\")\n if os.path.isfile(path):\n with gzip.open(path, \"rt\") as handler:\n for line in handler:\n omap = json.loads(line)\n cls = self.TYPE2CLASS[otype]\n item = cls.from_map(omap, self)\n self.caches[otype][item.id] = item", "def replace_parts(file, file_out, replacements):\n # Read in original file\n with open(file, \"r\") as f:\n lines = f.readlines()\n\n # Replace lines in file\n for i, line in enumerate(lines[:]):\n # Replace file name and tag\n for key, val in 
replacements.items():\n if key in line:\n lines[i] = line.replace(str(key), str(val))\n\n with open(file_out, \"w\") as f:\n f.writelines(lines)", "def update(self, namein, nameout):\n\t\ttext = self.dict.sub(self.readFile(namein))\n\t\tself.writeFile(nameout, text)\n\t\treturn", "def appendSkinFile(appendFileName, skinPartSearchAndReplace):\n\trsSkinLines = []\n\n# TODO:\n#\tfile_lines = fileReadLines(appendFileName, source=\"MyMetrixLite\")\n\tskFile = open(appendFileName, \"r\")\n\tfile_lines = skFile.readlines()\n\tskFile.close()\n\n\tfor skinLine in file_lines:\n\t\tfor item in skinPartSearchAndReplace:\n\t\t\tskinLine = skinLine.replace(item[0], item[1])\n\t\trsSkinLines.append(skinLine)\n\n\treturn rsSkinLines", "def importLights(self, asset = '', searchAndReplace = ['',''] ):\n\t\tif self.lightPath.exists:\n\t\t\tself.lightPath.imp()\n\t\t\tif self.lightLinkPath.exists:\n\t\t\t\tself.importLightLinking( asset, searchAndReplace )", "def process(self,line):\n\n pattern_str = f\"src=.?[\\s\\\"].*?[\\s\\\"]\"\n p = re.compile(pattern_str)\n for m in p.finditer(line):\n\n file = m.group(0).split(\"src=\")[1][1:-1]\n if file.startswith(\"http\"):\n continue\n\n new_file = self._copy_file(file)\n\n re.sub(file,new_file,line)\n\n return line", "def replace(file,original_text,replacement_text):\n with open(file, \"rt\") as fin:\n with open(str(file+\"temp\"), \"wt\") as fout:\n for line in fin:\n fout.write(line.replace(original_text,replacement_text))\n os.rename(str(file+\"temp\"),file)\n return", "def rewrite_importlib_resources(pkg_files, new_root):\n for file in pkg_files.glob('*.py'):\n text = file.read_text().replace('importlib_resources.abc', '.abc')\n text = text.replace('zipp', '..zipp')\n file.write_text(text)", "def patch(text):\n if match := re.search(r'\\d+__\\d+__\\d+', text):\n tag = match.group(0)\n if tag not in ocds_tags:\n if ocds_version or not use_development_version:\n text = text.replace(tag, ocds_tag)\n else:\n text = text.replace(ocds_schema_base_url + tag, development_base_url)\n return text", "def load(self,fname,replace_name=True,reset_theano=True):\n import cPickle\n\n noreplace_list = ['model_name','tstring']\n\n with open(fname,'rb') as fh:\n ldict = cPickle.load(fh)\n for sname in self._params:\n if ldict.has_key(sname):\n if replace_name:\n setattr(self,sname,ldict[sname])\n else:\n if sname not in noreplace_list:\n setattr(self,sname,ldict[sname])\n else:\n print 'WARNING: key %s missing in file %s'%(sname,fname)\n\n if reset_theano: self._reset_on_load()", "def change_all_field(edited_field, smali_file_list, class_landroid_java_over_list):\n for smali_file in smali_file_list: # For each file\n for smali_line in u.open_file_input(smali_file): # For each line\n if re.search(r'^([ ]*?)(((i|s)get(\\-)?)|((i|s)put(\\-)?))', smali_line) is not None: # If contains a field reference\n change_match_line(smali_line, edited_field, class_landroid_java_over_list)\n else:\n print smali_line, # Print the line unchanged", "def ralph2_sync_ack(data):\n model = model_mapping[data['model']]\n ct = ContentType.objects.get_for_model(model)\n try:\n ImportedObjects.objects.get(\n content_type=ct,\n object_pk=data['ralph3_id']\n )\n logger.info(\n 'ImportedObject mapping for {} found in Ralph3'.format(data)\n )\n except ImportedObjects.DoesNotExist:\n logger.info(\n 'Creating new ImportedObject mapping in Ralph3: {}'.format(data)\n )\n ImportedObjects.objects.create(\n content_type=ContentType.objects.get_for_model(model),\n old_object_pk=data['id'],\n 
object_pk=data['ralph3_id'],\n )", "def ModifyScenarioFiles(base_path):\n enumrealm = socket.gethostbyname('enum_realm')\n prirealm = socket.gethostbyname('prv_rsa')\n pubrealm = socket.gethostbyname('pub_rsa')\n\n strList = ['ENUM_REALM_IP','PRI_REALM_IP','PUB_REALM_IP']\n repList = [enumrealm,prirealm,pubrealm]\n fileName = ['Basic_Receiver_enum.xml','Basic_Receiver_pri.xml','Basic_Receiver_pub.xml']\n \n try:\n for i in range(len(strList)):\n zfile=open(base_path + fileName[i],\"r\")\n zList = zfile.readlines()\n zfile.close()\n\n for j in zList:\n if j.__contains__(strList[i]):\n str1 = j.replace(strList[i],repList[i])\n ind = zList.index(j)\n zList[ind] = str1\n break\n \n zfile=open(base_path + fileName[i],\"w\")\n zList = zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n log.error('error: %s' %str(e))", "def run(self, edit):\n\n obj = ConvertPythonSrc2Obj().convert(self.view.substr(sublime.Region(0, self.view.size())))[0]\n\n if obj is None:\n return\n if not obj.get('name'):\n error('A valid name must be provided!')\n elif obj.get('scope') is None and obj.get('find') is None:\n error('A valid find pattern or scope must be provided!')\n elif not self.is_existing_name(obj['name']):\n try:\n if obj.get('find') is not None:\n if obj.get('selection_inputs', False):\n pass\n elif obj.get('literal', False):\n flags = 0\n pattern = re.escape(obj['find'])\n if obj.get('literal_ignorecase', False):\n flags = re.I\n re.compile(pattern, flags)\n else:\n extend = sublime.load_settings(\n 'reg_replace.sublime-settings'\n ).get('extended_back_references', False)\n if extend:\n bre.compile_search(obj['find'])\n else:\n re.compile(obj['find'])\n settings = sublime.load_settings('reg_replace_rules.sublime-settings')\n rules = settings.get('replacements', {})\n rules[obj['name']] = obj\n settings.set('replacements', rules)\n sublime.save_settings('reg_replace_rules.sublime-settings')\n self.view.settings().set('regreplace.name', obj['name'])\n except Exception as e:\n error('Regex compile failed!\\n\\n%s' % str(e))", "def AgiImport(dirpath, file):\n objPath=dirpath+'\\\\'+file\n if os.path.exists(objPath)==False:\n print objPath\n return\n \n ## Open new template file ##\n template = rs.TemplateFile()\n cmd=\"-_New \"\n cmd+=template+\" \"\n rs.Command(cmd)\n \n \n cmd=\"-_Import \"\n cmd+='\"'+os.path.abspath(objPath)+'\"'+\" \"\n cmd+=\"IgnoreTextures=No \"\n cmd+=\"MapOBJToRhinoZ=Yes \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n \n rs.Command(\"SplitDisjointMesh \")\n \n meshes = rs.LastCreatedObjects()\n max=0\n keep=None\n for guid in meshes:\n mesh = rs.coercemesh(guid)\n count = mesh.Faces.Count\n if count > max:\n keep = guid\n max = count\n \n if keep:\n meshes.remove(keep)\n rs.DeleteObjects(meshes)\n \n rs.ZoomExtents(all=True)\n \n cmd=\"-_SaveAs \"\n cmd+=\"SaveTextures=Yes \"\n cmd+='\"'+os.path.abspath(objPath).replace(\".wrl\",\".3dm\")+'\"'+\" \"\n cmd+=\"_Enter \"\n rs.Command(cmd)\n rs.DocumentModified(False)\n Rhino.RhinoApp.Wait()\n Rhino.RhinoApp.Wait()", "def process_asset_data(data):\n buffered_assets = []\n\n for asset in data:\n asset_geom = shape(asset['geometry'])\n buffered_geom = asset_geom.buffer(100)\n\n asset['buffer'] = buffered_geom\n buffered_assets.append(asset)\n\n output = []\n assets_seen = set()\n\n for asset in tqdm(buffered_assets):\n if asset['properties']['Opref'] in assets_seen:\n continue\n assets_seen.add(asset['properties']['Opref'])\n touching_assets = []\n for other_asset in buffered_assets:\n if 
asset['buffer'].intersects(other_asset['buffer']):\n touching_assets.append(other_asset)\n assets_seen.add(other_asset['properties']['Opref'])\n\n dissolved_shape = cascaded_union([a['buffer'] for a in touching_assets])\n final_centroid = dissolved_shape.centroid\n output.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [final_centroid.coords[0][0], final_centroid.coords[0][1]],\n },\n 'properties':{\n 'name': asset['properties']['name'],\n }\n })\n\n return output", "def obfuscate(source):\n lines = tokenizer.tokenize_file(source)\n for ind, line in enumerate(lines):\n for pattern in pattern_search.values():\n match = re.search(pattern, line)\n if match:\n search_variable_to_replace(line)\n lines = replace(lines)\n return (lines, replacement_dic)", "def change_file(paths, object):\r\n \"\"\" \r\n for x in os.listdir(dirs): print(x) \r\n \"\"\"\r\n check_path = paths + \"\\changer\"\r\n\r\n for x in object:\r\n pass_data = False\r\n name = x.split('.')[0]\r\n for y in os.listdir(check_path):\r\n if name in y:\r\n pass_data = True\r\n\r\n if pass_data:\r\n pass\r\n else:\r\n vid = VideoFileClip(os.path.join(paths, \"\", x))\r\n\r\n vid.audio.write_audiofile(os.path.join(\r\n paths, \"changer\", name+\".mp3\")\r\n )\r\n show_result(check_path)", "def readdata(self, filepaths):\n pass", "def restore_speedsters(apps, schema_editor):\n\n Pokemon = apps.get_model(\"stats\", \"Pokemon\")\n Pokemon.objects.filter(id__in=[\"ZERAORA\", \"TALONFLAME\", \"ABSOL\", \"GENGAR\"]).update(category=\"SS\")", "def update_datasource():\n ds = Datasource(TDSX_FILE)\n ds.replace_extract(HYPER_FILE)", "def replace(lines):\n for index, line in enumerate(lines):\n if not line == '\\n':\n token_line = tokenizer.tokenize_line(line)\n for ind, tok in enumerate(token_line):\n if token_line[ind][1] in replacement_dic.keys() and token_line[ind][1] not in ignore_variable:\n if ind > 1 and token_line[ind-2][1] in import_list:\n continue\n if token_line[ind][0] == token.NAME and token_line[ind+1][1] == '(':\n continue\n token_line[ind][1] = replacement_dic.get(token_line[ind][1])\n\n lines[index] = tokenizer.untokenize_line(token_line)\n return lines", "def fixDelex(filename, data, data2, idx, idx_acts):\n try:\n turn = data2[filename.strip('.json')][str(idx_acts)]\n except:\n return data\n\n if not isinstance(turn, str) and not isinstance(turn, unicode):\n for k, act in turn.items():\n if 'Attraction' in k:\n if 'restaurant_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"restaurant\", \"attraction\")\n if 'hotel_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"hotel\", \"attraction\")\n if 'Hotel' in k:\n if 'attraction_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"attraction\", \"hotel\")\n if 'restaurant_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"restaurant\", \"hotel\")\n if 'Restaurant' in k:\n if 'attraction_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"attraction\", \"restaurant\")\n if 'hotel_' in data['log'][idx]['text']:\n data['log'][idx]['text'] = data['log'][idx]['text'].replace(\"hotel\", \"restaurant\")\n\n return data", "def loadText(self,filePath):\n ins = file(filePath,'r')\n reComment = re.compile(r\"#.*\")\n reSection = re.compile(r'@ +(srcmod|replace)',re.M)\n reReplace = re.compile(r\"(\\w[-\\w ']+)\\s*:\\s*(.+)\")\n 
reNewIds = re.compile(r\",\\s*\")\n mode = None\n for line in ins:\n line = reComment.sub('',line.strip())\n maSection = reSection.match(line)\n if maSection:\n mode = maSection.group(1)\n elif not line: #--Empty/comment line\n pass\n elif mode == 'srcmod':\n self.srcModName = line\n elif mode == 'replace':\n maReplace = reReplace.match(line)\n if not maReplace: continue\n oldId = maReplace.group(1)\n self.newIds[oldId.lower()] = reNewIds.split(maReplace.group(2))\n ins.close()", "def test_replace_software_asset_for_software_component(self):\n pass", "def process(self, source, dest):\n\n if os.path.isfile(dest):\n print(\"File %s exists -> aborting\" % dest)\n exit(1)\n print(dest)\n \n fin = open(source)\n fout = open(dest, 'w')\n for l in fin.readlines():\n l = l.replace(\"AUTHOR\", self.author)\n l = l.replace(\"DESCRIPTION\", self.description)\n l = l.replace(\"NAMESPACE\", self.namespace)\n l = l.replace(\"MyComponent\", self.className)\n l = l.replace(\"INCDIR\", self.hDir)\n l = l.replace(\"CXXDIR\", self.cxxDir)\n l = l.replace(\"YEAR\", str(self.now.year))\n l = l.replace(\"DATE\", \"%d %s %d\" % (self.now.day, self.now.strftime(\"%b\"), self.now.year))\n fout.write(l)\n fout.close()\n fin.close()", "def get_names():\n\n #Initialize entities dictionary\n entities = {'entity': 'source_file'}\n\n # Construct the raw_directory path\n project_root = os.environ['PYTHONPATH']\n raw_directory = '{}/data/raw/'.format(project_root)\n \n for file in os.listdir(raw_directory):\n if file.endswith('.json'):\n \n # Construct the full file path\n full_path = '{}{}'.format(raw_directory, file)\n \n # Open each JSON file\n with open(full_path, 'r') as source_file:\n data = source_file.read()\n parsed_data = json.loads(data)\n \n # Iterate through the dictionary parsed_data\n for key in parsed_data:\n if 'SocialTag' in key:\n name = parsed_data[key]['name']\n entities.update({name: file})\n\n return entities", "def do_replace(self, tx, item):\n sql = \"\"\"REPLACE INTO des_raw_parsed_data (id, content, url, source, exception_detail, exception_code, site, template_id, domain, classify, subclass)\n VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)\"\"\"\n content = json.dumps(dict(item[\"other_parameter\"]),ensure_ascii=False)\n # if content:\n # content = content.decode('unicode_escape')\n args = (\n item[\"uuid\"],\n content,\n item[\"url\"],\n item[\"source\"],\n json.dumps(item[\"exception\"],ensure_ascii=False),\n item[\"exception_code\"],\n item[\"site\"],\n item[\"template_id\"],\n item[\"domain\"],\n item[\"classify\"],\n item[\"subclass\"]\n )\n\n tx.execute(sql, args)", "def doImport(self,textFile):\n self.loadText(textFile)\n self.getBooks()\n #self.copyBooks()\n self.genLibData()\n self.genLibCells()\n self.sortRecords()", "def autorename_shots(context):\n\n for index, shot in enumerate(context.scene.milkshake_shots):\n shot.code = f\"SH{index + 1 :02}\"\n shot.camera.name = f\"{shot.code}.CAMX.000\"\n for obj in bpy.data.objects:\n if obj.data == shot.camera:\n obj.name = shot.camera.name\n break\n core.log(f\"Renamed shot {shot.code} and camera {shot.camera.name}.\")", "def normalize_names(self):\n for node in self.asset.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for attr in ['texture', 'material', 'mesh']:\n for node in self.root.findall(\".//*[@{}]\".format(attr)):\n name = node.get(attr)\n if not name.startswith(self.name + \".\"):\n node.set(attr, self.name + \".\" + name)\n\n for node 
in self.worldbody.findall(\".//*[@name]\"):\n name = node.get(\"name\")\n if not name.startswith(self.name + \".\"):\n node.set(\"name\", self.name + \".\" + name)\n\n for node in self.worldbody.findall(\".//*[@joint]\"):\n joint = node.get(\"joint\")\n if not joint.startswith(self.name + \".\"):\n node.set(\"joint\", self.name + \".\" + name)", "def loadText(self,inName):\n reComment = re.compile(r'\\s*\\#.*')\n ins = file(inName)\n for line in ins:\n #print line,\n #--Strip spaces and comments\n line = reComment.sub('',line)\n line = line.rstrip()\n #--Skip empty/comment lines\n if not line: continue\n #--Parse line\n (libId,srcId,altId) = line.split('\\t')[:3]\n self.libList.append(libId)\n self.libMap[libId] = (srcId,altId)\n #--Done\n ins.close()", "def search_replace(filename, search, replace):\n with open(filename, 'r') as f:\n filedata = f.read()\n modified_data = re.sub(search, replace, filedata, flags=re.M)\n with open(filename, 'w') as f:\n f.write(modified_data)", "def _CMD_IMPORT(self, file_name):\n # reset inspector:\n # self.inspector = DataInspectorRecord()\n\n ext = file_name.split('.')[-1]\n if ext == 'mat':\n # self.model.from_json_dict(buff)\n self.model.from_mat_file(file_name)\n\n elif ext == 'json':\n buff = ''\n with open(file_name, 'rb') as f:\n buff = f.read()\n model = json.loads(buff)\n self.model.from_json_dict(model)\n\n else:\n raise DataExplorerError('Unsupported file format: {}'.format(ext))\n\n # update initial selection - first row:\n if len(self.model.data_list) > 0:\n self.handle_row_select([self.model.data_list[0]])", "def clearString(name):\n if name.find(\".tar\") != 0:\n name = name.replace(\".tar\", \"\")\n\n if name.find(\".gz\") != 0:\n name = name.replace(\".gz\", \"\")\n return name", "def asset_adding_panel(self, context):\r\n \r\n AM = context.window_manager.asset_m\r\n layout = self.layout\r\n box = layout.box()\r\n act_obj = context.active_object\r\n obj_list = [obj for obj in context.scene.objects if obj.select]\r\n thumbnails_path = get_directory('icons')\r\n is_subsurf = False\r\n view = context.space_data\r\n fx_settings = view.fx_settings\r\n ssao_settings = fx_settings.ssao\r\n extentions = (\".jpg\", \".jpeg\", \".png\")\r\n thumb_list = [thumb.rsplit(\".\", 1)[0] for thumb in listdir(thumbnails_path) if thumb.endswith(extentions)]\r\n \r\n if len(obj_list) >= 2:\r\n asset_name = AM.group_name\r\n \r\n else:\r\n asset_name = act_obj.name\r\n if act_obj.modifiers:\r\n for mod in act_obj.modifiers:\r\n if mod.type == 'SUBSURF':\r\n is_subsurf = True\r\n \r\n if asset_name not in thumb_list or asset_name in thumb_list and AM.replace_rename == 'replace':\r\n if asset_name in thumb_list and AM.replace_rename == 'replace':\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if multi_object:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\") \r\n \r\n else:\r\n if len(obj_list) >= 2:\r\n row = box.row()\r\n box.label(\"Choose the asset name\")\r\n box.prop(AM, \"group_name\", text = \"\")\r\n \r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"Name\")\r\n \r\n row = box.row(align = True)\r\n row.prop(AM, \"render_type\", text = \" \", expand = True)\r\n row = box.row()\r\n row.label(\"Thumbnail extention:\")\r\n row = box.row(align = True)\r\n row.prop(AM, \"thumb_ext\", expand = 
True)\r\n \r\n # ---------------------- # \r\n # RENNDER THUMBNAIL #\r\n # ---------------------- #\r\n \r\n if AM.render_type == 'render':\r\n if len(obj_list) == 1 and not is_subsurf:\r\n box.prop(AM, \"add_subsurf\", text = \"Subsurf\")\r\n box.prop(AM, \"add_smooth\", text = \"Smooth\") \r\n \r\n box.prop(AM, \"material_render\", text=\"Addon material\")\r\n \r\n # --------------------- # \r\n # OPENGL THUMBNAIL #\r\n # --------------------- #\r\n \r\n elif AM.render_type == 'opengl':\r\n row = box.row(align=True)\r\n row.operator(\"object.setup_ogl_render\", text=\"Setup OGL render\" if not \"AM_OGL_Camera\" in [obj.name for obj in context.scene.objects] else \"View camera\", icon='ZOOMIN')\r\n row.operator(\"object.remove_ogl_render\", text=\"\", icon='ZOOMOUT')\r\n row = layout.column()\r\n row = box.row(align=True) \r\n row.label(\"Background:\")\r\n row.prop(AM, \"background_alpha\", text=\"\")\r\n row = box.row(align=True)\r\n row.prop(view, \"show_only_render\")\r\n row = box.row(align=True)\r\n row.prop(view, \"use_matcap\")\r\n if view.use_matcap :\r\n row.prop(AM, \"matcap_options\", text=\"\", icon='TRIA_UP' if AM.matcap_options else 'TRIA_DOWN') \r\n if AM.matcap_options:\r\n row = box.row(align=True)\r\n row.template_icon_view(view, \"matcap_icon\")\r\n row = box.row(align=True)\r\n row.prop(fx_settings, \"use_ssao\", text=\"Ambient Occlusion\")\r\n if fx_settings.use_ssao:\r\n row.prop(AM, \"ao_options\", text=\"\", icon='TRIA_UP' if AM.ao_options else 'TRIA_DOWN') \r\n if AM.ao_options:\r\n subcol = box.column(align=True)\r\n subcol.prop(ssao_settings, \"factor\")\r\n subcol.prop(ssao_settings, \"distance_max\")\r\n subcol.prop(ssao_settings, \"attenuation\")\r\n subcol.prop(ssao_settings, \"samples\")\r\n subcol.prop(ssao_settings, \"color\")\r\n \r\n # -------------------- # \r\n # IMAGE THUMBNAIL #\r\n # -------------------- #\r\n \r\n elif AM.render_type == 'image':\r\n row = box.row(align=True)\r\n row.prop(AM, \"image_type\", text=\" \", expand=True)\r\n if AM.image_type == 'disk':\r\n box.label(\"Choose your thumbnail\")\r\n box.prop(AM, \"custom_thumbnail_path\", text=\"\")\r\n else:\r\n box.prop_search(AM, \"render_name\", bpy.data, \"images\", text=\"\") \r\n \r\n row = box.row(align=True)\r\n if len(obj_list) == 1:\r\n if (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n else:\r\n if AM.group_name and (asset_name not in thumb_list or AM.replace_rename == 'replace') and (AM.render_type in ['opengl', 'render'] or AM.render_type == 'image' and (AM.image_type == 'disk' and AM.custom_thumbnail_path or AM.image_type == 'rendered' and AM.render_name)):\r\n \r\n row.operator(\"object.add_asset_in_library\", text=\"OK\", icon='FILE_TICK') \r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')\r\n \r\n else:\r\n box.label(\"\\\" {} \\\" already exist\".format(asset_name), icon='ERROR')\r\n box.separator()\r\n row = box.row(align=True)\r\n row.prop(AM, \"replace_rename\", text=\" \", expand=True)\r\n if AM.replace_rename == 'rename':\r\n if len(obj_list) >= 2:\r\n box.prop(AM, \"group_name\", text=\"\")\r\n else:\r\n ob = context.object\r\n box.prop(ob, \"name\", text=\"\")\r\n row = box.row()\r\n row.operator(\"object.cancel_panel_choise\", text=\"Cancel\", icon='X')", 
"def fix_dataset(dir):\n print(\"Fix obj files\")\n\n r = re.compile(r'f (\\d+)[/\\d]* (\\d+)[/\\d]* (\\d+)[/\\d]*')\n\n path = os.path.join(dir, \"*.obj\")\n files = sorted(glob.glob(path))\n\n c = 0\n for i, f in enumerate(files):\n with open(f, \"rt\") as x:\n y = x.read()\n yy = r.sub(r\"f \\1 \\2 \\3\", y)\n if y != yy:\n c += 1\n with open(f, \"wt\") as x:\n x.write(yy)\n print(\"{}/{} {} fixed \".format(i + 1, len(files), c), end=\"\\r\")", "def test_replacements_applied_before_force_name():\n\n conf = r\"\"\"\n {\"always_rename\": true,\n \"select_first\": true,\n\n \"force_name\": \"Scrubs\",\n\n \"input_filename_replacements\": [\n {\"is_regex\": true,\n \"match\": \"S01E02 - \",\n \"replacement\": \"\"}\n ]\n }\n \"\"\"\n\n out_data = run_tvnamer(\n with_files = ['S01E02 - Some File.avi'],\n with_config = conf)\n\n expected_files = ['S01E02 - Some File.avi']\n\n verify_out_data(out_data, expected_files, expected_returncode = 2)", "def merge_nonjunk_into_new_name(self, event=None):\n # Delete all original names\n aid_list = self.all_aid_list\n aid_list_filtered = ut.filterfalse_items(\n aid_list, self.ibs.get_annot_isjunk(aid_list)\n )\n # Rename annotations\n self.ibs.set_annot_names_to_same_new_name(aid_list_filtered)\n self.update_callback()\n self.backend_callback()\n self.show_page()", "def restore_names(input_file, output_file):\n\n if not dataModel.loadModel(input_file):\n print(\"Couldn't open input file\")\n return 1\n\n model = dataModel.getModel()\n\n restore_names_in(model.getCompartments())\n restore_names_in(model.getMetabolitesX())\n restore_names_in(model.getModelValues())\n restore_names_in(model.getReactions())\n restore_names_in(model.getEvents())\n\n dataModel.saveModel(output_file, True)\n\n return 0", "def _rename_assets_identifiers(write_cursor: 'DBCursor') -> None:\n log.debug('Enter _rename_assets_identifiers')\n write_cursor.execute('SELECT identifier FROM assets')\n old_id_to_new = {}\n for (identifier,) in write_cursor:\n # We only need to update the ethereum assets and those that will be replaced by evm assets\n # Any other asset should keep the identifier they have now.\n if identifier.startswith(ETHEREUM_DIRECTIVE):\n old_id_to_new[identifier] = evm_address_to_identifier(\n address=identifier[ETHEREUM_DIRECTIVE_LENGTH:],\n chain_id=ChainID.ETHEREUM,\n token_type=EvmTokenKind.ERC20,\n )\n elif identifier in OTHER_EVM_CHAINS_ASSETS:\n old_id_to_new[identifier] = OTHER_EVM_CHAINS_ASSETS[identifier]\n\n sqlite_tuples = [(new_id, old_id) for old_id, new_id in old_id_to_new.items()]\n log.debug('About to execute the asset id update with executemany')\n write_cursor.executemany('UPDATE assets SET identifier=? 
WHERE identifier=?', sqlite_tuples) # noqa: E501\n log.debug('Exit _rename_assets_identifiers')", "def update_name(name, mapping):\n m = street_type_re.search(name)\n if m:\n street_type = m.group()\n for key, value in mapping.iteritems():\n if street_type == key:\n name = name.replace(key,value)\n\n return name", "def changeFilenames(reduced, path, input):\n file = path + '/' + input\n data = open(file, 'r').readlines()\n os.remove(file)\n\n fh = open(file, 'w')\n for line in data:\n if 'includegraphics' in line:\n for key in reduced:\n if key in line:\n new = line.replace(key, reduced[key])\n fh.write(new)\n\n logging.debug('Changed {0:>s} to {1:>s} '.format(line, new))\n else:\n fh.write(line)\n\n fh.close()", "def replace_rmap_text(rmapping, new_filename, old_text, new_text, *args, **keys):\n log.info(\"Replacing\", srepr(old_text), \"with\", srepr(new_text), \"in\",\n srepr(rmapping.basename), \"to\", srepr(new_filename))\n original_rmap = str(rmapping)\n new_rmap = original_rmap.replace(old_text, new_text)\n new_mapping = ReferenceMapping.from_string(new_rmap, ignore_checksum=True)\n new_mapping.write(new_filename)", "def load_mesh(name):\n if name[-4:] == \".obj\":\n bpy.ops.import_scene.obj(filepath=name)\n mesh_name = (os.path.basename(name)).replace('.obj','')\n return mesh_name\n else:\n raise ValueError(\"{} not an obj file\".format(name))", "def search_source(self,strz):\n\t\tfor src in sources_rip: #sources_rip = list of allow source words\n\t\t\tif src in strz:\n\t\t\t\tself.src_rip=src.replace(\".\",\"\")\n\t\t\t\treturn strz.replace(src,\"\")\n\t\treturn strz", "def __init__(self, name_map):\n self.name_map = name_map\n self.read = gpd.read_file(self.name_map)\n self.read = self.read.replace({self.read[\"NOMBRE_DPT\"][32]:'SAN ANDRES',\n self.read[\"NOMBRE_DPT\"][2]:'BOGOTA'})\n self.ordenado = ordenar(self.read,\"NOMBRE_DPT\")", "def localize_objects(self,path):\n\t\tfrom google.cloud import vision\n\t\t\n\t\tclient = vision.ImageAnnotatorClient()\n\n\t\twith open(path, 'rb') as image_file:\n\t\t\tcontent = image_file.read()\n\t\timage = vision.types.Image(content=content)\n\n\t\tobjects = client.object_localization(\n\t\t\timage=image).localized_object_annotations\n\t\treturn objects\n\n\t\t#print('Number of objects found: {}'.format(len(objects)))\n\t\t#for object_ in objects:\n\t\t#\tprint('\\n{} (confidence: {})'.format(object_.name, object_.score))\n\t\t#\tprint('Normalized bounding polygon vertices: ')\n\t\t#\tfor vertex in object_.bounding_poly.normalized_vertices:\n\t\t#\t\tprint(' - ({}, {})'.format(vertex.x, vertex.y))", "def rename_fields(all_data):\n\tfield_map = load_json('field_mapping.json', fdir=os.path.join('data', 'archived_data'))\n\tfor old_field in all_data.keys():\n\t\ttmp_vals = pd.Series(all_data[old_field].values, index=all_data.index)\n\t\tall_data = all_data.drop(old_field, 1)\n\t\tif old_field in field_map:\n\t\t\tnew_field = field_map[old_field]\n\t\t\tall_data[new_field] = tmp_vals\n\treturn all_data", "def import_from_file(jamsite, source='jammers.csv', fieldnames=None):\n\t# import jammers.csv\n\twith open(source) as csvfile:\n\t\tjamsite.mergeinsert( import_jammers(csvfile, fieldnames=fieldnames) )", "def replace_in_file(file_path, find, replace):\n\tcontent = read_file(file_path)\n\tcontent = content.replace(find, replace)\n\twrite_file(file_path, content)", "def parse_names(lines, oti_file_name):\n print \" * Parsing names\"\n # Read the real texture file names form the file.\n real_names = []\n if os.path.isfile(oti_file_name):\n with 
open(oti_file_name, \"rU\") as oti_fd:\n real_names = oti_fd.read().splitlines()\n\n names = {}\n for i, line in enumerate(lines):\n name = \".\"\n if i < len(real_names):\n name = real_names[i]\n names[\"%s\" % i] = {\"alias\": line, \"name\": name}\n return names", "def _handle_import(contents, use_tags, owner):\n \n lines = contents.decode(\"utf-8\").split(\"\\n\")\n \n title = re.compile(r\"<a.*?>(.+?)</a>\", re.I)\n url = re.compile(r\"\"\"<a.*href=['\"](.+?)['\"]\"\"\", re.I)\n tags = re.compile(r\"\"\"<a.*?tags=[\"'](.+?)[\"']\"\"\", re.I)\n addTime = re.compile(r\"\"\"<a.*?add_date=[\"'](\\d+?)[\"']\"\"\", re.I)\n \n for l in lines:\n if \"<a\" in l.lower() and \"</a>\" in l.lower():\n bookmark = {}\n \n bookmark[\"title\"] = title.search(l)\n if not bookmark[\"title\"]:\n continue\n bookmark[\"title\"] = _unescape(bookmark[\"title\"].group(1))\n \n bookmark[\"url\"] = url.search(l)\n if not bookmark[\"url\"]:\n continue\n bookmark[\"url\"] = _unescape(bookmark[\"url\"].group(1))\n \n bookmark[\"tags\"] = [];\n if use_tags:\n result = tags.search(l)\n if result:\n bookmark[\"tags\"] = map(_unescape, result.group(1).split(\",\"))\n \n bookmark[\"added\"] = addTime.search(l)\n if bookmark[\"added\"]:\n bookmark[\"added\"] = bookmark[\"added\"].group(1)\n \n if not Bookmark.objects.filter(owner=owner, url=bookmark[\"url\"]).exists():\n bm = Bookmark(owner=owner, url=bookmark[\"url\"], title=bookmark[\"title\"])\n \n bm.save()\n if bookmark[\"added\"]:\n bm.added = datetime.datetime.fromtimestamp(int(bookmark[\"added\"]))\n \n for t in bookmark[\"tags\"]:\n bm.tag(t)\n \n bm.save()\n bm.autotag_rules()", "def convertAssetData(dataPath, data):\n\tif os.path.isfile(os.path.join(dataPath, 'icData.py')):\n\t\tsys.path.append(dataPath)\n\t\timport icData\n\t\treloadModule(icData)\n\t\tsys.path.remove(dataPath)\n\n\t\t# Store values\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'assetRootDir', icData.assetRootDir)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'assetPblName', icData.assetPblName)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'asset', icData.asset)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'assetType', icData.assetType)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'assetExt', icData.assetExt)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'version', icData.version)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'requires', icData.requires)\n\t\texcept AttributeError:\n\t\t\tpass\n\t\ttry:\n\t\t\tdata.set_attr('asset', 'compatible', icData.compatible)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\t# Parse notes field\n\t\ttry:\n\t\t\tnotesLegacy = icData.notes.rsplit('\\n\\n', 1)\n\t\t\tnotes = notesLegacy[0]\n\t\t\tnotesFooter = notesLegacy[1].split(' ', 1)\n\t\t\tusername = notesFooter[0]\n\t\t\ttimestamp = notesFooter[1]\n\n\t\t\tdata.set_attr('asset', 'notes', notes)\n\t\t\tdata.set_attr('asset', 'user', username)\n\t\t\tdata.set_attr('asset', 'timestamp', timestamp)\n\t\texcept AttributeError:\n\t\t\tpass\n\n\t\t# Save XML\n\t\tif data.save():\n\t\t\tverbose.message(\"Successfully converted legacy asset data to XML.\")\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\telse:\n\t\tverbose.print_(\"Cannot convert settings: asset data not found.\", 4)\n\t\treturn False", "def re_obj_sub(objects):\n\n def func(match, create_new=False, *args, **kwargs):\n \"\"\"Processing 
match objects. Replacing objects GUIDs.\n \"\"\"\n\n data = match.group(0)\n\n if create_new:\n dash = data.replace(\"_\", \"-\") if \"_\" in data else data\n\n if dash not in objects:\n new = gen_guid()\n objects[dash] = new\n objects[dash.replace(\"-\", \"_\")] = new.replace(\"-\", \"_\")\n\n if data in objects:\n return (objects[data], True)\n\n return (data, False)\n\n return func", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def _internal_method(all_assets, asset_idx):\n if asset_idx is None:\n raise ItemNotFoundError(asset_key)\n\n # Form an AssetMetadata.\n mdata = AssetMetadata(asset_key, asset_key.path)\n mdata.from_storable(all_assets[asset_idx])\n mdata.update(attr_dict)\n\n # Generate a Mongo doc from the metadata and update the course asset info.\n all_assets.insert_or_update(mdata)\n return all_assets", "def addTextureToOcc(self):\n\t\tshas = self._getShapes()\n\t\tfname, _ = QtGui.QFileDialog.getOpenFileName(self, 'Open file ',\n\t\t\t\t\t\t\t\t\t\t\t\t\t'/home')\n\t\tif fname:\n\t\t\tfor sha in shas:\n\t\t\t\t#get texture Path\n\t\t\t\tif not sha.a.texture_Occ.exists:\n\t\t\t\t\toccText = sha.a.texture_Occ.add( dt='string' )\n\t\t\t\tsha.a.texture_Occ.v = fname", "def combine_with_assets(coins):\n \n with open('index_assets.json', encoding='utf-8') as f:\n \n data = json.loads(f.read())\n \n for coin in coins:\n symbol = coin['symbol']\n original_symbol = symbol\n\n if coin['gecko_id'] in [asset['gecko_id'] for asset in data.values()]:\n continue\n\n print('got a new coin: {}'.format(coin))\n\n counter = 1\n while symbol in data:\n print('symbol {} repeats !'.format(symbol))\n symbol = original_symbol + \"_\" + str(counter)\n counter += 1\n\n data[symbol] = {'gecko_id': coin['gecko_id'], 'symbol': coin['symbol']}\n if 'logo' in coin:\n data[symbol]['logo'] = coin['logo']\n else:\n print('could not find image')\n\n\n\n with open('index_assets.json', 'w', encoding='utf-8') as f:\n f.write(json.dumps(data))\n print('wrote to file')", "def rewrite_importlib_metadata(pkg_files, new_root):\n for file in pkg_files.glob('*.py'):\n text = file.read_text().replace('typing_extensions', '..typing_extensions')\n text = text.replace('import zipp', 'from .. 
import zipp')\n file.write_text(text)", "def update_object(self, name: str) -> None:\n try:\n object = Object.from_name(name)\n except Object.NotFound:\n record = self.catalog.get(name) # must be name pattern recognized by catalog\n log.info(f'Creating new object for {name}')\n Object.add({'type_id': self.__get_type_id(record), 'aliases': self.__get_names(record),\n 'ra': record.ra, 'dec': record.declination, 'redshift': record.redshift,\n 'data': {'tns': record.to_json()}})\n else:\n # find best alternate identifier for catalog search\n for provider in ('iau', 'ztf', 'atlas'): # preferred ordering\n if provider in object.aliases:\n if name != object.aliases[provider]:\n log.debug(f'Searching with name {object.aliases[provider]} <- {name}')\n name = object.aliases[provider]\n break\n else:\n raise TNSError(f'Object ({name}) not found in catalog')\n record = self.catalog.get(name)\n self.__ensure_iau_pattern(record.name)\n if info := self.__build_info(object, record):\n Object.update(object.id, **info)\n else:\n log.info(f'No changes found for {name}')", "def test_update_asset_content(self):\n pass", "def import_project_dump(self, key):", "def update(src):", "def importShaders(self, namespace=':'):\n self.logger.info(\"Import Shaders\")\n\n if self.data['abcShadersAttr']:\n\n abcfile = self.data['abcShadersAttr']\n \n # shotgun query for maya file\n mayafile = find_shader_package_from_shader_file(file_path=abcfile, file_type='ma')\n if mayafile != {}:\n mayafile = mayafile['ma']\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n localfile = abcfile.replace('.abc', '.ma')\n if os.path.isfile(localfile):\n mayafile = localfile\n self.logger.debug(\"Found maya shader file: %s\" % mayafile)\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n\n if os.path.isfile(mayafile):\n try: \n imported_shaders = cmds.file(mayafile, i=True, returnNewNodes=True, renameAll=True, mergeNamespacesOnClash=True, namespace=namespace)\n self.setAttr(\"abcShaders\", \"\")\n self.logger.debug(\"Imported under %s namespace\" % namespace)\n\n # reset selection back to alembicHolder\n cmds.select(self.data['shapeNode'])\n self.logger.info(\"Imported : %s\" % self.data['abcShadersAttr'])\n return True\n\n except Exception, e:\n self.logger.error(\"Import Json Error : %s\" % e)\n return False\n else:\n self.logger.error(\"Missing file : %s\" % self.data['abcShadersAttr'])\n return False\n else:\n self.logger.info(\"Empty attribute : %s.abcShadersAttr\" % self.data['shapeNode'])\n return False", "def replace(self, pat, repl):\n re_pat = re.compile(pat)\n for infilename in self.file_names:\n infile = open(infilename, 'r')\n for line in infile:\n line = line.rstrip()\n line1 = re_pat.sub(repl, line)\n if line1 != line:\n print 'Repl: %s' % (line1, )", "def importMesh(self, name, file, mtype, material, **args):\n args = dictToTuple(**args)\n\n if not self.rank:\n logging.info('Importing mesh from {}'.format(file))\n\n self.lmp.command('fix {} all {} file {} type {} '.format(name, mtype, file, material) + ('{} ' * len(args)).format(*args))", "def _load_data(self):\n\n def __correct_car_make(car_make):\n \"\"\" Corrects given make names to a standard make name. 
\"\"\"\n ## define model corrections\n correct_makes = {\n 'chevroelt': 'chevrolet',\n 'chevy': 'chevrolet',\n 'maxda': 'mazda',\n 'mercedes-benz': 'mercedes',\n 'toyouta': 'toyota',\n 'vokswagen': 'volkswagen',\n 'vw': 'volkswagen'\n }\n ## return corrected make\n return correct_makes[car_make] if car_make in correct_makes.keys() else car_make\n\n logger.debug('checking auto-mpg.data.txt')\n if not path.exists('auto-mpg.data.txt'):\n ## file not present, get it\n logger.debug('getting auto-mpg.data.txt')\n self._get_data()\n if not path.exists('auto-mpg.clean.txt'):\n ## file not present, clean it\n self._clean_data()\n \n ## we got the data and we cleaned it\n logger.debug('checking auto-mpg.clean.txt')\n try:\n with open('auto-mpg.clean.txt', 'r') as clean_data:\n logger.debug('auto-mpg.clean.txt exists')\n ## counter for auto objects\n counter = 0\n logger.debug('Parsing auto-mpg.clean.txt into AutoMPG objects')\n for auto_record in csv.reader(clean_data, delimiter= ' ', skipinitialspace= True):\n ## split the car name into 2 tokens\n split = auto_record[8].replace('\\'', '').split(' ', 1)\n ## handle the case for 'subaru'\n if len(split) < 2:\n make = f'{split[0]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), '')\n elif len(split) == 2:\n make = f'{split[0]}'\n model = f'{split[1]}'\n auto = Record(auto_record[0], auto_record[6], __correct_car_make(make), model)\n counter += 1\n ## append the auto object\n self.data.append(AutoMPG(auto.make, auto.model, auto.year, auto.mpg))\n except Exception as e:\n logger.info(f'Error occurred: {e}')", "def filter_imports(imports):\n import_list = imports.split(\"\\n\")\n import_list = filter(lambda line: \"./\" not in line and line.strip(\" ,\\n\")\n not in relations + [\"Index\"], import_list)\n return \"\\n\".join(import_list)", "def unpack_rename_data(self):\n for old, new in self.f_name_map.items():\n try:\n read_path = Path(os.environ[\"DATA_PATH\"]) / old\n write_path = Path(os.environ[\"DATA_PATH\"]) / new\n if \"ZIP\" in old:\n shutil.unpack_archive(read_path, write_path, \"zip\")\n else:\n shutil.unpack_archive(read_path, write_path)\n read_path.unlink() # delete original file\n except OSError:\n print(f\"Did not unpack {read_path}\")", "def create_obj(destination,mtl_name):\r\n\tshutil.copyfile(\"file_cube.obj\",destination)\r\n\tf=open(destination,\"r\")\r\n\tlines=f.readlines()\r\n\tlines[0]=\"mtllib \"+mtl_name+\"\\n\"\r\n\tf.close()\r\n\tf=open(destination,\"w\")\r\n\tf.writelines(lines)\r\n\tf.close()", "def _process_infores(source: str) -> str:\n # don't touch something that already looks like an infores CURIE\n if source.startswith(\"infores:\"):\n return source\n\n if self.filter:\n infores = self.filter.sub(self.substr, source)\n else:\n infores = source\n infores = self.prefix + \" \" + infores\n infores = infores.strip()\n infores = infores.lower()\n infores = re.sub(r\"\\s+\", \"_\", infores)\n infores = re.sub(r\"\\.+\", \"_\", infores)\n infores = re.sub(r\"[\\W]\", \"\", infores)\n infores = re.sub(r\"_\", \"-\", infores)\n\n infores = \"infores:\" + infores\n return infores", "def remap_buildings(request):\n body = json.loads(request.body)\n import_file_id = body.get('file_id')\n if not import_file_id:\n return {'status': 'error', 'message': 'Import File does not exist'}\n\n return remap_data(import_file_id)", "def fix_fasta(database_names):\n for file in database_names:\n file_mod = file.replace(\".fasta\", \"_mod.fasta\")\n with open(file, 'r') as f:\n lines = f.readlines()\n new_lines = 
[]\n for line in lines:\n if '|' in line and \">\" not in line:\n # we replace spaces in header line with \"__\"\n # so I can manipulate that later as biopython doesn't\n # like \"__\"\n new_line = \">\"+line.replace(\" \", \"__\")\n new_lines.append(new_line)\n else:\n new_lines.append(line)\n with open(file_mod, 'w') as f:\n for line in new_lines:\n f.write(line)", "def importFile(self):\n\n ## Backing up old CSV and JSON files before beginning import operations\n if os.path.isfile(\"text_files/customers.csv\") and os.path.isfile(\"text_files/customers.json\"):\n print(\"\\nCreating a backup of the existing customer .csv and .json files before overwriting\")\n shutil.copy2(\"text_files/customers.csv\", \"text_files/customers.csv.backup\" + str(time.time()))\n shutil.copy2(\"text_files/customers.json\", \"text_files/customers.json.backup\" + str(time.time()))\n\n ## Importing the text file for cleaning then converting to CSV\n input_file = open(\"text_files/customer_export.txt\", \"r\")\n output_file = open(\"text_files/customers.csv\", \"w\")\n\n ## A loop to clean and write the customer_export txt file to a CSV\n for line in input_file:\n clean_text = \"\"\n check_line = line.replace(\"#\", \"\").replace(\",,\",\"\").split(\"|\")\n for line in check_line:\n if line != check_line[10]:\n clean_text += line + \",\"\n elif line == check_line[10]:\n clean_text += line + \"\\n\"\n output_file.write(clean_text)\n\n ## Closing TXT file and CSV file after formatting\n input_file.close()\n output_file.close()\n\n ## Opening the cleaned CSV file for conversion to Json\n with open('text_files/customers.csv') as clean_csv:\n ## Converting CSV file to Json\n converted = csv.DictReader(clean_csv)\n rows = list(converted)\n\n ## Writing converted CSV to Json file\n with open('text_files/customers.json', 'w') as convert:\n json.dump(rows, convert)\n\n ## Deleting all data currently in database before importing new file\n db_connection.executeQuery(\"DELETE FROM CRM;DBCC CHECKIDENT ('CRM', RESEED, 0) DELETE FROM Mailings; DBCC CHECKIDENT ('Mailings', RESEED, 0) COMMIT\") \n\n ## Loading the newly created Json file\n with open(\"text_files/customers.json\") as customers_json:\n customers = json.load(customers_json)\n\n ## A loop to add the contents of the Json file to the database \n print(\"Writing imported file to database please wait...\")\n for key in customers:\n db_connection.executeQuery(\"INSERT INTO dbo.CRM (f_name, l_name, company, address, city, county, state, zip, primary_phone, secondary_phone, email_address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"address\"] + \"', '\" + key[\"city\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"county\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"state\"] + \"', '\" + str(key[\"zip\"]) + \"', '\" + key[\"phone1\"] + \"', '\" + key[\"phone2\"] + \"' , '\" + key[\"email\"] + \"'); COMMIT\")\n db_connection.executeQuery(\"INSERT INTO dbo.Mailings (name, company, address) VALUES ('\" + key[\"first_name\"].replace(\"\\'\", \"\\'\\'\") + \" \" + key[\"last_name\"].replace(\"\\'\", \"\\'\\'\") + \"', '\" + key[\"company_name\"].replace(\"\\'\", \"\\'\\'\") + \"','\" + key[\"address\"] + \" \" + key[\"city\"] + \" \" + key[\"county\"] + \" \" + key[\"state\"] + \" \" + str(key[\"zip\"]) + \"'); COMMIT\") \n\n print(\"\\nFinished writing to file. 
Returning to main menu...\")", "def extra_object_files(self):", "def reload(self):\n try:\n with open(self.__file_path, 'r') as f:\n for key, value in json.load(f).items():\n self.__objects[key] = eval(key.split('.')[0])(**value)\n except FileNotFoundError:\n pass", "def substitute(files: str, pattern: str, replacement: str):\n with fileinput.input(\n files=glob.glob(files, recursive=True), inplace=True\n ) as file:\n for line in file:\n print(re.sub(pattern, replacement, line), end='')", "def replace(mapping_dic, f_tr):\n f_opt_tr = f_tr.replace('.tr', '.opt.tr')\n f_tr = open(f_tr)\n f_opt_tr = open(f_opt_tr, 'w')\n for line in f_tr:\n if line.startswith('module: '):\n tokens = line.split(' ')\n module_name = tokens[1]\n mapping = mapping_dic[module_name].split()\n d = {}\n i = 0\n for item in mapping:\n d[int(item)] = i\n i += 1\n if 'SRC' not in line:\n f_opt_tr.write(line)\n continue\n line = re.sub(r'SRC: (\\d+)', lambda match: 'SRC: '+str(d.get(int(match.group(1)), 'SOMETHING WENT WRONG' + match.group(1))), line)\n line = re.sub(r'DST: (\\d+)', lambda match: 'DST: '+str(d.get(int(match.group(1)), 'SOMETHING WENT WRONG' + match.group(1))), line)\n f_opt_tr.write(line)" ]
[ "0.61271197", "0.5805997", "0.5651167", "0.5605872", "0.5587787", "0.5498569", "0.54969203", "0.53437096", "0.53432715", "0.5332981", "0.532139", "0.53159696", "0.5253007", "0.52161473", "0.52138823", "0.51546794", "0.5153983", "0.5098517", "0.5057885", "0.5048195", "0.5045116", "0.5001853", "0.4977753", "0.49767575", "0.49727008", "0.49699062", "0.4966435", "0.49655357", "0.49627945", "0.4955837", "0.49294594", "0.4907502", "0.48994133", "0.48933184", "0.48915648", "0.48821953", "0.48812857", "0.4868981", "0.48601022", "0.4853622", "0.4849398", "0.48492938", "0.48458275", "0.4843127", "0.48202983", "0.48141035", "0.48120654", "0.4809689", "0.4797167", "0.47960716", "0.47950462", "0.47884753", "0.47866958", "0.47667092", "0.47639453", "0.47611368", "0.4759912", "0.4757082", "0.47564006", "0.47526392", "0.4750331", "0.47492102", "0.47486287", "0.47479355", "0.47459093", "0.47374958", "0.47340602", "0.4714758", "0.47140342", "0.47127876", "0.47113466", "0.47104123", "0.47089636", "0.47080725", "0.47079748", "0.47068647", "0.4705064", "0.47035128", "0.47026455", "0.47007108", "0.46990365", "0.46987152", "0.46967164", "0.46963868", "0.4695945", "0.4695769", "0.46875373", "0.46823838", "0.467827", "0.46755508", "0.4675427", "0.46751294", "0.46732193", "0.46630535", "0.4654025", "0.46527094", "0.46519434", "0.46501952", "0.46501115", "0.46435034" ]
0.67480993
0
import master settings from data file
def importMasterSettings(self):
	pickleData = pickle.load( open( self.masterPath.path, "rb" ) )
	master = rlayer.RenderLayer( 'defaultRenderLayer' )
	master.makeCurrent()
	for a in pickleData.keys():
		try:
			a.v = pickleData[a]
		except:
			continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_settings(self, config):\n user = config.get(self.bot.client.host, 'master')\n # Replace old master\n if ':master' in self.users and user != self.users[':master']:\n self.users[self.users[':master']]['rank'] = 'none'\n if not user in self.users:\n self.users[user] = User('master', datetime.datetime.now(), None,\n hash_password(user, user))\n if not ':master' in self.users or self.users[':master'] != user:\n self.users[':master'] = user\n self.users[':new_master'] = True", "def load_settings_from_cli():\n load_user_from_cli()\n load_local_contacts()", "def load_new_data():\n require('settings', provided_by=[production, staging])\n \n maintenance_up()\n load_data()\n maintenance_down()", "def loadSettings(self, filename='short_240.settings'):\n global master_run_no\n self.settingsFilename = filename\n # print 'self.settingsFilename = ', self.settingsFilename\n if os.path.exists(filename):\n stream = open(filename, 'r')\n else:\n stream = open(master_lattice_location+filename, 'r')\n self.settings = yaml.load(stream, Loader=yaml.UnsafeLoader)\n self.globalSettings = self.settings['global']\n master_run_no = self.globalSettings['run_no'] if 'run_no' in self.globalSettings else 1\n self.fileSettings = self.settings['files']\n elements = self.settings['elements']\n self.groups = self.settings['groups'] if 'groups' in self.settings and self.settings['groups'] is not None else {}\n stream.close()\n\n # for name, elem in list(self.groups.items()):\n # group = globals()[elem['type']](name, self.elementObjects, **elem)\n # self.groupObjects[name] = group\n\n for name, elem in list(elements.items()):\n self.read_Element(name, elem)\n\n # for name, lattice in list(self.fileSettings.items()):\n # self.read_Lattice(name, lattice)", "def load_measurement_settings_file():\n\n # First update the settings that the state machine is up to date\n self.variables.ui_plugins[\"Settings_window\"].load_new_settings()\n\n fileDialog = QFileDialog()\n file = fileDialog.getOpenFileName()\n\n if file[0]:\n file = open(str(file[0]), \"r\")\n dict = yaml.load(file)\n file.close()\n\n # l.info(\"Loaded new measurement settings file: \" + str(file[0]))\n self.variables.default_values_dict[\"settings\"].update(\n dict\n ) # Updates the values of the dict, it either updates the values or adds them if not incluced\n self.variables.ui_plugins[\"Settings_window\"].configure_settings()", "def load_data_conf(self):\n data_file = select_file(os.getcwd())\n if data_file is not None:\n self.load_tab(data_file)\n else:\n msg_window('please select valid data config file')", "def load(self):\n if not path.isfile(self.SETTINGS_FILE):\n return\n data = load_json_from_disk(self.SETTINGS_FILE)\n for (key, value) in data.items():\n self.__dict__[key] = value", "def update_from_file(self):\n config_path = os.environ.get('MINDINSIGHT_CONFIG', '')\n if not config_path:\n return\n\n config_module = None\n\n # python:full.path.for.config.module\n if config_path.startswith('python:'):\n config_module = import_module(config_path[len('python:'):])\n\n # file:full/path/for/config.py\n elif config_path.startswith('file:'):\n config_path = config_path[len('file:'):]\n module_name = '__mindinsightconfig__'\n config_module = types.ModuleType(module_name)\n machinery = import_module('importlib.machinery')\n loader = machinery.SourceFileLoader(module_name, config_path)\n loader.exec_module(config_module)\n\n if config_module is None:\n return\n\n for setting in dir(config_module):\n if setting.isupper() and setting in self._default_settings:\n 
setting_value = getattr(config_module, setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)", "def load_settings(self):\n\n self.std = settings.settings", "def load(filename):\n conf = CommonConfig.get()\n conf.update(toml.load(filename))\n return conf", "def load_settings(self, outfile='settings.p'):\n settings = pickle.load(open(path,'rb'))\n self.__dict__.update(settings)", "def set_master_table(filepath):\n my_globals['master_table_path'] = filepath\n my_globals['master_table_data'] = None", "def load_from_file(self):\n if not os.path.exists(self.settings_file):\n return\n \n with open(self.settings_file, 'rb') as settings_file:\n try:\n options = json.load(settings_file)\n \n if self._settings_coordinate(options):\n self.options = options\n except:\n self.load_default()", "def import_config(self):\n # Get the config file\n import config\n\n # Get all keys from keyvalue pairs in the config file\n settingsFromConfigFile = [x for x in dir(config) if not x.startswith('__')]\n\n # Convert config file into dict\n for key in settingsFromConfigFile:\n value = getattr(config, key)\n self.config[key] = value\n\n # Settings validation: specify keys which are valid settings\n # If there are rows in the config file which are not listed here, an\n # error will be raised\n validSettings = {\n 'data_dir',\n 'running_data_dir',\n 'unison_log_dir',\n 'unisonctrl_log_dir',\n 'log_file',\n 'make_root_directories_if_not_found',\n 'sync_hierarchy_rules',\n 'unison_local_root',\n 'unison_remote_root',\n 'unison_path',\n 'global_unison_config_options',\n 'unison_remote_ssh_conn',\n 'unison_remote_ssh_keyfile',\n 'unison_local_hostname',\n 'unison_home_dir',\n 'unison_user',\n 'webhooks',\n 'rotate_logs',\n }\n\n # If a setting contains a directory path, add it's key here and it will\n # be sanatized (whitespace and trailing whitespaces stripped)\n settingPathsToSanitize = {\n 'data_dir',\n 'unison_home_dir',\n 'running_data_dir',\n 'unison_log_dir',\n 'unisonctrl_log_dir',\n }\n\n # Values here are used as config values unless overridden in the\n # config.py file\n defaultSettings = {\n 'data_dir': '/tmp/unisonctrl',\n 'log_file': '/dev/null',\n 'make_root_directories_if_not_found': True,\n 'unison_path': '/usr/bin/unison', # Default ubuntu path for unison\n 'unison_remote_ssh_keyfile': \"\",\n 'unison_local_hostname': platform.node(),\n 'running_data_dir': self.config['data_dir'] + os.sep + \"running-sync-instance-information\",\n 'unison_log_dir': self.config['data_dir'] + os.sep + \"unison-logs\",\n 'unisonctrl_log_dir': self.config['data_dir'] + os.sep + \"unisonctrl-logs\",\n 'unison_user': getpass.getuser(),\n 'rotate_logs': \"time\",\n }\n\n # TODO: Implement allowedSettings, which force settings to be\n # in a given list of options\n\n # Apply default settings to fill gaps between explicitly set ones\n for key in defaultSettings:\n if (key not in self.config):\n self.config[key] = defaultSettings[key]\n\n # Ensure all required keys are specified\n for key in validSettings:\n if (key not in self.config):\n raise LookupError(\"Required config entry '\" + key + \"' not specified\")\n\n # Ensure no additional keys are specified\n for key in self.config:\n if (key not in validSettings):\n raise LookupError(\"Unknown config entry: '\" + key + \"'\")\n\n # Sanatize directory paths\n for key in settingPathsToSanitize:\n self.config[key] = self.sanatize_path(self.config[key])\n\n # If you reach here, configuration was read and imported without error\n\n return True", "def 
load(name):\n\n update(settings.all())\n\n config_specific_settings = _config.pop('config', None) or {}\n if name:\n if name not in names():\n errors.string_exit('config {} not found in .ssha file'.format(name))\n if name in config_specific_settings:\n update(config_specific_settings[name])\n add('config.name', name)\n\n if not _get('ssh.username'):\n add('ssh.username', '$(whoami)')\n\n if _get('bastion') and not _get('ssh.proxy_command'):\n add('ssh.proxy_command', 'ssh -W %h:%p ${bastion.address}')\n\n iam_group_specific_settings = get('iam.group')\n if iam_group_specific_settings:\n from . import iam\n for group in iam.groups():\n if group in iam_group_specific_settings:\n update(iam_group_specific_settings[group])", "def read_settings(self):\n self.settings = read_settings(self.settings_path)", "def load(self,fileName,doSave=True):\n #--Load masters\n modFileNames = self.keys()\n for master,size in self[fileName].tes3.masters:\n if master in modFileNames and master != fileName:\n self.load(master,False)\n #--Load self\n mwIniFile.load(fileName,doSave)", "def apply_config(filename):\n with open(filename) as config_file:\n config = json.load(config_file)\n for setting, value in config.items():\n CoreConfig.__dict__[setting] = value", "def merge_into_settings(self, settings):\n if not self._meta_dict:\n self._load_from_file()\n\n settings.chat_name = self._meta_dict[DumpMetadata.CHAT_NAME]\n settings.last_message_id = self._meta_dict[DumpMetadata.LAST_MESSAGE_ID]\n settings.exporter = self._meta_dict[DumpMetadata.EXPORTER]", "def import_db(import_file):\n import_data(import_file)", "def add_settings_early(self):\n\n # config settings\n config = {\n # some generic settings for every site, to point to location of some stuff\n mconst.DEF_SETTINGNAME_pkgdirimps_sitempacks: [pkgdirimp_sitempacks],\n mconst.DEF_SETTINGNAME_controllerroot: pkgdirimp_controllers,\n mconst.DEF_SETTINGNAME_sitefilepath: misc.calc_modulefiledirpath(__file__),\n # should we also load mewlo site installed setuptools plugins\n mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks: True,\n mconst.DEF_SETTINGNAME_replaceshadowpath: '${sitefilepath}/replaceshadow',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # Name of site\n mconst.DEF_SETTINGNAME_sitename: 'Mewlo',\n # Specify where this site serves from\n # these siteurls should not end in / so if you are serving a site at root just use relative of '' and absolute of 'http://sitename.com'\n mconst.DEF_SETTINGNAME_siteurl_relative: '',\n mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080',\n #mconst.DEF_SETTINGNAME_siteurl_relative: '/public/publicity',\n #mconst.DEF_SETTINGNAME_siteurl_absolute: 'http://127.0.0.1:8080/public/publicity',\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n # config settings\n config = {\n # online status information\n mconst.DEF_SETTINGNAME_isenabled: True,\n mconst.DEF_SETTINGNAME_isonline: True,\n mconst.DEF_SETTINGNAME_offline_mode: 'maintenance',\n mconst.DEF_SETTINGNAME_offline_message: 'We are down for leap-year maintenance; we will be back soon.',\n mconst.DEF_SETTINGNAME_offline_allowadmin: False,\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_config, config)\n\n\n\n # extension pack config -- we need to explicitly enable plugins\n packconfig = {\n 'mouser.mewlotestplug' : {\n 'isenabled': False,\n },\n 'mouser.testpack' : {\n 'isenabled': False,\n },\n 'mewlo.siteaddon.account' : {\n 'isenabled': True,\n },\n 
'mewlo.siteaddon.group' : {\n 'isenabled': True,\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_packs, packconfig)\n\n\n # database config\n databaseconfig = {\n 'settings' : {\n 'sqlalchemy_loglevel' : logging.NOTSET,\n #'sqlalchemy_loglevel' : logging.INFO,\n },\n 'default' : {\n 'url' : 'sqlite:///${dbfilepath}/mewlo_testsite1.sqlite',\n #'tablename_prefix': 'mewlo_',\n 'flag_echologging' : False,\n },\n 'mysql_unused' : {\n # Sample configuration for mysql\n 'url' : 'mysql://mewlo_user:mewlo_pass@localhost:3306/mewlo_testsite1',\n 'tablename_prefix': 'mewlo_'\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_database, databaseconfig)\n self.settings.listappend_settings_key(mconst.DEF_SETTINGSEC_make_dirs, '${dbfilepath}')\n\n # email config settings\n mailconfig = {\n # online status information\n 'smtp_host': self.get_configval('mail_smtp_host'),\n 'smtp_login': self.get_configval('mail_smtp_login'),\n 'smtp_port': self.get_configval('mail_smtp_port'),\n 'smtp_mode': self.get_configval('mail_smtp_mode'),\n 'smtp_password': self.get_configval('mail_smtp_password'),\n 'mail_from' : self.get_configval('mail_from'),\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_mail, mailconfig)\n\n\n # account siteaddon settings\n siteaddonconfig = {\n # online status information\n 'registration_mode': 'immediate',\n 'flag_require_email_verified_before_login': False,\n }\n self.settings.merge_settings_key('siteaddon_account', siteaddonconfig)\n\n\n\n # ATTN: UNFINISHED\n # asset mounts config\n if (False):\n assetmountconfig = {\n 'default' : {\n # an internal assetmount just needs a url route\n 'type': 'internal',\n 'routeid': 'static_files',\n },\n 'external' : {\n 'type': 'external',\n 'filepath': '${mewlofilepath}/public_assets',\n 'urlpath': 'http://127.0.0.1/mewlo/public_assets',\n },\n }\n self.settings.merge_settings_key(mconst.DEF_SETTINGSEC_asset_mounts, assetmountconfig)\n\n\n\n\n\n #print \"TESTING CONFIG1:\"\n #self.run_configfunc('sayhello',1,2,3)\n #print \"TESTING CONFIG2:\"\n #self.run_allconfigfuncs('sayhello',1,2,3)", "def load(self):\n settings_path = os.path.join(self.file_path, \"__file_data.json\")\n if os.path.exists( settings_path ):\n self.fileList = simplejson.loads( open( settings_path, 'r' ).read() )\n\n settings_path = os.path.join(self.file_path, \"__user_data.json\")\n if os.path.exists( settings_path ):\n self.userList = simplejson.loads( open( settings_path, 'r' ).read() )", "def import_settings(path_to_settings=None):\n\n file_path = 'settings.json' if path_to_settings is None else path_to_settings\n\n if not os.path.isfile(file_path):\n # settings file doesn't exist\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT), 'settings.json')\n\n with open(file_path) as in_file:\n data = json.load(in_file)\n settings = Settings()\n\n # required attributes, fail if missing\n try:\n settings.input_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['input_folder'], data['input_file'])\n settings.output_file_path = os.path.join(os.path.dirname(sys.argv[0]), data['output_folder'], data['output_file'])\n settings.default_timezone = data['default_timezone']\n settings.output_timezone = data['output_timezone']\n settings.custom_column_headers = data.get('custom_column_headers', [])\n settings.app_id = data['app_id']\n except KeyError as e:\n print(\"Key not found in {}: \".format(file_path) + str(e))\n sys.exit(1)\n\n return settings", "def importVarious(context):\n #VAMOS A PONERLA EN EL CONFIG\n USERNAME='PPM'\n 
username=USERNAME\n title='cenditel.ppm'\n # Ordinarily, GenericSetup handlers check for the existence of XML files.\n # Here, we are not parsing an XML file, but we use this text file as a \n # flag to check that we actually meant for this import step to be run.\n # The file is found in profiles/default.\n\n if context.readDataFile('cenditel.ppm.txt') is None:\n return\n\n portal = context.getSite()\n obj = SetupEnviron() \n logger = obj.getLogger(\"cenditel.ppm\")\n MakeDefaultUser(context, username, title)\n #TODO configureCMFNotification(portal,logger) ", "def load_settings(self):\n # Set the default settings. In case in a later version of this script the settings change, new default variables will be added automatically\n self.settings = {\n # Connection settings to OBS Studio websockets plugin\n \"host\": \"localhost\",\n \"port\": 4444,\n \"password\": \"\",\n \"update_frequency\": 1, # seconds, how often the script loads the SC2 UI location\n }\n if os.path.isfile(self.settings_path):\n with open(self.settings_path) as f:\n self.settings.update(json.load(f))", "def load(initial=False):\n log.info(\"Loading settings file\")\n try:\n if initial and _initialCache: # If we are initial and we already have cached load, don't go to file\n data = _initialCache\n else:\n with open(SETTINGS_FILE) as file:\n data = json.load(file)\n if initial: # Store this for later\n _initialCache.update(data)\n except FileNotFoundError:\n log.warning(\"No log file found to load! At '{}'\".format(SETTINGS_FILE))\n return\n except json.JSONDecodeError:\n log.error(\"Settings file was corrupt! Cannot load settings\")\n return\n else:\n for name in _names: # Make sure we update in place and don't make new values\n _names[name].clear()\n _names[name].update(data[name])\n\n for id in _names[name].info: # Check to make sure we have all values at least at a default\n if id not in _names[name]:\n _names[name][id] = _names[name].info[id][\"default\"]\n\n # Legacy updating code for updating 1.2.2.0 to newer versions\n if \"voiceChannel\" in _names[\"discord\"] and type(_names[\"discord\"][\"voiceChannel\"]) == str:\n log.warning(\"Updating save file to newest version!\")\n _names[\"discord\"][\"voiceChannel\"] = int(_names[\"discord\"][\"voiceChannel\"])\n save() # Save these changes", "def import_data(self, data):\n # Import additional data for tuning\n # data: a list of dictionarys, each of which has at least two keys, 'parameter' and 'value'\n pass", "def und_add_setting(udb_file, configuration_path_file):\n subprocess.call(f\"und import {configuration_path_file} {udb_file}\")", "def _import(self, datadict):\n self.GUID = datadict.get(\"GUID\", uuid.uuid1())\n self.FileName = datadict.get(\"FileName\", \"\")\n self.Name = datadict.get(\"Name\", \"\")\n self.Projects = datadict.get(\"Projects\", [])\n self.VSVersion = datadict.get(\"VSVersion\", None)", "def load_from_conf(self):\r\n raise NotImplementedError", "def load(config_file_name=\"network_importer.toml\", config_data=None):\n global SETTINGS\n\n if config_data:\n SETTINGS = _configure_backend(Settings(**config_data))\n return\n\n if os.path.exists(config_file_name):\n config_string = Path(config_file_name).read_text()\n config_tmp = toml.loads(config_string)\n SETTINGS = _configure_backend(Settings(**config_tmp))\n return\n\n SETTINGS = Settings()", "def load_settings():\r\n if os.path.exists('settings.json'):\r\n json_data = open('settings.json').read()\r\n\r\n data = json.loads(json_data)\r\n return data\r\n else:\r\n return False", "def cli(ctx, 
root):\n try:\n ctx.obj = create_initial_context(root)\n except SettingsBroken as e:\n click.echo(\n 'Failed to read the settings file: %s' % str(e),\n err=True\n )\n exit(1)", "def load_settings(filename=None):\n filename = filename or SETTINGS\n return common.open_and_read_file(filename, as_json=True)", "def loadFromSumid(self): \n if not hasattr(self,\"mainINIFilePath\"): self.mainINIFilePath=mainINIFilePath # 026 Added a condition because the paths were constantly overwriting each other.\n if not hasattr(RawSettings,\"mainINIFilePath\"): # (025) For unittest sanity.\n RawSettings.mainINIFilePath=self.mainINIFilePath# 026 Just RawSettings.mainINIFilePath=mainINIFilePath was conflicting with sls. \n return RawSettings", "def readSettingsFile(self):\n with open(self.settingsFilePath, 'r') as settingsFile:\n self.settings = json.loads(settingsFile.read())", "def load_settings(self):\n # config file from branch's asdf\n config_exists = os.path.isfile(self.config_path)\n\n if config_exists:\n\n config_file = open(self.config_path, 'r')\n self.config_json = json.load(config_file)\n config_file.close()\n\n else:\n raise Exception(\"Error BranchConfig: could not find config json\")\n\n\n try:\n self.branch_settings = self.config_json[self.branch]\n\n self.branch_keys = self.branch_settings.keys()\n\n for attr in self.branch_keys:\n setattr(self, attr, self.branch_settings[attr])\n\n except:\n raise Exception(\"Error BranchConfig: could not add config settings to BranchConfig\")", "def load_from_conf(self):\n raise NotImplementedError", "def load_server_conf(self):\n if os.path.exists(os.path.join(self.data_path,\"settings.txt\")):\n settings_file = file(os.path.join(self.data_path,\"settings.txt\"),\"rb\")\n self.server_conf = settings_file.read().split(';')\n else:\n self.server_conf = None", "def _loadConfig(self):\n self._packRoot = getattr(sys, \"_MEIPASS\", path.abspath(path.dirname(__file__)))\n rootDir = path.abspath(path.join(self._packRoot, path.pardir))\n logger.debug(\"MOTools root dir is: %s\" % rootDir)\n\n metConf = path.join(rootDir, \"met_config\", \"met_config.json\")\n mainConf = path.join(rootDir, \"main_config.json\")\n userConf = path.join(rootDir, \"user_config.json\")\n\n self._confData = {\n \"MET\": {\"path\": metConf, \"config\": {}, \"loaded\": False},\n \"MAIN\": {\"path\": mainConf, \"config\": {}, \"loaded\": False},\n \"USER\": {\"path\": userConf, \"config\": {}, \"loaded\": False},\n }\n\n for confGroup in self._confData:\n confFile = self._confData[confGroup][\"path\"]\n logger.debug(\"Loading %s config file\" % confGroup)\n if path.isfile(confFile):\n jsonData = {}\n try:\n with open(confFile, mode=\"r\") as inFile:\n jsonData = json.loads(inFile.read())\n if \"config\" in jsonData:\n self._confData[confGroup][\"config\"] = jsonData[\"config\"]\n self._confData[confGroup][\"loaded\"] = True\n except Exception as e:\n logger.error(\"Failed to parse config JSON data.\")\n logger.error(str(e))\n return False\n else:\n logger.debug(\"No file: %s\" % confFile)\n\n # if not self._confData[\"MAIN\"][\"loaded\"]:\n # logger.error(\"Failed to load minimum configuration file main_config.json.\")\n # raise RuntimeError\n\n return", "def loadSettings():\t\n\tglobal settings\n\tglobal bookpath\n\tjson_data=open(config_file)\n\ttry:\n\t\tsettings = json.load(json_data)\n\texcept ValueError as e:\n\t\tprint \"The configuration file \" + config_file + \" exists but is not valid JSON. 
Either correct the error or revert to a working veersion\"\n\t\tprint \"Error details: \" + str(e)\n\t\tsys.exit(1)\n\tbookpath = os.path.join(os.getcwd(),settings['source'])", "def read_settings():\n \n settings = OrdDic()\n settings.update(json.load(open(\"resources/files/settings.txt\", \"r\")))\n\n ## OLD WAY BELOW\n\n #r = open(\"resources/files/settings.txt\", \"r\", newline=\"\\n\")\n # for option in r.read().split('\\n'):\n # try:\n # #option = option.split('\\\\')\n # #settings.update({option[0]: option[1]})\n # # settings.update(json.loads(option))\n # except IndexError:\n # pass\n return settings", "def init_channel_master_config_data():\n config_data = {}\n config_data[\"tabs\"] = []\n config_data[\"tabs_data\"] = {}\n config_data[\"current_tab\"] = 0\n\n return config_data", "def load( self ):\n ini = codecs.open(self.filename,\"r\",\"utf-8\",errors=\"replace\",buffering=0)\n for l in ini:\n l = l.strip()\n if l:\n (name,value) = l.split(\"=\",1)\n self.conf[name.strip()] = value.strip()\n ini.close()", "def loadData(self, file):\n self.data = batchImport(file, self.ps)", "def load(self, file):\n self.namespace['workflow'].configfile(file)\n self.updateNamespace()", "def loadDefaults(self):\n # (025) Merged into settings.RawSettings.\n pass", "def load(filename = config_filename):\n global parser,save_parser,script_dir,config_filename\n\n config_filename = filename #updates config filename\n if type(config_filename) == str:\n config_filename = [config_filename]\n # for saving we use only last file\n save_parser = ConfigParser()\n save_parser.read(os.path.join(script_dir, config_filename[-1]))\n #for loading we use all files\n parser = ConfigParser()\n for filename in config_filename:\n #loads all ini files\n parser.read(os.path.join(script_dir,filename))\n\n #generates all global variables representing config\n #DEFAULT section\n for key,val in default_values.items():\n usekey = key\n if key.startswith(\"_\"):\n usekey = key.lstrip(\"_\")\n #if there is a config with same name remove it, only protected one stays\n if usekey in parser.items(\"DEFAULT\"):\n parser.remove_option(section=\"DEFAULT\",option=usekey)\n if usekey in save_parser.items(\"DEFAULT\"):\n save_parser.remove_option(section=\"DEFAULT\",option=usekey)\n #cast read string variable to type from default_values\n setting = get(key,section=\"DEFAULT\",default=val)\n if type(val) in [list,dict]: #load as json instead\n globals()[usekey] = json.loads(setting.replace(\"'\",'\"'))\n else:\n globals()[usekey] = type(val)(setting)\n save()\n load_sections()", "def LoadSettings(self, name):\n file_path = os.path.join(self._root, Storage._SETTINGS_FILE % name)\n if not os.path.exists(file_path):\n return {}\n with open(file_path) as f:\n return json.load(f)", "def load_settings_from_file(self, cfg_file):\n \n #\n #\n # TODO\n # Missing settings should not cause exceptions\n #\n #\n #\n\n if not os.path.exists(cfg_file): \n raise Exception('Provided config file [%s] does not exist or cannot be read.' % cfg_file)\n\n import ConfigParser\n config = ConfigParser.ConfigParser()\n config.read(cfg_file)\n \n \n self.reference_root = config.get('Paths','reference-root')\n \n self.scratch_root = os.getcwd()\n try:\n self.scratch_root = config.get('Paths','scratch-root')\n except ConfigParser.NoOptionError:\n self.logger.info('Scratch-root setting is missing. 
Using current directory: %s' % self.scratch_root)\n\n\n if (self.run_folder != None):\n self.run_id = os.path.basename(self.run_folder)\n else:\n raise Exception('Set runfolder with PipelineConfig.set_runfolder() before loading settings')\n \n \n #\n # TODO\n # needs to be updated on update of settings\n #\n self.runs_scratch_dir = os.path.join(self.scratch_root, self.run_id) if self.run_folder != None else self.scratch_root\n self.logger.info('Run\\'s scratch directory: %s' % self.runs_scratch_dir)\n \n # optional results and fastq archive dirs \n self.results_archive = None\n try:\n self.results_archive = config.get('Paths','results-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No results-archive provided. Results will not be archived outside of the run\\'s scratch directory.')\n \n self.fastq_archive = None\n try:\n self.fastq_archive = config.get('Paths','fastq-archive')\n except ConfigParser.NoOptionError:\n self.logger.info('No fastq-archive provided. Fastq files will not be archived outside of the run\\'s scratch directory.')\n \n \n # optional /tmp dir\n self.tmp_dir = '/tmp'\n try:\n self.tmp_dir = config.get('Paths','tmp-dir')\n except ConfigParser.NoOptionError:\n self.logger.info('No tmp-dir provided. /tmp will be used.')\n \n \n \n \n # reference files\n self.reference = os.path.join(self.reference_root, config.get('Resources','reference-genome'))\n self.capture = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed'))\n self.capture_qualimap = os.path.join(self.reference_root, config.get('Resources','capture-regions-bed-for-qualimap'))\n self.capture_plus = os.path.join(self.reference_root, config.get('Resources', 'capture-plus-regions-bed'))\n self.gene_coordinates = os.path.join(self.reference_root, config.get('Resources', 'gene-coordinates'))\n \n self.adapters = os.path.join(self.reference_root, config.get('Resources', 'adapters-fasta'))\n \n # tools\n self.bcl2fastq = config.get('Tools','bcl2fastq')\n self.trimmomatic = config.get('Tools','trimmomatic') \n self.bwa = config.get('Tools','bwa')\n self.samtools = config.get('Tools','samtools')\n self.picard = config.get('Tools','picard')\n self.gatk = config.get('Tools','gatk')\n self.freebayes = config.get('Tools','freebayes')\n self.bcftools = config.get('Tools','bcftools')\n self.qualimap = config.get('Tools','qualimap')\n \tself.fastqc\t = config.get('Tools','fastqc')\n\n\n # annovar settings\n self.convert_to_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','convert_to_annovar'))\n self.annovar_annotate = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_annotate'))\n self.table_annovar = os.path.join(config.get('Annovar','annovar_home'), \n config.get('Annovar','table_annovar'))\n self.annovar_human_db = os.path.join(config.get('Annovar','annovar_home'),\n config.get('Annovar','annovar_human_db'))\n self.annovar_1000genomes_eur = config.get('Annovar','annovar_1000genomes_eur')\n self.annovar_1000genomes_eur_maf_cutoff = config.get('Annovar','annovar_1000genomes_eur_maf_cutoff')\n self.annovar_inhouse_dbs = config.get('Annovar','annovar_inhouse_dbs')\n self.omim_gene_phenotype_map_file = config.get('Annovar','omim_gene_phenotype_map_file')", "def load_from_defaults(self):\n default_settings = import_module('mindinsight.conf.defaults')\n for setting in dir(default_settings):\n if setting.isupper():\n setattr(self, setting, getattr(default_settings, setting))\n self._default_settings.add(setting)", "def 
read_settings(self):\n config = ConfigParser.SafeConfigParser()\n config.read(os.path.dirname(os.path.realpath(__file__)) + '/linode.ini')\n\n # Cache related\n cache_path = config.get('linode', 'cache_path')\n self.cache_path_cache = cache_path + \"/ansible-linode.cache\"\n self.cache_path_index = cache_path + \"/ansible-linode.index\"\n self.cache_max_age = config.getint('linode', 'cache_max_age')", "def _import_config(handle, file_name, file_location=\"ucscentral\",\n file_dir=None, merge=True, protocol=None,\n hostname=\"localhost\",\n username=None, password=\"\", timeout=120):\n\n from ..mometa.top.TopSystem import TopSystem\n from ..mometa.mgmt.MgmtDataImporter import MgmtDataImporter, \\\n MgmtDataImporterConsts\n\n if not file_name:\n raise UcscValidationException(\"Missing file_name argument\")\n\n if file_location != \"ucscentral\":\n if not file_dir:\n raise UcscValidationException(\"Missing file_dir argument\")\n\n if (not file_name.endswith('.tgz')):\n raise UcscValidationException(\"file_name must be .tgz format\")\n\n top_system = TopSystem()\n\n if file_location == \"remote\":\n file_path = os.path.join(file_dir, file_name)\n _validate_remote_host_args(protocol, hostname, username, password)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=hostname,\n remote_file=file_path,\n proto=protocol,\n user=username,\n pwd=password,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n elif file_location == \"local\":\n file_path = os.path.join(file_dir, file_name)\n if not os.path.exists(file_path):\n raise UcscOperationError(\"Import config\",\n \"Backup File '%s' not found\" %\n file_path)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=\"localhost\",\n remote_file='/' + file_name,\n proto=MgmtDataImporterConsts.PROTO_HTTP,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n elif file_location == \"ucscentral\":\n if not _is_backup_file_on_server(handle, \"ucs-central\", file_name):\n raise UcscOperationError(\"Import config\",\n \"Backup File '%s' not found \"\n \"on UcsCentral\" % file_name)\n mgmt_importer = MgmtDataImporter(\n parent_mo_or_dn=top_system,\n hostname=\"localhost\",\n remote_file='/ucs-central/cfg-backups/' + file_name,\n proto=MgmtDataImporterConsts.PROTO_TFTP,\n admin_state=MgmtDataImporterConsts.ADMIN_STATE_ENABLED)\n\n else:\n raise UcscOperationError(\n \"Import config\",\n \"Invalid file_location argument.\"\n \"It must be either ucscentral,local or remote\")\n\n if merge:\n mgmt_importer.action = MgmtDataImporterConsts.ACTION_MERGE\n else:\n mgmt_importer.action = MgmtDataImporterConsts.ACTION_REPLACE\n\n if file_location == \"local\":\n try:\n log.debug(\"Start uploading config\")\n uri_suffix = \"operations/file-%s/importconfig.txt?Cookie=%s\" % (\n file_name, handle.cookie)\n handle.file_upload(url_suffix=uri_suffix,\n file_dir=file_dir,\n file_name=file_name)\n\n except Exception as err:\n UcscWarning(str(err))\n raise UcscOperationError(\"Upload config\", \"upload failed\")\n\n handle.add_mo(mgmt_importer, modify_present=True)\n handle.commit()\n\n duration = timeout\n poll_interval = 2\n log.debug(\"Importing UcsCentral config\")\n while True:\n mgmt_importer = handle.query_dn(dn=mgmt_importer.dn)\n admin_state = mgmt_importer.admin_state\n\n # Break condition:- if state id disabled then break\n if admin_state == MgmtDataImporterConsts.ADMIN_STATE_DISABLED:\n break\n\n time.sleep(min(duration, poll_interval))\n duration = max(0, (duration - poll_interval))\n if duration == 
0:\n raise UcscOperationError(\n \"Import config\", \"operation timed out\")\n\n if mgmt_importer.over_all_status != \\\n MgmtDataImporterConsts.OVER_ALL_STATUS_ALL_SUCCESS:\n raise UcscOperationError(\n \"Import config\",\n (\"operational status %s \" % mgmt_importer.over_all_status))\n\n log.debug(\"Import config to UcsCentral was successfull\")\n return mgmt_importer", "def read_setup(inifile):\n # inifile = os.path.join(spathy_path, inifile)\n print(inifile)\n cfg = configparser.ConfigParser()\n cfg.read(inifile)\n\n pp = {}\n for s in cfg.sections():\n section = s.encode('ascii', 'ignore')\n pp[section] = {}\n for k, v in cfg.items(section):\n key = k.encode('ascii', 'ignore')\n val = v.encode('ascii', 'ignore')\n if section == 'General': # 'general' section\n pp[section][key] = val\n else:\n pp[section][key] = float(val)\n pp['General']['dt'] = float(pp['General']['dt'])\n\n pgen = pp['General']\n pcpy = pp['CanopyGrid']\n pbu = pp['BucketGrid']\n ptop = pp['Topmodel']\n\n return pgen, pcpy, pbu, ptop", "def _importNode(self, node):\n if self.environ.shouldPurge():\n self._purgeProperties()\n self._initProperties(node)\n self._logger.info('settings imported.')", "def __setup(self, SETTINGS_FILE):\n config = ConfigParser()\n try:\n config.read(SETTINGS_FILE)\n self.settings = Settings(config)\n self.data = Data()\n except IOError:\n raise FileMissing(SETTINGS_FILE)\n except Exception as e:\n raise e", "def defaultconfig(self):\r\n\r\n config_data = {\r\n \"path_to_database\": \"FUDB/FOLLOWUP.DB\",\r\n \"path_to_frontend\": \"FUDB/\",\r\n \"path_to_dcs_info\": \"FUDB/\",\r\n \"path_to_bin\": \"bin/\",\r\n \"path_to_excels_exported_from_database\": \"excels exported/\",\r\n \"path_to_excels_to_be_imported_in_database\": \"excels to be imported/\",\r\n \"path_to_new_opfiles\": \"DC BATCHES IN WORK/0 NEW/\",\r\n \"path_to_batches_unassigned\": \"DC BATCHES IN WORK/1 UNASSIGNED/\",\r\n \"path_to_batches_prepfiles\": \"DC BATCHES IN WORK/2 PREPARED FILES/\",\r\n \"path_to_batches_assigned\": \"DC BATCHES IN WORK/3 ASSIGNED/\",\r\n \"path_to_batches_tobechecked\": \"DC BATCHES IN WORK/4 TO BE CHECKED/\",\r\n \"path_to_batches_tbimported\": \"DC BATCHES IN WORK/5 TO BE IMPORTED/\",\r\n \"path_to_batches_finished\": \"DC BATCHES IN WORK/6 FINISHED/\",\r\n \"path_to_batches_instandby\": \"DC BATCHES IN WORK/7 IN STANDBY/\",\r\n \"path_to_batches_unrecordable\": \"DC BATCHES IN WORK/8 UNRECORDABLE/\",\r\n \"batch_status_options_responsible\": \"PREP. 
OP FILE, IMPORTATION & SPLIT FILE, RELIABILITY & DATA UPGRADE, CHECK OP FILE, CHECK SPLIT FILE, CHECK FRONT END, **TO BE CHECKED\",\r\n \"batch_status_options_proofreader\": \"OP FILE OK, SPLIT FILE OK, FRONT END OK, **TO BE IMPORTED, **FINISHED, **REWORK, **STANDBY, **UNRECORDABLE\",\r\n \"batch_status_options_overall\": \"ONGOING, STANDBY, FINISHED, UNRECORDABLE\",\r\n \"aircrafts\": \"A300, A300-600, A310, A320, A330, A340, A350, A380\",\r\n \"split_batch_factor\": \"2, 3, 4, 5, 6, 7, 8, 9\",\r\n \"IDlentgh\": \"6\",\r\n \"port\": \"5000\"\r\n }\r\n \r\n if not os.path.isfile(os.path.join(self.cwd, \"config.json\")):\r\n self.func.write_json(config_data, self.cwd, fname=\"config.json\")", "def import_control_section(self, filename_suffix='run'):\n pass", "def load_settings(self):\n settings_file = open('./resources/settings.json')\n settings = json.load(settings_file)\n settings_file.close()\n try:\n if settings['camera'] in self.camera_list:\n self.comboCamera.setCurrentIndex(settings['camera'])\n self.comboRotation.setCurrentIndex(settings['rotation'])\n self.spinMinHue.setValue(settings['colors']['min_hue'])\n self.spinMaxHue.setValue(settings['colors']['max_hue'])\n self.spinMinSaturation.setValue(settings['colors']['min_saturation'])\n self.spinMaxSaturation.setValue(settings['colors']['max_saturation'])\n self.spinMinValue.setValue(settings['colors']['min_value'])\n self.spinMaxValue.setValue(settings['colors']['max_value'])\n self.spinDiameter.setValue(settings['diameter'])\n self.lineEditLifter.setText(settings['lifter'])\n self.checkSaveVideo.setChecked(settings['save_video'])\n except KeyError:\n self.statusbar.clearMessage()\n self.statusbar.showMessage('Error in settings.json. Loading defaults instead.')", "def config():\n file_path = None # path to the input file\n db_path = None # path to the output db\n atomic_properties = (\n \"Properties=species:S:1:pos:R:3\"\n ) # atomic properties of the input file\n molecular_properties = [\"energy\"] # molecular properties of the input file\n overwrite = False", "def parse_data_config(path):\n cfg = dict()\n cfg['gpus'] = '0,1,2,3'\n cfg['num_workers'] = '10'\n \n with open(path, 'r') as fp:\n lines = fp.readlines()\n for line in lines:\n line = line.strip()\n if line == '' or line.startswith('#'):\n continue\n key, value = line.split('=')\n cfg[key.strip()] = value.strip()\n \n return cfg", "def reads(self, data):\n\n self.parser = configparser.ConfigParser()\n ds = io.StringIO(data)\n ds.name = os.path.expanduser(os.path.join('~', RC_FILE))\n self.parser.readfp(ds)", "def settings_load(self):\n self.ui.spinBox_ATSP.setValue(self.default['ATSP'])\n\n if self.default['serialLabel'] == 'bt':\n self.ui.btRadio.setChecked(True)\n try:\n os.system(\"blueman-manager\")\n except:\n print \"Please install 'blueman' package\"\n elif self.default['serialLabel'] == 'usb':\n self.ui.usbRadio.setChecked(True)\n else:\n self.ui.devRadio.setChecked(True)\n\n if self.default['units'] == 'metric':\n self.ui.units_metric_radio.setChecked(True)\n else:\n self.ui.units_US_radio.setChecked(True)\n\n return", "def __load_settings(self):\n\n self.app_settings = sublime.load_settings(self.SETTINGS_FILE)\n self.__refresh_settings(True)\n\n # The settings may change during execution so we need to listen for changes\n self.app_settings.add_on_change(self.SETTINGS_CALLBACK_KEY, self.__refresh_settings)", "def fixture_example_data():\n import_example_data()", "def update_current_settings(file_name):\n new_settings = importlib.import_module(file_name)\n for k, 
v in new_settings.__dict__.items():\n if k.upper() == k:\n globals().update({k: v})", "def get_settings():\n with open('config/config.json') as data_file:\n settings = json.load(data_file)\n return settings", "def _load (cls, *files):\n config = ConfigParser.ConfigParser()\n config.read(files)\n \n metadata = {}\n if config.has_section(\"metadata\"):\n for key in config.options(\"metadata\"):\n metadata[key] = config.get(\"metadata\", key)\n\n processes = {}\n datasources = {}\n for section in config.sections():\n if section == \"metadata\": continue\n if section.startswith(\"process_\"):\n try:\n processes[section[8:]] = FeatureServer.Processing.loadFromSection(config, section)\n except Exception, E:\n pass \n else: \n datasources[section] = cls.loadFromSection(\n config, section, 'DataSource')\n\n return cls(datasources, metadata, processes)", "def from_settings(settings):", "def multi_interpreter(self, keys):\n # first - check the arguments are legit #\n # run over all the keys\n for index, key in enumerate(keys):\n # normalize the keys\n key = str(key).strip()\n # check if valid key\n if key is None or key == \"\":\n raise Exception(\"You need to provide theme and key to the settings interpreter\")\n # try open and read from file\n try:\n # read database\n with open(self.file_location) as settings_file:\n data = json.load(settings_file)\n # check if we found any database\n if data is None:\n raise Exception(\"You need to provide theme and key to the settings interpreter\")\n # get into the right location\n for key in keys:\n data = data[key]\n # if this is not a final value, allow iteration on it\n if isinstance(data, dict):\n self.setting_group = data\n self.setting_group_iter = iter(data)\n # this is the value inside \"database\" at this point\n return data\n except Exception as error:\n raise Exception(\"You need to provide theme and key to the settings interpreter\")", "def load_config(self):\n pass", "def get_settings_from_file(path, default_settings=DEFAULT_CONFIG):\r\n\r\n name, ext = os.path.splitext(os.path.basename(path))\r\n module = load_source(name, path)\r\n return get_settings_from_module(module, default_settings=default_settings)", "def init_from_file(self):\n self.src.load('start.00') \n self.oe1.load('start.01')\n #self.det.load('start.02')\n print('NOTE: variables loaded from start.00/start.01 files')", "def loadseasoning(self):\n stream = open(self.fileref)\n self.config = yaml.safe_load(stream)\n stream.close()", "def load_settings(env=\"prod\"):\n global config\n config = configparser.SafeConfigParser()\n config.read(CONFIG_FILES.get(env))", "def startup(self):\n self.settings = sublime.load_settings(self.settings_base)\n self.sublime_settings = sublime.load_settings(self.sublime_base)", "def LoadConfig(path):\n config = None\n with open(path) as f:\n config = json.load(f)\n\n presets = {}\n for name in config['presets']:\n presets[name] = lightserver.Preset(**config['presets'][name])\n\n return config['bulbs'], config['groups'], presets", "def _load(self):\n p = os.path.join(paths.setup_dir, 'system_health.yaml')\n if os.path.isfile(p):\n with open(p, 'r') as rfile:\n config = yaml.load(rfile)\n if config:\n self._values = config['values']\n self._conditionals = config['conditionals']\n\n general = config['general']\n self._limit = general['limit']", "def LoadCodereviewSettingsFromFile(file):\n settings = {}\n for line in file.read().splitlines():\n if not line or line.startswith(\"#\"):\n continue\n k, v = line.split(\": \", 1)\n settings[k] = v\n\n def 
GetProperty(name):\n return settings.get(name)\n\n def SetProperty(name, setting, unset_error_ok=False):\n fullname = 'rietveld.' + name\n if setting in settings:\n cl_settings.RunGit(['config', fullname, settings[setting]])\n else:\n cl_settings.RunGit(['config', '--unset-all', fullname], error_ok=unset_error_ok)\n\n SetProperty('server', 'CODE_REVIEW_SERVER')\n # Only server setting is required. Other settings can be absent.\n # In that case, we ignore errors raised during option deletion attempt.\n SetProperty('cc', 'CC_LIST', unset_error_ok=True)\n SetProperty('tree-status-url', 'STATUS', unset_error_ok=True)\n SetProperty('viewvc-url', 'VIEW_VC', unset_error_ok=True)\n hooks = {}\n if GetProperty('GITCL_PREUPLOAD'):\n hooks['preupload'] = GetProperty('GITCL_PREUPLOAD')\n if GetProperty('GITCL_PREDCOMMIT'):\n hooks['predcommit'] = GetProperty('GITCL_PREDCOMMIT')\n return hooks", "def configureMaster(self):\n\t\t\n\t\tfin = open('/opt/google/earth/free/drivers.ini', 'r')\n\t\tfout = open('/etc/X11/ge-drivers.ini', 'w')\n\t\t\n\t\tfor line in fin.readlines():\n\t\t\tfout.write(line)\n\t\t\tif line.find('SETTINGS {') != 0:\n\t\t\t\tcontinue\n\t\t\tfout.write('\\tViewSync/send = true\\n')\n\t\t\tfout.write('\\tViewSync/receive = false\\n')\n\n\t\t\tfout.write('\\tViewSync/hostname = %s\\n' %\n\t\t\t\t self.db.getHostAttr('localhost',\n\t\t\t\t\t\t 'Kickstart_PrivateBroadcast'))\n fout.write('\\tViewSync/port = 21567\\n')\n\t\t\tfout.write('\\n')\n\t\t\tfout.write('\\tViewSync/horizFov = 60\\n')\n fout.write('\\tViewSync/rollOffset = 0\\n')\n fout.write('\\tViewSync/yawOffset = 0\\n')\n\t\t\tfout.write('\\tViewSync/pitchOffset = 0\\n')\n\t\t\tfout.write('\\n')\n\n\n\t\tfin.close()\n\t\tfout.close()\n\n\t\tshutil.copy('/etc/X11/ge-drivers.ini', '/opt/google/earth/free/drivers.ini')", "def setup_settings():\n settings = DEFAULT_SETTINGS\n if os.environ.get(\"MUTALYZER_SETTINGS\"):\n configuration_path = os.environ[\"MUTALYZER_SETTINGS\"]\n with open(configuration_path) as f:\n configuration_content = \"[config]\\n\" + f.read()\n loaded_settings = configparser.ConfigParser()\n loaded_settings.optionxform = str\n loaded_settings.read_string(configuration_content)\n loaded_settings = {\n sect: dict(loaded_settings.items(sect))\n for sect in loaded_settings.sections()\n }[\"config\"]\n for k in loaded_settings:\n if loaded_settings[k] in {\"yes\", \"true\", \"1\"}:\n loaded_settings[k] = True\n elif loaded_settings[k] in {\"no\", \"false\", \"0\"}:\n loaded_settings[k] = False\n elif loaded_settings[k].isnumeric():\n loaded_settings[k] = int(loaded_settings[k])\n settings.update(loaded_settings)\n\n return settings", "def GetMasterDevParameters(filename='master_cfg_params.json'):\n if os.path.isfile(filename):\n return ReadJsonAsUtf8(filename=filename)\n return {}", "def load_json():\n\n global REMOVE_WORDS, NEGATIVE_WORDS # pylint: disable=W0603\n this_dir, _ = os.path.split(__file__)\n settings_path = os.path.join(this_dir, \"Data\", \"settings.json\")\n\n with open(settings_path, \"r\", encoding=\"utf8\") as settings_file:\n settings = json.loads(settings_file.read())\n\n REMOVE_WORDS = settings[\"remove_words\"]\n NEGATIVE_WORDS = settings[\"negative_words\"]\n\n settings_file.close()", "def load(cls, path):\n config = ConfigParser.ConfigParser()\n\n if path != None:\n cls.settings_path = path\n else:\n # Use default settings file if none was given\n cls.settings_path = os.path.join(miso_path,\n \"settings\",\n \"miso_settings.txt\")\n\n print \"Using MISO settings file: %s\" 
%(cls.settings_path)\n if not os.path.isfile(cls.settings_path):\n print \"Error: Settings file %s does not exist.\" \\\n %(cls.settings_path)\n sys.exit(1)\n cls.parsed_settings = config.read(cls.settings_path)\n\n cls.global_settings = {}\n\n for section in config.sections():\n for option in config.options(section):\n # Load cluster options as strings, without attempting\n # to evaluate them avoids misinterpretation of words\n # like \"long\" as a data type\n if section == \"cluster\":\n cls.global_settings[option] = \\\n str(config.get(section, option))\n else:\n cls.global_settings[option] = \\\n tryEval(config.get(section, option))\n\n # Set directory paths specific to pipeline\n if 'pipeline_results_dir' in cls.global_settings:\n cls.global_settings['analysis_dir'] = \\\n os.path.join(cls.global_settings['pipeline_results_dir'],\n 'analysis')\n cls.global_settings['rna_events_dir'] = \\\n os.path.join(cls.global_settings['analysis_dir'],\n 'rna_events')", "def load_settings(self):\n LogConfiguration.initialize(self._db)\n self.analytics = Analytics(self._db)\n self.auth = Authenticator(self._db, self.analytics)\n\n self.setup_external_search()\n\n # Track the Lane configuration for each library by mapping its\n # short name to the top-level lane.\n new_top_level_lanes = {}\n # Create a CirculationAPI for each library.\n new_circulation_apis = {}\n\n # Potentially load a CustomIndexView for each library\n new_custom_index_views = {}\n\n # Make sure there's a site-wide public/private key pair.\n self.sitewide_key_pair\n\n for library in self._db.query(Library):\n lanes = load_lanes(self._db, library)\n\n new_top_level_lanes[library.id] = lanes\n\n new_custom_index_views[library.id] = CustomIndexView.for_library(\n library\n )\n\n new_circulation_apis[library.id] = self.setup_circulation(\n library, self.analytics\n )\n self.top_level_lanes = new_top_level_lanes\n self.circulation_apis = new_circulation_apis\n self.custom_index_views = new_custom_index_views\n self.shared_collection_api = self.setup_shared_collection()\n\n # Assemble the list of patron web client domains from individual\n # library registration settings as well as a sitewide setting.\n patron_web_domains = set()\n admin_web_domains = set()\n\n def get_domain(url):\n url = url.strip()\n if url == \"*\":\n return url\n scheme, netloc, path, parameters, query, fragment = urllib.parse.urlparse(\n url)\n if scheme and netloc:\n return scheme + \"://\" + netloc\n else:\n return None\n\n sitewide_patron_web_client_urls = ConfigurationSetting.sitewide(\n self._db, Configuration.PATRON_WEB_HOSTNAMES).value\n if sitewide_patron_web_client_urls:\n for url in sitewide_patron_web_client_urls.split('|'):\n domain = get_domain(url)\n if domain:\n patron_web_domains.add(domain)\n\n sitewide_admin_web_client_urls = ConfigurationSetting.sitewide(\n self._db, Configuration.ADMIN_WEB_HOSTNAMES).value\n if sitewide_admin_web_client_urls:\n for url in sitewide_admin_web_client_urls.split('|'):\n domain = get_domain(url)\n if domain:\n admin_web_domains.add(domain)\n\n from .registry import Registration\n for setting in self._db.query(\n ConfigurationSetting).filter(\n ConfigurationSetting.key == Registration.LIBRARY_REGISTRATION_WEB_CLIENT):\n if setting.value:\n patron_web_domains.add(get_domain(setting.value))\n\n self.patron_web_domains = patron_web_domains\n self.admin_web_domains = admin_web_domains\n self.setup_configuration_dependent_controllers()\n authentication_document_cache_time = int(\n ConfigurationSetting.sitewide(\n self._db, 
Configuration.AUTHENTICATION_DOCUMENT_CACHE_TIME\n ).value_or_default(0)\n )\n self.authentication_for_opds_documents = ExpiringDict(\n max_len=1000, max_age_seconds=authentication_document_cache_time\n )\n self.wsgi_debug = ConfigurationSetting.sitewide(\n self._db, Configuration.WSGI_DEBUG_KEY\n ).bool_value or False", "def load_data(self):", "def load_conf(self):\n self._read_uconf()", "def load(file):\n _config.load(file)", "def und_setup_setting(udb_file, settings_file_path):\n subprocess.call(f\"und settings @{settings_file_path} {udb_file}\")", "def read_config(self, config_filename):", "def restore_base_data():\n\n db_dirname = os.path.dirname(os.path.realpath(__file__))\n shutil.copyfile(src=os.path.join(db_dirname, 'consolemini.base.json'),\n dst=os.path.join(db_dirname, 'consolemini.test.json'))", "def my_settings():\n print(f\"\\nmy_settings fixture in {__file__}\")\n return {\"name\": \"Eric\"}", "def import_registry_settings(site):\n PROFILE_ID = 'profile-interlegis.portalmodelo.policy:default'\n setup = api.portal.get_tool('portal_setup')\n setup.runImportStepFromProfile(PROFILE_ID, 'plone.app.registry')", "def import_project_dump(self, key):", "def load_settings(user_settings_file: IO[AnyStr], use_default_values: bool = True) -> Json:\n default_settings = load_default_settings()\n user_settings = load_settings_simple(user_settings_file)\n return typing.cast(Json, _merge_settings(default_settings, user_settings, use_default_values))", "def __init__(self, settings):\n self._read_config(settings)", "def initialize_from_config(self):", "def configure_airflow_variables():\n from airflow.bin.cli import import_helper\n for path in glob(os.path.join(airflow_variables_dir, '*.json')):\n import_helper(path)", "def load_from_pickle(f):\n # Skip deprecated first line\n _ = f.readline();\n settings = arg.processCommandLineArguments(f.readline().strip().split(\" \"), None);\n \n try:\n savedVars = pickle.load(f);\n except IndexError:\n return False;\n \n f.close();\n \n return dict(savedVars), settings[0];", "def reload_settings(self):\n importlib.reload(sys.modules['micromasters.settings'])\n # Restore settings to original settings after test\n self.addCleanup(importlib.reload, sys.modules['micromasters.settings'])\n return vars(sys.modules['micromasters.settings'])" ]
[ "0.62687576", "0.6112625", "0.60982084", "0.599278", "0.59767", "0.5931371", "0.5877606", "0.58080935", "0.5805709", "0.58034456", "0.57821137", "0.57619953", "0.57555234", "0.5754049", "0.5751691", "0.57266325", "0.5715067", "0.5707333", "0.5681978", "0.56536126", "0.5650209", "0.56176126", "0.5614506", "0.5605488", "0.5593285", "0.55932117", "0.5577377", "0.5576538", "0.55555993", "0.55518323", "0.55464154", "0.5515463", "0.54987913", "0.5494423", "0.54782975", "0.5477481", "0.54755163", "0.54709905", "0.54669553", "0.5464941", "0.5452641", "0.54501843", "0.54192317", "0.5414157", "0.54116166", "0.5406569", "0.5406396", "0.53949213", "0.53892916", "0.5373651", "0.53529686", "0.5346312", "0.53431565", "0.5326905", "0.53153026", "0.53127104", "0.5312145", "0.52979434", "0.52964", "0.5293442", "0.5292717", "0.5292506", "0.5281447", "0.527982", "0.52729166", "0.5269169", "0.5247233", "0.52453834", "0.5244547", "0.52437186", "0.52394885", "0.52385384", "0.5235579", "0.5227708", "0.5227654", "0.5221396", "0.52121097", "0.5205053", "0.52034914", "0.5198643", "0.51976347", "0.5190031", "0.51876014", "0.5187318", "0.51818717", "0.5181362", "0.5167564", "0.5155185", "0.515381", "0.5153366", "0.51483285", "0.51375055", "0.51373416", "0.51373196", "0.5115188", "0.51132965", "0.5105795", "0.51042956", "0.5102339", "0.5102028" ]
0.7781248
0
return the objects in the layer
def dataObjects(self): return self._objects
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetObjects(self): \r\n return self.model.GetObjects()", "def objects(self):", "def objects(self):\n\t\treturn self._objects", "def objects(self):\r\n return self._objects", "def getAllLayers(self):\n jobItem = self.item(0, 0)\n layerObjects = [jobItem.child(row, 0).data(QtCore.Qt.UserRole)\n for row in range(jobItem.rowCount())]\n return layerObjects", "def get_layer_objects(self, layer, x0, y0, x1, y1, srid):\n l = Layer.objects.filter(code=layer).first()\n if not l:\n return {}\n features = []\n bbox = self.get_bbox(x0, y0, x1, y1, srid)\n for d in GeoData.objects.filter(layer=l, data__geo_within=bbox):\n features += [geojson.Feature(\n id=str(d[\"id\"]),\n geometry=self.transform(d[\"data\"], self.db_proj, srid),\n properties={\n \"object\": str(d.object.id),\n \"label\": d.label.encode(\"utf-8\") if d.label else \"\"\n }\n )]\n return geojson.FeatureCollection(features=features, crs=srid)", "def list(self):\n return self.cell.objects+self.cell.tempObjects", "def get_objects(self):\n return self._objects", "def items(self):\n out = []\n for y,x in self.coords(False):\n out.append(self.retrieve(y, x))\n return out", "def get_objects_data(self):\n pass", "def hbObjects(self):\r\n return self.__hbObjs", "def all(self):\n return (self.__objects)", "def getObjects(self):\n return Cell_Objects(self)", "def _get_layers(self) :\n \n return self._layers", "def get_objects(self):\n objs = self.scene.get_objects()\n objs_attached = self.scene.get_attached_objects()\n return objs, objs_attached", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def all(self):\n return self.__objects", "def add_objects_from_layer(self, layer):\n\n objects = layer.get_allowed_geometry()\n\n typ_plural = layer.path[1]\n typ_sofi = gs.plural_to_sofi[typ_plural]\n\n for obj in objects:\n\n # !! 
REFACTOR TO CALL PROGRAMATICALLY -> ELIMINATE CONDITIONALS !!\n\n if typ_plural in gs.point_elements:\n\n self.add_node(obj, typ_sofi, layer)\n\n if typ_plural in gs.line_elements:\n\n self.add_line_element(obj, typ_sofi, layer)\n\n if typ_plural in gs.spring_elements:\n\n self.add_spring_sn(obj, typ_sofi, layer) \n\n if typ_plural in gs.area_elements:\n\n self.add_area_element(obj, typ_sofi, layer) \n\n return self", "def convObjs(self):\n\t\treturn self._convObjs", "def obj_list(self):\n return self._obj_list", "def getall(self, cl):\n return sprite.Group([object for object in self.ginfo.sprites() if isinstance(object, cl)])", "def objects (self):\n return InternalObjectList (self)", "def layers(self):\r\n return self._flc.layers", "def layers(self):\n return self['layers']", "def getLayers(self):\n return self.__layers", "def get_geometry(self):\n\n return rs.ObjectsByLayer(self.name)", "def get_layer_instances(conn, stack_name,layer_name):\n\n\tfor ins in conn.describe_instances(get_stack_id(conn, stack_name))['Instances']:\n\t\t#print \"this is the LayerId\",str(ins['LayerIds'])\n\t\t#print get_layer_id(conn,stack_name)\n\t\tif str(ins['LayerIds'][0]) == get_layer_id(conn,stack_name,layer_name):\n\t\t\tlayerInstances.append(ins['InstanceId'])\n\t\t\treturn layerInstances", "def layers(self): # -> LayerView:\n ...", "def getLayers(self,ds):\n numLayers = ds.GetLayerCount()\n layers = []\n for i in range(numLayers):\n layers.append(ds.GetLayerByIndex(i))\n return layers", "def get_drawn_objects(self):\n return self._drawnObjects", "def getAllFeatures(self,layers): \n features = []\n for layer in layers:\n features += self.getFeatures(layer)\n return features", "def _get_subobjects(self) -> Iterable[SymbolicObject]:\n\n return self._subobjects", "def get(self):\r\n\t\treturn list(self)", "def get_objects(self):\n return \\\n self,\\\n self.label,\\\n self.frame_controls, \\\n (\n self.button_decrease,\n self.scale_volume,\n self.button_increase\n )", "def getFeatures(self,layer): \n numFeatures = layer.GetFeatureCount()\n features = []\n for i in range(numFeatures):\n feature = layer.GetNextFeature()\n if feature is not None:\n geomRef = feature.GetGeometryRef()\n if((geomRef is not None and geomRef.GetPointCount() != 0)):\n features.append(self.getFeatureInfo(feature))\n return features", "def get_all(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n layers.append(layer)\n\n return layers", "def ole_objects(self):\n return self.container['ole_objects']", "def get_layers(self):\n layers = []\n\n for s in self.surfaces:\n n = self.miller_to_direction(s)\n r = np.dot(self.get_positions() - self.center, n).max()\n d = self.get_layer_distance(s, 2)\n l = 2 * np.round(r / d).astype(int)\n\n ls = np.arange(l-1,l+2)\n ds = np.array([self.get_layer_distance(s, i) for i in ls])\n\n mask = (np.abs(ds - r) < 1e-10)\n\n layers.append(ls[mask][0])\n\n return np.array(layers, int)", "def objects(self):\n return self.obj_to_id.keys()", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n if obj.modeling_cloth: \n yield (obj, obj.matrix_world.copy())", "def getItems(self):\n return self.getCutPlanes() + self.getIsosurfaces()", "def active_objects(self):\n return self._active_objects", "def 
objects_rst(self):\n return [_.as_rst for _ in self.objects]", "def get_selected_objects (context):\n return [obj for obj in context.selected_objects if obj.type == 'MESH']", "def get_objects_data(self):\n return dict(result=self.objects)", "def _get_all_objects(self):\n level_objects = Group()\n mobs_objects = Group()\n for sprite_group in self.sprite_level_blocks:\n for object_ in sprite_group:\n if str(object_) == 'Mob':\n mobs_objects.add(object_)\n elif str(object_) == 'Block':\n level_objects.add(object_)\n return {'level_blocks': level_objects,\n 'mobs': mobs_objects}", "def GetShapesOfLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_GetShapesOfLayer(self, *args)", "def entries(self):\n out = []\n for y,x in self.coords(False):\n out.append((y,x,self.retrieve(y,x)))\n return out", "def get_layers(self):\n layers = set()\n for element in itertools.chain(self.polygons, self.paths):\n layers.update(element.layers)\n for reference in self.references:\n layers.update(reference.ref_cell.get_layers())\n for label in self.labels:\n layers.add(label.layer)\n return layers", "def items(self):", "def get_obstList(self,X,Y,Z):\n return []", "def get_obstList(self,X,Y,Z):\n return []", "def layers(fbx_scene):\n # -- Define a list to collate our matched\n # -- layers into\n all_layers = list()\n\n # -- Cycle over all the obects in the scnee\n for idx in range(fbx_scene.GetSrcObjectCount()):\n\n # -- Check if this item is indeed an animation\n # -- layer and scoop it if it is.\n candidate = fbx_scene.GetSrcObject(\n fbx.FbxCriteria.ObjectType(\n fbx.FbxAnimLayer.ClassId,\n ),\n idx,\n )\n\n if candidate:\n all_layers.append(candidate)\n\n return all_layers", "def get_all_object_names(self):\n o_objects = []\n for s in [\"Non Model\", \"Solids\", \"Unclassified\", \"Sheets\", \"Lines\"]:\n o_objects += self.design.modeler.get_objects_in_group(s)\n return o_objects", "def visible_objects_and_duplis():\r\n \r\n for obj in context.visible_objects:\r\n if obj.type == 'MESH':\r\n yield (obj, obj.matrix_world.copy())\r\n \r\n if obj.dupli_type != 'NONE':\r\n obj.dupli_list_create(scene)\r\n for dob in obj.dupli_list:\r\n obj_dupli = dob.object\r\n if obj_dupli.type == 'MESH':\r\n yield (obj_dupli, dob.matrix.copy())\r\n \r\n obj.dupli_list_clear()", "def layers(self):\n\n if not self.last_node:\n return []\n return nuke.layers(self.last_node)", "def getAllLayers(self):\n layersData = []\n for row in range(self.jobRow.rowCount()):\n nameItem = self.jobRow.child(row, 0)\n data = nameItem.data(QtCore.Qt.UserRole)\n layersData.append(data)\n return layersData", "def list(self):\n return [self.x, self.y, self.z]", "def __iter__(self):\n # Ripped off from elasticutils\n return (self.objects[id] for id in self.ids if id in self.objects)", "def get_layers(self, content_retriever):\n pass", "def get_layers(self, content_retriever):\n pass", "def getItems(self):\n for object in self.database:\n print(object)", "def list_objects(self, path):\n return [x for x in self.list_objects_generator(path)]", "def drawall(self):\r\n for x in self.objectlist:\r\n if x.model:\r\n x.model.draw()", "def getLayerNames(self):\n\t\treturn self._fileSystem.getLayerNames()", "def detect_objects(self, image):\n # Feed the input image to the model\n self.set_input_tensor(image)\n self.model.invoke()\n\n # Get all outputs from the model\n boxes = self.get_output_tensor(0)\n classes = self.get_output_tensor(1)\n scores = self.get_output_tensor(2)\n count = int(self.get_output_tensor(3))\n\n results = []\n for i in range(count):\n 
result = {\n 'bounding_box': boxes[i],\n 'class_id': int(classes[i]),\n 'score': scores[i]\n }\n results.append(result)\n return results", "def consume_layer(self, reports):\n layer_list = []\n layer_count = 1\n for report in reports:\n layer = create_image_layer(report)\n layer.layer_index = layer_count\n layer_list.append(layer)\n layer_count += 1\n return layer_list", "def index(self):\n return list(self._innercontainer)", "def all_nodes(self):\n nodes = []\n for layer in self.layers:\n nodes += layer.nodes\n return nodes", "def get_all_objects():\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist", "def items(self):\n return _osgAnimation.BoneMap_items(self)", "def visible_objects_and_duplis():\n\n for obj in context.visible_objects:\n if obj.type == 'MESH':\n yield (obj, obj.matrix_world.copy())\n\n if obj.instance_type != 'NONE':\n obj.dupli_list_create(scene)\n for dob in obj.dupli_list:\n obj_dupli = dob.object\n if obj_dupli.type == 'MESH':\n yield (obj_dupli, dob.matrix.copy())\n\n obj.dupli_list_clear()", "def getContents(self):\r\n cont=[]\r\n for i in range (len(self._indices)):\r\n cont.append(self._dataset.getPoint(self._indices[i]))\r\n return cont", "def get_list(self):\n obj_list = []\n for group in self.root_item.child_items:\n for item in group.child_items:\n obj_list.append(item.obj)\n\n return obj_list", "def get_features(self):\n return []", "def _get_observation(self, session):\n object_data = session[SESSION_OBJ_2D]\n sess_len = session[SESSION_LEN]\n # print (object_data)\n # print (sess_len)\n\n object_1_name, object_2_name = feature_utils.get_most_active_objects_interval(object_data, object_data.keys(), 0, sess_len)\n\n features = []\n\n for name in [object_1_name, object_2_name]:\n for frame in [-2, -1]:\n object_data[name][frame].transform.position\n features.append(object_data[name][frame].transform.get_feat())\n\n return np.concatenate( features ).flatten()", "def get_objects(self):\r\n bucket = self._get_bucket()\r\n objs = []\r\n for key in bucket:\r\n objs.append(key)\r\n return objs", "def layerListObject(self, parent):\n pass", "def extract_labels_full(scene: \"Scenemaker\") -> List[Tuple[int, np.ndarray]]:\r\n objects = utils.select_collection(scene.target_collection)\r\n boxes_list = []\r\n\r\n for obj in objects:\r\n objclass = obj.name.split(\".\")[0]\r\n dim = obj.dimensions\r\n rot = normalize_rotations(np.array(obj.rotation_euler))\r\n loc = change_to_spawnbox_coords(np.array(obj.location))\r\n boxes_list.append((scene.name2num[objclass], np.concatenate((loc, dim, rot))))\r\n\r\n return boxes_list", "def get_layers(doc):\n\tnodes = []\n\troot = doc.rootNode()\n\tfor node in root.childNodes():\n\t\tprint('Debug: found node of type %s: %s' % (node.type(), node.name()))\n\t\tif node.type() == \"paintlayer\":\n\t\t\tnodes.append(node)\n\treturn nodes", "def all(self):\n return list(self)", "def results(self):\n self.set_limit()\n\n # always get the latest data\n uuids = ITileDataManager(self).get().get(\"uuids\", None)\n\n results = list()\n if uuids:\n ordered_uuids = [(k, v) for k, v in uuids.items()]\n ordered_uuids.sort(key=lambda x: int(x[1][\"order\"]))\n\n for uuid in [i[0] for i in ordered_uuids]:\n obj = uuidToObject(uuid)\n if obj:\n results.append(obj)\n else:\n # maybe the user has no permission to access the object\n # so we try to get it bypassing the restrictions\n catalog = 
api.portal.get_tool(\"portal_catalog\")\n brain = catalog.unrestrictedSearchResults(UID=uuid)\n if not brain:\n # the object was deleted; remove it from the tile\n self.remove_item(uuid)\n logger.debug(\n \"Non-existent object {0} removed from tile\".format(uuid)\n )\n return results[: self.limit]", "def get_obstacles(self):\n return self.obstacles", "def get(self, *args):\n return _libsbml.ListOfGraphicalObjects_get(self, *args)", "def make_int_object_list(self):\n from libtbx import easy_pickle as ep\n\n if self.params.cctbx.selection.select_only.grid_search_path == None:\n int_dir = misc.set_base_dir('integration', True)\n else:\n int_dir = self.params.cctbx.selection.select_only.grid_search_path\n\n img_objects = []\n\n # Inspect integration folder for image objects\n for root, dirs, files in os.walk(int_dir):\n for filename in files:\n found_file = os.path.join(root, filename)\n if found_file.endswith(('int')):\n obj = ep.load(found_file)\n img_objects.append(obj)\n\n # Pick a randomized subset of images\n if self.params.advanced.random_sample.flag_on and \\\n self.params.advanced.random_sample.number < len(img_objects):\n gs_img_objects = self.select_random_subset(img_objects)\n else:\n gs_img_objects = img_objects\n\n return gs_img_objects", "def all_objects():\n objs = {}\n objs['Section'] = list(h.all_sec())\n objs['Segment'] = []\n for sec in objs['Section']:\n objs['Segment'].extend(list(sec.allseg()))\n objs['PointProcess'] = []\n for seg in objs['Segment']:\n objs['PointProcess'].extend(list(seg.point_processes()))\n \n return objs", "def make_objects(self):\n pass", "def object_lists(self) -> Dict[str, List[Any]]:\n return {name: self.hyperparams[name][2] for name in self.names()\n if self.hyperparams[name][0] == 'object'}", "def get_objects(self, image_np: np.array,\n image: Image) -> Tuple[Dict, object]:\n pass", "def get_related_objects(self):\n result = []\n if self['name'] != None:\n tmp = ObjectDefinition.objects.filter(use__has_field=self['name'], object_type=self['object_type'])\n for i in tmp: result.append(i)\n return result", "def objects(self):\n if not self._objects:\n id_set = {}\n for x in self.addition_events():\n if 'id' in x: id_set[UUID(x['id'])] = 1\n self._objects = id_set.keys()\n\n return self._objects", "def addObjects(self):\n\n self.root = self.addRoot()\n vTemp = transform.getOffsetPosition(self.root, [0, 1, 0])\n self.top_loc = self.addLoc(\"top\", self.root, vTemp)\n centers = [self.root, self.top_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [0, -1, 0])\n self.bottom_loc = self.addLoc(\"bottom\", self.root, vTemp)\n centers = [self.root, self.bottom_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [1, 0, 0])\n self.ext_loc = self.addLoc(\"ext\", self.root, vTemp)\n centers = [self.root, self.ext_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)\n\n vTemp = transform.getOffsetPosition(self.root, [-1, 0, 0])\n self.int_loc = self.addLoc(\"int\", self.root, vTemp)\n centers = [self.root, self.int_loc]\n self.dispcrv = self.addDispCurve(\"crv\", centers)", "def __getitem__(self, idx):\n obj_idx, rel_idx = self.get_instance_idx(idx)\n\n # print('obj_idx = ', obj_idx, 'rel_idx = ', rel_idx)\n\n observations = []\n observations.append(self.all_instances[obj_idx][rel_idx])\n\n if self.load_depth:\n ground_truth = [{\n 'rgb':ray_bundle['rgb'],\n 'depth':ray_bundle['depth']} for ray_bundle in observations]\n\n else:\n 
ground_truth = [{'rgb':ray_bundle['rgb']} for ray_bundle in observations]\n # print('** ground_truth = ', ground_truth[0]['rgb'].shape)\n\n return observations, ground_truth", "def getActiveObjects(doc):\n lst = list()\n op = doc.GetFirstObject()\n while op:\n if op.GetBit(c4d.BIT_ACTIVE) == True: \n lst.append(op)\n op = Helpers.getHNext(op)\n return lst", "def FindObjects(*args, **kwargs):\n return _gdi_.PseudoDC_FindObjects(*args, **kwargs)" ]
[ "0.76588994", "0.7365626", "0.7147258", "0.69459087", "0.69181466", "0.6739161", "0.670558", "0.6694696", "0.65370286", "0.6459321", "0.6447105", "0.64106923", "0.63908225", "0.6376006", "0.6317398", "0.63025784", "0.63025784", "0.63025784", "0.63025784", "0.63025784", "0.63025784", "0.6298118", "0.628634", "0.62625235", "0.62346673", "0.6211688", "0.6131865", "0.6126473", "0.6092808", "0.60900414", "0.6070959", "0.6058407", "0.5987503", "0.5956447", "0.5941118", "0.59303653", "0.59268", "0.59229803", "0.59145594", "0.5904918", "0.5901933", "0.59005725", "0.58996993", "0.5894722", "0.5894722", "0.5880257", "0.5871114", "0.58592814", "0.5844526", "0.58292353", "0.5827191", "0.58255565", "0.58192974", "0.5818277", "0.5816807", "0.5816064", "0.5816064", "0.5815471", "0.5788526", "0.57808244", "0.5779725", "0.577762", "0.57605684", "0.57272905", "0.5718496", "0.5718496", "0.5698527", "0.56982774", "0.5694293", "0.56928223", "0.5688194", "0.5687724", "0.5685486", "0.56748664", "0.5672679", "0.56699395", "0.56691396", "0.56683874", "0.56667846", "0.56661034", "0.56562054", "0.56514734", "0.5646343", "0.564325", "0.5637121", "0.56353366", "0.56236935", "0.5613602", "0.5612178", "0.5605293", "0.5605247", "0.56043243", "0.55951303", "0.55922747", "0.5591988", "0.55864674", "0.558556", "0.5584982", "0.5578943", "0.55774635" ]
0.6658779
8
return the overrides in the layer
def dataOverrides(self): return self._overrides
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def overrides(self) -> ConfigNodePropertyArray:\n return self._overrides", "def default_binding_overrides(self):\n return self.__default_binding_overrides", "def ComponentOverrides(self):\n return tuple(self._json_obj.get('component_overrides', {}).items())", "def makeOverrides(self):\n\t\tself.overridesWithValues = self.dataOverrides", "def overrides(self) -> tuple[dict[str, Any], dict[str, Any]]:\n settings = {}\n if self.actions:\n settings = self.actions.overrides\n if self.validations:\n settings |= self.validations.overrides\n\n filter_settings = {}\n if self.extra_fields:\n filter_settings = self.extra_fields.model_dump(exclude_unset=True)\n\n return settings, filter_settings", "def test_get_overrides(self):\n # FormOverrideMixIn.get_overrides\n pass", "def get_overrides(conn):\n with conn.cursor(cursor_factory=RealDictCursor) as cur:\n cur.execute(sql_overrides)\n return cur.fetchall()", "def withOverrides(overrides):", "def filter_contiguity_overrides(self):\n return self.filter_nodes('//ContiguityOverrides/ContiguityOverride')", "def get_overrides(token_fields_base, token_fields_from_args):\n overrides = []\n for key_raw, _ in token_fields_from_args.items():\n keys = key_raw.split('.')\n base_ref = token_fields_base\n try:\n for key in keys:\n base_ref = base_ref[key]\n # no KeyError means that the token_fields_base has an existing value corresponding with the arg\n overrides.append(key_raw)\n except KeyError:\n pass\n return overrides", "def get_overrides(self, app, name, namespace):\n try:\n return self._list(self._path(app) +\n '?name=' + name +\n '&namespace=' + namespace)[0]\n except IndexError:\n return None", "def shrinkage_overrides(self):\n return self._shrinkage_overrides", "def get_overrides_columns(self):\n\n if hasattr(self, '_overrides'):\n return list(self._overrides.columns)\n return []", "def _resolve_overrides(self):\r\n if not self.override_targets:\r\n return self._pre_override_dependencies\r\n\r\n result = OrderedSet()\r\n\r\n # resolve overrides and fetch all of their \"artifact-providing\" dependencies\r\n excludes = set()\r\n for override_target in self.override_targets:\r\n # add pre_override deps of the target as exclusions\r\n for resolved in override_target.resolve():\r\n excludes.update(self._excludes(resolved))\r\n # prepend the target as a new target\r\n result.add(override_target)\r\n\r\n # add excludes for each artifact\r\n for direct_dep in self._pre_override_dependencies:\r\n # add relevant excludes to jar dependencies\r\n for jar_dep in self._jar_dependencies(direct_dep):\r\n for exclude in excludes:\r\n jar_dep.exclude(exclude.org, exclude.name)\r\n result.add(direct_dep)\r\n\r\n return result", "def _get_layers(self) :\n \n return self._layers", "def conditional_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['GoogleCloudChannelV1ConditionalOverrideArgs']]]]:\n return pulumi.get(self, \"conditional_overrides\")", "def add_over(self, override: ItemConfig) -> None:\n self.all_conf = lazy_conf.concat(self.all_conf, override.all_conf)\n\n for vers_id, styles in override.versions.items():\n our_styles = self.versions.setdefault(vers_id, {})\n for sty_id, style in styles.items():\n if sty_id not in our_styles:\n our_styles[sty_id] = style\n else:\n our_styles[sty_id] = lazy_conf.concat(our_styles[sty_id], style)", "def layers(self):\n return self['layers']", "def override_paramset(self, override_str):\n\n paramset = ParamSet()\n if not override_str:\n return paramset\n\n override = eval(override_str, {}, {})\n if not override:\n 
return paramset\n\n for override_name in override:\n # The override can have a node_name/parm format which allows for point\n # instance overrides to override parms in a network.\n\n cached_override = self.override_cache.get(override_name, None)\n if cached_override is not None:\n # Hint to just skip\n if cached_override == -1:\n continue\n if isinstance(cached_override, PBRTParam):\n # textures which can't be overriden\n paramset.add(cached_override)\n continue\n pbrt_name, pbrt_type, tuple_names = cached_override\n if tuple_names:\n value = [override[x] for x in tuple_names]\n else:\n value = override[override_name]\n pbrt_param = PBRTParam(pbrt_type, pbrt_name, value)\n paramset.add(pbrt_param)\n continue\n\n override_match = self.override_pat.match(override_name)\n spectrum_type = override_match.group(\"spectrum\")\n parm_name = override_match.group(\"parm\")\n override_node = override_match.group(\"node\")\n if override_node is not None and override_node != self.name:\n self.override_cache[override_name] = -1\n continue\n\n # There can be two style of \"overrides\" one is a straight parm override\n # which is similar to what Houdini does. The other style of override is\n # for the spectrum type parms. Since spectrum parms can be of different\n # types and the Material Overrides only support \"rgb\" we are limited\n # in the types of spectrum overrides we can do. To work around this we'll\n # support a different style, override_parm:spectrum_type. If the parm name\n # ends in one of the \"rgb/color\" types then we'll handle it differently.\n # TODO add a comment as to what the value would look like\n\n # NOTE: The material SOP will use a parm style dictionary if there\n # parm name matches exactly\n # ie) if there is a color parm you will get\n # {'colorb':0.372511,'colorg':0.642467,'colorr':0.632117,}\n # But if the parm name doesn't match (which we are allowing\n # for you will get something like this -\n # {'colora':(0.632117,0.642467,0.372511),}\n\n # Once we have a parm name, we need to determine what \"style\" it is.\n # Whether its a hou.ParmTuple or hou.Parm style.\n tuple_names = tuple()\n parm_tuple = self.node.parmTuple(parm_name)\n if parm_tuple is None:\n # We couldn't find a tuple of that name, so let's try a parm\n parm = self.node.parm(parm_name)\n if parm is None:\n # Nope, not valid either, let's move along\n self.override_cache[override_name] = -1\n continue\n # if its a parm but not a parmtuple it must be a split.\n parm_tuple = parm.tuple()\n # we need to \"combine\" these and process them all at once and\n # then skip any other occurances. The skipping is handled by\n # the overall caching mechanism. 
self.override_cache\n tuple_names = tuple([x.name() for x in parm_tuple])\n\n # This is for wrangling parm names of texture nodes due to having a\n # signature parm.\n pbrt_parm_name = self.pbrt_parm_name(parm_tuple.name())\n\n if spectrum_type is None and tuple_names:\n # This is a \"traditional\" override, no spectrum or node name prefix\n value = [override[x] for x in tuple_names]\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, value\n )\n elif spectrum_type in (\"spectrum\", \"xyz\", \"blackbody\"):\n pbrt_param = PBRTParam(\n spectrum_type, pbrt_parm_name, override[override_name]\n )\n elif not tuple_names:\n pbrt_param = self._hou_parm_to_pbrt_param(\n parm_tuple, pbrt_parm_name, override[override_name]\n )\n else:\n raise ValueError(\"Unable to wrangle override name: %s\" % override_name)\n\n paramset.add(pbrt_param)\n\n # From here to the end of the loop is to allow for caching\n\n if pbrt_param.type == \"texture\":\n self.override_cache[override_name] = pbrt_param\n continue\n\n # we are making an assumption a split parm will never be a spectrum\n # or have a node prefix. The Material SOP doesn't allow for it as well.\n for name in tuple_names:\n # The -1 means \"continue\"\n self.override_cache[name] = -1\n # Sanity check\n if tuple_names and override_name not in tuple_names:\n raise ValueError(\n \"Override name: %s, not valid for a parmTuple\" % override_name\n )\n # override_name must match one of the tuple_names\n self.override_cache[override_name] = (\n pbrt_param.name,\n pbrt_param.param_type,\n tuple_names,\n )\n return paramset", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def GetDefaultLayerProperties():\r\n pass", "def tweaks(self) -> None:\n pass", "def layer_offsets(self):\n ...", "def get_patches(self):\n self.get_source_patch_masks()\n self.get_target_patch_masks()\n self.get_source_patches()", "def apply_replacements(self, env, **kw):\n ovr = self.replacements.apply(env)\n kw = self.replacements.apply(kw, True)\n return (env.Override(ovr), kw)", "def _overrides(self, tense, overrides, attr_name,persons=None): \n if not hasattr(self, attr_name):\n self_overrides = [ None ] * len(Tense)\n setattr(self, attr_name, self_overrides) \n else:\n self_overrides = getattr(self, attr_name)\n \n if tense in Tense.Person_Agnostic():\n if isinstance(overrides, str) or self_overrides[tense] is None:\n self_overrides[tense] = [ overrides ]\n else:\n self_overrides[tense].append(overrides)\n return\n \n if persons is None:\n _persons = Person\n elif isinstance(persons, int):\n _persons = [ persons ]\n elif isinstance(persons, list):\n _persons = persons\n else:\n self.__raise(\"persons must be None, integer or list of integers\", tense)\n \n if self_overrides[tense] is None:\n self_overrides[tense] = [None] * len(Person)\n \n if isinstance(overrides, str) or inspect.isfunction(overrides) or inspect.ismethod(overrides): \n for person in _persons:\n if isinstance(overrides, str) or self_overrides[tense][person] is None:\n # if a hard replacement (string), previous overrides are discarded because they will be replaced.\n # or this is the first override\n self_overrides[tense][person] = [overrides]\n else:\n self_overrides[tense][person].append(overrides) \n \n elif isinstance(overrides, list):\n for person, override in enumerate(overrides):\n if override is not None:\n if isinstance(override, str) or 
self_overrides[tense][person] is None:\n # if a hard replacement (string), previous overrides are discarded because they will be replaced.\n # or this is the first override\n self_overrides[tense][person] = [override]\n else:\n self_overrides[tense][person].append(override)", "def control_plane_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'ControlPlaneUpgradeOverride']]]]]:\n return pulumi.get(self, \"control_plane_overrides\")", "def _state_overridden(self, want, have):\n # overridden behavior is the same as replaced except for scope.\n cmds = []\n for i in have:\n obj_in_want = search_obj_in_list(i[\"name\"], want, \"name\")\n if obj_in_want:\n if i != obj_in_want:\n v4_cmds = self._v4_cmds(\n obj_in_want.pop(\"ipv4\", []),\n i.pop(\"ipv4\", []),\n state=\"overridden\",\n )\n replaced_cmds = self._state_replaced(obj_in_want, [i])\n replaced_cmds.extend(v4_cmds)\n self.cmd_order_fixup(replaced_cmds, obj_in_want[\"name\"])\n cmds.extend(replaced_cmds)\n else:\n deleted_cmds = self.generate_delete_commands(i)\n self.cmd_order_fixup(deleted_cmds, i[\"name\"])\n cmds.extend(deleted_cmds)\n\n for i in want:\n if [item for item in have if i[\"name\"] == item[\"name\"]]:\n continue\n cmds.extend(self.add_commands(i, name=i[\"name\"]))\n\n return cmds", "def overrides(self, overrides: ConfigNodePropertyArray):\n\n self._overrides = overrides", "def get_testing_overrides() -> Dict[Callable, Callable]:\n # Every function in the PyTorch API that can be overriden needs an entry\n # in this dict.\n #\n # Optimally we would use inspect to get the function signature and define\n # the lambda function procedurally but that is blocked by generating\n # function signatures for native kernels that can be consumed by inspect.\n # See Issue #28233.\n ret = {}\n ret.update(get_tensor_overrides())\n ret.update(get_torch_overrides())\n ret.update(get_nn_functional_overrides())\n return ret", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetFunnelColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLineColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBillboardColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def GetProperties(self):\n return [FOLD]", "def GetProperties(self):\n return [FOLD]", "def getProperyOverridesMode(shaderName):\n\n appliedAttributes = []\n c = rsUtility.collection(shaderName.replace(':', '_'), isQuery=True)\n for (index, item) in enumerate(rsUtility.overrideAttributes):\n if c.overrides(item['long']) is not None:\n appliedAttributes.append(c.overrides(item['long'\n ]).attributeName())\n else:\n appliedAttributes.append('')\n\n # If any of the attributes present enable propertyOverridesMode.\n\n if [attr for attr in rsUtility.overrideAttributes if attr['long']\n in appliedAttributes] != []:\n return appliedAttributes\n else:\n return False", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetJsonColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def apply_overrides(self, 
parent_values):\n raise NotImplementedError(\n \"{} does not have implemented `apply_overrides`\".format(self)\n )", "def getLayers(self):\n return self.__layers", "def _sampler_overrides(self, override_kwargs=None):\n if not override_kwargs:\n override_kwargs = {}\n parameter_count = len(self._parameter_names)\n if self.sampler_class == \"sobol\" and parameter_count == 2:\n override_kwargs = {**override_kwargs, \"calc_second_order\": False}\n return override_kwargs", "def get_layer_var_names(self):\n return(self.params)", "def getStyles(self):\r\n return self.styles", "def remove_overrides(self):\n raise NotImplementedError(\n \"{} Method `remove_overrides` not implemented!\".format(\n repr(self)\n )\n )", "def __getattribute__(self,attr):\n if attr in super(BaseTransformer,self).__getattribute__('_overrides'):\n return super(BaseTransformer,self).__getattribute__('_'+attr)\n return super(BaseTransformer,self).__getattribute__(attr)", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetPyColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapUnitSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def update_overrides(self, app, name, namespace,\n flag='reset', override_values=None):\n if override_values is None:\n override_values = {}\n body = {'flag': flag, 'values': override_values, 'attributes': {}}\n return self._update(self._path(app) +\n '?name=' + name +\n '&namespace=' + namespace, body)", "def get_config(self):\n layer_config = {\n \"anchors\": self._anchors, \n \"classes\": self._classes,\n \"ignore_thresh\": self._ignore_thresh, \n \"truth_thresh\": self._truth_thresh, \n \"iou_thresh\": self._iou_thresh, \n \"loss_type\": self._loss_type, \n \"iou_normalizer\": self._iou_normalizer,\n \"cls_normalizer\": self._cls_normalizer, \n \"scale_x_y\": self._scale_x_y, \n }\n layer_config.update(super().get_config())\n return layer_config", "def inject_overrides(self, overrides):\n for run in self.benchmarks:\n _ = [run.update_spec(key, value) for key, value in overrides.items()]", "def initialiseOverride(self):\n overrideConf = self.overrideConf\n overrideParams = {\n \"command\" : None,\n \"option\" : None,\n \"phedex-node\" : None,\n \"lfn-prefix\" : None,\n }\n\n try:\n overrideParams['command'] = overrideConf['command']\n overrideParams['phedex-node'] = overrideConf['phedex-node']\n overrideParams['lfn-prefix'] = overrideConf['lfn-prefix']\n except Exception as ex:\n msg = \"Unable to extract Override parameters from config:\\n\"\n msg += str(overrideConf)\n raise StageOutInitError(msg)\n if 'option' in overrideConf:\n if len(overrideConf['option']) > 0:\n overrideParams['option'] = overrideConf['option']\n else:\n overrideParams['option'] = \"\"\n\n msg = \"=======StageIn Override Initialised:================\\n\"\n for key, val in viewitems(overrideParams):\n msg += \" %s : %s\\n\" % (key, val)\n msg += \"=====================================================\\n\"\n print(msg)\n self.fallbacks = []\n self.fallbacks.append(overrideParams)\n return", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetTableColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetAreaColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, 
\"series_overrides\")", "def defaults_provider():\n return getattr(defaults_provider, 'overrides', {})", "def layers(self):\r\n return self._flc.layers", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetStackedBarColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def get_layer_params(self):\n return self.layer_params", "def presets(cls):\n return copy.deepcopy(backbone_presets)", "def get_ingredient_props(self):\n return self.ing_pop", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHeatmapNullValueSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def getStyles(self):\n return self.styles", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetLogTableColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBarColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def get_layers(self, content_retriever):\n pass", "def get_layers(self, content_retriever):\n pass", "def layers(self): # -> LayerView:\n ...", "def __getstate__(self):\n return (self.layers, self.best_loss)", "def set_overrides(self, *dicts):\n self._overrides_configs = [\n d if isinstance(d, ConfigTree) else ConfigFactory.from_dict(d) for d in dicts\n ]\n self.reload()", "def __getstate__(self):\n return {k: v for k, v in self.__dict__.iteritems() if k not in ['x', 'y', '_x', '_y']}", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletUnitSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetFunnelUnitSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def set_layers_affected(self):\n scene = self.set_as_active()\n\n if w_var.cb_only_selected:\n layers_affected = [False, ]*20\n\n for obj in scene.objects:\n if obj.select and obj.type == 'MESH':\n layers_affected = b_tools.manipulate_layerlists('add', layers_affected, obj.layers)\n\n else:\n layers_affected = list(scene.wirebomb.layers_affected)\n\n return layers_affected", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetJsonUnitSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def set_default_binding_overrides(self, overrides):\n self.__default_binding_overrides = overrides or {}", "def get_layers(self):\n layers = set()\n for element in itertools.chain(self.polygons, self.paths):\n layers.update(element.layers)\n for reference in self.references:\n layers.update(reference.ref_cell.get_layers())\n for label in self.labels:\n layers.add(label.layer)\n return layers", "def extended(self) -> List:\n raise NotImplementedError", "def tweak(self):\n\n return tweak_base(self)", "def return_layer_names(self):\n\n existing_layernames = []\n if self.node_data is not []:\n for lyrname, lyr in self.layer_lookup.items():\n if self.node_data[lyr].any():\n existing_layernames.append(lyrname)\n return existing_layernames", "def get_source_patch_masks(self):\n self.source_patch_masks = {\n patch_center: self.get_patch_mask(patch_center)\n for patch_center in self.patch_centers\n if not np.bitwise_and(self.get_patch_mask(patch_center), self.unknown_mask).any()\n }\n self.patch_centers = 
tuple(list(self.source_patch_masks.keys()))", "def hook(images, augmenter, parents, default):\n return augmenter.__class__.__name__ in MASK_AUGMENTERS", "def parameters(self):\n ps = super().parameters()\n exclude = set(self.estimator.parameters())\n ps = (p for p in ps if not p in exclude)\n return ps", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetHistogramColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def _existingLayerItems(self, layer):\n if layer is None:\n return None\n elif not isinstance(layer, int) or layer < 0:\n raise ValueError(\"Invalid layer number requested (%s)\" % (layer))\n else:\n return self._layer_items.get(layer, [])", "def process_overrides(self, db, dest, kvargs, lines):\n logging.info(\"process_overrides db:{} dest:{} kvargs:{} \".format(db.name,dest,kvargs))\n keyword = kvargs['keyword']\n db.create_overrides(keyword)\n return True", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetMarkdownColorSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def common_layers(self):\n return [\n self.attention_layer, self.attention_output_dense,\n self.attention_dropout, self.attention_layer_norm,\n self.intermediate_dense, self.output_dense, self.output_dropout,\n self.output_layer_norm\n ]", "def propagated_properties(self) -> Set[str]:\n propagated_props = {*self.elyra_owned_properties} # all Elyra-owned props should be propagated\n if self.is_generic:\n propagated_props.add(RUNTIME_IMAGE) # generic nodes should also have runtime_image propagated\n return propagated_props", "def _unset_defaults_and_overrides(self):\n for info, group in self._all_opt_infos():\n info.pop('default', None)\n info.pop('override', None)", "def refreshLayerLists(self):\n self.layers = self.iface.legendInterface().layers()\n self.lineLayerIndexMap = dict()\n self.pointLayerIndexMap = dict()\n self.lineLayerList = [] # holds the filtered layer names\n self.pointLayerList = [] # holds the filtered layer names\n for i, layer in enumerate(self.layers):\n try:\n if layer.geometryType() == 0: # 0: point, 1: line\n self.pointLayerIndexMap[len(self.pointLayerList)] = i # put the index pair in the dictionary\n self.pointLayerList.append(layer.name()) # add the layer name to the list\n elif layer.geometryType() == 1: # 0: point, 1: line\n self.lineLayerIndexMap[len(self.lineLayerList)] = i # put the index pair in the dictionary\n self.lineLayerList.append(layer.name()) # add the layer name to the list\n except AttributeError:\n # if the above checks failed, i.e. 
because of a raster layer, skip it\n continue", "def get_all_latched(self):\n return self.__latched_states", "def get_style2_features(self):\n return self.style2_features", "def hook(images, augmenter, parents, default):\n return augmenter.__class__.__name__ in MASK_AUGMENTERS", "def tract_and_patch(self):\n return {'tract': self.tract, 'patch': self.patch}", "def isOverride(self) -> bool:\n ...", "def state_updates(self):\n state_updates = []\n for layer in self.layers:\n if getattr(layer, 'stateful', False):\n if hasattr(layer, 'updates'):\n state_updates += layer.updates\n return state_updates", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBillboardUnitSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def _get_base_patch_attribute(self, name):\n return getattr(self.patches[0],name)", "def add_over(self: CorridorGroup, override: CorridorGroup) -> None:\n for key, corr_over in override.corridors.items():\n try:\n corr_base = self.corridors[key]\n except KeyError:\n self.corridors[key] = corr_over\n else:\n corr_base.extend(corr_over)", "def series_overrides(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletNullValueSeriesOverrideArgs']]]]:\n return pulumi.get(self, \"series_overrides\")", "def variations(self):\n keys = set(self._modifiers.keys())\n # add any missing 'Down' variation\n for k in self._modifiers.keys():\n keys.add(k.replace(\"Up\", \"Down\"))\n return keys" ]
[ "0.6976614", "0.6493725", "0.6390087", "0.6375561", "0.6312835", "0.6241755", "0.60139596", "0.59983903", "0.5893521", "0.5878425", "0.5835658", "0.5816514", "0.5811458", "0.58073103", "0.5771545", "0.5764795", "0.57407266", "0.5684022", "0.5561158", "0.5544201", "0.55372936", "0.55325943", "0.549828", "0.54922616", "0.5461393", "0.5453749", "0.5451989", "0.54368544", "0.54365575", "0.5431538", "0.5427523", "0.54244757", "0.53674823", "0.5350593", "0.5332364", "0.5332364", "0.5315886", "0.5302132", "0.5280499", "0.5266841", "0.52666336", "0.52644545", "0.5263425", "0.52596277", "0.52519345", "0.5251174", "0.52456397", "0.52455425", "0.5242524", "0.5240916", "0.5227161", "0.5212622", "0.5187533", "0.51796246", "0.5172715", "0.51710516", "0.5140163", "0.51383364", "0.51360196", "0.51338416", "0.510749", "0.5104349", "0.5095152", "0.5091454", "0.5091454", "0.5085493", "0.508521", "0.50829375", "0.5066589", "0.5063296", "0.50383335", "0.50378764", "0.5034177", "0.5029528", "0.50230706", "0.502082", "0.50178045", "0.501455", "0.50025386", "0.49741495", "0.49674118", "0.49657583", "0.4961789", "0.49564594", "0.49499625", "0.49388635", "0.4936753", "0.4925295", "0.49250108", "0.4924968", "0.4903476", "0.49022716", "0.48970968", "0.48949528", "0.48941645", "0.48850515", "0.48840722", "0.48756662", "0.4873492", "0.4865063" ]
0.72761494
0
return the overridden connections
def dataOverconns(self): return self._overconns
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_connections(self) -> _ConnectionsMap:\n seen: Dict[int, Any] = {}\n for parent in self.target.ancestors:\n if not isinstance(parent, NodeInstance):\n continue\n if parent is self.target.root:\n break\n if self.operation_host:\n self._get_connection(self.operation_host, parent, seen)\n self._get_connection(self.target.root, parent, seen)\n # get the rest of the default connections\n self._get_connection(self.target.root, None, seen)\n\n # reverse so nearest relationships replace less specific ones that have matching names\n connections = _ConnectionsMap( # the list() is for Python 3.7\n (rel.name, rel) for rel in reversed(list(seen.values()))\n )\n return connections", "def getConnectionList(self):\n return []", "def get_connections(self):\n return self.connections", "def get_connections(self):\n return self.connections", "def getConnectionsBetweenSuperPeers(self):\r\n raise NotImplementedError()", "def get_connections(self):\n return self._connections", "def connections(self, recurse = True):\n \n return NeuroObject.connections(self, recurse) + [self.root] + self.arborizations(False) + self.gapJunctions(False) + self.innervations(False) + self.synapses(False)", "def get_outgoing_connections(self, comp):\n return self.connections.get(comp.id, [])", "def sitecurclntconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecurclntconnections\n\t\texcept Exception as e:\n\t\t\traise e", "def remote_connections(self):\r\n\r\n self.remote = self.newest_connections[~((self.newest_connections['remote_address'] == '0.0.0.0') | (self.newest_connections['remote_address'] == '127.0.0.1'))]\r\n return self.remote", "def all_connections(self):\n for i in _xrange(self.num_patterns):\n for c in self._available_connections[i]:\n yield c\n for c in self._in_use_connections[i]:\n yield c", "def get_connections(self, name):\n cls, pending, connected = self._proxies[name]\n return list(connected)", "def user_connections(self):\r\n return users.UserConnections(self)", "def sitecursrvrconnections(self) :\n\t\ttry :\n\t\t\treturn self._sitecursrvrconnections\n\t\texcept Exception as e:\n\t\t\traise e", "def connections(self, src=False, dst=True, params=True): \n conns = []\n if params:\n if src:\n #grab the node params that this node is a src to\n edges = self.parent.graph.out_edges(self, data=True) \n conns.extend([ edge[2][\"dst_param\"] for edge in edges ])\n if dst:\n #grab the node param that this node is a dst to\n edges = self.parent.graph.in_edges(self, data=True) \n conns.extend([ edge[2][\"src_param\"] for edge in edges ])\n else: \n if src:\n conns.extend(self.parent.graph.successors(self))\n if dst:\n conns.extend(self.parent.graph.predecessors(self))\n \n return conns", "def classic_connect_all(self):\n return [s.classic_connect() for s in self.servers]", "def list_connections(self):\n return self.network.list_connections()", "def get_connection_info(self):\n return [(c.fullname, [u[1] for u in c.objects])\n for c in self._connections]", "def _create_special_connections(self):\n\t\tfor connection in self._infoSpecialConnections:\n\t\t\t# List of source cells ids\n\t\t\tsourcesId = self.cellsId[connection[0]][connection[1]]\n\t\t\t# gather the sources all together\n\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t# List of taget cells ids\n\t\t\ttargetsId = self.cellsId[connection[2]][connection[3]]\n\t\t\t# Ratio of connection\n\t\t\tconRatio = connection[4]\n\t\t\t# Number of 
connections\n\t\t\tconNum = int(connection[5])\n\t\t\t# Weight of connections\n\t\t\tconWeight = float(connection[6])\n\t\t\t# Type of synapse\n\t\t\tsynType = connection[7]\n\t\t\t# connect sources to targets\n\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)", "def connections_outgoing(self):\n return self.client.call('GET', self.name + 'connections/outgoing')", "def sockets(self):\n return [connection.socket() for connection in self]", "def list_conns(self):\n\t\tres = []\n\t\tself.AL.acquire()\n\t\tfor ls in self.ls.keys():\n\t\t\tinfo = self.ls[ls]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Relay\", \"LOCAL\", info[\"local\"], info[\"peer\"],\n\t\t\t\t\tinfo[\"port\"], info[\"got\"], None,\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tfor s in self.s2i.keys():\n\t\t\tinfo = self.s2i[s]\n\t\t\tif info[\"creator\"] == self.cid:\n\t\t\t\tfai = \"LOCAL\"\n\t\t\t\ttai = info[\"peer\"]\n\t\t\telse:\n\t\t\t\tfai = info[\"creator\"]\n\t\t\t\ttai = info[\"peer\"]\n\t\t\tres.append(\n\t\t\t\t(\n\t\t\t\t\t\"Conn\", fai, info[\"local\"], tai, info[\"port\"],\n\t\t\t\t\tinfo[\"recv\"], info[\"send\"]\n\t\t\t\t\t)\n\t\t\t\t)\n\t\tself.AL.release()\n\t\treturn res", "def getConnectedInterfaces(self):\n interfaces = self.connectedInterfaces[:] #make a copy\n interfaces.extend(self.getLinkedInterfaces())\n return interfaces", "def getConnections():\n\n c = psutil.net_connections()\n connects = {}\n\n count = 0\n for connection in c:\n conn = {}\n status = connection.status\n if status == 'ESTABLISHED' or connection.status == 'CLOSE_WAIT':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n conn['remote'] = connection.raddr[0] + ':' + str(connection.raddr[1])\n connects[count] = conn\n count += 1\n elif status == 'LISTEN':\n conn['status'] = status\n conn['local'] = connection.laddr[0] + ':' + str(connection.laddr[1])\n connects[count] = conn\n count += 1\n else:\n pass\n\n return connects", "def connections( self, cls = None ):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.inputNode() == self or item.outputNode() == self ):\n output.append(item)\n \n return output", "def get_cidr_graphs_connection(self):\n return self.m_connection.cidr_graphs", "def find_referring_connections(self, name):\n exprset = set(self._exprmapper.find_referring_exprs(name))\n return [(u, v) for u, v in self.list_connections(show_passthrough=True)\n if u in exprset or v in exprset]", "def get_incoming_connections(self, comp):\n in_connections = []\n for comp_id, connections in self.connections.items():\n for connection in connections:\n source, name = connection\n if source == comp.data:\n in_connections.append(connection)\n return in_connections", "def preferred_connections(\n self,\n ssl=None,\n locations=None,\n schemes=None,\n ):\n if locations is None:\n locations = self.DEFAULT_LOCATION_ORDER[:]\n if schemes is None:\n schemes = self.DEFAULT_SCHEME_ORDER[:]\n\n connections_dict = {location: {scheme: [] for scheme in schemes} for location in locations}\n for connection in self.connections:\n # Only check non-local connections unless we own the resource\n if self.owned or (not self.owned and not connection.local):\n location = 'relay' if connection.relay else ('local' if connection.local else 'remote')\n if location not in locations:\n continue\n if 'http' in schemes:\n 
connections_dict[location]['http'].append(connection.httpuri)\n if 'https' in schemes:\n connections_dict[location]['https'].append(connection.uri)\n if ssl is True: schemes.remove('http')\n elif ssl is False: schemes.remove('https')\n connections = []\n for location in locations:\n for scheme in schemes:\n connections.extend(connections_dict[location][scheme])\n return connections", "def get_connections_out(self) -> dict:\n return self.__ni_out", "def get_graphs_connection(self):\n return self.m_connection.graphs", "def _get_connection(\n source: NodeInstance, target: Optional[NodeInstance], seen: dict\n ) -> None:\n if source is target:\n return None\n for rel in source.get_requirements(target):\n if id(rel) not in seen:\n seen[id(rel)] = rel", "def get_connections_in(self) -> dict:\n return self.__ni_in", "def get_all_dns_connection(self):\n return self.m_connection.all_dns", "def outputConnections(self, cls=None):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.outputNode() == self ):\n output.append(item)\n \n return output", "def get_connections(self, kind='inet'):\r\n # Note: in case of UNIX sockets we're only able to determine the\r\n # local bound path while the remote endpoint is not retrievable:\r\n # http://goo.gl/R3GHM\r\n inodes = {}\r\n # os.listdir() is gonna raise a lot of access denied\r\n # exceptions in case of unprivileged user; that's fine:\r\n # lsof does the same so it's unlikely that we can to better.\r\n for fd in os.listdir(\"/proc/%s/fd\" % self.pid):\r\n try:\r\n inode = os.readlink(\"/proc/%s/fd/%s\" % (self.pid, fd))\r\n except OSError:\r\n continue\r\n if inode.startswith('socket:['):\r\n # the process is using a socket\r\n inode = inode[8:][:-1]\r\n inodes[inode] = fd\r\n\r\n if not inodes:\r\n # no connections for this process\r\n return []\r\n\r\n def process(fin, family, type_):\r\n retlist = []\r\n try:\r\n f = open(fin, 'r')\r\n except IOError:\r\n # IPv6 not supported on this platform\r\n err = sys.exc_info()[1]\r\n if err.errno == errno.ENOENT and fin.endswith('6'):\r\n return []\r\n else:\r\n raise\r\n try:\r\n f.readline() # skip the first line\r\n for line in f:\r\n # IPv4 / IPv6\r\n if family in (socket.AF_INET, socket.AF_INET6):\r\n _, laddr, raddr, status, _, _, _, _, _, inode = \\\r\n line.split()[:10]\r\n if inode in inodes:\r\n laddr = self._decode_address(laddr, family)\r\n raddr = self._decode_address(raddr, family)\r\n if type_ == socket.SOCK_STREAM:\r\n status = _TCP_STATES_TABLE[status]\r\n else:\r\n status = \"\"\r\n fd = int(inodes[inode])\r\n conn = nt_connection(fd, family, type_, laddr,\r\n raddr, status)\r\n retlist.append(conn)\r\n elif family == socket.AF_UNIX:\r\n tokens = line.split()\r\n _, _, _, _, type_, _, inode = tokens[0:7]\r\n if inode in inodes:\r\n\r\n if len(tokens) == 8:\r\n path = tokens[-1]\r\n else:\r\n path = \"\"\r\n fd = int(inodes[inode])\r\n type_ = int(type_)\r\n conn = nt_connection(fd, family, type_, path,\r\n None, \"\")\r\n retlist.append(conn)\r\n else:\r\n raise ValueError(family)\r\n return retlist\r\n finally:\r\n f.close()\r\n\r\n tcp4 = (\"tcp\" , socket.AF_INET , socket.SOCK_STREAM)\r\n tcp6 = (\"tcp6\", socket.AF_INET6, socket.SOCK_STREAM)\r\n udp4 = (\"udp\" , socket.AF_INET , socket.SOCK_DGRAM)\r\n udp6 = (\"udp6\", socket.AF_INET6, socket.SOCK_DGRAM)\r\n unix = (\"unix\", socket.AF_UNIX, None)\r\n\r\n tmap = {\r\n \"all\" : (tcp4, 
tcp6, udp4, udp6, unix),\r\n \"tcp\" : (tcp4, tcp6),\r\n \"tcp4\" : (tcp4,),\r\n \"tcp6\" : (tcp6,),\r\n \"udp\" : (udp4, udp6),\r\n \"udp4\" : (udp4,),\r\n \"udp6\" : (udp6,),\r\n \"unix\" : (unix,),\r\n \"inet\" : (tcp4, tcp6, udp4, udp6),\r\n \"inet4\": (tcp4, udp4),\r\n \"inet6\": (tcp6, udp6),\r\n }\r\n if kind not in tmap:\r\n raise ValueError(\"invalid %r kind argument; choose between %s\"\r\n % (kind, ', '.join([repr(x) for x in tmap])))\r\n ret = []\r\n for f, family, type_ in tmap[kind]:\r\n ret += process(\"/proc/net/%s\" % f, family, type_)\r\n # raise NSP if the process disappeared on us\r\n os.stat('/proc/%s' % self.pid)\r\n return ret", "def getconnection(self):\n\n # If we were able to create the affix_tcpsocket, then we attempt to call\n # getconnection() on the affix tcp server socket first. If we were unable \n # to create it or get a SocketWouldBlockError, we default to the basic\n # repy getconnection() call. The reason for this is to ensure that even\n # if the affixstack breaks down, we are able to revert to the default repy\n # connection.\n if self.tcpserversocket_dict['affix_tcpsocket']:\n try:\n rip, rport, sockobj = self.tcpserversocket_dict['affix_tcpsocket'].getconnection()\n return (rip, rport, AffixSocket(sockobj, self.affix_object)) \n except SocketWouldBlockError:\n pass\n\n return self.tcpserversocket_dict['repy_tcpsocket'].getconnection()", "def _update_connections(self, oldVar, newVar):\n vars = [v for v in self.model.get_all_variables() if v.get_source_variable(True) is oldVar]\n # Remove old connections, including interfaces and types so creating the new connection works\n for v in vars:\n self.remove_connections(v)\n self.del_attr(v, u'public_interface')\n self.del_attr(v, u'private_interface')\n v.clear_dependency_info()\n # Create new connections\n for v in vars:\n self.connect_variables(newVar, v)", "def connections_incoming(self):\n return self.client.call('GET', self.name + 'connections/incoming')", "def get_oc_oc_connections(self, random_conn=False):\n\n print \"Drawing OC - OC connections .... \"\n abstract_weights_non_negative = np.loadtxt(self.params['oc_oc_abstract_weights_fn'])\n abstract_weights = self.take_log_weights(abstract_weights_non_negative)\n if random_conn:\n rnd.shuffle(abstract_weights)\n rnd.seed(self.params['random_oc_oc_seed'])\n np.savetxt(self.params['oc_oc_abstract_weights_fn'].rsplit('.dat')[0] + '_random.dat', abstract_weights)\n\n assert (abstract_weights[:,0].size == self.params['n_hc'] * self.params['n_mc'])\n assert (abstract_weights[0,:].size == self.params['n_hc'] * self.params['n_mc'])\n w_max_abstract = abstract_weights.max()\n w_min_abstract = abstract_weights.min()\n\n w_pyr_pyr_global_max = self.params['w_pyr_pyr_global_max']\n w_pyr_rsnp_max = self.params['w_pyr_rsnp_max']\n output_pyr_pyr = \"\"\n line_cnt_pyr_pyr = 0\n output_pyr_rsnp = \"\"\n line_cnt_pyr_rsnp = 0\n cnt_discarded_conn = 0\n for src_mc in xrange(abstract_weights[:, 0].size):\n for tgt_mc in xrange(abstract_weights[:, 0].size):\n if (src_mc != tgt_mc):\n w_in = abstract_weights[src_mc, tgt_mc]\n if (w_in > 0): # draw several pyr -> pyr connections between the two MC\n src_tgt_dict = {} # src_tgt_dict[src_gid] = [tgt_gid_0, ...] 
multiple connections between the same source and the same target are forbiddden\n w_out = (w_in / w_max_abstract) * w_pyr_pyr_global_max\n src_pyrs = rnd.randint(0, self.params['n_pyr_per_mc'], self.params['n_pyr_pyr_between_2mc'])\n for src in np.unique(src_pyrs):\n src_tgt_dict[src] = []\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n tgt_pyr = rnd.randint(0, self.params['n_pyr_per_mc']) + tgt_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset']\n src_tgt_dict[src].append(tgt_pyr)\n\n # remove multiple instances of the same src-tgt connection\n for src in src_pyrs:\n n1 = len(src_tgt_dict[src])\n src_tgt_dict[src] = np.unique(src_tgt_dict[src]).tolist()\n cnt_discarded_conn += n1 - len(src_tgt_dict[src])\n for tgt_pyr in src_tgt_dict[src]:\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_pyr_global_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_pyr += \"%d %d %.6e\\n\" % (src_pyr, tgt_pyr, w_noise)\n line_cnt_pyr_pyr += 1\n\n elif (w_in < 0):\n w_out = (w_in / w_min_abstract) * w_pyr_rsnp_max\n src_pyrs = self.get_rnd_targets(self.params['n_pyr_per_mc'], self.params['n_pyr_rsnp_between_2mc']) # avoid double connections\n for src in src_pyrs:\n src_pyr = src + src_mc * self.params['n_pyr_per_mc'] + self.params['pyr_offset'] \n tgt_rsnp = rnd.randint(0, self.params['n_rsnp_per_mc']) + tgt_mc * self.params['n_rsnp_per_mc'] + self.params['rsnp_offset']\n w_noise = self.draw_connection(1.0, w_out, noise=self.params['w_pyr_rsnp_sigma'])\n if (w_noise > self.params['weight_threshold']):\n output_pyr_rsnp += \"%d %d %.6e\\n\" % (src_pyr, tgt_rsnp, w_noise)\n line_cnt_pyr_rsnp += 1\n\n print 'Number of discarded pyr-pyr connections:', cnt_discarded_conn\n print 'Number of pyr-rsnp connections:', line_cnt_pyr_rsnp\n print 'Number of pyr-pyr connections:', line_cnt_pyr_pyr\n print 'Number of OC-OC connections:', line_cnt_pyr_pyr + line_cnt_pyr_rsnp\n output_fn_pyr_pyr = self.params['conn_list_pyr_pyr']\n output_file_pyr_pyr = open(output_fn_pyr_pyr, 'w')\n output_file_pyr_pyr.write(\"%d\\t%d\\n\" % (line_cnt_pyr_pyr, 3))\n output_file_pyr_pyr.write(output_pyr_pyr)\n output_file_pyr_pyr.close()\n\n output_fn_pyr_rsnp = self.params['conn_list_pyr_rsnp']\n output_file_pyr_rsnp = open(output_fn_pyr_rsnp, 'w')\n output_file_pyr_rsnp.write(\"%d\\t%d\\n\" % (line_cnt_pyr_rsnp, 3))\n output_file_pyr_rsnp.write(output_pyr_rsnp)\n output_file_pyr_rsnp.close()", "def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in self.query(CONNECTION_URL)]", "def __repr__(self):\n return f\"{self.name} : {self.get_connections()}\"", "def E(self) -> list:\n res = []\n for v in self.V():\n res.extend([(v.name, i) for i in v.get_connections().keys()])\n return res", "def _out_connections(self, g, tick):\n # outputs could be connected to many different input ports - this is not yet covered\n out_connections=[]\n output_map = {}\n # get the out connections of the given task\n for source,dest in g.get_out_connections(tick):\n if source.port not in output_map.keys():\n output_map[source.port]=[]\n output_map[source.port].append(dest)\n for source,dest in self.body_graph.get_in_connections(graph.FINAL_TICK):\n out_source=graph.Endpoint(source.tick << tick, source.port)\n portname=dest.port\n for out_dest in output_map[portname]:\n out_connections.append((out_source, out_dest))\n return out_connections", "def get_graphs_links_connection(self):\n return 
self.m_connection.graphs_links", "def list_connections(self, show_passthrough=True):\n return self._exprmapper.list_connections(show_passthrough)", "def get_secondary_connections(network, user):\n if user not in network:\n return None\n if network[user]['connections'] != []:\n result = []\n for conn in get_connections(network, user):\n for conn_2 in get_connections(network, conn):\n if conn_2 not in result:\n result.append(conn_2)\n return result\n return []", "def _get_communities(self):\n return self.__communities", "def learn_connectome(self):\n episode_nodes = [node for node in self.container.nodes if node.is_episode]\n if len(episode_nodes) < 2:\n return\n connections_counter = {}\n for node in episode_nodes:\n self._collect_episode_callout_stats(node, connections_counter)\n\n pair_list = [(key, connections_counter[key]) for key in connections_counter]\n pair_list.sort(key=lambda item: item[1], reverse=True)\n top_count = pair_list[0][1]\n if top_count < 4:\n return\n # make connections for the top half of pairs\n for pair, cnt in pair_list:\n if cnt > top_count // 2:\n self._make_connection_for_pair(pair)", "def get_cert_graphs_connection(self):\n return self.m_connection.cert_graphs", "def _create_common_connections(self):\n\t\tfor muscle,muscAfferentDelay in self._infoMuscles:\n\t\t\tfor connection in self._infoCommonMuscleConnections:\n\t\t\t\t# List of source cells ids\n\t\t\t\tsourcesId = self.cellsId[muscle][connection[0]]\n\t\t\t\t# gather the sources all together\n\t\t\t\tsourcesId = comm.gather(sourcesId,root=0)\n\t\t\t\tif rank==0: sourcesId = sum(sourcesId,[])\n\t\t\t\tsourcesId = comm.bcast(sourcesId,root=0)\n\t\t\t\t# List of taget cells ids\n\t\t\t\ttargetsId = self.cellsId[muscle][connection[1]]\n\t\t\t\t# Ratio of connection\n\t\t\t\tconRatio = connection[2]\n\t\t\t\t# Number of connections\n\t\t\t\tconNum = int(connection[3])\n\t\t\t\t# Weight of connections\n\t\t\t\tconWeight = float(connection[4])\n\t\t\t\t# Type of synapse\n\t\t\t\tsynType = connection[5]\n\t\t\t\t# connect sources to targets\n\t\t\t\tself._connect(sourcesId,targetsId,conRatio,conNum,conWeight,synType)", "def __call__(self) -> list:\n return self.network", "def get_all_ips_connection(self):\n return self.m_connection.all_ips", "def __init__(self):\n self.connections = {}", "def sockets(self):\n socs = [self.listen_socket, self.bridge_socket, self.waker]\n return [x for x in socs if x]", "def __enable_connections(self):\r\n pass", "def get_protocols(self):\r\n\r\n return None", "def ssh_get_connections(cls):\n return cls._ssh_connections", "def clients(self, r):\r\n return r.clients", "def list_connections(self, show_passthrough=True):\n excludes = set([name for name, data in self._exprgraph.nodes(data=True)\n if data['expr'].refs_parent()])\n if show_passthrough:\n return [(u, v) for u, v in self._exprgraph.edges() if not (u in excludes or v in excludes)]\n else:\n return [(u, v) for u, v in self._exprgraph.edges()\n if '.' in u and '.' 
in v and not (u in excludes or v in excludes)]", "def relationships(self):", "def __init__(self) :\n self.remoteConnections = {}", "def ssh_list_connections(cls):\n for name in cls._ssh_connections.keys():\n print (name)", "def get_all_connections(self, qid):\n if self._kg_symbols is None:\n return {}\n return self._kg_symbols.get_all_connections(qid)", "def edges(self):\n return self.dovetails + self.containments + self.internals", "def _identify_connection(self):\n pass #nothing to identify...\n #raise NotImplementedError(\"Implement!\")", "def configured_connections(hass):\n return set(\n entry.data.get(CONF_NAME) for entry in hass.config_entries.async_entries(DOMAIN)\n )", "def DendriteConnectionListByIndex(self):\n dendrite_connection_list = []\n for each_section in self._section_list:\n if each_section.ParentId() != -1:\n each_section_index = self._section_indices_by_id[each_section.Id()]\n each_section_parent_index = self._section_indices_by_id[\n each_section.ParentId()]\n dendrite_connection_list.append((each_section_index, each_section_parent_index))\n return dendrite_connection_list", "def available_auto_connection():\n path = os.path.dirname(verticapy.__file__) + \"/connections.verticapy\"\n confparser = ConfigParser()\n confparser.optionxform = str\n try:\n confparser.read(path)\n confparser.remove_section(\"VERTICAPY_AUTO_CONNECTION\")\n except:\n pass\n all_connections = confparser.sections()\n return all_connections", "def get_connections(self, id, connection_name, **args):\n return self.request(id + \"/\" + connection_name, args)", "def breakConnections(self):\n for connections in pm.listConnections(self.data['shapeNode'], plugs=True, connections=True):\n # if connections[-1].nodeType() in ['shadingEngine', 'displacementShader']:\n if cmds.getClassification(connections[-1].nodeType(), satisfies=\"shader\"):\n pm.disconnectAttr(str(connections[-1]), str(connections[0]))\n self.logger.info(\"Break Connection : %s > %s\" % (str(connections[-1]), str(connections[0])))", "def getConexion_police(self):\n\t\t\treturn self.policeConn", "async def get_all(self) -> typing.List[Connection]:\n return [Connection.from_dict(conn) for conn in await self.query(CONNECTION_URL)]", "def protocols(self):\n return list(self.query(Protocol))", "def identify_connections(top, index_only=False):\n compound = nx.Graph()\n\n for b in top.bonds:\n compound.add_edge(b.connection_members[0], b.connection_members[1])\n\n compound_line_graph = nx.line_graph(compound)\n\n angle_matches = _detect_connections(compound_line_graph, top, type_=\"angle\")\n dihedral_matches = _detect_connections(\n compound_line_graph, top, type_=\"dihedral\"\n )\n improper_matches = _detect_connections(\n compound_line_graph, top, type_=\"improper\"\n )\n\n if not index_only:\n for conn_matches, conn_type in zip(\n (angle_matches, dihedral_matches, improper_matches),\n (\"angle\", \"dihedral\", \"improper\"),\n ):\n if conn_matches:\n _add_connections(top, conn_matches, conn_type=conn_type)\n else:\n return {\n \"angles\": angle_matches,\n \"dihedrals\": dihedral_matches,\n \"impropers\": improper_matches,\n }\n\n return top", "def all_pairs(self):\n return chain(self.nx_graph.edges(), nx.non_edges(self.nx_graph))", "def get_internal_edges(self):\n raise NotImplementedError()", "def getLinkedInterfacesOnly(self):\n return self.linkedInterfaces", "def _extract_connection_property_sets(cls, dynamics_properties, namespace):\n component_class = dynamics_properties.component_class\n varying_params = set(\n 
component_class.parameter(p.name).id\n for p in dynamics_properties.properties\n if p.value.nineml_type != 'SingleValue')\n # Get list of ports refereneced (either directly or indirectly) by\n # time derivatives and on-conditions\n not_permitted = set(p.id for p in component_class.required_for(\n chain(component_class.all_time_derivatives(),\n component_class.all_on_conditions())).parameters)\n # If varying params intersects parameters that are referenced in time\n # derivatives they can not be redefined as connection parameters\n if varying_params & not_permitted:\n raise Pype9UnflattenableSynapseException()\n conn_params = defaultdict(dict)\n for on_event in component_class.all_on_events():\n for param in component_class.required_for(\n on_event.state_assignments).parameters:\n if param.id in varying_params:\n conn_params[on_event.src_port_name][param.id] = param\n return [\n ConnectionPropertySet(\n append_namespace(prt, namespace),\n [Property(append_namespace(p.name, namespace),\n dynamics_properties.property(p.name).quantity)\n for p in params.values()])\n for prt, params in conn_params.items()]", "def get_connections(network, user):\n if not user in network:\n return None\n if not 'connections' in network[user]:\n return []\n return network[user]['connections']", "def list(self):\n\t\tif self.client is None:\n\t\t\traise UsageError(\"Not connected!\")\n\t\treturn self.client.list_conns()", "def get_secondary_connections(network, user):\n if user not in network:\n return None\n if network[user][0] == []:\n return []\n return [person\n for group in\n [network[connection][0] for connection in network[user][0]]\n for person in group]", "def node_sources(self, node):\r\n node = self.coalesce_node(node)\r\n nodes =[conn[0] for conn in self.connections if conn[1] == node]\r\n return nodes", "def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionResponse']:\n return pulumi.get(self, \"private_endpoint_connections\")", "def ifaces_confs(self):\n return tuple(self._ifaces_confs)", "def make_connections(self):\n return\n destinations={}\n sources={}\n for gsq in self.gatesqs:\n destinations[self.local2global(gsq)]=set()\n sources[self.local2global(gsq)]=set()\n if rm.all_sols=='timeout':\n return\n for sol in self.all_sols:\n for sa in sol:\n start,indv,path,covered,end=sa\n destinations[self.local2global(start)].add((self.local2global(end),tuple(path)))\n sources[self.local2global(end)].add((self.local2global(start),tuple(path)))\n self.sources=sources\n self.destinations=destinations", "def getConnectedInterfacesOnly(self):\n return self.connectedInterfaces", "def describe_connections_on_interconnect(interconnectId=None):\n pass", "def inputConnections(self, cls=None):\n scene = self.scene()\n if ( not scene ):\n return []\n \n if ( not cls ):\n cls = XNodeConnection\n \n output = []\n for item in scene.items():\n if ( not isinstance(item, cls) ):\n continue\n \n if ( item.inputNode() == self ):\n output.append(item)\n \n return output", "def get_connections_by_relation(self, qid, relation):\n if self._kg_symbols is None:\n return []\n return self._kg_symbols.get_connections_by_relation(qid, relation)", "def getReachableViews(self):\n return [self]", "def sub_graph_merging(self):\n raise NotImplementedError()", "def bindings(self):\n return self.__bindings", "def connected_components(self):\n return [_connected_components.remote(self.rows)]", "def print_connections(self):\n print(\"[Printing Connections]\")\n for key in self.connections.keys():\n 
print(f\"{key}:\\n\\t{self.connections[key]}\")", "def connecting_vars(self):\n return irdvars_for_ext(self.ext)", "def get_connect(self):\n\t\treturn self.connect", "def connection_groups(self, **kwargs):\n return [\"listener-%s\" % self.path[1:]]", "def get_connection_genes(key, config):\n gene1 = ConnectionGene(key, config)\n gene1.enabled = True\n gene1.weight = 0\n gene2 = ConnectionGene(key, config)\n gene2.enabled = False\n gene2.weight = 1\n return gene1, gene2", "def connection_groups(self, **kwargs):\n return [\"news\"]" ]
[ "0.72107756", "0.719677", "0.71520996", "0.71520996", "0.71160644", "0.6827614", "0.68155587", "0.67435443", "0.6729093", "0.66902184", "0.6677647", "0.66670907", "0.6593339", "0.6425765", "0.639215", "0.63865095", "0.6366969", "0.63638633", "0.63590074", "0.6348137", "0.6316482", "0.629019", "0.6268205", "0.6238343", "0.6198442", "0.6164072", "0.6109483", "0.610394", "0.6054681", "0.60502017", "0.5992911", "0.5976468", "0.5974601", "0.5972328", "0.5960189", "0.5942528", "0.5938033", "0.5937474", "0.5937159", "0.59212416", "0.5917936", "0.5903548", "0.5890902", "0.58885986", "0.5874442", "0.58618", "0.5857746", "0.585496", "0.5853659", "0.5848264", "0.58474845", "0.5842489", "0.5814817", "0.58124423", "0.5809676", "0.58025753", "0.579439", "0.57862496", "0.574121", "0.57391804", "0.5728247", "0.5710596", "0.5700223", "0.56963646", "0.56959546", "0.5695932", "0.5692776", "0.56711257", "0.56682986", "0.5666103", "0.566053", "0.5658342", "0.5654649", "0.5610668", "0.56102073", "0.5603", "0.5601928", "0.5595306", "0.5591688", "0.5572333", "0.5566015", "0.5557495", "0.55358905", "0.5534284", "0.5534163", "0.5525633", "0.55242056", "0.5515658", "0.5515066", "0.5511751", "0.5510756", "0.5507252", "0.5502448", "0.5501697", "0.54994494", "0.5495709", "0.5495583", "0.5494598", "0.5493194", "0.54931474" ]
0.6257197
23
return the overridden shader
def dataShader(self): return self._shader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFragmentShader(self):\n return self.fshader", "def convert_shaders(self):\n raise NotImplementedError()", "def setShader(self, *args):\n return _osgAnimation.RigTransformHardware_setShader(self, *args)", "def get_shader_codes(self):\n vs = VS_TEMPLATE\n fs = FS_TEMPLATE\n \n # Shader headers\n vs_header = self.get_header('vertex')\n fs_header = self.get_header('fragment')\n \n # Varyings\n for varying in self.varyings:\n s1, s2 = get_varying_declarations(varying)\n vs_header += s1\n fs_header += s2\n \n # vs_header += \"\".join(self.vs_headers)\n # fs_header += \"\".join(self.fs_headers)\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_HEADER%\", vs_header)\n fs = fs.replace(\"%FRAGMENT_HEADER%\", fs_header)\n \n # Vertex and fragment main code\n vs_main = self.get_main('vertex')\n fs_main = self.get_main('fragment')\n \n # Integrate shader headers\n vs = vs.replace(\"%VERTEX_MAIN%\", vs_main)\n fs = fs.replace(\"%FRAGMENT_MAIN%\", fs_main)\n \n # frag color or frag data\n if self.fragdata is None:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragColor = out_color;\"\"\")\n else:\n fs = fs.replace('%FRAG%', \"\"\"gl_FragData[%d] = out_color;\"\"\" % self.fragdata)\n \n # Make sure there are no Windows carriage returns\n vs = vs.replace(b\"\\r\\n\", b\"\\n\")\n fs = fs.replace(b\"\\r\\n\", b\"\\n\")\n \n # OLDGLSL does not know the texture function\n if not OLDGLSL:\n fs = fs.replace(\"texture1D(\", \"texture(\" % 2)\n fs = fs.replace(\"texture2D(\", \"texture(\" % 2)\n \n # set default color\n fs = fs.replace('%DEFAULT_COLOR%', str(self.default_color))\n \n # replace GLSL version header\n vs = vs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n fs = fs.replace('%GLSL_VERSION_HEADER%', self.version_header)\n \n # replace GLSL precision header\n vs = vs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n fs = fs.replace('%GLSL_PRECISION_HEADER%', self.precision_header)\n \n return vs, fs", "def getCompiled(self):\n if self.isCompiled():\n return self.shader\n else:\n raise Exception(\"el shader no ha sido compilado aun\")", "def getVertexShader(self):\n return self.vshader", "def _reload_shader(self):\n self.render_pipeline.reload_shaders()\n\n self.render_pipeline.set_effect(self.terrain.get_node(), \"effects/terrain.yaml\", {\n \"render_gbuffer\": True,\n \"render_shadows\": False,\n\n })\n\n self.render_pipeline.set_effect(self.terrain_shadow.get_node(), \"effects/terrain_shadow.yaml\", {\n \"render_gbuffer\": False,\n \"render_shadows\": True,\n }, 5000)", "def updateShaderState(self):\n raise NotImplementedError('updateShaderState must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def location( self, shader, mode ):\n return shader.getLocation( mode, self.name, uniform=True )", "def _get_hook(self, shader, name):\n assert name in ('pre', 'post')\n key = (shader, name)\n if key in self._hooks:\n return self._hooks[key]\n hook = StatementList()\n if shader == 'vert':\n self.view_program.vert[name] = hook\n elif shader == 'frag':\n self.view_program.frag[name] = hook\n self._hooks[key] = hook\n return hook", "def glGetShaderSourceARB( baseOperation, obj ):\n length = int(glGetObjectParameterivARB(obj, GL_OBJECT_SHADER_SOURCE_LENGTH_ARB))\n if length > 0:\n source = ctypes.create_string_buffer(length)\n baseOperation(obj, length, None, source)\n return source.value.strip(_NULL_8_BYTE) # null-termination\n return ''", "def addShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): 
# real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def shaders(self):\n\n shaders = []\n shaders.extend(self._verts)\n shaders.extend(self._frags)\n shaders.extend(self._geoms)\n return shaders", "def updateShaderState(self):\n\n dopts = self.opts\n copts = self.canvas.opts\n lightPos = None\n flatColour = dopts.getConstantColour()\n useNegCmap = (not dopts.useLut) and dopts.useNegativeCmap\n\n if self.threedee:\n lightPos = np.array(copts.lightPos)\n lightPos *= (copts.zoom / 100.0)\n else:\n lightPos = None\n\n if dopts.useLut:\n delta = 1.0 / (dopts.lut.max() + 1)\n cmapXform = transform.scaleOffsetXform(delta, 0.5 * delta)\n else:\n cmapXform = self.cmapTexture.getCoordinateTransform()\n\n fslgl.glmesh_funcs.updateShaderState(\n self,\n useNegCmap=useNegCmap,\n cmapXform=cmapXform,\n flatColour=flatColour,\n lightPos=lightPos)", "def addCacheableShaderFromSourceFile(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__\n pass", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def shaderPath(self):\n\t\treturn mfl.mayaFile( self._path + '/shaders.ma' )", "def rsShaderOverridesMenu(arg):\n\n global shaderOverrideMode\n global selectedShaderOverride\n global overrideShader\n\n sel = getListSelection()\n\n if shaderOverrideMode is False:\n return\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n if shaderUtility.isActive(s):\n c = rsUtility.collection(shaderName.replace(':', '_'),\n isQuery=True)\n\n if not c.hasChildren():\n continue\n\n for child in c.getChildren():\n if child.typeName() == 'collection' \\\n and 
'{0}{1}'.format(shaderName.replace(':', '_'),\n COLLECTION_SUFFIX) in child.name():\n\n for o in child.getOverrides():\n if o.typeName() != 'shaderOverride':\n continue\n\n newShader = \\\n shaderUtility.duplicateShader(shaderName,\n choice=arg, isOverride=True)\n o.setSource(newShader + '.outColor')\n\n overrideShader = newShader\n selectedShaderOverride = arg\n\n selectedShaderOverride = arg", "def compile(self, mode, shader):\n holder = self.holderDepend( mode.cache.holder(self,None) )\n # TODO: depend on shader.material as well...\n # TODO: the compiled shader needs to depend on *everything* \n # down the set of objects...\n program = glCreateProgram()\n holder.data = program\n subShaders = []\n for shader in self.shaders:\n # TODO: cache links...\n subShader = shader.compile()\n if subShader:\n glAttachShader(program, subShader )\n subShaders.append( subShader )\n elif shader.source:\n log.warn( 'Failure compiling: %s %s', shader.compileLog, shader.url or shader.source )\n if len(subShaders) == len(self.shaders):\n glLinkProgram(program)\n glUseProgram( program )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.bind( self, mode, i ):\n i += 1\n \n glValidateProgram( program )\n validation = glGetProgramiv( program, GL_VALIDATE_STATUS )\n if validation == GL_FALSE:\n self.compileLog += \"\"\"Validation failure (%s): %s\"\"\"%(\n validation,\n glGetProgramInfoLog( program ),\n )\n program = False \n else:\n link_status = glGetProgramiv( program, GL_LINK_STATUS )\n if link_status == GL_FALSE:\n self.compileLog += \"\"\"Link failure (%s): %s\"\"\"%(\n link_status,\n glGetProgramInfoLog( program ),\n )\n program = False\n for subShader in subShaders:\n glDeleteShader( subShader )\n holder.data = program\n return program\n else:\n log.debug( 'Not done loading shader source yet' )\n holder.data = 0\n return None", "def addShaderFromSourceCode(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def applyShader(name, obj, color=(.5,.5,.5), sType='lambert', sSet='__none__'):\n ##print 'evaluating'\n if sSet=='__none__':\n sSet=name+'SG'\n ##print 'no SG set given'\n\n if pm.objExists(name)==0 and pm.objExists(sSet)==0:\n ##print 'creating shader'\n myShader=pm.shadingNode(sType, asShader=1, name=name)\n pm.sets(n=sSet, renderable=1, empty=1, noSurfaceShader=1)\n if sType=='surfaceShader':\n myAt='.outColor'\n else:\n myAt='.color'\n pm.connectAttr(myShader+myAt, sSet+'.surfaceShader')\n pm.setAttr(myShader+myAt, color)\n pm.sets(sSet, fe=obj)\n return name", "def run():\n\n def assignToon(context):\n def instanciate_group(nodes, group_name):\n group = nodes.new(type='ShaderNodeGroup')\n group.node_tree = bpy.data.node_groups[group_name]\n return group\n\n def assignToonShader(material):\n '''To do Handle if the material output doesnt exist'''\n toonShader = instanciate_group(material.node_tree.nodes, \"ToonShader_2\")\n node2 = material.node_tree.nodes['Material Output']\n material.node_tree.links.new(toonShader.outputs[0], node2.inputs[0])\n\n objects = bpy.context.selected_objects\n for obj in objects:\n if len(obj.material_slots) < 1:\n\n bpy.ops.object.material_slot_add()\n\n if obj.name not in bpy.data.materials:\n\n mat = bpy.data.materials.new(obj.name)\n else:\n mat = bpy.data.materials[obj.name]\n\n obj.data.materials[0] = mat\n mat.use_nodes = True\n\n for mat in obj.data.materials:\n if 
mat.name == '':\n mat.name = obj.name\n\n matNodes = mat.node_tree.nodes\n\n assignToonShader(mat)\n if 'Principled BSDF' in matNodes:\n matNodes.remove(matNodes['Principled BSDF'])\n # else:\n # for n in matNodes:\n # if n != material.node_tree.nodes['Material Output']:\n # matNodes.remove(n)\n\n\n shaderPath = r'D:/COMPANIES/loneCoconut/render/MILVIO_CGL/assets/lib/TOONSCEENSETUP/shd/publish/001.000/high/lib_TOONSCEENSETUP_shd.blend'\n collection_name = 'ToonSceneSetup'\n # dict_ = {'company': 'loneCoconut',\n # 'context': 'render',\n # 'project': 'MILVIO',\n # 'scope': 'assets',\n # 'seq': 'lib',\n # 'shot': 'TOONSCEENSETUP',\n # 'task': 'shd',\n # 'user': 'publish',\n # 'resolution': 'high'}\n # shaderPath = lm.LumberObject(dict_)\n # print(shaderPath.latest_version().path_root)\n #\n # collection_name = shaderPath.shot\n\n if collection_name not in bpy.data.collections:\n\n # link all collections starting with 'MyCollection'\n with bpy.data.libraries.load(shaderPath, link=False) as (data_from, data_to):\n data_to.collections = [c for c in data_from.collections if c.startswith(collection_name)]\n\n # link collection to scene collection\n for coll in data_to.collections:\n if coll is not None:\n bpy.data.scenes['Scene'].collection.children.link(coll)\n\n else:\n print(\"Toon Shader Exist\")\n\n\n assignToon(bpy.context)", "def compileShaders(self):\n raise NotImplementedError('compileShaders must be implemented by '\n '{} subclasses'.format(type(self).__name__))", "def addShader(self, QOpenGLShader): # real signature unknown; restored from __doc__\n return False", "def addCacheableShaderFromSourceCode(self, Union, QOpenGLShader_ShaderType=None, QOpenGLShader_ShaderTypeBit=None, *args, **kwargs): # real signature unknown; restored from __doc__ with multiple overloads\n pass", "def use(self):\r\n opengles.glUseProgram(self.program)", "def getShaderOverrideMode(shaderName):\n\n c = rsUtility.collection(shaderName.replace(':', '_'), isQuery=True)\n if c.hasChildren() is False:\n return False\n\n for child in c.getChildren():\n if child.typeName() == 'collection' \\\n and '{0}{1}'.format(shaderName.replace(':', '_'),\n COLLECTION_SUFFIX) in child.name():\n for o in child.getOverrides():\n if o.typeName() == 'shaderOverride':\n\n # Getting shader name via connections\n\n cnxs = cmds.listConnections(o.name())\n\n # Filter collections and 'msg'\n\n arr = [cnx for cnx in cnxs if '_collection'\n not in cnx and '_msg' not in cnx]\n if arr == []:\n return False\n\n for item in arr:\n shaderName = shaderUtility.stripSuffix(item)\n overrideShader = item\n\n mode = shaderUtility.getMode(overrideShader)\n if mode:\n return mode\n else:\n return False", "def needShader(self):\n return (self.threedee or\n (self.draw2DOutlineEnabled() and\n self.opts.vertexData is not None))", "def loadShader(shaderpath, shadername, vertexFormatList=None, fragmentFormatlist=None):\n fragment = Shader(shaderpath + shadername + \".fsh\", FRAGMENT, True, fragmentFormatlist)\n vertex = Shader(shaderpath + shadername + \".vsh\", VERTEX, True, vertexFormatList)\n return ShaderProgram(vertex, fragment, True)", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- 
---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def compile(self):\n if not self.isCompiled():\n if self.file is not None:\n try:\n if self.tipo == VERTEX:\n self.shader = glCreateShader(GL_VERTEX_SHADER)\n else:\n self.shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(self.shader, self.file)\n glCompileShader(self.shader)\n self.compiled = True\n except:\n raise Exception(\"error al compilar el shader\")\n else:\n raise Exception(\"no se ha cargado un archivo\")\n else:\n print \"Error :: el shader ya ha sido compilado\"", "def _getActiveShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def render( self, shader, mode ):\n location = shader.getLocation( mode, self.name, uniform=False )\n if location is not None and location != -1:\n vbo = self.buffer.bind( mode )\n glVertexAttribPointer( \n location, self.size, GL_FLOAT, False, self.stride, \n vbo+self.offset\n )\n glEnableVertexAttribArray( location )\n return (vbo,location)\n return None", "def _getShaderRefs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return getShaderNodes(self)", "def transfer_shaders(source, target):\n if isinstance(source, pm.nt.Transform):\n source_shape = source.getShape()\n else:\n 
source_shape = source\n\n if isinstance(target, pm.nt.Transform):\n target_shape = target.getShape()\n else:\n target_shape = target\n\n # get the shadingEngines\n shading_engines = source_shape.outputs(type=pm.nt.ShadingEngine)\n\n data_storage = []\n\n # get the assigned faces\n for shading_engine in shading_engines:\n faces = pm.sets(shading_engine, q=1)\n for faceGroup in faces:\n str_face = str(faceGroup)\n # replace the objectName\n new_face = \\\n str_face.replace(source_shape.name(), target_shape.name())\n data_storage.append((shading_engine.name(), new_face))\n\n for data in data_storage:\n shading_engine = data[0]\n new_face = data[1]\n pm.select(new_face)\n # now assign the newFaces to the set\n pm.sets(shading_engine, fe=1)", "def __str__(self):\n if self.fshader is None:\n f = \"not defined\"\n else:\n f = self.fshader.getPath()\n if self.vshader is None:\n v = \"not defined\"\n else:\n v = self.vshader.getPath()\n if self.enabled:\n e = \"enabled\"\n else:\n e = \"disabled\"\n if self.isCompiled():\n c = \"compiled | {0}\".format(e)\n else:\n c = \"not compiled | {0}\".format(e)\n return \"shader: {3}\\nfragment shader: {0}\\nvertex shader: {1}\\nstatus: {2}\".format(f, v, c, self.getName())", "def __init__(self):\r\n super(Defocus, self).__init__(\"defocus\")\r\n # load blur shader\r\n self.shader = Shader(\"defocus\")", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n value.render( mode.visible, mode.lighting, mode )\n return True \n return False", "def surfaceShaderList(*args, add: name=None, remove: name=None, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[None, Any]:\n pass", "def __prepare_shaders(self, rotation_matrix=None, light_matrix=None,\n depth=True):\n self.__sh.add_attribute(0, self.__mean_face, 'mean_position')\n self.__sh.bind_buffer()\n\n self.__sh.use_shaders()\n\n self.__sh.bind_uniform_matrix(light_matrix.dot(rotation_matrix),\n 'light_matrix')\n if not depth:\n self.__sh.bind_uniform_matrix(rotation_matrix, 'rotation_matrix')\n self.__sh.bind_uniform_vector(self.__face.light_cartesian,\n 'light_vector')\n coefficients_amount = len(self.__face.coefficients)\n indices = -ones(199, dtype='i')\n indices[:coefficients_amount] = array(range(coefficients_amount))\n self.__sh.bind_uniform_ints(indices, 'indices')\n\n coefficients = zeros(199, dtype='f')\n coefficients[:coefficients_amount] = self.__face.coefficients\n self.__sh.bind_uniform_floats(coefficients, 'coefficients')\n\n glActiveTexture(GL_TEXTURE0)\n self.__sh.bind_texture(0)\n if not depth:\n glActiveTexture(GL_TEXTURE1)\n self.__sh.bind_texture(1)", "def setShaderOverrideMode(query=False):\n\n global shaderOverrideMode\n global selectedShaderOverride\n global overrideShader\n\n sel = getListSelection()\n q.getQItem('%s_shaderOverrideLabel' % windowID, QtWidgets.QLabel)\n shaderOverrideMode = False\n overrideShader = None\n\n if sel == []:\n\n # Mode is false\n\n shaderOverrideMode = False\n selectedShaderOverride = None\n overrideShader = None\n q.widget.setStyleSheet('QLabel {color: rgb(200,200,200)}')\n q.widget.setText('Apply Shader Overrides')\n return False\n\n # Return false if any of the selected is inactive.\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n\n if shaderUtility.isActive(s) is False:\n 
shaderOverrideMode = False\n selectedShaderOverride = None\n overrideShader = None\n q.widget.setStyleSheet('QLabel {color: rgb(200,200,200); font-weight: normal;}'\n )\n q.widget.setText('Apply Shader Overrides')\n return False\n\n mode = getShaderOverrideMode(shaderName)\n if mode is None:\n\n # Doesn't have a shader override\n\n shaderOverrideMode = False\n selectedShaderOverride = None\n overrideShader = None\n q.widget.setStyleSheet('QLabel {color: rgb(105,105,105); font-weight: normal;}'\n )\n q.widget.setText('No shader override in the collection to change'\n )\n return False\n\n for s in sel:\n shaderName = shaderUtility.customStringToShaderName(s)\n mode = getShaderOverrideMode(shaderName)\n\n if mode:\n shaderOverrideMode = True\n selectedShaderOverride = mode['ui']\n selectOptionMenuItem('%s_optionMenu02' % windowID,\n selectedShaderOverride)\n\n if len(sel) == 1:\n q.widget.setStyleSheet('QLabel {color: rgb(200,200,200); font-weight: bold;}'\n )\n q.widget.setText('Swap shader override:')\n if len(sel) > 1:\n q.widget.setStyleSheet('QLabel {color: rgb(200,200,200); font-weight: bold;}'\n )\n q.widget.setText('Swap shader override (multiple):')\n return True\n if mode is False:\n\n # Doesn't have a shader override\n\n shaderOverrideMode = False\n selectedShaderOverride = None\n overrideShader = None\n q.widget.setStyleSheet('QLabel {color: rgb(105,105,105); font-weight: normal;}'\n )\n q.widget.setText('No shader override in the collection to change'\n )\n return False\n break", "def layeredShaderPort(*args, annotation: Union[AnyStr, bool]=\"\", backgroundColor:\n Union[List[float, float, float], bool]=None, defineTemplate: AnyStr=\"\",\n docTag: Union[AnyStr, bool]=\"\", dragCallback: Script=None, dropCallback:\n Script=None, enable: bool=True, enableBackground: bool=True,\n enableKeyboardFocus: bool=True, exists: bool=True, fullPathName:\n bool=True, height: Union[int, bool]=0, highlightColor: Union[List[float,\n float, float], bool]=None, isObscured: bool=True, manage: bool=True,\n noBackground: bool=True, node: name=None, numberOfPopupMenus: bool=True,\n parent: Union[AnyStr, bool]=\"\", popupMenuArray: bool=True,\n preventOverride: bool=True, selectedColorControl: AnyStr=\"\",\n selectedTransparencyControl: AnyStr=\"\", statusBarMessage: AnyStr=\"\",\n useTemplate: AnyStr=\"\", visible: bool=True, visibleChangeCommand:\n Union[Script, bool]=None, width: Union[int, bool]=0, q=True, query=True,\n e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def importShaders(self):\n\t\tif self.shaderPath.exists:\n\t\t\tself.shaderPath.imp()", "def render( self, shader, mode, location=None ):\n if location is None:\n location = self.location( shader, mode )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n shape = value.shape \n shape_length = len(self.shape)\n if shape[-shape_length:] != self.shape:\n # uninitialized at the Python level, do not set...\n return None\n if shape[:-shape_length]:\n size = reduce( operator.mul, shape[:-shape_length] )\n else:\n size = 1\n if self.NEED_TRANSPOSE is not None:\n return self.baseFunction( location, size, self.NEED_TRANSPOSE, value )\n else:\n return self.baseFunction( location, size, value )\n return None", "def set_shader(self, shader):\r\n\r\n self.shader = shader\r\n for b in self.buf:\r\n b.shader = shader", "def init_shaders():\n global shaders\n\n vertex_shader = glCreateShader(GL_VERTEX_SHADER)\n glShaderSource(vertex_shader,open('shaders/vs-phong-interp.c','r').read())\n 
glCompileShader(vertex_shader)\n result = glGetShaderiv(vertex_shader, GL_COMPILE_STATUS)\n if result:\n print('Vertex shader compilation successful.')\n else:\n print('Vertex shader compilation FAILED:')\n print(glGetShaderInfoLog(vertex_shader))\n sys.exit(-1)\n\n fragment_shader = glCreateShader(GL_FRAGMENT_SHADER)\n glShaderSource(fragment_shader, open('shaders/fs-phong-interp.c','r').read())\n glCompileShader(fragment_shader)\n result = glGetShaderiv(fragment_shader, GL_COMPILE_STATUS)\n if result:\n print('Fragment shader compilation successful.')\n else:\n print('Fragment shader compilation FAILED:')\n print(glGetShaderInfoLog(fragment_shader))\n sys.exit(-1)\n\n shaders = glCreateProgram()\n glAttachShader(shaders,vertex_shader)\n glAttachShader(shaders,fragment_shader)\n glLinkProgram(shaders)", "def shadingNode(*args, asLight: bool=True, asPostProcess: bool=True, asRendering: bool=True,\n asShader: bool=True, asTexture: bool=True, asUtility: bool=True,\n isColorManaged: bool=True, name: AnyStr=\"\", parent: AnyStr=\"\", shared:\n bool=True, skipSelect: bool=True, **kwargs)->AnyStr:\n pass", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def compile_vertex_shader(source):\n vertex_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER)\n gl.glShaderSource(vertex_shader, source)\n gl.glCompileShader(vertex_shader)\n # check compilation error\n result = gl.glGetShaderiv(vertex_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(vertex_shader))\n return vertex_shader", "def calculateLighting(x,y,z, xnormal, ynormal, znormal):\n dummy = 0\n clr = dislin.getlit(x,y,z,xn,yn,zn,dummy)", "def _on_load_scene_shaders(self):\n\n artellapipe.ShadersMgr().load_scene_shaders()", "def compile_fragment_shader(source):\n fragment_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER)\n gl.glShaderSource(fragment_shader, source)\n gl.glCompileShader(fragment_shader)\n # check compilation error\n result = gl.glGetShaderiv(fragment_shader, gl.GL_COMPILE_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetShaderInfoLog(fragment_shader))\n return fragment_shader", "def uniform(self, name):\n return glGetUniformLocation(self.program(), name.encode('utf_8'))", "def output_vertex_filter():\n return VertexFilterVecNorm()", "def get_shaders(self, nodes):\n shaders = []\n # Fill the assigned shader list\n for node in nodes:\n shader = mc.listConnections(\"{0}.instObjGroups[0]\".format(node))\n if shader is not None:\n shaders.append(shader)\n else:\n shaders.append([])\n return shaders", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n #print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def __init__(self, shader_program):\n 
self.tessellate(20)\n\n self.k_ambient = np.array([0.3, 0.3, 0.21], dtype=np.float32)\n self.k_diffuse = np.array([0.4, 0.5, 0.35], dtype=np.float32)\n self.k_specular = np.array([0.3, 0.3, 0.3], dtype=np.float32)\n self.shininess = 7.0\n\n self.set_buffers(shader_program)", "def printShader(self):\n print self.file", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n GL.glDeleteProgram(self.glid)\n self.glid = None", "def _build_shaders(self, program):\n\n # Check if we have at least something to attach\n if not self._verts:\n raise ValueError(\"No vertex shader has been given\")\n if not self._frags:\n raise ValueError(\"No fragment shader has been given\")\n\n log.debug(\"GPU: Attaching shaders to program\")\n\n # Attach shaders\n attached = gl.glGetAttachedShaders(program)\n shaders = self._verts + self._frags + self._geoms\n for shader in shaders: #self._verts:\n if shader.need_update:\n if shader.handle in attached:\n gl.glDetachShader(program, handle)\n shader.activate()\n if isinstance(shader, GeometryShader):\n if shader.vertices_out is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_VERTICES_OUT_EXT,\n shader.vertices_out)\n if shader.input_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_INPUT_TYPE_EXT,\n shader.input_type)\n if shader.output_type is not None:\n gl.glProgramParameteriEXT(self._handle,\n gl.GL_GEOMETRY_OUTPUT_TYPE_EXT,\n shader.output_type)\n gl.glAttachShader(program, shader.handle)\n shader._program = self", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n glBindTexture( GL_TEXTURE_BUFFER, self.texture( mode ) )\n vbo = value.vbo(mode)\n vbo.bind()\n try:\n glTexBuffer( GL_TEXTURE_BUFFER, self.get_format(), int(vbo) )\n finally:\n vbo.unbind()\n return True \n return False", "def __init__(self, vertex_source, fragment_source):\n self.glid = None\n vert = self._compile_shader(vertex_source, GL.GL_VERTEX_SHADER)\n frag = self._compile_shader(fragment_source, GL.GL_FRAGMENT_SHADER)\n if vert and frag:\n self.glid = GL.glCreateProgram() # pylint: disable=E1111\n GL.glAttachShader(self.glid, vert)\n GL.glAttachShader(self.glid, frag)\n GL.glLinkProgram(self.glid)\n GL.glDeleteShader(vert)\n GL.glDeleteShader(frag)\n status = GL.glGetProgramiv(self.glid, GL.GL_LINK_STATUS)\n if not status:\n print(GL.glGetProgramInfoLog(self.glid).decode('ascii'))\n sys.exit(1)", "def getVariable( self, name ):\n for uniform in self.uniforms:\n if uniform.name == name:\n return uniform \n return None", "def compileShaders(self):\n if self.flatShader is not None: self.flatShader.destroy()\n if self.dataShader is not None: self.dataShader.destroy()\n\n self.activeShader = None\n\n fslgl.glmesh_funcs.compileShaders(self)", "def _load_opengl(self):\r\n pass", "def 
getConnectedShaders(self):\n self.logger.debug(\"Connected Shaders\")\n\n connected = []\n for connections in pm.listConnections(self.data['shapeNode'], plugs=True, connections=True):\n if cmds.getClassification(connections[-1].nodeType(), satisfies=\"shader\"):\n self.logger.debug(\"Connected shader : %s\" % connections[-1].node())\n connected.append(connections[-1].node())\n return connected", "def bs_getShaders(obj):\n pm.select(obj)\n pm.windows.hyperShade(shaderNetworksSelectMaterialNodes=True)\n return pm.ls(sl=True) # Returns all shaders associated with the object (shape, face etc)", "def _getBindParams(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 2)\n return list()", "def __init__(self, blend_src, blend_dest, program, parent=None):\n super().__init__(parent=parent)\n self.program = program\n self.blend_src = blend_src\n self.blend_dest = blend_dest", "def late_gradient_fusion():\n pass", "def get_loss_fn():\n return reconstruction", "def polyColorMod(*args, alphaScale_FloatValue: Union[float, bool]=0.0, alphaScale_Interp:\n Union[int, bool]=0, alphaScale_Position: Union[float, bool]=0.0,\n baseColorName: AnyStr=\"\", blueScale_FloatValue: Union[float, bool]=0.0,\n blueScale_Interp: Union[int, bool]=0, blueScale_Position: Union[float,\n bool]=0.0, caching: bool=True, constructionHistory: bool=True,\n greenScale_FloatValue: Union[float, bool]=0.0, greenScale_Interp: Union[int,\n bool]=0, greenScale_Position: Union[float, bool]=0.0, huev: Union[float,\n bool]=0.0, intensityScale_FloatValue: Union[float, bool]=0.0,\n intensityScale_Interp: Union[int, bool]=0, intensityScale_Position:\n Union[float, bool]=0.0, name: AnyStr=\"\", nodeState: Union[int, bool]=0,\n redScale_FloatValue: Union[float, bool]=0.0, redScale_Interp: Union[int,\n bool]=0, redScale_Position: Union[float, bool]=0.0, satv: Union[float,\n bool]=1.0, value: Union[float, bool]=1.0, q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def as_es2_command(command):\n\n if command[0] == 'FUNC':\n return (command[0], re.sub(r'^gl([A-Z])',\n lambda m: m.group(1).lower(), command[1])) + command[2:]\n if command[0] == 'SHADERS':\n return command[:2] + convert_shaders('es2', command[2:])\n if command[0] == 'UNIFORM':\n return command[:-1] + (command[-1].tolist(),)\n return command", "def compile( self, mode=None ):\n # This code is not OpenGL 3.1 compatible\n if self.pos.any():\n dl = displaylist.DisplayList()\n #XXX should do sanity checks here...\n dl.start()\n try:\n pos = self.pos\n color = self.color\n colorLen = len(color)\n killThickness = 0\n if self.radius:\n glLineWidth( self.radius*2 )\n killThickness = 1\n try:\n \n glEnable( GL_COLOR_MATERIAL )\n try:\n glBegin( GL_LINE_STRIP )\n try:\n lastColor = None\n for index in range(len(pos)):\n point = pos[index]\n if index < colorLen:\n col = tuple(color[index])\n if col != lastColor:\n glColor3dv( col )\n lastColor = col\n glVertex3dv(point)\n finally:\n glEnd()\n finally:\n glDisable( GL_COLOR_MATERIAL )\n finally:\n if killThickness:\n glLineWidth( 1 )\n finally:\n dl.end()\n holder = mode.cache.holder(self, dl)\n for field in protofunctions.getFields( self ):\n # change to any field requires a recompile\n holder.depend( self, field )\n return dl\n return None", "def _getBindInputs(self):\n warnings.warn(\"This function is deprecated; shader references have been replaced with shader nodes in 1.38.\", DeprecationWarning, stacklevel = 
2)\n return self.getInputs()", "def AddDispersionMaterial(GeometryName,RGBData):\n\n r,g,b=RGBData\n onlyR = tuple([r,0,0,1])\n onlyG = tuple([0,g,0,1])\n onlyB = tuple([0,0,b,1])\n\n\n currentMaterial = bpy.data.materials.new(name='TypeA'+GeometryName)\n currentMaterial.use_nodes = True\n nodes = currentMaterial.node_tree.nodes\n\n math01 = nodes.new(\"ShaderNodeMath\")\n math01.operation = \"POWER\"\n\n glassBSDF01 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF01.inputs[0].default_value = onlyR\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF01.inputs[1])\n\n glassBSDF02 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF02.inputs[0].default_value = onlyG\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF02.inputs[1])\n\n glassBSDF03 = nodes.new(\"ShaderNodeBsdfGlass\")\n glassBSDF03.inputs[0].default_value = onlyB\n currentMaterial.node_tree.links.new(math01.outputs[0],glassBSDF03.inputs[1])\n\n math02 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],glassBSDF02.inputs[2])\n\n math03 = nodes.new(\"ShaderNodeMath\")\n currentMaterial.node_tree.links.new(math02.outputs[0],math03.inputs[1])\n currentMaterial.node_tree.links.new(math03.outputs[0],glassBSDF01.inputs[2])\n\n addShader01 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(glassBSDF01.outputs[0],addShader01.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF02.outputs[0],addShader01.inputs[1])\n\n addShader02 = nodes.new(\"ShaderNodeAddShader\")\n currentMaterial.node_tree.links.new(addShader01.outputs[0],addShader02.inputs[0])\n currentMaterial.node_tree.links.new(glassBSDF03.outputs[0],addShader02.inputs[1])\n\n volumeAbs = nodes.new(\"ShaderNodeVolumeAbsorption\")\n\n materialOutput=nodes.get(\"Material Output\")\n currentMaterial.node_tree.links.new(addShader02.outputs[0],materialOutput.inputs[0])\n currentMaterial.node_tree.links.new(volumeAbs.outputs[0],materialOutput.inputs[1])\n\n bpy.data.objects[GeometryName].data.materials.append(currentMaterial)", "def _create_base_hdri(self,last_element,node_offset=[0,0]):\n\n # store hdri list\n self._hdri_dict_list = self._cfg[\"HDRIList\"]\n\n # attach last output to Background Shader node #################################################################\n self._semantic_switching_node = self._world_node_tree.node_tree.nodes.new('ShaderNodeMixRGB')\n self._semantic_switching_node.location = (node_offset[0]-400,node_offset[1]-400)\n self._semantic_switching_node.inputs[0].default_value = 0.0\n _current_last_output = self._semantic_switching_node.outputs[0]\n\n # mix rgb to mix last_element with sky #########################################################################\n if last_element is not None:\n _mix_node = self._world_node_tree.node_tree.nodes.new('ShaderNodeMixRGB')\n _mix_node.location = (node_offset[0],node_offset[1])\n\n self._world_node_tree.inputs[0].default_value = 1.0\n self._world_node_tree.blend_type = 'OVERLAY'\n self._world_node_tree.node_tree.links.new(_mix_node.inputs[1],last_element)\n\n self._world_node_tree.node_tree.links.new(_mix_node.inputs[2],self._semantic_switching_node.outputs[0])\n _current_last_output = _mix_node.outputs[0]\n ################################################################## end of mix rgb to mix last_element with sky #\n ########################################################## end of attach last output to Background Shader node #\n \n\n # return new last element\n return _current_last_output", "def polyBlendColor(*args, 
baseColorName: Union[AnyStr, bool]=\"\", blendFunc: Union[int, bool]=0,\n blendWeightA: Union[float, bool]=0.0, blendWeightB: Union[float, bool]=0.0,\n blendWeightC: Union[float, bool]=0.0, blendWeightD: Union[float, bool]=0.0,\n caching: bool=True, constructionHistory: bool=True, dstColorName:\n Union[AnyStr, bool]=\"\", name: AnyStr=\"\", nodeState: Union[int, bool]=0,\n srcColorName: Union[AnyStr, bool]=\"\", q=True, query=True, e=True, edit=True,\n **kwargs)->Union[AnyStr, Any]:\n pass", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def add_vertex_main(self, *args, **kwargs):\n kwargs['shader'] = 'vertex'\n self.add_main(*args, **kwargs)", "def draw(self, shape):\n shape.draw(shader=self.shader)", "def glGetUniformLocationARB( baseOperation, program, name ):\n if not name:\n raise ValueError( \"\"\"Non-null name required\"\"\" )\n name = as_8_bit( name )\n if name[-1] != _NULL_8_BYTE:\n name = name + _NULL_8_BYTE\n return baseOperation( program, name )", "def _get_shader_type(varinfo):\n if type(varinfo[\"ndim\"]) == int or type(varinfo[\"ndim\"]) == long:\n if varinfo[\"ndim\"] == 1:\n shader_type = varinfo[\"vartype\"]\n elif varinfo[\"ndim\"] >= 2:\n shader_type = \"vec%d\" % varinfo[\"ndim\"]\n if varinfo[\"vartype\"] != \"float\":\n shader_type = \"i\" + shader_type\n # matrix: (2,2) or (3,3) or (4,4)\n elif type(varinfo[\"ndim\"]) == tuple:\n shader_type = \"mat%d\" % varinfo[\"ndim\"][0]\n return shader_type", "def _addShaderMenuItems(ned, node):\n pass", "def diffuse_light(self):\n return self._diffuse_light", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def early_gradient_fusion():\n pass", "def 
render( self, mode, shader=None ):\n renderer = mode.cache.getData(self)\n if renderer is None:\n renderer = self.compile( mode, shader )\n if renderer is False:\n log.warn(\"\"\"%s\"\"\",\n self.compileLog,\n )\n if renderer not in (None,False):\n try:\n GL_shaders.glUseProgram( renderer )\n except error.GLError, err:\n log.error( '''Failure compiling: %s''', '\\n'.join([\n '%s: %s'%(shader.url or shader.source,shader.compileLog)\n for shader in self.shaders\n ]))\n raise\n else:\n for uniform in mode.uniforms:\n uniform.render( self, mode )\n for uniform in self.uniforms:\n uniform.render( self, mode )\n # TODO: retrieve maximum texture count and restrict to that...\n i = 0\n for texture in self.textures:\n if texture.render( self, mode, i ):\n i += 1\n else:\n log.warn( 'Renderer for %s was null: %s', self, self.compileLog )\n return True,True,True,renderer", "def link_shader_program(vertex_shader):\n program = gl.glCreateProgram()\n gl.glAttachShader(program, vertex_shader)\n gl.glLinkProgram(program)\n # check linking error\n result = gl.glGetProgramiv(program, gl.GL_LINK_STATUS)\n if not(result):\n raise RuntimeError(gl.glGetProgramInfoLog(program))\n return program", "def __init__(self, shader=\"post_base\", mipmap=False, add_tex=None,\r\n scale=1.0, camera=None, divide=1):\r\n super(PostProcess, self).__init__(\"postprocess\")\r\n self.scale = scale\r\n # load shader\r\n self.shader = Shader(shader)\r\n if camera == None:\r\n self.viewcam = Camera.instance() # in case this is prior to one being created\r\n else:\r\n self.viewcam = camera\r\n self.camera = Camera(is_3d=False)\r\n self.sprite = LodSprite(z=20.0, w=self.ix, h=self.iy, n=divide)\r\n self.sprite.set_2d_size(w=self.ix, h=self.iy)\r\n for b in self.sprite.buf:\r\n b.unib[6] = self.scale # ufact\r\n b.unib[7] = self.scale # vfact\r\n b.unib[9] = (1.0 - self.scale) * 0.5 # uoffset\r\n b.unib[10] = (1.0 - self.scale) * 0.5 # voffset\r\n self.alpha = False\r\n self.blend = True\r\n self.mipmap = mipmap\r\n self.tex_list = [self] # TODO check if this self reference causes graphics memory leaks\r\n if add_tex:\r\n self.tex_list.extend(add_tex)", "def instantiate_for_spirv_args(self, testcase):\n shader, self.filename = tempfile.mkstemp(\n dir=testcase.directory, suffix=self.suffix)\n shader_object = os.fdopen(shader, 'w')\n shader_object.write(self.source)\n shader_object.close()\n return self.filename", "def setFragmentShader(self, fragment):\n if isinstance(fragment, Shader):\n if fragment.getType() == FRAGMENT:\n self.fshader = fragment\n else:\n raise Exception(\"se esperaba un fragment shader, en cambio se paso un vertex shader\")\n else:\n raise Exception(\"el fragment shader debe ser del tipo Shader\")", "def _createShaderMenuItems(ned, node):\n pass", "def convert_shaders(convert, shaders):\n \n # New version of the shaders\n out = []\n \n if convert == 'es2':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n has_prec_float = False\n has_prec_int = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n if line.startswith('#version'):\n has_version = True\n continue\n if line.startswith('precision '):\n has_prec_float = has_prec_float or 'float' in line\n has_prec_int = has_prec_int or 'int' in line\n lines.append(line.rstrip())\n # Write\n # BUG: fails on WebGL (Chrome)\n # if True:\n # lines.insert(has_version, '#line 0')\n if not has_prec_float:\n lines.insert(has_version, 'precision highp float;')\n if not has_prec_int:\n lines.insert(has_version, 'precision 
highp int;')\n # BUG: fails on WebGL (Chrome)\n # if not has_version:\n # lines.insert(has_version, '#version 100')\n out.append('\\n'.join(lines))\n \n elif convert == 'desktop':\n \n for isfragment, shader in enumerate(shaders):\n has_version = False\n lines = []\n # Iterate over lines\n for line in shader.lstrip().splitlines():\n has_version = has_version or line.startswith('#version')\n if line.startswith('precision '):\n line = ''\n for prec in (' highp ', ' mediump ', ' lowp '):\n line = line.replace(prec, ' ')\n lines.append(line.rstrip())\n # Write\n if not has_version:\n lines.insert(0, '#version 120\\n')\n out.append('\\n'.join(lines))\n \n else:\n raise ValueError('Cannot convert shaders to %r.' % convert)\n \n return tuple(out)", "def __call__(self):\n Texture()", "def update_render_passes(self, scene=None, renderlayer=None):\n self.register_pass(scene, renderlayer, \"Combined\", 4, \"RGBA\", 'COLOR')\n\n # Denoiser\n if scene.luxcore.denoiser.enabled:\n self.register_pass(scene, renderlayer, \"DENOISED\", 3, \"RGB\", \"COLOR\")\n\n aovs = renderlayer.luxcore.aovs\n\n # Notes:\n # - It seems like Blender can not handle passes with 2 elements. They must have 1, 3 or 4 elements.\n # - The last argument must be in (\"COLOR\", \"VECTOR\", \"VALUE\") and controls the socket color.\n if aovs.rgb:\n self.register_pass(scene, renderlayer, \"RGB\", 3, \"RGB\", \"COLOR\")\n if aovs.rgba:\n self.register_pass(scene, renderlayer, \"RGBA\", 4, \"RGBA\", \"COLOR\")\n if aovs.alpha:\n self.register_pass(scene, renderlayer, \"ALPHA\", 1, \"A\", \"VALUE\")\n if aovs.depth:\n # In the compositor we need to register the Depth pass\n self.register_pass(scene, renderlayer, \"Depth\", 1, \"Z\", \"VALUE\")\n if aovs.albedo:\n self.register_pass(scene, renderlayer, \"ALBEDO\", 3, \"RGB\", \"COLOR\")\n if aovs.material_id:\n self.register_pass(scene, renderlayer, \"MATERIAL_ID\", 1, \"X\", \"VALUE\")\n if aovs.material_id_color:\n self.register_pass(scene, renderlayer, \"MATERIAL_ID_COLOR\", 3, \"RGB\", \"COLOR\")\n if aovs.object_id:\n self.register_pass(scene, renderlayer, \"OBJECT_ID\", 1, \"X\", \"VALUE\")\n if aovs.emission:\n self.register_pass(scene, renderlayer, \"EMISSION\", 3, \"RGB\", \"COLOR\")\n if aovs.direct_diffuse:\n self.register_pass(scene, renderlayer, \"DIRECT_DIFFUSE\", 3, \"RGB\", \"COLOR\")\n if aovs.direct_glossy:\n self.register_pass(scene, renderlayer, \"DIRECT_GLOSSY\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_diffuse:\n self.register_pass(scene, renderlayer, \"INDIRECT_DIFFUSE\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_glossy:\n self.register_pass(scene, renderlayer, \"INDIRECT_GLOSSY\", 3, \"RGB\", \"COLOR\")\n if aovs.indirect_specular:\n self.register_pass(scene, renderlayer, \"INDIRECT_SPECULAR\", 3, \"RGB\", \"COLOR\")\n if aovs.position:\n self.register_pass(scene, renderlayer, \"POSITION\", 3, \"XYZ\", \"VECTOR\")\n if aovs.shading_normal:\n self.register_pass(scene, renderlayer, \"SHADING_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.avg_shading_normal:\n self.register_pass(scene, renderlayer, \"AVG_SHADING_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.geometry_normal:\n self.register_pass(scene, renderlayer, \"GEOMETRY_NORMAL\", 3, \"XYZ\", \"VECTOR\")\n if aovs.uv:\n # We need to pad the UV pass to 3 elements (Blender can't handle 2 elements)\n self.register_pass(scene, renderlayer, \"UV\", 3, \"UVA\", \"VECTOR\")\n if aovs.direct_shadow_mask:\n self.register_pass(scene, renderlayer, \"DIRECT_SHADOW_MASK\", 1, \"X\", \"VALUE\")\n if aovs.indirect_shadow_mask:\n 
self.register_pass(scene, renderlayer, \"INDIRECT_SHADOW_MASK\", 1, \"X\", \"VALUE\")\n if aovs.raycount:\n self.register_pass(scene, renderlayer, \"RAYCOUNT\", 1, \"X\", \"VALUE\")\n if aovs.samplecount:\n self.register_pass(scene, renderlayer, \"SAMPLECOUNT\", 1, \"X\", \"VALUE\")\n if aovs.convergence:\n self.register_pass(scene, renderlayer, \"CONVERGENCE\", 1, \"X\", \"VALUE\")\n if aovs.noise:\n self.register_pass(scene, renderlayer, \"NOISE\", 1, \"X\", \"VALUE\")\n if aovs.irradiance:\n self.register_pass(scene, renderlayer, \"IRRADIANCE\", 3, \"RGB\", \"COLOR\")\n\n # Light groups\n lightgroups = scene.luxcore.lightgroups\n lightgroup_pass_names = lightgroups.get_pass_names()\n default_group_name = lightgroups.get_lightgroup_pass_name(is_default_group=True)\n # If only the default group is in the list, it doesn't make sense to show lightgroups\n # Note: this behaviour has to be the same as in the _add_passes() function in the engine/final.py file\n if lightgroup_pass_names != [default_group_name]:\n for name in lightgroup_pass_names:\n self.register_pass(scene, renderlayer, name, 3, \"RGB\", \"COLOR\")", "def render(self, vertex_highlighting=False):\n pass", "def get_normal_texture(self):\n return self.normal_tex", "def get_uniform_declaration(uniform):\n tab = \"\"\n size = uniform.get(\"size\", None)\n if size is not None:\n tab = \"[%d]\" % max(1, size) # ensure that the size is always >= 1\n # add uniform declaration\n declaration = \"uniform %s %s%s;\\n\" % \\\n (_get_shader_type(uniform),\n uniform[\"name\"],\n tab)\n return declaration" ]
[ "0.71515524", "0.6992073", "0.65359896", "0.64718676", "0.6368677", "0.63054544", "0.62507594", "0.59390277", "0.59311384", "0.5861681", "0.584541", "0.5813607", "0.5793342", "0.5786723", "0.5785204", "0.578408", "0.57676625", "0.57501054", "0.57129234", "0.5711793", "0.5710523", "0.56652075", "0.5658809", "0.5629772", "0.5622512", "0.56093985", "0.55466163", "0.55380493", "0.5533169", "0.55223864", "0.550936", "0.54817396", "0.54813206", "0.54572994", "0.5455678", "0.5439277", "0.5434761", "0.5398621", "0.53914726", "0.53913766", "0.5381754", "0.5373948", "0.5359117", "0.5357427", "0.5353231", "0.5331448", "0.5317046", "0.5310281", "0.5310281", "0.5298988", "0.52856594", "0.5249993", "0.5243548", "0.523158", "0.5220734", "0.5218243", "0.51960236", "0.5195492", "0.51669085", "0.516325", "0.514685", "0.51328397", "0.51133466", "0.51015764", "0.50780845", "0.5073284", "0.5046993", "0.50197613", "0.50106966", "0.50032836", "0.4984935", "0.49828783", "0.49791214", "0.49772072", "0.4971343", "0.49690527", "0.49516645", "0.49506852", "0.49503756", "0.49455", "0.49199718", "0.49193206", "0.48979193", "0.48804417", "0.4874545", "0.48707733", "0.48707733", "0.48703256", "0.4863934", "0.4859643", "0.48511866", "0.48352042", "0.48294023", "0.48072377", "0.48041767", "0.4790246", "0.4786497", "0.4785133", "0.4775468", "0.47744802" ]
0.6786076
2
filter data based on asset name and searchAndReplace data
def filterMe(self, asset = '', sAr = ['', ''] ):
	if self._objects:
		self._objects = [ mn.Node( o.name.replace( sAr[0], sAr[1] ) ) for o in self._objects ]
	if self._overrides:
		self._overrides = dict( [ (mn.Node( a.name.replace( sAr[0], sAr[1] )), self._overrides[a] ) for a in self._overrides.keys() ] )
	if self._overconns:
		self._overconns = dict( [(mn.Node(a.name.replace( sAr[0], sAr[1] )), mn.Node(self._overconns[a].name.replace( sAr[0], sAr[1] ))) for a in self._overconns.keys() ] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def healthcare_filter(df_all): \n #get requested assets under healthcare tag \n df_filtered = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_filtered = df_filtered.append(df_all.loc[row]) #if so, save in df \n if '\"healthcare\"=>\"doctor\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"healthcare\"=>\"pharmacy\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'pharmacy'\n elif '\"healthcare\"=>\"hospital\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'hospital'\n elif '\"healthcare\"=>\"clinic\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'clinic'\n elif '\"healthcare\"=>\"dentist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'dentist'\n elif '\"healthcare\"=>\"physiotherapist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'physiotherapist'\n elif '\"healthcare\"=>\"alternative\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'alternative'\n elif '\"healthcare\"=>\"laboratory\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'laboratory'\n elif '\"healthcare\"=>\"optometrist\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'optometrist'\n elif '\"healthcare\"=>\"rehabilitation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'rehabilitation'\n elif '\"healthcare\"=>\"blood_donation\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'blood_donation'\n elif '\"healthcare\"=>\"birthing_center\"' in df_filtered[\"asset\"][row]:\n df_filtered[\"asset\"][row] = 'birthing_center'\n else:\n df_filtered = df_filtered.drop(index=row)\n \n return df_filtered", "def importData( self, asset = '', searchAndReplace = ['',''] ):\n\t\tpickleData = pickle.load( open( self.dataPath.path, \"rb\" ) )\n\t\tlayers = [RenderLayerData(l,d) for l,d in pickleData.items() if not ':' in l]\n\t\tfor l in layers:\n\t\t\tif not searchAndReplace [0]== '' or not searchAndReplace[1] == '':\n\t\t\t\tl.filterMe( asset, searchAndReplace )\n\t\t\tl.create()\n\t\t\tl.addObjects()\n\t\t\tl.makeOverrides()\n\t\t\tl.makeOverrideConnections()\n\t\t\tl.makeShaderOverride()", "def filter(self, filters):", "def replace_data(self, original, replacement, pattern=None, use_regex=False, queries=[]):\n import hxl.filters\n replacement = hxl.filters.ReplaceDataFilter.Replacement(original, replacement, pattern, use_regex)\n return hxl.filters.ReplaceDataFilter(self, [replacement], queries=queries)", "def filter_data(self):\n self.data = filter_pandas(self.data, self.filters)", "def filterLightLinksData(self, LayersInfo , asset, sAr = ['',''] ):\n\t\tlightData = [(a.replace( sAr[0], sAr[1] ),LayersInfo[a].replace( sAr[0], sAr[1] )) for a in LayersInfo.keys() if asset in a]\n\t\treturn dict( lightData )", "def replace(self, filter, asset_dict): # client_dict provides the uuid\n mongo_core = MainDb.get_core_db_instance()\n replace_result = mongo_core.get_assets().find_one_and_replace(\n {\"uuid\": asset_dict[\"uuid\"]}, asset_dict, upsert=True, return_document=ReturnDocument.AFTER)\n if replace_result[\"uuid\"] == asset_dict[\"uuid\"]:\n return True, \"MongoAsset replaced\"\n else:\n return False, \"Failed to replace asset\"", "def filter_data(self, data):\n for f in self.filters:\n data = getattr(self, f)(data)\n return data", 
"def assets_search(ctx, text, pretty):\n ocean = ctx.obj['ocean']\n response = ocean.search(text, pretty)\n echo(response)", "def add_replace_filter(source, args, index):\n original = args.get('replace-pattern%02d' % index)\n replacement = args.get('replace-value%02d' % index)\n tags = args.get('replace-tags%02d' % index)\n use_regex = args.get('replace-regex%02d' % index)\n row_query = args.get('replace-where%02d' % index)\n return source.replace_data(original, replacement, tags, use_regex, queries=row_query)", "def process_asset_data(data):\n buffered_assets = []\n\n for asset in data:\n asset_geom = shape(asset['geometry'])\n buffered_geom = asset_geom.buffer(100)\n\n asset['buffer'] = buffered_geom\n buffered_assets.append(asset)\n\n output = []\n assets_seen = set()\n\n for asset in tqdm(buffered_assets):\n if asset['properties']['Opref'] in assets_seen:\n continue\n assets_seen.add(asset['properties']['Opref'])\n touching_assets = []\n for other_asset in buffered_assets:\n if asset['buffer'].intersects(other_asset['buffer']):\n touching_assets.append(other_asset)\n assets_seen.add(other_asset['properties']['Opref'])\n\n dissolved_shape = cascaded_union([a['buffer'] for a in touching_assets])\n final_centroid = dissolved_shape.centroid\n output.append({\n 'type': \"Feature\",\n 'geometry': {\n \"type\": \"Point\",\n \"coordinates\": [final_centroid.coords[0][0], final_centroid.coords[0][1]],\n },\n 'properties':{\n 'name': asset['properties']['name'],\n }\n })\n\n return output", "async def filter(self, **kwargs):\n\n pass", "def merge_asset(self, other):\n for asset in other.asset:\n asset_name = asset.get(\"name\")\n asset_type = asset.tag\n # Avoids duplication\n pattern = \"./{}[@name='{}']\".format(asset_type, asset_name)\n if self.asset.find(pattern) is None:\n self.asset.append(asset)", "def filter_items(self, context, data, propname):\n\n helper_funcs = bpy.types.UI_UL_list\n\n items = getattr(data, propname)\n\n # Filtering by name\n filtered = helper_funcs.filter_items_by_name(\n self.filter_name, self.bitflag_filter_item, items, \"name\", reverse=False\n )\n\n if not filtered:\n filtered = [self.bitflag_filter_item] * len(items)\n\n d = context.active_object.data\n anim_ret = context.active_object.anim_ret\n\n for index, bone in enumerate(items):\n excluded = False\n found = False\n\n anim_ret_bone = bone.anim_ret_bone\n\n if not anim_ret_bone:\n excluded = True\n if not excluded and anim_ret_bone.source_bone_name == \"\":\n excluded = True\n if bone.name.startswith(ObjectAnimRet.prefix):\n excluded = True\n if not excluded and not anim_ret.show_def and \"DEF-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_mch and \"MCH-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_org and \"ORG-\" in bone.name:\n excluded = True\n if not excluded and not anim_ret.show_fk and \"fk\" in bone.name.lower():\n excluded = True\n if not excluded and not anim_ret.show_ik and \"ik\" in bone.name.lower():\n excluded = True\n if not excluded and anim_ret.filter_layers:\n data_bone = d.bones[bone.name]\n for layer_id, layer in enumerate(d.layers):\n if layer:\n if data_bone.layers[layer_id]:\n found = True\n break\n\n if excluded or not found:\n filtered[index] &= ~self.bitflag_filter_item\n\n ordered = []\n\n # Reorder by name or average weight.\n if self.use_filter_sort_alpha:\n sort = [(idx, getattr(it, \"name\", \"\")) for idx, it in enumerate(items)]\n\n ordered = helper_funcs.sort_items_helper(sort, lambda e: e[1].lower())\n\n return 
filtered, ordered", "def _filter_universe_from_data_for_prediction(self, data, current_timestamp, universe):\n current_date = current_timestamp.date()\n assets = []\n for idx, row in universe.iterrows():\n if row.start_date <= current_date <= row.end_date:\n assets = row.assets\n break\n\n filtered = {}\n for feature, df in data.items():\n filtered[feature] = df.drop(df.columns.difference(assets), axis=1)\n\n return filtered", "def apply_filters(self, filters):\n self._data = self.model.objects.filter(**filters)", "def _filter_data(self, pattern):\n removed = []\n filtered = []\n for param in self.data:\n if not param[0].startswith(pattern):\n filtered.append(param)\n else:\n removed.append(param)\n self.data = filtered\n return removed", "def filterData(self, filter_group_indices, isCaseSensitive = False):\n\n for index in filter_group_indices:\n\n self.patternFilterData(index, isCaseSensitive)\n\n exclude_flag = self.filter_spec[index][1]\n if exclude_flag:\n self.matched[index] = map(not_, self.matched[index])\n\n\n self.combine_matched_list()\n\n self.update_choice_dict()\n\n self.emit(Qt.SIGNAL(\"sigDataFiltered\"),())", "def asset_get():\n search_assets = request.args.getlist(\"name\")\n find_assets = []\n for asset_name in search_assets:\n if asset_name in app.bank:\n find_assets.append(app.bank[asset_name].to_list())\n find_assets = sorted(find_assets, key=lambda s: s[0])\n return jsonify(find_assets)", "def filter(self, filter_dict):\n pass", "def get_data_filter(args):\n diff_data(args, \"filter\")", "def filter(self, *args, **kwargs):", "def test_instrument_inventory_filtering():\n filt = 'GR150R'\n data = mm.instrument_inventory('niriss',\n add_filters={'filter': filt},\n return_data=True)\n\n filters = [row['filter'] for row in data['data']]\n\n assert all([i == filt for i in filters])", "def prepare_filter_params(context, plan_name=None, **kw):\n from debra.models import Influencer\n from debra import logical_categories\n from django.core.cache import get_cache\n cache = get_cache('memcached')\n params = None #cache.get('filter_params')\n if not params:\n # influencers = Influencer.objects.filter(\n # show_on_search=True).exclude(blacklisted=True)\n # influencers = influencers.filter(\n # score_popularity_overall__isnull=False)\n # influencers = influencers.distinct()\n popularity = [\n {\n \"title\": \"Small\",\n },\n {\n \"title\": \"Medium\",\n },\n {\n \"title\": \"Large\",\n }\n ]\n engagement = [\n {\n \"title\": \"0-20\",\n },\n {\n \"title\": \"21-40\",\n },\n {\n \"title\": \"41-60\",\n },\n {\n \"title\": \"61-80\",\n },\n {\n \"title\": \"81+\",\n },\n ]\n\n price_ranges = [\n {\n \"title\": \"Cheap\",\n \"text\": \"Primarily In-expensive\"\n },\n # {\n # \"title\": \"Mid-level\",\n # },\n { \n \"title\": \"Expensive\",\n \"text\": \"Primarily High-end\"\n }\n ]\n\n genders = [\n {\n \"title\": \"Female\",\n },\n {\n \"title\": \"Male\",\n },\n ]\n\n social = [\n {\n \"value\": \"Facebook\",\n \"icon\": \"icon-social_facebook\"\n },\n {\n \"value\": \"Pinterest\",\n \"icon\": \"icon-social_pinterest2\"\n },\n {\n \"value\": \"Twitter\",\n \"icon\": \"icon-social_twitter\"\n },\n {\n \"value\": \"Instagram\",\n \"icon\": \"icon-social_instagram2\"\n },\n {\n \"value\": \"Youtube\",\n \"icon\": \"icon-social_youtube\"\n },\n ]\n\n age_groups = [\n {\n \"value\": \"0_19\",\n \"icon\": \"0 - 19\"\n },\n {\n \"value\": \"20_24\",\n \"icon\": \"20 - 24\"\n },\n {\n \"value\": \"25_29\",\n \"icon\": \"25 - 29\"\n },\n {\n \"value\": \"30_34\",\n \"icon\": \"30 - 34\"\n 
},\n {\n \"value\": \"35_39\",\n \"icon\": \"35 - 39\"\n },\n {\n \"value\": \"40\",\n \"icon\": \"40+\",\n }\n ]\n\n activity = [{\"value\": \"Blog\", \"icon\": \"icon icon-letter_quotes2\"}] + social\n\n categories = []\n\n brands = []\n\n locations = redis_cache.get('toplocs') or []\n # locations = Influencer.get_locations_list(num_results=200)\n # locations = Influencer.get_locations_list(num_results=None)\n\n tags = kw.get('tags', [])\n\n source = [{\"title\": \"Signup\", \"value\": \"blogger_signup\"}]\n\n params = {\n 'show_filters': True,\n 'popularity': list(popularity),\n 'engagement': list(engagement),\n 'categories': list(categories),\n 'brands': list(brands),\n 'priceranges': list(price_ranges),\n 'locations': list(locations),\n 'genders': list(genders),\n 'social': list(social),\n 'activity': list(activity),\n 'tags': list(tags),\n 'source': list(source),\n 'age_groups': list(age_groups),\n 'enabled_filters': [\n \"popularity\", \"engagement\", \"categories\", \"brands\",\n \"priceranges\", \"location\", \"genders\", \"socials\", \"activity\",\n \"tags\", \"likes\", \"shares\", \"comments\", \"source\", \"avgAge\",\n \"customCategories\", \"customOccupation\", \"customSex\", \"customEthnicity\",\n \"customTags\", \"customLanguage\", \"customAgeRange\",]\n }\n cache.set('filter_params', params)\n\n for loc in params.get('locations', []):\n loc['value'] = loc['title']\n\n if True: #settings.DEBUG:\n params['categories'] = [{\"title\": \"Fashion\", \"category\": \"fashion\"},\n {\"title\": \"Food\", \"category\": \"food\"},\n {\"title\": \"Kids\", \"category\": \"kids\"},\n {\"title\": \"Beauty\", \"category\": \"beauty\"},\n {\"title\": \"Travel\", \"category\": \"travel\"}]\n else:\n params['categories'] = []\n \n return params", "def filter_data(article):\n filtered = {\n 'id': article['id'],\n 'title': article['title'],\n 'perex': article['perex'],\n 'body': article['body'],\n 'author': article['author'].get('name', None) \n if article['author'] is not None \n else None,\n 'image': get_image(article),\n 'source': article['source']['name'],\n 'label': article['label']\n }\n\n return filtered", "def filter_data(self):\n if(self.filter_classes == []):\n return\n \n filtered_idx = []\n for id in range(len(self.image_ids)):\n anns = self.load_annotations(id)\n found = False\n for ann in anns:\n if ann['label'] in self.filter_classes:\n found = True\n break\n if found:\n filtered_idx.append(id)\n \n self.filtered_ids = [self.image_ids[id] for id in filtered_idx]\n # self.image_ids = self.filtered_ids\n print(\"Number of filtered instances:\", len(self.filtered_ids))", "def getSpecificData(self,\n release=\"\",\n baseline=\"\",\n project=\"\",\n filters=[\"INPUT_DATA\",\"REVIEW\",\"VTPR\",\"\"],\n source=False):\n if source:\n table = []\n type_items = \"(cvtype='ascii' or cvtype='csrc' or cvtype='incl')\"\n else:\n table = [[],[],[],[]]\n type_items = \"(cvtype='xls' or cvtype='doc' or cvtype='pdf' or cvtype='ascii' or cvtype='csrc' or cvtype='incl')\"\n enabled = True\n for list_filter in filters:\n if self._is_array(list_filter):\n for keyword in list_filter:\n self.ihm.log('Search folder containing keyword: ' + keyword)\n else:\n self.ihm.log('Search folder containing keyword: ' + list_filter)\n stdout = self._runFinduseQuery(release,project,type_items,enabled)\n #print \"STDOUT\",stdout\n\n if not stdout:\n if source:\n print \"FILTER/PROJECT\",list_filter,project\n result = []\n if self._is_array(list_filter):\n for keyword in list_filter:\n self.getItemsInFolder(keyword,\n 
project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n break\n else:\n self.getItemsInFolder(list_filter,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table = result\n else:\n index = 0\n for list_filter in filters:\n result = []\n if self._is_array(list_filter):\n for keyword in list_filter:\n print (\"KEYWORD:\",index,keyword)\n self.getItemsInFolder(keyword,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table[index].extend(result)\n else:\n print (\"KEYWORD2:\",list_filter)\n self.getItemsInFolder(list_filter,\n project,\n baseline,\n release,\n only_name=True,\n with_extension=True,\n mute=True,\n recur=True,\n converted_list=result)\n if result:\n table[index] = result\n index += 1\n print \"OLD TABLE\",table\n return table\n else:\n if enabled:\n if stdout != \"\":\n self.ihm.log(stdout,False)\n regexp, list_items_skipped = self._prepareRegexp(filters)\n output = stdout.splitlines()\n if not source:\n ## print \"REGEXP\"\n ## print regexp\n for line in output:\n item = self._filterRegexp(regexp[0],line)\n if item != \"\":\n list_items_skipped[0].append(item)\n item = self._filterRegexp(regexp[1],line)\n if item != \"\":\n list_items_skipped[1].append(item)\n item = self._filterRegexp(regexp[2],line)\n if item != \"\":\n list_items_skipped[2].append(item)\n # ex: SW_PLAN\\SDP\\IS_SDP_SW_PLAN_SQA.xlsm-1.7.0@SW_PLAN-1.3\n table[0] = list(set(list_items_skipped[0]))\n table[1] = list(set(list_items_skipped[1]))\n table[2] = list(set(list_items_skipped[2]))\n for data in table[0]:\n if self._is_array(filters[0]):\n text = \"\"\n for filter in filters[0]:\n text += \" \" + filter\n else:\n text = filters[0]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n for data in table[1]:\n if self._is_array(filters[1]):\n text = \"\"\n for filter in filters[1]:\n text += \" \" + filter\n else:\n text = filters[1]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n for data in table[2]:\n if self._is_array(filters[2]):\n text = \"\"\n for filter in filters[2]:\n text += \" \" + filter\n else:\n text = filters[2]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n else:\n ## print \"REGEXP\"\n ## print regexp\n for line in output:\n item = self._filterRegexp(regexp[0],line)\n if item != \"\":\n list_items_skipped[0].append(item)\n # ex: SW_PLAN\\SDP\\IS_SDP_SW_PLAN_SQA.xlsm-1.7.0@SW_PLAN-1.3\n table = list(set(list_items_skipped[0]))\n for data in table:\n if self._is_array(filters[0]):\n text = \"\"\n for filter in filters[0]:\n text += \" \" + filter\n else:\n text = filters[0]\n self.ihm.log('Found in '+ text +' folder: ' + data,False)\n else:\n self.ihm.log('No items found with finduse command.')\n return table", "def find_street_in_descriprion(self):\n from filtering_functions import find_street\n for ad_id, ad_data in self.ads_data.items():\n if 'Ulica_re' not in ad_data.keys():\n self.ads_data[ad_id]['Ulica_re'] = find_street(self.ads_data[ad_id]['Opis'])", "def filter_dataset(source_path, dataset_path, progress_bar, info_label, progress, root):\n # dictionary to store two source path\n source_path_name = {}\n for d in SUB_DIRS:\n source_path_name[f\"{d}\"] = os.path.join(source_path, d)\n\n if not os.path.exists(source_path + \"/\" + SUB_DIRS[0]) and not os.path.exists(source_path + \"/\" 
+ SUB_DIRS[1]):\n messagebox.showerror(\"Message\", \"Please check whether source directory, \\n \\\n must contain 'attentive' and 'not_attentive' dataset\")\n else:\n attentive = set()\n not_attentive = set()\n\n total_img = len(os.listdir(source_path + \"/\" + SUB_DIRS[0])) + len(os.listdir(source_path + \"/\" + SUB_DIRS[1]))\n i = 0\n\n # for attentive images in format particular format and availability of face\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[0]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[0] + \"/\" + image):\n attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Not Attentive set filtering is on progress'\n\n # for not attentive images\n for image in os.listdir(source_path + \"/\" + SUB_DIRS[1]):\n if len(image.split(\".\")) == 2 and image.split(\".\")[1] in IMG_FORMAT \\\n and check_availability(source_path + \"/\" + SUB_DIRS[1] + \"/\" + image):\n not_attentive.add(image)\n i += 1\n progress_bar['value'] = int((i / total_img) * 100)\n progress.update()\n\n info_label['text'] = 'Filtering is completed'\n progress.destroy()\n\n attentive, not_attentive = list(attentive), list(not_attentive)\n\n if len(attentive) > 200 and len(not_attentive) > 200:\n next_page_interface(source_path_name, dataset_path, attentive, not_attentive, root)\n else:\n messagebox.showerror(\"Message\", \"Valid Image Count Is Less Than 100\")", "def filter(self, update):\n\n raise NotImplementedError", "def filter_data(data,filters):\n final_filter = pd.Series(np.array([True] * data.shape[0]))\n for attribute, value in filters:\n final_filter &= data[attribute] == value\n return data[final_filter]", "def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items", "def filter_data(data, filter_dict):\n for key, match_string in filter_dict.items():\n if key not in data:\n logger.warning(\"{0} doesn't match a top level key\".format(key))\n continue\n values = data[key]\n matcher = re.compile(match_string)\n if isinstance(values, list):\n values = [v for v in values if matcher.search(v)]\n elif isinstance(values, dict):\n values = dict((k, v) for k, v in values.items() if matcher.search(k))\n else:\n raise MiuraException(\"cannot filter a 
{0}\".format(type(values)))\n data[key] = values", "def add_replace_map_filter(source, args, index):\n url = args.get('replace-map-url%02d' % index)\n row_query = args.get('replace-map-where%02d' % index)\n return source.replace_data_map(util.hxl_data(url, util.make_input_options(args)), queries=row_query)", "def filter(self, data):\n self._calm_the_linter()\n return data, None", "def search_all_records(self, data: dict, execution_context: dict):", "def filter_data(self, json_data):\n\n\t\tdata = json_data['data']\n\t\tlocal_time_convertor = time_convertor.TimeConvertor()\n\n\n\t\tfor event_data in data:\n\t\t\t# go through each event and save data\n\n\t\t\t# first need to get data for all avalible sites\n\t\t\tevent_h2h_odds = []\n\t\t\tevent_site_names = []\n\t\t\tfor i, sites_data in enumerate(event_data['sites']):\n\t\t\t\tif len(sites_data['odds']['h2h']) > 2:\n\t\t\t\t\t# if more the 3 odds values (draw odds given) only take win loss odds\n\t\t\t\t\tevent_h2h_odds.append([sites_data['odds']['h2h'][0], sites_data['odds']['h2h'][1]])\n\t\t\t\telse:\n\t\t\t\t\tevent_h2h_odds.append(sites_data['odds']['h2h'])\n\t\t\t\tevent_site_names.append(sites_data['site_nice'])\n\t\t\t\n\t\t\t# append event data\n\t\t\tself.teams.append(event_data['teams'])\n\t\t\tself.h2h_odds.append(event_h2h_odds)\n\t\t\tself.betting_sites.append(event_site_names)\n\n\t\t\tlocal_time_convertor.convert_to_AEST(event_data['commence_time'])\n\t\t\tself.start_time['string format'].append(local_time_convertor.local_time_string)\n\t\t\tself.start_time['datetime format'].append(local_time_convertor.local_time)\n\n\t\t# debug helper code\n\t\t# print(self.teams)\n\t\t# print(self.betting_sites)\n\t\t# print(self.h2h_odds)", "def search_entity(self, name_filter):\n name_filter=name_filter.lower()\n model_reader=oc.delegator.getModelReader()\n names=model_reader.getEntityNames()\n # print(len(names))\n for name in names:\n if name_filter in name.lower():\n print(name)", "def _filter_data(analyzed_tweet_data: list, start_date, end_date, hashtags, mentions, urls):\n # filter by dates\n filtered_data = get_tweets_in_daterange(\n analyzed_tweet_data, start_date, end_date)\n print(\"Done filtering on date...\")\n if hashtags:\n filtered_data = _filter_search_values(\n 'hashtags', hashtags, filtered_data)\n print(f'Done filtering on hashtags: {hashtags}')\n if mentions:\n filtered_data = _filter_search_values(\n 'mentions', mentions, filtered_data)\n print(f'Done filtering on mentions: {mentions}')\n if urls:\n filtered_data = _filter_search_values(\n 'tweet_urls', urls, filtered_data)\n print(f'Done filtering on urls: {urls}')\n\n return filtered_data", "def apply_filters(filters, items):\n return scom.apply_filters(filters, items)", "def filter_name(self, name):\n return self.form.set_value(\"generating station search\", name)", "def filter_by_symbol(assets: List[Dict], symbols: List) -> List:\n filtered_assets = []\n found = False\n for symbol in symbols:\n for asset in assets:\n if asset['symbol'].lower() == symbol.lower():\n filtered_assets.append(asset)\n found = True\n break\n if not found:\n raise Exception(f'Asset with symbol {symbol} not found in the provided list.')\n return filtered_assets", "def filter_images(data, vgid2idx, meta_vgids):\r\n new_data = []\r\n for vgid in meta_vgids:\r\n new_data.append(data[vgid2idx[vgid]])\r\n return new_data", "def list_cleanup(self, data):\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.required.items():\n if filter_key in 
data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] != filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n if filter_value not in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] not in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n data.remove(data_value)\n break\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if not found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n for data_value in list(data):\n # TODO: Add DEBUG logging (?)\n for filter_key, filter_value in self.excluded.items():\n if filter_key in data_value.keys():\n if isinstance(filter_value, str) and self.exact_match:\n if data_value[filter_key] == filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, str) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n if filter_value in data_value[filter_key]:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and self.exact_match:\n if data_value[filter_key] in filter_value:\n data.remove(data_value)\n break\n elif isinstance(filter_value, list) and (not self.exact_match):\n if data_value[filter_key] is None:\n continue\n found_match = False\n for filter_value_item in filter_value:\n if filter_value_item in data_value[filter_key]:\n found_match = True\n if found_match:\n data.remove(data_value)\n break\n else:\n self.logger.warning(msg=\"List_Cleanup: None of the cases matched. 
Data: %s Filter: %s\" % (data_value, self.filter))\n # TODO: Handle other possible cases\n else:\n self.logger.warning(msg=\"List_Cleanup: Filter key: %s not present in Data: %s\" % (filter_key, data_value))\n continue\n\n return data", "def filter_values(self):\n dfilter = self.args.datafilter\n self.logger.info(u'Filtering values with:{f}'.format(f=dfilter))\n data = self.outputdata\n newdata = {}\n for key, value in data.items():\n self.logger.info(u'\\nProcessing Key:{k}, value:{v}'.format(k=key,\n v=value))\n returned_data = dict_value_filter(key, value, dfilter, self.logger)\n if bool(returned_data):\n newdata[key] = returned_data\n self.logger.info(u'Data after filter:{d}'.format(d=newdata))\n\n self.outputdata = newdata", "def filter_data_on_complaince(folder_Path,complaince_rate):\n complaince_df=pd.read_csv(folder_Path+\"complaince.csv\")\n complaince_df['Percent'] = complaince_df['Percent'].apply(converters.ConvertPercent)\n complaince_df = complaince_df.loc[complaince_df.Percent >= complaince_rate]\n IDs= complaince_df.ID.unique()\n # print(IDs)\n df_apps=pd.read_csv(folder_Path+\"app_usage.csv\")\n df_apps = df_apps.loc[df_apps.user_id.isin(IDs)]\n df_apps = df_apps.reset_index(drop=True)\n df_apps.to_csv(folder_Path+\"Filtered/app_usage.csv\")\n\n\n df_battery= pd.read_csv(folder_Path+\"battery_events.csv\")\n df_battery= df_battery.loc[df_battery.user_id.isin(IDs)]\n df_battery = df_battery.reset_index(drop=True)\n df_battery.to_csv(folder_Path+\"Filtered/battery_events.csv\")\n\n\n df_bluetooth = pd.read_csv(folder_Path+\"bluetooth.csv\")\n df_bluetooth = df_bluetooth.loc[df_bluetooth.user_id.isin(IDs)]\n df_bluetooth = df_bluetooth.reset_index(drop=True)\n df_bluetooth.to_csv(folder_Path+\"Filtered/bluetooth.csv\")\n\n df_screen = pd.read_csv(folder_Path+\"screenstate.csv\")\n df_screen = df_screen.loc[df_screen.user_id.isin(IDs)]\n df_screen = df_screen.reset_index(drop=True)\n df_screen.to_csv(folder_Path+\"Filtered/screenstate.csv\")\n\n\n df_wifi = pd.read_csv(folder_Path+\"wifi.csv\")\n df_wifi = df_wifi.loc[df_wifi.user_id.isin(IDs)]\n df_wifi = df_wifi.reset_index(drop=True)\n df_wifi.to_csv(folder_Path+\"Filtered/wifi.csv\")", "def replace_data_map(self, map_source, queries=[]):\n import hxl.filters\n replacements = hxl.filters.ReplaceDataFilter.Replacement.parse_map(hxl.data(map_source))\n return hxl.filters.ReplaceDataFilter(self, replacements, queries=queries)", "def find_values_to_replace(self):\n regexp = re.compile(self.raw_pattern)\n self.to_replace = regexp.findall(self.raw_sql)", "def filterRansac():\n pass", "def filter(self):\n _filter = self.ask_filter.text()\n if _filter:\n self.parent().artists = dmla.list_artists(_filter)\n else:\n self.parent().artists = self.parent().all_artists\n self.parent().artist_filter = _filter\n self.parent().do_select()", "def filterDataset(dat, dataset):\n #\n dat = dat[dat['organism'].isin(dataset)]\n no_mmei_index = dat['mmei']=='no'\n nonstop_index = dat['mutstop']=='no'\n zerofit_index = dat['fitness'].abs()>1e-4\n mutwt_index = dat['mutwt']=='no'\n dat = dat[no_mmei_index & nonstop_index & zerofit_index & mutwt_index]\n #print \"Filtered data\"\n return dat", "def add_clean_filter(source, args, index):\n whitespace_tags = hxl.TagPattern.parse_list(args.get('clean-whitespace-tags%02d' % index, ''))\n upper_tags = hxl.TagPattern.parse_list(args.get('clean-toupper-tags%02d' % index, ''))\n lower_tags = hxl.TagPattern.parse_list(args.get('clean-tolower-tags%02d' % index, ''))\n date_tags = 
hxl.TagPattern.parse_list(args.get('clean-date-tags%02d' % index, ''))\n date_format = args.get('clean-date-format%02d' % index, None);\n number_tags = hxl.TagPattern.parse_list(args.get('clean-num-tags%02d' % index, ''))\n number_format = args.get('clean-number-format%02d' % index, None);\n latlon_tags = hxl.TagPattern.parse_list(args.get('clean-latlon-tags%02d' % index, ''))\n purge_flag = args.get('clean-purge%02d' % index, False)\n row_query = args.get('clean-where%02d' % index, None)\n return source.clean_data(\n whitespace=whitespace_tags,\n upper=upper_tags,\n lower=lower_tags,\n date=date_tags,\n date_format=date_format,\n number=number_tags,\n number_format=number_format,\n latlon=latlon_tags,\n purge=purge_flag,\n queries=row_query\n )", "def filters_to(start, finish):\n for find, replace in filters:\n start = find.sub(replace, start)\n assert start == finish", "def filter(self, data):\n self.data = pysap.Image(data=self.flt.filter(data))", "def _filter(self, values, asset):\n log.debug(\"Testing trigger filters against asset %s\", asset['id'])\n for filter in self.filters:\n if not filter._apply(values, asset):\n return False\n return True", "def filter_unique_ticker(state: State):\n if state.events.extract_company_list + state.events.load_company_list == 200:\n try:\n state.files.combined_exchanges.columns = map(str.lower, state.files.combined_exchanges.columns)\n\n # Following line is dropping duplicates but there's not?\n state.output = state.files.combined_exchanges[[\"symbol\", 'name', 'lastsale', 'marketcap', 'ipoyear', 'sector', 'industry']].drop_duplicates()\n state.output.to_csv(f\"{PATH}/data/combined_exchanges.csv\")\n state.events.transform_company_list = 100\n except Exception as e:\n state.output = None\n LOGGER.warning(f\"Could not transform company data , error: {e}\")\n\n else:\n state.output = pd.read_csv(f\"{PATH}/data/combined_exchanges_sample.csv\")\n LOGGER.warning(f\"Using old company ticker file\")", "def filter_movie():\n name = request.args.get('name', default=\"\", type=str)\n year = request.args.get('year', default=-1, type=int)\n\n filtered_list = []\n\n if name != \"\":\n name = name.replace('_', ' ')\n name = name.replace('\"', '')\n for movie in movies_data:\n if name.lower() in movie.lower():\n filtered_list.append(movies_data[movie])\n\n if year != -1:\n filtered_list = [movie for movie in filtered_list if year == movie['year']]\n\n return make_response(jsonify(filtered_list), 200)", "def filter(request):\n product = Product.objects.filter(name__icontains=request.GET['q']).filter(brand__icontains=request.GET['brand']).filter(year__icontains=request.GET['year'])\n return render(request, \"search_results.html\", {\"products\": product})", "def filter_data(self):\n self.df = self.df[HeatStrokeDataFiller.important_features]", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name})\n 
returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def filter_data(city, data_filter, Month, Day):\r\n directory = 'C:/Users/The Presence/Documents/Personal Document/Personal Development/Data Science and Analytics/Udacity (Python Course)/Labs Tasks-20200924T221854Z-001/Lab Results/Capstone Project/bikeshare-2/' + city \r\n df = pd.read_csv(directory)\r\n df_1 = df.copy() \r\n df['Start Time'] = pd.to_datetime(df['Start Time'])\r\n df['Months'] = df['Start Time'].dt.month_name(locale='English')\r\n df['Days'] = df['Start Time'].dt.day_name(locale='English')\r\n df['Hours'] = df['Start Time'].dt.hour\r\n if data_filter == 'None':\r\n df = df\r\n elif data_filter == 'Month':\r\n spef_mon = Month\r\n df_month = df[df.Months == spef_mon]\r\n df = df_month\r\n elif data_filter == 'Day':\r\n spef_day = Day\r\n df_day = df[df.Days == spef_day]\r\n df = df_day\r\n elif data_filter == 'Both':\r\n spef_mon, spef_day = Month, Day\r\n df_month = df[df.Months == spef_mon]\r\n df_month_day = df_month[df_month.Days == spef_day]\r\n df = df_month_day\r\n return(df, df_1)", "def manual(self):\n\n\t\tfilter = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\", \"World War II\"],\n\n\t\tself.index[\"authorities\"] = [auth for auth in self.index[\"authorities\"] if auth not in filter]\n\n\t\tfor item in self.index[\"items\"]:\n\t\t\tself.index[\"items\"][item] = [auth for auth in self.index[\"items\"][item] if auth in self.index[\"authorities\"]]", "def asset_by_common_name(self) -> Dict:\n if self._assets_by_common_name is None:\n self._assets_by_common_name = OrderedDict()\n for name, a_meta in self.item.assets.items():\n bands = []\n if 'eo:bands' in a_meta.extra_fields.keys():\n bands = a_meta.extra_fields['eo:bands']\n if len(bands) == 1:\n eo_band = bands[0]\n if 'common_name' in eo_band.keys():\n common_name = eo_band['common_name']\n if not self.is_valid_cname(common_name):\n raise ValueError(f'Must be one of the accepted common names. 
Got \"{common_name}\".')\n else:\n self._assets_by_common_name[common_name] = {'meta': a_meta, 'name': name}\n if not self._assets_by_common_name:\n raise ValueError(f\"Common names for assets cannot be retrieved\")\n return self._assets_by_common_name", "def _applyFilters(self) -> None:\n self._dataframe = self._source.loc[:, self._visable_columns]\n for column, value in self._filters.items():\n if value is not None:\n self._dataframe = self._dataframe[self._source[column] == value]\n else:\n self._dataframe = self._dataframe[self._source[column].isnull()]\n\n self.layoutChanged.emit()", "def social_healthcare(osm_path): \n df_all = retrieve(osm_path,'multipolygons',['other_tags', 'amenity']).rename(columns={'other_tags': 'asset'}) \n \n #delete rows that are duplicates of social_amenity\n asset_list = ['hospital', 'doctors', 'clinic', 'dentist', 'pharmacy'] #note that this list of assets should be similar to assets extracted in def social_amenity\n for asset in asset_list:\n index_delete = df_all[(df_all['amenity'] == asset)].index\n df_all.drop(index_delete,inplace=True)\n df_all = df_all.drop(['amenity'], axis=1).reset_index(drop=True) #drop amenity column, reset index\n \n #get requested assets \n df = healthcare_filter(df_all)\n \n return df.reset_index(drop=True)", "def office_prefilter_data(parser, args, params):\n local_args = parser.parse_known_args(args)\n \n control.prefilter_data(params)", "def clean_products_splitter(filename):\n \n\n dataset = pd.read_csv(filename, encoding='utf-8-sig')\n \n dataset = dataset.drop_duplicates()\n \n \n \"\"\"\n UNIFY AND SIMPLIFY PRODUCT NAMES\n \"\"\"\n \n # GET ALL LISTS FIRST\n dataset['product_name'] = dataset['product_name'].str.replace(\" & \", \" and \")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Turbomaster\", \"TurboMaster\")\n dataset['product_name'] = dataset['product_name'].str.replace(\" - Listings and Approvals\", \"\")\n dataset['product_name'] = dataset['product_name'].str.replace(\" Trouble Shooting Chart\", \"\")\n dataset['product_name'] = dataset['product_name'].str.replace(\" Trouble\", \"\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Imersion\", \"Immersion\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Line‐Voltage\", \"Line Voltage\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Thermostsat\", \"Thermostat\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"TRUERH\", \"TrueRH\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"™\", \"\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Gas-Engine-Drive\", \"Gas Engine Drive\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Centrifugal Chiller\", \"Centrifugal Liquid Chiller\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"the OptiView\", \"OptiView\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"®\", \"\")\n \n \n \n # MULTIPLE IN A LIST\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"1/2 or 3/4 Inch NPT 6 ESFR Sprinklers\"), \"1/2 Inch NPT 6 ESFR Sprinklers, 3/4 Inch NPT 6 ESFR Sprinklers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"Chillers with 292/351 and 419/503 VSD Drives - Trap Filter Resistor Wire Routing\"), \"Chillers with 292/351 & 419/503 VSD Drives Trap Filter Resistor Wire Routing\", dataset['product_name'])\n dataset['product_name'] = 
np.where(dataset['product_name'].str.contains(\"Metasys System UL 864 10th Edition UUKL/ORD-C100-13 UUKLC Smoke Control System\"), \"Metasys System UL 864 10th Edition UUKL/ORD-C100-13 UUKLC Standard Smoke Control\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"Speaker/Visible Notification Appliance with TrueAlert\"), \"Speaker/Visible Notification Appliances with TrueAlert\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"F4-CGM General Purpose Application Controllers\"), \"F4-CGM General Purpose Application Controllers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"YCAL and YCUL Style A Chillers Piping Replacement\"), \"YCAL Style A Chillers Piping Replacement, YCUL Style A Chillers Piping Replacement\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"YCAL/YCUL Chillers Using Copeland Scroll Compressors\"), \"YCAL Chillers Using Copeland Scroll Compressors, YCUL Chillers Using Copeland Scroll Compressors\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'].str.contains(\"YD, YK, YB, YG, YST, YS, YR, YT\"), \"YD Centrifugal Liquid Chillers, YK Centrifugal Liquid Chillers, YB Centrifugal Liquid Chillers, YG Centrifugal Liquid Chillers, YST Centrifugal Liquid Chillers, YS Centrifugal Liquid Chillers, YR Centrifugal Liquid Chillers, YT Centrifugal Liquid Chillers\", dataset['product_name'])\n \n \n # YB FIX\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/HiNK6l4VBWjJRlXV6832Fg\", \"MicroComputer Control Center\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/7lxnU2jU52bhl0hD8F02YA\", \"YB Design Level A Field Connections Microcomputer Control Center\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/RY7sHgddCKj1lgMLLjbQ7g\", \"YB Design Level A Field Control Modifications Diagram Microcomputer Control Center\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/7u_6Od2UnqkxSqVO_wADzA\", \"YB Design Level A Millennium Gas-Engine-Drive Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/yCbTCYz2lF7ewGzhk~YGng\", \"YB Design Level A Millennium Gas-Engine-Drive Chillers MicroComputer Control Center 371-01469-000, Control Panel\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/DNrYv_JqmOoJlN~xNIgSXw\", \"YB Design Level A Millennium Gas-Engine-Drive Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/DNrYv_JqmOoJlN~xNIgSXw\", \"YB Design Level A Wiring Diagram Microcomputer Control Center\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/kJRJ3V1JCifOZWLocxORKQ\", \"YB Design Level A Millennium Gas-Engine-Drive Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/6oC3oIr9_6mVOVY5~86KZw\", \"YB Design Level A Millennium Gas-Engine-Drive Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/nK5Erc53vXIoy8bVvat_wQ\", \"YB Style A Millennium Gas 
Engine Drive Chiller, YG Style A Millennium Gas Engine Drive Chiller\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/mr~dEWMlqLZjA1MYTCLdzQ\", \"YB Style A Millennium Gas Engine Drive Chiller System Status Printers, YG Style A Millennium Gas Engine Drive Chiller System Status Printers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/J3VpswGdTD4C1jK~RHptnQ\", \"YB Style B Flexlogix Control Center Gas Engine Liquid Chiller Control Panel, YG Style B Flexlogix Control Center Gas Engine Liquid Chiller Control Panel\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/SQ~7ugoT~l4NqVg85xJ~VQ\", \"YD, YK, YB, YG, YST, YS, YR, YT\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/1PRW3hsZTn9wCIjusRw1QQ\", \"YG Style A Millennium Centrifugal Liquid Chillers, YB Style A Millennium Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/_qLr3eBTJvuf8PszgL6yvg\", \"YK Centrifugal Liquid Chillers, YD Centrifugal Liquid Chillers, YR Centrifugal Liquid Chillers, YB Centrifugal Liquid Chillers, YG Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/vPV38hKdWIdnM482qAweRQ\", \"Model YC Styles A thru D Internally Compounded Compressor Units\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/IJeX0VyW1lRGsQE259LMFQ\", \"R Series - Compressor Units\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/VoCgTeQYx2skajELwlKgkg\", \"2-5/8 Bore V/W Compressors\", dataset['product_name'])\n \n \n \n ############ SPLIT ###############\n dataset['product_name'] = dataset['product_name'].apply(lambda x: x.split(',')) \n product_name_column = dataset.apply(lambda x: pd.Series(x['product_name']), axis=1).stack().reset_index(level=1, drop=True)\n product_name_column.name = 'product_name'\n dataset = dataset.drop('product_name', axis=1).join(product_name_column)\n dataset['product_name'] = pd.Series(dataset['product_name'], dtype=object)\n \n \n \n # SINGLE\n dataset['product_name'] = np.where(dataset['product_name'] == \"OM Compressor\", \"OM Centrifugal Compressor\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"OT\", \"OT Open Turbopak Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"P1000 Series Pressure Independent Valve\", \"P1000 Series Pressure Independent Valves\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"PCA\", \"PCA Control Panel\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"PCG\", \"PCG Control Panel\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"PCX\", \"PCX Control Panel\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"SAPPHIRE\", \"SAPPHIRE Fire Suppression Systems\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"YB\", \"YB Style A Millennium Gas-Engine-Drive Chiller\", dataset['product_name'])\n dataset['product_name'] = 
np.where(dataset['product_name'] == \"YK\", \"YK Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"YK Centrifugal Chillers -\", \"YK Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"YK Chiller\", \"YK Centrifugal Liquid Chillers\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"CD15-28\", \"CD15 - CD28\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"DC090-150\", \"DC090 - DC150\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"DC090 - DC150\", \"DC090 - DC150\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"PC090 - 240\", \"PC090 - PC240\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"PD180 - 240\", \"PD180 - PD240\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T-3000\", \"T-3100 Thermostat\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T-3111\", \"T-3111 Thermostat\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T-3102\", \"T-3102 Heating Cooling Deadband Thermostat\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T-5312\", \"T-5312 Receiver-Controller\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T2000 Thermostat\", \"T2000 Fan Coil Thermostat\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"T2000\", \"T2000 Fan Coil Thermostat\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"YD\", \"YD Centrifugal Liquid Chillerss\", dataset['product_name'])\n \n \n \n # PROBLEMATIC\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/vPV38hKdWIdnM482qAweRQ\", \"Reciprocating Compressors\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/tRBtrRbkMuIu7xa1f~OWvA\", \"Model YC Styles A thru D Internally Compounded Compressor Units\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/IJeX0VyW1lRGsQE259LMFQ\", \"R Series - Compressor Units\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/6gp0iUEo7CJAwoNj3RhjJg\", \"YK Medium Voltage Variable Speed Drive\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['document_link'] == \"/viewer/document/SZCWEgCoCZ1DPZEBnMVR~Q\", \"Millennium Variable Speed Drive, CF-CN, 5CC-5CI, with Optional Harmonic Filter\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == '\"B\" and \"C\" All Mod Levels', \"HT/OT & YT Chillers W/Air-Cooled SSS Mod\", dataset['product_name'])\n dataset['product_name'] = np.where(dataset['product_name'] == \"-22 and 502\", \"Types H and F Series\", dataset['product_name'])\n \n \n # CHILLER/CHILLERS FIX\n \n dataset['product_name'] = dataset['product_name'].str.replace(\"Chillerss\", \"Chiller\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Chillers\", \"Chiller\")\n dataset['product_name'] = dataset['product_name'].str.replace(\"Chiller\", \"Chillers\")\n \n \n \n 
dataset['product_name'] = dataset['product_name'].str.strip()\n\n\n\n\n\n dataset.to_csv(\"6_documents_sorted_products.csv\", encoding='utf-8-sig', index=False)\n \n \n return dataset", "def cleaning (data):", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def filter():\n return get_filter_data(db, MyTable)", "def _apply_filters(self, text, tag):\n\n # The order of the filters below is important\n # and should not be changed\n\n # intial_quotes needs to happen at this point so that\n # attribute values introduced later on do not get affected\n text = self.initial_quotes(text)\n text = self.smarty_pants(text)\n text = self.amp(text)\n text = self.caps(text)\n\n return text", "def filterData(records):\n def isInteresting(record):\n if record[VO_ISSUER] in ('/DC=ch/DC=cern/OU=computers/CN=voms.cern.ch', '/DC=ch/DC=cern/OU=computers/CN=lcg-voms.cern.ch'):\n return True\n if record[VO_NAME] in ('atlas', 'cms', 'alice'):\n return True\n if record[USERSN] == '/C=SI/O=SiGNET/O=IJS/OU=F9/CN=Andrej Filipcic':\n return True\n if record[USERSN] in ('aliprod', '/aliprod'):\n return True\n if record[USERSN].startswith(ALIEN_USER_PREFIX):\n return True\n\n return False\n\n return [ r for r in records if isInteresting(r) ]", "def match_dynamic_data(self, indices):\n self.available_bike_stands = self.available_bike_stands[indices]\n self.availabel_bikes = self.available_bikes[indices]\n self.last_update = self.last_update[indices]\n self.status = self.status[indices]", "def filter_addreplace(name, buffers, tags, regex):\n\n\tif filter_exists(name):\n\t\tfilter_del(name)\n\n\tweechat.command(weechat.buffer_search_main(), \"/mute filter add %s %s %s %s\" % (name, buffers, tags, regex))", "def do_data_filter(self, arg):\n if arg:\n if arg.upper() == \"ON\":\n self.data_only = True\n print(\"Filtering for data\")\n elif arg.upper() == \"OFF\":\n self.data_only = False\n print(\"Raw data, no filtering\")\n else:\n print(\"Incorrect arg\")\n elif self.data_only:\n self.data_only = False\n print(\"Raw data, no filtering\")\n else:\n self.data_only = True\n print(\"Filtering for data\")", "def specialSearch(searchType, start=0, length=20, assetType=None):\n url = \"%s/rest/assets/search/%s/%i/%i\" % (serverString, searchType, start, length)\n if assetType:\n url += \"/\"+assetType\n doc = minidom.parseString(urllib.urlopen(url).read().decode(\"utf-8\", \"ignore\").encode(\"ascii\", \"xmlcharrefreplace\"))\n if int(doc.getElementsByTagName(\"status\")[0].firstChild.data) != 1:\n raise ServerError(doc.getElementsByTagName(\"status\")[0].firstChild.data)\n assets = []\n for element in doc.getElementsByTagName(\"asset\"):\n assets += [Asset()]\n assets[-1]._getInfoFromNode(element)\n return assets", "def filterIEDBFile(filename, field, search):\n X = pd.read_csv(filename)\n cols = ['PubMed ID','Author','Journal','Year','T Cell ID','MHC Allele Name',\n 'Epitope Linear 
Sequence','Epitope Source Organism Name']\n y = X[X[field].str.contains(search)]\n print y[cols]\n y.to_csv('filtered.csv',cols=cols)\n return y", "def _get_asset_info(item, name):\n\n if name in item.assets:\n return item.assets[name]\n elif name.replace(\"B\", \"B0\") in item.assets:\n # Bx -> B0x\n return item.assets[name.replace(\"B\", \"B0\")]\n elif name.replace(\"B0\", \"B\") in item.assets:\n # B0x -> Bx\n return item.assets[name.replace(\"B0\", \"B\")]\n else:\n available = [key for key in item.assets.keys() if key not in [\"thumbnail\", \"overview\", \"info\", \"metadata\"]]\n raise KeyError(\"asset '%s' not found. Available assets: %s\" % (name, avaialable))", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n# TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n city_abbreviations = ['chi', 'ny', 'w']\n while True:\n city = input('Kindly specify a city by typing chicago or new york city or washington: \\n\\n').lower()\n if city in CITY_DATA.keys():\n break\n elif city.lower() == 'new york': #if the user forgot to add city to new york as it's a common mistake between users\n print('\\nPlease notify that the city you typed called new york city, So retype it again right this time: \\n')\n elif city in city_abbreviations: #if the user input was abbreviations of the name as it's a common mistake between users\n print('\\nPlease notify that city abbreviation\\'s is not allowed, Retype city full name!\\n')\n elif city.lower() == 'newyorkcity': #if user's input was newyorkcity without any spaces as it's a common mistake between users\n print('\\nPlease notify that the city you typed called new york city with spaces between words, So retype it again right this time: \\n')\n else: #if user printed any other things like wrong name or used speical chracters like space _ + = ~\n print('\\nThats invalid input....\\n\\nplease choose one of the three cities chicago or new york city or washington.\\n') \n# TO DO: get user input for month (all, january, february, ... , june)\n months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']\n months_abbreviations = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug']\n while True:\n month = input('\\n\\nTo filter data by a particuler month, please type the month or all for not filtering by month: \\n-january\\n-february\\n-march\\n-april\\n-may\\n-june\\n-all\\n\\n').lower()\n if month in months:\n break\n elif month in months_abbreviations:\n print('\\nPlease notify that months abbreviation\\'s is not allowed, Retype month full name!\\n')\n else: #if the user input was abbreviations of the name as it's a common mistake between users \n print('\\nThats invalid input....\\n\\n\\nplease choose one of the six months listed to filter with or use no filter\\n')\n# TO DO: get user input for day of week (all, monday, tuesday, ... 
sunday)\n days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday','saturday', 'sunday', 'all']\n days_abbreviations = ['mon', 'tu', 'tue', 'tues', 'wed', 'th', 'thu', 'thur', 'thurs', 'fri', 'sat', 'sun']\n while True:\n day = input('\\n\\nTo filter data by a particuler day, please type the day or all for not filtering by day: \\n-saturday\\n-sunday\\n-monday\\n-tuesday\\n-wednesday\\n-thursday\\n-friday\\n-all\\n\\n').lower()\n if day in days:\n break\n elif day in days_abbreviations: #if the user input was abbreviations of the name as it's a common mistake between users\n print('\\nPlease notify that day abbreviation\\'s is not allowed, Retype day full name!\\n') \n else:\n print('\\nThats invalid input....\\n\\n\\nplease choose one of the seven days listed to filter with or use no filter\\n') \n print('-'*40)\n return city, month, day", "def update_table(searchvalue, sortvalue) -> tuple[dict, str, px.bar, px.bar]: # type: ignore\n sorted_data = data.copy(deep=True)\n\n output = \"\"\n\n # sort data\n if sortvalue == \"year\":\n sorted_data = sorted_data.sort_values(by=[\"year\"]) # sort by year\n elif sortvalue == \"title\":\n sorted_data = sorted_data.sort_values(by=[\"title\"]) # sort by title\n elif sortvalue == \"author\":\n sorted_data = sorted_data.sort_values(by=[\"author\"]) # sort by author\n\n data2 = sorted_data.copy(deep=True).to_dict(\"records\")\n\n # search for data\n for row in sorted_data.to_dict(\"records\"):\n found = False\n for key in row:\n if searchvalue.lower().strip() in str(row[key]).lower():\n found = True\n\n if found is False:\n data2.remove(row)\n\n # check if search results are empty\n if not data2:\n output = \"No records included in the sample.\"\n\n return (\n data2,\n output,\n plot_time(pd.DataFrame.from_dict(data2)),\n plot_journals(pd.DataFrame.from_dict(data2)),\n )", "def itemFilter(*args, byBin: Union[AnyStr, List[AnyStr], bool]=\"\", byName: Union[AnyStr,\n bool]=\"\", byScript: Union[AnyStr, bool]=\"\", byType: Union[AnyStr, List[AnyStr],\n bool]=\"\", category: Union[AnyStr, List[AnyStr], bool]=\"\", classification:\n Union[AnyStr, bool]=\"\", clearByBin: bool=True, clearByType: bool=True,\n difference: Union[List[AnyStr, AnyStr], bool]=None, exists: bool=True,\n intersect: Union[List[AnyStr, AnyStr], bool]=None, listBuiltInFilters: bool=True,\n listOtherFilters: bool=True, listUserFilters: bool=True, negate: bool=True,\n parent: Union[AnyStr, bool]=\"\", pythonModule: Union[AnyStr, bool]=\"\",\n secondScript: Union[AnyStr, bool]=\"\", text: Union[AnyStr, bool]=\"\", union:\n Union[List[AnyStr, AnyStr], bool]=None, uniqueNodeNames: bool=True, q=True,\n query=True, e=True, edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def test_replace_software_asset_for_software_component(self):\n pass", "def search(self):\n apk_files = self.apk.get_files_types()\n search_results = []\n for file_path, file_type in apk_files.iteritems():\n file_ext = os.path.splitext(os.path.basename(file_path))[1]\n\n #if file type filter on, and this file is not that type, then skip\n if self.file_types and not any(interested_type in file_type.lower() or interested_type in file_ext for interested_type in self.file_types):\n continue\n\n search_result = None\n file_data = self.apk.get_file(file_path)\n\n if self.search_strings:\n for pattern in self.patterns:\n match = pattern.search(file_data)\n if match:\n search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': pattern.pattern}\n search_results.append(search_result)\n else:\n 
search_result = {'file_path': file_path,\n 'file_type': file_type,\n 'search_string': None}\n search_results.append(search_result)\n\n #write individual files\n if search_result and self.save_matched_files_dir:\n #save original structure to avoid duplicate filename collisions\n save_file_path = os.path.join(self.save_matched_files_dir, file_path)\n if not os.path.exists(os.path.dirname(save_file_path)):\n os.makedirs(os.path.dirname(save_file_path))\n\n with open(save_file_path,'wb') as f:\n f.write(file_data)\n\n if 'Android binary XML' in file_type:\n with open(save_file_path,'r+') as axml_f:\n decoded_axml = AXMLPrinter(axml_f.read()).buff\n axml_f.seek(0)\n axml_f.write(decoded_axml)\n axml_f.truncate()\n\n return search_results", "def test_correct_dataset_found_by_case_insensitive_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name.upper()})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_case_insensitive_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(dataset_name=dataset_name)\n url = self.get_url()\n response = self.client.get(url, {'q': dataset_name.upper()})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def _data_filter(data: str) -> list:\n\n return PROXIES_DATA_REGEX.findall(str(data))", "def test_ocean_assets_search(publisher_ocean_instance, metadata):\n identifier = str(uuid.uuid1()).replace(\"-\", \"\")\n metadata_copy = metadata.copy()\n metadata_copy[\"main\"][\"name\"] = identifier\n assert len(publisher_ocean_instance.assets.search(identifier)) == 0\n\n publisher = get_publisher_wallet()\n ddo = publisher_ocean_instance.assets.create(metadata_copy, publisher)\n wait_for_ddo(publisher_ocean_instance, ddo.did)\n time.sleep(1) # apparently changes are not instantaneous\n assert len(publisher_ocean_instance.assets.search(identifier)) == 1\n assert len(publisher_ocean_instance.assets.search(\"Gorilla\")) == 0", "def assets_pull(ctx, text, method):\n ocean = ctx.obj['ocean']\n response = []\n for did in ocean.search(text):\n print('pulling:', did)\n response += [ctx.invoke(assets_consume,\n did=did,\n method=method)]", "def get_filters():\n print('Hello! Let\\'s explore some US bikeshare data!')\n # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs\n #Invalid input is administered to by using a while loop.\n while True:\n city=input(\"Choose a city name between Chicago, New York City or Washington:!\").lower()\n if city not in CITY_DATA:\n print(\"\\n Not a valid city\\n\")\n continue\n else:\n break\n\n # TO DO: get user input for month (all, january, february, ... 
, june)\n while True:\n try\n month=str(input('Enter name of one month(from January to June) to filter by or \"all\" ,for no filter :')).lower()\n months=['january', 'february', 'march', 'april', 'may', 'june']\n if month == 'january':\n month = months[0]\n elif month == 'february':\n month = months[1]\n elif month == 'march':\n month = months[2]\n elif month == 'april':\n month = months[3]\n elif month == 'may':\n month = months[4]\n elif month == 'june':\n month = months[5]\n elif month == 'all':\n print('all')\n else:\n raise(Exception)\n\t\t\texcept Exception as error:\n print('Invalid Input!,please restart again!.')", "def on_pre_enter(self, *args):\n self.ids['search'].text = ''\n self.filter()", "def test_correct_dataset_found_by_case_insensitive_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name.upper()})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def test_correct_dataset_found_by_case_insensitive_name(self):\n dataset_name = 'my_unlikely_dataset_name'\n dataset = factories.SourceDatasetFactory.create(\n dataset_name=dataset_name,\n source_study_version=self.source_study_version\n )\n url = self.get_url(self.study.pk)\n response = self.client.get(url, {'q': dataset_name.upper()})\n returned_pks = get_autocomplete_view_ids(response)\n self.assertEqual(returned_pks, [dataset.i_id])", "def _get_data_reference_list_from_cache_by_data_asset_name(\n self, data_asset_name: str\n ) -> List[Any]:\n raise NotImplementedError", "def filter(self, alias, condition, inplace=False):\n data = self._data.copy()\n data.index = pd.Index(list(range(0, len(data.index))))\n filter_idx, _ = get_logic_index(pd.Series(data.index), condition, data)\n filtered_data = data.iloc[filter_idx, :]\n if inplace:\n self.filtered = alias\n self._data = filtered_data\n else:\n new_ds = self.clone()\n new_ds._data = filtered_data\n new_ds.filtered = alias\n return new_ds", "def social_infrastructure_point(osm_path): \n df_all = retrieve(osm_path,'points',['other_tags']).rename(columns={'other_tags': 'asset'}) \n \n #get requested healthcare assets categorized under the key 'healthcare' with correct formatting \n df_h = healthcare_filter(df_all)\n \n #get requested healthcare assets categorized under the key 'amenity' \n df_a = pandas.DataFrame(columns=['osm_id','asset','geometry']) #create df for saving data\n for row in range(len(df_all.index)): \n if 'amenity' in df_all[\"asset\"][row]: \n if not 'healthcare' in df_all[\"asset\"][row]: #check if healthcare key is present\n df_a = df_a.append(df_all.loc[row]) #if so, save in df\n \n if '\"amenity\"=>\"doctors\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'doctors' #to be consistent with asset list \n elif '\"amenity\"=>\"pharmacy\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'pharmacy'\n elif '\"amenity\"=>\"hospital\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'hospital'\n elif '\"amenity\"=>\"clinic\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'clinic'\n elif '\"amenity\"=>\"dentist\"' in df_a[\"asset\"][row]:\n df_a[\"asset\"][row] = 'dentist'\n else:\n df_a = df_a.drop(index=row)\n \n df_social_points = df_a.append(df_h)\n \n return df_social_points.reset_index(drop=True)", "def apply_filters(self, new_filters):\n\t\tself.filters = new_filters", "def 
file_update(self, data):\n result = self.search(data)\n\n if result == True:\n index = self.hash_function(data)\n self.objects_list[index].remove(data)\n self.display_content_hashtable()\n\n if result == False:\n index = self.hash_function(data)\n self.objects_list[index].append(data)\n self.display_content_hashtable()", "def removeLegacy(self, path=None):\n\n df = pd.read_csv(path, compression='gzip')\n print(df.shape)\n gamelist = pd.read_csv('Resources/Genres.csv.gz', usecols=['appid'])\n gamelist = pd.DataFrame(gamelist.appid.unique(), columns=['appid'])\n print(gamelist)\n filter_df = pd.merge(df, gamelist, on='appid', how='inner')\n filter_df = filter_df.dropna()\n filter_df = filter_df.sort_values(['steamid', 'appid'], ascending=[True, True])\n print('done')\n print(filter_df.shape)\n print(filter_df)\n print(np.setdiff1d(df['appid'].unique(), filter_df['appid'].unique()))\n filter_df.to_csv(path, compression='gzip', columns=['steamid', 'appid', 'rating'], index=None)" ]
[ "0.6367185", "0.59256476", "0.5885273", "0.5829923", "0.5723036", "0.5712686", "0.5658041", "0.55008304", "0.5442001", "0.543063", "0.5343348", "0.53341204", "0.5302959", "0.5286173", "0.52814966", "0.5254674", "0.5248854", "0.5234224", "0.5183874", "0.5160617", "0.51445144", "0.5133518", "0.5114703", "0.50937665", "0.50837463", "0.5078103", "0.5072734", "0.5049597", "0.50115454", "0.49838674", "0.4973434", "0.49518114", "0.49506024", "0.49461472", "0.49338573", "0.49335602", "0.4926117", "0.49227038", "0.4915062", "0.49142787", "0.49097025", "0.49082103", "0.4906147", "0.4895763", "0.48679924", "0.48554948", "0.48472646", "0.48468903", "0.48434436", "0.48379937", "0.48323932", "0.48160616", "0.48152605", "0.48147166", "0.47929606", "0.4791523", "0.4789806", "0.47884524", "0.47838566", "0.47817352", "0.47817352", "0.4781453", "0.47750098", "0.4774639", "0.47695476", "0.47619292", "0.47595266", "0.4746757", "0.47339675", "0.4733863", "0.4733863", "0.47308877", "0.4709151", "0.46993423", "0.46991083", "0.4694785", "0.46867418", "0.46829203", "0.46800065", "0.46726835", "0.46652308", "0.4661854", "0.4657451", "0.46499068", "0.464921", "0.46485978", "0.46485978", "0.4645442", "0.46358246", "0.4632857", "0.46300602", "0.46292418", "0.4629103", "0.4629103", "0.46283627", "0.46269515", "0.46230304", "0.46226844", "0.46194896", "0.4618386" ]
0.52709234
15
make the overrides based on data
def makeOverrides(self): self.overridesWithValues = self.dataOverrides
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def crossover(p1, p2):\n genotype = []\n \n #Your code here\n \n return {'genotype': genotype, 'fitness': None}", "def crossover(x1,x2):\n for chromo in x1.chromosomes:\n result_chromos = [np.zeros((chromo.shape))]\n #result_chromos = [np.zeros((chromo.shape)) for chromo in x1.chromosomes]\n i = 0\n for j in range(len(x1.chromosomes[i])):\n for k in range(len(x1.chromosomes[i][j])):\n if(np.random.rand(1) < 0.5):\n result_chromos[i][j][k] = x1.chromosomes[i][j][k]\n else:\n result_chromos[i][j][k] = x2.chromosomes[i][j][k]\n if(np.random.rand(1)< 0.8):#at 0.3 very agressive\n result_chromos[i][j][k] += -0.05 + np.random.rand(1)*0.1\n return result_chromos", "def get_data(n):\n data = pd.read_csv('map_data/lior_results_2.csv')\n data = data.drop(['estimated_mass', 'estimated_pop'], axis=1)\n data = data[data.binomial != 'Sus scrofa'] # Wild Boar\n data = data[data.binomial != 'Ursus maritimus'] # Polar bear\n data = data[data.binomial != 'Sus bucculentus'] # EX\n data = data[data.binomial != 'Melomys rubicola'] # EX\n data = data.assign(total_mass=data.AdultBodyMassG * data.pop_density * data.Range,\n total_mass_density=data.AdultBodyMassG * data.pop_density)\n data = data.sort_values(by='total_mass_density', ascending=False)\n data = data.iloc[0:n - 1]\n geo_data = gpd.read_file('TERRESTRIAL_MAMMALS/TERRESTRIAL_MAMMALS.shp').to_crs(\"EPSG:6933\")\n geo_data = geo_data[geo_data.category != 'EX']\n range_polygons = geo_data.loc[(geo_data['legend'] == 'Extant & Introduced (resident)') |\n (geo_data['legend'] == 'Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Extant & Reintroduced (resident)') |\n (geo_data['legend'] == 'Extant & Vagrant (seasonality uncertain)') |\n (geo_data['legend'] == 'Extant (non breeding)') |\n (geo_data['legend'] == 'Extant (resident)') |\n (geo_data['legend'] == 'Probably Extant & Origin Uncertain (resident)') |\n (geo_data['legend'] == 'Probably Extant (resident)') |\n (geo_data['legend'] == 'Reintroduced')]\n range_polygons = range_polygons.merge(data, on='binomial')\n range_polygons = range_polygons.to_crs(\"EPSG:6933\")\n return range_polygons", "def process_data(data):\n nghds = find_expensive_neighborhoods(data, n=3, summary_statistic=np.mean)\n data = ( data.pipe(add_total_bathrooms)\n .pipe(add_power,'Gr_Liv_Area', 2)\n .pipe(add_power,'Garage_Area', 2)\n .pipe(add_expensive_neighborhood, nghds)\n .pipe(select_columns, ['SalePrice',\n 'Gr_Liv_Area',\n 'Garage_Area',\n 'Gr_Liv_Area2',\n 'Garage_Area2',\n 'TotalBathrooms',\n 'in_expensive_neighborhood']) )\n\n data.dropna(inplace = True)\n X = data.drop(['SalePrice'], axis = 1)\n X = standardize(X)\n y = data.loc[:, 'SalePrice']\n y = standardize(y)\n return X, y", "def detect_crossover(self):\n\t\n\n\t\tif self.trend == 0:\n\t\t\tif self.averages['fast'] < self.averages['slow']:\n\t\t\t\tself.trend = -1\n\t\t\telif self.averages['fast'] >= self.averages['slow']:\n\t\t\t\tself.trend = 1\n\t\telse: \n\t\t\tif self.trend == -1 and self.averages['fast'] >= self.averages['slow']:\n\t\t\t\tself.trend = 1\n\t\t\t\treturn {'action': 0, 'sType': self.strategyType}\n\t\t\telif self.trend == 1 and self.averages['fast'] <= self.averages['slow']:\n\t\t\t\tself.trend = -1\n\t\t\t\treturn {'action': 1, 'sType': self.strategyType}\n\t\t\telse:\n\t\t\t\treturn None # no crossover", "def Overtopping(self):\n\n #sort files\n leftOverTop = list()\n RightOverTop = list()\n # get names of files that has _left or _right at its end\n All1DFiles = os.listdir(self.OneDResultPath)\n for i in range(len(All1DFiles)) 
:\n if All1DFiles[i].endswith(self.leftOvertopping_Suffix):\n leftOverTop.append(All1DFiles[i])\n if All1DFiles[i].endswith(self.RightOvertopping_Suffix):\n RightOverTop.append(All1DFiles[i])\n\n # two dictionaries for overtopping left and right\n OverToppingSubsLeft = dict()\n OverToppingSubsRight = dict()\n # the _left and _right files has all the overtopping discharge\n # but sometimes the sum of all the overtopping is less than a threshold specified\n # and then the 2D algorithm does not run so these cross sections you will not find\n # any inundation beside it in the maps but you will find it in the _left or _right maps\n\n # for each sub-basin that has overtopping from the left dike\n for i in range(len(leftOverTop)):\n\n try:\n # open the file (if there is no column sthe file is empty)\n data = pd.read_csv(self.OneDResultPath + leftOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]] = dict()\n except:\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n for i in range(len(RightOverTop)):\n\n try:\n # open the file\n data = pd.read_csv(self.OneDResultPath + RightOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]] = dict()\n except :\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n self.OverToppingSubsLeft = OverToppingSubsLeft\n self.OverToppingSubsRight = OverToppingSubsRight", "def olympic_sprints(data_set='rogers_girolami_data'):\r\n X = np.zeros((0, 2))\r\n Y = np.zeros((0, 1))\r\n for i, dataset in enumerate([olympic_100m_men,\r\n olympic_100m_women,\r\n olympic_200m_men,\r\n olympic_200m_women,\r\n olympic_400m_men,\r\n olympic_400m_women]):\r\n data = dataset()\r\n year = data['X']\r\n time = data['Y']\r\n X = np.vstack((X, np.hstack((year, np.ones_like(year)*i))))\r\n Y = np.vstack((Y, time))\r\n data['X'] = X\r\n data['Y'] = Y\r\n data['info'] = \"Olympics sprint event winning for men and women to 2008. Data is from Rogers and Girolami's First Course in Machine Learning.\"\r\n return data_details_return({\r\n 'X': X,\r\n 'Y': Y,\r\n 'info': \"Olympics sprint event winning for men and women to 2008. 
Data is from Rogers and Girolami's First Course in Machine Learning.\",\r\n 'output_info': {\r\n 0:'100m Men',\r\n 1:'100m Women',\r\n 2:'200m Men',\r\n 3:'200m Women',\r\n 4:'400m Men',\r\n 5:'400m Women'}\r\n }, data_set)", "def ols_data():\n\n xs = [35.3, 29.7, 30.8, 58.8, 61.4, 71.3, 74.4, 76.7, 70.7, 57.5,\n 46.4, 28.9, 28.1, 39.1, 46.8, 48.5, 59.3, 70, 70, 74.5, 72.1,\n 58.1, 44.6, 33.4, 28.6]\n ys = [10.98, 11.13, 12.51, 8.4, 9.27, 8.73, 6.36, 8.50,\n 7.82, 9.14, 8.24, 12.19, 11.88, 9.57, 10.94, 9.58,\n 10.09, 8.11, 6.83, 8.88, 7.68, 8.47, 8.86, 10.36, 11.08]\n\n\n # self.Xk = 28.6\n # self.ypred_k = 0.3091\n solution = {'slope': -0.0798,\n 'y_intercept': 13.623,\n 'n': len(xs),\n 'pred_x': 28.6,\n 'pred_error': 0.309}\n\n return xs, ys, solution", "def get_over(self, filter_dict, percentage):\n pass", "def _crossover(self, sel):\n offspring = []\n for p1, p2 in sel:\n p1 = copy.deepcopy(p1)\n p2 = copy.deepcopy(p2)\n\n tmp = self.op.crossover(\n copy.deepcopy(p1['individual']),\n copy.deepcopy(p2['individual']))\n if not tmp[0] is None and not tmp[1] is None:\n c1 = {\n 'individual': tmp[0],\n 'fitness': self.op.fitness(tmp[0])\n }\n c2 = {\n 'individual': tmp[1],\n 'fitness': self.op.fitness(tmp[1])\n }\n\n offspring.append(\n c1 if c1['fitness'] < p1['fitness'] else p1)\n offspring.append(\n c2 if c2['fitness'] < p2['fitness'] else p2)\n else:\n offspring.extend((p1, p2))\n return offspring", "def Crossover_Function(data1, data2):\n\n # for this function, I modified the uniform crossover function to take care of duplicates after crossover.\n\n data1[1] = 0\n data2[1] = 0\n chromosome1 = list.copy(data1[0])\n chromosome2 = list.copy(data2[0])\n\n #print(\"\\nChromosomes before crossover - \")\n #print(chromosome1)\n #print(chromosome2)\n\n # for each index in both chromosomes, use a coin toss to determine which index is crossed over\n for i in range(len(chromosome1)):\n\n cointoss = random.randrange(2)\n if cointoss == 0:\n chromosome1[i], chromosome2[i] = chromosome2[i], chromosome1[i]\n\n # find duplicates after crossing over\n dupes_in_ch1 = list(duplicates(chromosome1))\n dupes_in_ch2 = list(duplicates(chromosome2))\n\n\n # handle duplicates if any are found\n for i in dupes_in_ch1:\n if i in chromosome1: chromosome1.remove(i)\n chromosome2.append(i)\n \n for i in dupes_in_ch2:\n if i in chromosome2: chromosome2.remove(i)\n chromosome1.append(i)\n\n # replaced the modified chromosomes in the data\n data1[0] = chromosome1\n data2[0] = chromosome2\n\n #print(\"\\nChromsomes after crossover - \")\n #print(data1[0])\n #print(data2[0])\n\n return [data1, data2]", "def occupation_distribution(data):", "def gen_categ(low=0, up=0):\n share_final = raw.copy()\n if low == 0:\n time = pd.Categorical(share_final.time)\n share_final = share_final.set_index([\"mergeid\", \"time\"])\n share_final[\"time\"] = time\n\n country = pd.Categorical(share_final.country)\n share_final[\"country\"] = country\n return share_final\n else:\n a = raw.loc[(raw[\"yrbirth\"] >= low) & (raw[\"yrbirth\"] <= up)]\n time = pd.Categorical(a.time)\n a = a.set_index([\"mergeid\", \"time\"])\n a[\"time\"] = time\n\n country = pd.Categorical(a.country)\n a[\"country\"] = country\n\n subsample = a.copy()\n\n return subsample", "def outlierCleaner(predictions, ages, net_worths):\n import operator\n cleaned_data = []\n temp = {}\n ### your code goes here\n \n for i in range(len(ages)):\n error = predictions[i] - net_worths[i]\n temp[i] = error\n #print temp\n sorted_x = sorted(temp.items(), key=operator.itemgetter(1))\n 
sorted_x.reverse() \n #print sorted_x\n ten_p = (int)(0.1*len(ages))\n poop = sorted_x[(ten_p):]\n \n for item in poop:\n idx = item[0]\n cleaned_data.append((ages[idx],net_worths[idx],item[1][0]))\n \n return cleaned_data", "def _cross_over(self,mp,cross_rate,eta):", "def manipulate_data(ds, var, predef_clim, predef_trnd, trn_yrs, all_yrs, \n apply_latw=True, apply_detrending=True, dropna=True):\n\n \n if((var=='SD')|(var=='sd')|(var=='snowc')): \n ds[var] = ds[var].where(ds[var]>=0, other=0.0)\n ds[var] = ds[var].where(ds[var]==0, other=1.0)\n #ds[var].values = Gauss_filter(ds[var].values, (0,3,3))\n \n \"\"\"\n if((var=='hgt')|(var=='z')|(var=='GPT')):\n months = ds.time.to_index().month; ssn_ends = (months==2)|(months==5)|(months==8)|(months==11)\n ds = ds.sel(time=ssn_ends)\n else: \n ds = ds.resample(time='3M').mean()\n \"\"\"\n \n ds = ds.resample(time='3M').mean()\n\n ds = ds.sel(time=slice(str(all_yrs[0])+'-01-01', str(all_yrs[-1])+'-12-31')) \n \n try: \n clim = predef_clim\n ds = ds.groupby('time.season') - clim\n print('Predefined climatology used')\n except:\n clim = ds.sel(time=slice(str(trn_yrs[0])+'-01-01', str(trn_yrs[-1])+'-12-31')).groupby('time.season').mean('time')\n ds = ds.groupby('time.season') - clim\n print('Climatology calculated from data')\n \n if(apply_latw): ds[var].values = lat_weighting(ds[var].values, \n ds.lat, ds.lon)\n if(dropna):\n ds = ds.stack(gridcell=('lat', 'lon')).dropna(dim='gridcell',how='any')\n else: \n ds = ds.stack(gridcell=('lat', 'lon')).fillna(0)\n \n \n trend_models = { }\n if(apply_detrending): \n ds = ds.load()\n for ssn in ('DJF', 'MAM', 'JJA', 'SON'):\n #ssn_idx = ds['time.season'] == ssn\n \n trn_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], trn_yrs))\n all_idx = bool_index_to_int_index(np.isin(ds['time.season'], ssn) & np.isin(ds['time.year'], all_yrs))\n \n trn_x = np.array(ds.time[trn_idx].values.tolist()).reshape(-1,1)\n all_x = np.array(ds.time[all_idx].values.tolist()).reshape(-1,1)\n try:\n trend = predef_trnd[ssn].predict(all_x)\n trend_models[ssn] = predef_trnd[ssn]\n print('Predefined trend model used')\n except:\n #_, trend_model = define_trends(ds[var][trn_idx], trn_x)\n _, trend_model = define_trends(ds[var][all_idx], all_x)\n trend = trend_model.predict(all_x)\n trend_models[ssn] = trend_model\n print('Trends calculated from data')\n \n ds[var][all_idx] = ds[var][all_idx] - trend\n \n\n \n return ds, clim, trend_models", "def CrossoverOX1(p1,p2):\n countryNo=len(p1)\n [start,end] = sorted(random.sample(range(1,countryNo),2))\n ch1 = [0]+[-1 for i in range(1,len(p1))]\n ch2 = [0]+[-1 for i in range(1,len(p1))]\n for i in range(1,countryNo):\n if i>=start and i<=end:\n ch1[i]=p1[i]\n ch2[i]=p2[i]\n for i in range(1,countryNo):\n if p2[i] not in ch1:\n ch1[ch1.index(-1)]=p2[i]\n for i in range(1,countryNo):\n if p1[i] not in ch2:\n ch2[ch2.index(-1)]=p1[i]\n return ch1, ch2", "def __init__(self, data):\n\n self.xs = np.array(sorted(data))\n self.N = float(len(self.xs))\n self.ys = np.arange(1, self.N + 1) / self.N", "def calc_win_lose_data(self):\n\n self.data_win = []\n self.data_lose = []\n\n for i, data_win in enumerate(self.data_sum_win):\n data = np.array(data_win)\n data = data/self.survival_sum[0]\n self.data_win.append(data.tolist())\n\n for i, data_lose in enumerate(self.data_sum_lose):\n data = np.array(data_lose)\n data = data/self.survival_sum[1]\n self.data_lose.append(data)", "def __init__(\n self, data: pd.DataFrame,\n n_picked: int, n_choices: int,\n vmin: int, 
vmax: int, n_train: int, hits_to_win: tuple\n ):\n self.data = data\n self.n_picked = n_picked\n self.n_choices = n_choices\n self.vmin = vmin\n self.vmax = vmax\n self.n_train = n_train\n self.hits_to_win = hits_to_win\n \n self.k_results = list(range(n_picked))\n self.run()", "def lower_covers(self, x):", "def identify_and_handel_outliers(self):\n col_list = [] # This will hold the column names created for the administration of the modified z-score test\n values_dropped = []\n cont_cols = self.df.select_dtypes(exclude=[\"category\"]).columns # Gets continous columns \n for col in cont_cols:\n#TODO: Add lines to check column len(), if len() == 0, drop drop column, create cont_cols and cat_cols, and drop from there as well. \n df_len = len(self.df)\n top_value = self.df[col].value_counts(normalize=True, ascending=False, dropna=True)\n top_value = top_value.head(1).reset_index().to_numpy()[0] #Gets the top occuring value along with its percentage of occurances\n if top_value[1] > 0.5:#Test if the top occuring value makes up more than 50% of the data\n remaining_col = self.df[col][~self.df[col].isin([top_value[0]])] #Gets all values not within the 50% of single value data\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(remaining_col) #Gets modified z-score for remaining items\n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0) #Fills all missing z-scores\\\n #with zero(because that 50% of data removed would be zero anyways)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n else:\n self.df[f\"{col}_mod_z\"] = phase_one_data_prep.modified_zscore(self.df[col]) #Gets modified z-score \n self.df[f\"{col}_mod_z\"] = self.df[f\"{col}_mod_z\"].fillna(0)\n self.df = self.df[self.df[f\"{col}_mod_z\"] < 3] #Removed all values outside 3\n col_list.append(f\"{col}_mod_z\")#Appends name of column to list\n values_dropped.append((col, df_len - len(self.df)))\n self.df.drop(columns = col_list, inplace=True)#Removed columns created to test modified z-score\n self.outliers_dropped = values_dropped", "def gen_nudge_obs(self, region_info):\n # first evaluate which are the variables and find data for each variable \n # what if there is nan value for the time period??\n # adjust the weights\n cutoff = 1e3 #when weights are less than cutoff, they will be set to zero\n weights = self.gen_region_weight(region_info['attribute'],\n region_info['vertices'])\n weights[weights<weights.max()/cutoff] = 0\n method = region_info['interpolant']['method']\n data = region_info['interpolant']['data']\n variable = region_info['interpolant']['variable']\n none_values = region_info['interpolant']['none_values']\n obs = self.read_data(data) \n weights_list = []\n imap_list = []\n values_list = []\n for v in variable:\n if v in obs.keys():\n vdata = obs[v] \n if region_info['type'] == 'single_obs': \n if np.any(np.isnan(vdata)): \n if none_values == 'interpolate': #other fillna options possible. 
\n if isinstance(vdata, pd.core.series.Series): # pandas array\n vdata = vdata.interpolate()\n elif isinstance(vdata, xr.core.dataarray.DataArray): #xarray\n vdata = vdata.interpolate_na(dim='time')\n weights_v = weights\n imap_v = np.where(weights_v>0)[0]\n values_v = np.broadcast_to(vdata.values,\n (len(imap_v),\n len(vdata))).T\n elif none_values == 'ignore': #ignore the entire series when there is any none\n weights_v = np.zeros(weights)\n imap_v = np.array([]) # empty array\n values_v = np.array([])\n else:\n weights_v = weights\n imap_v = np.where(weights_v>0)[0]\n values_v = np.broadcast_to(vdata.values,\n (len(imap_v),\n len(vdata))).T \n elif region_info['type'] == 'multi_obs': #multiple observational points\n # multiple input should be stored in netcdf format only. \n if np.any(np.isnan(vdata)): \n vdata = vdata.dropna(dim='site',how='all')\n if none_values == 'interpolate':\n vdata = vdata.interpolate_na(dim='time')\n elif none_values == 'ignore':\n vdata = vdata.dropna(dim='site',how='any')\n \n inc_site = []\n for s in obs.site.values:\n if s in vdata.site.values:\n inc_site.append(True)\n else:\n inc_site.append(False)\n weights_v = weights[inc_site,:].sum(axis=0)\n else:\n weights_v = weights\n imap_v = np.where(weights_v>0)[0] \n obs_x = obs.x.sel(site=vdata.site).values\n obs_y = obs.y.sel(site=vdata.site).values \n \n if method =='nearest':\n nn_id = []\n for nx, ny in zip(self.node_x[imap_v],\n self.node_y[imap_v]):\n dist = (nx-obs_x)**2 + (ny-obs_y)**2\n nn_id.append(np.where(dist==dist.min())[0][0])\n values_v = vdata.isel(site=nn_id).values \n elif method == 'inverse_distance':\n obs_loc = np.array([obs_x,obs_y]).T\n values_v = []\n for t in vdata.time:\n vals = vdata.sel(time=t).values\n invdisttree = Interp2D.Invdisttree(obs_loc, vals,\n leafsize=10, stat=1)\n node_xy = np.array([self.node_x[imap_v],\n self.node_y[imap_v]]).T\n values_v.append(invdisttree(node_xy, nnear=4, p=2))\n else:\n raise NotImplementedError \n else:\n weights_v = []\n values_v = []\n imap_v = []\n weights_list.append(weights_v)\n values_list.append(values_v)\n imap_list.append(imap_v) \n \n return weights_list, values_list, imap_list", "def upper_covers(self, x):", "def forbes():\n oldest = {}\n youngest = {}\n\n for entry in data:\n age = entry['age']\n if not oldest or (age > oldest['age']) and (age < 80):\n oldest.update(entry)\n if not youngest or (age < youngest['age']) and (age > 0):\n youngest.update(entry)\n return \"\"\"\nOldest: Name: %s, Net Worth: %d, Industry: %s , Age: %s\nYoungest: Name: %s, Net Worth: %d, Industry: %s, Age: %s\n\"\"\" % (\n oldest['name'], oldest['net_worth (USD)'], oldest['source'], oldest['age'],\n youngest['name'], youngest['net_worth (USD)'], youngest['source'], youngest['age'],\n )", "def group_by_threshold_02(temp_odds):\n temp_handicaps_per_threshold = defaultdict(int)\n for temp_odd in temp_odds:\n try:\n last_update = temp_odd.last_update\n odd_value = temp_odd.value\n label = temp_odd.label\n except Exception as e:\n logger.data_error('%s', e)\n continue\n\n label_raw_values = label.split(':')\n label_values = []\n for label_value in label_raw_values:\n label_value = label_value.strip(' ')\n label_values.append(label_value)\n choice = label_values[0]\n # logger.debug('handicap extracted values: %s', label_values)\n\n try:\n threshold = float(label_values[1])\n except Exception as e: # ValueError, IndexError, float error if threshold = None\n logger.data_error('odd parsing, error on parsing the threshold value from label field: %s, %s', label, e)\n 
continue\n\n if not temp_handicaps_per_threshold[threshold]:\n temp_handicaps_per_threshold[threshold] = TempHandicap(threshold=threshold, last_update=last_update)\n if not temp_handicaps_per_threshold[-threshold]:\n # initialize it here so that we can add the away odd\n temp_handicaps_per_threshold[-threshold] = TempHandicap(threshold=-threshold, last_update=last_update)\n\n # logger.debug('choice: %s, threshold: %s', choice, threshold)\n if choice == '1':\n temp_handicaps_per_threshold[threshold].home = odd_value\n elif choice == '2':\n temp_handicaps_per_threshold[-threshold].away = odd_value\n elif choice == 'X':\n temp_handicaps_per_threshold[threshold].draw = odd_value\n else:\n logger.data_error('odd parsing, unknown sportmonks label %s', label)\n # logger.debug('list of temp handicaps: %s', temp_handicaps_per_threshold.values())\n return temp_handicaps_per_threshold.itervalues()", "def sway(data):\n\n def worker(rows, worse, above=[]):\n if len(rows) <= len(data[\"rows\"]) ** globals.the[\"min\"]:\n return rows, utils.many(worse, globals.the[\"rest\"] * len(rows))\n else:\n l, r, A, B, _ = clustering.half(data, rows, None, above)\n\n if query.better(data, B, A):\n l, r, A, B = r, l, B, A\n\n def function(row):\n return worse.append(row)\n\n list(map(function, r))\n\n return worker(l, worse, A)\n\n best, rest = worker(data[\"rows\"], [])\n\n return creation.clone(data, best), creation.clone(data, rest)", "def build_raw_xy_data(params, fold, sub_list):\n # Some repeated code from load_data : not super smart\n X = get_raw_x_data(params, fold, subject_list=sub_list)\n XZ = np.array(X)\n Y = []\n if params[\"data_source\"] == \"ABIDE\":\n classified_file = open(\n \"/scratch/mmahaut/scripts/INT_fMRI_processing/url_preparation/subs_list_asd_classified.json\"\n ) # Hardwriten non-modifiable paths in script is bad practice. 
modify later !\n classified_dict = json.load(classified_file)\n # no normalisation step (which kind of seems legit for classification)\n for key in classified_dict:\n Y.append(1 if classified_dict[key] == \"asd\" else 0)\n elif params[\"data_source\"] == \"interTVA\":\n # Hardcoding this array is probably not the most reusable solution...\n # Error 1 found on 30/07/2020 : bad correspondance between subject file and hardcoded Y,\n # subjects in subject file were not in the same order\n Y = [\n 81.25,\n 81.25,\n 93.75,\n 93.75,\n 93.75,\n 62.5,\n 81.25,\n 100,\n 100,\n 87.5,\n 87.5,\n 68.75,\n 68.75,\n 87.5,\n 93.75,\n 100,\n 62.5,\n 87.5,\n 93.75,\n 87.5,\n 81.25,\n 81.25,\n 81.25,\n 93.75,\n 50,\n 62.5,\n 93.75,\n 81.25,\n 81.25,\n 87.5,\n 68.75,\n 81.25,\n 87.5,\n 87.5,\n 87.5,\n 75,\n 93.75,\n 93.75,\n 93.75,\n ]\n x = np.array(Y)\n YZ = (x - min(x)) / (max(x) - min(x))\n return XZ, YZ", "def prepare_data(self):\r\n annual_df = self.annual_df\r\n coef_df = self.coef_df\r\n quarter_df = self.quarter_df\r\n # historical_df = self.historical_df\r\n Event_Buffer = self.Event_Buffer\r\n\r\n Tot_Prod = coef_df[\"Product\"].nunique()\r\n # Tot_Week = coef_df[\"wk\"].nunique()\r\n Tot_Week = 52\r\n\r\n EDLP_Events = list(annual_df[\"RP_Events\"])\r\n Min_EDLP_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in EDLP_Events\r\n ]\r\n Max_EDLP_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in EDLP_Events\r\n ]\r\n\r\n TPR_Events = list(annual_df[\"TPR_Events\"])\r\n Min_TPR_Events = [\r\n i - Event_Buffer if i - Event_Buffer >= 0 else 0 for i in TPR_Events\r\n ]\r\n Max_TPR_Events = [\r\n i + Event_Buffer if i + Event_Buffer < Tot_Week + 1 else Tot_Week\r\n for i in TPR_Events\r\n ]\r\n\r\n Target_EDLP_Spend = [i for i in annual_df[\"PPG_RP_Spend\"]]\r\n Target_TPR_Spend = [i for i in annual_df[\"PPG_TPR_Spend\"]]\r\n Target_Trade_Spend = [i for i in annual_df[\"PPG_Total_Spend\"]]\r\n\r\n Mapping = {}\r\n Prod_Ind = coef_df[\"Product\"][0:Tot_Prod]\r\n for i, j in zip(Prod_Ind.index, Prod_Ind.values):\r\n Mapping[j] = i\r\n Mapping_reverse = {i: j for j, i in Mapping.items()}\r\n\r\n constants = [i for i in coef_df[\"constant\"]]\r\n\r\n Cat_Coef = coef_df[\"Catalogue\"][0:Tot_Prod]\r\n\r\n Disp_Coef = coef_df[\"Display\"][0:Tot_Prod]\r\n\r\n Base_Price_stg1 = [i for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg1 = []\r\n for pr in range(Tot_Prod):\r\n Intercepts_stg1.append(\r\n np.mean([constants[j * Tot_Prod + pr] for j in range(0, Tot_Week)])\r\n )\r\n\r\n Base_Price_stg2 = [[i] * Tot_Week for i in quarter_df[\"Final_baseprice\"]]\r\n Intercepts_stg2 = [\r\n constants[j : j + Tot_Prod] for j in range(0, len(constants), Tot_Prod)\r\n ] # noqa\r\n\r\n EDLP_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Regular\") == 1]]\r\n )\r\n TPR_Coef = np.array(\r\n coef_df[[i for i in coef_df.columns if i.count(\"Retailer_Promoted\") == 1]]\r\n )\r\n\r\n # ################################ Available EDLP Interactions pairs ##############################\r\n\r\n EDLP = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if i.count(\"Retailer_Regular\") > 1\r\n ]\r\n EDLP_Interactions = []\r\n for i in EDLP:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n EDLP_Interactions.append(temp)\r\n\r\n # ###################################### Available TPR Interactions pairs #########################\r\n\r\n TPR = [\r\n re.findall(r\"[0-9]+\", i)\r\n for i in coef_df.columns\r\n if 
i.count(\"Retailer_Promoted\") > 1\r\n ]\r\n TPR_Interactions = []\r\n for i in TPR:\r\n temp = []\r\n for j in i:\r\n temp.append(int(j))\r\n TPR_Interactions.append(temp)\r\n\r\n # ###################################### EDLP_Interaction_Coef_Values ############################\r\n\r\n EDLP_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Regular\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n EDLP_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ###################################### TPR_Interaction_Coef_Values #############################\r\n\r\n TPR_Int_Coef_Values = {}\r\n for col in coef_df.columns:\r\n if col.count(\"Retailer_Promoted\") > 1:\r\n Pair_name = \"_\".join([str(int(i)) for i in re.findall(r\"[0-9]+\", col)])\r\n TPR_Int_Coef_Values[Pair_name] = list(coef_df[col])\r\n\r\n # ##################################### Loading Pantry Loading Coefficients #######################\r\n\r\n Pantry_1 = list(coef_df[\"Pantry_Loading_1\"])\r\n Pantry_1 = [\r\n Pantry_1[j : j + Tot_Prod] for j in range(0, len(Pantry_1), Tot_Prod)\r\n ]\r\n Pantry_2 = list(coef_df[\"Pantry_Loading_2\"])\r\n Pantry_2 = [\r\n Pantry_2[j : j + Tot_Prod] for j in range(0, len(Pantry_2), Tot_Prod)\r\n ]\r\n\r\n # TE_Coeff = np.array(Promo_df[[\"TE_Promo\",\"TE_NoPromo\"]])\r\n self.Tot_Prod = Tot_Prod\r\n self.Tot_Week = Tot_Week\r\n self.EDLP_Events = EDLP_Events\r\n self.Min_EDLP_Events = Min_EDLP_Events\r\n self.Max_EDLP_Events = Max_EDLP_Events\r\n self.TPR_Events = TPR_Events\r\n self.Min_TPR_Events = Min_TPR_Events\r\n self.Max_TPR_Events = Max_TPR_Events\r\n\r\n self.Target_EDLP_Spend = Target_EDLP_Spend\r\n self.Target_TPR_Spend = Target_TPR_Spend\r\n self.Target_Trade_Spend = Target_Trade_Spend\r\n self.Mapping = Mapping\r\n self.Mapping_reverse = Mapping_reverse\r\n self.constants = constants\r\n self.EDLP_Coef = EDLP_Coef\r\n self.TPR_Coef = TPR_Coef\r\n\r\n self.EDLP_Interactions = EDLP_Interactions\r\n self.TPR_Interactions = TPR_Interactions\r\n self.EDLP_Int_Coef_Values = EDLP_Int_Coef_Values\r\n self.TPR_Int_Coef_Values = TPR_Int_Coef_Values\r\n self.Pantry_1 = Pantry_1\r\n self.Pantry_2 = Pantry_2\r\n\r\n self.Base_Price_stg1 = Base_Price_stg1\r\n self.Intercepts_stg1 = Intercepts_stg1\r\n self.Base_Price_stg2 = Base_Price_stg2\r\n self.Intercepts_stg2 = Intercepts_stg2\r\n\r\n self.Cat_Coef = Cat_Coef\r\n self.Disp_Coef = Disp_Coef", "def my_featurize(apartment):\n col =np.array([1, 2, 0, 0, 0, 0, 0, 0 ])\n a= pd.DataFrame(apartment[col])\n if(apartment.get('condition')== 'good'):\n col[1] =1\n else:\n if(apartment.get('condition')== 'zero condition'):\n col[1] = 0\n col[2] =apartment.get('num_rooms')\n col[3] =apartment.get('area')\n col[4] =apartment.get('num_bathrooms')\n col[5] =apartment.get('floor')\n col[6] =apartment.get('ceiling_height')\n col[7] =apartment.get('max_floor')\n\n return col, apartment['price']", "def gricells_to_adm0(myC = 'CO', _haz='PF', src = 'pop_affected.csv'):\n global _df\n # Find the pop/space affected\n var = src[:src.index('_')]\n # Get the return period x basin data\n _df = pd.read_csv(src).drop(var, axis = 1)\n # Assign names to the indices\n _df.index.name = 'gridcell'\n _df.columns.name = 'rp'\n # assign dtypes\n _df.columns = _df.columns.astype(int)\n # get a basin,rp index\n _df = _df.stack().to_frame()\n global rps,inv_rps\n # Get a list of RPS\n rps = list(_df.index.levels[1].astype(int))\n # If the first rp isn't 1, then add it to the beginning and assume that there 
isn't any damage\n if rps[0] != 1.: rps = np.append([1.],[rps])\n # Calculate inverse RPS\n inv_rps = [1/i for i in rps]\n # Calculate final rps... any reason why this is missing 5?\n final_rps = [1, 20, 50, 100,250, 500, 1000,1500,2000]\n # Get an empty dataframe with country, final rps as the x axis\n final_exceedance = pd.DataFrame(index= pd.MultiIndex.from_product([[myC],final_rps]))\n # Set loss to None\n final_exceedance['loss'] = None\n # create dataframe to store random numbers\n loss = pd.DataFrame(index=_df.sum(level='gridcell').index).reset_index()\n loss['myC'] = myC\n loss.set_index(['myC','gridcell'], inplace = True)\n lossc = loss.sum(level = 'myC')\n loss = loss.reset_index().set_index('myC')\n\n # generate random numbers\n NYECOS = int(1E4) # <-- any multiple of 10K\n for _yn in range(NYECOS):\n loss['_'] = [np.random.uniform(0,1) for i in range(loss.shape[0])]\n loss['y'+str(_yn)] = loss.apply(lambda x:random_to_loss(x.gridcell,x['_']),axis=1)\n\n if _yn != 0 and (_yn+1)%500 == 0:\n\n lossc = pd.concat([lossc,loss.drop('_',axis=1).sum(level='myC')],axis=1)\n loss = loss[['gridcell']]\n print(_yn+1)\n\n for _reg in loss.index.values:\n aReg = lossc.loc[_reg].sort_values(ascending=False).reset_index()\n\n for _frp in final_rps:\n final_exceedance.loc[(_reg,_frp),'loss'] = float(aReg.iloc[int((NYECOS-1)/_frp)][_reg])\n\n total_pop = pd.read_csv('{}_affected.csv'.format(var))[var].sum()\n (final_exceedance/total_pop).to_csv('../inputs/'+myC+'regional_exceedance_'+_haz+src[:2]+'.csv')", "def source_gen(stellar, threshold):\n source = []\n for i in stellar:\n if i[2] > threshold:\n source.append(NewPoint(i))\n \n #sort objects by x-axis\n source.sort(key=lambda x: x[1])\n source.sort(key=lambda x: x[0])\n return source", "def random_points_ascending_hillclimber(house, all_houses, waters, total_value_map):\n total_value_map_NEW = total_value_map\n\n # check in welke range het huis geplaats kan worden, niet kijkend naar water of andere \n rangex = MAXIMUM_WIDTH - house.width\n rangey = MAXIMUM_HEIGHT - house.length\n\n for x in range(100):\n # maak random x en y coördinaat\n randomizex = rangex * random()\n randomizey = rangey * random()\n\n # bewaar oude locaties\n tempx = house.bottom_left[0]\n tempy = house.bottom_left[1]\n \n # verander locatie\n bottom_left = (randomizex,randomizey)\n house.location(bottom_left)\n\n # als je je huis op nieuwe locatie kan plaatsen\n if place_house(house, all_houses, waters) == True:\n # bereken nieuw waarde map, waarin huis is verplaatst\n total_value_map_temp = 0\n for item in all_houses.values():\n for house in item:\n house.extra_meters()\n total_value_map_temp += house.totalprice()\n\n # als waarde met nieuwe locatie hoger is, verander deze\n if total_value_map_NEW < total_value_map_temp:\n total_value_map_NEW = total_value_map_temp\n # als waarde niet hoger is verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n # als huis niet geplaats kan worden, verander naar oude locatie en bereken weer totale waarde map\n else:\n bottom_left = (tempx,tempy)\n house.location(bottom_left)\n if place_house(house, all_houses, waters) == True:\n for item in all_houses.values():\n for houses in item:\n houses.extra_meters()\n houses.totalprice()\n\n return all_houses, total_value_map_NEW", "def process_O2(x, lb, ub):\n x = 
x.abs()\n x.loc[(x <= 1) & (x > 0)] = x.loc[(x <= 1) & (x > 0)] * 100\n x.loc[(x <= 10) & (x > 1)] = x.loc[(x <= 10) & (x > 1)] * 10\n x.loc[(x <= lb ) | (x > ub)] = np.nan\n return x", "def crossover(self, parents):\n\n randomCategory = random.sample(list(ga_.Category), 1)[0]\n randomParent1 = random.sample(parents, 1)[0]\n randomParent2 = None\n for parent in parents:\n if parent != randomParent1:\n randomParent2 = parent\n \n\n # put randomCategory from random parent to the new offpring and the remainder from the second parent\n offspring = ga_.Outfit()\n if randomCategory == ga_.Category.TOP:\n offspring.top = randomParent1.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.BOTTOM:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent1.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.SHOES:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent1.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.NECK:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent1.neck\n offspring.handbag = randomParent2.handbag\n elif randomCategory == ga_.Category.HANDBAG:\n offspring.top = randomParent2.top\n offspring.bottom = randomParent2.bottom\n offspring.shoes = randomParent2.shoes\n offspring.neck = randomParent2.neck\n offspring.handbag = randomParent1.handbag\n\n return offspring", "def getInfoDrum(self, l):\n for i in range(0, len(l)):\n for j in range(len(l[i].info.oameni)):\n l[i].info.oameni[j].timp_asteptat = 0\n l[i].info.oameni[j].timp_mers = 0\n l[i].info.oameni[j].traseu = None\n \n for i in range(1, len(l)):\n for j in range(len(l[i].info.oameni)):\n index_prev = l[i-1].info.getOmIndex(l[i].info.oameni[j].name)\n if l[i].info.oameni[j].state != l[i-1].info.oameni[index_prev].state:\n if l[i-1].info.oameni[index_prev].state == \"waiting\" and l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n elif l[i-1].info.oameni[index_prev].state == \"travelling\" and l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n else:\n if l[i].info.oameni[j].state == \"travelling\":\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat\n elif l[i].info.oameni[j].state == \"waiting\":\n l[i].info.oameni[j].timp_asteptat = l[i-1].info.oameni[index_prev].timp_asteptat + l[i].info.time - l[i-1].info.time\n l[i].info.oameni[j].timp_mers = l[i-1].info.oameni[index_prev].timp_mers\n\n for i in range(1, len(l)):\n if l[i].info.event.tip == \"boarding\":\n temp_traseu = [l[i].info.event.om.current_loc]\n j = i\n while j < len(l) and (l[j].info.event.tip != \"unboarding\" or l[j].info.event.om.name != 
l[i].info.event.om.name):\n j += 1\n if j >= len(l):\n return None\n unboarding_loc = l[j].info.event.om.current_loc\n direction = l[i].info.event.autobuz.direction_forward\n index_boarding = l[i].info.event.autobuz.getIndexLoc(temp_traseu[0])\n index_unboarding = l[i].info.event.autobuz.getIndexLoc(unboarding_loc)\n if direction:\n for x in range(index_boarding+1, index_unboarding+1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n else:\n for x in range(index_boarding-1, index_unboarding-1, -1):\n temp_traseu.append(l[i].info.event.autobuz.destinations[x])\n l[i].info.oameni[l[i].info.getOmIndex(l[i].info.event.om.name)].traseu = temp_traseu\n for x in range(i, j):\n index = l[x].info.getOmIndex(l[i].info.event.om.name)\n assert index != None\n l[x].info.oameni[index].traseu = temp_traseu\n\n return l", "def build_xy_data(params, dimension, fold, sub_list):\n XZ = get_x_data(params, dimension, fold, subject_list=sub_list)\n Y = []\n if params[\"data_source\"] == \"ABIDE\":\n classified_file = open(\n \"/scratch/mmahaut/scripts/INT_fMRI_processing/url_preparation/subs_list_asd_classified.json\"\n ) # Hardwriten non-modifiable paths in script is bad practice. modify later !\n classified_dict = json.load(classified_file)\n # no normalisation step (which kind of seems legit for classification)\n for key in classified_dict:\n Y.append(1 if classified_dict[key] == \"asd\" else 0)\n elif params[\"data_source\"] == \"interTVA\":\n # Hardcoding this array is probably not the most reusable solution...\n # Error 1 found on 30/07/2020 : bad correspondance between subject file and hardcoded Y,\n # subjects in subject file were not in the same order\n Y = [\n 81.25,\n 81.25,\n 93.75,\n 93.75,\n 93.75,\n 62.5,\n 81.25,\n 100,\n 100,\n 87.5,\n 87.5,\n 68.75,\n 68.75,\n 87.5,\n 93.75,\n 100,\n 62.5,\n 87.5,\n 93.75,\n 87.5,\n 81.25,\n 81.25,\n 81.25,\n 93.75,\n 50,\n 62.5,\n 93.75,\n 81.25,\n 81.25,\n 87.5,\n 68.75,\n 81.25,\n 87.5,\n 87.5,\n 87.5,\n 75,\n 93.75,\n 93.75,\n 93.75,\n ]\n y = np.array(Y)\n YZ = (y - min(y)) / (max(y) - min(y))\n return XZ, YZ", "def add_seller_house(houses:pd.DataFrame) -> pd.DataFrame:\n houses[SELLER_HOUSE]= 2\n above= houses[DAYS_ON_MARKET] >= THRESHOLD\n below= (houses[DAYS_ON_MARKET] < THRESHOLD)\n sold= (houses[STATUS] == SOLD)\n s1= [x and y for x,y in zip(above, sold)]\n s2= [x and y for x,y in zip(below, sold)]\n houses.loc[s1, SELLER_HOUSE]= 1\n houses.loc[s2, SELLER_HOUSE]= 0\n return houses[houses.SELLER_HOUSE != 2]", "def paretoOptimize(data,aspects):\n\n if (len(aspects) < 2):\n print(\"Need at least two fields to build paretofront!\");\n sys.exit(-1)\n\n if any([x[0] not in ['>','<'] for x in aspects]):\n print(\"Aspects must indicate minimization (<) or maximization (>) before name\")\n sys.exit(-2)\n \n better = lambda row1,row2 : all([_better_aspect(row1,row2,aspect) for aspect in aspects])\n\n pareto = list()\n for r in data:\n for d in data:\n if d != r and better(d,r): break\n else:\n pareto.append(r)\n return pareto", "def dataset_spl_zeropadding(data):\n\n max_len = 70 # one knot every 20 days (1096 days max) + stab knots (10) --> definitely less than 70 knots in total\n\n zp_data = data.loc[:, [u\"spl_0_t\", u\"spl_0_c\",\n u\"spl_1_t\", u\"spl_1_c\",\n u\"spl_2_t\", u\"spl_2_c\",\n u\"spl_3_t\", u\"spl_3_c\",\n u\"spl_4_t\", u\"spl_4_c\",\n u\"spl_5_t\", u\"spl_5_c\"\n ]\n ].values\n\n # Zero-padding using Numpy and reshape in 1d vector [:,data]\n zp_data = np.asarray([\n [np.pad(a, (0, max_len - len(a)), 'constant', 
constant_values=0)\n for a in item]\n for item in zp_data])\n\n\n zp_data = zp_data.reshape(zp_data.shape[0], -1)\n zp_data = np.c_[zp_data, data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz']].values]\n\n\n # Normalise data to be determined\n # Load labels and convert to integer\n labels = data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return[zp_data,labels]", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n temp=[]\n ### your code goes here\n for x in xrange(len(predictions)):\n cleaned_data.append((ages[x],net_worths[x],abs(net_worths[x]-predictions[x])))\n \n cleaned_data.sort(key= lambda tup : tup[2], reverse= False)\n cleaned_data=cleaned_data[:81]\n print(len(cleaned_data))\n return cleaned_data", "def test_replicating_variance_swap(self):\n\n replicating_option_data = [\n {'type':OptionType.Put, 'strike':50, 'v':0.30},\n {'type':OptionType.Put, 'strike':55, 'v':0.29},\n {'type':OptionType.Put, 'strike':60, 'v':0.28},\n {'type':OptionType.Put, 'strike':65, 'v':0.27},\n {'type':OptionType.Put, 'strike':70, 'v':0.26},\n {'type':OptionType.Put, 'strike':75, 'v':0.25},\n {'type':OptionType.Put, 'strike':80, 'v':0.24},\n {'type':OptionType.Put, 'strike':85, 'v':0.23},\n {'type':OptionType.Put, 'strike':90, 'v':0.22},\n {'type':OptionType.Put, 'strike':95, 'v':0.21},\n {'type':OptionType.Put, 'strike':100, 'v':0.20},\n {'type':OptionType.Call, 'strike':100, 'v':0.20},\n {'type':OptionType.Call, 'strike':105, 'v':0.19},\n {'type':OptionType.Call, 'strike':110, 'v':0.18},\n {'type':OptionType.Call, 'strike':115, 'v':0.17},\n {'type':OptionType.Call, 'strike':120, 'v':0.16},\n {'type':OptionType.Call, 'strike':125, 'v':0.15},\n {'type':OptionType.Call, 'strike':130, 'v':0.14},\n {'type':OptionType.Call, 'strike':135, 'v':0.13},\n ]\n\n dates = [self.ex_date]\n\n call_strikes, put_strikes, call_vols, put_vols = [], [], [], []\n\n # Assumes ascending strikes and same min call and max put strikes\n for data in replicating_option_data:\n if data['type'] == OptionType.Call:\n call_strikes.append(data['strike'])\n call_vols.append(data['v'])\n elif data['type'] == OptionType.Put:\n put_strikes.append(data['strike'])\n put_vols.append(data['v'])\n else:\n raise ValueError(\"unknown option type\")\n\n vols = np.zeros((len(replicating_option_data)-1, 1))\n strikes = []\n for j, v in enumerate(put_vols):\n vols[j][0] = v\n strikes.append(put_strikes[j])\n\n for k in range(1,len(call_vols)):\n j = len(put_vols)-1\n vols[j+k][0] = call_vols[k]\n strikes.append(call_strikes[k])\n\n vols_mat = Matrix.from_ndarray(vols)\n\n vol_ts = BlackVarianceSurface(self.today, NullCalendar(), dates, strikes,\n vols_mat, self.dc)\n\n\n stoch_process = BlackScholesMertonProcess(self.spot, self.q_ts, self.r_ts,\n vol_ts)\n\n engine = ReplicatingVarianceSwapEngine(stoch_process,\n call_strikes,\n put_strikes,\n 5.0)\n\n variance_swap = VarianceSwap(self.values['type'],\n self.values['strike'],\n self.values['nominal'],\n self.today,\n self.ex_date,\n )\n\n variance_swap.set_pricing_engine(engine)\n\n calculated = variance_swap.variance\n expected = self.values['result']\n\n self.assertAlmostEqual(calculated, expected, delta=self.values['tol'])", "def make_data_vectors(data):\n result_dict = {'adjusted_close': list(), \n 'raw_close': list(),\n 'high': list(),\n 'low': 
list(),\n 'open':list(),\n 'volume': list(),\n 'date': list()\n }\n for datum in data:\n ratio = get_adjusted_ratio(datum)\n \n result_dict['adjusted_close'].append(datum['Adj Clos']) if \\\n 'Adj Clos' in datum else result_dict['adjusted_close'].append(None)\n result_dict['raw_close'].append(datum['Close'] * ratio) if \\\n 'Close' in datum else result_dict['raw_close'].append(None)\n result_dict['high'].append(datum['High'] * ratio) if \\\n 'High' in datum else result_dict['high'].append(None)\n result_dict['low'].append(datum['Low'] * ratio) if 'Low' in datum \\\n else result_dict['low'].append(None)\n result_dict['open'].append(datum['Open'] * ratio) if 'Open' in datum \\\n else result_dict['open'].append(None)\n result_dict['volume'].append(datum['Volume']) if 'Volume' in datum \\\n else result_dict['volume'].append(None)\n result_dict['date'].append(datum['date']) if 'date' in datum \\\n else result_dict['date'].append(None)\n\n volume = np.array(result_dict['volume'])\n adj_close = np.array(result_dict['adjusted_close'])\n close = np.array(result_dict['raw_close'])\n high = np.array(result_dict['high'])\n low = np.array(result_dict['low'])\n open_p = np.array(result_dict['open'])\n date = np.array(result_dict['date'])\n\n return {'open':open_p, 'close':close, 'low':low, \n 'high':high, 'adj_close':adj_close, \n 'volume':volume, 'date':date}", "def apply(self):", "def aging_landscape(self):\n for herb in self.herb_pop:\n herb.aging_animal()\n herb.fitness_animal()\n\n for carn in self.carn_pop:\n carn.aging_animal()\n carn.fitness_animal()", "def skater_preprocessing(pos):\n even_cols = ['player', 'player_id', 'season', 'toi_on', 'corsi_f', 'corsi_a', 'goals_f', 'goals_a']\n all_cols = ['player', 'player_id', 'season', 'toi_on', 'goals', 'a1', 'a2', 'icors', 'iblocks', 'pend', 'pent',\n 'ifac_win', 'ifac_loss', 'games']\n\n with open(\"skaters/{}_even.json\".format(pos)) as file_even:\n df_even = pd.DataFrame(json.load(file_even)['data'], columns=even_cols)\n\n # Convert from string to float for some reason\n for col in [\"toi_on\", \"corsi_f\", \"corsi_a\"]:\n df_even[col] = df_even[col].astype(float)\n df_even = df_even.groupby(['player', 'player_id', 'season'], as_index=False).sum()\n df_even = df_even.sort_values(['player', 'player_id', 'season'])\n\n with open(\"skaters/{}_all_sits.json\".format(pos)) as file_all_sits:\n df_all_sits = pd.DataFrame(json.load(file_all_sits)['data'], columns=all_cols)\n df_all_sits['toi_on'] = df_all_sits['toi_on'].astype(float)\n df_all_sits = df_all_sits.groupby(['player', 'player_id', 'season'], as_index=False).sum()\n df_all_sits = df_all_sits.sort_values(['player', 'player_id', 'season'])\n\n # Just transfer over corsi straight to All Situations\n df_all_sits['corsi_f'] = df_even['corsi_f']\n df_all_sits['corsi_a'] = df_even['corsi_a']\n df_all_sits['goals_f'] = df_even['goals_f']\n df_all_sits['goals_a'] = df_even['goals_a']\n df_all_sits['even_toi_on'] = df_even['toi_on']\n\n df_all_sits['gs'] = (.75 * df_all_sits['goals']) + (.7 * df_all_sits['a1']) + (.55 * df_all_sits['a2'])\\\n + (.049 * df_all_sits['icors']) + (.05 * df_all_sits['iblocks']) + (.15 * df_all_sits['pend'])\\\n - (.15 * df_all_sits['pent']) + (.01 * df_all_sits['ifac_win']) - (.01 * df_all_sits['ifac_win'])\\\n + (.05 * df_all_sits['corsi_f']) - (.05 * df_all_sits['corsi_a']) + (.15 * df_all_sits['goals_f'])\\\n - (.15 * df_all_sits['goals_a'])\n\n # Get Per 60\n df_all_sits['gs60'] = df_all_sits['gs'] * 60 / df_all_sits['toi_on']\n\n # Toi per game\n df_all_sits['toi/gp'] = 
df_all_sits['toi_on'] / df_all_sits['games']\n\n return df_all_sits", "def make_obstab_era5fb_dic(self, dataset = '' , date_time = '', File = ''):\n index_offset = self.unique_dates[dataset][File]['index_offset']\n \n # Removing the index_offset, which is defined only if any slicing was done \n index = self.unique_dates[dataset][File]['indices'][date_time]['low'] - index_offset\n index_up = self.unique_dates[dataset][File]['indices'][date_time]['up'] - index_offset\n \n obs_dic = {} \n for v in self.observations_table_vars:\n obs_dic[v] = data[dataset][File]['observations_table'][v][index:index_up]\n #print('v is : ', v )\n\n \"\"\" Loop over the obs_tab to find duplicates.\n I fill a dictionary for each distinct pressure level, and I put inside\n the observed_variable number.\n If the list lready contains the combination pressure level - observed variable,\n then the record is skipped \"\"\"\n\n indices = [] # these are the only non-duplicates to be kept\n\n already_selected = { }\n \n #print('starting the loop: ' , date_time, ' ' , dataset, ' ', index, ' ' , index_up)\n for p,var,val,ind in zip ( obs_dic['z_coordinate'] , obs_dic['observed_variable'],obs_dic['observation_value'] ,range(len(obs_dic['z_coordinate'])) ):\n #print(p,var,val,ind)\n #if date_time > 2354300000:\n # print('looping :::', var, ' ' , val, ' ' , ind , ' ', dataset, ' ' , index_up, ' ' , index, ' ', File)\n \n if self.only_std_plevels:\n if p not in self.std_plevs:\n continue \n\n \n if p not in already_selected.keys():\n already_selected[p] = []\n \n \n if np.isfinite(val):\n if var not in already_selected[p]:\n already_selected[p].append(var)\n indices.append(ind) # record to be kept\n else:\n pass\n else: # skipping nans\n pass\n\n #print('done with the loop')\n red_obs_dic = {} # dictionary for the reduced (removed duplicates) obs_tab\n for v in self.observations_table_vars:\n red_obs_dic[v] = obs_dic[v][indices]\n\n ''' Simply returns the proper format for ''null' value '''\n def get_null( tipo = ''):\n if tipo == np.int32 :\n void = 0\n elif tipo == np.float32 :\n void = 0.0\n elif tipo == np.bytes_ :\n void = b'nan'\n return void\n \n ''' Filling the feedback table. Only feednack for era5_1 and era5_2 are currently available. 
\n Reads the total number of possible columns from the dic_type_attributes dictionary.\n Era5_1 and era5_2 fb have different columns.\n If data for a variable is not available, it fills with the appropriate null value '''\n \n #print('making the era5fb ', date_time, ' ' , dataset)\n red_era5fb_dic = {}\n for v in self.era5fb_columns:\n tipo = self.dic_type_attributes['era5fb'][v]['type'] \n if dataset == 'era5_1' or dataset == 'era5_2':\n if v in data[dataset][File]['era5fb_tab'].keys(): \n red_era5fb_dic[v] = data[dataset][File]['era5fb_tab'][v][index:index_up][indices]\n else:\n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void) \n else: # no feedback for non era%-1 or era5_2 datasets \n void = get_null(tipo = tipo)\n red_era5fb_dic[v]= np.full(len(indices), void)\n \n #print('done making_obstab_era5fb')\n \"\"\"\n try:\n if len(red_obs_dic['date_time']) > 2:\n print('yes')\n else:\n print('check') \n except:\n print('check')\n \"\"\" \n return red_obs_dic , red_era5fb_dic", "def road_not_in_roads(dataframe,list_roads,day,hour):\n list_pred = []\n\n inter = dataframe[(dataframe['day_of_the_week'] == day) \\\n & (dataframe['hour'] == hour)]\n result = inter['collision_severity'].mean()\n\n for road in list_roads:\n list_pred.append(result)\n\n dict_pred = {'names':list_roads,'collision_severity':list_pred}\n df_pred = pd.DataFrame(dict_pred)\n\n return df_pred", "def clean_cases(data):\n newdata=[]\n #Add up Bucks Data\n bucks=defaultdict(list)\n for i in data:\n if i['areaName'] in ['Chiltern','Aylesbury Vale','South Bucks','Wycombe']:\n bucks[i['date']].append(i)\n else:\n newdata.append(i)\n log.debug(bucks)\n for _date,_all in bucks.items():\n item={'areaName': 'Buckinghamshire','areaCode':'E06000060','specimenDate':_date}\n item['newCasesBySpecimenDate']=sum([x['newCasesBySpecimenDate'] for x in _all])\n item['cumCasesBySpecimenDate']=sum([x['cumCasesBySpecimenDate'] for x in _all])\n newdata.append(item)\n\n return newdata", "def populate_homes(self, breakdown):\n #check!#\n\n ###your code here###\n tot=self.nx*self.ny\n for n in range(len(breakdown)):\n breakdown[n]=int(round(tot*breakdown[n]))\n for i in range(breakdown[n]):\n new_home=self.empty_homes.pop(random.randrange(len(self.empty_homes)))\n home_address=(new_home.x,new_home.y)\n x=Person(group=n,home=self.homes[home_address])\n self.homes[home_address].occupant=x\n self.people.append(x)", "def enrich_dataframe(df, name):\n if(name == 'taux_incidence'):\n df['taux_incidence'] = df['P']*100000/df['pop']\n if(name == 'taux_positivite'):\n df['taux_positivite'] = df['P']/df['T'] * 100\n if(name == 'taux_occupation'):\n df['TO'] = df['TO']*100\n if(name == 'vaccins_vaccines_couv_majeurs'):\n df['couv_complet'] = 100 * df['n_cum_complet'] / df['pop']\n if(name == 'vaccins_vaccines_couv_ado_majeurs'):\n df['couv_complet'] = 100 * df['n_cum_complet'] / df['pop']\n if(name == 'taux_classes_fermees'):\n df['taux_classes'] = 100* df['nombre_classes_fermees'] / df['nombre_total_classes']\n if(name == 'taux_structures_fermees'):\n df['taux_structures'] = 100* df['nombre_structures_fermees'] / df['nombre_total_structures']\n\n \n \n return df", "def load_all_data():\r\n\r\n data = dict()\r\n for year in ['2010', '2011', '2014', '2016']:\r\n\r\n data[year] = load_data(int(year))\r\n\r\n # Calculate the dune widths\r\n data[year]['Dune Width'] = data[year]['x_heel'] - data[year]['x_toe']\r\n data[year]['Fenced Dune Width'] = data[year]['x_fence_heel'] - data[year]['x_fence_toe']\r\n data[year]['Fenced Dune 
System Width'] = data[year]['x_heel'] - data[year]['x_fence_toe']\r\n\r\n # For now, remove all negative widths and volumes, something went wrong with them\r\n width_condition = data[year]['Fenced Dune Width'] <= 0\r\n volume_condition = data[year]['Fenced Dune Volume'] <= 0\r\n\r\n data[year]['y_fence_crest'][width_condition] = np.nan\r\n data[year]['Fenced Dune Width'][width_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][width_condition] = np.nan\r\n\r\n data[year]['y_fence_crest'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Width'][volume_condition] = np.nan\r\n data[year]['Fenced Dune Volume'][volume_condition] = np.nan\r\n\r\n data[year]['Fenced Dune System Width'][data[year]['Fenced Dune System Width'] <= 0] = np.nan\r\n\r\n # Remove instances where the fenced and natural dune crest are not positioned correctly\r\n crest_condition_1 = data[year]['x_fence_crest'] >= data[year]['x_crest']\r\n crest_condition_2 = data[year]['y_fence_crest'] >= data[year]['y_crest']\r\n\r\n data[year]['y_fence_crest'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_1] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_1] = np.nan\r\n\r\n data[year]['y_fence_crest'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Width'][crest_condition_2] = np.nan\r\n data[year]['Fenced Dune Volume'][crest_condition_2] = np.nan\r\n\r\n data['Fences'] = load_fence_locations(y=0)\r\n\r\n return data", "def MakePosterior(high, dataset, constructor):\n hypos = xrange(1, high+1)\n suite = constructor(hypos)\n suite.name = str(high)\n\n for data in dataset:\n suite.Update(data)\n\n return suite", "def transform(self, y=None):\n\n if self.how == \"city\":\n df = self.X[[\"ORIG_TIME_QUEUED\", \"EVENT\"]].copy()\n df[\"date\"] = pd.to_datetime(df[\"ORIG_TIME_QUEUED\"]).dt.date\n df.drop(\"ORIG_TIME_QUEUED\", axis=1, inplace=True)\n return (\n df.groupby(\"date\")\n .count()\n .rename(columns={\"EVENT\": \"num_calls\"})\n .reset_index()\n )\n\n else:\n df = self.X[[\"NEIGHBORHOOD\", \"ORIG_TIME_QUEUED\", \"EVENT\"]].copy()\n df[\"date\"] = pd.to_datetime(df[\"ORIG_TIME_QUEUED\"]).dt.date\n df.drop(\"ORIG_TIME_QUEUED\", axis=1, inplace=True)\n counts = (\n df.groupby([\"NEIGHBORHOOD\", \"date\"])\n .count()\n .rename(columns={\"NEIGHBORHOOD\": \"neighborhood\", \"EVENT\": \"num_calls\"})\n .reset_index()\n )\n\n neighborhoods = list(counts[\"NEIGHBORHOOD\"].unique())\n num_days = (\n int(\n np.timedelta64((max(counts[\"date\"]) - min(counts[\"date\"])), \"D\")\n / np.timedelta64(1, \"D\")\n )\n + 1\n )\n start = pd.to_datetime(min(counts[\"date\"]))\n neighboor_arr = np.array([(neighborhoods * num_days)])\n neighboor_arr = neighboor_arr.flatten()\n dates = [(start + np.timedelta64(i, \"D\")) for i in range(num_days)] * len(\n neighborhoods\n )\n\n df2 = pd.DataFrame({\"dt_time\": dates})\n df2[\"date\"] = df2[\"dt_time\"].dt.date\n df2[\"neighborhood\"] = neighboor_arr\n df3 = pd.merge(\n df2,\n counts,\n how=\"outer\",\n left_on=[\"date\", \"neighborhood\"],\n right_on=[\"date\", \"NEIGHBORHOOD\"],\n ).fillna(0)\n return df3[[\"date\", \"neighborhood\", \"num_calls\"]]", "def _get_tr(cls, data):\n\t\tprev_close = data['close_-1_s']\n\t\thigh = data['high']\n\t\tlow = data['low']\n\t\tc1 = high - low\n\t\tc2 = np.abs(high - prev_close)\n\t\tc3 = np.abs(low - prev_close)\n\t\tdata['tr'] = np.max((c1, c2, c3), axis=0)", "def extractImpact(data):\n return {key : array([hellingerDistance(i.px, data['{}'].px) for i in val]) for key, val in data.items() if key != 
'{}'}", "def organise_scans(self):\n self.wh_to_th = {}\n self.th_to_wh = {}\n\n wh_to_th_metrics = []\n th_to_wh_metrics = []\n wh_to_th_params = {}\n th_to_wh_params = {}\n wh_to_th_minim_info = {}\n th_to_wh_minim_info = {}\n wh_to_th_minim_info['time'] = []\n wh_to_th_minim_info['iterations'] = []\n wh_to_th_minim_info['funcevals'] = []\n wh_to_th_minim_info['status'] = []\n th_to_wh_minim_info['time'] = []\n th_to_wh_minim_info['iterations'] = []\n th_to_wh_minim_info['funcevals'] = []\n th_to_wh_minim_info['status'] = []\n\n for injparam in sorted(self.data_sets.keys()):\n injlabels = self.labels[injparam].dict\n for injkey in self.data_sets[injparam].keys():\n h0_metric_val = self.data_sets[injparam][injkey][\n 'h0_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n h1_metric_val = self.data_sets[injparam][injkey][\n 'h1_fit_to_toy_%s_asimov'\n %(injlabels['data_name'])]['metric_val']\n if h1_metric_val > h0_metric_val:\n bestfit = 'h0'\n altfit = 'h1'\n else:\n bestfit = 'h1'\n altfit = 'h0'\n\n wh_to_th_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)]['fid_asimov']\n th_to_wh_fit = self.data_sets[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)]['fid_asimov']\n\n wh_to_th_metrics.append(wh_to_th_fit['metric_val'])\n th_to_wh_metrics.append(th_to_wh_fit['metric_val'])\n\n for systkey in wh_to_th_fit['params'].keys():\n if systkey not in wh_to_th_params.keys():\n wh_to_th_params[systkey] = []\n wh_to_th_params[systkey].append(\n wh_to_th_fit['params'][systkey]\n )\n for systkey in th_to_wh_fit['params'].keys():\n if systkey not in th_to_wh_params.keys():\n th_to_wh_params[systkey] = []\n th_to_wh_params[systkey].append(\n th_to_wh_fit['params'][systkey]\n )\n\n wh_to_th_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_time'])\n wh_to_th_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n wh_to_th_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n wh_to_th_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(altfit, bestfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n \n th_to_wh_minim_info['time'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_time'])\n th_to_wh_minim_info['iterations'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nit'])\n th_to_wh_minim_info['funcevals'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['nfev'])\n th_to_wh_minim_info['status'].append(\n self.minimiser_info[injparam][injkey][\n '%s_fit_to_%s_fid'%(bestfit, altfit)\n ]['fid_asimov']['minimizer_metadata']['status'])\n\n wh_to_th_params['bestfit'] = bestfit\n wh_to_th_params['altfit'] = altfit\n th_to_wh_params['bestfit'] = bestfit\n th_to_wh_params['altfit'] = altfit\n\n self.wh_to_th['metrics'] = wh_to_th_metrics\n self.th_to_wh['metrics'] = th_to_wh_metrics\n self.wh_to_th['params'] = wh_to_th_params\n self.th_to_wh['params'] = th_to_wh_params\n self.wh_to_th['minim_info'] = wh_to_th_minim_info\n self.th_to_wh['minim_info'] = th_to_wh_minim_info", "def 
_make_ties(self) -> None:\n\n # get all hint spaces with adjacent '?'s\n frontier = {neighbor: self._lookup[neighbor] for pos, space in self._unknowns.items() for neighbor in\n space.neighbors.values() if neighbor and self._lookup[neighbor].hint.isnumeric()}\n\n # use hints to create \"zones\" of '?'-squares along the frontier,\n # detailing the # of mines left to find in each zone.\n for pos, space in frontier.items():\n local_unknowns = {coord for coord in space.neighbors.values() if coord in self._unknowns}\n for unknown in local_unknowns:\n key = frozenset(local_unknowns)\n self._lookup[unknown].zones[key] = self._lookup[unknown].zones.setdefault(key, space.num_undiscovered)\n self._lookup[unknown].zones[key] = min(space.num_undiscovered, self._lookup[unknown].zones[key])\n self._lookup[unknown].ties |= local_unknowns - {unknown}\n self._remaining_zones.update(self._lookup[unknown].zones)\n\n # split overlapping zones into components\n for unknown in self._unknowns.values():\n for zone, num_undiscovered in list(unknown.zones.items()):\n if zone not in unknown.zones:\n continue\n for other_zone, other_num_undiscovered in list(unknown.zones.items()):\n if other_zone in unknown.zones:\n shared = zone & other_zone\n\n if zone < other_zone or (shared and other_num_undiscovered > num_undiscovered):\n # if \"zone\" & \"other_zone\" share members then\n # it is possible to split the zone w/ the higher # of mines\n # into components, \"shared\" & \"not_shared\".\n\n # unknown.zones.pop(other_zone)\n\n not_shared = other_zone - shared\n unknown.zones[not_shared] = other_num_undiscovered - num_undiscovered\n else:\n print(end='')\n return", "def doClassification(self):\n halfIndex=int(len(self.dict)/2)\n i=0\n for k, v in sorted(self.dict.items(), key=lambda item: item[1]):\n if i<halfIndex:\n self.lowVolumeStockList.append(k)\n else:\n self.highVolumeStockList.append(k)\n i=i+1", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n\n ### your code goes here\n for i in xrange(len(ages)):\n cleaned_data.append((ages[i],net_worths[i],abs(net_worths[i]-predictions[i])))\n cleaned_data = sorted(cleaned_data,key = lambda x:x[2])\n size_of_new_dataset = int(round(0.9*len(cleaned_data)))\n cleaned_data = cleaned_data[:size_of_new_dataset]\n return cleaned_data", "def add_average_discount_to_target(data):\n\n data['adj_price'] = data.price.map(lambda x: (x - x * 0.05))\n data['adj_price_sqrm'] = data.price_sqrm.map(lambda x: (x - x * 0.05))\n\n return data", "def get_monopolies(self):\n\n purple = self.get_colour_monopolies_owner(\"purple\")\n grey = self.get_colour_monopolies_owner(\"grey\")\n pink = self.get_colour_monopolies_owner(\"pink\")\n orange = self.get_colour_monopolies_owner(\"orange\")\n red = self.get_colour_monopolies_owner(\"red\")\n yellow = self.get_colour_monopolies_owner(\"yellow\")\n green = self.get_colour_monopolies_owner(\"green\")\n blue = self.get_colour_monopolies_owner(\"blue\")\n railroad = self.get_other_monopolies_owner(\"railroad\")\n utility = self.get_other_monopolies_owner(\"utility\")\n\n if (purple[0][0] == purple[1][0]) and (purple[0][0] and purple[1][0] != \"\"):\n self.row_final.append(purple[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Baltic Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Mediterranean Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Baltic Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Mediterranean Ave.\")\n\n if (grey[0][0] == grey[1][0] == grey[2][0]) and (grey[0][0] 
and grey[1][0] and grey[2][0] != \"\"):\n self.row_final.append(grey[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Oriental Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Vermont Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Connecticut Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Oriental Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Vermont Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Connecticut Ave.\")\n\n if (pink[0][0] == pink[1][0] == pink[2][0]) and (pink[0][0] and pink[1][0] and pink[2][0] != \"\"):\n self.row_final.append(pink[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"St. Charles Place\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"States Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Virginia Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"St. Charles Place\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"States Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Virginia Ave.\")\n\n if (orange[0][0] == orange[1][0] == orange[2][0]) and (orange[0][0] and orange[1][0] and orange[2][0] != \"\"):\n self.row_final.append(orange[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"St. James Place\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Tennessee Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"New York Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"St. James Place\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Tennessee Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"New York Ave.\")\n\n if (red[0][0] == red[1][0] == red[2][0]) and (red[0][0] and red[1][0] and red[2][0] != \"\"):\n self.row_final.append(red[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Kentucky Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Indiana Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Illinois Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Kentucky Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Indiana Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Illinois Ave.\")\n\n if (yellow[0][0] == yellow[1][0] == yellow[2][0]) and (yellow[0][0] and yellow[1][0] and yellow[2][0] != \"\"):\n self.row_final.append(yellow[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Atlantic Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Ventnor Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Marvin Gardens\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Atlantic Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Ventnor Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Marvin Gardens\")\n\n if (green[0][0] == green[1][0] == green[2][0]) and (green[0][0] and green[1][0] and green[2][0] != \"\"):\n self.row_final.append(green[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Pacific Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"North Carolina Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Pennsylvania Ave.\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Pacific Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"North Carolina Ave.\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Pennsylvania Ave.\")\n\n if (blue[0][0] == blue[1][0]) and (blue[0][0] and blue[1][0] != \"\"):\n self.row_final.append(blue[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Park Place\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", 
\"Boardwalk\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Park Place\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Boardwalk\")\n\n if (railroad[0][0] == railroad[1][0] == railroad[2][0] == railroad[3][0]) and (\n railroad[0][0] and railroad[1][0] and railroad[2][0] and railroad[3][0] != \"\"):\n self.row_final.append(railroad[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Reading Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Pennsylvania Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"B. & O. Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Short Line\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Reading Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Pennsylvania Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"B. & O. Railroad\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Short Line\")\n\n if (utility[0][0] == utility[1][0]) and (utility[0][0] and utility[1][0] != \"\"):\n self.row_final.append(utility[0])\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Electric Company\")\n self.db.write_value(\"is_a_monopoly\", \"yes\", \"Water Works\")\n else:\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Electric Company\")\n self.db.write_value(\"is_a_monopoly\", \"no\", \"Water Works\")\n\n print(self.row_final)\n return self.row_final", "def generate(data, d=0.1):\n \n p = pd.DataFrame({\n \"Price\": data\n })\n p[\"Event\"] = ''\n event = 'upturn'\n ph = p['Price'][0] # highest price\n pl = ph # lowest price\n\n for i in range(0, len(p)):\n\n if event is 'upturn':\n if p['Price'][i] <= (ph * (1 - d)):\n event = 'downturn'\n pl = p['Price'][i]\n p.at[i, 'Event'] = 'end downturn'\n p.at[i + 1, 'Event'] = 'start downward os'\n \n else:\n if ph < p['Price'][i]:\n ph = p['Price'][i]\n p.at[i, 'Event'] = 'start downturn'\n p.at[i - 1, 'Event'] = 'end upward os'\n else:\n if p['Price'][i] >= (pl * (1 + d)):\n event = 'upturn'\n ph = p['Price'][i]\n p.at[i, 'Event'] = 'end upturn'\n p.at[i + 1, 'Event'] = 'start upward os'\n \n else:\n if pl > p['Price'][i]:\n pl = p['Price'][i]\n p.at[i, 'Event'] = 'start upturn'\n p.at[i - 1, 'Event'] = 'end downward os'\n \n return p['Event']", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in 
self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + 
housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()", "def FlagOutliers(data,medwin,threshold):\n \n dout = {}\n \n \n for portion in data.keys():\n if data[portion]['bool']==False:\n \n\t npts = len(data[portion]['x'])\n \n # defining the window\n medflux = []\n medhalf = (medwin-1)/2\n \n # placing the window and computing the median\n for i in range(npts):\n i1 = max(0,i-medhalf)\n i2 = min(npts, i + medhalf)\n try:\n if num.ma.median(data[portion]['y'][i1:i2]).mask:\n medflux.append(medflux[-1])\n except:\n medflux.append(num.ma.median(data[portion]['y'][i1:i2]))\n # finding outliers\n medflux = num.array(medflux)\n outliers = num.ma.getdata(data[portion]['y']) - medflux\n \n sigma = compute1Sigma(outliers)\n \n outliers = data[portion]['y'] - medflux\n \n idx=num.where( (abs(num.array(outliers))>threshold*sigma))\n \n \n \n # creating the outlier mask\n data[portion]['x'].mask = data[portion]['UnMasked']\n data[portion]['x'][idx[0]] = num.ma.masked\n \n mask2 = num.ma.copy(data[portion]['x'].mask)\n \n data[portion]['OutlierMask']=mask2\n \n mask3 = num.ma.copy(mask2)\n dout[portion] = {'kid':data[portion]['kid'],'x':data[portion]['x'],'y':data[portion]['y'],'yerr':data[portion]['yerr'],'UnMasked':data[portion]['UnMasked'],'OutlierMask':data[portion]['OutlierMask'],'OTMask':mask3,'bool':data[portion]['bool']}\n else:\n\n data[portion]['x'].mask = data[portion]['TransitMask']\n data[portion]['y'].mask = data[portion]['TransitMask']\n data[portion]['yerr'].mask = data[portion]['TransitMask']\n npts = len(data[portion]['x'])\n \n # defining the window\n medflux = []\n medhalf = (medwin-1)/2\n \n # placing the window and computing the median\n for i in range(npts):\n i1 = max(0,i-medhalf)\n i2 = min(npts, i + medhalf)\n try:\n if num.ma.median(data[portion]['y'][i1:i2]).mask:\n medflux.append(medflux[-1])\n except:\n medflux.append(num.ma.median(data[portion]['y'][i1:i2]))\n # finding outliers\n medflux = num.array(medflux)\n outliers = num.ma.getdata(data[portion]['y']) - medflux\n \n sigma = compute1Sigma(outliers)\n \n outliers = data[portion]['y'] - medflux\n \n idx=num.where( (abs(num.array(outliers))>threshold*sigma) & (data[portion]['TransitMask'] == 
False) )\n \n \n # creating the outlier mask\n data[portion]['x'].mask = data[portion]['UnMasked']\n data[portion]['x'][idx[0]] = num.ma.masked\n \n mask2 = num.ma.copy(data[portion]['x'].mask)\n \n data[portion]['OutlierMask']=mask2\n \n # creating the outlier + transit mask\n mask3 = num.ma.mask_or(data[portion]['TransitMask'],mask2)\n \n dout[portion] = {'kid':data[portion]['kid'],'x':data[portion]['x'],'y':data[portion]['y'],'yerr':data[portion]['yerr'],'TransitMask':data[portion]['TransitMask'],'UnMasked':data[portion]['UnMasked'],'OutlierMask':data[portion]['OutlierMask'],'OTMask':mask3,'bool':data[portion]['bool']}\n \n #if len(data[portion]['y'][i1:i2][num.where(data[portion]['y'][i1:i2].mask == True)]) > 0:\n #print i1, i2, npts, data[portion]['y'][i1:i2], medflux[-1]\n #print type(medflux[-1])\n \n # tagging outliers (which are not part of the transit)\n \n\n #if eData['bool']==False:\n #idx=num.where( (abs(num.array(outliers))>threshold*sigma))\n #else:\n #idx=num.where( (abs(num.array(outliers))>threshold*sigma) & (data[portion]['TransitMask'] == False) )\n\n ## creating the outlier mask\n #data[portion]['x'].mask = data[portion]['UnMasked']\n #data[portion]['x'][idx[0]] = num.ma.masked\n\n #mask2 = num.ma.copy(data[portion]['x'].mask)\n \n #data[portion]['OutlierMask']=mask2\n \n #if eData['bool']==False:\n #mask3 = num.ma.copy(mask2)\n #dout[portion] = {'kid':data[portion]['kid'],'x':data[portion]['x'],'y':data[portion]['y'],'yerr':data[portion]['yerr'],'UnMasked':data[portion]['UnMasked'],'OutlierMask':data[portion]['OutlierMask'],'OTMask':mask3}\n #else:\n ## creating the outlier + transit mask\n #mask3 = num.ma.mask_or(data[portion]['TransitMask'],mask2)\n \n #dout[portion] = {'kid':data[portion]['kid'],'x':data[portion]['x'],'y':data[portion]['y'],'yerr':data[portion]['yerr'],'TransitMask':data[portion]['TransitMask'],'UnMasked':data[portion]['UnMasked'],'OutlierMask':data[portion]['OutlierMask'],'OTMask':mask3}\n return dout", "def _transform_observation_data(\n self,\n observation_data: List[ObservationData],\n ) -> List[ObservationData]:\n # TODO (jej): Transform covariances.\n if self.winsorize:\n winsorization_rates = {}\n for metric_name, vals in self.percentiles.items():\n n = len(vals)\n # Calculate winsorization rate based on number of observations\n # using formula from [Salinas, Shen, Perrone 2020]\n # https://arxiv.org/abs/1909.13595\n winsorization_rates[metric_name] = (\n 1.0 / (4 * math.pow(n, 0.25) * math.pow(math.pi * math.log(n), 0.5))\n if n > 1\n else 0.25\n )\n else:\n winsorization_rates = {\n metric_name: 0 for metric_name in self.percentiles.keys()\n }\n for obsd in observation_data:\n for idx, metric_name in enumerate(obsd.metric_names):\n if metric_name not in self.percentiles:\n raise ValueError(\n f\"Cannot map value to percentile\"\n f\" for unknown metric {metric_name}\"\n )\n # apply map function\n percentile = self._map(obsd.means[idx], metric_name)\n # apply winsorization. 
If winsorization_rate is 0, has no effect.\n metric_wr = winsorization_rates[metric_name]\n percentile = max(metric_wr, percentile)\n percentile = min((1 - metric_wr), percentile)\n obsd.means[idx] = percentile\n obsd.covariance.fill(float(\"nan\"))\n return observation_data", "def trend_up(self):\n raise NotImplementedError()", "def _run_alg(data, agg_col, cat_cols, model, null_responses):\n agg_units = sorted(set(data[agg_col]), key=lambda x: (str(type(x)), x))\n outlier_scores = collections.defaultdict(dict)\n agg_to_data = {}\n agg_col_to_data = {}\n for agg_unit in agg_units:\n # TODO: could this be smarter and remove data each time? maybe no savings.\n # TODO: support numpy only again\n agg_to_data[agg_unit] = data[data[agg_col] == agg_unit]\n agg_col_to_data[agg_unit] = {}\n \n for col in cat_cols:\n col_vals = sorted(set(data[col]), key=lambda x: (str(type(x)), x))\n col_vals = [c for c in col_vals if c not in null_responses]\n frequencies = {}\n for agg_unit in agg_units:\n frequencies[agg_unit],grouped = _get_frequencies(data, col, col_vals, agg_col, agg_unit, agg_to_data)\n agg_col_to_data[agg_unit][col] = grouped\n outlier_scores_for_col, expected_frequencies_for_col, p_values_for_col = model.compute_outlier_scores(frequencies)\n for agg_unit in agg_units:\n outlier_scores[agg_unit][col] = {'score': outlier_scores_for_col[agg_unit],\n 'observed_freq': frequencies[agg_unit],\n 'expected_freq': expected_frequencies_for_col[agg_unit],\n 'p_value': p_values_for_col[agg_unit]}\n return outlier_scores, agg_col_to_data", "def linkInsOuts(ins,outs,meta,roundabout):\n \n \n for onIns in ins :\n res= np.unique(np.concatenate((meta.loc[onIns]['outs'],outs)))\n meta.at[onIns,'outs']=res[res!=roundabout]\n for oneOuts in outs:\n res= np.unique(np.concatenate((meta.loc[oneOuts]['ins'],ins)))\n meta.at[oneOuts,'ins']=res[res!=roundabout]", "def analyse_rsi(stocks_data, oversold=25, overbought=85):\n print('\\n--- RSI ANALYSIS ---')\n for stock_symbol, df in stocks_data.items():\n rsi = find_rsi(df)\n min_rsi = oversold\n max_rsi = overbought\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r < min_rsi:\n min_rsi = r\n elif r > max_rsi:\n max_rsi = r\n\n mins = []\n maxs = []\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r <= min_rsi * 1.2:\n mins.append((rsi[0][i], rsi[1][i]))\n elif r >= max_rsi * 0.95:\n maxs.append((rsi[0][i], rsi[1][i]))\n\n if mins:\n print(stock_symbol)\n [print(m[0], m[1]) for m in mins]\n if maxs:\n print(stock_symbol)\n [print(m[0], m[1]) for m in maxs]", "def _get_observations(self):\n food = np.array(self.game.state.data.food.data)\n walls = np.array(self.game.state.data.layout.walls.data)\n map_shape = walls.shape\n capsules = self.game.state.data.capsules\n pacman_pos = self.game.state.data.agentStates[0].configuration.pos\n\n gosts_pos = list(map(lambda agent: agent.configuration.pos,\n self.game.state.data.agentStates[1:]))\n gosts_scared = list(\n map(lambda agent: agent.scaredTimer > 0, self.game.state.data.agentStates[1:]))\n\n \"\"\"\n 0: empty,\n 1: wall,\n 2: food,\n 3: capsules,\n 4: ghost,\n 5: scared ghost,\n 6: pacman\n \"\"\"\n\n view_slices = ((max(pacman_pos[0]-self.view_distance[0], 0), min(pacman_pos[0]+self.view_distance[0]+1, map_shape[0])),\n (max(pacman_pos[1]-self.view_distance[1], 0), min(pacman_pos[1]+self.view_distance[1]+1, map_shape[1])))\n\n def select(l):\n return l[view_slices[0][0]:view_slices[0][1], view_slices[1][0]:view_slices[1][1]]\n\n obs = np.vectorize(lambda v: 1 if v else 0)(select(walls))\n obs = obs + 
np.vectorize(lambda v: 2 if v else 0)(select(food))\n\n def pos_to_relative_pos(pos):\n if (pos[0] < view_slices[0][0] or view_slices[0][1] <= pos[0]\n or pos[1] < view_slices[1][0] or view_slices[1][1] <= pos[1]):\n return None\n else:\n return pos[0]-view_slices[0][0], pos[1]-view_slices[1][0]\n\n for c_relative_pos in filter(lambda x: x is not None, map(pos_to_relative_pos, capsules)):\n obs[c_relative_pos[0], c_relative_pos[1]] = 3\n\n for i, g_relative_pos in enumerate(map(pos_to_relative_pos, gosts_pos)):\n if (g_relative_pos is not None):\n obs[int(g_relative_pos[0]), int(g_relative_pos[1])\n ] = 5 if gosts_scared[i] else 4\n\n pacman_relative_pos = pos_to_relative_pos(pacman_pos)\n\n obs[pacman_relative_pos[0], pacman_relative_pos[1]] = 6\n\n obs[0, 0] = 2 if np.any(\n food[0:pacman_pos[0]+1, 0:pacman_pos[1]+1]) else 0\n obs[obs.shape[0]-1,\n 0] = 2 if np.any(food[pacman_pos[0]:map_shape[0], 0:pacman_pos[1]+1])else 0\n\n obs[0, obs.shape[1] -\n 1] = 2 if np.any(food[0:pacman_pos[0]+1, pacman_pos[1]:map_shape[0]]) else 0\n obs[obs.shape[0]-1, obs.shape[1]-1] = 2 if np.any(\n food[pacman_pos[0]:map_shape[0], pacman_pos[1]:map_shape[0]]) else 0\n\n # print(np.transpose(obs)[::-1, :])\n\n return obs", "def create_data_model():\r\n data = {}\r\n data['period'] = int(sheet1.cell_value(1, getColumnIndex(sheet1,'调度周期')))\r\n counttype_technician=3\r\n data['technician']=[]\r\n for i in range(1,1+counttype_technician):\r\n data['technician'].append(int(sheet1.cell_value(i, getColumnIndex(sheet1,'技工日工资'))))\r\n data['base'] = {}\r\n count_base=1 # 码头个数\r\n data['base']['coordinate']=[]\r\n for i in range(1,1+count_base):\r\n base_x=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标X'))\r\n base_y=sheet1.cell_value(i, getColumnIndex(sheet1,'码头坐标Y'))\r\n data['base']['coordinate'].append((base_x,base_y))\r\n\r\n data['base']['technician']=[]\r\n for b in range(0,count_base):\r\n data['base']['technician'].append([])\r\n for j in range(counttype_technician):\r\n data['base']['technician'][b].append([])\r\n for i in range(data['period']):\r\n data['base']['technician'][b][j].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'%d类技工总人数'% (j+1)))))\r\n\r\n data['wind_farm'] = {}\r\n count_wind_farm=2 #需要维修的风电场个数\r\n count_wind_turbine=[8,8] #每个风电场需要维修的风机个数\r\n count_wind_turbine_sum=[36,36]# 每个风电场所有的风机个数\r\n data['wind_farm']['maintenance_time']=[]\r\n count_wturbine=[] #用于计数,记录不同风电场风机信息在Excel位置\r\n count_wturbine_l=0\r\n for i in range(count_wind_farm):\r\n count_wturbine.append(count_wturbine_l)\r\n count_wturbine_l=count_wturbine_l+count_wind_turbine[i]\r\n count_turbine=[]\r\n count_turbine_l=0\r\n for i in range(count_wind_farm):\r\n count_turbine.append(count_turbine_l)\r\n count_turbine_l=count_turbine_l+count_wind_turbine_sum[i]\r\n\r\n ###设定与风电场相关的参数\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['maintenance_time'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['maintenance_time'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机维护时间'))))\r\n\r\n data['wind_farm']['technician']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['technician'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['technician'][i].append([])\r\n for k in range(counttype_technician):\r\n data['wind_farm']['technician'][i][j].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'%d类技工需求量'% (k+1)))))\r\n\r\n\r\n data['wind_farm']['parts_weight']=[]\r\n for i in 
range(count_wind_farm):\r\n data['wind_farm']['parts_weight'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['parts_weight'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机所需备件重量'))))\r\n\r\n data['wind_farm']['present']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['present'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['present'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'风机在维修时是否需要船停泊'))))\r\n\r\n data['wind_farm']['deadline']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['deadline'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['deadline'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'最晚建议维修时间'))))\r\n\r\n data['wind_farm']['penalty_cost']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['penalty_cost'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['penalty_cost'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], getColumnIndex(sheet1,'逾时惩罚成本'))))\r\n\r\n data['vessel'] = {}\r\n counttype_vessel=3\r\n data['vessel']['capacity']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['capacity'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的备件容量'))))\r\n\r\n data['vessel']['technician']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['technician'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的人员可载量'))))\r\n\r\n data['vessel']['cost']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['cost'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的油费'))))\r\n\r\n data['vessel']['speed']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['speed'].append(int(sheet1.cell_value(i+1, getColumnIndex(sheet1,'船的航速'))))\r\n\r\n data['vessel']['trans_time']=[] # 这里默认转移时间跟船的类型没有关系,与时期有关\r\n for i in range(data['period']):\r\n data['vessel']['trans_time'].append(sheet1.cell_value(i+1, getColumnIndex(sheet1,'技工转移时间')))\r\n\r\n data['vessel']['time_window']=[]\r\n for i in range(counttype_vessel):\r\n data['vessel']['time_window'].append([])\r\n for j in range(data['period']):\r\n data['vessel']['time_window'][i].append([])\r\n for k in range(count_wind_farm):\r\n data['vessel']['time_window'][i][j].append(int(sheet1.cell_value(j+1, getColumnIndex(sheet1,'风电场%d船%d可作业时间'%(k+1,i+1)))))\r\n\r\n # # 风机坐标\r\n # data['wind_farm']['coordinate']=[]\r\n # for i in range(count_wind_farm):\r\n # data['wind_farm']['coordinate'].append([])\r\n # for j in range(72):\r\n # turbine_x = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标X'))\r\n # turbine_y = sheet1.cell_value(j+1, getColumnIndex(sheet1, '风机坐标Y'))\r\n # data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n # 风机坐标\r\n data['wind_farm']['coordinate']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['coordinate'].append([])\r\n for j in range(count_wind_turbine_sum[i]):\r\n turbine_x = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标X'))\r\n turbine_y = sheet1.cell_value(j+1+count_turbine[i], getColumnIndex(sheet1, '风机坐标Y'))\r\n data['wind_farm']['coordinate'][i].append((turbine_x, turbine_y))\r\n\r\n\r\n data['wind_farm']['task']=[]\r\n for i in range(count_wind_farm):\r\n data['wind_farm']['task'].append([])\r\n for j in range(count_wind_turbine[i]):\r\n data['wind_farm']['task'][i].append(int(sheet1.cell_value(j+1+count_wturbine[i], 
getColumnIndex(sheet1,'需要维修风机编号'))))\r\n\r\n return data", "def process_data(data):\n locale.setlocale(locale.LC_ALL, 'en_US.UTF8')\n max_revenue = {\"revenue\": 0}\n max_total_sales = {\"total_sales\": 0}\n all_years = {}\n for item in data:\n # Calculate the revenue generated by this model (price * total_sales)\n # We need to convert the price from \"$1234.56\" to 1234.56\n item_price = locale.atof(item[\"price\"].strip(\"$\"))\n item_revenue = item[\"total_sales\"] * item_price\n if item_revenue > max_revenue[\"revenue\"]:\n item[\"revenue\"] = item_revenue\n max_revenue = item\n # TODO: also handle max sales\n if item['total_sales'] > max_total_sales[\"total_sales\"]:\n max_total_sales = item\n # TODO: also handle most popular car_year\n if item['car']['car_year'] not in all_years:\n all_years[item['car']['car_year']] = 1\n else:\n all_years[item['car']['car_year']] += 1\n\n sorted_all_years = sorted(all_years.items(), key=operator.itemgetter(1))\n that_year = sorted_all_years[-1][0]\n print(that_year)\n year_sales = 0\n for item in data:\n if item['car']['car_year'] == that_year:\n year_sales = year_sales + item['total_sales']\n \n summary = [\n \"The {} generated the most revenue: ${}\".format(format_car(max_revenue[\"car\"]), max_revenue[\"revenue\"]), \"The {} had the most sales: {}\".format(format_car(max_total_sales[\"car\"]), max_total_sales[\"total_sales\"]), \"The most popular year was {} with {} sales.\".format(that_year, year_sales)\n ]\n\n return summary", "def no_overfitting(self):\n\n # Instance with minimun length should be the maximum length\n train_len = []\n [train_len.append(st['Nevents']) for st in self.stats]\n train_len = np.array(train_len)\n max_len = train_len[train_len != 0].min()\n\n # CROPS FEATURE SAMPLES\n onpower_train = pd.DataFrame()\n offpower_train = pd.DataFrame()\n duration_train = pd.DataFrame()\n start = 0\n end = 0\n for ind in np.arange(len(self.stats)):\n if self.stats[ind]['Nevents'] != 0:\n if ind == 0:\n start = 0\n else:\n start = end\n end += self.stats[ind]['Nevents']\n\n aux = self.onpower_train[start:end]\n aux = aux[:max_len]\n onpower_train = pd.concat([onpower_train, aux])\n\n aux = self.offpower_train[start:end]\n aux = aux[:max_len]\n offpower_train = pd.concat([offpower_train, aux])\n\n aux = self.duration_train[start:end]\n aux = aux[:max_len]\n duration_train = pd.concat([duration_train, aux])\n\n # udating stats:\n self.stats[ind]['Nevents'] = max_len\n\n self.onpower_train = onpower_train\n self.offpower_train = offpower_train\n self.duration_train = duration_train\n\n # RE-TRAINS FEATURES:\n self.__retrain(self.onpower, self.onpower_train)\n self.__retrain(self.offpower, self.offpower_train)\n self.__retrain(self.duration, self.duration_train)", "def generateData(building, floors):\n # initialize dictionaries\n floor_dictionary = {}\n up_dictionary = {}\n down_dictionary = {}\n i = 0\n # generates random number of people at each floor\n while i < floors:\n floor_dictionary[i] = random.randint(0,5)\n i += 1\n # allocates which passengers are going up and which are going down\n for key in floor_dictionary:\n counter_up = 0\n up_array = []\n counter_down = 0\n down_array = []\n\n # people at top floor cannot go up, bottom floor cannot go down.\n if key != (floors - 1) and key != 0:\n people_up = random.randint(0,floor_dictionary[key])\n people_down = floor_dictionary[key] - people_up\n elif key == 0:\n people_down = 0\n people_up = floor_dictionary[key]\n elif key == (floors - 1):\n people_down = floor_dictionary[key]\n 
people_up = 0\n\n # assign random floor to people going up, above current floor\n while counter_up < people_up:\n up_array.append(random.randint(key+1, (floors-1)))\n counter_up += 1\n # assign random floor to people going down, below current floor\n while counter_down < people_down:\n down_array.append(random.randint(0, key-1))\n counter_down += 1\n\n # assign each array to dictionary for that key\n up_dictionary[key] = up_array\n down_dictionary[key] = down_array\n\n # update object with new values generated\n building = Building(floors, floor_dictionary, up_dictionary, down_dictionary)\n return building", "def crossover(self):\n self.sort_population()\n elite_amount = round(self.elite_rate * self.population_size)\n # preserve from the top\n new_population = [ele for ele in self.population if ele.ttl > 0]\n for individual in new_population:\n if individual.ttl > 0:\n individual.ttl -= 1\n new_population += self.population[:elite_amount]\n\n while len(new_population) < self.population_size:\n # newGene = self.crossBelowCrossRate()\n new_gene, new_gene2 = self.cross_on_arb_seq()\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene)\n new_population.append(new_gene)\n if len(new_population) == self.population_size:\n break\n\n if random() <= self.mutate_rate:\n self.mutate_append(new_gene2)\n new_population.append(new_gene2)\n self.population = new_population", "def organize_data(scores, stds):\n\n scores = pd.Series(list(scores.values()),\n index=map(str, scores.keys()),\n name=\"SCORE MEAN\")\n scores.index.name = \"AUXILIARY LOSS WEIGHTS\"\n stds = pd.Series(list(stds.values()),\n index=map(str, stds.keys()),\n name=\"SCORE STD\")\n stds.index.name = \"AUXILIARY LOSS WEIGHTS\"\n return scores, stds", "def generate_aggregate_data(self):\n output = np.zeros((self.number_cycles, self.number_towers))\n\n for cycle in range(self.number_cycles):\n for user in range(self.number_users):\n for tower in range(self.number_towers):\n output[cycle][tower] += self.traces[user][cycle] == tower\n\n return output", "def process_data_p2(data, documentation, geodata):\r\n documentation_state = documentation.get_column(\"ST_FIPS\")\r\n mean_per_state = data.groupby(\"ST_FIPS\", as_index=False).mean()\r\n mean_per_state[\"ST_FIPS\"] = mean_per_state[\"ST_FIPS\"] \\\r\n .map(documentation_state.values_mapping)\r\n return geodata.merge(mean_per_state, how=\"left\",\r\n left_on=\"name\", right_on=\"ST_FIPS\")", "def get_output(data, capital=100000, leverage=1, commission=0, slippage=0):\n total_capital = capital * leverage\n df = data\n df['cnt'] = df.groupby('timestamp')['symbol'].transform(\n lambda x: len(x))\n df['qty'] = (total_capital/df['cnt']/df['price']).round()\n df['profit'] = df.eval('(sell-buy)*qty')\n df['commission'] = df.eval('(sell+buy)*qty') * commission * 0.01\n df['slippage'] = df.eval('(sell+buy)*qty') * slippage * 0.01\n df['net_profit'] = df.eval('profit - commission - slippage')\n return df", "def apply(self, population_current, population_offspring):\n population_current[population_current.worst_index] = population_offspring[0]\n return population_current", "def build_shape_data(self, start=None, end=None):\n # If start and end are None, then set them to be min/max of self.df_demand\n if start is None:\n start = self.df_demand['date'].min()\n if end is None:\n end = self.df_demand['date'].max()\n print(f\"date range for shape data is from {start} to {end}\")\n # Extract part of df_demand that is within start and end\n df_sub = self.df_demand[(self.df_demand['date'] >= start) 
& (self.df_demand['date'] <= end)]\n assert df_sub['date'].min() >= start\n assert df_sub['date'].max() <= end\n num_days = len(pd.date_range(iso8601.parse_date(start), iso8601.parse_date(end), freq='d'))\n print(f\"number of days is {num_days}\")\n # When finding variance and mean, add in missing days as 0s\n # Obtain the counts for each lat/lng region\n counts = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).size().reset_index(name='counts')\n # Group demand data by lat/lng region and average across other cols\n df = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])[['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']].mean().reset_index()\n df = df.merge(counts, on=['left_lng', 'right_lng', 'lower_lat', 'upper_lat'])\n # print(df.head())\n # Modify averages by multiplying each by count and divide by num_days\n vars = ['avail_count', 'avail_mins', 'trips', 'prob_scooter_avail', 'adj_trips']\n for var in vars:\n df[var] = df[var]*df['counts']/num_days\n # print(df.head())\n # Calculate the variance for prob_scooter_avail\n probVariance = df_sub.groupby(['left_lng', 'right_lng', 'lower_lat', 'upper_lat']).apply(lambda x: ((x['prob_scooter_avail'] - (x['prob_scooter_avail'].sum()/num_days))**2).sum()/(num_days-1)).reset_index(name='prob_scooter_avail')\n # print(probVariance.head())\n df['prob_scooter_avail_var'] = probVariance['prob_scooter_avail']\n # Check to see if there are any Nan values\n print(f\"Nan values in df? {df.isnull().values.any()}\")\n # print(df.head())\n # For each var col, create corresponding color columns (log and unlog)\n # Also create the factors list that get passed into self.create_rectangle_lst\n factors = [('avail_count', 'decimal'), ('avail_mins', 'decimal'),\n ('trips', 'decimal'), ('prob_scooter_avail', 'percent'), ('adj_trips', 'decimal')]\n i = 0\n original_len = len(factors)\n while i < original_len:\n name, type = factors[i]\n # print(f\"name={name}, type={type}\")\n # Create color column\n df = self.map_values_to_color(df, name)\n # If type is not percent than create log version\n if type != 'percent':\n df = self.create_log_column(df, name)\n factors.append(('log_'+name, type))\n i += 1\n # Deal with estimated demand and unmet demand\n # Filter out rows where prob_scooter_avail sig diff from 0\n sigDiffIdx = df.apply(lambda x: utils.sig_diff_from_zero(x['prob_scooter_avail'], x['prob_scooter_avail_var']), axis=1)\n # print(sigDiffIdx.head())\n df_sig_diff = df[sigDiffIdx]\n # Calculate estimated demand and unmet demand\n df_sig_diff = self.calculate_demand(df_sig_diff)\n # print(df_sig_diff.head())\n # Create color column and log column for unmet demand\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'unmet_demand')\n df_sig_diff = self.map_values_to_color(df_sig_diff, 'estimated_demand')\n df_sig_diff = self.create_log_column(df_sig_diff, 'unmet_demand')\n factors.extend([('estimated_demand', 'decimal'), ('unmet_demand', 'decimal'), ('log_unmet_demand', 'decimal')])\n # Fill in the colors for the grid cells that aren't significantly different\n df_not_sig_diff = df[~sigDiffIdx]\n # print(df_not_sig_diff.head())\n df = pd.concat([df_sig_diff, df_not_sig_diff])\n # df.to_csv('../../../data_files/20210427_estimatedDemand.csv', index=False)\n # Create Rectangle information\n rectangles = self.create_rectangle_lst(df, factors)\n return rectangles, start, end", "def decreaseSearch(self):\n\n self.detections = self.detections()\n self.detections.location = self.location\n for index, row in 
enumerate(self.magdata):\n\n if row[3] < self.upper*self.background and row[3] < self.background - 2*self.std:\n # less than 50% of background and 2 std\n #print \"Large decrease (less than \" + str(self.upper*self.background) + \")\", row[3]\n self.detections.largeDecrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] < self.lower*self.background and row[3] < self.background - self.std:\n # less than 25% of background and 1 std\n #print \"Decrease (less than \" + str(self.lower*self.background) + \")\", row[3]\n self.detections.smallDecrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] > (1 + self.upper)*self.background and row[3] > self.background + 2*self.std:\n # greater than 50% of background\n #print \"Large increase (greater than \" + str((1 + self.upper)*self.background) + \")\", row[3]\n self.detections.largeIncrease.append([index,self.timestamps[index], row[3]])\n \n elif row[3] > (1 + self.lower)*self.background and row[3] > self.background + self.std:\n # greater than 25% of background\n #print \"Increase (greater than \" + str((1 + self.lower)*self.background) + \")\", row[3]\n self.detections.smallIncrease.append([index,self.timestamps[index], row[3]])\n\n \n if resultCounter(self.detections) == 0:\n self.detections.results.append(None)\n print \"No observable jumps/dips in magnetic field strength\"\n else:\n for attribute, bins in classIterator(self.detections):\n if 'crease' in attribute:\n self.detections.results.append('Number of ' + attribute + ': ' + str(len(bins)))", "def __init__(self, Y, X, match=None, sub=None, match_func=np.mean):\n if _data.isfactor(Y):\n if sub is not None:\n Y = Y[sub]\n else:\n Y = _data.asvar(Y, sub)\n \n X = _data.ascategorial(X, sub)\n assert X.N == Y.N\n \n if match:\n match = _data.asfactor(match, sub)\n assert match.N == Y.N\n self.groups = {}\n \n # save args\n self.X = X\n self.Y = Y\n self.sub = sub\n self.match = match\n\n # extract cells and cell data\n self.data = {}\n self.data_indexes = {}\n self.cells = X.cells\n self.indexes = sorted(X.cells.keys())\n for cell in self.indexes:\n sub = X==cell\n self.data_indexes[cell] = sub\n newdata = Y.x[sub]\n if match:\n # get match ids\n group = match.x[sub]\n occurring_ids = np.unique(group)\n \n # sort\n if len(occurring_ids) < len(group):\n newdata = np.array([match_func(newdata[group==ID]) \n for ID in occurring_ids])\n group = occurring_ids\n else:\n sort_arg = np.argsort(group)\n group = group[sort_arg]\n newdata = newdata[sort_arg]\n \n self.groups[cell] = group\n self.data[cell] = newdata\n \n if match:\n # determine which cells compare values for dependent values on \n # match_variable\n# n_cells = len(self.indexes)\n# self.within = np.empty((n_cells, n_cells), dtype=bool)\n self.within = {}\n for cell1 in self.indexes:\n for cell2 in self.indexes:\n if cell1==cell2:\n self.within[cell1,cell2] = True\n else:\n v = self.groups[cell1] == self.groups[cell2]\n if v is not False:\n v = all(v)\n self.within[cell1,cell2] = v\n self.within[cell2,cell1] = v\n self.all_within = np.all(self.within.values())\n else:\n self.within = self.all_within = False", "def topography(x,y):\n \n z = -x/10\n \n N = len(x)\n for i in range(N):\n # Step\n if 10 < x[i] < 12:\n z[i] += 0.4 - 0.05*y[i]\n \n # Constriction\n if 27 < x[i] < 29 and y[i] > 3:\n z[i] += 2\n \n # Pole\n if (x[i] - 34)**2 + (y[i] - 2)**2 < 0.4**2:\n z[i] += 2\n \n return z", "def refine_dataset(original_data,settings):\n data = original_data[original_data.sweep_primary_load_temperature >= 
settings['valid_load_temp_range'][0]]\n data = data[data.sweep_primary_load_temperature <= settings['valid_load_temp_range'][1]]\n data = data[data.f_0_err/data.f_0 < settings['fractional_f_0_err_limit']]\n data = data[data.Q_err/data.Q < settings['fractional_Q_err_limit']]\n data = data[data.Q >= settings['valid_Q_range'][0]]\n data = data[data.Q <= settings['valid_Q_range'][1]]\n if settings['max_package_temp_deviation'] is not None:\n median_temp = np.median(data.sweep_primary_package_temperature)\n temp_deviations = np.abs(data.sweep_primary_package_temperature - median_temp)\n data = data[temp_deviations < settings['max_package_temp_deviation']]\n #data = data.sort([\"f_0\"])\n data['f_0_max'] = np.zeros((data.shape[0],))#data.groupby(\"resonator_index\")[\"f_0\"].transform(lambda x: x.max())\n data['Q_i_max'] = np.zeros((data.shape[0],))\n data['responsivity_Hz_per_K'] = np.zeros((data.shape[0],))\n data['responsivity_err'] = np.zeros((data.shape[0],))\n data['responsivity_offset'] = np.zeros((data.shape[0],))\n for index in np.unique(data.resonator_index):\n group = data[data.resonator_index == index]\n max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].f_0.max()\n data.f_0_max[data.resonator_index == index] = max\n max = group[group.sweep_primary_load_temperature < settings['f_0_max_temp_limit']].Q_i.max()\n data.Q_i_max[data.resonator_index == index] = max\n \n data['delta_f_0_Hz'] = (data.f_0-data.f_0_max)*1e6\n data['fractional_delta_f_0'] = data.delta_f_0_Hz/(1e6*data.f_0_max)#(1e6*data.noise_measurement_freq_MHz)\n data['fractional_delta_Q_i'] = data.Q_i/data.Q_i_max - 1\n\n for index in np.unique(data.resonator_index):\n group = data[data.resonator_index == index]\n try:\n (slope,offset),cov = np.polyfit(group.sweep_primary_load_temperature,group.delta_f_0_Hz,1,cov=True)\n print slope\n data.responsivity_Hz_per_K[data.resonator_index == index] = slope\n data.responsivity_offset[data.resonator_index == index] = offset\n data.responsivity_err[data.resonator_index == index] = np.sqrt(cov[1,1])\n except ValueError:\n continue\n except np.linalg.LinAlgError:\n continue\n eigvals_Hz = []\n nets = []\n for eigvals,freq,responsivity in zip(data.pca_eigvals,data.noise_measurement_freq_MHz,data.responsivity_Hz_per_K):\n # Convert eigvals spectra from 1/Hz units to Hz/sqrt(Hz)\n spectrum_Hz = np.sqrt(eigvals)*freq*1e6\n eigvals_Hz.append(spectrum_Hz)\n # Calculate net in muK sqrt(s). 
In the following, 1e6 is K -> uK factor, and sqrt(2) is 1/sqrt(Hz) -> sqrt(s) factor\n net = (1e6*spectrum_Hz/abs(responsivity))/np.sqrt(2)\n nets.append(net)\n data['pca_eigvals_Hz_per_rootHz'] = eigvals_Hz \n data['net_uK_rootsec'] = nets\n return data", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def make_rules(self, old_rules):\n rules = defaultdict(set)\n\n def recurse_disc_rule(attr, rule):\n \"\"\"\n Recursively partition multivalued discrete attributes if\n its worth it\n \"\"\"\n\n\n ro = RuleObj(rule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n\n if not self.prune_rule(ro):\n return set([ro])\n \n c = rule.filter.conditions[0]\n var_type = rule.data.domain[c.position].var_type\n\n if (var_type == Orange.feature.Type.Discrete):\n if len(c.values) == 1:\n return [ro]\n \n refiner = BeamRefiner(attrs=[attr], fanout=10)\n ret = set()\n for _, newrule in refiner(rule):\n 
ret.update(recurse_disc_rule(attr, newrule))\n return ret\n else:\n if len(rule.data) < self.min_pts:\n return [ro]\n return [ro]\n\n # XXX: figure out this logic!\n\n refiner = BeamRefiner(attrs=[attr], fanout=2)\n ret = set()\n for _, newrule in refiner(rule):\n newro = RuleObj(newrule,\n self.bad_err_funcs,\n self.good_err_funcs,\n self.bad_tables,\n self.good_tables)\n ret.update(recurse_disc_rule(attr, newrule))\n\n \n if old_rules is None:\n base_rule = SDRule(self.full_table, None) \n refiner = BeamRefiner(attrs=self.cols, fanout=10)\n #refiner = BeamRefiner(attrs=['recipient_nm'], fanout=30) \n\n \n for attr, rule in refiner(base_rule):\n ros = recurse_disc_rule(attr, rule)\n #self.top_k({None:ros})\n ros = filter(self.prune_rule, ros)\n rules[(attr,)].update(ros)\n\n else:\n attrs = old_rules.keys()\n for a_idx, attr1 in enumerate(attrs):\n for attr2 in attrs[a_idx+1:]:\n merged_attrs = set(attr1).union(attr2)\n max_attrs_len = max(len(attr1), len(attr2))\n if len(merged_attrs) == max_attrs_len:\n continue\n \n \n a1rules, a2rules = old_rules[attr1], old_rules[attr2]\n\n for ro in self.merge_dims(a1rules, a2rules):\n key = ro.rule.attributes\n\n #self.top_k({None:(ro,)})\n if self.prune_rule(ro):\n rules[key].add(ro)\n \n return rules", "def cogtest_manipulation(tbldict, roc_cols):\n \n tbldict['cogtests'] = pd.merge(tbldict['cogtestdates'],tbldict['cogdata'],on=['codeb','NP_Tp'])\n \n del tbldict['cogtestdates']\n del tbldict['cogdata']\n \n for col in roc_cols:\n tbldict['cogtests'] = cf.rate_of_change(tbldict['cogtests'], 'codeb', 'NP_Tp', \n 'NP_Date', col, '%s_sl' %col)\n \n #add column for maximum follow-up time per subject\n tbldict['cogtests'] = cf.max_per_sub(tbldict['cogtests'], 'codeb', 'NP_YrsRelBL', 'NP_Followup_Time')\n \n return tbldict", "def merge_ranges():", "def create_weight(less_data):\n # create data used in weight calc\n less_data['count'] = less_data.groupby('food_id')['food_id'].transform('count')\n less_data['diff'] = (datetime.datetime.now().date() - less_data['timestamp']).astype('timedelta64[D]')\n less_data['avg_diff'] = less_data.groupby('food_id')['diff'].transform('mean')\n less_data['rank'] = less_data['avg_diff'].rank(method='dense', ascending=False)\n\n # calculate weight\n less_data['weight'] = ((less_data['count'] * less_data['rank']) / len(less_data)) * 1000\n less_data = less_data[['food_id', 'weight']].drop_duplicates().sort_values(by=['weight'], ascending=False)\n\n return less_data", "def _fill_impropers_cross_maps(self) -> None:\n impropers, cross_maps = [], []\n for residue in self.residues:\n for improper in residue.impropers:\n impropers.append([self._id_to_index[x] for x in improper])\n for cross_map in residue.cross_maps:\n cross_maps.append([self._id_to_index[x] for x in cross_map])\n self.impropers, self.cross_maps = impropers, cross_maps", "def test_data():\n return [Donor(\"David Andrews\", [200.50, 400.00, 250.75]),\n Donor(\"John Goodfellow\", [25.00, 175.50]),\n Donor(\"Mary Suzuki\", [75.00, 125.00, 250.00]),\n Donor(\"Bonney Lake\", [500.50, 700.75, 500.25]),\n Donor(\"DeMarcus Rollins\", [155.00, 165.00])\n ]", "def parse_odd_data(data, event_sid, source):\n temp_market_types = []\n for market_type_obj in data:\n # logger.debug('market type obj: %s', market_type_obj)\n try:\n market_type = market_type_obj.get('name')\n bookmakers_obj = market_type_obj.get('bookmaker', dict())\n bookmakers_list = bookmakers_obj.get('data', list())\n except Exception as e:\n logger.data_error('%s, get odds, data error on parsing 
market_type_obj data', e)\n continue\n\n temp_bookmakers = []\n for bookmaker_obj in bookmakers_list:\n try:\n bookmaker_name = bookmaker_obj.get('name')\n bookmaker_sid = bookmaker_obj.get('id')\n odds_obj = bookmaker_obj.get('odds', dict())\n odds_list = odds_obj.get('data', list())\n except Exception as e:\n logger.data_error('%s, get odds, data error on parsing bookmaker_obj data', e)\n continue\n\n temp_odds = []\n for odd_obj in odds_list:\n try:\n label = odd_obj.get('label')\n odd_value = odd_obj.get('value')\n last_update_obj = odd_obj.get('last_update', dict())\n last_update_utc = get_date(last_update_obj, 'date')\n handicap = odd_obj.get('handicap')\n total = odd_obj.get('total')\n winning = odd_obj.get('winning')\n except Exception as e:\n logger.data_error('%s, get odds, data error on parsing odd_obj data', e)\n continue\n\n temp_odd = TempOddValuesObj(label, odd_value, last_update_utc, handicap, total, winning)\n temp_odds.append(temp_odd)\n\n temp_bookmaker = TempBookmaker(bookmaker_name, bookmaker_sid, temp_odds)\n temp_bookmakers.append(temp_bookmaker)\n\n temp_market_type = TempMarketType(market_type, temp_bookmakers, event_sid, source)\n temp_market_types.append(temp_market_type)\n return temp_market_types", "def get_look_a_like_pop_change_per_year(\n self,\n criteria: Literal[\"pop\", \"household\"] = \"pop\",\n ):\n return {\n land.name: land.get_pop_change_per_year(\n self.analyse_start_date,\n self.analyse_end_date,\n criteria=criteria,\n )\n for land in self.get_look_a_like()\n }", "def transform(pokemon):\n \n pokemon = pokemon.withColumnRenamed('Type 1', 'Type_1') \\\n .withColumnRenamed('Type 2', 'Type_2') \\\n .withColumnRenamed('Sp. Atk', 'Sp_Atk') \\\n .withColumnRenamed('Sp. Def', 'Sp_Def') \n \n \n max_attack_per_type = pokemon\n max_attack_per_type = max_attack_per_type.where(F.col(\"Generation\") == 1) \\\n .filter(pokemon.Name.like('%Mega%') == False) \\\n .select('Name', \\\n F.col('Type_1').alias('Type'), \\\n 'Attack', \\\n 'Sp_Atk', \\\n F.row_number().over(Window.partitionBy(\"Type_1\").orderBy(F.col(\"Attack\").desc(),F.col(\"Sp_Atk\").desc())).alias(\"rank\")) \\\n .where(F.col(\"rank\") == 1) \\\n .drop('rank')\n \n \n agg_legend_poke = pokemon\n agg_legend_poke = agg_legend_poke.where((F.col(\"Legendary\") == True) & (F.col(\"Type_2\") == 'Flying')) \\\n .groupBy(\"Type_1\").agg(F.count('Total').alias('Total_Number'), F.mean('Total').alias('Average_Power')) \\\n .orderBy(F.col('Total_Number').desc())\n \n \n special_criteria_poke = pokemon\n special_criteria_poke = special_criteria_poke.where(F.col(\"Generation\").isin(1,2,4,5)) \\\n .where((F.col(\"HP\") > 70) & (F.col(\"Attack\") > 100) & (F.col(\"Defense\") < 80)) \\\n .where(F.col(\"Speed\").between(50,100)) \\\n .withColumn('Name',F.trim(F.when(special_criteria_poke.Name.like('% %'), F.col(\"Name\").substr(F.lit(1), F.instr(F.col(\"Name\"), ' '))) \\\n .otherwise(F.col(\"Name\")))) \\\n .orderBy(F.col('Total').desc())\n\n return max_attack_per_type,agg_legend_poke,special_criteria_poke", "def outlierCleaner(predictions, ages, net_worths):\n \n cleaned_data = []\n \n\n from math import pow as mt\n import numpy as np\n\n \n residual_error = predictions-net_worths\n \n squares=[]\n squares=pow(residual_error,2)\n max=np.sort(squares,axis=None)[-9]\n \n j=0\n for i in predictions:\n if pow(predictions[j]-net_worths[j],2) <max:\n item = (ages[j], net_worths[j],residual_error[j])\n cleaned_data.append(item)\n j+=1\n print 'Length of cleaned data={}'.format(len(cleaned_data))\n\n \n ### your code 
goes here\n\n \n return cleaned_data", "def coarse_dataframe(geodf, side_square):\n\n # initialise the categories\n\n geodf['category'] = -1\n\n # do calculations on the first date, then extrapolate to the rest\n data_df = geodf[geodf['date'] == np.unique(geodf['date'])[0]]\n\n data_df = data_df.sort_values(by=['longitude', 'latitude'])\n\n n_grids = int(math.sqrt(data_df.shape[0]))\n\n category = 0\n\n for n in range(data_df.shape[0]):\n\n # only process lat,long point that do not have a category\n if data_df['category'].iloc[n] == -1:\n\n # get the side_square^2 nearest indexes to the point.\n indexes = []\n for i in range(side_square):\n for j in range(side_square):\n\n if n + n_grids * i + j < n_grids * n_grids and data_df['category'].iloc[n + n_grids * i + j] == -1:\n indexes.append(n + n_grids * i + j)\n\n # assing them all to the same categorty\n data_df['category'].iloc[indexes] = str(category)\n\n # get the geometry points of that catery\n cat_geometry = data_df[data_df['category'] == str(category)]['geometry']\n\n # get indexes of each point belonging to the category\n indexes_all = []\n for point in cat_geometry:\n indexes_all.append(geodf[geodf['geometry'] == point].index.tolist())\n\n indexes_all_flat = [item for sublist in indexes_all for item in sublist]\n\n geodf['category'].iloc[indexes_all_flat] = str(category)\n\n category = category + 1\n\n geodf['category'] = (geodf['category'].astype(str)).str.cat(geodf['date'], sep=\"_\")\n\n geodf = geodf.dissolve(by=['category', 'date'], aggfunc='mean')\n\n # re-assing the date because we are losing it\n geodf['date'] = [i[1] for i in geodf.index]\n\n geodf['category'] = [i[0] for i in geodf.index]\n\n return geodf", "def makeSlopeMap():\n a=numpy.zeros((ncents/2,2),numpy.int32)\n subFlag=makeSubapMap()#subapFlag.copy()\n # for i in range(7):#ngs 1-3, truth, lgs, lofs, hofs\n # tmp=subFlag[nsub[:i].sum():nsub[:i+1].sum()]\n # tmp.shape=nsuby[i],nsubx[i]\n # if i==5:#lofs\n # tmp[:]=sfNoObs*(i+1)\n # elif i==6:#hofs\n # tmp[:]=sf14NoObs*(i+1)\n # else:\n # tmp[:]=individualSubapFlag*(i+1)\n pos=0\n for i in range(subFlag.size):\n if subFlag[i]!=0:\n a[pos]=subFlag[i]\n pos+=1\n return a", "def create_data_ia(map_size, enemy_id, ia_id):\n data_ia = {'player1': {},\n 'player2': {},\n 'main_turn': 1,\n 'attack_turn': 0,\n 'map_size': map_size,\n 'enemy_id': enemy_id,\n 'ia_id': ia_id}\n\n\n order_unit = {}\n order_unit['if_left'] = [(2,3), (3,2), (1,3), (2,2), (3,1), (1,2), (2,1), (1,1)]\n order_unit['if_right'] = [(map_size -1, map_size -2), (map_size -2, map_size -1), (map_size, map_size -2), (map_size -1, map_size -1), (map_size -1, map_size -1), (map_size -2, map_size), (map_size, map_size-1), (map_size -1, map_size), (map_size, map_size)]\n\n for i in range(2):\n for line in range(1, 4):\n for column in range(1, 4):\n unit = 'E'\n life = 4\n\n if line >= 2 and column >= 2:\n unit = 'D'\n life = 10\n\n if line + column != 6:\n x_pos = abs(i * map_size - line + i)\n y_pos = abs(i * map_size - column + i)\n\n if i == 0:\n unit_id = (order_unit['if_left'].index((x_pos,y_pos))) + 1\n data_ia['player1'][(x_pos, y_pos)] = [unit, life, unit_id]\n else:\n unit_id = (order_unit['if_right'].index((x_pos,y_pos))) + 1\n data_ia['player2'][(x_pos, y_pos)] = [unit, life, unit_id]\n\n return data_ia" ]
[ "0.5584811", "0.55532134", "0.5427016", "0.5426902", "0.53494334", "0.53424084", "0.5334913", "0.52737105", "0.5264165", "0.52625084", "0.52520084", "0.5175564", "0.51129985", "0.5108802", "0.5096884", "0.5085431", "0.50571185", "0.50070727", "0.5000612", "0.49764836", "0.4973562", "0.49608374", "0.49518242", "0.49517295", "0.4936257", "0.49359763", "0.4929286", "0.49244767", "0.4910793", "0.49065593", "0.49051666", "0.48590398", "0.48558164", "0.48517564", "0.4850898", "0.4840327", "0.48395652", "0.48375446", "0.48278415", "0.4826243", "0.48184186", "0.48164758", "0.480995", "0.48044872", "0.47997877", "0.47952473", "0.47888616", "0.47789198", "0.47771984", "0.47614196", "0.4760749", "0.4760351", "0.4752479", "0.47450167", "0.47414845", "0.47351515", "0.473368", "0.4726656", "0.47247797", "0.472135", "0.47190034", "0.47142443", "0.471068", "0.47076708", "0.47035798", "0.47017658", "0.4701643", "0.46971402", "0.4695743", "0.46955726", "0.46951118", "0.469386", "0.46910062", "0.4687176", "0.46856034", "0.46812385", "0.46801493", "0.46795005", "0.46695438", "0.46695134", "0.4664146", "0.4660364", "0.4656565", "0.46557966", "0.4651097", "0.46497196", "0.46467194", "0.46441126", "0.4633722", "0.4632976", "0.46324772", "0.4630372", "0.46273756", "0.46175912", "0.46165642", "0.4613028", "0.46116772", "0.46097645", "0.46081078", "0.46075606", "0.46070355" ]
0.0
-1
When the user posts the find_org_to_create_account form, redirect to that page
def find_org_to_create_account(request):
    if request.method != 'POST' or not request.POST.get('organization_slug'):
        return HttpResponseRedirect(reverse('home'))
    else:
        org_slug = request.POST.get('organization_slug')
        return HttpResponseRedirect(reverse('create_org_account', args=[org_slug]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def oauth_start_flow():\n # Have to do authentication!\n rest.default_user_authentication()\n\n account_type = flask.request.args.get('type')\n if account_type is None:\n flask.abort(400)\n\n cls = ACCOUNT_TYPES.get(account_type, None)\n if cls is None:\n flask.about(400)\n\n key = str(uuid.uuid4())\n instance = cls(id=key)\n instance.put()\n\n return flask.redirect(instance.AUTH_URL %\n {'client_id': instance.CLIENT_ID,\n 'state': key})", "def office_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n office_form = OfficeForm()\n return render_to_response('office_form.html', {'form': office_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n office_form = OfficeForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if office_form.is_valid():\n of = office_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('office_form.html', \n {'form': office_form, 'form_errors': office_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def org_organisation_create_onaccept(form):\n\n db = current.db\n s3db = current.s3db\n ftable = s3db.pr_forum\n\n # Lookup the Reserves Forum\n forum = db(ftable.name == \"Reserves\").select(ftable.pe_id,\n limitby = (0, 1)\n ).first()\n try:\n reserves_pe_id = forum.pe_id\n except AttributeError:\n current.log.error(\"Unable to link Org Forum to Reserves Forum: Forum not Found\")\n return\n\n form_vars_get = form.vars.get\n organisation_id = form_vars_get(\"id\")\n\n # Lookup the Organisation\n otable = s3db.org_organisation\n org = db(otable.id == organisation_id).select(otable.pe_id,\n limitby = (0, 1)\n ).first()\n org_pe_id = org.pe_id\n\n # Create Forum\n record = {\"organisation_id\": organisation_id,\n \"name\": \"%s Reserves\" % form_vars_get(\"name\"),\n }\n forum_id = ftable.insert(**record)\n record[\"id\"] = forum_id\n s3db.update_super(ftable, record)\n forum_pe_id = record[\"pe_id\"]\n\n # Add the Hierarchy links\n s3db.pr_add_affiliation(org_pe_id, forum_pe_id, role=\"Realm Hierarchy\")\n s3db.pr_add_affiliation(reserves_pe_id, forum_pe_id, role=\"Realm Hierarchy\")", "def org_view(org_id):\n org_detail = None\n try:\n org_detail = Organisation.query.filter_by(id=org_id).first()\n\n except IndexError:\n pass\n\n if org_detail is not None:\n return render_template('organisations/org_view.html', org_detail=org_detail, org=org_detail)\n\n\n elif org_detail == None:\n return redirect(url_for('main.create_org'))\n\n else:\n abort(404)", "def form_valid(self, form):\n form.instance.auth_user = self.request.user\n form.instance.group = self.get_local_group()\n\n super(CreateApplicationView, self).form_valid(form)\n\n return redirect(self.success_url + '?id=' + str(self.object.pk))", "def post(self, request, *args, **kwargs):\n #set request form as new form\n form = SignUpForm(request.POST)\n #validation; if the form is invalid, return empty form again\n if not form.is_valid():\n return render(request, 'accounts/signup.html', {'form': form})\n\n #save form info into user database\n user_info_save = form.save(commit=False)\n 
user_info_save.set_password(form.cleaned_data['password'])\n user_info_save.is_manager = self.kwargs.get('is_manager')\n user_info_save.save()\n\n # #login; save the user data and update the database\n # auth_login(request, user_info_save)\n\n # redirect to shift index\n now = datetime.today()\n kwargs['month'] = now.month\n kwargs['year'] = now.year\n kwargs['day'] = now.day\n return redirect('accounts:login')", "def account_profile(request):\n get_or_creat(request)\n return redirect(\"/\")", "def form_valid(self, form):\n redirect_url = self.accept_auth_request(form.get_user())\n return HttpResponseRedirect(redirect_url)", "def post(self, request, *args, **kwargs):\n\n if request.user.is_authenticated:\n return redirect(reverse_lazy('feed_view'))\n\n return super(SignUpView, self).post(request, *args, **kwargs)", "def award_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n award_form = AwardForm()\n return render_to_response('award_form.html', {'form': award_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n award_form = AwardForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if award_form.is_valid():\n af = award_form.save(commit=False)\n af.company = company\n af.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('award_form.html', \n {'form': award_form, 'form_errors': award_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_account():\n form = CreateAccountForm(request.form)\n form.set_site_choices()\n\n if not form.validate():\n return create_account_form(form)\n\n screen_name = form.screen_name.data.strip()\n first_names = form.first_names.data.strip()\n last_name = form.last_name.data.strip()\n email_address = form.email_address.data.lower()\n password = form.password.data\n site_id = form.site_id.data\n\n if site_id:\n site = site_service.get_site(site_id)\n else:\n site = None\n\n if user_service.is_screen_name_already_assigned(screen_name):\n flash_error(gettext('This username cannot be used.'))\n return create_account_form(form)\n\n if user_service.is_email_address_already_assigned(email_address):\n flash_error(gettext('This email address cannot be used.'))\n return create_account_form(form)\n\n initiator_id = g.user.id\n\n try:\n user, event = user_creation_service.create_basic_user(\n screen_name,\n email_address,\n password,\n first_names=first_names,\n last_name=last_name,\n creator_id=initiator_id,\n )\n except user_creation_service.UserCreationFailed:\n flash_error(\n gettext(\n 'User \"%(screen_name)s\" could not be created.',\n screen_name=screen_name,\n )\n )\n return create_account_form(form)\n\n flash_success(\n gettext(\n 'User \"%(screen_name)s\" has been created.',\n screen_name=user.screen_name,\n )\n )\n\n if site:\n user_creation_service.request_email_address_confirmation(\n user, email_address, site_id\n )\n flash_success(\n gettext('An email has been sent to the corresponding address.'),\n icon='email',\n )\n\n user_signals.account_created.send(None, event=event)\n\n return redirect_to('.view', user_id=user.id)", "def create_account(request, role):\n context = {}\n if request.method == 
\"POST\":\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm(request.POST)\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm(request.POST)\n\n if(form.is_valid()):\n createNewUser(form)\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Account has been created for {username}!\")\n return redirect('login')\n else:\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm()\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm()\n else:\n context['error'] = \"URL does not exist. Please return to home and try again\"\n return render(request, 'classroom_main/create_account.html', context)\n\n context[\"type\"] = role\n context['title'] = \"Sign up to the Online Coding Classroom\"\n context['form'] = form\n\n return render(request, 'classroom_main/create_account.html', context)", "def funding_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n funding_form = FundingForm()\n return render_to_response('funding_form.html', {'form': funding_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n funding_form = FundingForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if funding_form.is_valid():\n of = funding_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('funding_form.html', \n {'form': funding_form, 'form_errors': funding_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def form_valid(self, form):\n login(self.request, form.get_user())\n return redirect('profile', id=form.get_user().id)", "def goto_make_new_user():\n\n return render_template('users/new.html')", "def post(self):\n cont = self.request_string('continue', default=\"/\")\n self.redirect(users.create_login_url(cont))", "def management_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n management_form = ManagementForm()\n return render_to_response('management_form.html', {'form': management_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n management_form = ManagementForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if management_form.is_valid():\n mf = management_form.save(commit=False)\n mf.company = company\n mf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('management_form.html', \n {'form': management_form, 'form_errors': management_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def customer_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request 
is GET presents empty form\n if request.method == 'GET':\n\n customer_form = CustomerForm()\n return render_to_response('customer_form.html', {'form': customer_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n customer_form = CustomerForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if customer_form.is_valid():\n of = customer_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('customer_form.html', \n {'form': customer_form, 'form_errors': customer_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def test_successful_registration_redirects_to_right_place(self):\n response = self.register_bob(follow=True)\n self.assertTrue(\n response.redirect_chain[0][0] == '/registration/register/complete/')", "def create_account():\n\n return render_template('account.html')", "def acquisition_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n acquisition_form = AcquisitionForm()\n return render_to_response('acquisition_form.html', {'form': acquisition_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n acquisition_form = AcquisitionForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if acquisition_form.is_valid():\n aqf = acquisition_form.save(commit=False)\n aqf.company = company\n aqf.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('acquisition_form.html', \n {'form': acquisition_form, 'form_errors': acquisition_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def user_signup():\n\n if request.method == \"GET\":\n return render_template(\"signup_form.html\")\n\n # post request logic starts here\n email = request.form.get(\"email\")\n password = request.form.get(\"password\")\n\n if email_is_valid(email):\n\n flash(\"It looks like you are already signed up for Readerboard! 
Try signing in instead.\")\n return redirect(\"/signin\")\n\n else:\n\n new_user = User()\n db.session.add(new_user)\n db.session.commit()\n new_acct = Account(user_id=new_user.user_id, email=email, password=password)\n db.session.add(new_acct)\n\n db.session.commit()\n session['acct'] = new_acct.acct_id\n\n return redirect(\"/auth/goodreads\")", "def create_user():\n if request.method == 'POST':\n PLAN.create_user(request.form['fname'],\n request.form['lname'],\n request.form['username'],\n request.form['password'],\n request.form['email'])\n return redirect(url_for('index'))\n return render_template('newuser.html')", "def post(self) :\n self.redirect('/admin')", "def login_success(request):\n if not hasattr(request.user, 'profile'):\n return redirect('index')\n else:\n return redirect('registration_process')", "def post(self, request, *args, **kwargs):\n application = self.get_object()\n app_complete = Application.objects.filter(\n pk=self.kwargs['app_complete']\n ).first()\n if is_application_owner(self.request.user, application) and (\n application.questionnaire.status != 'complete'\n ) and app_complete is not None and (\n app_complete.authorized_email is not None\n ) and app_complete.questionnaire.completed_by_candidate and (\n app_complete.questionnaire.status == 'complete'\n ):\n\n \"\"\"Attach authorized email & questionnaire to application\"\"\"\n application.authorized_email = app_complete.authorized_email\n application.questionnaire = app_complete.questionnaire\n application.save()\n\n \"\"\"Submit application if nomination is complete too\"\"\"\n if application.nomination.status == 'complete':\n submit_application(application)\n\n return redirect(self.get_success_url())\n else:\n raise Http404(_(\"No application found matching the query\"))", "def _signup(request, eamap, retfun=None):\r\n # save this for use by student.views.create_account\r\n request.session['ExternalAuthMap'] = eamap\r\n\r\n if settings.FEATURES.get('AUTH_USE_CERTIFICATES_IMMEDIATE_SIGNUP', ''):\r\n # do signin immediately, by calling create_account, instead of asking\r\n # student to fill in form. 
MIT students already have information filed.\r\n username = eamap.external_email.split('@', 1)[0]\r\n username = username.replace('.', '_')\r\n post_vars = dict(username=username,\r\n honor_code=u'true',\r\n terms_of_service=u'true')\r\n log.info('doing immediate signup for %s, params=%s', username, post_vars)\r\n student.views.create_account(request, post_vars)\r\n # should check return content for successful completion before\r\n if retfun is not None:\r\n return retfun()\r\n else:\r\n return redirect('/')\r\n\r\n # default conjoin name, no spaces, flattened to ascii b/c django can't handle unicode usernames, sadly\r\n # but this only affects username, not fullname\r\n username = re.sub(r'\\s', '', _flatten_to_ascii(eamap.external_name), flags=re.UNICODE)\r\n\r\n context = {'has_extauth_info': True,\r\n 'show_signup_immediately': True,\r\n 'extauth_domain': eamap.external_domain,\r\n 'extauth_id': eamap.external_id,\r\n 'extauth_email': eamap.external_email,\r\n 'extauth_username': username,\r\n 'extauth_name': eamap.external_name,\r\n 'ask_for_tos': True,\r\n }\r\n\r\n # Some openEdX instances can't have terms of service for shib users, like\r\n # according to Stanford's Office of General Counsel\r\n uses_shibboleth = (settings.FEATURES.get('AUTH_USE_SHIB') and\r\n eamap.external_domain.startswith(SHIBBOLETH_DOMAIN_PREFIX))\r\n if uses_shibboleth and settings.FEATURES.get('SHIB_DISABLE_TOS'):\r\n context['ask_for_tos'] = False\r\n\r\n # detect if full name is blank and ask for it from user\r\n context['ask_for_fullname'] = eamap.external_name.strip() == ''\r\n\r\n # validate provided mail and if it's not valid ask the user\r\n try:\r\n validate_email(eamap.external_email)\r\n context['ask_for_email'] = False\r\n except ValidationError:\r\n context['ask_for_email'] = True\r\n\r\n log.info('EXTAUTH: Doing signup for %s', eamap.external_id)\r\n\r\n return student.views.register_user(request, extra_context=context)", "def home_page():\n return redirect('/register')", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def form_valid(self, form):\n auth_login(self.request, form.get_user())\n return HttpResponseRedirect(self.get_success_url())", "def toLanding():\n return redirect(url_for('landingurl'))", "def signup():", "def what_next(request):\n user = request.user\n account = Account._default_manager.get(user=request.user)\n accounttype = account.accounttype\n if user.is_superuser:\n return HttpResponseRedirect(\"../../admin\")\n elif accounttype==\"patient\":\n \treturn HttpResponseRedirect(\"../../newsfeed\")\n elif accounttype==\"doctor\":\n return HttpResponseRedirect(\"../../patient_management\")\n elif accounttype==\"family\":\n return HttpResponseRedirect(\"../../broadcast/familypage\")\n else:\n return HttpResponse(\"accounttype=\"+accounttype+\"lala\")", "def post(self, request):\n user = self._authenticate(request)\n is_valid = self._validate_user(request, user)\n if user and is_valid:\n login(request, user)\n url = request.POST.get('next', '/')\n print request.POST\n return redirect(url)\n response = self._return_invalid_message(request)\n return response", "def certification_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n edit = validate_user_company_access_or_redirect(request,company)\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n certification_form = CertificationForm()\n return 
render_to_response('certification_form.html', {'form': certification_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n certification_form = CertificationForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if certification_form.is_valid():\n of = certification_form.save(commit=False)\n of.company = company\n of.save()\n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('certification_form.html', \n {'form': certification_form, 'form_errors': certification_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def create_account_request(request):\n if request.method == \"POST\":\n form = NewAccountForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Creation successful.\")\n return redirect(\"home\")\n messages.error(request, \"Unsuccessful creation. Invalid information.\")\n form = NewAccountForm\n customer_list = Customer.objects.all()\n context = {'customer_list': customer_list, 'account_form': form}\n return render(request, \"accounts/account_creation.html\", context)", "def test_signup_existing_user_redirects_to_login(self):\n tester = app.test_client(self)\n response = tester.post('/signup',\n data=dict(email='demo@email.com',\n usrname='admin',\n password='admin'),\n follow_redirects=True)\n self.assertEqual(response.status_code, 200)", "def done(self):\n #make sure the user doesn't already exist\n u = User.by_name(self.username)\n if u:\n msg = 'That user already exists.'\n self.render('signup-form.html', error_username = msg)\n else:\n u = User.register(self.username, self.password, self.email)\n u.put()\n\n self.login(u)\n self.redirect('/blog')", "def create_account(request):\n if request.POST:\n try:\n username, password = create_account_form(request, request.POST)\n\n user = authenticate(username=username, password=password)\n if user is None:\n messages.add_message(request, messages.ERROR, 'Oops! 
Something went wrong.')\n hospitals = Hospital.objects.all()\n return render(request, 'create_account.html', {'hospitals': hospitals})\n login(request, user)\n return redirect('base_dashboard')\n except ValueError:\n pass\n\n hospitals = Hospital.objects.all()\n\n return render(request, 'create_account.html', {'hospitals': hospitals})", "def step2(request):\n\tform = UserCreationAdminForm(request.POST or None)\n\tif form.is_valid():\n\t\tcomp = form.save()\n\t\tcomp.groups.add(Group.objects.get(name='usuario-admin-compras'))\n\n\t\treturn HttpResponseRedirect(\"/comprador/paso3/\")\n\n\t# crear el user profile\n\t# redireccionar al home\n\ttemplate = 'customerbuy/step2.html'\n\treturn render(request, template,{'form':form})\n\t#return render_to_response(\"customer/signup.html\", {'form': form,}, context_instance=RequestContext(request))", "def registration_parent_redirect(request, business_id):\n redirect_path = None\n if not request.user.is_authenticated():\n current_business_slots = Slot.current_slots.get_current_business_slots(\n business_id=business_id)\n if current_business_slots:\n try:\n advertiser = Advertiser.objects.get(\n id=current_business_slots[0].business.advertiser.id)\n if advertiser.has_usable_password():\n redirect_path = '%s?next=%s' % (reverse('sign-in'),\n reverse('advertiser-registration'))\n else:\n redirect_path = '%s?next=%s' % (reverse('forgot-password'),\n reverse('advertiser-registration'))\n except Advertiser.DoesNotExist:\n redirect_path = reverse('contact-us')\n return redirect_path", "def get_redirect_url(self, *args, **kwargs):\n if \"next\" in self.request.POST:\n return self.request.POST.get(\"next\")\n return reverse(\"my_reservations\")", "def goto_create(self):\n\n self.create.click()", "def post(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can() or allow_if_superuser():\n try:\n org = model.organization.get_organization(orgname)\n except model.InvalidOrganizationException:\n raise NotFound()\n\n details = request.get_json()\n activating_username = None\n\n if (\n \"activating_user\" in details\n and details[\"activating_user\"]\n and \"name\" in details[\"activating_user\"]\n ):\n activating_username = details[\"activating_user\"][\"name\"]\n\n delegate = details[\"delegate\"] if \"delegate\" in details else {}\n delegate_kind = delegate.get(\"kind\", None)\n delegate_name = delegate.get(\"name\", None)\n\n delegate_username = delegate_name if delegate_kind == \"user\" else None\n delegate_teamname = delegate_name if delegate_kind == \"team\" else None\n\n activating_user = (\n model.user.get_user(activating_username) if activating_username else None\n )\n delegate_user = model.user.get_user(delegate_username) if delegate_username else None\n delegate_team = (\n model.team.get_organization_team(orgname, delegate_teamname)\n if delegate_teamname\n else None\n )\n\n if activating_username and not activating_user:\n raise request_error(message=\"Unknown activating user\")\n\n if not delegate_user and not delegate_team:\n raise request_error(message=\"Missing delegate user or team\")\n\n role_name = details[\"role\"]\n\n prototype = model.permission.add_prototype_permission(\n org, role_name, activating_user, delegate_user, delegate_team\n )\n log_prototype_action(\"create_prototype_permission\", orgname, prototype)\n\n users_filter = {prototype.activating_user, prototype.delegate_user}\n org_members = model.organization.get_organization_member_set(\n org, users_filter=users_filter\n )\n return 
prototype_view(prototype, org_members)\n\n raise Unauthorized()", "def createOrgProfileFromForm(self):\n\n if self.data.org:\n form = OrgProfileForm(self.data.POST, instance=self.data.org)\n else:\n form = OrgCreateProfileForm(self.data.POST)\n\n if not form.is_valid():\n return None\n\n if not self.data.org:\n form.cleaned_data['founder'] = self.data.user\n form.cleaned_data['scope'] = self.data.program\n form.cleaned_data['scope_path'] = self.data.program.key().name() \n key_name = '%s/%s' % (\n self.data.program.key().name(),\n form.cleaned_data['link_id']\n )\n entity = form.create(commit=True, key_name=key_name)\n self.data.profile.org_admin_for.append(entity.key())\n self.data.profile.put()\n else:\n entity = form.save(commit=True)\n\n return entity", "def sign_up(request):\n #logged in users are redirected\n if request.user.is_authenticated:\n messages.error(request, _('You are already signed in, and can\\'t make a new account until you sign out.'), extra_tags='alert alert-warning')\n return render(request, 'you_did_something.html')\n\n #mark an event - someone visited this site\n event = Event(category='visited_sign_up_view')\n event.save()\n\n #create the form\n form = SignUpForm\n context = {\n 'form': form,\n 'submit_button_text': _('Sign up',)\n }\n # If this is a POST request then process the Form data\n if request.method == 'POST':\n\n # Create a form instance and populate it with data from the request (binding):\n form = SignUpForm(request.POST)\n context.update({'form': form})\n # Check if the form is valid:\n if form.is_valid():\n \n # process the data in form.cleaned_data as required (here we just write it to the model due_back field)\n user = User.objects.create_user(form.cleaned_data['username'], form.cleaned_data['username'], form.cleaned_data['password'])\n user.save()\n organization = Organization(\n owner=user,\n phone = form.cleaned_data['phone'],\n name = form.cleaned_data['name'],\n address_line_1 = form.cleaned_data['address_line_1'],\n address_line_2 = form.cleaned_data['address_line_2'],\n zip_code = form.cleaned_data['zip_code'],\n city = form.cleaned_data['city'],\n country = form.cleaned_data['country'],\n accepted_terms_and_conditions = form.cleaned_data['accepted_terms_and_conditions'],\n )\n organization.save()\n messages.success(request, _(\"Welcome aboard. 
Let's start by adding some employees to survey!\"), extra_tags='alert alert-success')\n send_mail(\n '[www] New user: %s!'%(user.username),\n 'User: %s has signed up!'%(user.username),\n 'sales@motpanel.com',\n ['sales@motpanel.com'],\n fail_silently=True,\n )\n if user is not None:\n auth.login(request, user)\n\n #mark an event - someone signed up successfully\n event = Event(category='completed_sign_up', user=user)\n event.save()\n\n # redirect to a new URL:\n return HttpResponseRedirect(reverse('surveys-dashboard'))\n else:\n #mark an event - someone failed to sign up\n comment = \"\"\n for field in form.visible_fields():\n if field.field.label != _(\"Choose a password\") and field.field.label != _(\"Confirm password\"):\n field_data = \"%s: %s \\n\"%(field.field.label, field.data)\n comment+=(field_data)\n event = Event(category='failed_sign_up', comment=comment)\n event.save()\n return render(request, 'sign_up_form.html', context)", "def test_register_link_in_sign_in_page_redirects_to_register_page(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text(\"Sign In\").click()\n contents = self.browser.find_element_by_class_name(\"sub-title\")\n self.assertTrue(\"Sign In\" in contents.text, \"Redirected page's subtitle did not contain 'Sign In'\")\n\n self.browser.find_element_by_link_text(\"New here? Register.\").click()\n contents = self.browser.find_element_by_class_name(\"sub-title\")\n self.assertTrue(\"Register\" in contents.text, \"Redirected page's subtitle did not contain 'Register'\")", "def new_address():\n user = dbwrangler.get_current_user()\n if user:\n return render_template(\"new_user_more.html\")\n else:\n return redirect(\"/\")", "def user_login(request):\n\n user = request.user\n if user.is_authenticated():\n status = user.get_profile().application.submitted #Getting the submission status\n if status: #If already submitted, takes to Completion Page\n return redirect('/allotter/complete/')\n else: #Otherwise to Details Submission form \n return redirect('/allotter/details/')\n\n if request.method == \"POST\":\n form = UserLoginForm(request.POST)\n if form.is_valid():\n user = form.cleaned_data\n login(request, user)\n status = user.get_profile().application.submitted #Getting the submission status \n if status:\n return redirect('/allotter/complete/') #Redirect to Completion Page\n else: \n return redirect('/allotter/details/') #Redirect to user details submission \n else:\n context = {\"form\": form}\n return render(request, 'allotter/login.html', context)\n else:\n form = UserLoginForm()\n context = {\"form\": form}\n return render(request, 'allotter/login.html', context)", "def create_team(request):\n if request.method == 'POST':\n email = request.session.get('email', None)\n team_name = request.POST.get('team_name', None)\n team = Team(name=team_name)\n team.save()\n\n message = \"Team created, please use the cool search feature and assign yourself to the team\"\n messages.add_message(request, messages.INFO, message)\n return redirect('teamsapp:teams')\n else:\n raise Http404('Not allowed')", "def set_up():\n domain = request.args.get('domain', None)\n #TODO: Check domain is valid and user is admin in apps\n client = Client.get_instance()\n admin_email = users.get_current_user().email()\n if not client:\n #If there is no client object, create it\n Client(id=1, primary_domain_name=domain,\n administrators=[admin_email], reply_to=admin_email).put()\n\n return redirect(url_for('start_oauth2_dance'))", "def landing_page():\n\n print 
session\n\n if 'acct' in session:\n acct = get_current_account(session['acct'])\n search = False\n return render_template(\"index.html\", acct=acct, search=search)\n\n else:\n return redirect(\"/signup\")", "def test_form_pass(self):\n resp = self.post_step(\"basics\")\n self.assertWizardResponse(resp, \"config\")\n resp = self.post_step(\"config\", session=list(resp._request.session.items()))\n self.assertIsInstance(resp, HttpResponseRedirect)\n self.assertEqual(resp.status_code, 302)\n self.assertEqual(resp[\"location\"], \"/projects/foobar/\")\n\n proj = Project.objects.get(name=\"foobar\")\n self.assertIsNotNone(proj)", "def post(self):\n post_data = request.get_json()\n\n # decode token and check if expired\n token = post_data.get('odoo_contact_token')\n odoo_contact_id, expiration_date = decode_token(token)\n\n if datetime.now() > expiration_date:\n return {\n \"error_id\": \"alumni_register_link_expired_error\",\n \"message\": \"Unauthorized: Registration link is expired.\"\n }, 401\n\n # check if such odoo user exists\n filter_list = []\n filter_list.append(['id', '=', odoo_contact_id])\n from app.controllers.odoo_controller import OdooController\n try:\n contacts_number = OdooController.count_number_of_odoo_contacts_by_filter_list(filter_list)\n except OdooIsDeadError as err:\n abort(503, err, error_id='odoo_connection_error')\n\n if contacts_number == 0:\n return {\n \"error_id\": \"odoo_contact_not_found_error\",\n \"message\": \"Odoo contact not found.\"\n }, 404\n\n # create alumni user\n from app.controllers.alumni_controller import AlumniController\n post_data.update({'odoo_contact_id': odoo_contact_id})\n response = AlumniController.create_alumni_user(post_data)\n\n \n if response[1] == 201:\n # delete record in alumni invite status\n from app.controllers.alumni_invite_status_controller import AlumniInviteStatusController\n AlumniInviteStatusController.delete_invite_status_record(odoo_contact_id)\n\n # send email for confirmation\n receiver_email = response[0]['email']\n alumni_uuid = response[0]['alumni_uuid']\n send_confirmation_email(receiver_email, alumni_uuid)\n\n return response", "def submit(request):\n if not request.user.is_authenticated():\n return proceed(request)\n # If dev has already agreed, continue to next step.\n user = UserProfile.objects.get(pk=request.user.id)\n if not user.read_dev_agreement:\n return redirect('submit.app.terms')\n return manifest(request)", "def create_account(request, post_override=None): # pylint: disable-msg=too-many-statements\r\n js = {'success': False} # pylint: disable-msg=invalid-name\r\n\r\n post_vars = post_override if post_override else request.POST\r\n extra_fields = getattr(settings, 'REGISTRATION_EXTRA_FIELDS', {})\r\n\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n post_vars = dict(post_vars.items())\r\n post_vars.update({'password': pipeline.make_random_password()})\r\n\r\n # if doing signup for an external authorization, then get email, password, name from the eamap\r\n # don't use the ones from the form, since the user could have hacked those\r\n # unless originally we didn't get a valid email or name from the external auth\r\n DoExternalAuth = 'ExternalAuthMap' in request.session\r\n if DoExternalAuth:\r\n eamap = request.session['ExternalAuthMap']\r\n try:\r\n validate_email(eamap.external_email)\r\n email = eamap.external_email\r\n except ValidationError:\r\n email = post_vars.get('email', '')\r\n if eamap.external_name.strip() == '':\r\n name = post_vars.get('name', 
'')\r\n else:\r\n name = eamap.external_name\r\n password = eamap.internal_password\r\n post_vars = dict(post_vars.items())\r\n post_vars.update(dict(email=email, name=name, password=password))\r\n log.debug(u'In create_account with external_auth: user = %s, email=%s', name, email)\r\n\r\n # Confirm we have a properly formed request\r\n for a in ['username', 'email', 'password', 'name']:\r\n if a not in post_vars:\r\n js['value'] = _(\"Error (401 {field}). E-mail us.\").format(field=a)\r\n js['field'] = a\r\n return JsonResponse(js, status=400)\r\n\r\n if extra_fields.get('honor_code', 'required') == 'required' and \\\r\n post_vars.get('honor_code', 'false') != u'true':\r\n js['value'] = _(\"To enroll, you must follow the honor code.\").format(field=a)\r\n js['field'] = 'honor_code'\r\n return JsonResponse(js, status=400)\r\n\r\n # Can't have terms of service for certain SHIB users, like at Stanford\r\n tos_required = (\r\n not settings.FEATURES.get(\"AUTH_USE_SHIB\") or\r\n not settings.FEATURES.get(\"SHIB_DISABLE_TOS\") or\r\n not DoExternalAuth or\r\n not eamap.external_domain.startswith(\r\n external_auth.views.SHIBBOLETH_DOMAIN_PREFIX\r\n )\r\n )\r\n\r\n if tos_required:\r\n if post_vars.get('terms_of_service', 'false') != u'true':\r\n js['value'] = _(\"You must accept the terms of service.\").format(field=a)\r\n js['field'] = 'terms_of_service'\r\n return JsonResponse(js, status=400)\r\n\r\n # Confirm appropriate fields are there.\r\n # TODO: Check e-mail format is correct.\r\n # TODO: Confirm e-mail is not from a generic domain (mailinator, etc.)? Not sure if\r\n # this is a good idea\r\n # TODO: Check password is sane\r\n\r\n required_post_vars = ['username', 'email', 'name', 'password']\r\n required_post_vars += [fieldname for fieldname, val in extra_fields.items()\r\n if val == 'required']\r\n if tos_required:\r\n required_post_vars.append('terms_of_service')\r\n\r\n for field_name in required_post_vars:\r\n if field_name in ('gender', 'level_of_education'):\r\n min_length = 1\r\n else:\r\n min_length = 2\r\n\r\n if len(post_vars[field_name]) < min_length:\r\n error_str = {\r\n 'username': _('Username must be minimum of two characters long'),\r\n 'email': _('A properly formatted e-mail is required'),\r\n 'name': _('Your legal name must be a minimum of two characters long'),\r\n 'password': _('A valid password is required'),\r\n 'terms_of_service': _('Accepting Terms of Service is required'),\r\n 'honor_code': _('Agreeing to the Honor Code is required'),\r\n 'level_of_education': _('A level of education is required'),\r\n 'gender': _('Your gender is required'),\r\n 'year_of_birth': _('Your year of birth is required'),\r\n 'mailing_address': _('Your mailing address is required'),\r\n 'goals': _('A description of your goals is required'),\r\n 'city': _('A city is required'),\r\n 'country': _('A country is required')\r\n }\r\n js['value'] = error_str[field_name]\r\n js['field'] = field_name\r\n return JsonResponse(js, status=400)\r\n\r\n max_length = 75\r\n if field_name == 'username':\r\n max_length = 30\r\n\r\n if field_name in ('email', 'username') and len(post_vars[field_name]) > max_length:\r\n error_str = {\r\n 'username': _('Username cannot be more than {0} characters long').format(max_length),\r\n 'email': _('Email cannot be more than {0} characters long').format(max_length)\r\n }\r\n js['value'] = error_str[field_name]\r\n js['field'] = field_name\r\n return JsonResponse(js, status=400)\r\n\r\n try:\r\n validate_email(post_vars['email'])\r\n except ValidationError:\r\n 
js['value'] = _(\"Valid e-mail is required.\").format(field=a)\r\n js['field'] = 'email'\r\n return JsonResponse(js, status=400)\r\n\r\n try:\r\n validate_slug(post_vars['username'])\r\n except ValidationError:\r\n js['value'] = _(\"Username should only consist of A-Z and 0-9, with no spaces.\").format(field=a)\r\n js['field'] = 'username'\r\n return JsonResponse(js, status=400)\r\n\r\n # enforce password complexity as an optional feature\r\n if settings.FEATURES.get('ENFORCE_PASSWORD_POLICY', False):\r\n try:\r\n password = post_vars['password']\r\n\r\n validate_password_length(password)\r\n validate_password_complexity(password)\r\n validate_password_dictionary(password)\r\n except ValidationError, err:\r\n js['value'] = _('Password: ') + '; '.join(err.messages)\r\n js['field'] = 'password'\r\n return JsonResponse(js, status=400)\r\n\r\n # Ok, looks like everything is legit. Create the account.\r\n try:\r\n with transaction.commit_on_success():\r\n ret = _do_create_account(post_vars)\r\n except AccountValidationError as e:\r\n return JsonResponse({'success': False, 'value': e.message, 'field': e.field}, status=400)\r\n\r\n (user, profile, registration) = ret\r\n\r\n dog_stats_api.increment(\"common.student.account_created\")\r\n create_comments_service_user(user)\r\n\r\n context = {\r\n 'name': post_vars['name'],\r\n 'key': registration.activation_key,\r\n }\r\n\r\n # composes activation email\r\n subject = render_to_string('emails/activation_email_subject.txt', context)\r\n # Email subject *must not* contain newlines\r\n subject = ''.join(subject.splitlines())\r\n message = render_to_string('emails/activation_email.txt', context)\r\n\r\n # don't send email if we are doing load testing or random user generation for some reason\r\n if not (settings.FEATURES.get('AUTOMATIC_AUTH_FOR_TESTING')):\r\n from_address = microsite.get_value(\r\n 'email_from_address',\r\n settings.DEFAULT_FROM_EMAIL\r\n )\r\n try:\r\n if settings.FEATURES.get('REROUTE_ACTIVATION_EMAIL'):\r\n dest_addr = settings.FEATURES['REROUTE_ACTIVATION_EMAIL']\r\n message = (\"Activation for %s (%s): %s\\n\" % (user, user.email, profile.name) +\r\n '-' * 80 + '\\n\\n' + message)\r\n send_mail(subject, message, from_address, [dest_addr], fail_silently=False)\r\n else:\r\n user.email_user(subject, message, from_address)\r\n except Exception: # pylint: disable=broad-except\r\n log.warning('Unable to send activation email to user', exc_info=True)\r\n js['value'] = _('Could not send activation e-mail.')\r\n # What is the correct status code to use here? I think it's 500, because\r\n # the problem is on the server's end -- but also, the account was created.\r\n # Seems like the core part of the request was successful.\r\n return JsonResponse(js, status=500)\r\n\r\n # Immediately after a user creates an account, we log them in. They are only\r\n # logged in until they close the browser. 
They can't log in again until they click\r\n # the activation link from the email.\r\n login_user = authenticate(username=post_vars['username'], password=post_vars['password'])\r\n login(request, login_user)\r\n request.session.set_expiry(0)\r\n\r\n # TODO: there is no error checking here to see that the user actually logged in successfully,\r\n # and is not yet an active user.\r\n if login_user is not None:\r\n AUDIT_LOG.info(u\"Login success on new account creation - {0}\".format(login_user.username))\r\n\r\n if DoExternalAuth:\r\n eamap.user = login_user\r\n eamap.dtsignup = datetime.datetime.now(UTC)\r\n eamap.save()\r\n AUDIT_LOG.info(\"User registered with external_auth %s\", post_vars['username'])\r\n AUDIT_LOG.info('Updated ExternalAuthMap for %s to be %s', post_vars['username'], eamap)\r\n\r\n if settings.FEATURES.get('BYPASS_ACTIVATION_EMAIL_FOR_EXTAUTH'):\r\n log.info('bypassing activation email')\r\n login_user.is_active = True\r\n login_user.save()\r\n AUDIT_LOG.info(u\"Login activated on extauth account - {0} ({1})\".format(login_user.username, login_user.email))\r\n\r\n dog_stats_api.increment(\"common.student.account_created\")\r\n redirect_url = try_change_enrollment(request)\r\n\r\n # Resume the third-party-auth pipeline if necessary.\r\n if settings.FEATURES.get('ENABLE_THIRD_PARTY_AUTH') and pipeline.running(request):\r\n running_pipeline = pipeline.get(request)\r\n redirect_url = pipeline.get_complete_url(running_pipeline['backend'])\r\n\r\n response = JsonResponse({\r\n 'success': True,\r\n 'redirect_url': redirect_url,\r\n })\r\n\r\n # set the login cookie for the edx marketing site\r\n # we want this cookie to be accessed via javascript\r\n # so httponly is set to None\r\n\r\n if request.session.get_expire_at_browser_close():\r\n max_age = None\r\n expires = None\r\n else:\r\n max_age = request.session.get_expiry_age()\r\n expires_time = time.time() + max_age\r\n expires = cookie_date(expires_time)\r\n\r\n response.set_cookie(settings.EDXMKTG_COOKIE_NAME,\r\n 'true', max_age=max_age,\r\n expires=expires, domain=settings.SESSION_COOKIE_DOMAIN,\r\n path='/',\r\n secure=None,\r\n httponly=None)\r\n return response", "def test_redirect(self):\n\n result = self.client.post(\"/registration\", data={\"first_name\": \"Bobby\", \"last_name\": \"Bob\", \"email\": \"bobbers@gmail.com\", \"password\": \"1234\",\n \"birthday_month\": \"January\", \"birthday_day\": 12, \"birthday_year\": 1991}, follow_redirects=True)\n self.assertIn(b\"Email address\", result.data)", "def signup(request):\r\n\tif request.user.is_authenticated:\r\n\t\t# Redirect user to home if already logged in\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\tif request.method == 'POST':\r\n\t\tform = SignUpForm(request.POST)\r\n\t\tif form.is_valid():\r\n\t\t\tuser = form.save()\r\n\t\t\tuser.refresh_from_db() # Retreive the newly saved object\r\n\t\t\tuser.is_active = False\r\n\t\t\tuser.profile.is_developer = form.cleaned_data.get('is_developer')\r\n\t\t\tuser.save()\r\n\t\t\t# Get current domain name and generate the user token\r\n\t\t\tcurrent_site = get_current_site(request)\r\n\t\t\tencodeded_uid = urlsafe_base64_encode(force_bytes(user.pk))\r\n\r\n\t\t\t# Create email subject and body\r\n\t\t\tsubject = 'Activate Your PlayMe Account'\r\n\t\t\tmessage = render_to_string('account_activation_email.html', {\r\n\t\t\t\t'user': user,\r\n\t\t\t\t'domain': current_site.domain,\r\n\t\t\t\t'uid': encodeded_uid.decode('utf-8'),\r\n\t\t\t\t'token': 
account_activation_token.make_token(user),\r\n\t\t\t})\r\n\t\t\tuser.email_user(subject, message)\r\n\t\t\treturn redirect('account_activation_sent')\r\n\telse:\r\n\t\tform = SignUpForm()\r\n\treturn render(request, 'registration/signup.html', {'form': form})", "def form_valid(self, form):\n # Switching between temporary registration and main registration is easy with the is_active attribute.\n # The withdrawal process will also improve if you only set is_active to False.\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n\n # Send activation URL\n current_site = get_current_site(self.request)\n domain = current_site.domain\n context = {\n 'protocol': 'https' if self.request.is_secure() else 'http',\n 'domain': domain,\n 'token': dumps(user.pk),\n 'user': user,\n }\n\n subject = render_to_string('register/mail_template/create/subject.txt', context)\n message = render_to_string('register/mail_template/create/message.txt', context)\n\n user.email_user(subject, message)\n return redirect('register:user_create_done')", "def post_activation_redirect(self, request, user):\n\t\tnewMember = StaffMember.objects.filter(user_id__exact=user.pk).get()\n\t\tlabGroup = LabGroup.objects.filter(pk=1).get()\n\t\tnewMember.lab_group = labGroup\n\t\tnewMember.save()\n\t\treturn ('registration_activation_complete', (), {})", "def testCreateOrg(self):\n self.timeline.orgSignup()\n self.data.createProfile()\n self.record.createOrgApp('new_org', self.data.user)\n\n url = '/gci/profile/organization/' + self.gci.key().name()\n create_url = url + '?org_id=new_org'\n response = self.get(create_url)\n self.assertResponseOK(response)\n self.assertOrgProfilePageTemplatesUsed(response)\n \n postdata = {\n 'founder': self.data.user, 'home': self.createDocument().key(),\n 'scope': self.gci, 'irc_channel': 'irc://example.com',\n 'pub_mailing_list': 'http://example.com',\n }\n response, properties = self.modelPost(create_url, GCIOrganization, postdata)\n self.assertResponseRedirect(response, url + '/new_org?validated')\n profile = db.get(self.data.profile.key())\n self.assertEqual(1, len(profile.org_admin_for))", "def create_calendar(request):\n if request.method == 'POST':\n\n form = CalendarForm(request.POST)\n \n if form.is_valid():\n calendar = form.save(commit=False) # prvent form from saving since we need to link company\n calendar.company = request.user.company\n calendar.save()\n return redirect('appointment:calendar_list')\n else:\n form = CalendarForm()\n return render(request, 'calendar_form.html', {'form': form})", "def post(self, *args, **kwargs): # pylint: disable=W0613\n reg_type = self.request.POST[\"registration_type\"]\n user = self.request.user\n\n if reg_type == \"registration\":\n message = self.register_user(user)\n elif reg_type == \"deregistration\":\n message = self.deregister_user(user)\n else:\n message = \"Her skjedde det noe galt.\"\n\n self.messages.info(message)\n return HttpResponseRedirect(self.get_object().get_absolute_url())", "def test_user_add_button_redirects_to_register_page(self):\n self.browser.get(self.warno_url)\n self.browser.find_element_by_link_text('Users').click()\n self.browser.find_element_by_id('new-user-redirect-button').click()\n contents = self.browser.find_element_by_class_name('sub-title')\n self.assertTrue('Register' in contents.text, \"Redirected page's subtitle did not contain 'Register'\")", "def entry_page():\n return redirect(url_for('index'))", "def dropbox_auth_finish():\n try:\n access_token, dropbox_id, url_state = (get_auth_flow().\n 
finish(request.args))\n except DropboxOAuth2Flow.BadRequestException, e:\n abort(400)\n except DropboxOAuth2Flow.BadStateException, e:\n abort(400)\n except DropboxOAuth2Flow.CsrfException, e:\n abort(403)\n except DropboxOAuth2Flow.NotApprovedException, e:\n flash('Not approved? Why not, bro?')\n return redirect(url_for('home'))\n except DropboxOAuth2Flow.ProviderException, e:\n app.logger.exception(\"Auth error\" + e)\n abort(403)\n\n if dropbox_id is None:\n return redirect(url_for('home'))\n\n user = User.query.filter_by(dropbox_id=dropbox_id).first()\n new_user = user is None\n if user is None:\n user = User(dropbox_id)\n user.set_new_emailer()\n db.session.add(user)\n\n user.access_token = access_token\n (user.name, user.email) = get_dropbox_name_email(access_token)\n db.session.commit()\n\n if new_user:\n analytics.track(str(user.id), 'Registered')\n analytics.track(str(user.id), 'Logged in')\n\n session['dropbox_id'] = user.dropbox_id\n\n return redirect(url_for('home'))", "def form_valid(self, form):\n redirect_url = self.accept_consent_request(None)\n return HttpResponseRedirect(redirect_url)", "def post_registration_redirect(self, request, user):\n\t\treturn ('registration_complete', (), {})", "def signup(request):\n if request.method == 'POST':\n form = SignUpForm(request.POST)\n if form.is_valid():\n user = form.save(commit=False)\n user.is_active = False\n user.save()\n current_site = get_current_site(request)\n subject = 'Activate Your neighwatch Account'\n message = render_to_string('registration/activation_email.html', {\n 'user': user,\n 'domain': current_site.domain,\n 'uid': urlsafe_base64_encode(force_bytes(user.pk)),\n 'token': account_activation_token.make_token(user),\n })\n user.email_user(subject, message)\n return redirect('account_activation_sent')\n else:\n form = SignUpForm()\n return render(request, 'registration/registration_form.html', {'form': form})", "def competitors_create(request, slug):\n #verifies if the company exists if not returns a 404 page\n company =get_object_or_404(Company,slug=slug)\n\n #verifies the person has access to the company or is an incubator employee\n edit = validate_user_company_access_or_redirect(request,company)\n\n #if the request is GET presents empty form\n if request.method == 'GET':\n\n competitors_form = CompetitorsForm()\n return render_to_response('competitors_form.html', {'form': competitors_form, 'company':company},\n context_instance=RequestContext(request))\n \n else:\n competitors_form = CompetitorsForm(request.POST)\n #if is POST Validates the form is well filled and save it redirecting to the company page\n if competitors_form.is_valid():\n cf = competitors_form.save(commit=False)\n\n #verify if other companies with the same info exists anywhere\n try: \n comparison = Competitors.objects.get(name=cf.name,company= company)\n \n if str(comparison.name) != str(cf.name):\n cf.company = company\n cf.save()\n \n else:\n form_errors = {\"Name - The competitor \" + str(comparison.name).capitalize() + \" has been already created for \"+ str(company.name).capitalize() + \".\"}\n return render_to_response('competitors_form.html', \n {'form': competitors_form, 'form_errors': form_errors, 'company':company},\n context_instance=RequestContext(request))\n\n except Competitors.DoesNotExist :\n cf.company = company\n cf.save()\n\n\n \n return HttpResponseRedirect('/company/'+str(slug))\n\n #if not well filled redirect to the original create and display error\n else:\n return render_to_response('competitors_form.html', \n {'form': 
competitors_form, 'form_errors': competitors_form.errors, 'company':company},\n context_instance=RequestContext(request))", "def sign_up():\n if CURR_USER_KEY in session:\n del session[CURR_USER_KEY]\n\n form = SignUp_Form()\n\n if form.validate_on_submit():\n try:\n employee = Employee.register(\n username = form.username.data,\n password = form.password.data, \n email = form.email.data, \n first_name = form.first_name.data,\n last_name = form.last_name.data,\n hire_date = form.hire_date.data,\n is_admin = form.is_admin.data\n )\n \n db.session.commit()\n\n except IntegrityError:\n flash(\"Email already in use\", \"danger\")\n return render_template(\"sign-up.html\", form = form)\n login(employee)\n msg = EmailMessage()\n msg['Subject'] = \"BLAH\"\n msg['From'] = EMAIL_ADDRESS\n msg['To'] = f\"{employee.email}\"\n msg.set_content('This is the plain text for the email')\n msg.add_alternative(\"\"\"\\\n <h1> Welcome to My Certs!</h1>\n \"\"\", subtype = \"html\")\n\n with smtplib.SMTP(\"smtp.gmail.com\", 587) as smtp:\n smtp.ehlo()\n smtp.starttls()\n smtp.ehlo()\n smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)\n smtp.send_message(msg)\n return redirect (f\"/mycerts/{employee.id}\")\n\n else: \n\n return render_template(\"sign-up.html\", form = form)", "def view_signup(self):\n with self.client.get(\"/signup\", catch_response=True) as response:\n for r_hist in response.history:\n if r_hist.status_code > 200 and r_hist.status_code < 400:\n response.failure(\"Logged on: Got redirect to /home\")", "def home_redirect(request):\n if request.user.is_authenticated() and request.user.is_staff:\n return redirect(\"volunteers\")\n elif request.user.is_authenticated() and not request.user.is_superuser:\n related_volunteer = get_object_or_404(Volunteer, user_id=request.user.pk)\n return redirect(\"edit-volunteer-profile\", volunteer_id=related_volunteer.pk)\n else:\n return redirect(\"new-volunteer\")", "def start_oauth_view(request):\n url = get_oauth_url()\n return redirect(url)", "def test_successful_registration_redirects(self):\n response = self.register_bob()\n self.assertTrue(response.status_code == 302)", "def create(owner):\n data = request_content(request)\n resource = logic.resource.create(owner, data)\n return redirect(url_for('.get', owner=owner, \n resource=resource.name))", "def get(self, request, *args, **kwargs):\n\n if request.user.is_authenticated:\n return redirect(reverse_lazy('feed_view'))\n\n return super(SignUpView, self).get(request, *args, **kwargs)", "def form_valid(self, ppform, address_form,cuform):\n addr = address_form.save()\n cuformo = cuform.save()\n ppform.save()\n self.object.address = addr\n self.object.user = cuformo\n self.object.save()\n\n return HttpResponseRedirect(self.get_success_url())", "def CreateAccount():\n login_frame.forget()\n self.LoadCreateAccountWindow()", "def signup():\n print(\"In signup.....\")\n auth_service = AuthService()\n form = SignUpForm()\n if request.method == 'GET':\n return render_template('auth/signup.html', title='Sign Up', form=form)\n\n elif request.method == 'POST':\n if form.validate_on_submit():\n user_dto = UserDto(form.email.data, form.password.data, form.name.data, form.contact.data)\n try:\n auth_service.create_user(user_dto)\n flash('SignUp successfull name = \"%s\" , email = \"%s\"' % (form.name.data, form.email.data))\n return redirect(url_for('auth.signin'))\n except UserExistsException:\n flash(\"User already exists\")\n return redirect(url_for('auth.signup'))\n flash('SignUp Failed')\n return 
render_template('auth/signup.html', title='Sign Up', form=form)", "def post(self, orgname):\n permission = AdministerOrganizationPermission(orgname)\n if permission.can():\n organization = model.organization.get_organization(orgname)\n assert organization.stripe_id\n\n request_data = request.get_json()\n success_url = request_data[\"success_url\"]\n cancel_url = request_data[\"cancel_url\"]\n\n try:\n cus = billing.Customer.retrieve(organization.stripe_id)\n except stripe.error.APIConnectionError as e:\n abort(503, message=\"Cannot contact Stripe\")\n\n if not cus:\n raise InvalidRequest(\"Invalid Stripe customer\")\n\n if not cus.subscription:\n raise InvalidRequest(\"Invalid Stripe subscription\")\n\n try:\n checkout_session = stripe.checkout.Session.create(\n payment_method_types=[\"card\"],\n mode=\"setup\",\n customer=organization.stripe_id,\n setup_intent_data={\n \"metadata\": {\n \"kind\": \"account_change_cc\",\n \"namespace\": organization.username,\n \"performer\": get_authenticated_user().username,\n \"ip\": get_request_ip(),\n \"subscription_id\": cus.subscription.id,\n },\n },\n success_url=success_url,\n cancel_url=cancel_url,\n )\n\n return checkout_session\n except stripe.error.APIConnectionError as se:\n abort(503, message=\"Cannot contact Stripe: %s\" % se)\n except Exception as e:\n abort(500, message=str(e))\n\n raise Unauthorized()", "def home(request):\n assert isinstance(request, HttpRequest)\n return redirect('/departments')", "def landing():\n if g.user:\n return render_template('landing.html', user=g.user)\n return redirect(url_for('login'))", "def corso(request, nomeCorso):\n url = '/courseManager/'+nomeCorso\n return HttpResponseRedirect(url)", "def _go_to_page(self):\n self.salesforce.go_to_setup_home()\n self.eda.wait_for_new_window(\"Home | Salesforce\")\n self.selenium.switch_window(\"Home | Salesforce\")\n self.salesforce.wait_until_loading_is_complete()", "def create_account():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = create_user(username, password)\n\n if not user:\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return redirect(url_for('index'))\n\n return render_template('createaccount.html')", "def newRestaurantPage():\n if 'username' not in login_session:\n return redirect('/login')\n if request.method == 'POST':\n res_name = request.form['res_name']\n user_id = login_session['user_id']\n if res_name:\n db_methods.addNewRestaurant(res_name, user_id)\n time.sleep(0.1)\n return redirect(\"/restaurants\")\n else:\n error = \"You need to enter the name of the restaurant you want to add.\"\n return render_template('newrestaurant.html', error = error)\n else:\n return render_template('newrestaurant.html')", "def form_valid(self, form):\n\n ai = form.save(\n token=self.request.session.get('token', False),\n aiid=self.kwargs.get('aiid', '')\n )\n\n # Check if save was successful\n if ai['status']['code'] in [200, 201]:\n level = messages.SUCCESS\n redirect_url = HttpResponseRedirect(\n reverse_lazy(\n self.request.GET.get('next', self.success_url),\n kwargs={'aiid': ai.get('aiid', self.kwargs.get('aiid'))}\n )\n )\n else:\n level = messages.ERROR\n redirect_url = self.render_to_response(\n self.get_context_data(form=form)\n )\n\n messages.add_message(self.request, level, ai['status']['info'])\n\n return redirect_url", "def checkout(self): \n mtool = 
getToolByName(self.context, \"portal_membership\")\n ICheckoutManagement(self.context).redirectToNextURL(\"AFTER_START\")", "def get_redirect_url(self):\n return reverse('accounts:home')", "def createForm(request):\n if request.method == 'POST':\n form = QuestionFormForm(request.POST)\n if form.is_valid():\n #return the uuid so the organization can use that link in the post to connect to the questionform\n formID = form.save().UUID\n #send them the url for the form\n messages.success(request, 'You have made your question form accessible at: ' + request.build_absolute_uri('/post/') + f'apply/{formID}')\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)\n form = QuestionFormForm()\n context = {'form': form}\n return render(request, 'scholarship.html', context=context)", "def rsvp(request):\n if request.method == 'POST':\n try:\n \"\"\" Create if neither ('address' or 'post') are filled in \"\"\"\n if not (request.POST['address'] or request.POST['phone']):\n person = Person(name=request.POST['name'],\n email=request.POST['email'],\n ynm=request.POST['response'])\n person.save()\n\n return HttpResponseRedirect(reverse('chipy:index'))\n else:\n\n return HttpResponse(\"Not a valid rsvp submission\")\n except Person.DoesNotExist:\n raise Http404\n\n\n return HttpResponseRedirect(reverse('chipy:index'))", "def form_valid(self, form):\n\n entity = form.save(\n token=self.request.session.get('token', False),\n **self.kwargs\n )\n\n # Check if save was successful\n if entity['status']['code'] in [200, 201]:\n level = messages.SUCCESS\n\n redirect_url = HttpResponseRedirect(\n reverse_lazy(\n self.success_url,\n kwargs={\n 'aiid': self.kwargs['aiid'],\n 'entity_name': form.cleaned_data['entity_name']\n }\n )\n )\n\n else:\n level = messages.ERROR\n redirect_url = self.render_to_response(\n self.get_context_data(form=form)\n )\n\n messages.add_message(self.request, level, entity['status']['info'])\n\n return redirect_url", "def sign_up():\n #POST - the info coming from the sign-up-form\n\n #get username and password that was filled in sign-up form\n #if username exits - flash \"username taken\" and redirct to /sign-up-form\n\n #else save the new user to the database - user table, flash success message\n #and redirect back to /more-details/cat_id", "def dispatch(self, request, *args, **kwargs):\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"home\"))\n return super(RegisterView, self).dispatch(request, *args, **kwargs)", "def defaultlanding():\n #send user to description page if not logged in\n if not g.user:\n return redirect(url_for('description'))\n #display leaderboard for competition if logged in\n return redirect(url_for('leaderboard'))", "def join():\n # getting event id from homepage\n event_id = request.args.get('eventId')\n user_id = session['user']\n register = Register(user_id = user_id, event_id = event_id)\n db.session.add(register)\n db.session.commit()\n\n return redirect('/')", "def form_valid(self, form):\n user = form.save(commit=False)\n # print(user)\n messages.success(self.request, 'Successfully registered')\n user.save()\n login(self.request, user)\n return redirect('post:home')\n\n return kwargs", "def users_page(request):\n if request.method == 'POST':\n user = request.user\n form = CompetenceForm(request.POST)\n\n if form.is_valid():\n form.instance.person = request.user\n form.save()\n # return redirect('user-page')\n # competence = Competence.objects.create_competence(user, form.title_of_competence, 
form.level_of_competence)\n else:\n form = CompetenceForm()\n\n return render(request, 'core/user-page.html', {'form': form})", "def registered_form():\n # print \"Hello POST\"\n # if request.method == \"POST\":\n reg_email = request.form.get(\"email\")\n\n reg_password = request.form.get(\"password\")\n\n # Get age value, or assign as None.\n if request.form.get(\"age\"):\n age = request.form.get(\"age\")\n else:\n age = None\n\n # Get zipcode value, or assign as None.\n if request.form.get(\"zipcode\"):\n zipcode = request.form.get(\"zipcode\")\n else:\n zipcode = None\n\n print reg_email\n\n if User.query.filter(User.email == reg_email):\n flash(\"There is already an account for that email address.\")\n return redirect('/')\n else:\n new_user = User(email=reg_email, password=reg_password, age=age, zipcode=zipcode)\n print new_user\n db.session.add(new_user)\n db.session.commit()\n \n return redirect(\"/\")" ]
[ "0.603463", "0.6004999", "0.5971245", "0.5959396", "0.5919654", "0.58651173", "0.58611304", "0.58471966", "0.582282", "0.57899594", "0.578932", "0.57571214", "0.57511485", "0.574679", "0.57035786", "0.56987613", "0.56865185", "0.5668221", "0.5650241", "0.5633905", "0.5633754", "0.56261593", "0.56210124", "0.558143", "0.5575769", "0.5558815", "0.55578035", "0.55500686", "0.55401987", "0.55401987", "0.5534506", "0.5524755", "0.5520805", "0.5516249", "0.55149466", "0.55125654", "0.5505887", "0.54998606", "0.54874504", "0.54824245", "0.5472167", "0.54650146", "0.5462077", "0.54581547", "0.5458145", "0.5443073", "0.5441383", "0.54369766", "0.5435017", "0.54314244", "0.5425797", "0.5424073", "0.5420394", "0.541911", "0.54186624", "0.5415462", "0.5409062", "0.5405631", "0.539872", "0.5397788", "0.5393508", "0.5393295", "0.53919977", "0.53888863", "0.5383921", "0.53829104", "0.53778887", "0.5373787", "0.53580767", "0.534295", "0.53395677", "0.5332566", "0.5326604", "0.5314323", "0.5312373", "0.53058034", "0.5302263", "0.5298163", "0.52955085", "0.52817243", "0.52801067", "0.5276926", "0.5276666", "0.52609986", "0.52592593", "0.5258898", "0.5258365", "0.52581805", "0.5256874", "0.52472526", "0.524653", "0.52457315", "0.5242069", "0.5241498", "0.5237883", "0.52360505", "0.52359545", "0.52348036", "0.5234096", "0.5230272" ]
0.81982374
0
runs gradient descent to find a minimum of x^2 + y^2
def run_gradient_descent(seed=0):
    random.seed(seed)

    colors = [color for color in matplotlib.colors.cnames]

    def random_point():
        return (2 * random.random() - 1, 2 * random.random() - 1)

    def df(x_i):
        """this is the gradient of x^2 + y^2"""
        return [2 * x_ij for x_ij in x_i]

    for color in random.sample(colors, 50):
        path = take(10, gradient_descent(df, random_point()))
        for i, (x, y) in enumerate(path):
            plt.plot(x, y, color=color, marker='*', markersize=20-2*i)

    plt.show()
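The document above calls two helpers, `take` and `gradient_descent`, that are not included in this record; a minimal sketch of what such helpers could look like is given below (the generator design, step size, and names of parameters here are assumptions for illustration, not taken from the record).

```python
# Hypothetical helpers assumed by run_gradient_descent above; the step size
# and generator-based design are illustrative assumptions, not from the record.
import itertools


def take(n, iterable):
    # Return the first n items of an iterable as a list.
    return list(itertools.islice(iterable, n))


def gradient_descent(df, x0, step_size=0.1):
    # Yield successive iterates while stepping against the gradient df.
    x = list(x0)
    while True:
        yield tuple(x)
        grad = df(x)
        x = [x_i - step_size * g_i for x_i, g_i in zip(x, grad)]
```

With helpers of this shape, the first ten iterates plotted by the document shrink toward the origin, the minimum of x^2 + y^2 named in the query.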
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def projected_gradient_descent(self, x, y):\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n targeted = self.y_target is not None\n num_channels = x.shape[1]\n\n if self.random:\n x_adv = random_perturbation(x_adv, self.norm, self.eps)\n\n for i in range(self.num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n\n prediction = self.model(_x_adv)\n loss = self.loss_fn(prediction, self.y_target if targeted else y)\n loss.backward()\n\n with torch.no_grad():\n # Force the gradient step to be a fixed size in a certain norm\n if self.norm == 'inf':\n gradients = _x_adv.grad.sign() * self.step_size\n else:\n # Note .view() assumes batched image data as 4D tensor\n gradients = _x_adv.grad * self.step_size / _x_adv.grad.view(\n _x_adv.shape[0], -1) \\\n .norm(self.norm, dim=-1) \\\n .view(-1, num_channels, 1, 1)\n\n if targeted:\n # Targeted: Gradient 
descent with on the loss of the (incorrect) target label\n # w.r.t. the image data\n x_adv -= gradients\n else:\n # Untargeted: Gradient ascent on the loss of the correct label w.r.t.\n # the model parameters\n x_adv += gradients\n\n # Project back into l_norm ball and correct range\n if self.norm == 'inf':\n # Workaround as PyTorch doesn't have elementwise clip\n x_adv = torch.max(torch.min(x_adv, x + self.eps), x - self.eps)\n else:\n delta = x_adv - x\n\n # Assume x and x_adv are batched tensors where the first dimension is\n # a batch dimension\n mask = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1) <= self.eps\n\n scaling_factor = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1)\n scaling_factor[mask] = self.eps\n\n # .view() assumes batched images as a 4D Tensor\n delta *= self.eps / scaling_factor.view(-1, 1, 1, 1)\n\n x_adv = x + delta\n\n x_adv = x_adv.clamp(*self.clamp)\n\n return x_adv.detach()", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, verbose=verbose)", "def minfunc(beta, yvec, xmat ):\n return yvec - exp(dot(xmat, beta))", "def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)", "def gradient_descent(x, y, w, max_iter, alpha = 0.001):\n \n N = y.shape[0]\n \n J_hist = np.zeros(max_iter)\n\n print(\"\\nGradient descent starts\\n\")\n\n for i in range(0, max_iter):\n \n J = np.sum( (y_hat(x, w) - y) ** 2 ) / (2 * N)\n\n J_hist[i] = J\n \n print(\"Iteration %d, J(w): %f\\n\" % (i, J))\n \n gradient = np.dot(x.T, y_hat(x, w) - y) / N \n \n w = w - alpha * gradient\n\n print(\"Gradient descent finished.\\n\")\n \n return (J_hist, w)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n for n_iter in range(max_iters):\n # compute gradient\n grad = compute_gradient(y, tx, w)\n # gradient w by descent update\n if n_iter % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n w -= gamma * grad\n\n return w, compute_cost(y, tx, w)", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n 
num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w_start = initial_w\n w = w_start\n\n for n_iter in range(max_iters):\n gradient = compute_gradient(y, tx, w)\n loss = compute_loss(y,tx,w)\n w = w - gamma * gradient\n\n return w, loss", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n n = len_data\n # WE NEED TO transpose data_x into (p+1) *n ,theta is 1*(p+1)\n prod = np.dot(theta, data_x.transpose())\n\n prod -= data_y\n print(\"pro: data_x\", prod.shape, data_x.shape)\n #prod represent the loss of the hypothesis and true label\n sum_grad = np.dot(prod, data_x)\n print(\"总梯度的值:\",sum_grad.shape)\n\n # batch-gradient descent\n theta = theta -(alpha / n) * sum_grad\n return theta", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def run_gradient_descent2(seed=0):\n\n colors = [color for color in matplotlib.colors.cnames]\n\n def random_point():\n return (3 * random.random() - 1, 3 * random.random() - 1)\n\n def f(x):\n \"\"\"has min at (1,0), saddle point at (-1,0)\"\"\"\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)\n\n def df(x):\n return ((1 - x[0]**2) * f(x), -2 * x[1] * f(x))\n\n for color in random.sample(colors, 50):\n path = take(100, gradient_descent(df, random_point()))\n for i, (x, y) in enumerate(path):\n plt.plot(x, y, color=color, marker='*', markersize=25-i/4)\n\n plt.show()", "def differential_of_minimized_cost_function_theta_1(m = no_of_training_examples, x, y, theta_0, theta_1, initial_theta1):\r\n initial_val = initial_theta1\r\n compute_1 = [val cost_function(val, theta_0, theta_1)] # computes the corresponding value of x given theta_0 and theta_1(list comprehension)\r\n compute_2 = [var_x-var_y for var_x, var_y in zip(compute_1,y)] # subtracts the corresponding values of x and y (list comprehension)\r\n theta = sum(compute_2)/m\r\n if initial_theta == updated_theta:\r\n return updated_theta\r\n else:\r\n differential_of_minimized_cost_function(m = no_of_training_examples, x, y, theta_0, theta_1 = theta, initial_theta1 = initial_val) # returns itself (recursion)\r", "def minimize(func, grad_func, x, y, theta_0, alpha_0=0.01, max_it=100):\n data = list(zip(x, y))\n theta, alpha = theta_0, alpha_0\n min_theta, min_value, it = None, float(\"inf\"), 0\n\n while it < max_it:\n\n value = sum(func(x_i, y_i, theta) for x_i, y_i in data)\n\n if value < 
min_value:\n min_theta = theta\n min_value = value\n it = 0\n alpha = alpha_0\n else:\n it += 1\n alpha *= 0.9\n\n for (x_i, y_i) in in_random_order(data):\n grad_i = grad_func(x_i, y_i, theta)\n theta = vector_subtract(theta, scalar_multiply(alpha, grad_i))\n\n return min_theta", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, batch_size=10, verbose=False):\n return stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, batch_size=batch_size, verbose=verbose)", "def differential_of_minimized_cost_function_theta_0(m = no_of_training_examples, x, y, theta_0, theta_1, initial_theta0):\r\n initial_val = initial_theta0\r\n compute_1 = [val cost_function(val, theta_0, theta_1)] # computes the corresponding value of x given theta_0 and theta_1(list comprehension)\r\n compute_2 = [var_x-var_y for var_x, var_y in zip(compute_1,y)] # subtracts the corresponding values of x and y\r\n theta = sum(compute_2)/m\r\n if initial_theta0 == theta:\r\n return theta\r\n else:\r\n differential_of_minimized_cost_function(m = no_of_training_examples, x, y, theta_0 = theta, theta_1, initial_theta0 = initial_val) # returns itself (recursion)\r", "def gradient_descent(X, Y, epsilon=1e-6, l=1, step_size=1e-4, max_steps=1000):\n beta = np.zeros(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def stochastic_gradient_descent(X, y, max_niter=100):\n m, n = X.shape\n w = np.zeros((n, 1))\n\n for i in range(max_niter):\n data_indices = list(range(m))\n for j in range(m):\n alpha = 4.0 / (i + j + 1.0) + 0.01\n rand_idx = int(np.random.uniform(0, len(data_indices)))\n h = sigmoid(np.dot(X[rand_idx, :], w))\n error = h - float(y[rand_idx])\n w = w - alpha * np.outer(X[rand_idx, :], error)\n print('{0} iterations with error {1} weight {2} alpha={3}'.format(i, error, w, alpha))\n del(data_indices[rand_idx])\n classify.w = w\n return w", "def gradient_descent(initial_theta, X, y, niter, alpha, Lambda=0.0):\n theta_list = []\n cost_list = []\n\n theta = initial_theta\n for i in range(0, niter):\n theta -= alpha*gradient(theta, X, y, Lambda)\n theta_list.append(theta)\n cost_list.append(cost(theta, X, y, Lambda))\n\n return theta_list, cost_list", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, 1, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n 
loss = loss_function(y, tx, w)\n return w, loss", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def fit(self, X, y):\n self.x_values = X\n self.y_values = y\n self.gradient_descent(self.coefficients, X, y)", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. 
- a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def grad_step1(self, x, y):\n t = 0.25 * (x @ self.w)\n u_prime = t - 0.5 * y\n if self.debug:\n print(f\"-> u_prime={u_prime}\\n\\n\\n\")\n return u_prime", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n for n_iter in range(max_iters):\r\n gradient = compute_gradient(y,tx,w)\r\n loss = compute_loss_MSE(y,tx,w)\r\n w = w - gamma * gradient\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1], loss_list[-1]", "def auxminf2(x):\n # Sum over data points\n f = 0.0\n for m_ind in range(cfg.ntrain):\n f += auxminrho1(x,m_ind) + auxminrho2(x,m_ind) \n \n return f", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n loss = compute_gradient(y, tx, initial_w)\n for it, (yb, txb) in enumerate(random_batches(y, tx, max_iters)):\n # compute 1 SGD and the loss\n grad = compute_gradient(np.array([yb]), txb[np.newaxis, :], w)\n # update w\n w -= gamma * grad\n if it % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n return w, compute_cost(y, tx, w)", "def gradient_descent(f, intial_guess, step_size = 0.01, max_iter = 10000, tol = 1e-12):\n\n x = np.array(intial_guess)\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if np.sqrt(np.abs(gradient).sum()) < tol:\n break\n x = x - step_size * gradient\n return (x, i + 1)", "def GradientDescent(X, Y, alpha, iterations):\n\n\tn = X.shape[0]\n\tbeta = np.zeros((X.shape[1],1))\n\n\tfor i in range(1,iterations):\n\t\tbeta = beta - alpha*np.dot(np.transpose(X), np.dot(X, beta) - Y)/float(n)\n\t\t# risk = ((np.dot(X, beta) - Y)**2)/(2*float(n))\n\n\treturn beta", "def 
get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def argminY( self ):\n min = 1e30\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min: min = p[1]\n return min", "def stochastic_gradient_descent(X, Y, epsilon=0.0001, l=1, step_size=0.01,\n max_steps=1000):\n beta = np.ones(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def function_minimization():\n # Initialize a random value for our initial x\n x_v = tf.Variable([tf.random.normal([1])])\n print(\"Initializing x={}\".format(x_v.numpy()))\n\n learning_rate = 1e-2 # learning rate for SGD\n history = []\n # Define the target value\n x_f = 4\n\n # We will run SGD for a number of iterations. At each iteration, we compute the loss,\n # compute the derivative of the loss with respect to x, and perform the SGD update.\n for _ in range(500):\n with tf.GradientTape() as tape:\n # define the loss as described above\n loss = tf.pow((x_v - x_f), 2)\n # loss minimization using gradient tape\n grad = tape.gradient(loss, x_v) # compute the derivative of the loss with respect to x\n new_x = x_v - learning_rate * grad # sgd update\n x_v.assign(new_x) # update the value of x\n history.append(x_v.numpy()[0])\n\n # Plot the evolution of x as we optimize towards x_f!\n plt.plot(history)\n plt.plot([0, 500], [x_f, x_f])\n plt.legend(('Predicted', 'True'))\n plt.xlabel('Iteration')\n plt.ylabel('x value')\n plt.savefig(os.path.join(os.path.dirname(__file__), '../results/tutorial', 'minimization.png'))", "def sdot_asgd(y, nu, C, x_sample, W = None):\n # if W == None: W = np.zeros(y.shape[0]) else: assert(W.shape[0] == y.shape[0])\n W = np.zeros(y.shape[0]) # (500, 0)\n W_tmp = np.copy(W)\n #source_density = dp.get_density_by_name(name_source) # Density of source distribution\n h_save = np.empty_like(0)\n # Print iteration status\n niter = np.shape(x_sample)[0]\n for t in range(niter):\n if (t+1) % 10000 == 0:\n print(\"Iteration: {}\".format(t+1))\n \n # Sample from source distribution\n #x = source_density.sample_from(1).numpy()\n x = x_sample[t]\n\n # Gradient Step\n r = np.sum(np.square(x-y) , axis=1) - W_tmp # |x-y|^2 - W_tmp (900, )\n indx_min = np.argmin(r)\n grad = np.copy(nu)\n grad[indx_min] = grad[indx_min] - 1 # (900, )\n\n # Evaluate empirical Reward\n r2 = np.sum(np.square(x-y) , axis=1) - W # |x-y|^2 - W_tmp (900, )\n h = np.min(r2) + np.dot(W,nu) \n h_save = np.hstack((h_save,h))\n\n # Gradient Ascent \n W_tmp = W_tmp + C/np.sqrt(t+1) *grad # t+1 because it starts from 0\n W = t/(t+1) *W + 1/(t+1)*W_tmp # t+1 because it starts from 0\n # W = W / np.max(np.abs(W)) \n\n return W, h_save", "def minimum(x, y):\r\n # see decorator for function body\r", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n w = initial_w\n loss = compute_loss(y, tx, w)\n batch_size=1\n\n for minibatch_y, minibatch_tx in batch_iter(y, tx, batch_size, max_iters):\n loss = compute_loss(y, tx, w) # avant: minibatch_y et minibatch_tx\n gradients = compute_gradient(minibatch_y, minibatch_tx, w)\n w = w - gamma * gradients\n\n return w, loss", "def coordinate_descent(X, y, lam, theta=None, maxit=10000, eta=1e-8):\n \n n, m = X.shape\n \n # initialize the coefficients\n if theta is None: theta = np.random.randn(m, 1)\n \n # compute squared 
columns\n v = np.diag(np.dot(X.T, X))\n \n # coordinate optimization\n i = 0\n chg = 1\n theta_old = np.empty((m, 1))\n Xtheta = np.dot(X, theta)\n while i < maxit and chg > eta:\n i += 1\n theta_old = theta.copy()\n for j in range(m): \n Xtheta -= theta[j]*np.atleast_2d(X[:,j]).T\n alpha = np.dot(X[:,j].T, y - Xtheta)\n if abs(alpha) > lam: \n theta[j] = np.sign(alpha) * (abs(alpha) - lam) / v[j]\n Xtheta += theta[j]*np.atleast_2d(X[:,j]).T\n else:\n theta[j] = 0\n chg = np.sum((theta - theta_old)**2)/np.sum(theta_old**2)\n \n return theta, i", "def stochasticGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta) \n for i in range(m):\n # alpha = 4.0 / (1.0 + i) + 0.01 \n loss = hypothesis[i] - y[i]\n # gradient = np.dot(x[i],loss)\n gradient = x[i,:].transpose() * loss \n theta = theta - alpha * gradient\n cost = np.sum((hypothesis-y)**2)/(2*m)\n recurseCount += 1\n return recurseCount,theta", "def fmin_gradient_descent(f, x0, fprime=None, learn_rate=1e-2, momentum=0,\n weight_decay=0, learn_rate_schedule=None, momentum_schedule=None,\n max_grad_norm=0, learn_rate_drop_iters=0, decrease_type='linear',\n adagrad_start_iter=0, max_iters=1000, iprint=1, f_info=None, i_exe=0,\n check_points=None, f_exe=None, verbose=True):\n f_and_fprime = f_and_fprime_decorator(f, fprime, weight_decay)\n\n opt_schedule = OptimizationSchedule(learn_rate, momentum,\n learn_rate_schedule=learn_rate_schedule,\n momentum_schedule=momentum_schedule,\n learn_rate_drop_iters=learn_rate_drop_iters,\n adagrad_start_iter=adagrad_start_iter, decrease_type=decrease_type)\n\n x = x0\n x_inc = x * 0\n x_adagrad_history = x * 0\n\n max_grad_norm = float(max_grad_norm)\n\n t_start = time.time()\n y, x_grad = f_and_fprime(x)\n grad_scale = np.linalg.norm(x_grad, ord=2)\n\n if f_exe is not None and (i_exe > 0 or (check_points is not None and 0 in check_points)):\n f_exe(0, x)\n\n if verbose:\n s = 'iter %5d, f=%.8f, |change|_max=%10s, |grad|=%.8f' % (0, y, 'N/A', grad_scale)\n if f_info is not None:\n s += ', ' + f_info(x)\n s += ', time %.2f' % (time.time() - t_start)\n print s\n\n t_start = time.time()\n for i in range(max_iters):\n i_iter = i + 1\n learn_rate, momentum = opt_schedule.get_learn_rate_and_momentum(i_iter)\n\n if adagrad_start_iter > 0 and i_iter >= adagrad_start_iter:\n if i_iter == adagrad_start_iter:\n lr_scale = np.abs(x_grad).mean()\n x_adagrad_history += x_grad**2\n learn_rate = learn_rate * lr_scale / (np.sqrt(x_adagrad_history) + _DIVISION_EPS)\n\n if max_grad_norm > 0 and grad_scale > max_grad_norm:\n x_grad *= max_grad_norm / grad_scale\n\n x_inc = momentum * x_inc - learn_rate * x_grad\n x += x_inc\n\n \"\"\"\n if i == 546:\n import ipdb\n ipdb.set_trace()\n \"\"\"\n\n y, x_grad = f_and_fprime(x)\n grad_scale = np.linalg.norm(x_grad, ord=2)\n\n if iprint > 0 and i_iter % iprint == 0:\n s = 'iter %5d, f=%.8f, |change|_max=%.8f, |grad|=%.8f' % (i_iter, y, np.abs(x_inc).max(), grad_scale)\n if f_info is not None:\n s += ', ' + f_info(x)\n s += ', time %.2f' % (time.time() - t_start)\n if verbose:\n print s\n t_start = time.time()\n\n if f_exe is not None:\n if check_points is not None:\n if (i+1) in check_points:\n f_exe(i+1, x)\n elif i_exe > 0 and (i+1) % i_exe == 0:\n f_exe(i+1, x)\n\n return x", "def run_linear_regression(data_x, data_y):\n iteration_s = 100\n alpha = 0.0001550\n\n no_features = 
data_x.shape[1]\n len_data = data_x.shape[0]\n print(\"no_feature :, len_data: \", no_features , len_data)\n #intinilize the the\n theta = np.zeros(no_features)\n #iterations how many time do\n for i in range(0,iteration_s):\n theta = run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta)\n error = sum_of_square_error(data_x, data_y, len_data, theta)\n print(\"at iteration %d - Error is %.5f \" % (i+1, error))\n print(\"theta shape\", theta.shape)\n return theta", "def linreg_stochastic_grad(X, y, alpha=.01):\n m = X.shape[0]\n n = X.shape[1]\n theta = np.zeros(n)\n for i in range(m):\n delta = alpha * (np.dot(theta.transpose(), X[i,:]) -y[i]) * X[i,:]\n theta = theta - delta\n return theta", "def ols_gradient(\n w: FloatTensor, x: FloatTensor, y: FloatTensor, _: float\n) -> FloatTensor:\n n, d = x.size()\n return (2 / n) * (x.t().matmul(x.matmul(w) - y))", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def _optimize(self, X, y, W, steps):\n\n X = X.flatten(1)\n\n min_x, max_x = X.min(), X.max()\n len_x = max_x - min_x\n \n bestd = 1\n bestp = min_x\n minerr = W.sum()\n\n if len_x > 0.0:\n for p in np.arange(min_x, max_x, len_x/steps):\n for d in [-1, 1]:\n gy = np.ones((y.size))\n gy[X*d < p*d] = -1\n err = np.sum((gy != y)*W)\n if err < minerr:\n minerr = err\n bestd = d\n bestp = p\n\n return minerr, bestd, bestp", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if 
cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def grad(self, A, y, x):\n z = y * A.dot(x) # decision value for each observation\n grad_x = -1*A[z < 1].T.dot(y[z < 1])\n # Gradient normalized by the num obs\n return grad_x / y.size", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # randomly select datapoint\n id = np.random.randint(y.shape[0])\n sample_y, sample_x = y[id], tx[id]\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n loss = loss_function(y, tx, w)\n return w, loss", "def gradient(self, x):\n pass", "def minimize(self, func, grad, x0, args=()):\n learning_rate = self._learning_rate\n best_x = x = x0\n best_value = func(x, *args)\n iters_without_improve = 0\n\n for iteration in range(self._max_iterations):\n gradient = grad(x, *args)\n\n # If absolute values of all partial derivatives are equal to 0 with specified accuracy, then parameters are\n # close enough to the minimum and there is no need to continue gradient descent.\n if np.abs(gradient).max() <= self._accuracy:\n break\n\n x = x - learning_rate * gradient\n\n # If new values of x haven't lead to decrease of the function value for the specified number of iteration,\n # the x is reverted to its previous best value and the learning rate is reduced\n value = func(x, *args)\n if value > best_value:\n iters_without_improve += 1\n if iters_without_improve >= self._lr_reduce_patience:\n x = best_x\n learning_rate *= self._lr_reduce_factor\n else:\n iters_without_improve = 0\n best_value = value\n best_x = x\n\n return best_x", "def batchGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n xTran = x.transpose()\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta)\n loss = hypothesis - y\n cost = np.sum(loss**2)/(2*m)\n gradient = np.dot(xTran,loss)/m\n theta = theta - alpha*gradient\n recurseCount += 1\n return recurseCount,theta", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x 
/ (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def compute_cost_gradient1(x, y0, W, b):\n # compute cost\n A = x @ W + b\n y = sigmoid(A)\n if y0 is None:\n return y\n cost = -1 * np.sum(y0 * np.log(y) + (1 - y0) * np.log(1 - y))\n # compute gradients\n dy = -(y0 * (y ** -1) - (1 - y0) * ((1 - y) ** -1))\n dA = dy * (y * (1 - y))\n dW = x.T @ dA\n db = np.sum(dA)\n return cost, dW, db", "def train_step(self, x, y):\n\n with tf.GradientTape() as tape:\n y_pred = self.model(x)\n loss = self.loss_object(y, y_pred)\n gradients = tape.gradient(loss, self.model.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))\n\n return loss", "def gradient_descent_step(self, x, y, learning_rate):\n # compute derivative of loss wrt Z\n dZ = self.derivative_loss(y, self.predict(x))\n dW = np.dot(dZ, x)\n # subtract average derivative from weights\n self.w -= learning_rate * 1.0/dW.shape[0] * dW\n if self.fit_b:\n self.b -= learning_rate * (1.0/x.shape[0] * np.sum(dZ))", "def regress_origin(x, y):\r\n x, y = array(x, 'Float64'), array(y, 'Float64')\r\n return sum(x * y) / sum(x * x), 0", "def d_func(x, y):\n return np.array((2.0 * (x - 1) - 400.0 * x * (y - x**2), 200.0 * (y - x**2)))", "def zerofcn2min(params, xvalues, yvalues):\n thetaS = params['thetaS']\n a = params['a']\n ds = params['ds']\n d2thetaS = (2*thetaS)/(1+2*thetaS*a)\n p2 = d2thetaS*numpy.ones(len(xvalues))\n return p2 - yvalues", "def gradient_descent(X, Y, iterations, alpha, l = 0):\n \n # initialize B0, B1, ..., Bp\n betas = np.array([0.0]*(len(X[0])+1))\n \n # initialize list of cost vs iterations; should see a gradual descent\n costs = np.array([0.0]*iterations)\n \n # number of observations\n m = len(X)\n \n for i in range(iterations):\n sumterms = 1.0/m * ([estimation(xvec,betas) for xvec in X] - Y)\n errors = np.array([0.0]*len(betas))\n errors[0] = sum(sumterms) # error term for B0 has no multiplier\n for k in range(1,len(betas)):\n errors[k] = np.dot(sumterms, [row[k-1] for row in X]) + l/m*betas[k]\n \n betas = betas - alpha * errors\n costs[i] = cost(X, Y, betas, l)\n \n return betas, costs", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def miniBatchStochasticGD(self, x, y, batchSize, epochs):\n print(\"Training using stochastic minibatch gradient descent\")\n epoch = 0\n fullSetSize = x.shape[0]\n #output training progress ten times in run\n outputChunk = int ( epochs / 10 )\n\n while epoch <= epochs:\n \n #output progress?\n if epoch % outputChunk is 0:\n J = self.costFunction(x,y)\n print(\"Epoch=\", epoch, \"J=\", J)\n \n counter = 0\n #shuffle training data once per epoch\n shuffled_x, shuffled_y = self.shuffleData(x, y)\n \n while counter < fullSetSize:\n #take training batches from shuffled data\n x_batch, y_batch = self.getNextBatch(shuffled_x, shuffled_y, \\\n batchSize, counter)\n #get analytic gradients using minibatch \n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x_batch, y_batch )\n #take a GD step\n #To-do - implement variable learning rate\n self.w_ih -= partial_J_w_ih\n self.w_ho -= partial_J_w_ho\n self.b_h -= partial_J_b_h\n self.b_o -= partial_J_b_o\n 
counter += batchSize\n\n epoch += 1", "def compute_gradient_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n subgrad = lambda_ * np.sign(w)\n\n return -tx.T.dot(e)/len(e) + subgrad", "def gradient(self, x):\n return 0.0", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n\tif len(initial_w.shape)==2:\n\t\tinitial_w = initial_w.reshape((max(initial_w.shape)))\n\tif len(y.shape)==2:\n\t\ty = y.reshape((max(y.shape)))\n\n\tbatch_size = 5000\n\tw = initial_w\n\n\tfor n_iter in range(max_iters):\n\t\ty_, tx_ = batch_iter(y, tx, batch_size).__next__()\n\t\tgradient = compute_gradient(y_, tx_, w)\n\t\tw = w - gamma * gradient\n\t\tif n_iter%3==0:\n\t\t\tgamma = gamma/1.2\n\n\tloss = compute_loss(y, tx, w)\n\n\treturn w, loss", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def step_maxL_gradient_descent(y, tx, w, gamma):\n loss=loss_maxL(y, tx, w)\n grad=calculate_maxL_gradient(y,tx,w)\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def gradientDescent(f, df, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def solve(self):\n max_iter = 1000\n iter_count = 0\n yhat = self.predict()\n loss = self.cross_entropy(yhat)\n gradloss = self.cross_entropy_gradient(yhat)\n while la.norm(gradloss) > 1e-6 and iter_count < max_iter:\n alpha = 1.0\n slope = la.norm(gradloss)**2\n beta_new = self.beta + alpha * gradloss\n yhat = self.predict(beta=beta_new)\n loss_new = self.cross_entropy(yhat)\n while loss_new < loss + 1e-4 * alpha * slope:\n alpha = alpha / 2\n beta_new = self.beta + alpha * gradloss\n yhat = self.predict(beta=beta_new)\n loss_new = self.cross_entropy(yhat)\n self.beta = beta_new\n loss = loss_new\n gradloss = self.cross_entropy_gradient(yhat)\n iter_count += 1", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w = initial_w.copy()\n ws = [w]\n loss = compute_loss_LS(y, tx, w)\n losses = [loss]\n for n_iter in range(max_iters):\n gradient = compute_gradient_LS(y, tx, w)\n w -= gamma * gradient\n loss = compute_loss_LS(y, tx, w)\n ws.append(w)\n losses.append(loss)\n# print(\"Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n# bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n\n return losses[-1], ws[-1]", "def gradient(cls, y, y_target):\n return (y - y_target) / MEE.euclidean_distance(y, y_target)", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def 
implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def calc_cost(y, x, theta_1, theta_0):\n h = theta_1 * x + theta_0\n d = h - y\n cost = np.dot(d.T, d) / (2*x.shape[0])\n return cost.flat[0]", "def gradient_descent(X, Y, max_iter=1000, eta=0.1, mu=0.01):\n Y_onehot = onehot_encoder.fit_transform(Y.reshape(-1,1))\n W = np.zeros((X.shape[1], Y_onehot.shape[1]))\n step = 0\n step_lst = []\n loss_lst = []\n W_lst = []\n\n while step < max_iter:\n step += 1\n W -= eta * gradient(X, Y_onehot, W, mu)\n step_lst.append(step)\n W_lst.append(W)\n loss_lst.append(loss(X, Y_onehot, W))\n\n df = pd.DataFrame({\n 'step': step_lst,\n 'loss': loss_lst\n })\n return df, W" ]
[ "0.6790553", "0.66664946", "0.6630496", "0.64925885", "0.6476258", "0.64171803", "0.6405911", "0.6358458", "0.63555044", "0.6333278", "0.6325413", "0.62547517", "0.62520844", "0.624353", "0.6242326", "0.62349296", "0.62339544", "0.622683", "0.62201583", "0.61891985", "0.6185743", "0.6172043", "0.6171753", "0.6160388", "0.61512274", "0.613648", "0.60999745", "0.60893935", "0.6082421", "0.6082087", "0.60778207", "0.6072613", "0.60725206", "0.6056564", "0.6048478", "0.6045875", "0.6039787", "0.6039761", "0.60221905", "0.6017293", "0.6011788", "0.60023785", "0.5996162", "0.5991527", "0.5984391", "0.5980123", "0.596552", "0.5963548", "0.59625554", "0.5957342", "0.595572", "0.59453475", "0.593878", "0.59387666", "0.5933735", "0.5923884", "0.5918355", "0.59178704", "0.59163797", "0.5912935", "0.5911859", "0.58889806", "0.58882624", "0.58832085", "0.587784", "0.5876219", "0.58709687", "0.5865807", "0.5853473", "0.5853279", "0.58530426", "0.5852205", "0.5849844", "0.58392316", "0.58346194", "0.58343416", "0.5832069", "0.58233243", "0.5821222", "0.5816459", "0.581414", "0.5808397", "0.58054656", "0.5803576", "0.57923913", "0.5792262", "0.5792061", "0.5791541", "0.57804495", "0.57759464", "0.57752687", "0.5774953", "0.57735014", "0.57700014", "0.5769381", "0.5767676", "0.57651246", "0.57604533", "0.5759988", "0.57516426" ]
0.5771183
93
this is the gradient of x^2 + y^2
def df(x_i): return [2 * x_ij for x_ij in x_i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient(self, x):\n pass", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def gradient(self, x):\n return 0.0", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def gradient(self, x, Y):\n if self.is_sparse:\n x = x.todense()\n Y = Y.todense()\n assert(len(shape(x))==1)\n assert(len(shape(Y))==2)\n assert(len(x)==shape(Y)[1])\n \n x_2d=reshape(x, (1, len(x)))\n k = self.kernel(x_2d, Y)\n differences = Y - x\n G = (1.0 / self.width ** 2) * (k.T * differences)\n return G", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def gradient(cls, x):\n return 1 - TanH.apply(x) ** 2", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def ols_gradient(\n w: FloatTensor, x: FloatTensor, y: FloatTensor, _: float\n) -> FloatTensor:\n n, d = x.size()\n return (2 / n) * (x.t().matmul(x.matmul(w) - y))", "def gradient(cls, y, y_target):\n return y - y_target", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)", "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def gradient(cls, y, y_target):\n return (y - y_target) / MEE.euclidean_distance(y, y_target)", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def gradient(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n dC = self.dC_func(u, order=1)\n P = self.P\n numerator = np.sum((C - P) * dC, axis=0)\n denominator = np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))\n if np.abs(denominator) > 0:\n gradient = numerator/denominator\n else:\n gradient = np.asarray(0)[np.newaxis]\n return gradient", "def gradient(self, x):\n g = self._grad(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return g", "def gradient(self, theta):\n pass", "def gradient(self, theta):\n pass", "def _calc_gradients(self, X, y, y_hat):\n # calculate gradient of weight and bias\n grad_b = 2 * np.mean(y_hat - y)\n grad_W = 2 * np.mean(np.matmul((y_hat - y), X))\n return grad_W, grad_b", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def gradient(self):\n result = np.zeros(len(self.variables))\n result[self.bivariateGradInd] = (self.shape-1)/self.variable - self.rate\n return result", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 
'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compute_gradient(self, function, arguments):", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def compute_gradient(y, tx, w):\n\tN = y.shape[0]\n\te = y - np.dot(tx, w)\n\n\tgradLw = -1/N * np.dot(tx.T, e)\n\treturn gradLw", "def gradient(x1: int, x2: int) -> int:\n\n if x1 == x2:\n return 0\n\n dx = x1 - x2\n return int(dx / abs(dx))", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def compute_gradient(c, x, y):\n\n vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])\n rows, cols = c.shape\n\n result = np.empty_like(x)\n\n for i in nb.prange(rows):\n for j in nb.prange(cols):\n c_remainder = c[i, j] % 4\n gradient_co = vectors[c_remainder]\n result[i, j] = gradient_co[0] * x[i, j] + gradient_co[1] * y[i, j]\n\n return result", "def gradient(cls, x):\n return np.minimum(-2.5 < x, x < 2.5) * 0.2", "def calc_gradient(self, W, X, y, reg):\n\n N = X.shape[0]\n grad_W = np.zeros_like(W)\n I = np.ones((1,10))\n score = np.dot(X, W) # (N, C)\n out = np.exp(score-np.dot(np.max(score, axis=1, keepdims=True ),I))\n #print(\"out\", out)\n out /= np.sum(out, axis=1, keepdims=True) # (N, C)\n \n dout = np.copy(out) # (N, C)\n dout[np.arange(N), y] -= 1\n grad_W = np.dot(X.T, dout) # (D, C)\n grad_W /= N\n #grad_W += reg * W\n \n return grad_W", "def grad(self, w):\n l1_grad = self.r * np.sign(w)\n l2_grad = np.asarray(1 - self.r) * w \n\n gradient_penalty = self.alpha * (l1_grad + l2_grad)\n\n # Insert 0 for bias term.\n return np.insert(gradient_penalty, 0, 0, axis=0)", "def grad(self,w):\n # Calculate the vector -sigma(-y_i * x_i.w)\n s = -np.array([sigmoid(-yi * np.dot(xi,w)) for xi,yi in zip(self.x,self.y)])\n # Multiply it by xy\n g = np.array([np.dot(xyj,s) for xyj in self.xy.transpose()])\n # Add regularisation\n g += self.alpha*w\n return g\n #g = np.array([self.grad_j(w,j) for j in xrange(len(w))])", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n 
#c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def gradient(self, x_in):\r\n\r\n return 1 - torch.pow(self.forward(x_in), 2)", "def grad_loss_wrt_b(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1=y.reshape((N,1))\n dr = (1+np.exp(1*y1*k1))\n nr = -y1\n c2=0\n c1 = nr/dr\n for i in range(N):\n c2 +=c1[i][0]\n l_b = c2 / N\n #b2 = np.copy(self.b)\n #b1 = np.zeros((10,1))\n #b1[0] = b2\n #for i in range(1,10):\n #b1[i] = b1[i-1] - self.lr*l_b\n\n\n\n return l_b\n\n\n #raise NotImplementedError", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def calculate_gradients(self, X, Y):\n Z1 = np.matmul(self.weights[0], X) + self.biases[0] #(30, m)\n A1 = sigmoid(Z1) #(30, m)\n Z2 = np.matmul(self.weights[1], A1) + self.biases[1] #(10, m)\n A2 = sigmoid(Z2) #(10, m)\n # number of examples\n m = X.shape[1]\n dZ2 = A2 - Y #(784, m)\n dW2 = (1 / m) * np.matmul(dZ2, A1.T) #(10, 30)\n db2 = (1 / m) * np.sum(dZ2, axis = 1, keepdims = True) #(10, 1)\n dZ1 = np.multiply(np.matmul(self.weights[1].T, dZ2), sigmoid_deri(Z1)) #(30, m)\n dW1 = (1 / m) * np.matmul(dZ1, X.T) #(30, 784)\n db1 = (1 / m) * np.sum(dZ1, axis = 1, keepdims = True) #(30, 1)\n \n grads = {\"dW1\":dW1, \"db1\":db1, \"dW2\":dW2, \"db2\":db2} \n return grads", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def gradient(self,x=None,y=None,save=True):\n\n\t\tif (x is not None) and (y is not None):\n\n\t\t\tassert x.shape==y.shape,\"x and y must have the same shape!\"\n\n\t\t\t#x coordinates\n\t\t\tif type(x)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert x.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\tj = np.mod(((x / self.resolution).decompose().value).astype(np.int32),self.data.shape[1])\n\n\t\t\telse:\n\n\t\t\t\tj = np.mod((x / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[1])\t\n\n\t\t\t#y coordinates\n\t\t\tif type(y)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert y.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\ti = np.mod(((y / 
self.resolution).decompose().value).astype(np.int32),self.data.shape[0])\n\n\t\t\telse:\n\n\t\t\t\ti = np.mod((y / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[0])\n\n\t\telse:\n\t\t\ti = None\n\t\t\tj = None\n\t\t\n\t\t#Call the C backend\n\t\tgradient_x,gradient_y = _topology.gradient(self.data,j,i)\n\n\t\t#Return the gradients\n\t\tif (x is not None) and (y is not None):\n\n\t\t\treturn gradient_x.reshape(x.shape),gradient_y.reshape(x.shape)\n\n\t\telse:\n\t\t\n\t\t\tif save:\n\t\t\t\tself.gradient_x = gradient_x\n\t\t\t\tself.gradient_y = gradient_y\n\t\t\n\t\t\treturn gradient_x,gradient_y", "def gradient(f,h,X):\n # X = list(X)# flip to a list from tuple so we can modify elements\n fx = f(*X) # only need this once\n dX = []\n for i in range(len(X)):\n # Make a vector of Value(X_i, [0 ... 1 ... 0]) with 1 in ith position\n X[i] += h # tweak in dimension i\n y = f(*X)\n X[i] -=h # undo the tweak for next round\n dx = (y - fx)/h\n dX.append(dx)\n return dX", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def grad(self, A, y, x):\n z = y - A.dot(x) # Error for each observation\n grad_x = -1 * A.T.dot(np.sign(z))\n # Gradient normalized by the num obs\n return grad_x / y.size", "def gradient(self, X, V, W, Y):\n one, d_plus_one = X.shape\n K, H_plus_one = W.shape\n d = d_plus_one - 1\n H = H_plus_one - 1\n\n Z, Yhat = self.forward(X, V, W)\n assert one == 1\n x = X\n y = Y\n z = Z.ravel()\n yhat = Yhat.ravel()\n\n # Update W\n # grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)\n # grad__L__z[:] = 0.0\n # for k in range(K):\n # grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])\n # # Last element corresponds to constant offset 1 appended to z\n # # vector; it does not change / has no derivative.\n # grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])\n # grad__L__z += grad__L__yhat[k] * grad__yhat_k__z\n # W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k\n grad__L__z = (W.T * (yhat - y)).sum(axis=1)\n zz = z.reshape((1, H + 1)).repeat(K, 0)\n grad__L__W = diag(yhat - y) @ zz\n\n # Update V\n # for h in range(H):\n # grad__z_h__V_h = x * (1 - z[h] ** 2)\n # grad__L__V_h = grad__L__z[h] * grad__z_h__V_h\n # V[h, :] -= self.learning_rate * grad__L__V_h\n xx = x.reshape((1, d + 1)).repeat(H + 1, 0)\n grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx\n\n return grad__L__V, grad__L__W", "def _get_gradient_delta(self, Xi, yi):\n\n z = sum(wi * xij for wi, xij in zip(self.weights, Xi)) + self.bias\n y_hat = 1 / (1 + exp(-z))\n bias_grad_delta = yi - y_hat\n weights_grad_delta = [bias_grad_delta * Xij for Xij in Xi]\n return bias_grad_delta, weights_grad_delta", "def gradient(self, inputs):\n raise NotImplementedError", "def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 
!= theta.shape[0]):\n return None\n\n x_padded = np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]", "def get_grad(self, X):\n raise NotImplementedError", "def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n x, y = [node.output_value for node in self.input_nodes]\n\n dx = backend.dot(grad, backend.transpose(y))\n dy = backend.dot(backend.transpose(x), grad)\n\n return [dx, dy]", "def gradient(self):\n return NotImplemented", "def _get_gradient(self, X: array, y: array):\n\n # Use predict_prob method if this is a classifier.\n if hasattr(self, \"predict_prob\"):\n y_hat = self.predict_prob(X)\n else:\n y_hat = self.predict(X)\n\n # Calculate the gradient according to the dimention of X, y.\n grad_bias = y - y_hat\n if X.ndim is 1:\n grad_weights = grad_bias * X\n elif X.ndim is 2:\n grad_weights = grad_bias[:, None] * X\n grad_weights = grad_weights.mean(axis=0)\n grad_bias = grad_bias.mean()\n else:\n raise ValueError(\"Dimension of X has to be 1 or 2!\")\n return grad_bias, grad_weights", "def gradient(self):\n functional = self\n\n if self.exponent == 1:\n class L1Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\"\"\"\n return x.ufuncs.sign()\n\n return L1Gradient()\n elif self.exponent == 2:\n class L2Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in 0.\n \"\"\"\n norm_of_x = x.norm()\n if norm_of_x == 0:\n return self.domain.zero()\n else:\n return x / norm_of_x\n\n return L2Gradient()\n else:\n raise NotImplementedError('`gradient` only implemented for p=1 or '\n 'p=2')", "def gradient(img):\n G = np.zeros(img.shape)\n theta = np.zeros(img.shape)\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n Gx = partial_x(img)\n Gy = partial_y(img)\n G = np.sqrt(np.square(Gx) + np.square(Gy))\n theta = np.degrees(np.arctan2(Gy, Gx)) % 360\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return G, theta", "def gradient(self):\n return ConstantFunctional(self.domain, self.scalar)", "def gradient(w, x, t):\n return 2 * np.dot(x.T, (nn(x, w) - t))", "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def grad(self, A, y, x):\n z = y * A.dot(x) # decision value for each observation\n grad_x = -1*A[z < 1].T.dot(y[z < 1])\n # Gradient normalized by the num obs\n return grad_x / y.size", "def compute_gradient_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n subgrad = lambda_ * np.sign(w)\n\n return -tx.T.dot(e)/len(e) + subgrad", "def gradient_loss_and_output(self, a_o, y):\n if self.regression:\n return 2*(a_o - y)\n else:\n return -y / (1 
+ np.exp(y * a_o))", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def d_func(x, y):\n return np.array((2.0 * (x - 1) - 400.0 * x * (y - x**2), 200.0 * (y - x**2)))", "def gradient(img):\n G = np.zeros(img.shape)\n theta = np.zeros(img.shape)\n\n ### YOUR CODE HERE\n Gx = partial_x(img)\n Gy = partial_y(img)\n G = np.sqrt(np.power(Gx,2)+np.power(Gy,2))\n theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 360\n ### END YOUR CODE\n\n return G, theta", "def compute_square_loss_gradient(X, y, theta):\n #TODO\n P = (np.dot(X, theta)-y)\n m = X.shape[0]\n\n return (2/m)*np.dot(X.T, P)", "def gradient(self):\n return ScalingOperator(self.domain, 2.0)", "def disconnected_grad(x):\n return disconnected_grad_(x)", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def compute_subgradient(w, data):\n x, = data\n return -x / (w @ x)", "def square_loss_grad(X, Y, W):\n return 2.0 / X.shape[0] * np.dot(X.T, (np.dot(X, W) - Y))", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def gradient(self):\n functional = self\n\n class KLGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n The gradient is not defined in points where one or more\n components are non-positive.\n \"\"\"\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1\n\n return KLGradient()", "def gradient(self,i,f):\n\n diff = self.points[f, :] - self.points[i, :]\n gradient = diff[1]/diff[0]\n\n return gradient", "def gradient(self):\n functional = self\n\n class KLCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in points where one or more\n components are larger than or equal to one.\n \"\"\"\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)\n\n return KLCCGradient()", "def compute_cost_gradient2(x, y0, W, V, U, b0, b1, b2):\n # compute cost\n A1 = x @ W + b0\n A2 = x @ V + b1\n z0 = sigmoid(A1)\n z1 = sigmoid(A2)\n z = np.array([z0, z1]).T\n A3 = z @ U + b2\n y = sigmoid(A3)\n if y0 is None:\n return y\n cost = np.sum((y - y0) ** 2)\n # compute gradient\n dy = 2 * (y - y0)\n dA3 = dy * (y * (1 - y))\n dz0 = dA3 * U[0]\n dz1 = dA3 * U[1]\n dA1 = dz0 * (z0 * (1 - z0))\n dA2 = dz1 * (z1 * (1 - z1))\n dW = x.T @ dA1\n dV = x.T @ dA2\n dU = z.T @ dA3\n db0 = np.sum(dA1)\n db1 = np.sum(dA2)\n db2 = np.sum(dA3)\n return cost, dW, dV, dU, db0, db1, db2", "def compute_gradient(self, grad=None):\n input_value = self.input_nodes[0].output_value\n\n if grad is None:\n grad = backend.ones_like(self.output_value)\n return grad * backend.multiply(2.0, 
input_value)", "def gradient(self, node, output_grad):\r\n return [conv2d_grad_op1(node.inputs[0], node.inputs[1], node.const_attr , output_grad),conv2d_grad_op2(node.inputs[0], node.inputs[1], node.const_attr , output_grad)]", "def func_grad(self, X, Y):\n Q = 0\n gradd = np.zeros((N, 2*n+1))\n for x, y in zip(X, Y):\n #pairs of required arr coordinates\n iterat = [(int(x[i] // (self.max / N)), i) for i in range(2*n+1)]\n prediction = 0\n for j, i in iterat:\n prediction += self.arr[j, i]\n delta = prediction - y\n #in a meantime I precalculate new_step and, if all right, Ill use it to make new step\n for j, i in iterat:\n gradd[j, i] += delta \n Q += delta * delta\n return Q / len(X), gradd / len(X)", "def gradient(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n grad = 4.0 * epsilon * ((-12.0/r) * s12 - (-6/r) * s6)\n grad = 0.5 * (r - 5.0)\n return grad", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n dx = -grad\n return dx", "def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )", "def numerical_gradient(f, x: np.ndarray):\n h = 1e-4\n grad = np.zeros_like(x)\n for i in range(x.size):\n tmp_val = x.flat[i]\n x.flat[i] = tmp_val + h\n fxh1 = f(x)\n\n x.flat[i] = tmp_val - h\n fxh2 = f(x)\n grad.flat[i] = (fxh1 - fxh2) / (2 * h)\n x.flat[i] = tmp_val\n return grad", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def gradient_calculation(self, coefficients, x_values, y_values):\n gradient_coeffs = np.array([0]*len(coefficients))\n\n for xi in range(len(x_values)):\n x = x_values[xi]\n power_array = np.power(\n np.array([x]*len(coefficients)), np.array(range(len(coefficients))))\n\n diff = (2/len(x_values))*(self.f(x, coefficients) - y_values[xi])\n gradient_coeffs = gradient_coeffs + np.multiply(diff, power_array)\n\n return gradient_coeffs", "def compute_logistic_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w)) - y) / len(y)", "def compute_square_loss_gradient(X, y, theta):\n #TODO\n (N,p) = np.shape(X)\n grad = -(1/np.float(N))*np.array([(y - X.dot(theta))*X[:,i] for i in range(p)])\n return np.sum(grad,axis=1)", "def gradient(self):\n return ZeroOperator(self.domain)", "def getGradientsToVector(self, x, y):\n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x, y )\n #vectorise gradients\n return np.concatenate( ( partial_J_w_ih.flatten(), partial_J_w_ho.flatten(), \\\n partial_J_b_h, partial_J_b_o ) )", "def compute_subgradient(w, data):\n x, y = data\n return -x if w @ x < y else x", "def compute_gradients(self, x_i, y_i):\n dw = 0\n db = 0\n if y_i * (np.dot(x_i, self.w) - self.b) >= 1: # if correct prediction, only margin updated\n dw = 2 * self.lam * self.w\n db = 0\n else:\n dw = 2 * self.lam * self.w - np.dot(x_i, y_i) # if wrong prediction, margin and bias updated\n db = y_i\n\n return dw, db", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def objective_grad(self, wb, X, y):\n N, D = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss_grad = np.zeros(D+1) \n # grad wrt regularization\n loss_grad[-1] = 2 * self.reg_param * (b - self.b0) # grad_b\n loss_grad[:-1] = 2 * self.reg_param * (w - self.w0) # 
grad_w\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b)) \n loss_grad[-1] += tmpvar/(1 + tmpvar) * -1 * y[i] # grad_b \n loss_grad[:-1] += tmpvar/(1 + tmpvar) * -1 * y[i] * X[i] # grad_w\n\n return loss_grad", "def gradient(image):\n g_mag = np.zeros_like(image)\n g_theta = np.zeros_like(image)\n \n kh = np.array([[1,0,-1]])\n kv = np.array([[1],[0],[-1]])\n \n gX = convolve(image, kh, mode = 'same')\n gY = convolve(image, kv, mode = 'same')\n \n g_mag = abs(np.sqrt( np.square(gX) + np.square(gY) ))\n g_theta = (180/np.pi)*np.arctan(gY/gX)\n \n '''\n plt.figure(1)\n plt.imshow(gX, cmap='gray')\n plt.show()\n plt.figure(2)\n plt.imshow(gY, cmap='gray')\n plt.show()\n\n plt.figure(3)\n plt.imshow(g_mag, cmap='gray')\n plt.show()\n \n plt.figure(4)\n plt.imshow(g_theta, cmap='gray')\n plt.show()\n '''\n return g_mag, g_theta", "def gradient(cls, x):\n return np.ones(x.shape)" ]
[ "0.8220284", "0.7768869", "0.7596814", "0.7590641", "0.7536355", "0.7535873", "0.7492485", "0.74524486", "0.74460936", "0.74246055", "0.7422228", "0.73773456", "0.7359993", "0.73566616", "0.73448485", "0.7335415", "0.7270468", "0.72532976", "0.7230785", "0.7145674", "0.7110913", "0.7103678", "0.7103678", "0.71027917", "0.7095147", "0.70950806", "0.70921534", "0.70846", "0.707896", "0.70673573", "0.70585746", "0.7044142", "0.7023406", "0.70209056", "0.7020631", "0.7014932", "0.6976652", "0.6950403", "0.6947523", "0.6944287", "0.6922487", "0.6898744", "0.68549305", "0.68548226", "0.68514925", "0.6841194", "0.68327963", "0.68263984", "0.6819748", "0.67977", "0.67893314", "0.6780321", "0.67687804", "0.676794", "0.6754971", "0.6750939", "0.6749321", "0.6747039", "0.67403936", "0.67245346", "0.67143756", "0.6711341", "0.6710687", "0.66978407", "0.66937613", "0.6693574", "0.66882616", "0.6686115", "0.66762346", "0.6675903", "0.6674058", "0.6670138", "0.6663298", "0.6662724", "0.6656496", "0.6655237", "0.66537195", "0.6647035", "0.66318715", "0.66310877", "0.6628175", "0.6627805", "0.6622852", "0.6622521", "0.6619227", "0.6619187", "0.6618987", "0.66150004", "0.6602518", "0.6596813", "0.65887135", "0.65858907", "0.6584519", "0.65794575", "0.6577528", "0.6571867", "0.65636665", "0.6559313", "0.65510166", "0.6546691", "0.65411264" ]
0.0
-1
this is the gradient of x^2 + y^2
def df(x_i): return [2 * x_ij for x_ij in x_i]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient(self, x):\n pass", "def compute_gradient (w, x, y):\n (n,d) = x.shape\n g = np.zeros(d)\n for i in range(0,d):\n g[i] = (w*x-y)*np.transpose(x[i])\n g += 0.5*w\n return g", "def gradient(self, x):\n return 0.0", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def gradient(self, x, Y):\n if self.is_sparse:\n x = x.todense()\n Y = Y.todense()\n assert(len(shape(x))==1)\n assert(len(shape(Y))==2)\n assert(len(x)==shape(Y)[1])\n \n x_2d=reshape(x, (1, len(x)))\n k = self.kernel(x_2d, Y)\n differences = Y - x\n G = (1.0 / self.width ** 2) * (k.T * differences)\n return G", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def gradient(cls, x):\n return 1 - TanH.apply(x) ** 2", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def grad_l2(w, X, y, **kwargs):\n return -1 * np.dot(X.T, y - np.dot(X, w)) / X.shape[0]", "def ols_gradient(\n w: FloatTensor, x: FloatTensor, y: FloatTensor, _: float\n) -> FloatTensor:\n n, d = x.size()\n return (2 / n) * (x.t().matmul(x.matmul(w) - y))", "def gradient(cls, y, y_target):\n return y - y_target", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def get_gradient(self, y, x, weight):\n y = np.reshape(y, (len(y),))\n return np.dot(x.T, sigmoid(np.dot(x, weight)) - y) \\\n + self.regularizer.get_gradient(weight)", "def gradient(theta, x, y):\n m = len(y)\n n = len(theta)\n z = theta.dot(x.T)\n grad = np.zeros(n)\n for i in xrange(m):\n grad += (g(z[i]) - y[i]) * x[i]\n return 1. / m * grad", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def gradient(cls, y, y_target):\n return (y - y_target) / MEE.euclidean_distance(y, y_target)", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def gradient(self, x):\n u = np.asarray([x[0]])\n C = self.C_func(u)\n dC = self.dC_func(u, order=1)\n P = self.P\n numerator = np.sum((C - P) * dC, axis=0)\n denominator = np.sum(np.sum((C - P) ** 2, axis=0) ** (1 / 2))\n if np.abs(denominator) > 0:\n gradient = numerator/denominator\n else:\n gradient = np.asarray(0)[np.newaxis]\n return gradient", "def gradient(self, x):\n g = self._grad(\n time_series=self.observed_data,\n a=x[0],\n b=x[1],\n c=x[2],\n sigma=self.sigma\n )\n return g", "def _calc_gradients(self, X, y, y_hat):\n # calculate gradient of weight and bias\n grad_b = 2 * np.mean(y_hat - y)\n grad_W = 2 * np.mean(np.matmul((y_hat - y), X))\n return grad_W, grad_b", "def gradient(self, theta):\n pass", "def gradient(self, theta):\n pass", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def gradient(self):\n result = np.zeros(len(self.variables))\n result[self.bivariateGradInd] = (self.shape-1)/self.variable - self.rate\n return result", "def computeGradient(self, X, y, w):\n n = len(X)\n if self.loss == 'linear':\n gradient = -2 * np.dot(X.T, (y - X.dot(w)))\n elif self.loss == 
'logistic':\n g = self.logistic(X, w)\n gradient = -2 * np.dot(X.T, (y - g) * g * (1 - g))\n elif self.loss == 'perceptron':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = ((np.dot(X, w) >= 0).astype(int) != y)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = -np.dot(usedX.T, usedY)\n elif self.loss == 'svm':\n newY = (y > 0).astype(int) * 2 - 1 # change from (0, 1) to (-1, 1)\n index = (np.dot(X, w) * newY < 1)\n usedX = X[index[:, 0]]\n usedY = newY[index[:, 0]]\n gradient = 2 * w - self.C * np.dot(usedX.T, usedY)\n gradient[0] = gradient[0] + 2 * w[0]\n\n return gradient", "def compute_gradient(self, function, arguments):", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def compute_gradient(y, tx, w):\n\tN = y.shape[0]\n\te = y - np.dot(tx, w)\n\n\tgradLw = -1/N * np.dot(tx.T, e)\n\treturn gradLw", "def gradient(x1: int, x2: int) -> int:\n\n if x1 == x2:\n return 0\n\n dx = x1 - x2\n return int(dx / abs(dx))", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def compute_gradient(c, x, y):\n\n vectors = np.array([[0, 1], [0, -1], [1, 0], [-1, 0]])\n rows, cols = c.shape\n\n result = np.empty_like(x)\n\n for i in nb.prange(rows):\n for j in nb.prange(cols):\n c_remainder = c[i, j] % 4\n gradient_co = vectors[c_remainder]\n result[i, j] = gradient_co[0] * x[i, j] + gradient_co[1] * y[i, j]\n\n return result", "def compute_gradient(self):\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def gradient(cls, x):\n return np.minimum(-2.5 < x, x < 2.5) * 0.2", "def calc_gradient(self, W, X, y, reg):\n\n N = X.shape[0]\n grad_W = np.zeros_like(W)\n I = np.ones((1,10))\n score = np.dot(X, W) # (N, C)\n out = np.exp(score-np.dot(np.max(score, axis=1, keepdims=True ),I))\n #print(\"out\", out)\n out /= np.sum(out, axis=1, keepdims=True) # (N, C)\n \n dout = np.copy(out) # (N, C)\n dout[np.arange(N), y] -= 1\n grad_W = np.dot(X.T, dout) # (D, C)\n grad_W /= N\n #grad_W += reg * W\n \n return grad_W", "def grad(self, w):\n l1_grad = self.r * np.sign(w)\n l2_grad = np.asarray(1 - self.r) * w \n\n gradient_penalty = self.alpha * (l1_grad + l2_grad)\n\n # Insert 0 for bias term.\n return np.insert(gradient_penalty, 0, 0, axis=0)", "def grad(self,w):\n # Calculate the vector -sigma(-y_i * x_i.w)\n s = -np.array([sigmoid(-yi * np.dot(xi,w)) for xi,yi in zip(self.x,self.y)])\n # Multiply it by xy\n g = np.array([np.dot(xyj,s) for xyj in self.xy.transpose()])\n # Add regularisation\n g += self.alpha*w\n return g\n #g = np.array([self.grad_j(w,j) for j in xrange(len(w))])", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n 
#c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def gradient(self, x_in):\r\n\r\n return 1 - torch.pow(self.forward(x_in), 2)", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def grad_loss_wrt_b(self, x, y):\n (N,D) = x.shape\n k1 = np.matmul(x,np.transpose(self.w)) + self.b\n y1=y.reshape((N,1))\n dr = (1+np.exp(1*y1*k1))\n nr = -y1\n c2=0\n c1 = nr/dr\n for i in range(N):\n c2 +=c1[i][0]\n l_b = c2 / N\n #b2 = np.copy(self.b)\n #b1 = np.zeros((10,1))\n #b1[0] = b2\n #for i in range(1,10):\n #b1[i] = b1[i-1] - self.lr*l_b\n\n\n\n return l_b\n\n\n #raise NotImplementedError", "def calculate_gradients(self, X, Y):\n Z1 = np.matmul(self.weights[0], X) + self.biases[0] #(30, m)\n A1 = sigmoid(Z1) #(30, m)\n Z2 = np.matmul(self.weights[1], A1) + self.biases[1] #(10, m)\n A2 = sigmoid(Z2) #(10, m)\n # number of examples\n m = X.shape[1]\n dZ2 = A2 - Y #(784, m)\n dW2 = (1 / m) * np.matmul(dZ2, A1.T) #(10, 30)\n db2 = (1 / m) * np.sum(dZ2, axis = 1, keepdims = True) #(10, 1)\n dZ1 = np.multiply(np.matmul(self.weights[1].T, dZ2), sigmoid_deri(Z1)) #(30, m)\n dW1 = (1 / m) * np.matmul(dZ1, X.T) #(30, 784)\n db1 = (1 / m) * np.sum(dZ1, axis = 1, keepdims = True) #(30, 1)\n \n grads = {\"dW1\":dW1, \"db1\":db1, \"dW2\":dW2, \"db2\":db2} \n return grads", "def check_gradient(self, x, y):\n x = x.transpose()\n y = y.transpose()\n layers_copy = deepcopy(self.layers)\n epsilon = 10 ** -4\n a, layer = self.forward_propagation(x)\n delta = self.calculate_delta(a, y, layer)\n self.backpropagation(delta=delta, theta=layer.theta)\n previous_layer_output = x\n for layer in self.layers:\n theta_copy = deepcopy(layer.theta)\n real_theta_size = theta_copy.shape\n delta = layer.delta\n dc_dtheta = np.outer(previous_layer_output, delta).transpose()\n previous_layer_output = layer.a\n R, C = theta_copy.shape\n for i in range(R):\n for j in range(C):\n theta_plus = deepcopy(theta_copy)\n theta_plus[i, j] += epsilon\n layer.theta = theta_plus\n a_plus, l_plus = self.forward_propagation(x)\n err_plus = self.calculate_loss(a_plus, y)\n theta_minus = deepcopy(theta_copy)\n theta_minus[i, j] -= epsilon\n layer.theta = theta_minus\n a_minus, l_minus = self.forward_propagation(x)\n err_minus = self.calculate_loss(a_minus, y)\n limit = (err_plus - err_minus)/(2*epsilon)\n grad_diff = abs(dc_dtheta[i,j] - limit)\n assert grad_diff < 10 ** -6, f\"Diff {grad_diff} is too big.\"\n layer.theta = theta_copy", "def gradient(self,x=None,y=None,save=True):\n\n\t\tif (x is not None) and (y is not None):\n\n\t\t\tassert x.shape==y.shape,\"x and y must have the same shape!\"\n\n\t\t\t#x coordinates\n\t\t\tif type(x)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert x.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\tj = np.mod(((x / self.resolution).decompose().value).astype(np.int32),self.data.shape[1])\n\n\t\t\telse:\n\n\t\t\t\tj = np.mod((x / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[1])\t\n\n\t\t\t#y coordinates\n\t\t\tif type(y)==u.quantity.Quantity:\n\t\t\t\n\t\t\t\tassert y.unit.physical_type==self.side_angle.unit.physical_type\n\t\t\t\ti = np.mod(((y / 
self.resolution).decompose().value).astype(np.int32),self.data.shape[0])\n\n\t\t\telse:\n\n\t\t\t\ti = np.mod((y / self.resolution.to(u.rad).value).astype(np.int32),self.data.shape[0])\n\n\t\telse:\n\t\t\ti = None\n\t\t\tj = None\n\t\t\n\t\t#Call the C backend\n\t\tgradient_x,gradient_y = _topology.gradient(self.data,j,i)\n\n\t\t#Return the gradients\n\t\tif (x is not None) and (y is not None):\n\n\t\t\treturn gradient_x.reshape(x.shape),gradient_y.reshape(x.shape)\n\n\t\telse:\n\t\t\n\t\t\tif save:\n\t\t\t\tself.gradient_x = gradient_x\n\t\t\t\tself.gradient_y = gradient_y\n\t\t\n\t\t\treturn gradient_x,gradient_y", "def gradient(f,h,X):\n # X = list(X)# flip to a list from tuple so we can modify elements\n fx = f(*X) # only need this once\n dX = []\n for i in range(len(X)):\n # Make a vector of Value(X_i, [0 ... 1 ... 0]) with 1 in ith position\n X[i] += h # tweak in dimension i\n y = f(*X)\n X[i] -=h # undo the tweak for next round\n dx = (y - fx)/h\n dX.append(dx)\n return dX", "def loss_gradient(self, x, y):\n x_preproc = self._apply_processing(x)\n x_defences, y_defences = self._apply_defences(x_preproc, y, fit=False)\n\n # Adjust the shape of y for loss functions that do not take labels in one-hot encoding\n if self._reduce_labels:\n y_defences = np.argmax(y_defences, axis=1)\n\n grads = self._loss_grads([x_defences, y_defences])[0]\n grads = self._apply_defences_gradient(x_preproc, grads)\n grads = self._apply_processing_gradient(grads)\n assert grads.shape == x_preproc.shape\n\n return grads", "def grad(self, A, y, x):\n z = y - A.dot(x) # Error for each observation\n grad_x = -1 * A.T.dot(np.sign(z))\n # Gradient normalized by the num obs\n return grad_x / y.size", "def gradient(self, X, V, W, Y):\n one, d_plus_one = X.shape\n K, H_plus_one = W.shape\n d = d_plus_one - 1\n H = H_plus_one - 1\n\n Z, Yhat = self.forward(X, V, W)\n assert one == 1\n x = X\n y = Y\n z = Z.ravel()\n yhat = Yhat.ravel()\n\n # Update W\n # grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)\n # grad__L__z[:] = 0.0\n # for k in range(K):\n # grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])\n # # Last element corresponds to constant offset 1 appended to z\n # # vector; it does not change / has no derivative.\n # grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])\n # grad__L__z += grad__L__yhat[k] * grad__yhat_k__z\n # W[k, :] -= self.learning_rate * grad__L__yhat[k] * grad__yhat_k__W_k\n grad__L__z = (W.T * (yhat - y)).sum(axis=1)\n zz = z.reshape((1, H + 1)).repeat(K, 0)\n grad__L__W = diag(yhat - y) @ zz\n\n # Update V\n # for h in range(H):\n # grad__z_h__V_h = x * (1 - z[h] ** 2)\n # grad__L__V_h = grad__L__z[h] * grad__z_h__V_h\n # V[h, :] -= self.learning_rate * grad__L__V_h\n xx = x.reshape((1, d + 1)).repeat(H + 1, 0)\n grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx\n\n return grad__L__V, grad__L__W", "def _get_gradient_delta(self, Xi, yi):\n\n z = sum(wi * xij for wi, xij in zip(self.weights, Xi)) + self.bias\n y_hat = 1 / (1 + exp(-z))\n bias_grad_delta = yi - y_hat\n weights_grad_delta = [bias_grad_delta * Xij for Xij in Xi]\n return bias_grad_delta, weights_grad_delta", "def gradient(x, y, theta):\n if x.ndim == 1:\n x = x[:, np.newaxis]\n if y.ndim == 2 and y.shape[1] == 1:\n y = y.flatten()\n if theta.ndim == 2 and theta.shape[1] == 1:\n theta = theta.flatten()\n\n if (x.size == 0 or y.size == 0 or theta.size == 0\n or x.ndim != 2 or y.ndim != 1 or theta.ndim != 1\n or x.shape[0] != y.shape[0] or x.shape[1] + 1 != theta.shape[0]):\n return None\n\n x_padded = 
np.c_[np.ones(x.shape[0]), x]\n\n return x_padded.T.dot(x_padded.dot(theta) - y) / y.shape[0]", "def gradient(self, inputs):\n raise NotImplementedError", "def get_grad(self, X):\n raise NotImplementedError", "def gradient(self, theta):\n a = -(6 * self.scale ** 2)\n b = 3 * self.scale ** 2 + np.exp(2 * theta)\n b *= np.log(3 * self.scale ** 2 * np.exp(-2 * theta) + 1)\n return a / b", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n x, y = [node.output_value for node in self.input_nodes]\n\n dx = backend.dot(grad, backend.transpose(y))\n dy = backend.dot(backend.transpose(x), grad)\n\n return [dx, dy]", "def gradient(self):\n return NotImplemented", "def _get_gradient(self, X: array, y: array):\n\n # Use predict_prob method if this is a classifier.\n if hasattr(self, \"predict_prob\"):\n y_hat = self.predict_prob(X)\n else:\n y_hat = self.predict(X)\n\n # Calculate the gradient according to the dimention of X, y.\n grad_bias = y - y_hat\n if X.ndim is 1:\n grad_weights = grad_bias * X\n elif X.ndim is 2:\n grad_weights = grad_bias[:, None] * X\n grad_weights = grad_weights.mean(axis=0)\n grad_bias = grad_bias.mean()\n else:\n raise ValueError(\"Dimension of X has to be 1 or 2!\")\n return grad_bias, grad_weights", "def gradient(self):\n functional = self\n\n if self.exponent == 1:\n class L1Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\"\"\"\n return x.ufuncs.sign()\n\n return L1Gradient()\n elif self.exponent == 2:\n class L2Gradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in 0.\n \"\"\"\n norm_of_x = x.norm()\n if norm_of_x == 0:\n return self.domain.zero()\n else:\n return x / norm_of_x\n\n return L2Gradient()\n else:\n raise NotImplementedError('`gradient` only implemented for p=1 or '\n 'p=2')", "def gradient(img):\n G = np.zeros(img.shape)\n theta = np.zeros(img.shape)\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n Gx = partial_x(img)\n Gy = partial_y(img)\n G = np.sqrt(np.square(Gx) + np.square(Gy))\n theta = np.degrees(np.arctan2(Gy, Gx)) % 360\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return G, theta", "def gradient(w, x, t):\n return 2 * np.dot(x.T, (nn(x, w) - t))", "def gradient(self):\n return ConstantFunctional(self.domain, self.scalar)", "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def grad(self, A, y, x):\n z = y * A.dot(x) # decision value for each observation\n grad_x = -1*A[z < 1].T.dot(y[z < 1])\n # Gradient normalized by the num obs\n return grad_x / y.size", "def compute_gradient_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n subgrad = lambda_ * np.sign(w)\n\n return -tx.T.dot(e)/len(e) + subgrad", "def gradient_loss_and_output(self, a_o, y):\n if self.regression:\n return 2*(a_o - y)\n else:\n 
return -y / (1 + np.exp(y * a_o))", "def _compute_func_grad(self, w):\n W = w.reshape((self.X.shape[1], self.Y.shape[1]))\n self.nll_, self.grad_ = calculate_gradient(self.X, self.Y, W, self.prior, self.weighted,0)", "def d_func(x, y):\n return np.array((2.0 * (x - 1) - 400.0 * x * (y - x**2), 200.0 * (y - x**2)))", "def gradient(img):\n G = np.zeros(img.shape)\n theta = np.zeros(img.shape)\n\n ### YOUR CODE HERE\n Gx = partial_x(img)\n Gy = partial_y(img)\n G = np.sqrt(np.power(Gx,2)+np.power(Gy,2))\n theta = (np.arctan2(Gy, Gx) * 180 / np.pi) % 360\n ### END YOUR CODE\n\n return G, theta", "def compute_square_loss_gradient(X, y, theta):\n #TODO\n P = (np.dot(X, theta)-y)\n m = X.shape[0]\n\n return (2/m)*np.dot(X.T, P)", "def gradient(self):\n return ScalingOperator(self.domain, 2.0)", "def disconnected_grad(x):\n return disconnected_grad_(x)", "def calc_grad(X, Y, theta):\n m, n = X.shape\n\n margins = Y * X.dot(theta)\n probs = 1. / (1 + np.exp(margins))\n grad = -(1./m) * (X.T.dot(probs * Y))\n\n return grad", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def compute_subgradient(w, data):\n x, = data\n return -x / (w @ x)", "def square_loss_grad(X, Y, W):\n return 2.0 / X.shape[0] * np.dot(X.T, (np.dot(X, W) - Y))", "def d2(x0,y0,x1,y1):\n return (x0-x1)*(x0-x1) + (y0-y1)*(y0-y1)", "def gradient(self):\n functional = self\n\n class KLGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n The gradient is not defined in points where one or more\n components are non-positive.\n \"\"\"\n if functional.prior is None:\n return (-1.0) / x + 1\n else:\n return (-functional.prior) / x + 1\n\n return KLGradient()", "def gradient(self,i,f):\n\n diff = self.points[f, :] - self.points[i, :]\n gradient = diff[1]/diff[0]\n\n return gradient", "def compute_cost_gradient2(x, y0, W, V, U, b0, b1, b2):\n # compute cost\n A1 = x @ W + b0\n A2 = x @ V + b1\n z0 = sigmoid(A1)\n z1 = sigmoid(A2)\n z = np.array([z0, z1]).T\n A3 = z @ U + b2\n y = sigmoid(A3)\n if y0 is None:\n return y\n cost = np.sum((y - y0) ** 2)\n # compute gradient\n dy = 2 * (y - y0)\n dA3 = dy * (y * (1 - y))\n dz0 = dA3 * U[0]\n dz1 = dA3 * U[1]\n dA1 = dz0 * (z0 * (1 - z0))\n dA2 = dz1 * (z1 * (1 - z1))\n dW = x.T @ dA1\n dV = x.T @ dA2\n dU = z.T @ dA3\n db0 = np.sum(dA1)\n db1 = np.sum(dA2)\n db2 = np.sum(dA3)\n return cost, dW, dV, dU, db0, db1, db2", "def gradient(self):\n functional = self\n\n class KLCCGradient(Operator):\n\n \"\"\"The gradient operator of this functional.\"\"\"\n\n def __init__(self):\n \"\"\"Initialize a new instance.\"\"\"\n super().__init__(functional.domain, functional.domain,\n linear=False)\n\n def _call(self, x):\n \"\"\"Apply the gradient operator to the given point.\n\n The gradient is not defined in points where one or more\n components are larger than or equal to one.\n \"\"\"\n if functional.prior is None:\n return 1.0 / (1 - x)\n else:\n return functional.prior / (1 - x)\n\n return KLCCGradient()", "def gradient(self, node, output_grad):\r\n return [conv2d_grad_op1(node.inputs[0], node.inputs[1], node.const_attr , output_grad),conv2d_grad_op2(node.inputs[0], node.inputs[1], 
node.const_attr , output_grad)]", "def compute_gradient(self, grad=None):\n input_value = self.input_nodes[0].output_value\n\n if grad is None:\n grad = backend.ones_like(self.output_value)\n return grad * backend.multiply(2.0, input_value)", "def func_grad(self, X, Y):\n Q = 0\n gradd = np.zeros((N, 2*n+1))\n for x, y in zip(X, Y):\n #pairs of required arr coordinates\n iterat = [(int(x[i] // (self.max / N)), i) for i in range(2*n+1)]\n prediction = 0\n for j, i in iterat:\n prediction += self.arr[j, i]\n delta = prediction - y\n #in a meantime I precalculate new_step and, if all right, Ill use it to make new step\n for j, i in iterat:\n gradd[j, i] += delta \n Q += delta * delta\n return Q / len(X), gradd / len(X)", "def compute_gradient(self, grad=None):\n if grad is None:\n grad = backend.ones_like(self.output_value)\n dx = -grad\n return dx", "def gradient(self, r):\n sigma = self.params['sigma']\n epsilon = self.params['epsilon']\n s = sigma / r\n s6 = s**6; s12 = s6 * s6\n grad = 4.0 * epsilon * ((-12.0/r) * s12 - (-6/r) * s6)\n grad = 0.5 * (r - 5.0)\n return grad", "def gradient(self, theta):\n return (1 / (self.sigma * np.sqrt(2 * np.pi))) * (\n -theta / (self.sigma ** 2) * np.exp(-(theta ** 2) / (2 * self.sigma ** 2))\n )", "def numerical_gradient(f, x: np.ndarray):\n h = 1e-4\n grad = np.zeros_like(x)\n for i in range(x.size):\n tmp_val = x.flat[i]\n x.flat[i] = tmp_val + h\n fxh1 = f(x)\n\n x.flat[i] = tmp_val - h\n fxh2 = f(x)\n grad.flat[i] = (fxh1 - fxh2) / (2 * h)\n x.flat[i] = tmp_val\n return grad", "def grad_ReLU(self):\n y = self.x\n y[y<=0] = 0\n y[y>0] = 1\n return y\n raise NotImplementedError(\"ReLU gradient not implemented\")", "def gradient_calculation(self, coefficients, x_values, y_values):\n gradient_coeffs = np.array([0]*len(coefficients))\n\n for xi in range(len(x_values)):\n x = x_values[xi]\n power_array = np.power(\n np.array([x]*len(coefficients)), np.array(range(len(coefficients))))\n\n diff = (2/len(x_values))*(self.f(x, coefficients) - y_values[xi])\n gradient_coeffs = gradient_coeffs + np.multiply(diff, power_array)\n\n return gradient_coeffs", "def compute_logistic_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w)) - y) / len(y)", "def compute_square_loss_gradient(X, y, theta):\n #TODO\n (N,p) = np.shape(X)\n grad = -(1/np.float(N))*np.array([(y - X.dot(theta))*X[:,i] for i in range(p)])\n return np.sum(grad,axis=1)", "def getGradientsToVector(self, x, y):\n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x, y )\n #vectorise gradients\n return np.concatenate( ( partial_J_w_ih.flatten(), partial_J_w_ho.flatten(), \\\n partial_J_b_h, partial_J_b_o ) )", "def gradient(self):\n return ZeroOperator(self.domain)", "def compute_subgradient(w, data):\n x, y = data\n return -x if w @ x < y else x", "def compute_gradients(self, x_i, y_i):\n dw = 0\n db = 0\n if y_i * (np.dot(x_i, self.w) - self.b) >= 1: # if correct prediction, only margin updated\n dw = 2 * self.lam * self.w\n db = 0\n else:\n dw = 2 * self.lam * self.w - np.dot(x_i, y_i) # if wrong prediction, margin and bias updated\n db = y_i\n\n return dw, db", "def compute_loss_and_gradients(self, X, y):\n # TODO Compute loss and fill param gradients\n # by running forward and backward passes through the model", "def objective_grad(self, wb, X, y):\n N, D = X.shape\n w = wb[:-1]\n b = wb[-1]\n loss_grad = np.zeros(D+1) \n # grad wrt regularization\n loss_grad[-1] = 2 * self.reg_param * (b - self.b0) # grad_b\n loss_grad[:-1] = 2 * self.reg_param * (w - 
self.w0) # grad_w\n\n for i in range(N):\n tmpvar = np.exp(-1 * y[i] * (np.dot(w, X[i]) + b)) \n loss_grad[-1] += tmpvar/(1 + tmpvar) * -1 * y[i] # grad_b \n loss_grad[:-1] += tmpvar/(1 + tmpvar) * -1 * y[i] * X[i] # grad_w\n\n return loss_grad", "def gradient(image):\n g_mag = np.zeros_like(image)\n g_theta = np.zeros_like(image)\n \n kh = np.array([[1,0,-1]])\n kv = np.array([[1],[0],[-1]])\n \n gX = convolve(image, kh, mode = 'same')\n gY = convolve(image, kv, mode = 'same')\n \n g_mag = abs(np.sqrt( np.square(gX) + np.square(gY) ))\n g_theta = (180/np.pi)*np.arctan(gY/gX)\n \n '''\n plt.figure(1)\n plt.imshow(gX, cmap='gray')\n plt.show()\n plt.figure(2)\n plt.imshow(gY, cmap='gray')\n plt.show()\n\n plt.figure(3)\n plt.imshow(g_mag, cmap='gray')\n plt.show()\n \n plt.figure(4)\n plt.imshow(g_theta, cmap='gray')\n plt.show()\n '''\n return g_mag, g_theta", "def gradient(cls, x):\n return np.ones(x.shape)" ]
[ "0.8220871", "0.7769543", "0.7597072", "0.7590164", "0.75366026", "0.7535663", "0.7492575", "0.7453598", "0.7445228", "0.7424073", "0.74229896", "0.73769474", "0.73603284", "0.735691", "0.73446774", "0.7335506", "0.727039", "0.7253472", "0.7231205", "0.7145222", "0.71104634", "0.710413", "0.71040803", "0.71040803", "0.7094459", "0.70932674", "0.70925045", "0.7084499", "0.7078722", "0.7067359", "0.7057449", "0.7045639", "0.7021868", "0.70216066", "0.7019076", "0.70157766", "0.6975934", "0.69492334", "0.69456315", "0.6943899", "0.69224024", "0.6898025", "0.68553185", "0.68550754", "0.6852189", "0.6842685", "0.68337977", "0.6826126", "0.68205225", "0.67971134", "0.67882586", "0.6780678", "0.67690444", "0.67678124", "0.6755039", "0.6750014", "0.67488176", "0.67467713", "0.67416346", "0.6723602", "0.6715717", "0.6709912", "0.67098206", "0.66979736", "0.669362", "0.6692773", "0.6689429", "0.6684704", "0.66782975", "0.6677152", "0.66743183", "0.6668968", "0.66633064", "0.6662438", "0.6656174", "0.66550314", "0.6652617", "0.6648898", "0.6630833", "0.66294336", "0.6627823", "0.6627454", "0.6622279", "0.6621457", "0.66192424", "0.6617842", "0.66170347", "0.66148394", "0.66014004", "0.65951145", "0.6589024", "0.65853506", "0.6584729", "0.65791935", "0.6578411", "0.6572931", "0.65649503", "0.65600884", "0.65503937", "0.65469337", "0.65412" ]
0.0
-1
runs gradient descent to find a minimum of exp(x^3 / 3 + x y^2)
def run_gradient_descent2(seed=0): colors = [color for color in matplotlib.colors.cnames] def random_point(): return (3 * random.random() - 1, 3 * random.random() - 1) def f(x): """has min at (1,0), saddle point at (-1,0)""" return -math.exp(x[0]**3/-3 + x[0] - x[1]**2) def df(x): return ((1 - x[0]**2) * f(x), -2 * x[1] * f(x)) for color in random.sample(colors, 50): path = take(100, gradient_descent(df, random_point())) for i, (x, y) in enumerate(path): plt.plot(x, y, color=color, marker='*', markersize=25-i/4) plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gradient_ascent(self, w, X, y, lr):\r\n # INSERT YOUR CODE HERE\r\n #raise Exception('Function not yet implemented!')\r\n # gradient = x_j*(y-σ(wTX))\r\n return np.dot(X.T, y-self.sigmoid(np.dot(X, w)))", "def gradient_descent(self, x, y):\n # Initialize weights vector\n self.weights = np.zeros(len(x[0]))\n\n # Storing number of training example in a variable \n n = len(x)\n\n # Initiate variables to keep track of the current and smallest loss recorded\n lowest_loss = sys.float_info.max\n current_loss = sys.float_info.max\n\n # Initiate variables to keep track of step sizes\n norm = sys.float_info.max\n smallest_norm = sys.float_info.max\n\n # Initiate list variable that stores all previous weights\n prev_weights = []\n\n # Initiate list that stores all the errors. \n errors = []\n \n # Variable to keep track of the number of iterations that returns a bigger loss than current loss\n k_loss_iteration = 1\n\n # Learning loop\n for i in range(self.max_iter):\n\n # Append current weights\n prev_weights.append(np.array(self.weights))\n \n # Minimizing Loss Function Error by adjusting weights using Gradient Descent\n self.weights += self.learning_rate * (sum([x[i] * (y[i] - self.logistic_function(self.weights.dot(x[i]))) for i in range(n)]) - 2 * self.l2 * self.weights)\n\n # Compute the error of the Cost Function and store it in a list\n current_loss = self.cost(x,y)\n\n if len(errors) > 1 and current_loss > errors[-1]:\n k_loss_iteration += 1\n else: \n k_loss_iteration = 1\n\n errors.append(current_loss)\n \n # Track smallest loss\n if current_loss < lowest_loss:\n lowest_loss = current_loss\n\n # Compute the L2 Norm of the difference between current weights and previous weights\n norm = np.linalg.norm(self.weights - prev_weights[-1])\n\n # Track smallest step size and set it as error threshold\n if norm < smallest_norm:\n smallest_norm = norm\n\n # If this L2 norm is smaller than the error_threshold it means that it converged, hence we can break. 
In other words, repeat until the step size is too small\n if self.error_threshold != None and norm < self.error_threshold:\n print(\"Converged after {} iterations!\".format(i))\n break\n\n # stop if error hasn't gone down in k iterations\n if k_loss_iteration >= 10:\n print(k_loss_iteration + \" iterations of loss not decreasing on {}th itertion.\".format(i))\n break\n\n # Log final weights\n print(\"Final norm: \" + str(norm) + \"\\nSmallest step size recorded: \" + str(smallest_norm) + \"\\nFinal error: \" + str(current_loss) + \"\\nLowest error recorded: \" + str(lowest_loss) + \"\\nNumber of epochs: \" + str(len(errors)) + \"\\nFinal weights: \" + str(self.weights))", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, verbose=verbose)", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def gradient_descent(x, y, w, max_iter, alpha = 0.001):\n \n N = y.shape[0]\n \n J_hist = np.zeros(max_iter)\n\n print(\"\\nGradient descent starts\\n\")\n\n for i in range(0, max_iter):\n \n J = np.sum( (y_hat(x, w) - y) ** 2 ) / (2 * N)\n\n J_hist[i] = J\n \n print(\"Iteration %d, J(w): %f\\n\" % (i, J))\n \n gradient = np.dot(x.T, y_hat(x, w) - y) / N \n \n w = w - alpha * gradient\n\n print(\"Gradient descent finished.\\n\")\n \n return (J_hist, w)", "def run_gradient_descent(data,theta,alpha,num_iters):\n population = data[:,0]\n prices = data[:,1]\n x = ones(shape=(len(population),2)) #add ones for theta0 \n x[:,1] = population\n x = transpose(x)\n error_history = zeros(shape=(num_iters,1))\n \n for i in range(num_iters):\n predictions = theta.dot(x)\n errors_x1 = (predictions - prices) * x[0,:]\n errors_x2 = (predictions - prices) * x[1,:]\n theta[0][0] = theta[0][0] - alpha*(1.0/len(population))*errors_x1.sum()\n theta[0][1] = theta[0][1] - alpha*(1.0/len(population))*errors_x2.sum()\n error_history[i,0] = calculate_cost(theta,data)\n \n return theta, error_history", "def gradient_descent(self, X ,eta, tol,iter):\n gd=[]\n gd_x=[X]\n iteration=0\n # current_pt=X\n first_derivative=sym.diff(self.gdfunc)\n #print(first_derivative)\n x=sym.Symbol('x')\n first_derivative=sym.lambdify(x,first_derivative)\n learn_rate=eta\n \n \n prev_x=X\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n #print(\"prev_x = \",prev_x,\" Next x = \",new_x)\n for i in range(iter):\n prev_x=new_x\n #print(prev_x)\n new_x=prev_x -(learn_rate*first_derivative(prev_x))\n gd_x.append(new_x)\n # print(\"x = \",new_x,\"Gradient =\",learn_rate*self.func(prev_x))\n if abs(self.func(new_x)) <= self.func(tol) :\n break\n iteration=iteration+1\n #print(\"Best at GD x= \",new_x)\n gd.append(gd_x)\n gd.append(new_x)\n gd.append(iteration)\n\n return gd", "def projected_gradient_descent(self, x, y):\n x_adv = x.clone().detach().requires_grad_(True).to(x.device)\n targeted = self.y_target is not None\n num_channels = x.shape[1]\n\n if self.random:\n x_adv = random_perturbation(x_adv, self.norm, self.eps)\n\n for i in range(self.num_steps):\n _x_adv = x_adv.clone().detach().requires_grad_(True)\n\n prediction = self.model(_x_adv)\n loss = self.loss_fn(prediction, self.y_target if targeted else y)\n loss.backward()\n\n with torch.no_grad():\n # Force the gradient step to be a fixed size in a certain norm\n if self.norm == 'inf':\n gradients = _x_adv.grad.sign() * self.step_size\n else:\n # Note .view() assumes batched image data as 4D tensor\n gradients = _x_adv.grad * 
self.step_size / _x_adv.grad.view(\n _x_adv.shape[0], -1) \\\n .norm(self.norm, dim=-1) \\\n .view(-1, num_channels, 1, 1)\n\n if targeted:\n # Targeted: Gradient descent with on the loss of the (incorrect) target label\n # w.r.t. the image data\n x_adv -= gradients\n else:\n # Untargeted: Gradient ascent on the loss of the correct label w.r.t.\n # the model parameters\n x_adv += gradients\n\n # Project back into l_norm ball and correct range\n if self.norm == 'inf':\n # Workaround as PyTorch doesn't have elementwise clip\n x_adv = torch.max(torch.min(x_adv, x + self.eps), x - self.eps)\n else:\n delta = x_adv - x\n\n # Assume x and x_adv are batched tensors where the first dimension is\n # a batch dimension\n mask = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1) <= self.eps\n\n scaling_factor = delta.view(delta.shape[0], -1).norm(self.norm,\n dim=1)\n scaling_factor[mask] = self.eps\n\n # .view() assumes batched images as a 4D Tensor\n delta *= self.eps / scaling_factor.view(-1, 1, 1, 1)\n\n x_adv = x + delta\n\n x_adv = x_adv.clamp(*self.clamp)\n\n return x_adv.detach()", "def gradient_descent(x, y, theta=[[0], [0]]):\n m = y.size\n j_history = []\n for i in range(ITERATIONS):\n h = x.dot(theta)\n theta = theta - (ALPHA / m) * (x.T.dot(h - y))\n j_history.append(compute_cost(x, y, theta))\n return theta, j_history", "def _gradient_descent(self, X, y, epochs, learning_rate, batch_size):\n num_feats = X.shape[1]\n num_samples = X.shape[0]\n\n y = y.reshape(num_samples, 1)\n W = np.random.rand(num_feats, 1)\n training_loss_epochs = []\n\n for ix in range(epochs):\n shuffled_ix = (np.arange(0, len(X)))\n np.random.shuffle(shuffled_ix)\n X = X[shuffled_ix, :]\n y = y[shuffled_ix, :]\n\n for batch_ix in np.arange(0, X.shape[0], batch_size):\n dW = self._compute_gradient(W, X[batch_ix:batch_ix + batch_size], y[batch_ix:batch_ix + batch_size])\n W -= learning_rate * dW\n\n if ix % 10 == 0:\n y_pred = np.dot(X, W)\n training_loss = self.mse(y, y_pred)\n print('epoch {0} : training loss {1}'.format(ix, training_loss))\n training_loss_epochs.append(training_loss[0])\n\n self.weights = W\n self.training_loss = training_loss_epochs\n return None", "def gradient_descent(\n self,\n coeffs, \n x_values, y_values):\n old_loss = self.old_loss\n mse = self.loss\n\n for i in range(self.steps):\n new_loss = self.loss_mse(coeffs, x_values, y_values)\n mse = np.append(mse, new_loss)\n if abs(new_loss - old_loss) <= self.early_stop:\n print(f\"Early cut off, difference of losses between steps is less that {self.early_stop}.\")\n break\n old_loss = new_loss\n\n coeffs = coeffs - (self.learning_rate)*self.gradient_calculation(coeffs, x_values, y_values)\n\n mse = np.append(mse, self.loss_mse(coeffs, x_values, y_values))\n self.coefficients = coeffs\n self.loss = mse", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n for n_iter in range(max_iters):\n # compute gradient\n grad = compute_gradient(y, tx, w)\n # gradient w by descent update\n if n_iter % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n w -= gamma * grad\n\n return w, compute_cost(y, tx, w)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = gradient_descent(y, tx, initial_w, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w_start = initial_w\n w = w_start\n\n for n_iter in 
range(max_iters):\n gradient = compute_gradient(y, tx, w)\n loss = compute_loss(y,tx,w)\n w = w - gamma * gradient\n\n return w, loss", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w = w-gamma*grad\n return w, loss", "def gradient_descent(self, X, theta, Y, m):\n\n Z = X.dot(theta)\n H = Predict.g(Z)\n gradient = np.dot(X.T, (H - Y)) / m\n return self.alpha * gradient", "def gradient_descent(x0,df,rate=0.1,max_iters=1000,min_step=1e-6,max_step=1e5,\n projection=None,trajectory=False,step_history=False,f=None,\n cost_history=False,feedback=False,plot_history=False):\n if feedback is True:\n print(\"gd.gradient_descent():\")\n if f is not None:\n assert callable(f)\n fx0 = f(x0)\n if feedback is True:\n print(f\" initial cost = {fx0:.2e}\")\n if projection is not None:\n assert callable(projection)\n project = True\n else:\n project = False\n if trajectory is True:\n xx = [x0.copy()]\n if step_history is True:\n steps = []\n if cost_history is True:\n assert callable(f)\n fx = [fx0]\n\n x = x0.copy()\n for i in range(max_iters):\n dx = -rate*df(x)\n if project is True:\n x0 = x.copy()\n x = projection(x0+dx)\n dx = x-x0\n else:\n x += dx\n if trajectory is True:\n xx.append(x.copy())\n if cost_history is True:\n fx += [f(x)]\n step_size = np.linalg.norm(dx)\n if step_history is True:\n steps += [step_size]\n if step_size < min_step or step_size > max_step:\n break\n\n results = dict()\n results['output'] = x\n if trajectory is True:\n results['trajectory'] = xx\n if cost_history is True:\n results['cost_history'] = fx\n if step_history is True:\n results['step_history'] = steps\n if plot_history is True:\n assert step_history is True or cost_history is True\n plt.figure()\n if step_history is True:\n plt.semilogy(steps,label='step size')\n if cost_history is True:\n plt.semilogy(fx,label='cost')\n plt.xlabel('iteration number')\n plt.title('Gradient Descent')\n plt.legend()\n results['figure'] = plt\n plt.show(block=False)\n \n if feedback is True:\n if f is not None:\n print(f\" final cost = {f(x):.2e}\")\n \n return results", "def gradient_descent(initial_theta, X, y, niter, alpha, Lambda=0.0):\n theta_list = []\n cost_list = []\n\n theta = initial_theta\n for i in range(0, niter):\n theta -= alpha*gradient(theta, X, y, Lambda)\n theta_list.append(theta)\n cost_list.append(cost(theta, X, y, Lambda))\n\n return theta_list, cost_list", "def run_steep_gradient_descent(data_x, data_y, len_data, alpha, theta):\n n = len_data\n # WE NEED TO transpose data_x into (p+1) *n ,theta is 1*(p+1)\n prod = np.dot(theta, data_x.transpose())\n\n prod -= data_y\n print(\"pro: data_x\", prod.shape, data_x.shape)\n #prod represent the loss of the hypothesis and true label\n sum_grad = np.dot(prod, data_x)\n print(\"总梯度的值:\",sum_grad.shape)\n\n # batch-gradient descent\n theta = theta -(alpha / n) * sum_grad\n return theta", "def gradientdescent(cost_func, theta, args=(), delta_func = 0):\n step = 1\n old_cost = 0\n while True:\n theta_old = theta.copy()\n cost = cost_func(theta, *args)\n delta = delta_func(theta, *args)\n theta = theta - step * delta\n if cost > old_cost and old_cost != 0:\n step = step*0.7\n if np.allclose(theta_old, theta):\n break\n old_cost = cost\n return theta", "def least_squares_GD(y, tx, initial_w, max_iters, gamma, loss_function=mse, gradient=mse_grad):\n w = initial_w\n for iter in range(max_iters):\n # compute gradient\n grad = gradient(y, tx, w)\n # update w\n w = w - gamma * grad\n loss = 
loss_function(y, tx, w)\n return w, loss", "def least_squares_gradient(y, tx, w): \n e = y - tx.dot(w)\n grad = -tx.T.dot(e) / len(e)\n return grad, e", "def gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter in range(max_iters):\n grad = compute_grad(y, tx, w)\n loss = compute_loss(y, tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n \n return w, loss", "def learning_by_gradient_descent(y, tx, w, gamma):\n loss = calculate_loss(y,tx,w)\n grad = calculate_gradient(y,tx,w)\n w_new = w - gamma*grad\n #grad is for debugging purpose\n return loss, w_new,grad", "def gradientFunction(theta, X, y):\n y = y[:, 0]\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad /= m\n return grad", "def step_maxL_gradient_descent(y, tx, w, gamma):\n loss=loss_maxL(y, tx, w)\n grad=calculate_maxL_gradient(y,tx,w)\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def stochastic_gradient_descent(X, y, max_niter=100):\n m, n = X.shape\n w = np.zeros((n, 1))\n\n for i in range(max_niter):\n data_indices = list(range(m))\n for j in range(m):\n alpha = 4.0 / (i + j + 1.0) + 0.01\n rand_idx = int(np.random.uniform(0, len(data_indices)))\n h = sigmoid(np.dot(X[rand_idx, :], w))\n error = h - float(y[rand_idx])\n w = w - alpha * np.outer(X[rand_idx, :], error)\n print('{0} iterations with error {1} weight {2} alpha={3}'.format(i, error, w, alpha))\n del(data_indices[rand_idx])\n classify.w = w\n return w", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def train_gradient_descent(self, X, y, learning_rate=0.01, n_iters=100):\r\n # Step 0: Initialize the parameters\r\n n_samples, n_features = X.shape\r\n self.weights = np.zeros(shape=(n_features,1))\r\n self.bias = 0\r\n costs = []\r\n\r\n for i in range(n_iters):\r\n # Step 1: Compute a linear combination of the input features and weights\r\n y_predict = np.dot(X, self.weights) + self.bias\r\n\r\n # Step 2: Compute cost over training set\r\n cost = (1 / n_samples) * np.sum((y_predict - y)**2)\r\n costs.append(cost)\r\n\r\n if i % 100 == 0:\r\n print(f\"Cost at iteration {i}: {cost}\")\r\n\r\n # Step 3: Compute the gradients\r\n dJ_dw = (2 / n_samples) * np.dot(X.T, (y_predict - y))\r\n dJ_db = (2 / n_samples) * np.sum((y_predict - y)) \r\n \r\n # Step 4: Update the parameters\r\n self.weights = self.weights - learning_rate * dJ_dw\r\n self.bias = self.bias - learning_rate * dJ_db\r\n\r\n return self.weights, self.bias, costs", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\r\n w_list = [initial_w]\r\n loss_list = []\r\n w = initial_w\r\n for n_iter in range(max_iters):\r\n gradient = compute_gradient(y,tx,w)\r\n loss = compute_loss_MSE(y,tx,w)\r\n w = w - gamma * gradient\r\n w_list.append(w)\r\n loss_list.append(loss)\r\n return w_list[-1], loss_list[-1]", "def logistic_regression_gradient_descent(y, tx, initial_w, max_iters, gamma):\n\tw = initial_w\n\n\tfor iter in range(max_iters):\n\t\tw = learning_by_gradient_descent(y, tx, w, 
gamma)\n\n\treturn w", "def gradient_descent(X, y, theta, learning_rate, num_iters, lambda_reg):\n thetas = [theta]\n cost = np.zeros(num_iters)\n\n J = mean_cross_entropy_costs(X, y, lambda_reg)\n cost[0] = J(thetas[0])\n for i in range(1, num_iters):\n thetas.append(compute_new_theta(X, y, thetas[i - 1], learning_rate, lambda_reg))\n cost[i] = J(thetas[i])\n return cost, thetas", "def minfunc(beta, yvec, xmat ):\n return yvec - exp(dot(xmat, beta))", "def gradient_descent(X, Y, max_iter=1000, eta=0.1, mu=0.01):\n Y_onehot = onehot_encoder.fit_transform(Y.reshape(-1,1))\n W = np.zeros((X.shape[1], Y_onehot.shape[1]))\n step = 0\n step_lst = []\n loss_lst = []\n W_lst = []\n\n while step < max_iter:\n step += 1\n W -= eta * gradient(X, Y_onehot, W, mu)\n step_lst.append(step)\n W_lst.append(W)\n loss_lst.append(loss(X, Y_onehot, W))\n\n df = pd.DataFrame({\n 'step': step_lst,\n 'loss': loss_lst\n })\n return df, W", "def learning_by_gradient_descent(y, tx, w, gamma):\n\tgrad = calculate_gradient(y, tx, w)\n\n\tw = w - gamma * grad\n\treturn w", "def gradientDescent(self,X, y, theta): \n # number of instances\n m = len(y)\n J_history = np.zeros((self.NUM_ITERS,1))\n for i in range(self.NUM_ITERS):\n h = self.sigmoid(X@theta)\n grad = 1 / m * X.T @ (h - y)\n theta = theta - self.ALPHA * grad \n J_history[i] = self.costFunction(theta, X, y)\n \n \n return theta, J_history", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, batch_size=10, verbose=False):\n return stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_mse, \n compute_mse_gradient, batch_size=batch_size, verbose=verbose)", "def gradient_descent(X, Y, epsilon=1e-6, l=1, step_size=1e-4, max_steps=1000):\n beta = np.zeros(X.shape[1])\n for s in range(max_steps):\n # TODO: Implement iterations.\n pass\n return beta", "def approx_grad(theta, X, y):\n grad_a = np.array([(cost(theta + e, X, y) - cost(theta - e, X, y)) / (2 * 1e-5)\n for e in np.identity(len(theta)) * 1e-5])\n return grad_a", "def compute_gradient(theta, X, y):\n m = X.shape[0]\n grad_theta = np.dot(X.transpose(), (np.dot(X, theta) - y)) / m\n #print theta, grad_theta, objective_function(theta, X, y)\n return grad_theta", "def gradient_descent(f, intial_guess, step_size = 0.01, max_iter = 10000, tol = 1e-12):\n\n x = np.array(intial_guess)\n for i in range(max_iter):\n x_vector = ad.create_vector('x', x)\n fn_at_x = f(x_vector)\n gradient = fn_at_x.getGradient(['x{}'.format(i) for i in range(1, len(x) + 1)])\n if np.sqrt(np.abs(gradient).sum()) < tol:\n break\n x = x - step_size * gradient\n return (x, i + 1)", "def gradient_descent(X, Y, iterations, alpha, l = 0):\n \n # initialize B0, B1, ..., Bp\n betas = np.array([0.0]*(len(X[0])+1))\n \n # initialize list of cost vs iterations; should see a gradual descent\n costs = np.array([0.0]*iterations)\n \n # number of observations\n m = len(X)\n \n for i in range(iterations):\n sumterms = 1.0/m * ([estimation(xvec,betas) for xvec in X] - Y)\n errors = np.array([0.0]*len(betas))\n errors[0] = sum(sumterms) # error term for B0 has no multiplier\n for k in range(1,len(betas)):\n errors[k] = np.dot(sumterms, [row[k-1] for row in X]) + l/m*betas[k]\n \n betas = betas - alpha * errors\n costs[i] = cost(X, Y, betas, l)\n \n return betas, costs", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n 
cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def gradient_descent_step(self, x, y, learning_rate):\n # compute derivative of loss wrt Z\n dZ = self.derivative_loss(y, self.predict(x))\n dW = np.dot(dZ, x)\n # subtract average derivative from weights\n self.w -= learning_rate * 1.0/dW.shape[0] * dW\n if self.fit_b:\n self.b -= learning_rate * (1.0/x.shape[0] * np.sum(dZ))", "def calculate_gradient(y, tx, w): \n return tx.T@(sigmoid(tx@w)-y)", "def costFunction(self, x, y ):\n self.yEst = self.forward_propagate(x)\n sqErrors = ( self.yEst - y ) ** 2\n J = sqErrors.sum() / 2\n return J", "def exp_grad(self, xs, *args, **kwargs):\n raise NotImplementedError", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def run_gradient_descent_iteration(X, Y, theta, alpha, lambda_factor, temp_parameter):\n delta = sparse.coo_matrix(theta.shape).toarray()\n\n h = compute_probabilities(X, theta, temp_parameter)\n\n for j in range(delta.shape[0]):\n y = Y\n y = np.where(y != j, 0, 1)\n p = y - h[j]\n\n x = X.T * p\n x = x.T\n x = x.sum(axis=0)\n\n grad = -x / (temp_parameter * X.shape[0]) + lambda_factor * theta[j]\n\n delta[j] += grad\n\n theta -= alpha * delta\n\n return theta", "def linearReg(x,y):\n X=np.array(x).reshape(-1,1)\n Y=np.array(y).reshape(-1,1)\n x_shape = X.shape\n num_var = x_shape[1] \n yintercept = 0\n slope = 0\n progress = []\n #intialize the parameter\n weight_matrix = np.random.normal(-1,1,(num_var,1))\n yintercept = np.random.rand(1)\n #cost minmization\n for i in range(200):\n dcostdm = np.sum(np.multiply(((np.matmul(X,weight_matrix)+ yintercept)-Y),X))*2/x_shape[0] #w.r.t to the weight\n dcostdc = np.sum(((np.matmul(X,weight_matrix)+yintercept)-Y))*2/x_shape[0] #partial derivative of cost w.r.t the intercept\n weight_matrix -= 0.1*dcostdm \n #updating the weights with the calculated gradients\n yintercept -= 0.1*dcostdc #updating the weights with the calculated gradients\n progress.append(np.array((weight_matrix,yintercept)))\n slope = weight_matrix\n return (slope[-1],yintercept)", "def fit(self, x, y):\n def initiate_theta(dim):\n self.theta = np.zeros(dim)\n # print('self.theta initiated is {}'.format(self.theta))\n \n def implement_sigmoid(x):\n if self.theta is None:\n initiate_theta(x.shape[1])\n z = np.matmul(np.transpose(self.theta), np.transpose(x))\n return 1/(np.ones(x.shape[0]) + np.exp(-z))\n \n def implement_partial_loss(x, y):\n return -np.matmul(np.transpose(y - implement_sigmoid(x)), x)/x.shape[0]\n \n def implement_transposed_hess(x):\n sigmoid_hadamard = implement_sigmoid(x) * (np.ones(x.shape[0]) - implement_sigmoid(x))\n hess2 = np.diag(sigmoid_hadamard)\n hess = np.matmul(hess2,x)\n hess = np.matmul(np.transpose(x),hess)/x.shape[0]\n hess_inverse = np.linalg.inv(hess)\n return hess_inverse\n \n def train(x, y):\n count = 0\n if self.theta is None:\n initiate_theta(x.shape[1])\n while count < self.max_iter:\n if self.verbose:\n loss_y1 = np.matmul(np.transpose(y), 
np.log(implement_sigmoid(x)))\n loss_y0 = np.matmul(np.transpose(np.ones(x.shape[0]) - y), np.log(np.ones(x.shape[0]) - implement_sigmoid(x)))\n loss = -(loss_y1 + loss_y0 )/x.shape[0]\n print('Average empirical loss for step {} is {}'.format(count, loss))\n delta = np.matmul(implement_transposed_hess(x), implement_partial_loss(x, y))\n new_theta = self.theta - delta * self.step_size\n delta_theta = np.linalg.norm(new_theta - self.theta)\n # print('delta is {}'.format(delta_theta))\n if delta_theta < self.eps:\n return new_theta\n else:\n self.theta = new_theta\n count += 1\n return self.theta\n \n return train(x, y)", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def fit(self, X, y):\n self.x_values = X\n self.y_values = y\n self.gradient_descent(self.coefficients, X, y)", "def gradientFunctionReg(theta, X, y, Lambda): \n y = np.squeeze(y)\n m = y.shape # number of training samples\n grad = X.T.dot(sigmoid(theta.dot(X.T))-1*y)\n grad[1:] = grad[1:] + Lambda*theta[1:]\n grad /= m\n\n return grad", "def GradientDescent(X, Y, alpha, iterations):\n\n\tn = X.shape[0]\n\tbeta = np.zeros((X.shape[1],1))\n\n\tfor i in range(1,iterations):\n\t\tbeta = beta - alpha*np.dot(np.transpose(X), np.dot(X, beta) - Y)/float(n)\n\t\t# risk = ((np.dot(X, beta) - Y)**2)/(2*float(n))\n\n\treturn beta", "def loss_and_grad(self, X, y):\n\n # Initialize the loss and gradient to zero.\n loss = 0.0\n grad = np.zeros_like(self.W)\n grad_tmp = np.zeros_like(self.W)\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and the gradient. 
Store the gradient\n # as the variable grad.\n # ================================================================ #\n \n exp_a = np.zeros((num_classes,num_train))\n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n \n #if i==0:\n grada = np.zeros(X.shape[1])\n \n for j in range(num_classes):\n if j != y[i]:\n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) \n else: \n grad_tmp[j,:] = X[i,:].T * (exp_a[j,i] / np.sum(exp_a[:,i])) - X[i,:].T \n\n grad += grad_tmp\n loss += Loss \n \n pass\n\n\n loss /= num_train\n grad /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def calculate_gradient(y, tx, w):\n\n\tret = tx.T.dot(sigmoid(np.dot(tx, w)) - y)\n\treturn ret", "def compute_gradient(self, X, y, weights):\n sigmoid = self.sigmoid(np.dot(X, weights))\n return np.dot(X.T, y - sigmoid)", "def least_squares_GD(y, tx, initial_w, max_iters, gamma):\n w = initial_w.copy()\n ws = [w]\n loss = compute_loss_LS(y, tx, w)\n losses = [loss]\n for n_iter in range(max_iters):\n gradient = compute_gradient_LS(y, tx, w)\n w -= gamma * gradient\n loss = compute_loss_LS(y, tx, w)\n ws.append(w)\n losses.append(loss)\n# print(\"Gradient Descent({bi}/{ti}): loss={l}, w0={w0}, w1={w1}\".format(\n# bi=n_iter, ti=max_iters - 1, l=loss, w0=w[0], w1=w[1]))\n\n return losses[-1], ws[-1]", "def least_squares_GD(y, tx, initial_w=None, max_iters=50, gamma=0.1):\n # Define parameters to store w and loss\n if np.all(initial_w == None): initial_w = np.zeros(tx.shape[1]) \n ws = [initial_w] # Initial guess w0 generated randomly\n losses = []\n w = ws[0]\n for n_iter in range(max_iters):\n # compute loss, gradient\n grad, err = least_squares_gradient(y, tx, w)\n loss = compute_mse(y,tx,w)\n # gradient w by descent update\n w = w - gamma * grad\n # store w and loss\n ws.append(w)\n losses.append(loss)\n #if (n_iter % int(max_iters/5)) == 0:\n #print(\"Gradient Descent({bi}/{ti}): loss={l}\".format(bi=n_iter, ti=max_iters,l=loss))\n return w,loss", "def fit(self, x, y):\n # *** START CODE HERE ***\n num_examples = x.shape[0]\n num_features = x.shape[1]\n iteration = 1\n if self.theta == None:\n self.theta = np.zeros((num_features,))\n while iteration <= self.max_iter:\n h_theta = np.dot(x, self.theta)\n g_theta = self.sigmoid(h_theta)\n J_cost = -np.mean(y*np.log(g_theta) + (1 - y)*np.log(1 - g_theta))\n H = 1/num_examples*(np.dot(np.transpose(g_theta*(1-g_theta))*np.transpose(x), x))\n J_prime = - 1/num_examples*np.dot(np.transpose(y - g_theta), x)\n d_theta = - np.linalg.solve(H, J_prime)\n self.theta += d_theta\n if np.linalg.norm(d_theta, 1) < self.eps:\n break\n if self.verbose:\n print(\"Loss value: \", J_cost)\n iteration += 1\n # *** END CODE HERE ***", "def gradientDescent(f, df, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def gradient_descent(x_data, starting_b, starting_w, 
learning_rate, num_iterations):\n\n b = starting_b\n w = starting_w\n\n for i in range(num_iterations):\n b, w = step_gradient(b, w, x_data, learning_rate)\n b_history.append(b) # stores bias approximations to plot\n w_history.append(w) # stores weight approximations to plot\n err = error(b, w, x_data)\n if err <= .6: # if the error is acceptable exit iterations loop\n print('error = % f' % err)\n break\n return [b, w]", "def logit_cost_grad(self, theta, X, y):\n\n grad = np.zeros(len(theta))\n\n ### YOUR CODE HERE\n sig = utils.sigmoid(theta)\n # sig = np.subtract(sig, y)\n sig = sig - y\n grad = np.dot(X.T, sig) + 2 * self.params['lamb'] * self.regularizer[1](self.weights)\n ### END YOUR CODE\n\n return grad", "def costFunction(self,theta, X, y): \n m = len(y)\n h = self.sigmoid(X@theta)\n J = 1 / m * (- y.T @ self.log(h) - (1-y).T @ self.log(1-h)) \n # grad = 1/ m * X.T @ (h - y)\n return J", "def gradient_descent(data_x, data_y, parameters, learn_rate, nb_iterations):\n\n # Cost history\n cost_tracking = np.zeros(nb_iterations)\n\n for _i in range(nb_iterations):\n parameters -= learn_rate * gradient(data_x, data_y, parameters)\n # recording the cost for each iteration\n cost_tracking[_i] = cost_function(data_x, data_y, parameters)\n\n return parameters, cost_tracking", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def compute_gradient(X, t, w): # TODO: try to change to square loss since it's hessian is easier to obtain\n # TODO : print to console the max gradient in every run\n A = np.dot(X, w)\n m = t.shape[0]\n C = -1 * t * (1 / (1 + np.exp(A * t)))\n return (1 / m) * np.dot(X.T, C)", "def batchGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n xTran = x.transpose()\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta)\n loss = hypothesis - y\n cost = np.sum(loss**2)/(2*m)\n gradient = np.dot(xTran,loss)/m\n theta = theta - alpha*gradient\n recurseCount += 1\n return recurseCount,theta", "def coordinate_descent(X, y, lam, theta=None, maxit=10000, eta=1e-8):\n \n n, m = X.shape\n \n # initialize the coefficients\n if theta is None: theta = np.random.randn(m, 1)\n \n # compute squared columns\n v = np.diag(np.dot(X.T, X))\n \n # coordinate optimization\n i = 0\n chg = 1\n theta_old = np.empty((m, 1))\n Xtheta = np.dot(X, theta)\n while i < maxit and chg > eta:\n i += 1\n theta_old = theta.copy()\n for j in range(m): \n Xtheta -= theta[j]*np.atleast_2d(X[:,j]).T\n alpha = np.dot(X[:,j].T, y - Xtheta)\n if abs(alpha) > lam: \n theta[j] = np.sign(alpha) * (abs(alpha) - lam) / v[j]\n Xtheta += theta[j]*np.atleast_2d(X[:,j]).T\n else:\n theta[j] = 0\n chg = np.sum((theta - theta_old)**2)/np.sum(theta_old**2)\n \n return theta, i", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma, debug = False):\n losses, ws = stochastic_gradient_descent(y, tx, initial_w, 1, max_iters, gamma, loss_f = model_linear.compute_loss, grad_f = model_linear.compute_gradient, debug = debug)\n return get_last_ans(ws, losses)", "def compute_gradient(self): # TODO: try to change to square loss since it's hessian is easier to obtain\n A = np.dot(self.X, self.w)\n m = self.t.shape[0]\n C = -1 * self.t * (1 / (1 + np.exp(A * self.t)))\n return (1 / m) * np.dot(self.X.T, C)", "def batchGD(self, x, y, epochs):\n print(\"Training using batch gradient descent\")\n epoch = 0\n #output training progress ten 
times in run\n outputChunk = int ( epochs / 10 )\n\n while epoch <= epochs:\n\n #output progress? \n if epoch % outputChunk is 0:\n J = self.costFunction(x,y)\n print(\"Epoch=\", epoch, \"J=\", J)\n\n #get analytic gradients \n partial_J_w_ih, partial_J_w_ho, partial_J_b_h, partial_J_b_o = \\\n self.deriv_costFunction( x, y )\n #take a GD step\n #To-do - implement variable learning rate\n self.w_ih -= partial_J_w_ih\n self.w_ho -= partial_J_w_ho\n self.b_h -= partial_J_b_h\n self.b_o -= partial_J_b_o\n \n epoch += 1", "def grad(self, A, y, x):\n z = y * A.dot(x) # decision value for each observation\n grad_x = -1*A[z < 1].T.dot(y[z < 1])\n # Gradient normalized by the num obs\n return grad_x / y.size", "def batch_grad_descent(X, y, alpha=0.1, num_iter=1000, check_gradient=False):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta_hist = np.zeros((num_iter+1, num_features)) #Initialize theta_hist\n loss_hist = np.zeros(num_iter+1) #initialize loss_hist\n theta = np.ones(num_features) #initialize theta\n\n count = 0\n while count < num_iter+1:\n if check_gradient:\n assert grad_checker(X,y,theta)\n\n grad = compute_square_loss_gradient(X,y,theta)\n theta -= alpha*grad\n theta_hist[count] = theta\n loss_hist[count] = compute_square_loss(X,y,theta)\n count += 1\n \n return theta_hist, loss_hist", "def steepest(Xf, yf, gamma=0.001, iterations=1000): # DONT WORK, be happy\n K = len(Xf[0,:])\n beta = np.random.randn(K, 1)\n for i in range(iterations):\n t = Xf@beta\n sigmoid = expit(t)\n #print(sigmoid)\n #siggy = 1./(1 + np.exp(t))\n #loss = yf - sigmoid\n #print(\"iteration %g, cost: %f\" % (i, loss))\n grad = 2/K*Xf.T@(sigmoid - yf)\n beta = beta - gamma*grad\n #cost = -np.sum(np.transpose(yf)@np.log(1 + siggy) - np.transpose(1-yf)@np.log(siggy))\n #print(cost)\n #print(i)\n #break\n return beta", "def stochastic_grad_descent(X, y, alpha=0.1, lambda_reg=1, num_iter=1000, checkin=100):\n num_instances, num_features = X.shape[0], X.shape[1]\n theta = np.ones(num_features) #Initialize theta\n theta_hist = np.zeros((num_iter, num_instances, num_features)) #Initialize theta_hist\n loss_hist = np.zeros((num_iter, num_instances)) #Initialize loss_hist\n epoch = 1\n while epoch < num_iter:\n instance = 1\n while instance < num_instances:\n if alpha == \"1/sqrt(t)\":\n alpha_0 = .01/np.sqrt(instance)\n elif alpha == \"1/t\":\n alpha_0 = .01/float(instance)\n else:\n alpha_0 = alpha\n index = np.random.randint(num_instances)\n vec = np.reshape(X[index,:].T,(1,49))\n grad = compute_regularized_square_loss_gradient(vec,y[index],theta,lambda_reg)\n theta = theta - alpha_0*grad\n theta_hist[epoch][instance] = theta\n loss_hist[epoch][instance] = compute_square_loss(vec,y[index],theta)\n instance += 1\n\n if type(checkin) is int and epoch%checkin==0:\n print(\"completed training epoch {}...\".format(epoch))\n \n epoch += 1\n\n return theta_hist, loss_hist", "def compute_gradient_lasso(y, tx, w, lambda_):\n e = y - tx.dot(w)\n subgrad = lambda_ * np.sign(w)\n\n return -tx.T.dot(e)/len(e) + subgrad", "def gradient(theta, X, y, learning_rate):\n m = len(y)\n\n theta = theta.reshape((-1,1))\n grad = np.zeros(theta.shape)\n h = sigmoid(np.dot(X, theta)) \n \n grad = np.dot((h-y).T, X)/m\n grad = grad.T\n grad[1:] += (learning_rate/m)*theta[1:]\n return grad", "def costFunction(theta,X,y):\n m = X.shape[0]\n J = 0\n h = sigmoid (np.dot(X,theta))\n \n J = (1/m)* ((-np.dot(y.T,(np.log(h)))) - np.dot((1 - y).T,(np.log(1-h))))\n \n #grad = (1/m) * np.dot(X.T,(h-y))\n grad = (1/m) * np.dot((h.T - y), X).T\n 
\n return J, grad", "def stochasticGradientDescent(x,y,theta,alpha):\n m,n = np.shape(x)\n convergence = 0.000000001\n lastCost = 0\n cost = -1 \n recurseCount = 0\n while abs(lastCost - cost) > convergence: # rcurse until converge\n lastCost = cost\n hypothesis = np.dot(x,theta) \n for i in range(m):\n # alpha = 4.0 / (1.0 + i) + 0.01 \n loss = hypothesis[i] - y[i]\n # gradient = np.dot(x[i],loss)\n gradient = x[i,:].transpose() * loss \n theta = theta - alpha * gradient\n cost = np.sum((hypothesis-y)**2)/(2*m)\n recurseCount += 1\n return recurseCount,theta", "def minimize(func, grad_func, x, y, theta_0, alpha_0=0.01, max_it=100):\n data = list(zip(x, y))\n theta, alpha = theta_0, alpha_0\n min_theta, min_value, it = None, float(\"inf\"), 0\n\n while it < max_it:\n\n value = sum(func(x_i, y_i, theta) for x_i, y_i in data)\n\n if value < min_value:\n min_theta = theta\n min_value = value\n it = 0\n alpha = alpha_0\n else:\n it += 1\n alpha *= 0.9\n\n for (x_i, y_i) in in_random_order(data):\n grad_i = grad_func(x_i, y_i, theta)\n theta = vector_subtract(theta, scalar_multiply(alpha, grad_i))\n\n return min_theta", "def calculate_gradient(y, tx, w):\n return tx.T.dot(sigmoid(tx.dot(w))-np.reshape(y,(len(y),1)))", "def sdot_asgd(y, nu, C, x_sample, W = None):\n # if W == None: W = np.zeros(y.shape[0]) else: assert(W.shape[0] == y.shape[0])\n W = np.zeros(y.shape[0]) # (500, 0)\n W_tmp = np.copy(W)\n #source_density = dp.get_density_by_name(name_source) # Density of source distribution\n h_save = np.empty_like(0)\n # Print iteration status\n niter = np.shape(x_sample)[0]\n for t in range(niter):\n if (t+1) % 10000 == 0:\n print(\"Iteration: {}\".format(t+1))\n \n # Sample from source distribution\n #x = source_density.sample_from(1).numpy()\n x = x_sample[t]\n\n # Gradient Step\n r = np.sum(np.square(x-y) , axis=1) - W_tmp # |x-y|^2 - W_tmp (900, )\n indx_min = np.argmin(r)\n grad = np.copy(nu)\n grad[indx_min] = grad[indx_min] - 1 # (900, )\n\n # Evaluate empirical Reward\n r2 = np.sum(np.square(x-y) , axis=1) - W # |x-y|^2 - W_tmp (900, )\n h = np.min(r2) + np.dot(W,nu) \n h_save = np.hstack((h_save,h))\n\n # Gradient Ascent \n W_tmp = W_tmp + C/np.sqrt(t+1) *grad # t+1 because it starts from 0\n W = t/(t+1) *W + 1/(t+1)*W_tmp # t+1 because it starts from 0\n # W = W / np.max(np.abs(W)) \n\n return W, h_save", "def gradient(cls, x):\n y = Sigmoid.apply(x)\n return np.multiply(y, 1 - y)", "def learning_by_penalized_gradient(y, tx, w, gamma, lambda_):\n\n #on test avec Newton\n\n loss,gradient,_ = penalized_logistic_regression(y,tx,w,lambda_)\n\n w = w - gamma*gradient\n return loss, w,gradient", "def fast_loss_and_grad(self, X, y):\n loss = 0.0\n grad = np.zeros(self.W.shape) # initialize the gradient as zero\n \n # ================================================================ #\n # YOUR CODE HERE:\n # Calculate the softmax loss and gradient WITHOUT any for loops.\n # ================================================================ #\n \n num_train = X.shape[0]\n num_classes = self.W.shape[0]\n \n# # vectorized loss calculation #\n class_scores_matrix = np.dot(self.W,X.T) # calculating class scores matrix (C x m): rows are class scores transposes\n class_scores_matrix -= np.max(class_scores_matrix) # considering the possible issue for numerical instability and account for it\n exp_a = np.exp(class_scores_matrix) # calculating the exponents\n \n# y_exp = np.array(exp_a[y, np.arange(0, class_scores_matrix.shape[1])])\n# #print(exp_a[:,:3])\n# #print(y[:3])\n# #print(y_exp[:3])\n \n# tt = 
np.sum(exp_a,axis=0)\n# tt2 = np.divide(tt,y_exp)\n# print(num_train)\n# tt3 = np.power(tt2,1/num_train)\n# loss = np.log(np.prod(tt3))\n \n \n \n \n (C, D) = self.W.shape\n N = X.shape[0]\n\n scores = np.dot(self.W, X.T)\n scores -= np.max(scores) # shift by log C to avoid numerical instability\n\n y_mat = np.zeros(shape = (C, N))\n y_mat[y, range(N)] = 1\n\n # matrix of all zeros except for a single wx + log C value in each column that corresponds to the\n # quantity we need to subtract from each row of scores\n correct_wx = np.multiply(y_mat, scores)\n\n # create a single row of the correct wx_y + log C values for each data point\n sums_wy = np.sum(correct_wx, axis=0) # sum over each column\n\n exp_scores = np.exp(scores)\n sums_exp = np.sum(exp_scores, axis=0) # sum over each column\n result = np.log(sums_exp)\n\n result -= sums_wy\n\n loss = np.sum(result)\n loss /= num_train\n \n \n # vectorized gradient calculation #\n exp_a_sum = np.sum(exp_a,axis=0)\n\n y_mat_corres = np.zeros(shape = (num_classes, num_train))\n y_mat_corres[y, range(num_train)] = 1\n sum_exp_scores = np.sum(exp_a, axis=0) \n sum_exp_scores = 1.0 / exp_a_sum # division by sum over columns\n exp_a *= sum_exp_scores\n grad = np.dot(exp_a, X)\n grad -= np.dot(y_mat_corres, X)\n grad /= num_train\n \n\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss, grad", "def fmin_gradient_descent(f, x0, fprime=None, learn_rate=1e-2, momentum=0,\n weight_decay=0, learn_rate_schedule=None, momentum_schedule=None,\n max_grad_norm=0, learn_rate_drop_iters=0, decrease_type='linear',\n adagrad_start_iter=0, max_iters=1000, iprint=1, f_info=None, i_exe=0,\n check_points=None, f_exe=None, verbose=True):\n f_and_fprime = f_and_fprime_decorator(f, fprime, weight_decay)\n\n opt_schedule = OptimizationSchedule(learn_rate, momentum,\n learn_rate_schedule=learn_rate_schedule,\n momentum_schedule=momentum_schedule,\n learn_rate_drop_iters=learn_rate_drop_iters,\n adagrad_start_iter=adagrad_start_iter, decrease_type=decrease_type)\n\n x = x0\n x_inc = x * 0\n x_adagrad_history = x * 0\n\n max_grad_norm = float(max_grad_norm)\n\n t_start = time.time()\n y, x_grad = f_and_fprime(x)\n grad_scale = np.linalg.norm(x_grad, ord=2)\n\n if f_exe is not None and (i_exe > 0 or (check_points is not None and 0 in check_points)):\n f_exe(0, x)\n\n if verbose:\n s = 'iter %5d, f=%.8f, |change|_max=%10s, |grad|=%.8f' % (0, y, 'N/A', grad_scale)\n if f_info is not None:\n s += ', ' + f_info(x)\n s += ', time %.2f' % (time.time() - t_start)\n print s\n\n t_start = time.time()\n for i in range(max_iters):\n i_iter = i + 1\n learn_rate, momentum = opt_schedule.get_learn_rate_and_momentum(i_iter)\n\n if adagrad_start_iter > 0 and i_iter >= adagrad_start_iter:\n if i_iter == adagrad_start_iter:\n lr_scale = np.abs(x_grad).mean()\n x_adagrad_history += x_grad**2\n learn_rate = learn_rate * lr_scale / (np.sqrt(x_adagrad_history) + _DIVISION_EPS)\n\n if max_grad_norm > 0 and grad_scale > max_grad_norm:\n x_grad *= max_grad_norm / grad_scale\n\n x_inc = momentum * x_inc - learn_rate * x_grad\n x += x_inc\n\n \"\"\"\n if i == 546:\n import ipdb\n ipdb.set_trace()\n \"\"\"\n\n y, x_grad = f_and_fprime(x)\n grad_scale = np.linalg.norm(x_grad, ord=2)\n\n if iprint > 0 and i_iter % iprint == 0:\n s = 'iter %5d, f=%.8f, |change|_max=%.8f, |grad|=%.8f' % (i_iter, y, np.abs(x_inc).max(), grad_scale)\n if f_info is not None:\n s += ', ' + f_info(x)\n 
s += ', time %.2f' % (time.time() - t_start)\n if verbose:\n print s\n t_start = time.time()\n\n if f_exe is not None:\n if check_points is not None:\n if (i+1) in check_points:\n f_exe(i+1, x)\n elif i_exe > 0 and (i+1) % i_exe == 0:\n f_exe(i+1, x)\n\n return x", "def gradient_step(self):\n n = 10 #Granularity of line search\n grad = self.gradient()\n W = project(self.W[-1] + grad)\n A = np.linspace(0., self.alpha, n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def gradientFunctionReg(theta, X, y, Lambda):\n m = len(y) # number of training examples\n grad = np.zeros(theta.shape[0])\n theta = np.transpose(theta)\n sum_1 = 0\n X = X.values\n y = y.values\n #calcuate the theta_0 \n# ====================== YOUR CODE HERE ======================\n# Instructions: Compute the gradient of a particular choice of theta.\n# Compute the partial derivatives and set grad to the partial\n# derivatives of the cost w.r.t. each parameter in theta\n for i in range(theta.shape[0]):\n if i == 0:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i]\n else:\n for j in range(m):\n sum_1 += (sigmoid(np.dot(X[j],theta)) - y[j]) * X[j,i] + Lambda*theta[i]\n grad[i] = sum_1/m\n sum_1 = 0\n\n# =============================================================\n\n return grad", "def least_squares_SGD(y, tx, initial_w, max_iters, gamma):\n # Define parameters to store w and loss\n w = initial_w\n loss = compute_gradient(y, tx, initial_w)\n for it, (yb, txb) in enumerate(random_batches(y, tx, max_iters)):\n # compute 1 SGD and the loss\n grad = compute_gradient(np.array([yb]), txb[np.newaxis, :], w)\n # update w\n w -= gamma * grad\n if it % (max_iters//10) == 0:\n print(compute_cost(y, tx, w))\n return w, compute_cost(y, tx, w)", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def step_maxL_penalized_gradient_descent(y, tx, w, gamma, lambda_):\n grad=calculate_maxL_gradient(y,tx,w)+ 2*lambda_*w \n loss=loss_maxL(y, tx, w)+ lambda_* np.linalg.norm(w)**2\n # update w by gradient\n w=w-gamma*grad\n return w, loss", "def gradientDescent(X, y, theta, alpha, num_iters):\n\n # Initialize some useful values\n J_history = []\n m = y.size # number of training examples\n\n for i in range(num_iters):\n # ====================== YOUR CODE HERE ======================\n # Instructions: Perform a single gradient step on the parameter vector\n # theta.\n #\n # Hint: While debugging, it can be useful to print out the values\n # of the cost function (computeCost) and gradient here.\n #\n # Calculate the gradient step according to the equation for theta1:\n g_step1 = (alpha / m * np.sum( (np.dot(X,theta) - y) * X[:,1]) )\n # Gradient step for theta knot:\n g_step0 = (alpha / m * np.sum( (np.dot(X,theta) - y) ) )\n \n #update theta\n theta[0] = (theta[0] - g_step0)\n theta[1] = (theta[1] - g_step1)\n \n #print([theta , g_step1, g_step0])\n\n # ============================================================\n\n # Save the cost J in every iteration\n J_history.append(computeCost(X, y, 
theta))\n\n return theta, J_history", "def logistic_regression(y, tx, initial_w, max_iters, gamma, verbose=False):\n return gradient_descent(y, tx, initial_w, max_iters, gamma, \n compute_logistic_loss, compute_logistic_gradient, verbose=verbose)", "def calc_gradient(self, W, X, y, reg):\n\n N = X.shape[0]\n grad_W = np.zeros_like(W)\n I = np.ones((1,10))\n score = np.dot(X, W) # (N, C)\n out = np.exp(score-np.dot(np.max(score, axis=1, keepdims=True ),I))\n #print(\"out\", out)\n out /= np.sum(out, axis=1, keepdims=True) # (N, C)\n \n dout = np.copy(out) # (N, C)\n dout[np.arange(N), y] -= 1\n grad_W = np.dot(X.T, dout) # (D, C)\n grad_W /= N\n #grad_W += reg * W\n \n return grad_W", "def grad_reglog(w, X, y, **kwargs):\n p = np.exp(-y * (np.dot(X, w)))\n P = p / (1. + p)\n return -1 * np.dot(X.T, P * y) / X.shape[0]", "def grad(self, A, y, x):\n z = y - A.dot(x) # Error for each observation\n grad_x = -1 * A.T.dot(np.sign(z))\n # Gradient normalized by the num obs\n return grad_x / y.size", "def stochastic_gradient_descent(y, tx, initial_w, max_iters, gamma, compute_loss, compute_grad,\n batch_size=1, verbose=False):\n \n w = initial_w.copy()\n loss = 0\n\n for n_iter, (minibatch_y, minibatch_tx) in \\\n enumerate(batch_iter(y, tx, batch_size, num_batches=max_iters)):\n \n grad = compute_loss(minibatch_y, minibatch_tx, w)\n loss = compute_grad(minibatch_y, minibatch_tx, w)\n\n w -= gamma * grad\n\n if verbose:\n print(f\"Stochastic Gradient Descent ({n_iter}/{max_iters - 1}): loss={loss}, w={w}\")\n\n return w, loss", "def learning_by_penalized_gradient_descent(y, tx, w, gamma, lambda_):\n loss = calculate_loss(y, tx, w) + lambda_ * np.squeeze(w.T.dot(w))\n grad = calculate_gradient(y, tx, w) + 2 * lambda_ * w\n w = w-gamma*grad\n return w, loss" ]
[ "0.69871354", "0.683685", "0.68088645", "0.6712275", "0.6709997", "0.66748154", "0.664998", "0.66304016", "0.6604216", "0.6590052", "0.65628034", "0.655809", "0.65575576", "0.647663", "0.64651537", "0.6445425", "0.64412385", "0.6429857", "0.64228696", "0.64162236", "0.64112717", "0.6346254", "0.63448113", "0.6318568", "0.6313317", "0.63116133", "0.62896365", "0.6288922", "0.6288777", "0.628868", "0.6271682", "0.62691283", "0.6265301", "0.625417", "0.6252475", "0.6236693", "0.62364036", "0.6214225", "0.62079084", "0.6199678", "0.61881125", "0.6185137", "0.6182693", "0.61747926", "0.6170685", "0.616478", "0.6156658", "0.615359", "0.61417603", "0.61346817", "0.6132093", "0.6108432", "0.610824", "0.6106273", "0.61057913", "0.6102462", "0.6093128", "0.60928327", "0.6085659", "0.60750484", "0.6057491", "0.60573906", "0.60551804", "0.6052469", "0.6044399", "0.6036861", "0.6033501", "0.60279137", "0.6027601", "0.6026224", "0.6016501", "0.6011959", "0.5995049", "0.5991516", "0.5991103", "0.59869725", "0.5979811", "0.5979082", "0.59750104", "0.59713703", "0.5971038", "0.59708345", "0.5964872", "0.59626096", "0.59598774", "0.5953133", "0.59503436", "0.59493446", "0.5948634", "0.59481204", "0.5944446", "0.59297454", "0.59185517", "0.5917788", "0.5912355", "0.59074676", "0.5904948", "0.59033215", "0.58982605", "0.5896677" ]
0.5987452
75
has min at (1,0), saddle point at (-1,0)
def f(x): return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saddle_point(self):\n\n maxmin_value, maxmin_strategy_set = self.maxmin(0)\n minmax_value, minmax_strategy_set = self.minmax(1)\n\n if maxmin_value == minmax_value:\n return maxmin_strategy_set.intersection(minmax_strategy_set)\n return None", "def exact_saddle(V,X,Y,Z,dim,Z0=None):\n #from all_functions import find_saddle,sum_of_e_field\n if dim==3:\n print \"here\"\n print find_saddle(V,X,Y,Z,3)\n [I,J,K]=find_saddle(V,X,Y,Z,3) # guess saddle point; Z0 not needed\n print I,J,K\n r0=[X[I],Y[J],Z[K]]\n if I<2 or I>V.shape[0]-2: \n print('exact_saddle.py: Saddle point out of bounds in radial direction.')\n return r0\n if J<2 or J>V.shape[1]-2:\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.')\n return r0\n if K<2 or K>V.shape[2]-2:\n print('exact_saddle.py: Saddle point out of bounds in axial direction.')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # change grid vectors as well\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n #################################### Minimize\n r=spo.minimize(sum_of_e_field,r0,args=(Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],r[2] \n ################################################################################################# \n if dim==2: \n if len(V.shape)==3:\n K=0 # in case there is no saddle\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n K=i-1\n Vs = V.shape\n if K>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,K-1] # potential to left\n v2=V[:,:,K] # potential to right (actually right at estimate; K+1 to be actually to right)\n V2=v1+(v2-v1)*(Z0-Z[K-1])/(Z[K]-Z[K-1]) # averaged potential around given coordinate\n [I,J,K0]=find_saddle(V,X,Y,Z,2,Z0) \n r0=X[I],Y[J]\n print 1\n if (I<2 or I>V.shape[0]-2): \n print('exact_saddle.py: Saddle point out of bounds in radial direction.\\n')\n return r0\n if (J<2 or J>V.shape[1]-1):\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.\\n')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # Matlab 4, not 2\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n ################################## Minimize\n r=spo.minimize(sum_of_e_field_2d,r0,args=(Z0,Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],Z0\n print Xs\n print Ys\n print Zs\n return [Xs,Ys,Zs]", "def __init__(self):\n self.center = Point()\n #x coordinate is set in these amount of pixels to leave a slight gap between the screen and paddle just like in real pong video games\n self.center.x = SCREEN_WIDTH - 10\n #when game starts, paddle is placed on the middle of screen's right edge\n self.center.y = SCREEN_HEIGHT / 2", "def saddle_point(I):\n #--- FILL ME IN ---\n\n m, n = I.shape\n\n #compute the inputs to the function lstsq\n\n #get sci\n sci = I.reshape(m*n, 1)\n #get A\n A = []\n for y in range(n):\n for x in range(m):\n #print((x,y))\n #print([x*x, x*y, y*y, x, y, 1])\n A.append([x*x, x*y, y*y, x, y, 1])\n\n A = np.array(A)\n \n parms = np.linalg.lstsq(A,sci)[0]\n #print(parms)\n r1 = np.array([[2*parms[0][0], parms[1][0]], \n 
[parms[1][0], 2*parms[2][0]]])\n r1 = np.linalg.inv(r1)\n r2 = np.array([[parms[3][0]], \n [parms[4][0]]])\n\n pt = np.negative(np.matmul(r1, r2))\n\n #------------------\n\n return pt", "def find_paddle(grid):\n for x in range(X_COLS):\n if grid[x][CURSOR_ROW] == 3:\n paddle_x = x\n\n return paddle_x", "def is_saddle(self, idx) -> bool:\n if idx == 0 or idx == len(self) - 1:\n logger.warning(\"Cannot be saddle point, index was at the end\")\n return False\n\n if any(self[i].energy is None for i in (idx - 1, idx, idx + 1)):\n logger.error(\n f\"Could not determine if point {idx} was a saddle \"\n f\"point, an energy close by was None\"\n )\n return False\n\n energy = self[idx].energy\n return self[idx - 1].energy < energy and self[idx + 1].energy < energy", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def point(k, steer):\r\n\tglobal translation\r\n\tdirection, sens = translation[steer]\r\n\tfront = (sens+1)+int(direction==\"y\")\r\n\tif front != k[\"front\"]:\r\n\t\tk[\"front\"] = front # Change le sens\r\n\t\tk[\"c\"] = k[\"graphism\"][front] # Met à jour le caractère\r\n\t\treturn True", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def draw_horizontal_paddle(self):\n pygame.draw.rect(self.screen, self.color, self.top_rect)\n pygame.draw.rect(self.screen, self.color, self.bot_rect)", "def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1", "def center_horizontal_paddle(self):\n self.top_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)", "def wall_check(x: int, y: int, state: bool) -> bool:\r\n if state:\r\n if x == 0 or x == shape-1 or y == 0 or y == shape-1:\r\n return True\r\n else:\r\n if x < 0 or x >= shape or y < 0 or y >= shape:\r\n return True\r\n return False", "def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()", "def find_point(ball_loc, direction, kick_positions, positions):\n # unpack leg positions\n kick_x, kick_y, kick_z = kick_positions\n pos_x, pos_y, pos_z = positions \n # ball position relative to the kicking foot\n ball_loc = [100, -100.55154471, 0.09521921 ]\n # all boundaries for the kicking leg\n min_x = int(kick_positions[0] - 75) #- 0.13641\n max_x = int(kick_positions[0] +75)\n #min_y = int(kick_positions[1] - 75) #0.1340\n #max_y = int(kick_positions[1] + 75)#0.1014\n min_y = -140\n max_y = -90\n #min_z = int(kick_positions[2] ) #0.05\n #max_z = int(kick_positions[2] + 50) #0.1526\n min_z = 40\n max_z = 75\n\n # make ball position in world_space coordinates\n bal_x = ball_loc[0]\n bal_y = ball_loc[1]\n bal_z = ball_loc[2]\n # make direction in world_space coordinates\n #direction_x = kick_x + direction[0]\n #direction_y = kick_y + direction[1]\n #direction_z = kick_z + direction[2]\n direction = np.matrix([ 
[direction[0]], [direction[1]], [direction[2]]])\n # no retraction when other leg is there(change these values)\n #if( pos_y < max_y or pos_y > min_y):\n # if( abs(pos_y - max_y) > abs(pos_y - min_y)):\n # min_y = pos_y\n # else:\n # max_y = pos_y\n best_pos = 0\n # make matrix of the world_space ball and direction coordinates\n bal_loc = np.matrix([[bal_x], [bal_y], [bal_z]])\n #direction = np.matrix([[direction_x], [direction_y], [direction_z]])\n for x in xrange(min_x, max_x, 10):\n for y in xrange(min_y, max_y, 10):\n for z in xrange(min_z, max_z, 10):\n global x_pos\n global y_pos\n global z_pos\n x_pos = x_pos + [x]\n y_pos = y_pos + [y]\n z_pos = z_pos + [z]\n contact_point, value = retractionPoint(bal_loc, np.matrix([[x], [y],\n [z]]), direction, 1)\n #print \"contact\", contact_point\n if value > best_pos:\n best_pos = value\n kick_x = x\n kick_y = y\n kick_z = z\n \n contact = [contact_point[0,0], contact_point[1,0], contact_point[2,0]]\n return contact, [kick_x, kick_y, kick_z]", "def __init__(self, x, y):\r\n super(paddle, self).__init__(image=paddle.paddle2, x=x, y=y)\r\n self.points=games.Text(value=0, size=50, color=color.white, top=5, right=games.screen.width-5)\r\n games.screen.add(self.points)", "def __init__(self, ai_settings, screen):\n super(PlayerHorizontalPaddle, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n self.top_rect = pygame.Rect(0, 0, ai_settings.horizontal_paddle_width, ai_settings.horizontal_paddle_height)\n self.bot_rect = pygame.Rect(0, 0, ai_settings.horizontal_paddle_width, ai_settings.horizontal_paddle_height)\n self.screen_rect = screen.get_rect()\n self.color = ai_settings.horizontal_paddle_color\n self.height = float(ai_settings.horizontal_paddle_height)\n # Paddle starts at the bottom and top\n self.top_rect.centerx = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.top_rect.top = self.screen_rect.top\n self.bot_rect.centerx = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_rect.bottom = self.screen_rect.bottom\n # Store a decimal value for the ship's center.\n self.x = float(self.top_rect.x)\n self.x = float(self.bot_rect.x)\n self.top_center = float(self.top_rect.centerx)\n self.bot_center = float(self.bot_rect.centerx)\n # Movement flag for continuous movement\n self.moving_left = False\n self.moving_right = False", "def shorten_paddle_exec(self):\n if self.shorten_paddle_count == 0 and self.glitch_count == 1:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width-20, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'magenta'\n self.paddle.filled = True\n self.paddle.fill_color = 'magenta'\n self.window.add(self.paddle)\n self.glitch_count += 1\n elif 0 < self.shorten_paddle_count <= 5:\n pass\n elif self.shorten_paddle_count > 5:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'black'\n self.paddle.filled = True\n self.paddle.fill_color = 'black'\n self.window.add(self.paddle)\n self.shorten_paddle_count = 0\n self.shorten_paddle_exist = False\n self.shorten_paddle_start = False\n self.glitch_count = 1", "def test_first_pos() -> None:\n assert sw.walk_to(1) == sw.Coordinate(0, 0)", "def checkXPos(self, *args):\n x = self.initialXScale.get()\n y = self.initialYScale.get()\n\n if x ** 2 + y ** 2 > self.radius**2:\n if x > 0:\n 
self.initialXScale.set(np.sqrt(self.radius**2 - y ** 2))\n else:\n self.initialXScale.set(-np.sqrt(self.radius**2 - y ** 2))", "def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1", "def detectPaddleCollision(self, paddle):\n if paddle.contains(self.left,self.top) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 15.0)\n #print 'topright paddle collision'\n if paddle.contains(self.left,self.bottom) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 13.0)\n #print 'bottomright paddle collision'\n if paddle.contains(self.right,self.top) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 13.0)\n #print 'topleft paddle collision'\n if paddle.contains(self.right,self.bottom) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(-15.0,-5.0)\n #print 'bottomleft paddle collision'", "def drawOrigin():\n if xMin < 0 < xMax:\n if yMin < 0 < yMax:\n x, y = cartesianToScreen(0, 0)\n\n pygame.draw.line(display, WHITE, (x - 6, y),\n (x + 6, y), 3)\n\n pygame.draw.line(display, WHITE, (x, y - 6),\n (x, y + 6), 3)", "def lmin(scape, start):\n i = start\n while scape[i - 1] < scape[i] - 0.06:\n i -= 1\n while scape[i + 1] < scape[i] - 0.06:\n i += 1\n return i", "def find_saddle(V,X,Y,Z,dim,Z0=None):\n debug=False # internal code only; typically False\n from project_parameters import scale\n if (dim==2 and Z0==None):\n return 'z0 needed for evaluation'\n if dim==3:\n if len(V.shape)!=3:\n return('Problem with find_saddle.m dimensionalities.')\n f=V/float(np.amax(V)) # Normalize field\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # grid spacing is automatically consistent thanks to BEM-solver\n E=np.sqrt(Ex**2+Ey**2+Ez**2) # magnitude of gradient (E field)\n m=E[1,1,1]\n origin=[1,1,1]\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n for k in range(E.shape[2]):\n if E[i,j,k]<m:\n m=E[i,j,k]\n origin=[i,j,k] \n if debug:\n print('DEBUGGING...')\n fig=plt.figure()\n e=np.reshape(E,(1,E.shape[0]*E.shape[1]*E.shape[2]))\n ind,e=np.argsort(e),np.sort(e)\n e=e[0]\n ind=ind[0] #Sort V by the same indexing.\n v=np.reshape(V,(1,V.shape[0]*V.shape[1]*V.shape[2]))\n v=v[0]\n plt.plot(e/float(np.amax(e)))\n def index_sort(v,e):\n \"\"\"Takes in two lists of the same length and returns the first sorted by the indexing of i sorted.\"\"\"\n es=np.sort(e)\n ix=np.argsort(e)\n vs=np.ones(len(v)) #Sorted by the sorting defined by f being sorted. 
\n # If v==e, this returns es.\n for i in range(len(v)):\n j=ix[i]\n vs[i]=v[j]\n return vs\n v=index_sort(v,e) # Is it supposed to look like this?\n plt.plot(v/float(np.amax(v)))\n plt.title('Debugging: blue is sorted gradient, green is potential sorted by gradient')\n plt.show() #f is blue and smooth, v is green and fuzzy.\n if origin[0]==(1 or V.shape[0]):\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[1]):\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[2]): \n print('find_saddle: Saddle out of bounds in z (k) direction.\\n')\n return origin\n #################################################################################################\n if dim==2: # Extrapolate to the values of A at z0.\n V2=V\n if len(V.shape)==3:\n Ks=0 # in case there is no saddle point\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n Ks=i-1\n if Z0<1:\n Ks+=1\n Vs=V.shape\n if Ks>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,Ks] \n v2=V[:,:,Ks+1]\n V2=v1+(v2-v1)*(Z0-Z[Ks])/(Z[Ks+1]-Z[Ks])\n V2s=V2.shape\n if len(V2s)!=2: # Old: What is this supposed to check? Matlab code: (size(size(A2),2) ~= 2)\n return('Problem with find_saddle.py dimensionalities. It is {}.'.format(V2s))\n f=V2/float(np.max(abs(V2)))\n [Ex,Ey]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]))\n E=np.sqrt(Ex**2+Ey**2)\n m=float(np.min(E))\n if m>1e-4: # This requires a grid with step size 0.01, not just 0.1.\n if debug:\n Is,Js=np.NaN,np.NaN\n print('Warning, there seems to be no saddle point.')\n mr=E[0,0]\n Is,Js=1,1 # in case there is no saddle\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n if E[i,j]<mr:\n mr=E[i,j]\n Is,Js=i,j\n origin=[Is,Js,Ks]\n if Is==1 or Is==V.shape[0]:\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if Js==1 or Js==V.shape[1]:\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n return origin", "def max_just_sway(self):\n minx, maxx = np.min(self.x_sway), np.max(self.x_sway)\n miny, maxy = np.min(self.y_sway), np.max(self.y_sway)\n right = np.abs(maxx-self.baseline['xc'])\n left = np.abs(minx-self.baseline['xc'])\n down = np.abs(miny-self.baseline['yc'])\n up = np.abs(maxy-self.baseline['yc'])\n return left, right, up, down", "def hit_paddle(self):\n pass\n\n #Implement if collision with paddle is detected\n\n #Add randomness to how ball direction will change and return value", "def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y", "def hit_wall(self):\n if self.ball.x <= 0 or self.ball.x + self.ball.width > self.window.width:\n self.__dx = -self.__dx\n if self.ball.y <= 0:\n self.__dy = -self.__dy", "def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()", "def create_paddle(self, pos):\n\n self.shape(\"square\")\n self.penup()\n self.color(\"blue\")\n self.shapesize(stretch_wid=1, stretch_len=4)\n self.setpos(pos)", "def show_paddle(self, screen, fgColor):\r\n if self.player_Num == 1:\r\n pygame.draw.rect(screen, fgColor, pygame.Rect((0, self.y, self.Width, self.Height)))\r\n elif self.player_Num == 2:\r\n pygame.draw.rect(screen, fgColor, 
pygame.Rect((self.screen_Width-self.Width, self.y, self.Width, self.Height)))", "def hit_wall(s):\n if s == [1, 1]: # We would enter the None-field\n return True\n elif s[0] < 0 or s[0] > 2 or s[1] < 0 or s[1] > 3: # We would be out of bounds\n return True\n else:\n return False", "def minX(self):\n return min(self.getx())", "def check_collide(self):\r\n for paddle in self.overlapping_sprites:\r\n self.score.value +=10", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def FindClosestPoint(self, ):\n ...", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def test_anchor_point(self):\n nb_points = 5\n points = np.array([[1, 2], [2, 1], [3, 7], [7, 2]]) # example of points\n\n anchor_point = convex_hull.lowest_coordinate(points) # anchor point\n right_anchor_point = [2, 1] # the right anchor points\n\n self.assertTrue((anchor_point == right_anchor_point).all())", "def always_touching(self):\n assert int(self.snake[0].real - self.snake[1].real) in [1, 0, -1] and int(\n self.snake[0].real - self.snake[1].real) in [1, 0, -1]", "def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def is_at_wall(self):\n return self.distmin < self.distmax*0.8", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def draw_skin_player(self, id):\n if id == self.id:\n self.screen.blit(self.paddle_1, (self.first_player_x, self.first_player_y))\n else:\n self.screen.blit(self.paddle_2, (self.second_player_x, self.second_player_y))\n return", "def paddle_reset_position(self, mouse):\n if (0 + self.paddle.width / 2) <= mouse.x <= (self.window.width - self.paddle.width / 2):\n self.paddle_x = mouse.x - self.paddle.width / 2\n self.window.add(self.paddle, self.paddle_x, self.paddle_y)", "def paddle_moving(self, mouse):\n # when the paddle is in the window\n if 0 + self.paddle.width/2 <= mouse.x <= self.window.width - self.paddle.width/2:\n self.paddle.x = mouse.x - self.paddle.width / 2\n\n # when the paddle is about to leave the left side of the window\n elif mouse.x < 0 + self.paddle.width/2:\n self.paddle.x = 0\n\n # when the paddle is about to leave the right side of the window\n elif mouse.x > self.window.width - self.paddle.width/2:\n self.paddle.x = self.window.width - self.paddle.width\n\n # the paddle's y coordinate will always be at the same as below\n self.paddle.y = self.window.height - self.paddle_offset", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def min_powerflow_rule(_m, l, y, s, t):\r\n\r\n return m.POWERFLOW_MIN[l] - m.p_L[l, y, s, t] <= 0", "def my_constraint_function(candidate):\r\n # In this case, we'll just say that the point has to lie \r\n # within a circle centered at (0, 0) of radius 1.\r\n if candidate[0]**2 + 
candidate[1]**2 > 1:\r\n return 1\r\n else:\r\n return 0", "def test_check_point():\n board = Board(640, 640, 8)\n board.start_game()\n assert board.check_point(board.SPACE_SIZE/2, board.SPACE_SIZE/2) is not None\n assert board.check_point(0, 0) is None", "def walls (x, y):\n North = True\n West = True\n East = True\n South = True\n if x == 1:\n West = False\n if x == 3:\n East = False\n if y == 1:\n South = False\n if y == 3:\n North = False\n\n if (y == 1):\n West = False\n East = False\n elif (x == 2) and (y == 2):\n East = False\n North = False\n elif (x == 3) and (y == 2):\n West = False\n elif (x == 2) and (y == 3):\n South = False\n return North, South, West, East", "def halfway(self, target):\r\n mx = (self.x + target.x)/2\r\n my = (self.y + target.y)/2\r\n return Point(mx, my)", "def center_flows(L_wprime, U_wprime, L_w3, U_w3, L_overlap, U_overlap):\n # examine every possible point\n current_dist_to_edge = -1\n point = (0,0)\n #print(\"w3 range: [{}, {}]\".format(L_w3, U_w3))\n #print(\"w' range: [{}, {}]\".format(L_wprime, U_wprime))\n #print(\"overlap range: [{},{}]\".format(L_overlap, U_overlap))\n for y in range(L_w3, U_w3 + 1):\n #print(\"y={}\".format(y))\n LH_bound = max(L_wprime, L_overlap - y)\n #print(\"LH bound = {}\".format(LH_bound))\n RH_bound = min(U_wprime, U_overlap - y)\n #print(\"RH bound = {}\".format(RH_bound))\n for x in range(LH_bound, RH_bound + 1):\n # w3 UB: 0x + 1y - U_w3 = 0\n # w3 LB: 0x + 1y - L_w3 = 0\n # wprime UB: 1x + 0y - U_wprime\n # wprime LB: 1x + 0y - L_wprime\n # wprime + w3 UB: 1x + 1y - U_wprime,wk\n # wprime + w3 LB: 1x + 1y - L_wprime,wk\n dist_to_edge = min(distance_point_to_line(x, y, 0, -1, U_w3), #0x-1y+U_w3=0\n distance_point_to_line(x, y, 0, -1, L_w3), #0x-1y+L_w3=0\n # -1x + 0y + U_wprime = 0\n distance_point_to_line(x, y, -1, 0, U_wprime),\n # -1x + 0y + L_wprime = 0\n distance_point_to_line(x, y, -1, 0, L_wprime),\n # -1x - 1y + U_overlap = 0\n distance_point_to_line(x, y, -1, -1, U_overlap),\n # -1 x - y + L_overlap = 0\n distance_point_to_line(x, y, -1, -1, L_overlap))\n if dist_to_edge > current_dist_to_edge:\n #print(\"At point ({},{}), distance to edge increased from {} to {}.\"\\\n # .format(x,y,current_dist_to_edge,dist_to_edge))\n current_dist_to_edge = dist_to_edge\n point = (x,y)\n return(point)", "def collision(self,x):\n return (1.092*x - 171)", "def __init__(self, screen_Size, paddle_Width):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Setup x,y limits for ball position\r\n self.left_x = paddle_Width\r\n self.right_x = self.screen_Width - paddle_Width\r\n self.top_y = self.Radius\r\n self.bot_y = self.screen_Height - self.Radius\r\n\r\n self.x = self.screen_Width//2\r\n self.y = np.random.randint(self.Radius, self.screen_Height-self.Radius)\r\n\r\n self.vx = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n # Ralley counter to see game progress\r\n self.rallies = 0", "def random_item_sp(self):\n if random.random() < 0.3:\n self.window.add(self.shorten_paddle, x=self.ball.x+self.objects_length/2, y=self.ball.y)\n self.shorten_paddle_exist = True", "def point_with_min_y(points):\n\n\tmin_idx = None\n\n\tfor a,coord in enumerate(points):\n\n\t\tif min_idx == None:\n\t\t\tmin_idx = a\n\t\t\tP0_Y = coord[1]\n\t\t\tP0_X = coord[0]\n\t\telif coord[1] < P0_Y:\n\t\t\t# look for the point with lowest y co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\t\telif (coord[1] == P0_Y) & (coord[0] < 
P0_X):\n\t\t\t# In-case of tie with lowest y co-ordinate\n\t\t\t# take one which is leftmost or lowest x \n\t\t\t# co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\n\n\treturn (P0_X,P0_Y)", "def neighbor(self,s):\n jump=20\n while True:\n s+=random.randint(-1*jump,jump)\n if s < pow(10,5) and s > pow(10,-5):return s", "def define_spot(self,mpos):\n mpos_coord = ((mpos[0] - 199)/87, (mpos[1] - 116)/87)\n if mpos_coord == (1,2):\n spot = \"1\"\n return spot\n if mpos_coord == (2,2):\n spot = \"2\" \n return spot\n if mpos_coord == (4,0):\n spot = \"3\"\n return spot\n if mpos_coord == (4,1):\n spot = \"4\" \n return spot\n else:\n return False", "def _wall_to(self, other):\n assert abs(self.x - other.x) + abs(self.y - other.y) == 1, '{}, {}'.format(self, other)\n if other.y < self.y:\n return N\n elif other.y > self.y:\n return S\n elif other.x < self.x:\n return W\n elif other.x > self.x:\n return E\n else:\n assert False", "def draw_X():\r\n x,y = pygame.mouse.get_pos()\r\n x = 3*x/300\r\n y = 3*y/300\r\n x = approximate(x)\r\n y = approximate(y)\r\n pos_x = 0\r\n pos_y = 0\r\n if x == 50:pos_x = 0\r\n elif x == 150:pos_x=1\r\n elif x == 250:pos_x=2\r\n if y == 50:pos_y=0\r\n elif y == 150:pos_y=1\r\n elif y == 250:pos_y=2\r\n if positions[pos_y][pos_x] == 0:\r\n positions[pos_y][pos_x] = -1\r\n pygame.draw.line(screen,(255,255,255),(x-40,y-40),(x+40,y+40),10)\r\n pygame.draw.line(screen,(255,255,255),(x+40,y-40),(x-40,y+40),10)\r\n players.reverse()\r\n else: \r\n print('the spot was occupied')\r\n time.sleep(.25)", "def reset_paddle(self):\r\n self.y = self.screen_Height // 2\r\n self.vy = 0", "def get_point_on(self, s):\n\n x = self.n1.x * (1 - s) + self.n2.x * s\n y = self.n1.y * (1 - s) + self.n2.y * s\n z = self.n1.z * (1 - s) + self.n2.z * s\n\n return [x, y, z]", "def detectWallCollision(self):\n if self.right >= GAME_WIDTH or self.left <= 0:\n self._vx = -1.0 * self._vx\n if self.top >= GAME_HEIGHT:\n self._vy = -1.0 * self._vy", "def halfway(self, target):\n mx = (self.x + target.x) / 2\n my = (self.y + target.y) / 2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def min_power_in_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.p_in[g, y, s, t] <= 0", "def draw_circle():\r\n board = positions.copy()\r\n best = -math.inf\r\n for i in range(3):\r\n for j in range(3):\r\n if board[i][j]==0:\r\n board[i][j]=1\r\n score = MinMax(board,'min')\r\n board[i][j]=0\r\n if score>best:\r\n best = score\r\n move = [i,j]\r\n y = move[0]\r\n x = move[1]\r\n positions[y][x]=1\r\n if x == 0: x = 50\r\n elif x == 1: x = 150\r\n elif x == 2: x = 250\r\n if y == 0: y = 50\r\n elif y == 1: y = 150\r\n elif y == 2: y = 250\r\n pygame.draw.circle(screen,(255,255,255),(x,y),-10+WIDTH/6,10)", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = 
self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def sprinkler(l):\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)\n t.left(90)\n t.forward(l / 2)\n t.right(90)\n t.forward(l)\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)", "def sw_corner(self):\n return (self.min_lat, self.min_lon)", "def minimax(board):\n\n current_player = player(board)", "def check_borders(self):\n # Go Homer!\n # https://en.wikipedia.org/wiki/Torus#Flat_torus\n if self._posn.x < 0:\n self._posn.x += self._win_w\n elif self._posn.x > self._win_w:\n self._posn.x -= self._win_w\n if self._posn.y < 0:\n self._posn.y += self._win_h\n elif self._posn.y > self._win_h:\n self._posn.y -= self._win_h", "def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos", "def initialCoordinates():\r\n return (-250,-250)", "def ground_min(self):\n\n def compare(e):\n if is_wire(e):\n return e.potential\n else:\n return float('inf')\n self.move_ground(min(self.elements, key=compare))", "def inSmallBlindPosition(self):\n return len(self.in_game) > 0 and ((self.dealer + 1) % len(self.in_game)) == self.position", "def handle_slopes(self, slopeG):\n\n\n\n colSprite = pygame.sprite.spritecollideany(self, slopeG)\n if colSprite: #and self.rect.y < colSprite.rect.y:\n self.fall = False\n\n tl = colSprite.rect.topleft # used for slope calculation only\n br = colSprite.rect.bottomright\n\n m1 = float((br[1]-tl[1])/(br[0]-tl[0])) # y2-y1/(x2-x1)\n angle_rad = math.atan(m1) # from atan(m1 - m1 /(1+m1m2))\n # The angle is normally 45 degrees\n\n if self.x_vel:\n #le = self.x_vel / abs(self.x_vel) * 4\n le = self.x_vel\n else:\n le = 0\n\n x_move_len = le\n y_move_len = self.calc_vertical(x_move_len, angle_rad)\n\n # just for debugging\n self.d1 = x_move_len\n self.d2 = y_move_len\n\n # Now, it is needed to move the player down till\n # he reaches the 'essence' of the slope. 
This is because I\n # am too lazy to implement pixel-perfect collision.\n # Since this is to be done only once, a variable will be used\n # to keep track of whether this has beend donef for one slope or not\n\n # tolerance for height changing\n tol = False\n if abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1]) <= 10:\n tol = True\n #print \"ABS \", abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1])\n\n if not self.prev_slope and tol:\n self.prev_slope = True\n\n x_off_mov = colSprite.rect.topleft[0] - self.rect.bottomleft[0]\n y_off_mov = self.calc_vertical(x_off_mov, angle_rad)\n\n # handling for rightwards velocity\n if self.direction == RIGHT:\n y_off_mov = -y_off_mov\n\n\n self.rect.move_ip((0, y_off_mov))\n\n # check collision with any slope\n\n #self.rect.move_ip((x_move_len, y_move_len))\n # it seems that the above code is redundant; will check\n self.rect.move_ip((-self.x_vel, 0)) # undo the shifting\n self.rect.move_ip((x_move_len, y_move_len))\n\n else:\n self.prev_slope = False", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def boundary(active, objects):\n limit = SIZE[1]\n for obj in objects:\n if active.pos_x == obj.pos_x:\n limit = min(limit, obj.pos_y)\n active.pos_y = limit-active.height\n active.col_d = True", "def stay_on_screen(self):\n if self.x <= 0 + SHIP_WIDTH/2:\n self.x += SHIP_MOVEMENT\n if self.x >= GAME_WIDTH- SHIP_WIDTH/2:\n self.x -= SHIP_MOVEMENT", "def advanceSilver():\n global silverBallX, silverSpeed\n silverBallX += silverSpeed\n if silverBallX <= -4:\n # Reached the bottom - switch directions\n silverBallX = -4\n silverSpeed = -silverSpeed\n elif silverBallX >= 2.8:\n # Reached the top - switch directions\n silverBallX = 2.8\n silverSpeed = -silverSpeed", "def update(self):\n # Get where the mouse is\n pos = pygame.mouse.get_pos()\n # Set the left side of the player bar to the mouse position\n self.rect.x = pos[0]\n # Make sure we don't push the player paddle \n # off the right side of the screen\n if self.rect.x > self.screenwidth - self.width:\n self.rect.x = self.screenwidth - self.width", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def canopy(self, h=3, b1=1):\n mu = min(5, 2 * h / self.sk)\n self.mu_canopy = mu\n self.s_canopy = self.mu_canopy * self.Ce * self.Ct * self.sk\n print(f'The canopy snow shape coeffeicient = {self.mu_canopy :.2f}')\n print(f'The peak canopy snow load = {self.s_canopy :.2f}kPa')", "def setupCollisions(self) :", "def borders((u,v)):\r\n return ((u,v+1,S), (u+1,v,W), (u,v,S), (u,v,W))", "def detectWallCollision(self): \n if self.posn_x > cw - self.ball_width: # Collision with right-hand container wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution # reverse direction. \n self.posn_x = cw - self.ball_width * 1.1 # anti-stick to the wall \n if self.posn_x < 1: # Collision with left-hand wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution \n self.posn_x = 2 # anti-stick to the wall \n if self.posn_y < self.ball_height: # Collision with ceiling. \n self.velocity_y = -self.velocity_y * self.coef_restitution \n self.posn_y = self.ball_height * 1.1 # ceiling collision anti-stick \n if self.posn_y > ch - self.ball_height * 1.1 : # Floor collision. 
\n self.velocity_y = - self.velocity_y * self.coef_restitution \n self.posn_y = ch - self.ball_height * 1.1 # anti-stick. Prevents out-of-bounds ball loss (stickiness) ", "def shorten_paddle_switch(self):\n for i in range(self.paddle.width):\n maybe_shorten = self.window.get_object_at(self.paddle.x + i, self.paddle.y)\n if maybe_shorten is self.shorten_paddle:\n self.window.remove(maybe_shorten)\n self.shorten_paddle_start = True", "def update(self, paddle_1, paddle_2):\r\n done = False\r\n \r\n p1_reward = 0\r\n p2_reward = 0\r\n\r\n # Move ball and move to edges if past boundary\r\n x_ = self.x + self.vx\r\n y_ = self.y + self.vy\r\n\r\n if x_ < self.left_x:\r\n x_ = self.left_x\r\n elif x_ > self.right_x:\r\n x_ = self.right_x\r\n\r\n if y_ < self.top_y:\r\n y_ = self.top_y\r\n elif y_ > self.bot_y:\r\n y_ = self.bot_y\r\n\r\n\r\n # Contact with top or bottom\r\n if y_ == self.top_y or y_ == self.bot_y:\r\n self.vy *= -1\r\n\r\n\r\n # Left side\r\n if x_ == self.left_x:\r\n if paddle_1.y <= y_ <= (paddle_1.y + paddle_1.Height):\r\n x_ += self.Radius\r\n change = abs(paddle_1.vy//8)\r\n self.vx = -1*self.vx + change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n\r\n self.rallies += 1\r\n\r\n p1_reward += 100\r\n p2_reward -= 0\r\n else:\r\n p1_reward -= 100\r\n p2_reward += 0\r\n done = True\r\n\r\n\r\n # Right side\r\n elif x_ == self.right_x:\r\n if paddle_2.y <= y_ <= (paddle_2.y + paddle_2.Height):\r\n x_ -= self.Radius\r\n change = abs(paddle_2.vy//8)\r\n self.vx = -1*self.vx - change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n self.rallies += 1\r\n\r\n p1_reward -= 0\r\n p2_reward += 100\r\n else:\r\n p1_reward += 0\r\n p2_reward -= 100\r\n done = True\r\n\r\n\r\n\r\n # Update ball position and velocity if exceeded\r\n if not done:\r\n self.x = x_\r\n self.y = y_\r\n\r\n if self.vx > self.V_max:\r\n self.vx = self.V_max\r\n elif self.vx < -self.V_max:\r\n self.vx = -self.V_max\r\n \r\n if self.vy > self.V_max:\r\n self.vy = self.V_max\r\n elif self.vy < -self.V_max:\r\n self.vy = -self.V_max\r\n\r\n\r\n p1_state, p2_state = self.state_observation(paddle_1, paddle_2)\r\n\r\n return p1_state, p2_state, p1_reward, p2_reward, done", "def is_ate(self, snake_x, snake_y):\n if snake_x == self.x and snake_y == self.y:\n return True", "def __init__(self, x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30", "def to_pygame(point):\n return int(point.x), int(-point.y+500)", "def find_lone(self, board):\n res = False\n if (self.player):\n if (self.board.p2vec[0] > board.p2vec[0]):\n res = True\n else:\n if (self.board.p1vec[0] > board.p1vec[0]):\n res = True\n return res", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def min_players(self):\n return 2" ]
[ "0.71217895", "0.6409307", "0.6010213", "0.58238053", "0.5752256", "0.57095367", "0.56663626", "0.5627546", "0.55863583", "0.5570218", "0.55443054", "0.55192417", "0.54011667", "0.53589064", "0.5356541", "0.53509325", "0.5303866", "0.5300293", "0.52988297", "0.5297635", "0.5293489", "0.52684414", "0.5262667", "0.5248076", "0.5230867", "0.5217862", "0.5212916", "0.52111244", "0.52027273", "0.52019304", "0.519126", "0.51905537", "0.51626915", "0.51573247", "0.5148206", "0.51352787", "0.5132712", "0.5128786", "0.5121812", "0.5121771", "0.5111286", "0.51055133", "0.50903136", "0.5090101", "0.5089237", "0.50840306", "0.50770104", "0.507572", "0.5071829", "0.50672513", "0.50577617", "0.5047452", "0.504491", "0.5042747", "0.50426036", "0.5038939", "0.50374514", "0.50357634", "0.50348085", "0.50324243", "0.5014629", "0.50141793", "0.5013991", "0.50114864", "0.5007923", "0.50025314", "0.5000074", "0.5000074", "0.5000074", "0.49982432", "0.49982432", "0.4995334", "0.49889064", "0.49858952", "0.49811077", "0.497974", "0.49691105", "0.49659568", "0.496489", "0.49610075", "0.49606717", "0.49605015", "0.49582118", "0.49575725", "0.49550268", "0.495349", "0.49527526", "0.4952468", "0.49465543", "0.49456188", "0.4944616", "0.49400762", "0.4937095", "0.4934013", "0.49338886", "0.49306953", "0.49227414", "0.49172264", "0.49164945", "0.49128988", "0.4910994" ]
0.0
-1
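A quick self-contained check of the claim in the record above, assuming sympy is available: for f(x) = -exp(x[0]**3/-3 + x[0] - x[1]**2) the gradient vanishes only at (1, 0) and (-1, 0), and the eigenvalues of the Hessian are both positive at (1, 0) (the minimum) but of mixed sign at (-1, 0) (the saddle point).

import sympy as sp

# Symbolic version of the document field f(x) = -math.exp(x[0]**3/-3 + x[0] - x[1]**2)
x0, x1 = sp.symbols('x0 x1', real=True)
f = -sp.exp(x0**3 / -3 + x0 - x1**2)

# Critical points: solve grad f = 0  ->  x0 = +/-1, x1 = 0
grad = [sp.diff(f, v) for v in (x0, x1)]
points = sp.solve(grad, (x0, x1), dict=True)

# Classify each critical point by the Hessian eigenvalues
H = sp.hessian(f, (x0, x1))
for pt in points:
    eigs = {ev.evalf(3): mult for ev, mult in H.subs(pt).eigenvals().items()}
    print(pt, eigs)
# {x0: 1, x1: 0}:  one positive eigenvalue of multiplicity 2 -> local minimum
# {x0: -1, x1: 0}: one positive and one negative eigenvalue  -> saddle point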
has min at (1,0), saddle point at (-1,0)
def f(x): return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def saddle_point(self):\n\n maxmin_value, maxmin_strategy_set = self.maxmin(0)\n minmax_value, minmax_strategy_set = self.minmax(1)\n\n if maxmin_value == minmax_value:\n return maxmin_strategy_set.intersection(minmax_strategy_set)\n return None", "def exact_saddle(V,X,Y,Z,dim,Z0=None):\n #from all_functions import find_saddle,sum_of_e_field\n if dim==3:\n print \"here\"\n print find_saddle(V,X,Y,Z,3)\n [I,J,K]=find_saddle(V,X,Y,Z,3) # guess saddle point; Z0 not needed\n print I,J,K\n r0=[X[I],Y[J],Z[K]]\n if I<2 or I>V.shape[0]-2: \n print('exact_saddle.py: Saddle point out of bounds in radial direction.')\n return r0\n if J<2 or J>V.shape[1]-2:\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.')\n return r0\n if K<2 or K>V.shape[2]-2:\n print('exact_saddle.py: Saddle point out of bounds in axial direction.')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # change grid vectors as well\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n #################################### Minimize\n r=spo.minimize(sum_of_e_field,r0,args=(Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],r[2] \n ################################################################################################# \n if dim==2: \n if len(V.shape)==3:\n K=0 # in case there is no saddle\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n K=i-1\n Vs = V.shape\n if K>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,K-1] # potential to left\n v2=V[:,:,K] # potential to right (actually right at estimate; K+1 to be actually to right)\n V2=v1+(v2-v1)*(Z0-Z[K-1])/(Z[K]-Z[K-1]) # averaged potential around given coordinate\n [I,J,K0]=find_saddle(V,X,Y,Z,2,Z0) \n r0=X[I],Y[J]\n print 1\n if (I<2 or I>V.shape[0]-2): \n print('exact_saddle.py: Saddle point out of bounds in radial direction.\\n')\n return r0\n if (J<2 or J>V.shape[1]-1):\n print('exact_saddle.py: Saddle point out of bounds in vertical direction.\\n')\n return r0\n if V.shape[0]>100:\n Vn = V[I-2:I+3,J-2:J+3,K-2:K+3] # create smaller 5x5x5 grid around the saddle point to speed up optimization\n # note that this does not prevent the optimization function from trying values outside this\n Xn,Yn,Zn=X[I-2:I+3],Y[J-2:J+3],Z[K-2:K+3] # Matlab 4, not 2\n else:\n Vn,Xn,Yn,Zn = V,X,Y,Z\n ################################## Minimize\n r=spo.minimize(sum_of_e_field_2d,r0,args=(Z0,Vn,Xn,Yn,Zn)) \n r=r.x # unpack for desired values\n Xs,Ys,Zs=r[0],r[1],Z0\n print Xs\n print Ys\n print Zs\n return [Xs,Ys,Zs]", "def __init__(self):\n self.center = Point()\n #x coordinate is set in these amount of pixels to leave a slight gap between the screen and paddle just like in real pong video games\n self.center.x = SCREEN_WIDTH - 10\n #when game starts, paddle is placed on the middle of screen's right edge\n self.center.y = SCREEN_HEIGHT / 2", "def saddle_point(I):\n #--- FILL ME IN ---\n\n m, n = I.shape\n\n #compute the inputs to the function lstsq\n\n #get sci\n sci = I.reshape(m*n, 1)\n #get A\n A = []\n for y in range(n):\n for x in range(m):\n #print((x,y))\n #print([x*x, x*y, y*y, x, y, 1])\n A.append([x*x, x*y, y*y, x, y, 1])\n\n A = np.array(A)\n \n parms = np.linalg.lstsq(A,sci)[0]\n #print(parms)\n r1 = np.array([[2*parms[0][0], parms[1][0]], \n 
[parms[1][0], 2*parms[2][0]]])\n r1 = np.linalg.inv(r1)\n r2 = np.array([[parms[3][0]], \n [parms[4][0]]])\n\n pt = np.negative(np.matmul(r1, r2))\n\n #------------------\n\n return pt", "def find_paddle(grid):\n for x in range(X_COLS):\n if grid[x][CURSOR_ROW] == 3:\n paddle_x = x\n\n return paddle_x", "def is_saddle(self, idx) -> bool:\n if idx == 0 or idx == len(self) - 1:\n logger.warning(\"Cannot be saddle point, index was at the end\")\n return False\n\n if any(self[i].energy is None for i in (idx - 1, idx, idx + 1)):\n logger.error(\n f\"Could not determine if point {idx} was a saddle \"\n f\"point, an energy close by was None\"\n )\n return False\n\n energy = self[idx].energy\n return self[idx - 1].energy < energy and self[idx + 1].energy < energy", "def saddle_points(matrix):\n if not all(len(row) == len(matrix[0]) for row in matrix[1:]):\n raise ValueError('Provided matrix is irregular.')\n columns = [col for col in zip(*matrix)]\n points = set()\n for ridx, row in enumerate(matrix):\n for cidx, element in enumerate(row):\n if element == max(row) and element == min(columns[cidx]):\n points.add((ridx, cidx))\n return points", "def point(k, steer):\r\n\tglobal translation\r\n\tdirection, sens = translation[steer]\r\n\tfront = (sens+1)+int(direction==\"y\")\r\n\tif front != k[\"front\"]:\r\n\t\tk[\"front\"] = front # Change le sens\r\n\t\tk[\"c\"] = k[\"graphism\"][front] # Met à jour le caractère\r\n\t\treturn True", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def draw_horizontal_paddle(self):\n pygame.draw.rect(self.screen, self.color, self.top_rect)\n pygame.draw.rect(self.screen, self.color, self.bot_rect)", "def determine_round_winner(self):\n\n if self.getX() + self.SIZE[0] < 0:\n # point for player two\n return 2\n elif self.getX() > Configuration.windowWidth:\n # point for player one\n return 1", "def center_horizontal_paddle(self):\n self.top_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_center = self.screen_rect.centerx - (self.screen_rect.centerx/2)", "def wall_check(x: int, y: int, state: bool) -> bool:\r\n if state:\r\n if x == 0 or x == shape-1 or y == 0 or y == shape-1:\r\n return True\r\n else:\r\n if x < 0 or x >= shape or y < 0 or y >= shape:\r\n return True\r\n return False", "def _step_their_paddle(self):\n if random.random() < self.their_update_probability:\n if self.paddle_l.y < self.ball.y:\n if self.paddle_l.top_bound < self.top_bound:\n self.paddle_l.up()\n else:\n if self.paddle_l.bottom_bound > self.bottom_bound:\n self.paddle_l.down()", "def find_point(ball_loc, direction, kick_positions, positions):\n # unpack leg positions\n kick_x, kick_y, kick_z = kick_positions\n pos_x, pos_y, pos_z = positions \n # ball position relative to the kicking foot\n ball_loc = [100, -100.55154471, 0.09521921 ]\n # all boundaries for the kicking leg\n min_x = int(kick_positions[0] - 75) #- 0.13641\n max_x = int(kick_positions[0] +75)\n #min_y = int(kick_positions[1] - 75) #0.1340\n #max_y = int(kick_positions[1] + 75)#0.1014\n min_y = -140\n max_y = -90\n #min_z = int(kick_positions[2] ) #0.05\n #max_z = int(kick_positions[2] + 50) #0.1526\n min_z = 40\n max_z = 75\n\n # make ball position in world_space coordinates\n bal_x = ball_loc[0]\n bal_y = ball_loc[1]\n bal_z = ball_loc[2]\n # make direction in world_space coordinates\n #direction_x = kick_x + direction[0]\n #direction_y = kick_y + direction[1]\n #direction_z = kick_z + direction[2]\n direction = np.matrix([ 
[direction[0]], [direction[1]], [direction[2]]])\n # no retraction when other leg is there(change these values)\n #if( pos_y < max_y or pos_y > min_y):\n # if( abs(pos_y - max_y) > abs(pos_y - min_y)):\n # min_y = pos_y\n # else:\n # max_y = pos_y\n best_pos = 0\n # make matrix of the world_space ball and direction coordinates\n bal_loc = np.matrix([[bal_x], [bal_y], [bal_z]])\n #direction = np.matrix([[direction_x], [direction_y], [direction_z]])\n for x in xrange(min_x, max_x, 10):\n for y in xrange(min_y, max_y, 10):\n for z in xrange(min_z, max_z, 10):\n global x_pos\n global y_pos\n global z_pos\n x_pos = x_pos + [x]\n y_pos = y_pos + [y]\n z_pos = z_pos + [z]\n contact_point, value = retractionPoint(bal_loc, np.matrix([[x], [y],\n [z]]), direction, 1)\n #print \"contact\", contact_point\n if value > best_pos:\n best_pos = value\n kick_x = x\n kick_y = y\n kick_z = z\n \n contact = [contact_point[0,0], contact_point[1,0], contact_point[2,0]]\n return contact, [kick_x, kick_y, kick_z]", "def __init__(self, x, y):\r\n super(paddle, self).__init__(image=paddle.paddle2, x=x, y=y)\r\n self.points=games.Text(value=0, size=50, color=color.white, top=5, right=games.screen.width-5)\r\n games.screen.add(self.points)", "def __init__(self, ai_settings, screen):\n super(PlayerHorizontalPaddle, self).__init__()\n self.screen = screen\n self.ai_settings = ai_settings\n self.top_rect = pygame.Rect(0, 0, ai_settings.horizontal_paddle_width, ai_settings.horizontal_paddle_height)\n self.bot_rect = pygame.Rect(0, 0, ai_settings.horizontal_paddle_width, ai_settings.horizontal_paddle_height)\n self.screen_rect = screen.get_rect()\n self.color = ai_settings.horizontal_paddle_color\n self.height = float(ai_settings.horizontal_paddle_height)\n # Paddle starts at the bottom and top\n self.top_rect.centerx = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.top_rect.top = self.screen_rect.top\n self.bot_rect.centerx = self.screen_rect.centerx - (self.screen_rect.centerx/2)\n self.bot_rect.bottom = self.screen_rect.bottom\n # Store a decimal value for the ship's center.\n self.x = float(self.top_rect.x)\n self.x = float(self.bot_rect.x)\n self.top_center = float(self.top_rect.centerx)\n self.bot_center = float(self.bot_rect.centerx)\n # Movement flag for continuous movement\n self.moving_left = False\n self.moving_right = False", "def shorten_paddle_exec(self):\n if self.shorten_paddle_count == 0 and self.glitch_count == 1:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width-20, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'magenta'\n self.paddle.filled = True\n self.paddle.fill_color = 'magenta'\n self.window.add(self.paddle)\n self.glitch_count += 1\n elif 0 < self.shorten_paddle_count <= 5:\n pass\n elif self.shorten_paddle_count > 5:\n self.window.remove(self.paddle)\n self.paddle = GRect(self.paddle_width, self.paddle_height, x=(self.window_width - self.paddle_width) / 2,\n y=self.window_height - self.paddle_offset)\n self.paddle.color = 'black'\n self.paddle.filled = True\n self.paddle.fill_color = 'black'\n self.window.add(self.paddle)\n self.shorten_paddle_count = 0\n self.shorten_paddle_exist = False\n self.shorten_paddle_start = False\n self.glitch_count = 1", "def test_first_pos() -> None:\n assert sw.walk_to(1) == sw.Coordinate(0, 0)", "def checkXPos(self, *args):\n x = self.initialXScale.get()\n y = self.initialYScale.get()\n\n if x ** 2 + y ** 2 > self.radius**2:\n if x > 0:\n 
self.initialXScale.set(np.sqrt(self.radius**2 - y ** 2))\n else:\n self.initialXScale.set(-np.sqrt(self.radius**2 - y ** 2))", "def update_ball(self):\n\t\tself.ball_x += self.velocity_x\n\t\tself.ball_y += self.velocity_y\n\t\tif self.ball_y < 0:\n\t\t\tself.ball_y = -self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_y > 1:\n\t\t\tself.ball_y = 2 - self.ball_y\n\t\t\tself.velocity_y = -self.velocity_y\n\t\tif self.ball_x < 0:\n\t\t\tself.ball_x = -self.ball_x\n\t\t\tself.velocity_x = -self.velocity_x\n\t\tif self.ball_x < 1:\n\t\t\treturn 0\n\t\tif self.ball_y > self.paddle_y + State.paddle_height or self.ball_y < self.paddle_y:\n\t\t\treturn -1\n\t\tself.ball_x = 2 - self.ball_x\n\t\tself.velocity_x = random.uniform(-0.015, 0.015) - self.velocity_x\n\t\tif abs(self.velocity_x) < 0.03:\n\t\t\tself.velocity_x = 0.03 if self.velocity_x > 0 else -0.03\n\t\tself.velocity_y = random.uniform(-0.03, 0.03) - self.velocity_y\n\t\tself.velocity_x = max(min(self.velocity_x, 1.0), -1.0)\n\t\tself.velocity_y = max(min(self.velocity_y, 1.0), -1.0)\n\t\treturn 1", "def detectPaddleCollision(self, paddle):\n if paddle.contains(self.left,self.top) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 15.0)\n #print 'topright paddle collision'\n if paddle.contains(self.left,self.bottom) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 13.0)\n #print 'bottomright paddle collision'\n if paddle.contains(self.right,self.top) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(5.0, 13.0)\n #print 'topleft paddle collision'\n if paddle.contains(self.right,self.bottom) and self._vy < 0:\n self.bouncesound.play()\n self.verticalBounce()\n self._vx = random.uniform(-15.0,-5.0)\n #print 'bottomleft paddle collision'", "def drawOrigin():\n if xMin < 0 < xMax:\n if yMin < 0 < yMax:\n x, y = cartesianToScreen(0, 0)\n\n pygame.draw.line(display, WHITE, (x - 6, y),\n (x + 6, y), 3)\n\n pygame.draw.line(display, WHITE, (x, y - 6),\n (x, y + 6), 3)", "def lmin(scape, start):\n i = start\n while scape[i - 1] < scape[i] - 0.06:\n i -= 1\n while scape[i + 1] < scape[i] - 0.06:\n i += 1\n return i", "def find_saddle(V,X,Y,Z,dim,Z0=None):\n debug=False # internal code only; typically False\n from project_parameters import scale\n if (dim==2 and Z0==None):\n return 'z0 needed for evaluation'\n if dim==3:\n if len(V.shape)!=3:\n return('Problem with find_saddle.m dimensionalities.')\n f=V/float(np.amax(V)) # Normalize field\n [Ex,Ey,Ez]=np.gradient(f,abs(X[1]-X[0])/scale,abs(Y[1]-Y[0])/scale,abs(Z[1]-Z[0])/scale) # grid spacing is automatically consistent thanks to BEM-solver\n E=np.sqrt(Ex**2+Ey**2+Ez**2) # magnitude of gradient (E field)\n m=E[1,1,1]\n origin=[1,1,1]\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n for k in range(E.shape[2]):\n if E[i,j,k]<m:\n m=E[i,j,k]\n origin=[i,j,k] \n if debug:\n print('DEBUGGING...')\n fig=plt.figure()\n e=np.reshape(E,(1,E.shape[0]*E.shape[1]*E.shape[2]))\n ind,e=np.argsort(e),np.sort(e)\n e=e[0]\n ind=ind[0] #Sort V by the same indexing.\n v=np.reshape(V,(1,V.shape[0]*V.shape[1]*V.shape[2]))\n v=v[0]\n plt.plot(e/float(np.amax(e)))\n def index_sort(v,e):\n \"\"\"Takes in two lists of the same length and returns the first sorted by the indexing of i sorted.\"\"\"\n es=np.sort(e)\n ix=np.argsort(e)\n vs=np.ones(len(v)) #Sorted by the sorting defined by f being sorted. 
\n # If v==e, this returns es.\n for i in range(len(v)):\n j=ix[i]\n vs[i]=v[j]\n return vs\n v=index_sort(v,e) # Is it supposed to look like this?\n plt.plot(v/float(np.amax(v)))\n plt.title('Debugging: blue is sorted gradient, green is potential sorted by gradient')\n plt.show() #f is blue and smooth, v is green and fuzzy.\n if origin[0]==(1 or V.shape[0]):\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[1]):\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n if origin[0]==(1 or V.shape[2]): \n print('find_saddle: Saddle out of bounds in z (k) direction.\\n')\n return origin\n #################################################################################################\n if dim==2: # Extrapolate to the values of A at z0.\n V2=V\n if len(V.shape)==3:\n Ks=0 # in case there is no saddle point\n for i in range(len(Z)):\n if Z[i-1]<Z0 and Z[i]>=Z0:\n Ks=i-1\n if Z0<1:\n Ks+=1\n Vs=V.shape\n if Ks>=Vs[2]: # Matlab had Z, not V; also changed from == to >=\n return('The selected coordinate is at the end of range.')\n v1=V[:,:,Ks] \n v2=V[:,:,Ks+1]\n V2=v1+(v2-v1)*(Z0-Z[Ks])/(Z[Ks+1]-Z[Ks])\n V2s=V2.shape\n if len(V2s)!=2: # Old: What is this supposed to check? Matlab code: (size(size(A2),2) ~= 2)\n return('Problem with find_saddle.py dimensionalities. It is {}.'.format(V2s))\n f=V2/float(np.max(abs(V2)))\n [Ex,Ey]=np.gradient(f,abs(X[1]-X[0]),abs(Y[1]-Y[0]))\n E=np.sqrt(Ex**2+Ey**2)\n m=float(np.min(E))\n if m>1e-4: # This requires a grid with step size 0.01, not just 0.1.\n if debug:\n Is,Js=np.NaN,np.NaN\n print('Warning, there seems to be no saddle point.')\n mr=E[0,0]\n Is,Js=1,1 # in case there is no saddle\n for i in range(E.shape[0]):\n for j in range(E.shape[1]):\n if E[i,j]<mr:\n mr=E[i,j]\n Is,Js=i,j\n origin=[Is,Js,Ks]\n if Is==1 or Is==V.shape[0]:\n print('find_saddle: Saddle out of bounds in x (i) direction.\\n')\n return origin\n if Js==1 or Js==V.shape[1]:\n print('find_saddle: Saddle out of bounds in y (j) direction.\\n')\n return origin\n return origin", "def max_just_sway(self):\n minx, maxx = np.min(self.x_sway), np.max(self.x_sway)\n miny, maxy = np.min(self.y_sway), np.max(self.y_sway)\n right = np.abs(maxx-self.baseline['xc'])\n left = np.abs(minx-self.baseline['xc'])\n down = np.abs(miny-self.baseline['yc'])\n up = np.abs(maxy-self.baseline['yc'])\n return left, right, up, down", "def hit_paddle(self):\n pass\n\n #Implement if collision with paddle is detected\n\n #Add randomness to how ball direction will change and return value", "def is_position_allowed(new_x, new_y):\n\n return min_x <= new_x <= max_x and min_y <= new_y <= max_y", "def hit_wall(self):\n if self.ball.x <= 0 or self.ball.x + self.ball.width > self.window.width:\n self.__dx = -self.__dx\n if self.ball.y <= 0:\n self.__dy = -self.__dy", "def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()", "def create_paddle(self, pos):\n\n self.shape(\"square\")\n self.penup()\n self.color(\"blue\")\n self.shapesize(stretch_wid=1, stretch_len=4)\n self.setpos(pos)", "def show_paddle(self, screen, fgColor):\r\n if self.player_Num == 1:\r\n pygame.draw.rect(screen, fgColor, pygame.Rect((0, self.y, self.Width, self.Height)))\r\n elif self.player_Num == 2:\r\n pygame.draw.rect(screen, fgColor, 
pygame.Rect((self.screen_Width-self.Width, self.y, self.Width, self.Height)))", "def hit_wall(s):\n if s == [1, 1]: # We would enter the None-field\n return True\n elif s[0] < 0 or s[0] > 2 or s[1] < 0 or s[1] > 3: # We would be out of bounds\n return True\n else:\n return False", "def minX(self):\n return min(self.getx())", "def check_collide(self):\r\n for paddle in self.overlapping_sprites:\r\n self.score.value +=10", "def argminX( self ):\n min = 1e30\n minX = None\n for i in range( 0, self.GetN() ):\n p = ( ROOT.Double(), ROOT.Double() )\n self.GetPoint( i, p[0], p[1] )\n if p[1] < min:\n min = p[1]\n minX = p[0]\n return minX", "def FindClosestPoint(self, ):\n ...", "def check_pos(self, x, y):\n if x >= WINDOWWIDTH or y >= WINDOWHEIGHT or x <=0 or y <= 0:\n return True", "def always_touching(self):\n assert int(self.snake[0].real - self.snake[1].real) in [1, 0, -1] and int(\n self.snake[0].real - self.snake[1].real) in [1, 0, -1]", "def test_anchor_point(self):\n nb_points = 5\n points = np.array([[1, 2], [2, 1], [3, 7], [7, 2]]) # example of points\n\n anchor_point = convex_hull.lowest_coordinate(points) # anchor point\n right_anchor_point = [2, 1] # the right anchor points\n\n self.assertTrue((anchor_point == right_anchor_point).all())", "def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2", "def find_min(self):\n\n\n min_x = 1000\n min_y = 1000\n k = len(self.__col_lista)\n for i in range(k):\n x, y = self.__col_lista[i]\n if x < min_x:\n min_x = x\n if y < min_y:\n min_y = y\n return min_x, min_y", "def is_at_wall(self):\n return self.distmin < self.distmax*0.8", "def find_basin(self, s):\n \n assert s.size==self.n\n atMin = False\n thisState = s.astype(np.int8)\n\n while not atMin: \n dE = self.neighbor_dE(thisState)\n if np.any( dE<0 ):\n ix = dE.argmin()\n thisState[ix] *= -1\n else:\n atMin = True\n return thisState", "def draw_skin_player(self, id):\n if id == self.id:\n self.screen.blit(self.paddle_1, (self.first_player_x, self.first_player_y))\n else:\n self.screen.blit(self.paddle_2, (self.second_player_x, self.second_player_y))\n return", "def paddle_reset_position(self, mouse):\n if (0 + self.paddle.width / 2) <= mouse.x <= (self.window.width - self.paddle.width / 2):\n self.paddle_x = mouse.x - self.paddle.width / 2\n self.window.add(self.paddle, self.paddle_x, self.paddle_y)", "def paddle_moving(self, mouse):\n # when the paddle is in the window\n if 0 + self.paddle.width/2 <= mouse.x <= self.window.width - self.paddle.width/2:\n self.paddle.x = mouse.x - self.paddle.width / 2\n\n # when the paddle is about to leave the left side of the window\n elif mouse.x < 0 + self.paddle.width/2:\n self.paddle.x = 0\n\n # when the paddle is about to leave the right side of the window\n elif mouse.x > self.window.width - self.paddle.width/2:\n self.paddle.x = self.window.width - self.paddle.width\n\n # the paddle's y coordinate will always be at the same as below\n self.paddle.y = self.window.height - self.paddle_offset", "def _findMinNode(self, s):\n\n minNode = None\n minVal = self.inf\n for vertex in s:\n if self.dist[vertex] < minVal:\n minVal = self.dist[vertex]\n minNode = vertex\n return minNode", "def min_powerflow_rule(_m, l, y, s, t):\r\n\r\n return m.POWERFLOW_MIN[l] - m.p_L[l, y, s, t] <= 0", "def my_constraint_function(candidate):\r\n # In this case, we'll just say that the point has to lie \r\n # within a circle centered at (0, 0) of radius 1.\r\n if candidate[0]**2 + 
candidate[1]**2 > 1:\r\n return 1\r\n else:\r\n return 0", "def test_check_point():\n board = Board(640, 640, 8)\n board.start_game()\n assert board.check_point(board.SPACE_SIZE/2, board.SPACE_SIZE/2) is not None\n assert board.check_point(0, 0) is None", "def walls (x, y):\n North = True\n West = True\n East = True\n South = True\n if x == 1:\n West = False\n if x == 3:\n East = False\n if y == 1:\n South = False\n if y == 3:\n North = False\n\n if (y == 1):\n West = False\n East = False\n elif (x == 2) and (y == 2):\n East = False\n North = False\n elif (x == 3) and (y == 2):\n West = False\n elif (x == 2) and (y == 3):\n South = False\n return North, South, West, East", "def halfway(self, target):\r\n mx = (self.x + target.x)/2\r\n my = (self.y + target.y)/2\r\n return Point(mx, my)", "def center_flows(L_wprime, U_wprime, L_w3, U_w3, L_overlap, U_overlap):\n # examine every possible point\n current_dist_to_edge = -1\n point = (0,0)\n #print(\"w3 range: [{}, {}]\".format(L_w3, U_w3))\n #print(\"w' range: [{}, {}]\".format(L_wprime, U_wprime))\n #print(\"overlap range: [{},{}]\".format(L_overlap, U_overlap))\n for y in range(L_w3, U_w3 + 1):\n #print(\"y={}\".format(y))\n LH_bound = max(L_wprime, L_overlap - y)\n #print(\"LH bound = {}\".format(LH_bound))\n RH_bound = min(U_wprime, U_overlap - y)\n #print(\"RH bound = {}\".format(RH_bound))\n for x in range(LH_bound, RH_bound + 1):\n # w3 UB: 0x + 1y - U_w3 = 0\n # w3 LB: 0x + 1y - L_w3 = 0\n # wprime UB: 1x + 0y - U_wprime\n # wprime LB: 1x + 0y - L_wprime\n # wprime + w3 UB: 1x + 1y - U_wprime,wk\n # wprime + w3 LB: 1x + 1y - L_wprime,wk\n dist_to_edge = min(distance_point_to_line(x, y, 0, -1, U_w3), #0x-1y+U_w3=0\n distance_point_to_line(x, y, 0, -1, L_w3), #0x-1y+L_w3=0\n # -1x + 0y + U_wprime = 0\n distance_point_to_line(x, y, -1, 0, U_wprime),\n # -1x + 0y + L_wprime = 0\n distance_point_to_line(x, y, -1, 0, L_wprime),\n # -1x - 1y + U_overlap = 0\n distance_point_to_line(x, y, -1, -1, U_overlap),\n # -1 x - y + L_overlap = 0\n distance_point_to_line(x, y, -1, -1, L_overlap))\n if dist_to_edge > current_dist_to_edge:\n #print(\"At point ({},{}), distance to edge increased from {} to {}.\"\\\n # .format(x,y,current_dist_to_edge,dist_to_edge))\n current_dist_to_edge = dist_to_edge\n point = (x,y)\n return(point)", "def collision(self,x):\n return (1.092*x - 171)", "def __init__(self, screen_Size, paddle_Width):\r\n self.screen_Width, self.screen_Height = screen_Size\r\n\r\n # Setup x,y limits for ball position\r\n self.left_x = paddle_Width\r\n self.right_x = self.screen_Width - paddle_Width\r\n self.top_y = self.Radius\r\n self.bot_y = self.screen_Height - self.Radius\r\n\r\n self.x = self.screen_Width//2\r\n self.y = np.random.randint(self.Radius, self.screen_Height-self.Radius)\r\n\r\n self.vx = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n self.vy = np.random.choice([-1, 1]) * np.random.randint(25, 30)\r\n\r\n # Ralley counter to see game progress\r\n self.rallies = 0", "def random_item_sp(self):\n if random.random() < 0.3:\n self.window.add(self.shorten_paddle, x=self.ball.x+self.objects_length/2, y=self.ball.y)\n self.shorten_paddle_exist = True", "def neighbor(self,s):\n jump=20\n while True:\n s+=random.randint(-1*jump,jump)\n if s < pow(10,5) and s > pow(10,-5):return s", "def point_with_min_y(points):\n\n\tmin_idx = None\n\n\tfor a,coord in enumerate(points):\n\n\t\tif min_idx == None:\n\t\t\tmin_idx = a\n\t\t\tP0_Y = coord[1]\n\t\t\tP0_X = coord[0]\n\t\telif coord[1] < P0_Y:\n\t\t\t# look for the point with 
lowest y co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\t\telif (coord[1] == P0_Y) & (coord[0] < P0_X):\n\t\t\t# In-case of tie with lowest y co-ordinate\n\t\t\t# take one which is leftmost or lowest x \n\t\t\t# co-ordinate\n\t\t\tmin_idx = a\n\t\t\tP0_X = coord[0]\n\t\t\tP0_Y = coord[1]\n\n\n\treturn (P0_X,P0_Y)", "def define_spot(self,mpos):\n mpos_coord = ((mpos[0] - 199)/87, (mpos[1] - 116)/87)\n if mpos_coord == (1,2):\n spot = \"1\"\n return spot\n if mpos_coord == (2,2):\n spot = \"2\" \n return spot\n if mpos_coord == (4,0):\n spot = \"3\"\n return spot\n if mpos_coord == (4,1):\n spot = \"4\" \n return spot\n else:\n return False", "def draw_X():\r\n x,y = pygame.mouse.get_pos()\r\n x = 3*x/300\r\n y = 3*y/300\r\n x = approximate(x)\r\n y = approximate(y)\r\n pos_x = 0\r\n pos_y = 0\r\n if x == 50:pos_x = 0\r\n elif x == 150:pos_x=1\r\n elif x == 250:pos_x=2\r\n if y == 50:pos_y=0\r\n elif y == 150:pos_y=1\r\n elif y == 250:pos_y=2\r\n if positions[pos_y][pos_x] == 0:\r\n positions[pos_y][pos_x] = -1\r\n pygame.draw.line(screen,(255,255,255),(x-40,y-40),(x+40,y+40),10)\r\n pygame.draw.line(screen,(255,255,255),(x+40,y-40),(x-40,y+40),10)\r\n players.reverse()\r\n else: \r\n print('the spot was occupied')\r\n time.sleep(.25)", "def _wall_to(self, other):\n assert abs(self.x - other.x) + abs(self.y - other.y) == 1, '{}, {}'.format(self, other)\n if other.y < self.y:\n return N\n elif other.y > self.y:\n return S\n elif other.x < self.x:\n return W\n elif other.x > self.x:\n return E\n else:\n assert False", "def reset_paddle(self):\r\n self.y = self.screen_Height // 2\r\n self.vy = 0", "def get_point_on(self, s):\n\n x = self.n1.x * (1 - s) + self.n2.x * s\n y = self.n1.y * (1 - s) + self.n2.y * s\n z = self.n1.z * (1 - s) + self.n2.z * s\n\n return [x, y, z]", "def detectWallCollision(self):\n if self.right >= GAME_WIDTH or self.left <= 0:\n self._vx = -1.0 * self._vx\n if self.top >= GAME_HEIGHT:\n self._vy = -1.0 * self._vy", "def halfway(self, target):\n mx = (self.x + target.x) / 2\n my = (self.y + target.y) / 2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def halfway(self, target):\n mx = (self.x + target.x)/2\n my = (self.y + target.y)/2\n return Point(mx, my)", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def smallest_bounding_box(msk):\n x, y, z = np.where(msk > 0)\n corner = np.array([x.min(), y.min(), z.min()])\n size = np.array([x.max() + 1, y.max() + 1, z.max() + 1])\n return corner, size", "def min_power_in_storage_rule(_m, g, y, s, t):\r\n\r\n return - m.p_in[g, y, s, t] <= 0", "def draw_circle():\r\n board = positions.copy()\r\n best = -math.inf\r\n for i in range(3):\r\n for j in range(3):\r\n if board[i][j]==0:\r\n board[i][j]=1\r\n score = MinMax(board,'min')\r\n board[i][j]=0\r\n if score>best:\r\n best = score\r\n move = [i,j]\r\n y = move[0]\r\n x = move[1]\r\n positions[y][x]=1\r\n if x == 0: x = 50\r\n elif x == 1: x = 150\r\n elif x == 2: x = 250\r\n if y == 0: y = 50\r\n elif y == 1: y = 150\r\n elif y == 2: y = 250\r\n pygame.draw.circle(screen,(255,255,255),(x,y),-10+WIDTH/6,10)", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = 
self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def sprinkler(l):\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)\n t.left(90)\n t.forward(l / 2)\n t.right(90)\n t.forward(l)\n t.right(90)\n t.forward(l / 2)\n t.right(-90)\n t.circle(l / 2)\n t.circle(- l / 2)", "def sw_corner(self):\n return (self.min_lat, self.min_lon)", "def minimax(board):\n\n current_player = player(board)", "def check_borders(self):\n # Go Homer!\n # https://en.wikipedia.org/wiki/Torus#Flat_torus\n if self._posn.x < 0:\n self._posn.x += self._win_w\n elif self._posn.x > self._win_w:\n self._posn.x -= self._win_w\n if self._posn.y < 0:\n self._posn.y += self._win_h\n elif self._posn.y > self._win_h:\n self._posn.y -= self._win_h", "def boundary(self):\n if self.pos.x < 0:\n self.pos.x = 0\n if self.pos.x > WIDTH - 48:\n self.pos.x = WIDTH - 48\n if self.pos.y < 0:\n self.pos.y = 0\n if self.pos.y > HEIGHT - 48:\n self.pos.y = HEIGHT - 48\n\n self.rect.topleft = self.pos", "def ground_min(self):\n\n def compare(e):\n if is_wire(e):\n return e.potential\n else:\n return float('inf')\n self.move_ground(min(self.elements, key=compare))", "def initialCoordinates():\r\n return (-250,-250)", "def inSmallBlindPosition(self):\n return len(self.in_game) > 0 and ((self.dealer + 1) % len(self.in_game)) == self.position", "def handle_slopes(self, slopeG):\n\n\n\n colSprite = pygame.sprite.spritecollideany(self, slopeG)\n if colSprite: #and self.rect.y < colSprite.rect.y:\n self.fall = False\n\n tl = colSprite.rect.topleft # used for slope calculation only\n br = colSprite.rect.bottomright\n\n m1 = float((br[1]-tl[1])/(br[0]-tl[0])) # y2-y1/(x2-x1)\n angle_rad = math.atan(m1) # from atan(m1 - m1 /(1+m1m2))\n # The angle is normally 45 degrees\n\n if self.x_vel:\n #le = self.x_vel / abs(self.x_vel) * 4\n le = self.x_vel\n else:\n le = 0\n\n x_move_len = le\n y_move_len = self.calc_vertical(x_move_len, angle_rad)\n\n # just for debugging\n self.d1 = x_move_len\n self.d2 = y_move_len\n\n # Now, it is needed to move the player down till\n # he reaches the 'essence' of the slope. 
This is because I\n # am too lazy to implement pixel-perfect collision.\n # Since this is to be done only once, a variable will be used\n # to keep track of whether this has beend donef for one slope or not\n\n # tolerance for height changing\n tol = False\n if abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1]) <= 10:\n tol = True\n #print \"ABS \", abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1])\n\n if not self.prev_slope and tol:\n self.prev_slope = True\n\n x_off_mov = colSprite.rect.topleft[0] - self.rect.bottomleft[0]\n y_off_mov = self.calc_vertical(x_off_mov, angle_rad)\n\n # handling for rightwards velocity\n if self.direction == RIGHT:\n y_off_mov = -y_off_mov\n\n\n self.rect.move_ip((0, y_off_mov))\n\n # check collision with any slope\n\n #self.rect.move_ip((x_move_len, y_move_len))\n # it seems that the above code is redundant; will check\n self.rect.move_ip((-self.x_vel, 0)) # undo the shifting\n self.rect.move_ip((x_move_len, y_move_len))\n\n else:\n self.prev_slope = False", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def boundary(active, objects):\n limit = SIZE[1]\n for obj in objects:\n if active.pos_x == obj.pos_x:\n limit = min(limit, obj.pos_y)\n active.pos_y = limit-active.height\n active.col_d = True", "def stay_on_screen(self):\n if self.x <= 0 + SHIP_WIDTH/2:\n self.x += SHIP_MOVEMENT\n if self.x >= GAME_WIDTH- SHIP_WIDTH/2:\n self.x -= SHIP_MOVEMENT", "def advanceSilver():\n global silverBallX, silverSpeed\n silverBallX += silverSpeed\n if silverBallX <= -4:\n # Reached the bottom - switch directions\n silverBallX = -4\n silverSpeed = -silverSpeed\n elif silverBallX >= 2.8:\n # Reached the top - switch directions\n silverBallX = 2.8\n silverSpeed = -silverSpeed", "def update(self):\n # Get where the mouse is\n pos = pygame.mouse.get_pos()\n # Set the left side of the player bar to the mouse position\n self.rect.x = pos[0]\n # Make sure we don't push the player paddle \n # off the right side of the screen\n if self.rect.x > self.screenwidth - self.width:\n self.rect.x = self.screenwidth - self.width", "def canopy(self, h=3, b1=1):\n mu = min(5, 2 * h / self.sk)\n self.mu_canopy = mu\n self.s_canopy = self.mu_canopy * self.Ce * self.Ct * self.sk\n print(f'The canopy snow shape coeffeicient = {self.mu_canopy :.2f}')\n print(f'The peak canopy snow load = {self.s_canopy :.2f}kPa')", "def checkIntoWall(MazeTupleSet,SolnTupleSet):\n ele = MazeTupleSet.intersection(SolnTupleSet)\n if(len(ele)==0): #if the intersection of wall and solution is zero\n return True #means we do not run into wall\n else:\n return False", "def setupCollisions(self) :", "def borders((u,v)):\r\n return ((u,v+1,S), (u+1,v,W), (u,v,S), (u,v,W))", "def detectWallCollision(self): \n if self.posn_x > cw - self.ball_width: # Collision with right-hand container wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution # reverse direction. \n self.posn_x = cw - self.ball_width * 1.1 # anti-stick to the wall \n if self.posn_x < 1: # Collision with left-hand wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution \n self.posn_x = 2 # anti-stick to the wall \n if self.posn_y < self.ball_height: # Collision with ceiling. \n self.velocity_y = -self.velocity_y * self.coef_restitution \n self.posn_y = self.ball_height * 1.1 # ceiling collision anti-stick \n if self.posn_y > ch - self.ball_height * 1.1 : # Floor collision. 
\n self.velocity_y = - self.velocity_y * self.coef_restitution \n self.posn_y = ch - self.ball_height * 1.1 # anti-stick. Prevents out-of-bounds ball loss (stickiness) ", "def shorten_paddle_switch(self):\n for i in range(self.paddle.width):\n maybe_shorten = self.window.get_object_at(self.paddle.x + i, self.paddle.y)\n if maybe_shorten is self.shorten_paddle:\n self.window.remove(maybe_shorten)\n self.shorten_paddle_start = True", "def update(self, paddle_1, paddle_2):\r\n done = False\r\n \r\n p1_reward = 0\r\n p2_reward = 0\r\n\r\n # Move ball and move to edges if past boundary\r\n x_ = self.x + self.vx\r\n y_ = self.y + self.vy\r\n\r\n if x_ < self.left_x:\r\n x_ = self.left_x\r\n elif x_ > self.right_x:\r\n x_ = self.right_x\r\n\r\n if y_ < self.top_y:\r\n y_ = self.top_y\r\n elif y_ > self.bot_y:\r\n y_ = self.bot_y\r\n\r\n\r\n # Contact with top or bottom\r\n if y_ == self.top_y or y_ == self.bot_y:\r\n self.vy *= -1\r\n\r\n\r\n # Left side\r\n if x_ == self.left_x:\r\n if paddle_1.y <= y_ <= (paddle_1.y + paddle_1.Height):\r\n x_ += self.Radius\r\n change = abs(paddle_1.vy//8)\r\n self.vx = -1*self.vx + change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n\r\n self.rallies += 1\r\n\r\n p1_reward += 100\r\n p2_reward -= 0\r\n else:\r\n p1_reward -= 100\r\n p2_reward += 0\r\n done = True\r\n\r\n\r\n # Right side\r\n elif x_ == self.right_x:\r\n if paddle_2.y <= y_ <= (paddle_2.y + paddle_2.Height):\r\n x_ -= self.Radius\r\n change = abs(paddle_2.vy//8)\r\n self.vx = -1*self.vx - change//2\r\n if self.vy < 0:\r\n self.vy -= change\r\n else:\r\n self.vy += change\r\n\r\n self.rallies += 1\r\n\r\n p1_reward -= 0\r\n p2_reward += 100\r\n else:\r\n p1_reward += 0\r\n p2_reward -= 100\r\n done = True\r\n\r\n\r\n\r\n # Update ball position and velocity if exceeded\r\n if not done:\r\n self.x = x_\r\n self.y = y_\r\n\r\n if self.vx > self.V_max:\r\n self.vx = self.V_max\r\n elif self.vx < -self.V_max:\r\n self.vx = -self.V_max\r\n \r\n if self.vy > self.V_max:\r\n self.vy = self.V_max\r\n elif self.vy < -self.V_max:\r\n self.vy = -self.V_max\r\n\r\n\r\n p1_state, p2_state = self.state_observation(paddle_1, paddle_2)\r\n\r\n return p1_state, p2_state, p1_reward, p2_reward, done", "def is_ate(self, snake_x, snake_y):\n if snake_x == self.x and snake_y == self.y:\n return True", "def __init__(self, x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30", "def to_pygame(point):\n return int(point.x), int(-point.y+500)", "def find_lone(self, board):\n res = False\n if (self.player):\n if (self.board.p2vec[0] > board.p2vec[0]):\n res = True\n else:\n if (self.board.p1vec[0] > board.p1vec[0]):\n res = True\n return res", "def l_point(self):\n self.l_score += 1\n self.update_scoreboard()", "def min_players(self):\n return 2" ]
[ "0.712052", "0.6408869", "0.60100293", "0.5823926", "0.57521075", "0.5708044", "0.56659967", "0.5627963", "0.5585765", "0.5570389", "0.5544212", "0.5519314", "0.5401504", "0.5359145", "0.53573614", "0.5350745", "0.5303898", "0.53009754", "0.5297841", "0.529778", "0.5294022", "0.52678794", "0.5262462", "0.5248198", "0.52303433", "0.52176684", "0.521256", "0.52102745", "0.5202335", "0.5201983", "0.51910055", "0.5190477", "0.5161941", "0.51581097", "0.51476526", "0.51355135", "0.5132682", "0.5127892", "0.5121573", "0.512089", "0.5110909", "0.51051843", "0.5090187", "0.5090043", "0.5088943", "0.50840795", "0.50772554", "0.50754637", "0.5073258", "0.50664866", "0.5057438", "0.5047639", "0.50439066", "0.50438356", "0.50434166", "0.50393254", "0.5038288", "0.50347257", "0.50344336", "0.5032683", "0.5014477", "0.5014266", "0.5013764", "0.501119", "0.50075114", "0.5001541", "0.49990723", "0.49990723", "0.49990723", "0.4997661", "0.4997661", "0.499678", "0.49896038", "0.4985652", "0.49823084", "0.49790633", "0.4970653", "0.4966593", "0.49645334", "0.4961709", "0.4961498", "0.49601662", "0.49587888", "0.49577975", "0.49551934", "0.49542117", "0.49534988", "0.4952921", "0.494735", "0.49464133", "0.4944786", "0.49401253", "0.49374658", "0.4934799", "0.4933879", "0.4929749", "0.49232095", "0.49170306", "0.49158564", "0.49139655", "0.4911248" ]
0.0
-1
df is a function of x_i, y_i, beta
def sgd_step(df, alpha, prev_beta, xy_i):
	x_i, y_i = xy_i
	gradient = df(x_i, y_i, prev_beta)
	return [beta_j + alpha * df_j for beta_j, df_j in zip(prev_beta, gradient)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_beta(self,df,tick,ind):\n cov = get_cov(df,tick,ind)\n var = df[ind].var()\n beta = cov / var\n return beta", "def create_beta_posteriors(df):\n goods = df.num_matured - df.fpd\n df['alpha_p'] = df.alpha + df.fpd\n df['beta_p'] = df.beta + goods\n return df", "def create_beta_priors(df):\n df['alpha'] = np.minimum(np.maximum((1 - df.expected) * np.power(df.expected, 2) / df.variance - df.expected, 0.1), 15)\n df['beta'] = df.alpha / df.expected - df.alpha\n return df", "def smale_beta(f, x0, df, args=()):\n _args = (x0,) + args\n beta = numpy.abs(f(*_args) / df[0](*_args))\n return beta", "def fun(_, y):\n return np.array([-self.r * self.beta * y[2] * y[0] / self.N,\n self.r * self.beta * y[2] * y[0] / self.N - self.sigma * y[1],\n self.sigma * y[1] - self.gamma * y[2],\n self.gamma * y[2]])", "def fun(_, y):\n return np.array([-self.r * self.beta * y[1] * y[0] / self.N,\n self.r * self.beta * y[1] * y[0] / self.N - self.gamma * y[1],\n self.gamma * y[1]])", "def df(x):\n raise NotImplementedError", "def test_fn(df, fn):\n\n y_pred = []\n y_true = []\n\n for key in df.index:\n y_t, *inputs = df.loc[key]\n y_true.append(y_t)\n y_p = fn(*inputs)\n y_pred.append(y_p)\n\n # linear regression without intercept\n c = np.mean(y_true) / np.mean(y_pred)\n y_pred = np.multiply(y_pred, c)\n\n rmse = np.sqrt(np.mean(np.subtract(y_pred, y_true) ** 2))\n return rmse, y_pred, y_true, c", "def eval(self, df):\n ## Check invariant; model inputs must be subset of df columns\n if not set(self.var).issubset(set(df.columns)):\n raise ValueError(\n \"Model function `{}` var not a subset of given columns\".format(\n self.name\n )\n )\n\n ## Set up output\n n_rows = df.shape[0]\n results = zeros((n_rows, len(self.out)))\n\n for ind in range(n_rows):\n results[ind] = self.func(*df.loc[ind, self.var])\n\n ## Package output as DataFrame\n return DataFrame(data=results, columns=self.out)", "def from_dataframe(df):\n X = sm.add_constant(np.array(df['x']))\n y = np.array(df['y']).reshape(-1,1)\n return y, X", "def _fit(self, df):\n return df", "def evaluate_df(self, df):\n ## Check invariant; model inputs must be subset of df columns\n var_diff = set(self.var).difference(set(df.columns))\n if len(var_diff) != 0:\n raise ValueError(\n \"Model inputs not a subset of given columns;\\n\"\n + \"missing var = {}\".format(var_diff)\n )\n\n df_tmp = df.copy().drop(self.out, axis=1, errors=\"ignore\")\n ## Evaluate each function\n for func in self.functions:\n ## Concatenate to make intermediate results available\n df_tmp = concat((df_tmp, func.eval(df_tmp)), axis=1)\n\n return df_tmp[self.out]", "def df_model(self):\n return self.Kernel.df(self.xdata)", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def df(x_i):\n return [2 * x_ij for x_ij in x_i]", "def eval(self, df):\n df_res = self.func(df)\n return df_res[self.out]", "def objective(beta, lambdat, X, y):\n return 1/len(y) * (np.sum(\n (np.maximum(0, 1-((y[:, np.newaxis]*X).dot(beta)))**2)))\\\n + lambdat * np.linalg.norm(beta)**2", "def SGD_beta(X, y, eta=1e-4, gamma=0.01):\n\n\t# Stochastic Gradient Descent, shuffle?\n\tbeta = np.random.randn(len(X[0]), 1)\n\tn = len(X)\n\tM = 10 #0.05*n \t # Size of each minibatch, should be smaller than n\n\tm = int(n/M) \t # Number of minibatches\n\tn_epochs = 500 \t\t # Nmber of epochs\n\n\tacc = np.zeros(n_epochs+1)\n\tepoch_list = np.zeros(n_epochs+1)\n\n\t#z_i = np.zeros(m)\n\t#model_i = np.zeros(m)\n\t#y_i = np.zeros(m)\n\n\tfor epoch in range(1,n_epochs+1):\n\t\tfor i in range(m):\n\n\t\t\trandom_index = 
np.random.randint(m) #Pick the k-th minibatch at random\n\t\t\txi = X[random_index:random_index+1]\n\t\t\tyi = y[random_index:random_index+1]\n\n\t\t\t#Compute the gradient using the data in minibatch Bk\n\t\t\tgrad_beta_C = beta_gradients(xi, yi, beta)\n\t\t\tbeta -= eta - gamma * grad_beta_C\n\n\t\t\t#y_i[i] = yi\n\t\t\t#z_i[i] = xi@beta\n\t\t\t#model_i[i] = logistic_function(z_i[i])\n\n\t\t#acc[epoch] = accuracy(model_i, y_i)\n\t\t#epoch_list[epoch] = epoch\n\n\treturn beta", "def solve_beta_mnt(X, Y, pos=False, learning_rate=0.01, stop_criteria=10**-4):\n n = len(Y)\n p = X.shape[1]\n iso_order = np.arange(p)\n \n # initialize\n beta_prev = np.ones(p)\n beta = np.random.normal(size = X.shape[1])\n \n # gradient descent\n i = 0.0 # iteration number\n while sum((beta-beta_prev)**2)**0.5 > stop_criteria:\n i += 1\n# print(sum((beta-beta_prev)**2)**0.5) # used for debug\n \n # calculate gradient\n beta_grad = -2/n * (X.T@Y - X.T@X@beta)\n # update beta_prev\n beta_prev = beta\n # update beta with projection\n beta = beta - (1/i) * learning_rate * beta_grad\n beta = IsotonicRegression().fit_transform(iso_order, beta)\n # if pos == True, assign zero to negative coordinates\n if pos: beta = np.where(beta > 0, beta, 0)\n# print(sum((beta-beta_prev)**2)**0.5) # used for testing\n return beta", "def bayes_cov_col(Y,X,cols,lm):\n\n #EM iterateit\n Yhat=pd.DataFrame(lm.predict(X))\n Yhat.index=Y.index\n Yhat.columns=Y.columns\n SSE_all=np.square(Y.subtract(Yhat))\n X_adjust=X.copy()\n\n\n df_SSE = []\n df_logit = []\n\n for curcov in cols:\n\n curcells=X[X[curcov]>0].index\n\n if len(curcells)>2:\n\n X_notcur=X.copy()\n X_notcur[curcov]=[0]*len(X_notcur)\n\n X_sub=X_notcur.loc[curcells]\n\n Y_sub=Y.loc[curcells]\n\n GENE_var=2.0*Y_sub.var(axis=0)\n vargenes=GENE_var[GENE_var>0].index\n\n Yhat_notcur=pd.DataFrame(lm.predict(X_sub))\n Yhat_notcur.index=Y_sub.index\n Yhat_notcur.columns=Y_sub.columns\n\n SSE_notcur=np.square(Y_sub.subtract(Yhat_notcur))\n SSE=SSE_all.loc[curcells].subtract(SSE_notcur)\n SSE_sum=SSE.sum(axis=1)\n\n SSE_transform=SSE.div(GENE_var+0.5)[vargenes].sum(axis=1)\n logitify=np.divide(1.0,1.0+np.exp(SSE_transform))#sum))\n\n df_SSE.append(SSE_sum)\n df_logit.append(logitify)\n\n X_adjust[curcov].loc[curcells]=logitify\n\n return X_adjust", "def beta(self, index):\n index_change = index.close.pct_change()\n beta = self.pct_change.cov(index_change) / index_change.var()\n return beta", "def gradient_descent(f, df, x, sigma=0.5, epsilon=1e-8):\n pass", "def compute_grad(beta, lambdat, X, y):\n return -2/len(y)*(np.maximum(0, 1-(\n (y[:, np.newaxis]*X).dot(beta)))).dot(\n y[:, np.newaxis]*X) + 2 * lambdat * beta", "def df(x):\n\n # coefficients\n A = 728.0\n B = 0.317\n C = 0.486\n D = -8.99 * 1.6\n\n # function\n dfx = 2 * D / x**3 + A / B**2 * math.exp(- x / B) - 42 * C / x**8\n\n return dfx", "def get_dynamic_bias_from_df(self, x: pd.Series,\n country_df: pd.DataFrame) -> np.ndarray:", "def calc_beta(fx, dfx):\n assert fx.ndim == 1 and fx.shape == dfx.shape\n n = fx.size\n f_bar = fx.mean()\n ratio = (dfx**2).sum() / ((fx - f_bar)**2).sum() * (n-1) / float(n)\n beta = sqrt(((fx - f_bar)**2).sum() / (n-1) * exp(-ratio))\n return beta", "def construct_df(t,y):\n\n df = np.zeros((3,3))\n\n df[0][0] = 77.27*(1.0 - y(1) -2.*8.375e-6*y(0))\n df[0][1] = 77.27*(1.0 -y(0) )\n df[0][2] = 0.0;\n df[1][0] = -1.0/77.27;\n df[1][1] = (-1.0/77.27)*(1.0+y(0))\n df[1][2] = 1.0/77.27\n df[2][0] = 0.161\n df[2][1] = 0.0\n df[2][2] = -0.161\n\n return df", "def gradientDescent(f, df, x, 
niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = -dfx\n slope = np.dot(point,-point)\n \n #calculate a\n a = backtracking(f,slope,x,point)\n \n\n #update the search point\n x_k = x + a*p\n points.append(x_k)\n x = x_k\n\n return points", "def df(self):\n return (self.x-1.0)*(self.y-1.0)", "def ml_df(df, parameters, t_size, model = DecisionTreeRegressor()):\n ndf = df[parameters]\n x = ndf.loc[:, ndf.columns != 'T_exp']\n y = ndf['T_exp']\n x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=t_size)\n model = model\n p = PolynomialFeatures(degree = 2)\n X_poly = p.fit_transform(x_train)\n X_poly_test = p.fit_transform(x_test)\n model.fit(X_poly,y_train)\n y_train_pred = model.predict(X_poly)\n y_test_pred = model.predict(X_poly_test)\n result = pd.DataFrame()\n result['T_exp'] = y_test\n result['T_prd'] = y_test_pred\n result['ratio'] = result['T_exp']/result['T_prd']\n return result", "def get_femat(self, beta: ndarray) -> ndarray:\n return self.data.weight[:, None]*self.fevar.mapping.jac(beta)", "def gradient_of_selection(f_jk,benefit_function,b,c=1,*params):\n f_jk['gos'] = f_jk.A*(b*benefit_function(f_jk.j+1,f_jk.k+1,*params)-c) - f_jk.B*b*benefit_function(f_jk.j,f_jk.k+1,*params)\n gos = f_jk.groupby('n')['gos'].sum() \n gos = gos*(Z-gos.index.values)*gos.index.values/(Z**2)*DELTA\n gos.loc[0]=gos.loc[100]=0\n return gos", "def get_alphas_and_betas(context, data):\r\n all_assets = context.portfolio.positions.keys()\r\n if context.index not in all_assets:\r\n all_assets.append(context.index)\r\n prices = data.history(all_assets, 'price', context.lookback, '1d')\r\n returns = prices.pct_change()[1:]\r\n # index_returns = returns[context.index]\r\n factors = {}\r\n for asset in context.portfolio.positions:\r\n try:\r\n y = returns[asset]\r\n factors[asset] = linreg(returns[context.index], y)\r\n except:\r\n log.warn(\"[Failed Beta Calculation] asset = %s\" % asset.symbol)\r\n return pd.DataFrame(factors, index=['alpha', 'beta'])", "def nnRegression(data):", "def df(self, x):\n\n return 2*math.exp(x*2) - math.exp(x)", "def predict(df,x):\n ml,g,sl2,sm2,sd2= x\n _,in_dic = rl.build_data_strucutre(df,'length_box_um',1)\n pred_mat = rl.prediction_total(ml,g,sl2,sm2,in_dic['reind_v'],in_dic['dat_v'],in_dic['s'],in_dic['S'],in_dic['dt'],in_dic['lane_ID_v'],in_dic['val_v'],1,sd2,nproc=10)\n df = rl.merge_df_pred(df,pred_mat)\n return df", "def with_beta(self, beta):\n def beta_div(loss):\n return beta * loss\n return self.with_post_function(beta_div)", "def df(self, x, X):\n if type(x) == list:\n x = np.array(x)\n return self.model.df(x, X, *self.params)", "def fit(self, df):\n return self", "def fit(self, df):\n return self", "def IRLS(self, y, X, beta, w, delta = 1e-8):\n W = sparse.diags(w)\n beta = inv(X.T @ W @ X) @ X.T @ W @ y\n w = 1 / np.maximum(delta, np.abs(y - X @ beta))\n return beta, w", "def GradientDescent(X, Y, alpha, iterations):\n\n\tn = X.shape[0]\n\tbeta = np.zeros((X.shape[1],1))\n\n\tfor i in range(1,iterations):\n\t\tbeta = beta - alpha*np.dot(np.transpose(X), np.dot(X, beta) - Y)/float(n)\n\t\t# risk = ((np.dot(X, beta) - Y)**2)/(2*float(n))\n\n\treturn beta", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def other_regression(df, x_cols, y_col):\n df = df[~ np.isnan(df[y_col])]\n for col in x_cols:\n df = df[~ np.isnan(df[col])]\n\n X = df[x_cols].to_numpy()\n X = sm.add_constant(X)\n y = df[y_col].to_numpy()\n mod = sm.OLS(y, X)\n res = mod.fit()\n return res\n #print(res.summary())", "def df(W, *args):\n # Get parameters 
needed to evaluate the objective function from args\n N = args[0]\n D = args[1]\n S = args[2]\n sigma_e = args[3]\n\n # Reshape W to a 20x20 matrix\n W = np.reshape(W, (10, 2))\n\n # Evaluate C matrix\n C = np.matmul(W, np.transpose(W)) + ((sigma_e**2) * np.identity(D))\n C_inv = np.linalg.inv(C)\n df = -N * (np.matmul(C_inv, np.matmul(S, np.matmul(C_inv, W)))\n - np.matmul(C_inv, W))\n return np.reshape(df, (20,))", "def logistic_regression(x,y,beta_start=None,verbose=False,CONV_THRESH=1.e-3,\n MAXIT=500):\n if x.shape[-1] != len(y):\n raise ValueError, \"x.shape[-1] and y should be the same length!\"\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, use simple logistic regression routine.\n return _simple_logistic_regression(x,y,beta_start=beta_start,\n CONV_THRESH=CONV_THRESH,MAXIT=MAXIT,verbose=verbose)\n if beta_start is None:\n beta_start = NA.zeros(npreds+1,x.dtype.char)\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n Xt = NA.transpose(X)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < MAXIT:\n beta_old = beta \n ebx = NA.exp(NA.dot(beta, X))\n p = ebx/(1.+ebx)\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likeliehood\n s = NA.dot(X, y-p) # scoring function\n J_bar = NA.dot(X*p,Xt) # information matrix\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n if iter == MAXIT and diff > CONV_THRESH: \n print 'warning: convergence not achieved with threshold of %s in %s iterations' % (CONV_THRESH,MAXIT)\n return beta, J_bar, l", "def _gradient(self, beta, y, mask=None):\n # if \"mask\" vector isn't already computed, compute it\n mask = 1 - y*self.X_train.dot(beta) if mask is None else mask\n\n if self.large:\n # more efficient for larger feature matrices\n empirical = 0\n for i in range(self.n):\n if mask[i] >= -self.h:\n if mask[i] > +self.h:\n empirical -= y[i]*self.X_train[i, :]\n else:\n empirical -= (y[i]*(mask[i] + self.h)*self.X_train[i, :])/(2*self.h)\n empirical /= self.n\n\n else:\n # more efficient for smaller feature matrices\n empirical = np.zeros((self.n, self.d))\n piecewise_2 = np.where(abs(mask) <= self.h)\n piecewise_3 = np.where(mask > self.h)\n\n # multiplies each row in the feature matrix by a scalar\n empirical[piecewise_2] = -((y[piecewise_2]*(mask[piecewise_2] + self.h))[:, np.newaxis]*self.X_train[piecewise_2])/(2*self.h)\n empirical[piecewise_3] = -(y[piecewise_3][:, np.newaxis]*self.X_train[piecewise_3])\n\n empirical = np.mean(empirical, axis=0)\n\n regularization = 2*self.lambduh*beta\n return empirical + regularization, mask", "def _(x: Iterable, y: Iterable, ddof: int = 1) -> DataFrame:\n # ddof: numpy v1.5+\n return numpy.cov(x, y, ddof=ddof)[0][1]", "def y(x,xi):\n return np.exp(-xi)-np.exp(-xi)*(x-xi)", "def test_df(x):\n dfx = np.array([x[1]*np.cos(x[0])+np.cos(x[1]),\n np.sin(x[0])-x[0]*np.sin(x[1])])\n return dfx", "def alpha_beta(returns, factor_returns):\n\n ret_index = returns.index\n beta, alpha = sp.sp.stats.linregress(factor_returns.loc[ret_index].values,\n returns.values)[:2]\n\n return alpha * APPROX_BDAYS_PER_YEAR, beta", "def strategy_returns(df, df_price_of_strategy):\r\n df_return_of_strategy = pd.DataFrame(index=df_price_of_strategy.index)\r\n cols = df_price_of_strategy.columns\r\n\r\n for priceSeries in cols:\r\n 
df_return_of_strategy[priceSeries] = (df_price_of_strategy[priceSeries]\r\n - df_price_of_strategy[priceSeries].shift()) / (\r\n df_price_of_strategy[priceSeries])\r\n\r\n return df_return_of_strategy", "def strategy_returns(df, df_price_of_strategy):\r\n df_return_of_strategy = pd.DataFrame(index=df_price_of_strategy.index)\r\n cols = df_price_of_strategy.columns\r\n\r\n for priceSeries in cols:\r\n df_return_of_strategy[priceSeries] = (df_price_of_strategy[priceSeries]\r\n - df_price_of_strategy[priceSeries].shift()) / (\r\n df_price_of_strategy[priceSeries])\r\n\r\n return df_return_of_strategy", "def y_func(x, a, ydim, dg):\n theta = 0.5\n y = 1 + 2 * x[:, 1].reshape(-1,1) + \\\n theta * np.multiply((a-1), (x[:, 0] > 0.5).reshape(-1,1)) + \\\n theta * np.multiply((2-a), (x[:, 0] <= 0.5).reshape(-1,1)) + \\\n np.random.randn(x.shape[0], ydim)\n return y", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n \r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range(num_iterations):\r\n # your code here\r\n cost = compute_cost(features, values, theta)/(2.0*m)\r\n cost_history.append([cost])\r\n \r\n error = features.dot(theta) - values\r\n error = np.reshape(error,(error.shape[0], 1))\r\n errorWeighted = features*error\r\n errorSum = (np.sum(errorWeighted,0))/(m*1.0)\r\n theta = theta - alpha*errorSum \r\n \r\n return theta, pandas.Series(cost_history)", "def error(beta_0: float, beta_1: float, x_i: float, y_i: float) -> float:\n return predict(beta_0, beta_1, x_i) - y_i", "def calc(self) -> pd.DataFrame:\n raise NotImplementedError", "def __getitem__(self, i):\n return eos80.beta(\n self.nc.variables['SSS'].__getitem__(i),\n self.nc.variables['SST'].__getitem__(i),\n self.p, pt=True)", "def beta_and_alpha(self):\n # make scatter plot\n sp_temp = self.daily_returns(self.sp.rename(columns={'Adj Close': '^GSPC'}))\n symbol_temp = self.daily_returns(self.daily.rename(columns={'Adj Close': self.symbol}))\n joined = sp_temp.merge(symbol_temp, on='Date')\n\n # beta and alpha\n beta, alpha = np.polyfit(joined[\"^GSPC\"], joined[self.symbol], 1)\n beta = round(beta, 3)\n alpha = round(alpha, 5)\n if alpha > 0:\n self.buys += 1\n self.debug += '\\nAlpha > 0: buys + {}'.format(alpha)\n else:\n self.debug += '\\nAlpha < 0: {}'.format(alpha)\n\n # assuming favorable market conditions. else, it would be sells + 1.\n if beta > 1:\n self.buys += 1\n self.debug += '\\nBeta > 1: buys + {}'.format(beta)\n else:\n self.debug += '\\nBeta < 1: {}'.format(beta)\n\n # finish plotting scatter\n if self.will_plot:\n ax = joined.plot(title=self.symbol + ' vs The Market', kind = 'scatter', x='^GSPC', y=self.symbol)\n ax.set_xlabel(\"S&P 500\")\n plt.plot(joined[\"^GSPC\"], beta * joined['^GSPC'] + alpha, '-', color='r', label='Correlation')\n\n # plot expected beta (slope) of 1 and alpha (y- int.) 
of zero\n plt.plot(joined[\"^GSPC\"], 1 * joined['^GSPC'] + 0, '-', color='gray', label='Beta of 1')\n plt.plot(joined[\"^GSPC\"], 0 * joined['^GSPC'] + 0, '-', color='gray', label='Alpha of 0')\n plt.legend(loc='best')", "def heat_rate_regression(df, x_cols, y_col):\n df = df[~ np.isnan(df[y_col])]\n for col in x_cols:\n df = df[~ np.isnan(df[col])]\n\n X = df[x_cols].to_numpy()\n\n y = df[y_col].to_numpy()\n\n reg = LinearRegression().fit(X, y)\n return reg", "def beta_from_likelihood(t, q, prob, npix, **kwargs):\n v2_ml = np.zeros((t.size + 1, q.size + 1, 2), dtype=np.float32)\n v2_ml[1:, 0, 0] = t\n v2_ml[0, 1:, 0] = q\n for it in range(t.size):\n for iq in range(q.size):\n probi = prob[0][it][iq + 1]\n v2_ml[it + 1, iq + 1] = fit_pg_likelihood(probi, npix[iq], **kwargs)[0]\n\n return v2_ml", "def passing(df):\n pass", "def f(z, X, Y, _lambda):\r\n w = z[:-1]\r\n beta = np.squeeze(z[-1])\r\n term_1_d = (X * (Y[:, np.newaxis] - g(X.dot(w) + beta))).sum(axis=0)\r\n term_d_1 = (Y[:, np.newaxis] - g(X.dot(w) + beta)).sum() + 2 * _lambda * beta\r\n return np.hstack((term_1_d, term_d_1))[:, np.newaxis]", "def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)", "def predict(self, df):\n data = xgb.DMatrix(df.values)\n return self.model.predict(data)", "def gradient_descent(features, values, theta, alpha, num_iterations):\r\n\r\n m = len(values)\r\n cost_history = []\r\n\r\n for i in range (num_iterations):\r\n \r\n h = numpy.dot(features, theta)\r\n \r\n theta = theta - alpha / m * numpy.dot((h-values),features)\r\n \r\n cost = compute_cost(features, values, theta)\r\n \r\n cost_history.append(cost)\r\n\r\n return theta, pandas.Series(cost_history) # leave this line for the grader\r", "def RHS(y,t):\r\n\r\n return np.multiply(A.dot(y),ones-y)-beta*y", "def modelOnBetaGrid(sample,bins,N,l,u):\r\n\r\n betaGrid=np.linspace(l,u,N)\r\n traces=[]\r\n WAIC=dict()\r\n index=0\r\n\r\n for beta in betaGrid:\r\n trace=intensityLogGauss(sample,bins,beta)\r\n traces.append(trace['intensity'])\r\n WAIC[index]=trace\r\n index+=1\r\n\r\n df=pm.compare(WAIC,ic='WAIC')\r\n\r\n return betaGrid,df,traces", "def f(p, phi, phib, df):\n\treturn - p + exp( - df + Ns*(log((1 - p*phi)/(1 - phi - phib)) + \\\n\t\t(p - 1)*phi - phib + (9./4)*alpha*((phi + phib)**(5./4) - (p*phi)**(5./4))))", "def theta_AB(df):\n j_dist = get_coop_coop_neighbour_dist(df) \n degree_dist = get_degree_distribution(df) \n return get_theta_AB(j_dist,degree_dist)", "def f_cv(x, dt):\n b = 400\n x[0] = x[0] + x[3] * dt\n x[1] = x[1] + x[4] * dt\n x[2] = x[2] + x[5] * dt\n x[3] = x[3]\n x[4] = x[4]\n x[5] = x[5] + ((0.0034 * g * np.exp(-x[2] / 22000) * x[5] ** 2) / (2 * b) - g)*dt\n return x", "def js_beta(d1, d2, beta):\n part1 = - soft_relu(-d1).mean()\n part2 = (beta + 2.0 - (- soft_relu(-d2)).exp()).log().mean()\n return part1 + part2 - np.log(1.0 + beta)", "def linear_regression(features, values):\n clf = SGDRegressor(n_iter=100)\n clf.fit(features,values)\n print(clf.score(features,values))\n intercept = clf.intercept_ \n params = clf.coef_\n \n return intercept, params", "def loess(xvals, yvals, alpha, poly_degree=1):\r\n # Sort dataset by xvals.\r\n all_data = sorted(zip(xvals, yvals), key=lambda x: x[0])\r\n xvals, yvals = zip(*all_data)\r\n\r\n locsDF = pd.DataFrame(\r\n columns=[\r\n 'loc','x','weights','v','y','raw_dists',\r\n 'scale_factor','scaled_dists'\r\n ])\r\n evalDF = pd.DataFrame(\r\n columns=[\r\n 'loc','est','b','v','g'\r\n ])\r\n\r\n n = len(xvals)\r\n m = n + 1\r\n q = 
int(np.floor(n * alpha) if alpha <= 1.0 else n)\r\n avg_interval = ((max(xvals)-min(xvals))/len(xvals))\r\n v_lb = max(0,min(xvals)-(.5*avg_interval))\r\n v_ub = (max(xvals)+(.5*avg_interval))\r\n v = enumerate(np.linspace(start=v_lb, stop=v_ub, num=m), start=1)\r\n #print('liste v=', list(v))\r\n # Generate design matrix based on poly_degree.\r\n xcols = [np.ones_like(xvals)]\r\n for j in range(1, (poly_degree + 1)):\r\n xcols.append([i ** j for i in xvals])\r\n X = np.vstack(xcols).T\r\n\r\n\r\n for i in v:\r\n\r\n print('i=', i) #pour voir à quelle vitesse cela défile.\r\n iterpos = i[0]\r\n iterval = i[1]\r\n\r\n # Determine q-nearest xvals to iterval.\r\n iterdists = sorted([(j, np.abs(j-iterval)) \\\r\n for j in xvals], key=lambda x: x[1])\r\n\r\n _, raw_dists = zip(*iterdists)\r\n\r\n # Scale local observations by qth-nearest raw_dist.\r\n scale_fact = raw_dists[q-1]\r\n scaled_dists = [(j[0],(j[1]/scale_fact)) for j in iterdists]\r\n weights = [(j[0],((1-np.abs(j[1]**3))**3 \\\r\n if j[1]<=1 else 0)) for j in scaled_dists]\r\n\r\n # Remove xvals from each tuple:\r\n _, weights = zip(*sorted(weights, key=lambda x: x[0]))\r\n _, raw_dists = zip(*sorted(iterdists, key=lambda x: x[0]))\r\n _, scaled_dists = zip(*sorted(scaled_dists,key=lambda x: x[0]))\r\n\r\n iterDF1 = pd.DataFrame({\r\n 'loc' :iterpos,\r\n 'x' :xvals,\r\n 'v' :iterval,\r\n 'weights' :weights,\r\n 'y' :yvals,\r\n 'raw_dists' :raw_dists,\r\n 'scale_fact' :scale_fact,\r\n 'scaled_dists':scaled_dists\r\n })\r\n\r\n locsDF = pd.concat([locsDF, iterDF1])\r\n W = np.diag(weights)\r\n y = yvals\r\n b = np.linalg.inv(X.T @ W @ X) @ (X.T @ W @ y)\r\n local_est = loc_eval(iterval, b)\r\n iterDF2 = pd.DataFrame({\r\n 'loc':[iterpos],\r\n 'b' :[b],\r\n 'v' :[iterval],\r\n 'g' :[local_est]\r\n })\r\n\r\n evalDF = pd.concat([evalDF, iterDF2])\r\n\r\n # Reset indicies for returned DataFrames.\r\n locsDF.reset_index(inplace=True)\r\n locsDF.drop('index', axis=1, inplace=True)\r\n locsDF['est'] = 0; evalDF['est'] = 0\r\n locsDF = locsDF[['loc','est','v','x','y','raw_dists',\r\n 'scale_fact','scaled_dists','weights']]\r\n\r\n # Reset index for evalDF.\r\n evalDF.reset_index(inplace=True)\r\n evalDF.drop('index', axis=1, inplace=True)\r\n evalDF = evalDF[['loc','est', 'v', 'b', 'g']]\r\n\r\n return(locsDF, evalDF)", "def fit(self, df):\n try:\n df = df.astype(float)\n except Exception:\n raise ValueError(\"Data Cannot Be Converted to Numeric Float\")\n\n y = df.values\n if y.shape[1] == 1:\n y = y.ravel()\n X = date_part(df.index, method=self.datepart_method)\n from autots.models.sklearn import retrieve_regressor\n\n multioutput = True\n if y.ndim < 2:\n multioutput = False\n elif y.shape[1] < 2:\n multioutput = False\n self.model = retrieve_regressor(\n regression_model=self.regression_model,\n verbose=0,\n verbose_bool=False,\n random_seed=2020,\n multioutput=multioutput,\n )\n self.model = self.model.fit(X, y)\n self.shape = df.shape\n return self", "def _beta(m, d, Q):\n\n if d % 2 == 1:\n w2 = np.array([1, -1]) # 1 - t\n else:\n w2 = np.array([0, 1, -1]) # t - t^2\n mat_y = _lambda(m, d + 1 - len(w2), Q)\n return _mult_poly_matrix_poly(w2, mat_y)", "def _simple_logistic_regression(x,y,beta_start=None,verbose=False,\n CONV_THRESH=1.e-3,MAXIT=500):\n if len(x) != len(y):\n raise ValueError, \"x and y should be the same length!\"\n if beta_start is None:\n beta_start = NA.zeros(2,x.dtype.char)\n iter = 0; diff = 1.; beta = beta_start # initial values\n if verbose:\n print 'iteration beta log-likliehood |beta-beta_old|' \n while iter < 
MAXIT:\n beta_old = beta \n p = NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))\n l = NA.sum(y*NA.log(p) + (1.-y)*NA.log(1.-p)) # log-likliehood\n s = NA.array([NA.sum(y-p), NA.sum((y-p)*x)]) # scoring function\n # information matrix\n J_bar = NA.array([[NA.sum(p*(1-p)),NA.sum(p*(1-p)*x)],\n [NA.sum(p*(1-p)*x),NA.sum(p*(1-p)*x*x)]])\n beta = beta_old + NA.dot(LA.inverse(J_bar),s) # new value of beta\n diff = NA.sum(NA.fabs(beta-beta_old)) # sum of absolute differences\n if verbose:\n print iter+1, beta, l, diff\n if diff <= CONV_THRESH: break\n iter = iter + 1\n return beta, J_bar, l", "def gradient_descent(features, values, theta, alpha, num_iterations):\n m = len(values)\n cost_history = []\n\n for i in range(num_iterations):\n predicted_values = np.dot(features, theta)\n delta = alpha / m * np.dot((predicted_values - values), features)\n theta = theta - delta\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n return theta, pandas.Series(cost_history)", "def loocv(df, response_var, pred_vars):\n MSE = 0\n n = len(df)\n for ii in range(n):\n ind_train = np.setdiff1d(np.arange(n), ii)\n X_train = df.loc[ind_train, pred_vars]\n Y_train = df.loc[ind_train, response_var]\n X_test = df.loc[ii, pred_vars]\n Y_test = df.loc[ii, response_var]\n model = LinearRegression().fit(X_train, Y_train)\n\n X_test = np.transpose(X_test.values)\n MSE += np.power(Y_test - model.predict([X_test]), 2)\n\n return MSE[0] / n", "def y(df,x):\r\n x_p=np.array(df['Vertices'])\r\n y_p=np.array(df['DIxPRE 252'])\r\n cs = scipy.interpolate.splrep(x_p,y_p)\r\n return scipy.interpolate.splev(x,cs)", "def gradient_ascent(f, df, theta_init, step_size, max_iter):\n\n fs = []\n xs = []\n thetas = theta_init\n for i in range(max_iter): #for each data example\n fs.append(f(thetas))\n\n temp = step_size*df(thetas)\n thetas = step_size*df(thetas) #modify that feature by using the derivative of log likelihood\n xs.append(thetas.flatten())\n if i % 10 == 0:\n print(i, thetas)\n\n return thetas, fs, xs", "def gradient_descent_beta(self):\n return self._gradient_descent_beta", "def gradient_descent(features, values, theta, alpha, num_iterations):\n \n # number of points\n npoints = len(values)\n \n # intialize cost history\n cost_history = []\n \n # num_interations iterations\n for iiter in range(num_iterations):\n \n # compute and store cost\n cost = compute_cost(features, values, theta)\n cost_history.append(cost)\n \n # update values of theta\n values_predicted = np.dot(features, theta)\n theta = theta + (alpha/npoints)*(np.dot(values - values_predicted,features))\n \n return theta, pandas.Series(cost_history)", "def predict_features(self, df_features, df_target, idx=0, **kwargs):\n\n y = np.transpose(df_target.values)\n X = np.transpose(df_features.values)\n\n path, beta, A, lam = hsiclasso(X, y)\n\n return beta", "def varying_lamda(x, y, z, lambda_min, lambda_max, n_lambda, k, save_fig = None, method = 'Ridge', split = True, train = 0.7, seed = 42, max_iter = 1001, l_min = False, plot_indexes = [0,1,2]):\n\n lambdas = np.array([0] + np.logspace(lambda_min, lambda_max, n_lambda).tolist())\n polynomials = np.array(k)\n X, Y = np.meshgrid(lambdas, polynomials)\n MSE = np.zeros(np.shape(X))\n\n j = 0\n for k in polynomials:\n print(k)\n\n model = regression(x, y, z, k = int(k), split = split, train = train, seed = seed)\n if method == 'Ridge':\n model.SVD()\n i = 0\n for lam in lambdas:\n\n if method == 'Ridge':\n beta = model.Ridge(lam = lam)\n elif method == 'Lasso':\n beta = model.Lasso(lam = 
lam, max_iter = max_iter)\n\n z_tilde = model.z_tilde(beta = beta, X = model.X_test)\n MSE[j, i] = model.MSE(z_tilde = z_tilde, z = model.z_test)\n i += 1\n j += 1\n\n print('Method = ', method)\n lambdas_min = []\n for i in range(len(polynomials)):\n minimum_index = MSE[i].argmin()\n print('Minimum lambda for polynomial %.i: ' %(polynomials[i]), lambdas[minimum_index], MSE[i].min())\n lambdas_min.append(int(minimum_index))\n\n #plt.pcolormesh(lambdas.tolist() + [lambdas[-1] + lambdas[1]], polynomials.tolist() + [polynomials[-1] + 1], MSE)\n #plt.colorbar()\n #plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.contourf(lambdas, polynomials, MSE)\n plt.colorbar()\n plt.ylabel('Polynomial order', fontsize = 14)\n plt.xlabel('Lambda', fontsize = 14)\n try:\n plt.savefig(results_dir + save_fig + 'contour' + '.png')\n except:\n pass\n plt.show()\n\n plt.title('MSE for the test data with ' + method)\n plt.plot(lambdas, MSE[plot_indexes[0], :], label = 'k = ' + str(polynomials[plot_indexes[0]]))\n plt.plot(lambdas, MSE[plot_indexes[1], :], label = 'k = ' + str(polynomials[plot_indexes[1]]))\n plt.plot(lambdas, MSE[plot_indexes[2], :], label = 'k = ' + str(polynomials[plot_indexes[2]]))\n if l_min:\n plt.plot(lambdas[lambdas_min[1]], MSE[1, lambdas_min[1]], 'ro', label = 'Lambda min = %.4g' %(lambdas[lambdas_min[1]]))\n else:\n pass\n plt.legend()\n plt.xlabel('Lambda', fontsize = 14)\n plt.ylabel('MSE', fontsize = 14)\n plt.tight_layout()\n try:\n plt.savefig(results_dir + save_fig + '.png')\n except:\n pass\n plt.show()\n return lambdas_min", "def predictions(dataframe):\n # Select Features\n features = dataframe[['rain', 'precipi', 'Hour', 'meantempi']]\n\n # Add UNIT to features using dummy variables\n dummy_units = pandas.get_dummies(dataframe['UNIT'], prefix='unit')\n features = features.join(dummy_units)\n\n # Values\n values = dataframe['ENTRIESn_hourly']\n m = len(values)\n\n features, mu, sigma = normalize_features(features)\n features['ones'] = np.ones(m) # Add a column of 1s (y intercept)\n\n # Convert features and values to numpy arrays\n features_array = np.array(features)\n values_array = np.array(values)\n\n # Set values for alpha, number of iterations.\n alpha = 0.1 # please feel free to change this value\n num_iterations = 75 # please feel free to change this value\n\n # Initialize theta, perform gradient descent\n theta_gradient_descent = np.zeros(len(features.columns))\n theta_gradient_descent, cost_history = gradient_descent(features_array,\n values_array,\n theta_gradient_descent,\n alpha,\n num_iterations)\n\n plot = None\n # -------------------------------------------------\n # Uncomment the next line to see your cost history\n # -------------------------------------------------\n # plot = plot_cost_history(alpha, cost_history)\n #\n # Please note, there is a possibility that plotting\n # this in addition to your calculation will exceed\n # the 30 second limit on the compute servers.\n\n predicted = np.dot(features_array, theta_gradient_descent)\n return predicted, plot", "def etl(self, df: pd.DataFrame):\n df['divided_by_ten'] = df['feature'] / 10\n return df", "def dphidalpha_j(x, alpha_j, beta_j, gamma_j):\n def f(xp):\n delta = xp - gamma_j\n return csrbf(math.sqrt(beta_j*delta*beta_j*delta))\n vf = num.vectorize(f)\n return vf(x)", "def minfunc(beta, yvec, xmat ):\n return yvec - exp(dot(xmat, beta))", "def gradient(data_x, data_y, parameters):\n return data_x.T @ (data_x @ parameters - data_y) / data_x.shape[0]", "def solve_beta_slope(X, Y, 
lbd_vec, h=0.1, lr=5.0):\n n, p = X.shape[0], X.shape[1]\n \n# i = 0\n beta_prev = np.zeros(p)\n beta_new = np.ones(p)\n while abs(obj_slope(X, Y, lbd_vec, beta_prev)-obj_slope(X, Y, lbd_vec, beta_new)) > lr:\n beta_prev = beta_new\n beta_new = prox_slope(beta_new - (h/n) * (X.T @ (X @ beta_new - Y)), h/n, lbd_vec)\n \n# i += 1\n# if i % 2 == 0:\n# print(i)\n# print(\"prev value: \", obj_slope(X, Y, lbd_vec, beta_prev))\n# print(\"new value: \", obj_slope(X, Y, lbd_vec, beta_new))\n# print(sum(abs(beta_new)))\n# print(beta_new)\n return beta_new", "def allocate(self,weights, df):\n return df.dot(weights)", "def get_alpha_beta(data, market, risk_free=0, scale=1, dspl=False):\n logger = logging.getLogger(__name__)\n if data.ndim!=1:\n raise ValueError(\"invest.calculation.get_alpha_beta only takes pandas Series\")\n df = get_returns(data, style='log', fillna=False).rename(\"data\").to_frame()\n df['market'] = get_returns(market, style='log', fillna=False)\n df['risk_free'] = risk_free / 100\n # A complicated way to get risk-free rate:\n # df['risk_free'] = df.interest * 0.01 * (df.date-df.date.shift(1)).dt.days / 260\n df.dropna(axis=0, how='any', inplace=True)\n y = (df.data * scale - df.risk_free).values\n x = (df.market * scale - df.risk_free).values\n from machine_learning.Msklearn import LinearRegression\n lm = LinearRegression(intercept=True)\n lm.fit(x, y)\n if dspl:\n lm.summary()\n alpha, beta = lm.beta\n return alpha, beta", "def pvalue_beta(self):\n return self._pvalue_beta", "def _regression_loop(endog, exog, model, lasso_positive, alpha=50):\n mod_result = _build_regression(exog, endog, model, lasso_positive, alpha=alpha)\n beta = np.hstack([mod_result.intercept_, mod_result.coef_])\n \n return beta", "def calcprob(beta, x):\n try:\n N, npreds = x.shape[1], x.shape[0]\n except: # single predictor, x is a vector, len(beta)=2.\n N, npreds = len(x), 1\n if len(beta) != npreds+1:\n raise ValueError,'sizes of beta and x do not match!'\n if npreds==1: # simple logistic regression\n return 100.*NA.exp(beta[0]+beta[1]*x)/(1.+NA.exp(beta[0]+beta[1]*x))\n X = NA.ones((npreds+1,N), x.dtype.char)\n X[1:, :] = x\n ebx = NA.exp(NA.dot(beta, X))\n return 100.*ebx/(1.+ebx)", "def log_gamma_unnormalised_lpdf(x, alpha, beta):\n return alpha * x - beta * tf.exp(x)", "def fill_df(pos, vel, acc, gamma, velmag, tvec, i):\n\n posx = pd.DataFrame(pos[:, 0], index=tvec, columns=[str(i)])\n posz = pd.DataFrame(pos[:, 1], index=tvec, columns=[str(i)])\n\n velx = pd.DataFrame(vel[:, 0], index=tvec, columns=[str(i)])\n velz = pd.DataFrame(vel[:, 1], index=tvec, columns=[str(i)])\n\n accx = pd.DataFrame(acc[:, 0], index=tvec, columns=[str(i)])\n accz = pd.DataFrame(acc[:, 1], index=tvec, columns=[str(i)])\n\n gamm = pd.DataFrame(gamma, index=tvec, columns=[str(i)])\n vmag = pd.DataFrame(velmag, index=tvec, columns=[str(i)])\n\n return posx, posz, velx, velz, accx, accz, gamm, vmag", "def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )", "def gamma_pdf(a,b):\n df = DataFrame(columns=['Day','Gamma_Values'])\n for day in range(181):\n df = df.append({'Day': int(day), 'Gamma_Values': float(gamma.pdf(day,a,0,b))}, ignore_index=True)\n return df" ]
[ "0.68780404", "0.6571103", "0.65438884", "0.6472203", "0.62649596", "0.622851", "0.6134441", "0.6110367", "0.6021685", "0.5859265", "0.5851953", "0.5832189", "0.57926506", "0.5776251", "0.5776251", "0.5743775", "0.56781954", "0.56520283", "0.5651162", "0.5608268", "0.5604049", "0.5603729", "0.5601341", "0.5599801", "0.5573712", "0.5558541", "0.55469805", "0.5507145", "0.5475143", "0.54740906", "0.5469058", "0.546279", "0.5440565", "0.54398346", "0.5431927", "0.54290044", "0.54206294", "0.54149234", "0.541239", "0.541239", "0.54002833", "0.5399015", "0.5398398", "0.5398056", "0.53969276", "0.5382155", "0.53695345", "0.53500473", "0.53332376", "0.5333237", "0.53116167", "0.52972704", "0.52972704", "0.5296916", "0.52854514", "0.52827114", "0.5281549", "0.52797973", "0.5274234", "0.5269561", "0.52681184", "0.52657217", "0.5262455", "0.5261927", "0.52587897", "0.52474374", "0.52448076", "0.5241321", "0.52386796", "0.52377176", "0.523559", "0.5234067", "0.5231464", "0.5227193", "0.52263457", "0.52240676", "0.5223155", "0.5214338", "0.52057856", "0.51812744", "0.51713383", "0.5167514", "0.5165378", "0.5164298", "0.5161955", "0.516103", "0.5159173", "0.5155842", "0.5154545", "0.5150844", "0.5143103", "0.51300174", "0.51292044", "0.5129085", "0.51276124", "0.511703", "0.51152825", "0.5110634", "0.5109989", "0.51022243" ]
0.66248494
1
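The local-regression negative earlier in this record weights neighbors with a tricube kernel, (1 - |d|^3)^3 for scaled distances d <= 1 and 0 otherwise; a minimal standalone sketch of that weighting (function name and sample values are assumptions for illustration):

import numpy as np

def tricube_weights(scaled_dists):
    # w(d) = (1 - |d|^3)^3 for |d| <= 1, else 0; closer points get larger weights.
    d = np.abs(np.asarray(scaled_dists, dtype=float))
    w = (1.0 - d ** 3) ** 3
    w[d > 1.0] = 0.0
    return w

print(tricube_weights([0.0, 0.5, 1.0, 1.5]))  # approx. [1.0, 0.67, 0.0, 0.0]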
Just a simple decorator to make the process of generating tests easier.
def Generatable(cls): if hasattr(cls, 'generate_tests') and callable(cls.generate_tests): def create_test_func(name, test_func): setattr(cls, 'test_' + name.replace(' ', '_').lower(), test_func) cls.generate_tests(create_test_func) return cls
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def spec_tests():\n pass", "def test_something():", "def unitary_test():", "def test_dummy():", "def tests():", "def test_generate_all_testing(self):\n pass", "def test_1():", "def test_func():\n pass", "def test_dummy_test():\n pass", "def test_T1():", "def test_T1():", "def test_T01():", "def test_wraps():\n print('func')", "def pytest_generate_tests(metafunc):\n parent_conftest.pytest_generate_tests(metafunc, __file__)", "def test_single_test_case():\n pass", "def test_T2():", "def test_T2():", "def test_main():\n # Setup\n # Exercise\n # Verify", "def test_5():", "def test_basic_execution(self):", "def test_4():", "def test_2():", "def test_3():", "def test():\n pass", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test():", "def test():", "def inner_test():\n pass", "def inner_test():\n pass", "def test(func):\n register_tests(func, [func.__name__])", "def test(self):", "def test(self):", "def test_let(self):", "def test(self):\n pass", "def test():\n pass", "def test_method(self):", "def test_decorator(f):\n return f", "def test():\r\n pass", "def tests(self):\n return [self]", "def test_passed():\n pass", "def test_decorated(*args):\n for i in args:\n yield i", "def pytest_generate_tests(metafunc):\n\t\n\tif not metafunc.cls:\n\t\treturn\n\t\n\tinst = metafunc.cls()\n\t\n\tif 'valid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('valid', inst.valid)\n\t\n\tif 'invalid' in metafunc.fixturenames:\n\t\tmetafunc.parametrize('invalid', inst.invalid)", "def generate(func, *inputs):\n # http://blog.kevinastone.com/generate-your-tests.html\n def decorator(testcase):\n for input in inputs:\n test_input = make_method(func, input)\n setattr(testcase, test_input.__name__, test_input)\n return testcase\n\n return decorator", "def test_T3():", "def test_T3():", "def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --", "def test_required_methods(self):", "def test_nothing(self):", "def test_T4():", "def test_T4():", "def _test():\n import doctest", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def _test():\n import doctest\n return doctest.testmod(verbose=True)", "def unitdoctest():\r\n\r\n pass", "def test_doc():\n pass", "def template_for_test_functions():\r\n\r\n expected = \"\"\r\n actual = \"\"\r\n print_test_results(func, expected, actual)", "def __integration_doctest():\n pass", "def test_exp(doctest):", "def test(ctx):\n pass", "def generate_test_method(test_name):\n\n def run_test(self):\n # backup any existing files with our expected output_name\n output_name = \"{}.png\".format(test_name)\n backup_name = output_name + \".backup\"\n if os.path.isfile(output_name):\n os.rename(output_name, backup_name)\n self.addCleanup(cleanup_backup, backup_name, output_name)\n\n # run the test\n ret = subprocess.call(\"python {}.py\".format(test_name), shell=True)\n self.assertEqual(ret, 0)\n\n output_exists = os.path.isfile(output_name)\n if output_exists:\n self.addCleanup(cleanup_output, output_name)\n\n ps_output_name = \"{}.ps\".format(test_name)\n if os.path.isfile(ps_output_name):\n # some tests may also generate postscript files which need to be deleted\n self.addCleanup(cleanup_output, ps_output_name)\n\n self.assertTrue(output_exists)\n\n return run_test", "def test_cont_larvaemutattion(): \n pass", "def pytest_generate_tests(self, metafunc):\n\n # function for pretty test name\n def id_func(x):\n return \"-\".join([f\"{k}={v}\" for k, v in x.items()])\n\n # get 
arguments for the test function\n funcarglist = metafunc.cls.params.get(metafunc.function.__name__, None)\n if funcarglist is None:\n return\n else:\n # equivalent of pytest.mark.parametrize applied on the metafunction\n metafunc.parametrize(\"fields\", funcarglist, ids=id_func)", "def test_require():", "def istest(func):\n func.__test__ = True\n return func", "def test_begin(self):", "def test_app():\n pass", "def test_T0():", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def pytest_generate_tests(metafunc):\n if \"retrospective\" in metafunc.fixturenames:\n metafunc.parametrize(\"retrospective\", [False, True])\n if \"test_type\" in metafunc.fixturenames:\n metafunc.parametrize(\"test_type\", [FILES_TEST, STATE_TEST])\n if \"raise_error\" in metafunc.fixturenames:\n metafunc.parametrize(\"raise_error\", [False, True])", "def before_test(self, func, *args, **kwargs):\n pass", "def _test():\n import doctest\n doctest.testmod(verbose=1)", "def test_expt(doctest):", "def runtest(self):", "def _test():\n import doctest\n doctest.testmod()", "def _test():\n import doctest\n doctest.testmod()", "def local_test():\n pass", "def test_stub(self):\n pass", "def test_create_run(self):\n pass", "def runTests(self):\n \n pass", "def test_if(self):", "def test(self, func):\r\n @wraps(func)\r\n def wrapper():\r\n with nested(self._contexts) as context:\r\n context = [c for c in context if c is not None]\r\n argc = len(inspect.getargspec(func)[0])\r\n args = []\r\n for arg in context:\r\n if type(arg) is tuple: # type() is intentional\r\n args.extend(arg)\r\n else:\r\n args.append(arg)\r\n func(*args[:argc])\r\n wrapper.__wrapped__ = func\r\n self._tests.append(wrapper)\r\n if self.replace_tests:\r\n return wrapper\r\n return func", "def test_model():\n pass", "def test_new(self):", "def test_new(self):", "def test_by_variable():\n pass", "def test1(self):\n\n log.info('This is a test')\n self.assertTrue((random.randint(0,9) % 2) == 0)#! 
/usr/bin/env python", "def fancy_test_decorator(\n lister, arguments=lambda x: x, attributes=lambda x: {\"id\": str(x)}, naming=lambda x: str(x), debug=False\n):\n\n def for_all_stuff(check):\n for x in lister():\n if debug:\n logger.info(\"add test %s / %s \" % (check, x))\n add_checker_f(check, x, arguments, attributes, naming)\n return check\n\n return for_all_stuff", "def test_the_tests():\n\n assert True is True", "def test_cases():\n CasesTestCase.generate_tests()\n yield CasesTestCase\n yield DocTestsTestCase", "def test(self):\n raise NotImplementedError", "def test1():", "def test_generate(monkeypatch, capsys):\n monkeypatch.setattr(sys, \"argv\", [\"\", \"generate\", os.path.join(PATH, \"generate.feature\")])\n main()\n out, err = capsys.readouterr()\n assert out == textwrap.dedent(\n '''\n # coding=utf-8\n \"\"\"Code generation feature tests.\"\"\"\n\n from pytest_bdd import (\n given,\n scenario,\n then,\n when,\n )\n\n\n @scenario('scripts/generate.feature', 'Given and when using the same fixture should not evaluate it twice')\n def test_given_and_when_using_the_same_fixture_should_not_evaluate_it_twice():\n \"\"\"Given and when using the same fixture should not evaluate it twice.\"\"\"\n\n\n @given('1 have a fixture (appends 1 to a list) in reuse syntax')\n def have_a_fixture_appends_1_to_a_list_in_reuse_syntax():\n \"\"\"1 have a fixture (appends 1 to a list) in reuse syntax.\"\"\"\n raise NotImplementedError\n\n\n @given('I have an empty list')\n def i_have_an_empty_list():\n \"\"\"I have an empty list.\"\"\"\n raise NotImplementedError\n\n\n @when('I use this fixture')\n def i_use_this_fixture():\n \"\"\"I use this fixture.\"\"\"\n raise NotImplementedError\n\n\n @then('my list should be [1]')\n def my_list_should_be_1():\n \"\"\"my list should be [1].\"\"\"\n raise NotImplementedError\n\n '''[\n 1:\n ].replace(\n u\"'\", u\"'\"\n )\n )", "def test_setup(funct):\n\n def decorated_setup():\n \"\"\"Decorated test setup.\"\"\"\n testdb.reload_db()\n funct()\n return decorated_setup", "def test_empty_functions():" ]
[ "0.78306496", "0.76522076", "0.76440585", "0.7547918", "0.7521638", "0.73572", "0.7326144", "0.7307783", "0.7230892", "0.7225894", "0.7225894", "0.7212739", "0.7207286", "0.71436405", "0.7143635", "0.71356994", "0.71356994", "0.71143264", "0.711168", "0.7106708", "0.7092831", "0.7081324", "0.7077096", "0.7076129", "0.7063532", "0.7063532", "0.7063532", "0.7063532", "0.7063532", "0.7046352", "0.7046352", "0.7026275", "0.7026275", "0.6965711", "0.69284993", "0.69284993", "0.6912691", "0.68942606", "0.6864156", "0.6861965", "0.68561155", "0.6827035", "0.6813738", "0.6807848", "0.677458", "0.67565423", "0.6747103", "0.6747076", "0.6747076", "0.67312956", "0.6728854", "0.67258704", "0.6721252", "0.6721252", "0.66982913", "0.66937536", "0.66937536", "0.6678963", "0.66728634", "0.66568184", "0.6639579", "0.66255474", "0.6611283", "0.6598516", "0.6598427", "0.65972203", "0.65855384", "0.657845", "0.6566248", "0.6564637", "0.65637845", "0.65357697", "0.65357697", "0.65357697", "0.6532558", "0.653221", "0.65314513", "0.65295213", "0.65144193", "0.65123785", "0.65123785", "0.6505497", "0.64918923", "0.64723015", "0.64573115", "0.645646", "0.6446561", "0.64452434", "0.6439296", "0.6439296", "0.64190423", "0.6411487", "0.64076596", "0.6407246", "0.639407", "0.63848215", "0.63759404", "0.637336", "0.6372411", "0.6370459" ]
0.6944086
34
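A hedged usage sketch of the Generatable decorator in the record above; the test class, the generate_tests body, and the generated test names are assumptions for illustration, and generate_tests is written as a classmethod so the decorator's cls.generate_tests(create_test_func) call resolves cleanly:

import unittest

@Generatable  # assumes the decorator above is importable in this scope
class TestSquares(unittest.TestCase):

    @classmethod
    def generate_tests(cls, create_test_func):
        for n in (1, 2, 3):
            def make(n):
                def test(self):
                    self.assertEqual(n * n, n ** 2)
                return test
            create_test_func('square of %d' % n, make(n))

# The decorator attaches test_square_of_1, test_square_of_2 and test_square_of_3
# to TestSquares, so a normal unittest runner discovers them.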
Get the color of the mask at position. Using 2 bits as a color.
def get_color(mask: int, position: int): return (mask >> (position << 1)) & 3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mask_color(self):\n return self._mask_color", "def set_color(mask: int, position: int, color: int):\n return mask | (color << (position << 1))", "def get_color(self, point):\n \n d = point - self._origin\n dist = int(d.dot(d) ** 0.5) % 2\n if dist == 0:\n return self.c1.dup()\n else:\n return self.c2.dup()", "def get_color(self, _pos):\n return self.__framebuffer[_pos]", "def _to_color(indx, base):\n base2 = base * base\n b = 2 - indx / base2\n r = 2 - (indx % base2) / base\n g = 2 - (indx % base2) % base\n return b * 127, r * 127, g * 127", "def getPixelColor(self, n):\n\t\treturn self.leds[n]", "def get_red(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3]", "def get_color(self, point):\n return self._color.dup()", "def getColor(self):\n return self._l[2]", "def get_color(self, coord):\n return self.board[coord[0], coord[1]]", "def decode_target_colorful(cls, mask):\n return cls.cmap[mask]", "def decode_target_colorful(cls, mask):\n return cls.cmap[mask]", "def _get_color(self, r, g, b):\n clr = (r, g, b)\n return clr", "def show_red_mask(img, mask):\n img_ = img\n mask_ = np.bool_(mask)\n red = img_[:, :, 0]\n green = img_[:, :, 1]\n blue = img_[:, :, 2]\n red[mask_] = 255\n green[mask_] = 0\n blue[mask_] = 0\n return img_", "def get_colour(self, x, y):\n if x >= self.width or y >= self.height:\n return (0, 0, 0)\n\n return self.env_img.get_at((int(x), int(y))).normalize()[0:3]", "def getPixelColor(self, n):\n self._logger.debug(\"getPixelColor\")", "def color_map(val):\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def get_color(self, value):\n value = min(max(0,value), 1) * 510\n\n if value < 255:\n redValue = 255\n greenValue = math.sqrt(value) * 16\n greenValue = int(greenValue)\n else:\n greenValue = 255\n value = value - 255\n redValue = 255 - (value * value / 255)\n redValue = int(redValue)\n return '#' + f\"{redValue:0{2}x}\" + f\"{greenValue:0{2}x}\" + '00'", "def to_color(self):\n return (int(self.r * 255), int(self.g * 255), int(self.b * 255))", "def color_motion_mask(mask, color=None):\n if color is None:\n color = (220, 20, 60)\n h, w = mask.shape\n ext_mask = np.stack([mask, mask, mask], -1).astype(np.uint8)\n color = np.ones_like(ext_mask) * color\n index = np.ones_like(ext_mask) * 1.0\n final_mask = np.where(ext_mask == index, color, ext_mask).astype(np.uint8)\n return final_mask", "def colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def color_map(val):\n # NOTE: This relies on remap_interval, which you must provide\n color_code = remap_interval(val, -1, 1, 0, 255)\n return int(color_code)", "def mask2rgb(mask: NDArray[Int]) -> ndarray:\n color_label_dict = {0: (0, 0, 0),\n 1: (128, 0, 0),\n 2: (0, 128, 0),\n 3: (128, 128, 0),\n 4: (0, 0, 128),\n 5: (128, 0, 128),\n 6: (0, 128, 128),\n 7: (128, 128, 128)}\n\n maskRGB = np.empty((mask.shape[1], mask.shape[2], 3))\n mask = np.squeeze(mask)\n for key in 
color_label_dict.keys():\n pixel_value = color_label_dict[key]\n maskRGB[mask == key] = pixel_value\n\n return maskRGB.astype(np.uint8)", "def get_red(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_red()", "def mask(self):\n return ((2**(self.width) - 1) << self.lsb)", "def get_color(in_val, min_val=0, max_val=100):\n width = max_val - min_val\n unit = width / len(continuum)\n return continuum[min(int(in_val / unit), 19)]", "def get_at(\n self,\n pos: Tuple2NumberType,\n ignore_alpha: bool = False\n ) -> Union[Tuple3IntType, Tuple4IntType, 'pygame.Color']:\n assert_vector(pos, 2)\n color = self._surface.get_at(pos)\n if ignore_alpha:\n return color[0], color[1], color[2]\n return color", "def pos_mask(row: int, col: int) -> int:\n assert 0 <= row < 8\n assert 0 <= col < 8\n return 0x8000000000000000 >> col >> row * 8", "def color_rgb(self):\n return tuple(int(self.color[i : i + 2], 16) for i in (0, 2, 4))", "def get_colour(self, r1, r2, r3, b1, b2, b3):\n\n def colour(z, i):\n \"\"\"\n Gets the colour of a z and step value.\n\n :param complex z: the z value from the mandelbrot set\n :param int i: the step value\n\n :rtype: tuple\n :return: the three RGB colours\n \"\"\"\n if abs(z) < self.threshold:\n return self.background\n v = np.log2(i + self.threshold - np.log2(np.log2(abs(z)))) / self.threshold\n if v < 1.0:\n return v ** b1, v ** b2, v ** b3 # background\n else:\n v = max(0, 2 - v)\n return v ** r1, v ** r2, v ** r3 # main tones\n\n return colour", "def apply_mask(im, mask, color=(1, 0, 0)):\n masked = np.zeros(im.shape)\n for x, y in mask: masked[x][y] = color\n return masked", "def get_rgb(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_rgb()", "def getRandColor():\n\treturn (randrange(0,256), randrange(0,256), randrange(0,256))", "def getColor(self,number):\n if number >= 0:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,0,abs(number/self.maxp))\n else:\n ret = cs.hsv_to_rgb(0,0,1-abs(number/self.maxp))\n else:\n if self.inverse:\n ret = cs.hsv_to_rgb(0,1-abs(number/self.maxn),1)\n else:\n ret = cs.hsv_to_rgb(0,abs(number/self.maxn),1)\n return [ret[0]*255.0,ret[1]*255.0,ret[2]*255.0]", "def get_inner_colour(self, r1, r2, r3, b1, b2, b3):\n\n def colour(z, i):\n \"\"\"\n Gets the colour of a z and step value.\n\n :param z: the z value from the mandelbrot set\n :param i: the step value\n\n :rtype: list\n :return: list containing the RGB colours\n \"\"\"\n if abs(z) < self.threshold:\n return 0, 0, 0\n v = np.log2(i + self.threshold - np.log2(np.log2(abs(z)))) / self.threshold\n if v < 1.0:\n return v ** b1, v ** b2, v ** b3 # coloured tones\n else:\n v = max(0, 2 - v)\n return v ** r1, v ** r2, v ** r3 # sepia tones\n\n return colour", "def get_green(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 1]", "def get_color(rank):\n if rank == 1:\n color = int(0xffd700)\n elif rank == 2:\n color = int(0xc0c0c0)\n elif rank == 3:\n color = int(0xcd7f32)\n else:\n color = random.randint(1, 16777215)\n\n return discord.Color(color)", "def get_color(self):\r\n return self._color", "def get_color(self):\r\n return self.__color", "def __getitem__(self, pos: tuple) -> Color:\n a = pos[0] * self._width\n b = pos[1] * self._height\n return self._subscript( int(a), int(b))", "def GetColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_GetColor(self, *args)", "def get_color_marker(self):\r\n return self._board.get_color_marker_b()", "def get_color(self):\n return self.color", "def 
colors(self):\n\t\treturn [(0, 30, 255),(0, 30, 120)]", "def _color(self,c):\n return self.colorlist[c%len(self.colorlist)]", "def get_pixel(framebuf, x, y):\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def fl_getmcolor(colr):\n _fl_getmcolor = library.cfuncproto(\n library.load_so_libforms(), \"fl_getmcolor\",\\\n cty.c_ulong, [xfdata.FL_COLOR, cty.POINTER(cty.c_int),\\\n cty.POINTER(cty.c_int), cty.POINTER(cty.c_int)],\\\n \"\"\"long unsigned int fl_getmcolor(FL_COLOR i, int * r, int * g,\n int * b)\"\"\")\n library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n i_red, ptr_red = library.make_intc_and_pointer()\n i_green, ptr_green = library.make_intc_and_pointer()\n i_blue, ptr_blue = library.make_intc_and_pointer()\n library.keep_elem_refs(colr, ul_colr, i_red, i_green, i_blue, \\\n ptr_red, ptr_green, ptr_blue)\n retval = _fl_getmcolor(ul_colr, ptr_red, ptr_green, ptr_blue)\n return retval, i_red.value, i_green.value, i_blue.value", "def get_binary_mask(self,index):\n mask = self.load_mask_png(index)\n (rows,cols) = np.where(mask>0)[0:2] #pixels in mask disregarding the color\n new_mask = np.zeros(shape=mask.shape[0:2], dtype=np.uint8)\n new_mask[(rows,cols)] = 255\n return new_mask", "def get_blue(x, y, slot = 0):\r\n return __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2]", "def get_color(self):\n return self._color", "def get_color(self):\n return self._color", "def color(self):\n if self._simplecell:\n self.fetch()\n return self._color", "def GetRGB(self, *args):\n return _XCAFDoc.XCAFDoc_Color_GetRGB(self, *args)", "def GetChannelColorRGBA(vDataSet,aIndexC):\r\n\r\n rgba = vDataSet.GetChannelColorRGBA(aIndexC)\r\n r = rgba & 255\r\n g = (rgba >> 8) & 255\r\n b = (rgba >> 16) & 255\r\n a = (rgba >> 24) & 255\r\n return r,g,b,a", "def FindColor(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_FindColor(self, *args)", "def get_blue(self, x, y):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].get_blue()", "def int2color(x):\n # r = int(1000 * x % 255)\n # g = int(10000 * x % 255)\n # b = int(100000 * x % 255)\n x = 0 if x == 0 else int(1/x)\n b = x & 0xff\n g = (x >> 8) & 0xff\n r = (x >> 16) & 0xff\n return [r, g, b]", "def get_label_color_mapping(idx):\n # https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def bitget(byteval, ch):\n return (byteval & (1 << ch)) != 0\n r = g = b = 0\n for j in range(8):\n r = r | (bitget(idx, 0) << 7 - j)\n g = g | (bitget(idx, 1) << 7 - j)\n b = b | (bitget(idx, 2) << 7 - j)\n idx = idx >> 3\n return np.array([r, g, b], dtype=np.uint8)", "def rgb_2_scalar_idx(r, g, b):\n return 256 ** 2 * r + 256 * g + b", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) * 2\n lobyte, hibyte = framebuf.buf[index : index + 2]\n r = hibyte & 0xF8\n g = ((hibyte & 0x07) << 5) | ((lobyte & 0xE0) >> 5)\n b = (lobyte & 0x1F) << 3\n return (r << 16) | (g << 8) | b", "def getPixelColour(self, item, pixel):\n return item.get_at(pixel)", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def _get_color(self):\n return self.__color", "def fl_get_pixel(colr):\n _fl_get_pixel = library.cfuncproto(\n library.load_so_libforms(), \"fl_get_pixel\",\\\n cty.c_ulong, [xfdata.FL_COLOR],\\\n \"\"\"long unsigned int fl_get_pixel(FL_COLOR col)\"\"\")\n 
library.check_if_flinitialized()\n #library.checknonfatal_allowed_value_in_list(colr, xfdata.COLOR_list)\n ul_colr = library.convert_to_FL_COLOR(colr)\n library.keep_elem_refs(colr, ul_colr)\n retval = _fl_get_pixel(ul_colr)\n return retval", "def get_color(self):\n\n return self.color", "def color(self):\n return 0x2f3136", "def get_color(im_obj):\n #im = Image.open(path, 'r')\n x, y = im_obj.size\n\n r, g, b = 0, 0, 0\n for i in xrange(x):\n for j in xrange(y):\n color_px = im_obj.getpixel((i, j))\n #print color_px\n r += color_px[0]\n g += color_px[1]\n b += color_px[2]\n\n r = r / (x * y)\n g = g / (x * y)\n b = b / (x * y)\n return (r, g, b)", "def get_rgb(input):\n rgb_band_idxs = [bands.index(b) for b in [\"S2B4\", \"S2B3\", \"S2B2\"]] # could be also hardcoded as [3,2,1]\n return input[rgb_band_idxs]", "def get_pixel(framebuf, x, y):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n return (framebuf.buf[index] >> offset) & 0x01", "def GetPixel(*args, **kwargs):\n return _gdi_.Colour_GetPixel(*args, **kwargs)", "def getColor(self):\r\n return self.color", "def color(self, label):\n if self.grayscale:\n return (\"#ffffff\", \"#555555\", \"#888888\", \"#bbbbbb\", \"#222222\")[label]\n # COC WL WR SL SR\n return (\"#4e73b0\", \"#fdb863\", \"#b2abd2\", \"#e66101\", \"#5e3c99\")[label]", "def mask(self):\n return self.mask_index", "def get_pixel(self, frame: int, x: int, y: int) -> Color:\n return self.get_frame(frame).clone()[x, y]", "def getPixel(self, px, py):\n if not self.inBounds(px,py):\n return IColor()\n idx = py*self.w + px\n return self.data[idx]", "def color(self):\n return self._rgba", "def color(self):\n return self._rgba", "def get_color_index_for_level(self, color, level):\n index = 0\n mask = 0x80 >> level\n if color.red & mask:\n index |= 4\n if color.green & mask:\n index |= 2\n if color.blue & mask:\n index |= 1\n return index", "def pickColor(point):\n x = point[0]\n y = point[1]\n depth = 40\n if 0 < x <= 72 and 0 < y <= depth:\n return (255, 255, 255) # eraser\n if 72 < x <= 138 and 0 < y <= depth:\n return (0,0,0) # black\n if 138 < x <= 204 and 0 < y <= depth:\n return (122,78,32) # brown\n if 204 < x <= 270 and 0 < y <= depth:\n return (242,0,255) # purple\n if 270 < x <= 336 and 0 < y <= depth:\n return (0,0,255) # blue\n if 336 < x <= 402 and 0 < y <= depth:\n return (63,255,0) # green\n if 402 < x <= 468 and 0 < y <= depth:\n return (255,250,0) # yellow\n if 468 < x <= 534 and 0 < y <= depth:\n return (255,174,0) # orange\n if 534 < x <= 600 and 0 < y <= depth:\n return (255,0,0) # red", "def fom_rgb(direction, inclination, mask=None):\n if mask is None:\n mask = np.ones_like(direction, dtype=np.bool)\n\n rgb = np.zeros((mask.shape[0], mask.shape[1], 3), np.uint8)\n for x in range(mask.shape[0]):\n for y in range(mask.shape[1]):\n if not mask[x, y]:\n continue\n\n rgb[x,\n y, :] = _vec_to_rgb(\n np.sin(0.5 * np.pi - inclination[x, y]) *\n np.cos(direction[x, y]),\n np.sin(0.5 * np.pi - inclination[x, y]) *\n np.sin(direction[x, y]),\n np.cos(0.5 * np.pi - inclination[x, y]))\n return rgb", "def getColorFlag(color):\n if color == 0: # MONO\n return 0\n elif color == 1: # BAYER\n return -1\n elif color == 2: # AS IS RBG\n return 1", "def int2color_tuple(x):\n red_val = int(1000 * x % 255)\n green_val = int(10000 * x % 255)\n blue_val = int(100000 * x % 255)\n return red_val, green_val, blue_val", "def get_mask(self, h, k):\n return self.mask[(self.h==h)&(self.k==k)]", "def get_color(self):\n\n return self._color", "def 
green_channel(input_image):\n return input_image[:, :, 1]", "def getColor(self):\n return self.color", "def getPixel(self,x,y):\n return color_to_rgb(self._image.get(x, y))", "def get_mask(self):\n\t\treturn pygame.mask.from_surface(self.img)", "def _calcColor(self, colorTuple):\n return milight.color_from_rgb(*colorTuple)", "def get_rgb(self, r,g,b):\n return \"#%02x%02x%02x\" % (r,g,b)", "def get_colour(self, address):\n return idaapi.get_item_color(address)", "def _get_color(self, c, x, max_num):\n\n ratio = 5*(float(x)/max_num)\n i = int(math.floor(ratio))\n j = int(math.ceil(ratio))\n ratio -= i\n r = (1 - ratio) * self._colors[i][c] + ratio*self._colors[j][c]\n return int(255*r)", "def COL(x):\n return (x & 7)", "def COL(x):\n return (x & 7)", "def get_channel_mask(self,this_obsid,feed):\n\n db = FileTools.safe_hdf5_open(self.database,'r')\n channel_mask = db[str(this_obsid)]['Vane/Level2Mask'][feed-1,...]\n db.close()\n return channel_mask" ]
[ "0.6762996", "0.6617752", "0.6400042", "0.6352335", "0.6179756", "0.607116", "0.60303086", "0.5943762", "0.59284455", "0.58960706", "0.58665735", "0.58665735", "0.58594924", "0.5858215", "0.58338976", "0.57897294", "0.5776139", "0.5772793", "0.5763152", "0.5751781", "0.57433766", "0.5725651", "0.5725651", "0.5725651", "0.5725651", "0.5704427", "0.5688157", "0.5670523", "0.5668946", "0.56093186", "0.55931693", "0.55673474", "0.5543315", "0.55134207", "0.55067194", "0.5497447", "0.5469024", "0.54665357", "0.54569364", "0.5456411", "0.5447002", "0.5442873", "0.5434484", "0.54330647", "0.54225117", "0.54127824", "0.54006", "0.54005027", "0.5388962", "0.53862923", "0.5375568", "0.5369094", "0.53652966", "0.53652966", "0.5362423", "0.5361078", "0.5359082", "0.5352914", "0.53443044", "0.5334619", "0.5331386", "0.5323584", "0.532344", "0.531832", "0.53101707", "0.53101707", "0.53101707", "0.53101707", "0.53094333", "0.53037524", "0.52964723", "0.52915955", "0.5275448", "0.52691364", "0.5266421", "0.5261996", "0.5252573", "0.5248432", "0.52452725", "0.52447593", "0.5243251", "0.5243251", "0.5236187", "0.5230936", "0.5228266", "0.5225137", "0.5218371", "0.52086836", "0.5201867", "0.5192439", "0.51841795", "0.51804495", "0.51781094", "0.5171017", "0.51707447", "0.51574725", "0.5157067", "0.51562786", "0.51562786", "0.5150288" ]
0.8632774
0
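A short worked example of the 2-bit unpacking that get_color above performs; the mask value is an assumption chosen for illustration:

mask = 0b111001            # position 0 holds 0b01, position 1 holds 0b10, position 2 holds 0b11
print(get_color(mask, 0))  # 1
print(get_color(mask, 1))  # 2
print(get_color(mask, 2))  # 3
# Each position occupies two bits, so position p lives at bit offset p * 2 (p << 1).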
Set the color of the mask at position. Using 2 bits as a color.
def set_color(mask: int, position: int, color: int): return mask | (color << (position << 1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_color(mask: int, position: int):\n return (mask >> (position << 1)) & 3", "def SetMaskColour(*args, **kwargs):\n return _gdi_.Bitmap_SetMaskColour(*args, **kwargs)", "def setColorIndex(idx):\n dislin.setclr(idx)", "def set_pixel(framebuf, x, y, color):\n index = (y >> 3) * framebuf.stride + x\n offset = y & 0x07\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) >> 2\n pixel = framebuf.buf[index]\n\n shift = (x & 0b11) << 1\n mask = 0b11 << shift\n color = (color & 0b11) << shift\n\n framebuf.buf[index] = color | (pixel & (~mask))", "def set_red(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3] = value", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) // 8\n offset = 7 - x & 0x07\n framebuf.buf[index] = (framebuf.buf[index] & ~(0x01 << offset)) | (\n (color != 0) << offset\n )", "def set_pixel(self, x, y, v):\n self.buf[y][x] = v & 0x07", "def set_pixel(framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 3\n if isinstance(color, tuple):\n framebuf.buf[index : index + 3] = bytes(color)\n else:\n framebuf.buf[index : index + 3] = bytes(\n ((color >> 16) & 255, (color >> 8) & 255, color & 255)\n )", "def _set_color_mode(self, mode):\n self._write(ST7789_COLMOD, bytes([mode & 0x77]))", "def set_red(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_red(newval)", "def setPixelColor(self, n, color):\n self._logger.debug(\"setPixelColor\")", "def SetMask(*args, **kwargs):\n return _gdi_.Bitmap_SetMask(*args, **kwargs)", "def set_mask(self, h, k, value):\n self.mask[(self.h==h)&(self.k==k)] = value", "def set_pixel(self, framebuf, x, y, color):\n index = (y * framebuf.stride + x) * 2\n framebuf.buf[index : index + 2] = self.color_to_rgb565(color)", "def set_pixel(self, pos, color):\n if pos[0] >= 0 and pos[0] < self.width and pos[1] >= 0 and pos[1] < self.height:\n # Ensure that the y axis increases upwards\n inv_y = self.height - 1 - pos[1]\n pos = (inv_y * self.width * 3) + (pos[0] * 3)\n self.data[pos + 0] = color[0]\n self.data[pos + 1] = color[1]\n self.data[pos + 2] = color[2]", "def set_pixel(self, x, y, value):\r\n \r\n # Rotation and mirroring\r\n a = x\r\n x = y\r\n y = 7-a\r\n \r\n # From the baseclass\r\n if x < 0 or x > 7 or y < 0 or y > 7:\r\n # Ignore out of bounds pixels.\r\n return\r\n # Set green LED based on 1st bit in value.\r\n self.set_led(y * 16 + x, 1 if value & Display.COLOR_GREEN > 0 else 0)\r\n # Set red LED based on 2nd bit in value.\r\n self.set_led(y * 16 + x + 8, 1 if value & Display.COLOR_RED > 0 else 0)", "def setPixelColor(self, n, color):\n\t\t#print \"pxl %s = %s\" % (n, color)\n\t\tif isinstance(n, slice):\n\t\t\tself.leds[n] = [color]*len(self.leds[n])\n\t\telse:\n\t\t\tif n >= 0 or n <= self.size:\n\t\t\t\tself.leds[n] = color\n\t\t#pprint(self.leds)", "def set_pixel(image, pt, color):\n\timage[pt[0], pt[1]] = color", "def set_mask(self, mask):\n self.mask = mask", "def __setitem__(self, pos, value):\n\t\t#pprint(pos)\n\t\t#pprint(self.leds.__getitem__(pos))\n\t\t# Handle if a slice of positions are passed in by setting the appropriate\n\t\t# LED data values to the provided values.\n\t\tself.setPixelColor(pos, value)", "def setPixel (self, x, y, colour):\r\n self.image [y][x] = colour", "def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + 
int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))", "def set_blue(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3 + 2] = value", "def set_at(self, pos: Tuple2NumberType, color: ColorInputType) -> 'BaseImage':\n assert_vector(pos, 2)\n self._surface.set_at(pos, assert_color(color))\n return self", "def put_color(self, _pos, _color):\n assert(((len(_pos) == 2) and (len(_color) == self.__resolution[2])) or\n ((len(_pos) == 3) and (len(_color) == 1)))\n self.__framebuffer[_pos] = _color", "def set_green(x, y, value, slot = 0):\r\n __g[slot].pixels_rgb[__g[slot].width * 3 * y + x * 3+ 1] = value", "def setMask(self, mask):\n self.mask = mask", "def set_general_position_red(self, position):\n\n self._general_position_red = position", "def set_bitmask(self, value):\r\n self.__bitmask__ = value | 0xFF00", "def setPixel(self, x, y, val):\r\n self.__buffer[y][x].setValue(val)", "def set_color(self):\n self.image[self.x, self.y] = self.color\n if self.diffusion:\n r = g = b = 0\n for i in range(self.convolution_matrix.shape[0]):\n for j in range(self.convolution_matrix.shape[1]):\n r = g = b = 0\n for k in range(self.convolution_matrix.shape[0]):\n for l in range(self.convolution_matrix.shape[1]):\n m = (self.x + i + k - 2 + self.image.shape[0]) % self.image.shape[0]\n n = (self.y + j + l - 2 + self.image.shape[1]) % self.image.shape[1]\n r += self.convolution_matrix[k][l] * self.image[m, n][2]\n g += self.convolution_matrix[k][l] * self.image[m, n][1]\n b += self.convolution_matrix[k][l] * self.image[m, n][0]\n self.image[self.x, self.y] = (b, g, r)", "def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 7:\n # Ignore out of bounds pixels.\n return\n\n self.set_led(y * 16 + ((x + 7) % 8), value)", "def set_blue(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_blue(newval)", "def apply_mask(im, mask, color=(1, 0, 0)):\n masked = np.zeros(im.shape)\n for x, y in mask: masked[x][y] = color\n return masked", "def _set_color(self, r):\n c = COLORS[self.color]\n r.setLineColor(c[0], c[1], c[2])\n r.setColor(c[0], c[1], c[2])", "def _draw_mask_on_image(self, mask):\n mask = self.STANDARD_COLORS_ARRAY[mask]\n cv2.addWeighted(mask,self.config.ALPHA,self.image,1.0,0,self.image)", "def setPixel(self, x, y, r, g, b):\n self.array[x, y, 0] = (r)\n\tself.array[x, y, 1] = (g)\n\tself.array[x, y, 2] = (b)\n #QD & DT 4.2.15\n\n #_tkExec(self.image.put, \"{%s}\"%color_rgb(r,g,b), (x, y))", "def set_color(color='black', index=-1): # (8)\n if index == -1:\n global color_buffer\n color_buffer = deque([color]*NUM_LEDS, maxlen=NUM_LEDS)\n else:\n color_buffer[index] = color", "def show_red_mask(img, mask):\n img_ = img\n mask_ = np.bool_(mask)\n red = img_[:, :, 0]\n green = img_[:, :, 1]\n blue = img_[:, :, 2]\n red[mask_] = 255\n green[mask_] = 0\n blue[mask_] = 0\n return img_", "def setPixel(self, value, position):\n (x,y,z) = position\n if z<0 or z>=self.length:\n mamba.raiseExceptionOnError(mambaCore.ERR_BAD_SIZE)\n err = mambaCore.MB_PutPixel(self.seq[z].mbIm, value, position[0], position[1])\n mamba.raiseExceptionOnError(err)", "def set_pixel(self, x, y, new_color):\n assert self.valid_coordinates(x, y)\n self.pixels[self.pixel_offset(x, y)] = new_color", "def set_pixel(self, x, y, value):\n if x < 0 or x > 7 or y < 0 or y > 15:\n # Ignore out of bounds pixels.\n return\n if y < 8:\n self.set_led( y * 16 + x, value)\n else:\n self.set_led((y-8) * 16 + (x+8), value)", "def set_linked_mask(\n self,\n val=None\n ):\n if val != 
None:\n self.linked_mask = val", "def set(self, x, y, color):\n if x < 0 or x >= self.width or y < 0 or y >= self.height:\n return\n i = self.map[y][x]\n super().set(i, color)", "def pixel( self, x, y, c = '#ffffff' ):\n self.raster.put( c, ( x, y ) )", "def setPositionalMask(self, value):\n return self._set(positionalMask=value)", "def apply_mask(image, mask, color):\r\n for c in range(3):\r\n image[:, :, c] = np.where(mask == 1,\r\n image[:, :, c] + color[c],\r\n image[:, :, c])\r\n return image", "def setPixelColorRGB(self, n, red, green, blue):\n\t\tself.setPixelColor(n, Color(red, green, blue))", "def color_motion_mask(mask, color=None):\n if color is None:\n color = (220, 20, 60)\n h, w = mask.shape\n ext_mask = np.stack([mask, mask, mask], -1).astype(np.uint8)\n color = np.ones_like(ext_mask) * color\n index = np.ones_like(ext_mask) * 1.0\n final_mask = np.where(ext_mask == index, color, ext_mask).astype(np.uint8)\n return final_mask", "def setPixelColorRGB(self, n, red, green, blue, white=0):\n self._logger.debug(\"setPixelColorRGB\")", "def colorize_mask(mask):\n # mask: numpy array of the mask\n new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')\n new_mask.putpalette(palette)\n return new_mask", "def setMask(self, mask):\n try:\n self.mask = mask\n self.inds = na.nonzero(self.mask.flat)[0]\n #print \"length of self.inds\",len(self.inds)\n #print self.inds\n self.dim = self.mask.shape[::-1]\n #print self.mask.shape\n return True\n except Exception as error:\n print(\"failed in setMask\", error)", "def setColor(self,value):\n\t\tself.politics = value if(type(value) is int)else int(value[1:],16)\n\t\tself.canvas.itemconfig('node_'+self.identifier,fill=self.toRGB())", "def setColorMode(mode='full'):\n mdict = {'low':'NONE','full':'FULL'}\n dislin.clrmod(mdict[mode])", "def set_green(self, x, y, newval):\n self.__check_dimensions(x, y)\n return self.pixels[(x, y)].set_green(newval)", "def color(self, value: tuple) -> None:\n if value in Color.PALETTE:\n self._color = value", "def set_at(self,x,y,set=True):\n\t\tif ( not self._validate(x,y )):\n\t\t\treturn\n\n\t\t# set the bit in the grid\n\t\tif set:\n\t\t\tself.Grid[y] = self.Grid[y] | (1 << x)\n\t\telse:\n\t\t\tself.Grid[y] = self.Grid[y] & ~(1 << x)", "def setPixel(self, px, py, color):\n if not self.inBounds(px,py):\n return\n idx = py*self.w + px\n self.data[idx] = color", "def set_color(self, color):\n\t\tpass", "def setMask(self, other=None):\n if other is None:\n self.call('setMask', '')\n else:\n self.call('setMask', other.groupName)", "def setFlag(flagbyte, pos, status):\n if status:\n return flagbyte | 2**pos\n else:\n return flagbyte & ~2**pos", "def mask_color(self):\n return self._mask_color", "def setLeds(number: int, red: int, green: int, blue: int):\n pass", "def mask(self, mask):\n\n self._mask = mask", "def mask(self, mask):\n\n self._mask = mask", "def set_color(self, color):\n pass", "def setreferencepixel(self, *args, **kwargs):\n return _coordsys.coordsys_setreferencepixel(self, *args, **kwargs)", "def _MaskedImage_set(self, x, y=None, values=None):\n\n if values is None:\n assert (y is None)\n values = x\n try:\n self.getImage().set(values[0])\n self.getMask().set(values[1])\n self.getVariance().set(values[2])\n except TypeError:\n self.getImage().set(values)\n self.getMask().set(0)\n self.getVariance().set(0)\n else:\n try:\n self.getImage().set(x, y, values[0])\n if len(values) > 1:\n self.getMask().set(x, y, values[1])\n if len(values) > 2:\n self.getVariance().set(x, y, values[2])\n except 
TypeError:\n self.getImage().set(x)\n self.getMask().set(y)\n self.getVariance().set(values)", "def color_pixels(self, image, color):\r\n\r\n image[self.ally, self.allx] = color\r\n return image", "def Set(*args):\n return _XCAFDoc.XCAFDoc_ColorTool_Set(*args)", "def setColor(self, value):\n _res = self.mAPIContext.SDGraphObjectFrame_setColor(self.mHandle, ctypes.byref(value))\n if _res != SDApiError.NoError.value:\n if _res == SDApiError.NoErrorOutputParamNotSet.value:\n return None\n raise APIException(SDApiError(_res))\n return None", "def set_color(self, value):\n _lib.caca_set_dither_color.argtypes = [_Dither, ctypes.c_char_p]\n _lib.caca_set_dither_color.restype = ctypes.c_int\n\n return _lib.caca_set_dither_color(self, value)", "def red2blue(self):\r\n for x in range(self.xspan):\r\n for y in range(self.yspan):\r\n if (self.cells[x][y] == 1):\r\n self.cells[x][y] = 2", "def set_rgb(self, value):\n act = RGBAction(self, value)\n return act.invoke()", "def set_pixel(self, x, y, r, g, b, a):\n\t\t\n\t\ti = 4 * (y * self.width + x)\n\t\tself.buffer[i : i + 4] = array.array('f', struct.pack('ffff', r, g, b, a))", "def changeColor(self):\n self.layer.new_colormap()", "def set_green_yellow_filter(self, position):\n if position not in {'green', 'yellow'}:\n raise ValueError('\"position\" parameter must be either \"green\" or \"yellow\"')\n if position == 'green':\n self._iotool.execute(self._iotool_enable_green_command())\n else:\n self._iotool.execute(self._iotool_enable_yellow_command())\n time.sleep(self._spconfig.FILTER_SWITCH_DELAY)\n self._green_yellow_pos = position\n self._update_property('green_yellow_filter', position)", "def apply_mask(image, mask, color, alpha=0.5):\n for c in range(3):\n image[:, :, c] = np.where(mask == 1,\n image[:, :, c] *\n (1 - alpha) + alpha * color[c] * 255,\n image[:, :, c])\n # cv2.imshow(\"TEST\",image.astype(np.uint8))\n # print(color)\n return image", "def set_general_position_blue(self, position):\n\n self._general_position_blue = position", "def update_color(self):\r\n \r\n \r\n colorset = self.colorset\r\n \r\n self.grfx[0].colorset = colorset\r\n pass", "def setColorDiffuse(*args):", "def set_color(self, color, filled):\n for cell in filled:\n self.board[cell[0], cell[1]] = color", "def set_tile_color(self, x, y, color):\n self.__tile_grid[y][x].configure(bg=color)", "def resetColor(self):\n self.setColor(255, 255, 255 ,255)", "def changeMask(self, mask): \n if self.fileDialogShow:\n return\n if mask == \"file\":\n self.fileDialogShow = True\n self.openFileDialog()\n else:\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n img = \"masko.png\"\n else:\n img = \"maska.png\"\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(os.path.join(GG.utils.PATH_EDITOR_INTERFACE, img))\n img = ocempgui.draw.Image.load_image(imgPath)\n self.imgOptionsTab.picture = img \n self.avatarConfiguration[\"mask\"] = None\n self.paintMask()", "def SetColor(self, rgbtuple):\n if not rgbtuple:\n rgbtuple = wx.SystemSettings.GetColour(wx.SYS_COLOUR_BTNFACE).Get()\n col = [c/255.0 for c in rgbtuple]\n self.figure.set_facecolor(col)\n self.figure.set_edgecolor(col)\n self.canvas.SetBackgroundColour(wx.Colour(*rgbtuple))", "def SetMaskImage(self, arg0: 'itkImageUC2') -> \"void\":\n return _itkScalarImageToRunLengthFeaturesFilterPython.itkScalarImageToRunLengthFeaturesFilterIUC2_SetMaskImage(self, arg0)", "def set(self, coords, colors):\n if all(isinstance(e, list) for e in coords):\n # unpack list of coordinates\n for e, c in zip(coords, colors):\n 
self.set(e, c)\n else:\n led_nr = self.pos_to_led_nr(coords)\n #print \"Setting LED at [%d, %d] (nr. %d) to color %s\" % (coords[0], coords[1], led_nr, colors)\n self.strip.setPixelColor(led_nr, colors)", "def setbit(integer, nth_bit):\n if nth_bit < 0:\n raise ValueError('Negative bit number.')\n mask = 1 << nth_bit\n integer |= mask\n return integer", "def setColor(clr):\n if type(clr) == types.StringType:\n setColorString(clr)\n return \n if type(clr) == types.IntType:\n setColorIndex(clr)\n return\n if type(clr) == types.TupleType:\n setColorRGB(*clr)", "def _reset_mask(self, reset_to=False):\n self.data.mask = reset_to", "def set_black(self, x, y):\n self.pieces[x + (y * self.width)].set_black()", "def pixel(self, x: int, y: int, color: int):\n if (\n (x < self.size[0] and y < self.size[1]) and (x >= 0 and y >= 0)\n ):\n index, offset = self.position(x, y)\n self.image[index] = (\n self.image[index] & ~(0x01 << offset)\n ) | (\n (color != 0) << offset\n )\n else:\n return", "def setColor(self, color, group=None):\n group = group is None and self.group or group\n r = self.controller.send(self.light.color(milight.color_from_rgb(*color), group))\n logger.debug('Set color to %s (group: %s): %s' % (color, self.group, r))", "def change_color(self, x, y, state):\n if state == 1:\n color = self.tile_color\n else:\n color = self.background_color\n self.canvas.itemconfig(self.board[(x, y)], fill=color)", "def set_color(self, r=0, g=0, b=0):\n r = clamp(r)\n g = clamp(g)\n b = clamp(b)\n self._state.color = (r, g, b)\n self.send_command(Command.SET_COLOR, [int(r), int(g), int(b)])", "def set(self, row: int, col: int, color: Color) -> None:\n super(ColorGrid, self).set(row, col, color)", "def Set(*args, **kwargs):\n return _gdi_.Colour_Set(*args, **kwargs)", "def fill(self, color: int) -> None:\n red = (color >> 16) & 0xFF\n green = (color >> 8) & 0xFF\n blue = color & 0xFF\n for x in range(24):\n offset = unpack_from(\">HHH\", self.ledmap_bytes, x * 6)\n self._is31[offset[self.r_offset]] = red\n self._is31[offset[self.g_offset]] = green\n self._is31[offset[self.b_offset]] = blue" ]
[ "0.67938983", "0.67375386", "0.6516931", "0.6429108", "0.64179796", "0.6417907", "0.63744414", "0.6285334", "0.61303943", "0.6114359", "0.61034447", "0.6082238", "0.6080526", "0.6059235", "0.60527635", "0.6043658", "0.60351825", "0.59847206", "0.5964086", "0.5936446", "0.59183013", "0.59039086", "0.5898309", "0.58022964", "0.5801325", "0.5767835", "0.5766899", "0.57531756", "0.57263404", "0.57239443", "0.5693483", "0.5689004", "0.5678221", "0.567104", "0.56701696", "0.5661354", "0.56593126", "0.565085", "0.5641762", "0.5622717", "0.5615307", "0.56107485", "0.56039536", "0.55984527", "0.5584686", "0.55840766", "0.5566181", "0.5565974", "0.5551526", "0.553311", "0.55284613", "0.5501454", "0.5494332", "0.5489848", "0.5480622", "0.5451421", "0.5440903", "0.54259825", "0.5421315", "0.54068565", "0.54065824", "0.5388208", "0.5376381", "0.5372349", "0.5367933", "0.5367933", "0.5353782", "0.5344289", "0.53418636", "0.5326786", "0.532335", "0.5309285", "0.5308788", "0.53024405", "0.5281767", "0.52791613", "0.52719355", "0.5266258", "0.5265692", "0.5257998", "0.5257437", "0.5255291", "0.524791", "0.52359426", "0.5233213", "0.52236307", "0.522312", "0.52200687", "0.52071893", "0.5191133", "0.5187951", "0.51739967", "0.5163429", "0.51628476", "0.5161735", "0.5158998", "0.5158522", "0.51560867", "0.5153607", "0.5127639" ]
0.8528009
0
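A round-trip sketch pairing set_color with get_color from the previous record; the positions and colors are illustrative assumptions:

mask = 0
mask = set_color(mask, 0, 3)   # low two bits become 0b11
mask = set_color(mask, 2, 1)   # bits 4-5 become 0b01
print(bin(mask))                               # 0b10011
print(get_color(mask, 0), get_color(mask, 2))  # 3 1
# Note that set_color only ORs bits in; to overwrite a position that already holds a
# nonzero color, clear its two bits first, e.g. mask &= ~(3 << (position << 1)).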
Create a new ir.Set instance with given attributes. Absolutely all ir.Set instances must be created using this constructor.
def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set: ir_set = irast.Set(**kwargs) ctx.all_sets.append(ir_set) return ir_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, **attributes):\n self.set(**attributes)", "def __init__(self,\n *,\n attributes: List['Attribute'] = None) -> None:\n self.attributes = attributes", "def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)", "def __init__(self, **attributes):\n for key, value in attributes.items():\n setattr(self, key, value)", "def __init__(self, values=None):\n\n self.dict = {} # each instance of Set has its own dict property\n # which is what we'll use to track memnerships\n if values is not None:\n for value in values:\n self.add(value)", "def __init__(self,s={}) -> None:\n\n self.set=list()", "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n path_id: typing.Optional[irast.PathId]=None,\n stype: typing.Optional[s_types.Type]=None,\n ctx: context.ContextLevel) -> irast.Set:\n if path_id is None:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n if stype is None:\n stype = ir_set.stype\n result = new_set(\n path_id=path_id,\n path_scope_id=ir_set.path_scope_id,\n stype=stype,\n expr=ir_set.expr,\n ctx=ctx\n )\n result.rptr = ir_set.rptr\n return result", "def set_attributes(self, attributes):\n self.attributes = attributes", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def __init__(self, attributes_names: list):\r\n self.attributes_names = attributes_names", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def set_attributes(self, attributes):\n self.attributes = dict(attributes) # overwrite the existing registry of attributes with the input attributes", "def __init__(self):\n self.EntireSet = []", "def set_hdf5_attributes(dset, attributes):\n for key in attributes.iterkeys():\n dset.attrs[key] = attributes[key]\n\n return dset", "def __init__(self):\n self.set = set()", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, *args):\n _snap.TIntSet_swiginit(self, _snap.new_TIntSet(*args))", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def __init__(self, name: unicode, set: ghidra.util.graph.KeyIndexableSet):\n ...", "def __init__(self, attribute_names):\r\n self.attribute_names = attribute_names\r\n self.tree = None", "def __init__(self, name: str, attributes: List[Attribute], description: str = \"\"):\n self.name: str = name\n self.attributes = sorted(\n attributes, key=lambda x: x.name\n ) # type: List[Attribute]\n self._check_validity()\n self.attributes_by_name = {a.name: a for a in self.attributes}\n self.description = description", "def __init__(self, **attrs):\n \n # set given attributes\n for name, value in attrs.items():\n if hasattr(self, name):\n setattr(self, name, value)\n else:\n raise AttributeError(\"Attribute not found! 
--> %s\" % name)", "def __init__(self, **initial_attributes):\n\n for attribute_name, attribute_value in initial_attributes.items():\n setattr(self, attribute_name, attribute_value)", "def create_set(self, setname='new_set', based_on='data file', included=None,\n excluded=None, strings='keep', arrays='masks', replace=None,\n overwrite=False):\n meta = self._meta\n sets = meta['sets']\n # prove setname\n if not isinstance(setname, str):\n raise TypeError(\"'setname' must be a str.\")\n if setname in sets and not overwrite:\n raise KeyError(\"{} is already in `meta['sets'].`\".format(setname))\n # prove based_on\n if not based_on in sets:\n raise KeyError(\"based_on set '{}' is not in meta['sets'].\".format(based_on))\n # prove included\n if not included: included = [var.split('@')[-1] for var in sets[based_on]['items']]\n\n # prove replace\n if not replace: replace = {}\n elif not isinstance(replace, dict):\n raise TypeError(\"'replace' must be a dict.\")\n else:\n for var in list(replace.keys()) + list(replace.values()):\n if var not in included:\n raise KeyError(\"{} is not in 'included'\".format(var))\n\n # prove arrays\n if not arrays in ['masks', 'columns']:\n raise ValueError (\n \"'arrays' must be either 'masks' or 'columns'.\")\n # filter set and create new set\n fset = filtered_set(meta=meta,\n based_on=based_on,\n masks=True if arrays == 'masks' else False,\n included=included,\n excluded=excluded,\n strings=strings)\n\n # if arrays=='both':\n # new_items = []\n # items = fset['items']\n # for item in items:\n # new_items.append(item)\n # if item.split('@')[0]=='masks':\n # for i in meta['masks'][item.split('@')[-1]]['items']:\n # new_items.append(i['source'])\n # fset['items'] = new_items\n\n if replace:\n new_items = fset['items']\n for k, v in list(replace.items()):\n for x, item in enumerate(new_items):\n if v == item.split('@')[-1]: posv, move = x, item\n if k == item.split('@')[-1]: posk = x\n new_items[posk] = move\n new_items.pop(posv)\n fset['items'] = new_items\n\n add = {setname: fset}\n sets.update(add)\n\n return None", "def __init__(self):\n self.ds = set()\n self.keys = []", "def __init__(self, elements):\n self.elements = set()\n for el in elements:\n if not isinstance(el, Element):\n el = Element(el)\n self.elements.add(el)", "def __init__(self, tag=None, attributes=(), header=None, column_number=None):\n if tag:\n tag = tag.lower()\n self.tag = tag\n self.header = header\n self.column_number = column_number\n self.attributes = set([a.lower() for a in attributes])\n self.attribute_list = [a.lower() for a in attributes] # to preserve order", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def __init__(self, name=None):\n self.id = id # Unique identifier for the set\n self._next_id = 0 # Holds unique ids for graphs\n self._graphs = {} # Holds graphs, keyed by unique id\n self.name = name # Holds description of graph", "def __init__(self):\n self.randomSet = dict()", 
"def __init__(self, sets: List[ColdStartUserSet]):\n self.sets = sets", "def __new__(mcs, name, bases, attrs, **kwargs):\r\n attrs['__fields__'] = set()\r\n attrs['__store_attrs__'] = set()\r\n return super().__new__(mcs, name, bases, attrs, **kwargs)", "def __init__(self):\n self.s = set()", "def create_intrusion_set(\n name: str,\n created_by: Optional[stix2.Identity] = None,\n created: Optional[datetime] = None,\n modified: Optional[datetime] = None,\n description: Optional[str] = None,\n aliases: Optional[List[str]] = None,\n first_seen: Optional[datetime] = None,\n last_seen: Optional[datetime] = None,\n goals: Optional[List[str]] = None,\n resource_level: Optional[str] = None,\n primary_motivation: Optional[str] = None,\n secondary_motivations: Optional[List[str]] = None,\n labels: Optional[List[str]] = None,\n confidence: Optional[int] = None,\n external_references: Optional[List[stix2.ExternalReference]] = None,\n object_markings: Optional[List[stix2.MarkingDefinition]] = None,\n) -> stix2.IntrusionSet:\n return stix2.IntrusionSet(\n id=IntrusionSet.generate_id(name),\n created_by_ref=created_by,\n created=created,\n modified=modified,\n name=name,\n description=description,\n aliases=aliases,\n first_seen=first_seen,\n last_seen=last_seen,\n goals=goals,\n resource_level=resource_level,\n primary_motivation=primary_motivation,\n secondary_motivations=secondary_motivations,\n labels=labels,\n confidence=confidence,\n external_references=external_references,\n object_marking_refs=object_markings,\n )", "def __init__(self, harvest_attribs=None, copy_attribs='copy', **kwargs):\n ClassWithCollections.__init__(self, **kwargs)\n\n self.__atribs = harvest_attribs\n self.__copy_attribs = copy_attribs\n\n self._setAttribs(harvest_attribs)", "def set_attributes(instance, **attributes):\n proxy = type('SetAttr', (Proxy,), attributes)\n return proxy(instance)", "def set_attributes(self, new_attributes=None):\n self.attributes = new_attributes", "def create_sets(self,FD_SET=[],VA_SET=[]):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n\n if self.EORA is True:\n self.m.rROW = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions')\n else:\n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n if self.EORA is True:\n self.m.fdemand = Set(initialize=['P3h', 'P3n','P3g', 'P51','P52','P53'], doc='Final Demand')\n else:\n self.m.fdemand = Set(initialize=self.fd_cat, doc='Final Demand')\n\n if self.EORA is True:\n self.m.VA = Set(initialize=['VA'], doc='value added')\n else:\n self.m.VA = Set(initialize=VA_SET, doc='value added')", "def create_sets(self,FD_SET=['FinalD'],VA_SET=['VA']):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n self.m.P = Set(initialize=self.products, doc='sectors')\n self.m.row = Set(initialize=self.products, doc='products')\n self.m.col = Set(initialize=self.sectors+['FinalD'], doc='sectors and final demand')\n \n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n self.m.fdemand = Set(initialize=FD_SET, doc='Final Demand')\n\n self.m.VA = Set(initialize=VA_SET, doc='value added')", "def set(self) -> set:\n return set(self)", "def __init__(self, *args, **kwargs):\n TaxonSetLinked.__init__(self,\n 
taxon_set=kwargs.get(\"taxon_set\", None),\n label=kwargs.get(\"label\", None),\n oid=kwargs.get(\"oid\", None))\n self.taxon_seq_map = CharacterDataMap()\n self.character_types = []\n self.character_subsets = containers.OrderedCaselessDict()\n self.markup_as_sequences = True\n if len(args) > 1:\n raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)\n if len(args) == 1:\n if (\"stream\" in kwargs and kwargs[\"stream\"] is not None) \\\n or (\"schema\" in kwargs and kwargs[\"schema\"] is not None):\n raise error.MultipleInitializationSourceError(class_name=self.__class__.__name__, arg=args[0])\n if isinstance(args[0], self.__class__):\n self.clone_from(args[0])\n else:\n raise error.InvalidArgumentValueError(func_name=self.__class__.__name__, arg=args[0])\n else:\n self.process_source_kwargs(**kwargs)\n if \"oid\" in kwargs:\n self.oid = kwargs[\"oid\"]\n if \"label\" in kwargs:\n self.label = kwargs[\"label\"]", "def __init__(self, cardinality, index=None, operations={}, relations={},\n **kwargs):\n\n self.cardinality = cardinality\n self.index = index\n self.operations = operations\n self.relations = relations\n for attr in kwargs: setattr(self,attr,kwargs[attr])", "def attributes(self) -> Set[str]:\n return set()", "def set(self):\n return AttributeFunctor(self, lambda x, y: y)", "def _yamlSetAttributes(self, attributes):\n extra = dict([(key, value)\n for key, value in attributes.items()\n if key not in self._yamlAttributeKeys])\n self._preservedExtraAttributes.update(extra)\n\n keys = [key for key in attributes.keys()\n if (key in self._yamlAttributeKeys)\n and (key not in self._yamlSpeciallyHandledAttributes)]\n for key in keys:\n setattr(self, key, attributes[key])", "def __init__(self, simulation_attributes):\n for attr in ['locations','dprime_fnc','next_fixation',\n 'threshold', 'num_of_searches']:\n if getattr(simulation_attributes,attr) is None:\n assert False, (\n \"Precondition violation: none attribute in simulation_attributes \"\n + attr\n )\n if not isinstance(simulation_attributes, SimulationAttributes):\n raise TypeError(\n \"The argument isn't an instance of SimulationAttributes class\"\n )\n self.senzory_map = self._locations_to_senzory_map(\n simulation_attributes.locations\n )\n self.number_of_locs = self.senzory_map.shape[0]\n self.dprime_fnc = simulation_attributes.dprime_fnc\n self.dprime_map = generate_dprime_map(self.dprime_fnc,self.senzory_map)\n self.next_fixation = simulation_attributes.next_fixation\n self.threshold = simulation_attributes.threshold\n self.num_of_searches = simulation_attributes.num_of_searches", "def __init__(self, attribs):\n self.__instanced = False\n self.__initAccessor(attribs)\n self.__setValues(attribs)\n self.__instanced = True", "def __init__(self, attributes=None):\n super().__init__(attributes)\n \n # processing parameters\n self.set = _Settings()\n\n # results storage\n self.measure_time = None # store here in case we average FIDs, filled by chain!\n self.frequency_shift = None\n self.phase_0 = None\n self.data = None\n \n if attributes is not None:\n self.inflate(attributes)\n\n self.chain = None", "def attributes(self, attributes):\n\n self._attributes = attributes", "def attributes(self, attributes):\n\n self._attributes = attributes", "def __init__(self) -> None:\n self._cached_datatype: Set[str] = set()", "def test_construct_fieldset_tag(attributes):\n fset = FieldSet(**attributes)\n assert fset.construct() == fieldset.render(attributes)", "def set_attributes(self, attributes: 
typing.Dict[str, types.AttributeValue]) -> None:\n if not attributes:\n return\n for key, value in attributes.items():\n self.set_attribute(key, value)", "def __init__(self, attrs = None):\n\n if attrs != None:\n self.__dict__.update(attrs)", "def __init__(self):\n self.relation = ''\n self.attributes = []\n self.attribute_types = dict()\n self.attribute_data = dict()\n self.comment = []\n self.data = []\n pass", "def create(data):\n \n return Setlist(\n list_id = data['id'],\n name = data['name'],\n items = data['num_sets'])", "def __init__(self, field = None, value_set = None, discard = False):\r\n super(SetSelectNode, self).__init__()\r\n self.field = field\r\n self.value_set = value_set\r\n self.discard = discard", "def __init__(self, eset, invalidator='invalidate'):\n\n # Initialize the superclass\n super(FlagSetAttr, self).__init__(invalidator)\n\n # Save the EnumSet\n self.eset = eset", "def __init__(self, attrs: Dict[str, Any]) -> None:\n self.attrs = attrs", "def create_data_set(num_attributes):\n data_set = {}\n for index in range(num_attributes):\n size = random.randint(1, 10) # nosec\n key = str(index).encode(\"utf-8\")\n data_set[key] = get_random_bytes(size)\n return data_set", "def __init__(self):\n self.container = set()", "def fill(self, simulation_attributes):\n if not isinstance(simulation_attributes,SimulationAttributes):\n raise TypeError(\"The argument isn't instance of SimulationAttributes\")\n for attribute in ['_locations', '_dprime_fnc', '_next_fixation',\n '_threshold', '_num_of_searches']:\n if getattr(self,attribute) is None:\n setattr(self,attribute,getattr(simulation_attributes,attribute))\n if simulation_attributes.id_name is not None:\n if self._id_name is not None:\n self._id_name += '_' + simulation_attributes.id_name\n else:\n self._id_name = simulation_attributes.id_name", "def __init__(\n self,\n unique_id: str | None,\n name: str,\n source: str,\n attribute: str | None,\n precision: int,\n polynomial: np.poly1d,\n unit_of_measurement: str | None,\n minimum: tuple[float, float] | None,\n maximum: tuple[float, float] | None,\n ) -> None:\n self._source_entity_id = source\n self._precision = precision\n self._source_attribute = attribute\n self._attr_native_unit_of_measurement = unit_of_measurement\n self._poly = polynomial\n self._coefficients = polynomial.coefficients.tolist()\n self._attr_unique_id = unique_id\n self._attr_name = name\n self._minimum = minimum\n self._maximum = maximum", "async def create(\n self, *, header: Optional[headers.RequestHeader] = None\n ) -> CreateResponse:\n\n request = CreateRequest()\n if header is not None:\n request.header = header\n\n return await self._unary_unary(\n \"/atomix.set.SetService/Create\", request, CreateResponse,\n )", "def __init__(self, elements_or_ids=None, uidoc=revit.uidoc):\r\n\r\n BaseObjectWrapper.__init__(self, uidoc.Selection)\r\n self.uidoc = uidoc\r\n\r\n if not elements_or_ids:\r\n # Is List of elements is not provided, uses uidoc selection\r\n elements_or_ids = [e for e in uidoc.Selection.GetElementIds()]\r\n\r\n ElementSet.__init__(self, elements_or_ids, doc=self.uidoc.Document)", "def test_10_import_attribute_set(self):\n with mock_api(magento_attribute_responses):\n import_record(self.session, 'magento.attribute.set',\n self.backend_id, '9')\n\n mag_attr_obj = self.registry('magento.attribute.set')\n cr, uid = self.cr, self.uid\n mag_attr_set_ids = mag_attr_obj.search(cr, uid, [\n ('magento_id', '=', '9'),\n ('backend_id', '=', self.backend_id),\n ])\n 
self.assertEqual(len(mag_attr_set_ids), 1)\n mag_attr_set = mag_attr_obj.browse(cr, uid, mag_attr_set_ids[0])\n self.assertEqual(mag_attr_set.attribute_set_name, 'Default')", "def __init__ (self, uid=None):\n assert (uid is None) or (self.uid() == uid), 'UniqueIdentifier: ctor %s, actual %s' % (uid, self.uid())\n self.__associatedObjects = set()", "def _set_attributes(self):", "def __init__(self, name, attributes, incident_edges):\n self.name = name # initialize all necessary fields\n self.attributes = attributes\n self.incident_edges = incident_edges", "def __init__(self, name, attributes, incident_edges):\n self.name = name # initialize all necessary fields\n self.attributes = attributes\n self.incident_edges = incident_edges", "def modified(self, **attributes):\n new_obj = deepcopy(self)\n new_obj.__dict__.update(attributes)\n new_obj.initialize()\n return new_obj", "def set():", "def __init__(self, gene_sets: dict, gene_set_names: dict = None):\n self.gene_sets = gene_sets\n\n self.gene_set_names = gene_set_names if gene_set_names else dict()\n\n # initialize the lengths\n self.gene_set_size = dict([(gene_set_id, len(self.gene_sets[gene_set_id]))\n for gene_set_id in self.gene_sets])\n\n # number of curated genes - all\n self.n_curated = deepcopy(self.gene_set_size)\n\n # number of interactors - initialized with 0\n self.n_interactors = dict([(gene_set_id, 0) for gene_set_id in self.gene_sets])\n\n self.interactors = dict()", "def __init__(self, *args):\n this = _libsbml.new_XMLAttributes(*args)\n try: self.this.append(this)\n except: self.this = this", "def __init__(self):\n\n\t\tself.__id = None\n\t\tself.__name = None\n\t\tself.__system_name = None\n\t\tself.__display_value = None\n\t\tself.__shared_type = None\n\t\tself.__category = None\n\t\tself.__sort_by = None\n\t\tself.__sort_order = None\n\t\tself.__favorite = None\n\t\tself.__offline = None\n\t\tself.__default = None\n\t\tself.__system_defined = None\n\t\tself.__criteria = None\n\t\tself.__shared_details = None\n\t\tself.__fields = None\n\t\tself.__key_modified = dict()", "def create_setlike_dataset(py_obj,h_group,name,**kwargs):\n\n # set objects do not support indexing thus determination of item dtype has to\n # be handled specially. Call create_listlike_dataset for proper creation\n # of corresponding dataset\n if not py_obj:\n # dump empty set\n return h_group.create_dataset(\n name, data = list(py_obj), shape = None, dtype = int, **no_compression(kwargs)\n ),()\n set_iter = iter(py_obj)\n first_item = next(set_iter)\n item_dtype = check_iterable_item_type(first_item,set_iter)\n return create_listlike_dataset(\n py_obj, h_group, name, list_len = len(py_obj), item_dtype = item_dtype, **kwargs\n )", "def __init__(self, degree=None, length=None):\n Parent.__init__(self, category=Sets())\n self.degree = degree\n self.length = length", "def add_attributes(self, attributes):\n self.attributes = dict(self.attributes, **attributes)", "def from_sets(cls, set1, set2, universe_size=None):\n if not isinstance(set1, Set):\n set1 = set(set1)\n if not isinstance(set2, Set):\n set2 = set(set2)\n TP = len(set1 & set2)\n FP = len(set2) - TP\n FN = len(set1) - TP\n if universe_size is None:\n TN = 0\n else:\n TN = universe_size - TP - FP - FN\n if TN < 0:\n raise ValueError(\n \"universe_size must be at least as large as set union\")\n return cls(TP, FN, FP, TN)", "def __init__(self, *args, **kwargs):\n self.logger = logging.getLogger(self.__module__ + '.' 
+ self.__class__.__name__)\n\n self.logger.addHandler(logging.NullHandler())\n\n # These are common to all objects\n self._id = None\n self._version = None\n self._links = {}\n self._tags = []\n\n # These are particular to SubjectAttribute objects\n self._aerobics = None\n self._alcohol = None\n self._allergies = None\n self._asthma = None\n self._cad = None\n self._chf = None\n self._comment = None\n self._contact = None\n self._diabetes = None\n self._education = None\n self._family_history = None\n self._father = None\n self._ga_at_delivery = None\n self._gallbladder = None\n self._hyperlipidemia = None\n self._hypertension = None\n self._illicit_drug = None\n self._kidney = None\n self._liver = None\n self._lmp = None\n self._mother = None\n self._occupation = None\n self._osa = None\n self._pancreatitis = None\n self._postmenopausal = None\n self._preg_term = None\n self._pvd = None\n self._rx = None\n self._siblings = None\n self._study = None\n self._subproject = None\n self._survey_id = None\n self._tobacco = None\n\n super(SubjectAttribute, self).__init__(*args, **kwargs)", "def __init__(self, vertices=None, edges=None, attr={}):\n if vertices is None:\n vertices = {}\n self.vertices = vertices\n\n if edges is None:\n edges = {}\n self.edges = edges\n\n self.attr = attr", "def __init__(self, *args, **kwargs):\n\n self.mandatory_attributes = {'keywords': [], 'rules': [], 'desc': \"\",}\n models.AssetCollection.__init__(self, *args, **kwargs)\n self.set_gear_vars()", "def __init__(self):\n self._data = set()", "def __init__(self, attributes: List[AttributeName], g1: G1Element, Y1: Dict[str, G1Element], g2: G2Element, X2: G2Element, Y2: Dict[AttributeName, G2Element]):\n self.attributes = attributes\n self.g1 = g1\n self.Y1 = Y1\n self.g2 = g2\n self.X2 = X2\n self.Y2 = Y2", "def __init__(self):\n self.value_set = set()\n self.values = []", "def __init__(self, identifier):\r\n if self.__class__.all is None:\r\n self.__class__.all = set()\r\n\r\n self.__class__.all.add(self)\r\n\r\n self.identifier = identifier", "def __init__(self, rf: bool=False, atts: Sctids=None, eq: bool=True, ecv: Sctids=None, query=None, _mt_instance=None):\n Set.__init__(self, Quads)\n _Instance.__init__(self, RF2_Quad)\n RF2_Substrate_Common.__init__(self)\n self._val = self\n self._type = Quads\n if _mt_instance:\n self._query = \"SELECT id AS rid, sourceId AS id, typeId, destinationId, gid FROM %s WHERE 0\" % \\\n (RelationshipDB.fname() + \"_ext\")\n self.rf = False\n self._len = 0\n else:\n self._len = None # number of elements\n if query:\n self._query = query\n else:\n self._query = \"SELECT id AS rid, sourceId\" + (\" AS id,\" if not rf else \",\")\n self._query += \" typeId, destinationId\" + (\" AS id,\" if rf else \",\")\n self._query += \" gid FROM %s\" % RelationshipDB.fname() + '_ext'\n self._query += \" WHERE \"\n if atts is not None:\n self._query += ((\"typeId IN (%s)\" % atts.as_sql()) if eq else\n (\"typeId NOT IN (%s)\" % atts.as_sql())) + \" AND \"\n if ecv is not None:\n self._query += ((\"sourceId IN (%s)\" % ecv.as_sql()) if rf else\n (\"destinationId IN (%s)\" % ecv.as_sql())) + \" AND \"\n self._query += \"active=1 AND locked=0\"\n self.rf = rf", "def setA(cls,*items):\n cls.A = sympy.FiniteSet(*items)\n cls.id2items= {}\n num = 0\n for i in cls.A:\n cls.id2items[num] = i\n num = num + 1", "def __init__(__self__, *,\n extra: Optional[pulumi.Input[Mapping[str, pulumi.Input[Sequence[pulumi.Input[str]]]]]] = None,\n groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n 
non_resource_attributes: Optional[pulumi.Input['NonResourceAttributesArgs']] = None,\n resource_attributes: Optional[pulumi.Input['ResourceAttributesArgs']] = None,\n uid: Optional[pulumi.Input[str]] = None,\n user: Optional[pulumi.Input[str]] = None):\n if extra is not None:\n pulumi.set(__self__, \"extra\", extra)\n if groups is not None:\n pulumi.set(__self__, \"groups\", groups)\n if non_resource_attributes is not None:\n pulumi.set(__self__, \"non_resource_attributes\", non_resource_attributes)\n if resource_attributes is not None:\n pulumi.set(__self__, \"resource_attributes\", resource_attributes)\n if uid is not None:\n pulumi.set(__self__, \"uid\", uid)\n if user is not None:\n pulumi.set(__self__, \"user\", user)", "def init_dset(self, dset_name, dset_shape, dset_attrs):\n dtype = dset_attrs['dtype']\n chunks = dset_attrs['chunks']\n attrs = dset_attrs['attrs']\n name = dset_attrs.get('name', None)\n if name is not None:\n dset_name = name\n\n if chunks:\n chunks = tuple(chunks)\n\n logger.debug('Creating {} with shape: {}, dtype: {}, chunks: {}'\n .format(dset_name, dset_shape, dtype, chunks))\n ds = self._dst_h5.create_dataset(dset_name, shape=dset_shape,\n dtype=dtype, chunks=chunks)\n if attrs:\n for attr, value in attrs.items():\n if attr not in ['freq', 'start']:\n ds.attrs[attr] = value\n\n logger.info('- {} initialized'.format(dset_name))\n\n return ds", "def __init__(self, mRID='', aliasName='', name='', Names=None, DiagramObjects=None, ModelingAuthoritySet=None, *args, **kw_args):\n #: A Model Authority issues mRIDs. Given that each Model Authority has a unique id and this id is part of the mRID, then the mRID is globally unique. Global uniqeness is easily achived by using a UUID for the mRID. It is strongly recommended to do this. For CIMXML data files the mRID is mapped to rdf:ID or rdf:about attributes that identifies CIM object elements.\n self.mRID = mRID\n\n #: The aliasName is free text human readable name of the object alternative to IdentifiedObject.name. It may be non unique and may not correlate to a naming hierarchy. The attribute aliasName is put back because of backwards compatibility between CIM relases. It is however recommended to replace aliasName with the Name class as aliasName is planned for retirement at a future time. 
This was decided at a joint WG13/14 meeting in Minneapolis 2010-10-06.\n self.aliasName = aliasName\n\n #: The name is any free human readable and possibly non unique text naming the object.\n self.name = name\n\n self._Names = []\n self.Names = [] if Names is None else Names\n\n self._DiagramObjects = []\n self.DiagramObjects = [] if DiagramObjects is None else DiagramObjects\n\n self._ModelingAuthoritySet = None\n self.ModelingAuthoritySet = ModelingAuthoritySet\n\n super(IdentifiedObject, self).__init__(*args, **kw_args)", "def __init__(\n self,\n omit=None,\n normalizers=None,\n keys_by_type=None,\n accept=None,\n reject=None,\n auto_omit=True,\n ):\n\n def to_set(x):\n if x is None:\n return set()\n if isinstance(x, (list, tuple)):\n return set(x)\n return set([x])\n\n def make_match(m):\n return m and {k: to_set(v) for k, v in m.items()}\n\n self.accept, self.reject = make_match(accept), make_match(reject)\n self.omit = to_set(omit)\n if auto_omit and self.accept:\n self.omit.update(k for k, v in self.accept.items() if len(v) == 1)\n\n self.normalizers = normalizers or {}\n if keys_by_type is None:\n self.keys_by_type = None\n else:\n self.keys_by_type = {}\n for k, v in keys_by_type.items():\n if isinstance(v, str):\n v = [v]\n self.keys_by_type[k] = tuple(\n i for i in v if i not in self.omit\n )", "def create_from_file(filename: str):\n if not os.path.isfile(filename):\n raise FileNotFoundError(\"Failed to find GeneSet file \" + filename)\n\n with open(filename, \"rb\") as binary_reader:\n (gene_sets, gene_set_size, gene_set_names, interactors, n_curated, n_interactors) = \\\n pickle.load(binary_reader)\n\n gene_set = GeneSet(dict())\n gene_set.gene_sets = gene_sets\n gene_set.gene_set_size = gene_set_size\n gene_set.gene_set_names = gene_set_names\n gene_set.interactors = interactors\n gene_set.n_curated = n_curated\n gene_set.n_interactors = n_interactors\n\n return gene_set", "def __init__(self, **kwargs):\n # loop over the given kwargs\n for key, value in kwargs.items():\n # treat them like attribute assignments\n setattr(self, key, value)", "def __init__(self, *args, **kwargs):\n\n kwargs.setdefault('unique', True)\n\n super().__init__(*args, **kwargs)", "def __init__(self, *args, **kwargs):\n\n kwargs.setdefault('unique', True)\n\n super().__init__(*args, **kwargs)", "def __init__(self, items={}, strict=True):\n\n self.strict = strict\n self._names = []\n self._items = {}\n\n for name, value in items.iteritems():\n self[name] = value", "def __init__(self, tags=None, **kwargs):\n self._tags = None\n self.tags = tags\n for i in kwargs.keys():\n setattr(self, i, kwargs[i])" ]
[ "0.7283344", "0.6427488", "0.64081055", "0.621952", "0.620814", "0.6190213", "0.6113432", "0.6045844", "0.600017", "0.600017", "0.59091234", "0.59091234", "0.59091234", "0.58970034", "0.584854", "0.5807153", "0.5795997", "0.5795997", "0.579595", "0.57853985", "0.57681274", "0.5765502", "0.5747845", "0.5729245", "0.5681843", "0.5653271", "0.56460977", "0.56094784", "0.56030023", "0.5585468", "0.5547494", "0.552431", "0.551637", "0.55147505", "0.5503475", "0.5478137", "0.54569", "0.54494154", "0.54300463", "0.5417998", "0.54043436", "0.53720105", "0.5367086", "0.5361253", "0.536113", "0.5358201", "0.5346374", "0.53418887", "0.53407454", "0.5322716", "0.53195065", "0.53195065", "0.5319284", "0.5314439", "0.53102106", "0.5262946", "0.52616566", "0.5247777", "0.5245835", "0.5204437", "0.5197062", "0.51916903", "0.5188403", "0.5188137", "0.5187209", "0.51660836", "0.5159674", "0.5149501", "0.51423436", "0.51356614", "0.51299846", "0.51299846", "0.5118979", "0.5116305", "0.51117367", "0.5111121", "0.5110842", "0.5108161", "0.5101927", "0.5099703", "0.50974077", "0.50939035", "0.5085104", "0.50842345", "0.50832", "0.50816077", "0.5081346", "0.50735164", "0.5068004", "0.50677353", "0.5060458", "0.5045128", "0.50421983", "0.5034924", "0.5034372", "0.5031612", "0.50298256", "0.50298256", "0.5024085", "0.50198334" ]
0.7015722
1
Create a new ir.Set from another ir.Set. The new Set inherits source Set's scope, schema item, expression, and, if preserve_scope_ns is set, path_id. If preserve_scope_ns is False, the new Set's path_id will be namespaced with the currently active scope namespace.
def new_set_from_set(
        ir_set: irast.Set, *,
        preserve_scope_ns: bool=False,
        path_id: typing.Optional[irast.PathId]=None,
        stype: typing.Optional[s_types.Type]=None,
        ctx: context.ContextLevel) -> irast.Set:
    if path_id is None:
        path_id = ir_set.path_id
    if not preserve_scope_ns:
        path_id = path_id.merge_namespace(ctx.path_id_namespace)
    if stype is None:
        stype = ir_set.stype
    result = new_set(
        path_id=path_id,
        path_scope_id=ir_set.path_scope_id,
        stype=stype,
        expr=ir_set.expr,
        ctx=ctx
    )
    result.rptr = ir_set.rptr
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def create_set(self, setname='new_set', based_on='data file', included=None,\n excluded=None, strings='keep', arrays='masks', replace=None,\n overwrite=False):\n meta = self._meta\n sets = meta['sets']\n # prove setname\n if not isinstance(setname, str):\n raise TypeError(\"'setname' must be a str.\")\n if setname in sets and not overwrite:\n raise KeyError(\"{} is already in `meta['sets'].`\".format(setname))\n # prove based_on\n if not based_on in sets:\n raise KeyError(\"based_on set '{}' is not in meta['sets'].\".format(based_on))\n # prove included\n if not included: included = [var.split('@')[-1] for var in sets[based_on]['items']]\n\n # prove replace\n if not replace: replace = {}\n elif not isinstance(replace, dict):\n raise TypeError(\"'replace' must be a dict.\")\n else:\n for var in list(replace.keys()) + list(replace.values()):\n if var not in included:\n raise KeyError(\"{} is not in 'included'\".format(var))\n\n # prove arrays\n if not arrays in ['masks', 'columns']:\n raise ValueError (\n \"'arrays' must be either 'masks' or 'columns'.\")\n # filter set and create new set\n fset = filtered_set(meta=meta,\n based_on=based_on,\n masks=True if arrays == 'masks' else False,\n included=included,\n excluded=excluded,\n strings=strings)\n\n # if arrays=='both':\n # new_items = []\n # items = fset['items']\n # for item in items:\n # new_items.append(item)\n # if item.split('@')[0]=='masks':\n # for i in meta['masks'][item.split('@')[-1]]['items']:\n # new_items.append(i['source'])\n # fset['items'] = new_items\n\n if replace:\n new_items = fset['items']\n for k, v in list(replace.items()):\n for x, item in enumerate(new_items):\n if v == item.split('@')[-1]: posv, move = x, item\n if k == item.split('@')[-1]: posk = x\n new_items[posk] = move\n new_items.pop(posv)\n fset['items'] = new_items\n\n add = {setname: fset}\n sets.update(add)\n\n return None", "def extend_path(\n source_set: irast.Set,\n ptrcls: s_pointers.Pointer,\n direction: PtrDir=PtrDir.Outbound,\n target: typing.Optional[s_nodes.Node]=None, *,\n ignore_computable: bool=False,\n force_computable: bool=False,\n unnest_fence: bool=False,\n same_computable_scope: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n\n if ptrcls.is_link_property(ctx.env.schema):\n src_path_id = source_set.path_id.ptr_path()\n else:\n if direction != s_pointers.PointerDirection.Inbound:\n source = ptrcls.get_near_endpoint(ctx.env.schema, direction)\n if not source_set.stype.issubclass(ctx.env.schema, source):\n # Polymorphic link reference\n source_set = class_indirection_set(\n source_set, source, optional=True, ctx=ctx)\n\n src_path_id = source_set.path_id\n\n if target is None:\n target = ptrcls.get_far_endpoint(ctx.env.schema, direction)\n path_id = src_path_id.extend(ptrcls, direction, target,\n ns=ctx.path_id_namespace,\n schema=ctx.env.schema)\n\n target_set = new_set(stype=target, path_id=path_id, ctx=ctx)\n\n ptr = irast.Pointer(\n source=source_set,\n target=target_set,\n ptrcls=ptrcls,\n direction=direction\n )\n\n target_set.rptr = ptr\n\n if (not ignore_computable and _is_computable_ptr(\n ptrcls, force_computable=force_computable, ctx=ctx)):\n target_set = computable_ptr_set(\n ptr, unnest_fence=unnest_fence,\n same_computable_scope=same_computable_scope, ctx=ctx)\n\n return target_set", "def copy(self):\n products_by_target = defaultdict(OrderedSet)\n for key, value in 
self._products_by_target.items():\n products_by_target[key] = OrderedSet(value)\n return UnionProducts(products_by_target=products_by_target)", "def copy(self):\n r = SubsSet()\n r.rewrites = self.rewrites.copy()\n for expr, var in self.items():\n r[expr] = var\n return r", "def copySet(_session, _set_src, _set_dst, _segment):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _set_src,\n sc.SC_ARC,\n 0), True)\n \n while not it.is_over():\n# s_el = it.value(2)\n# _idtf = _session.get_idtf(s_el)\n# el = s_el\n# if isSystemId(_idtf):\n# el = _session.create_el(_segment, _session.get_type(s_el))\n createPair(_session, _segment, _set_dst, it.value(2), _session.get_type(it.value(1)))\n it.next()", "def fixSets(namespace):\n\ttry:\n\t\tset\n\texcept:\n\t\timport sets\n\t\tnamespace[\"set\"] = sets.Set\n\t\tnamespace[\"frozenset\"] = sets.ImmutableSet", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def make_set(node):\n node.parent = node\n node.rank = 0", "def copy(self):\n copy = GeneSet(dict())\n\n copy.gene_sets = deepcopy(self.gene_sets)\n copy.gene_set_names = deepcopy(self.gene_set_names)\n copy.gene_set_size = deepcopy(self.gene_set_size)\n copy.interactors = deepcopy(self.interactors)\n copy.n_curated = deepcopy(self.n_curated)\n copy.n_interactors = deepcopy(self.n_interactors)\n\n return copy", "def copy(self):\n return self.__class__(*self.sets)", "def from_sets(cls, set1, set2, universe_size=None):\n if not isinstance(set1, Set):\n set1 = set(set1)\n if not isinstance(set2, Set):\n set2 = set(set2)\n TP = len(set1 & set2)\n FP = len(set2) - TP\n FN = len(set1) - TP\n if universe_size is None:\n TN = 0\n else:\n TN = universe_size - TP - FP - FN\n if TN < 0:\n raise ValueError(\n \"universe_size must be at least as large as set union\")\n return cls(TP, FN, FP, TN)", "def copy(self) -> 'RangeSet':\n return RangeSet(self)", "def newChemAtomSet(self, **attrlinks):\n return ChemAtomSet(self, **attrlinks)", "def from_node(cls, variable):\n return cls(variable.name, variable.container is ast.ContainerTypes.Set)", "def make_network_set(name, networkUris=[]):\n\n return {\n 'name': name,\n 'type': 'network-set',\n 'nativeNetworkUri': None,\n 'networkUris': networkUris[:],\n 'connectionTemplateUri': None}", "def subset(self, variables=None, from_set=None, inplace=False):\n if not (variables or from_set) or (variables and from_set):\n err = \"Must pass either 'variables' or 'from_set'!\"\n raise ValueError(err)\n subset_ds = self.clone() if not inplace else self\n sets = subset_ds._meta['sets']\n if variables:\n from_set = 'subset'\n subset_ds.create_set(setname='subset', included=variables)\n else:\n if not from_set in sets:\n err = \"'{}' not found in meta 'sets' collection!\"\n raise KeyError(err.format(from_set))\n variables = [v.split('@')[-1] for v in sets[from_set]['items']]\n all_vars = subset_ds.columns() + subset_ds.masks()\n for var in all_vars:\n if not var in variables:\n if not self._is_array_item(var): subset_ds.drop(var)\n sets['data file']['items'] = sets[from_set]['items']\n del subset_ds._meta['sets'][from_set]\n\n if not inplace:\n return subset_ds\n else:\n return None", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def cast_value_to_set(self, name: str, value: Iterable) -> Set:\n return set(self.get_object_from_name(elem, name) for elem in value)", "def set(self) -> set:\n return set(self)", 
"def copySetFrom (self, other):\n\n if other.hasLocalTimeString():\n self.localTimeString=other.localTimeString\n self._myHasLocalTimeString=other._myHasLocalTimeString\n self._myLocalTimeStringRequested=other._myLocalTimeStringRequested\n \n if other.hasUtcTimeString():\n self.utcTimeString=other.utcTimeString\n self._myHasUtcTimeString=other._myHasUtcTimeString\n self._myUtcTimeStringRequested=other._myUtcTimeStringRequested\n \n if other.hasDaylightSavingTime():\n self.daylightSavingTime=other.daylightSavingTime\n self._myHasDaylightSavingTime=other._myHasDaylightSavingTime\n self._myDaylightSavingTimeRequested=other._myDaylightSavingTimeRequested\n \n if other.hasEpoch():\n self.epoch=other.epoch\n self._myHasEpoch=other._myHasEpoch\n self._myEpochRequested=other._myEpochRequested\n \n if other.hasUtcOffsetMinutes():\n self.utcOffsetMinutes=other.utcOffsetMinutes\n self._myHasUtcOffsetMinutes=other._myHasUtcOffsetMinutes\n self._myUtcOffsetMinutesRequested=other._myUtcOffsetMinutesRequested", "def to_id_set_entity(self) -> dict:\n id_set_entity = self.dict()\n id_set_entity[\"file_path\"] = str(self.path)\n id_set_entity[\"pack\"] = self.in_pack.object_id # type: ignore[union-attr]\n return id_set_entity", "def __eq__(self, values):\n set_values = [set_value(val) for val in values]\n self.filter.filter_domain.set.elements.extend(set_values)\n return self", "def copy(self):\n return set(self)", "def construct(self):\n\n newSet = {}\n current_index = 0\n\n for key_1, value_1 in self._sets[self._currentSet].items():\n current_index += 1\n for key_2,value_2 in list(self._sets[self._currentSet].items())[current_index:]:\n # join the 2 tuples\n join = key_1 + key_2\n # remove duplicates\n join = tuple(set(join))\n # get combinations\n combined = tuple(combinations(join, self._currentSet+1))\n # sort combination\n combined = tuple(sorted(combined[0]))\n\n # append new combination to dict\n if len(combined) != 0 :\n newSet[combined] = 0\n\n self._currentSet += 1\n # append the new itemset in the sets dict \n self._sets[self._currentSet] = newSet", "def __and__(self, rs):\n revs = {}\n for r in self._revs.keys():\n if r in rs:\n revs[r] = 1\n return RevisionSet(revs)", "def get_id_set(id_set_path: str) -> dict:\n if id_set_path:\n id_set = open_id_set_file(id_set_path)\n else:\n id_set, _, _ = IDSetCreator(print_logs=False).create_id_set()\n return id_set", "def copy(self):\n return IntervalSet(self)", "def clone(self):\n return _libsbml.QualPkgNamespaces_clone(self)", "def add_elements_to_set(s: set, *args) -> set:\n s.update(set(*args))\n return s", "def __sub__(self, rs):\n revs = {}\n for r in self._revs.keys():\n if r not in rs:\n revs[r] = 1\n return RevisionSet(revs)", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def make_iq_set(self, sub=None):\n iq = self.Iq()._set_stanza_values({'type': 'set'})\n if sub != None:\n iq.append(sub)\n return iq", "def _variable_or_iterable_to_set(x):\n if x is None:\n return frozenset([])\n\n if isinstance(x, str):\n return frozenset([x])\n\n if not isinstance(x, Iterable) or not all(isinstance(xx, str) for xx in x):\n raise ValueError(\n f\"{x} is expected to be either a string, set of strings, or an iterable of strings\"\n )\n\n return frozenset(x)", "def createAddressSet(self) -> ghidra.program.model.address.AddressSet:\n ...", "def to_set(elem_sort, *elems):\n res = LambdaSet.get_empty(elem_sort)\n for elem in elems:\n res = res.insert(elem)\n return res", "def 
__init__(self, field = None, value_set = None, discard = False):\r\n super(SetSelectNode, self).__init__()\r\n self.field = field\r\n self.value_set = value_set\r\n self.discard = discard", "def create_sets(self,FD_SET=[],VA_SET=[]):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n\n if self.EORA is True:\n self.m.rROW = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries+['ROW'],ordered=True, doc='regions')\n else:\n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n if self.EORA is True:\n self.m.fdemand = Set(initialize=['P3h', 'P3n','P3g', 'P51','P52','P53'], doc='Final Demand')\n else:\n self.m.fdemand = Set(initialize=self.fd_cat, doc='Final Demand')\n\n if self.EORA is True:\n self.m.VA = Set(initialize=['VA'], doc='value added')\n else:\n self.m.VA = Set(initialize=VA_SET, doc='value added')", "def add_set(self, repres):\n s = self.set_indx(repres)\n if not s is None:\n raise Exception\n self._data.append(set(repres))", "def bulk_add_to_set(self, set_ids, element_ids):\n if len(set_ids) != len(element_ids):\n raise ValueError\n setpairs = zip(set_ids, element_ids)\n setlist = self._aggregate_set_id_element_pairs(setpairs)\n with self.table.batch_write() as batch:\n for pair in setlist:\n set_id, element_ids = pair\n item = self._get_or_create_item('set', set_id)\n if 'value' not in item.keys() or not isinstance(\n item['value'], set):\n item['value'] = set()\n item['value'].update(element_ids)\n batch.put_item(item)", "def define_set():\n set_1 = set([1, 2, 3])\n print type(set_1)\n print set_1\n\n set_2 = {2, 3, 2}\n print type(set_2)\n # <type 'set'>\n print set_2\n # set([2, 3])\n\n a = set((1, 2, 3, 4))\n b = set([3, 4, 5, 6])\n print a | b # Union\n # {1, 2, 3, 4, 5, 6}\n print a & b # Intersection\n # {3, 4}\n print a < b # Subset\n # False\n print a - b # Difference\n # {1, 2}\n print a ^ b # Symmetric Difference\n # {1, 2, 5, 6}", "def build_set(self, s):\n comma = self.art_type([self.string_type(', ')], baseline=0)\n repr_elems = self.concatenate(s, comma)\n return self.build_container(\n repr_elems, self.left_curly_brace, self.right_curly_brace)", "def copy(self):\n \n \n G = DiGraph()\n G.node_set = copy.deepcopy(self.node_set)\n G.prefix = copy.deepcopy(self.prefix)\n G.suffix = copy.deepcopy(self.suffix)\n G.num_node = copy.deepcopy(self.num_node)\n G.edges = copy.deepcopy(self.edges)\n \n return G", "def _to_rangeset(other: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n if not isinstance(other, RangeSet):\n try:\n other = RangeSet(other)\n except ValueError:\n raise ValueError(f\"Cannot convert {type(other)} to a RangeSet\")\n return other", "def partition(self, **kwargs):\n return TaxonSetPartition(self, **kwargs)", "def getOneItemSet(self, transListSet):\n itemSet = set()\n for line in transListSet:\n for item in line:\n itemSet.add(frozenset([item]))\n return itemSet", "def clone_graph(source_graph, target_graph=None, identifier=None):\n if target_graph is None:\n g = rdflib.Graph(identifier=identifier)\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=True, replace=True)\n else:\n g = target_graph\n for p, n in source_graph.namespace_manager.namespaces():\n g.namespace_manager.bind(p, n, override=False, replace=False)\n for t in iter(source_graph):\n g.add(t)\n return g", "def xpathNewNodeSet(self):\n ret 
= libxml2mod.xmlXPathNewNodeSet(self._o)\n if ret is None:raise xpathError('xmlXPathNewNodeSet() failed')\n return xpathObjectRet(ret)", "def _op_copy(self, op: str, other: t.Any) -> InspectableSet[_C]:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n retval = getattr(self.__members__, op)(other)\n if retval is not NotImplemented:\n return InspectableSet(retval)\n return NotImplemented", "def __sub__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"only sets can be removed from sets\")\n\n new_set = self._clone()\n\n for element in other:\n new_set.delete(element)\n\n return new_set", "def new_TaskSet(self, taskset):\n if not self.has_TaskSet(taskset.metadata): \n self.add_TaskSet(taskset)\n return self.get_TaskSet(taskset.metadata)", "def union(self, contextset):\n new = ContextSet([])\n new.set_contexts(self._contexts.union(contextset.get_contexts()))\n return new", "def mkset(item):\n if isinstance(item, set):\n return item\n elif item is None:\n return set()\n elif isIterable(item):\n return set(item)\n else:\n return set([item])", "def ipset_same_x():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(0, 10, 11))", "def clone(self):\n joined_function = lambda: dot_joiner(self.path, self.path_type)\n return self.__class__(self.path, self.configuration, self.converters, self.ignore_converters, joined_function=joined_function)", "def init_dset(self, dset_name, dset_shape, dset_attrs):\n dtype = dset_attrs['dtype']\n chunks = dset_attrs['chunks']\n attrs = dset_attrs['attrs']\n name = dset_attrs.get('name', None)\n if name is not None:\n dset_name = name\n\n if chunks:\n chunks = tuple(chunks)\n\n logger.debug('Creating {} with shape: {}, dtype: {}, chunks: {}'\n .format(dset_name, dset_shape, dtype, chunks))\n ds = self._dst_h5.create_dataset(dset_name, shape=dset_shape,\n dtype=dtype, chunks=chunks)\n if attrs:\n for attr, value in attrs.items():\n if attr not in ['freq', 'start']:\n ds.attrs[attr] = value\n\n logger.info('- {} initialized'.format(dset_name))\n\n return ds", "def symmetric_difference_update(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> None:\n # the easiest way to do this is just to do regular symmetric_difference and then copy the result\n rng_set = RangeSet._to_rangeset(rng_set)\n self._ranges = self.symmetric_difference(rng_set)._ranges", "def _as_delimited_set(self, name):\n org_type = self._get_type(name)\n if org_type == 'delimited set': return None\n valid = ['single', 'string']\n if not org_type in valid:\n msg = 'Cannot convert variable {} of type {} to delimited set!'\n raise TypeError(msg.format(name, org_type))\n if org_type == 'single':\n self._meta['columns'][name]['type'] = 'delimited set'\n self._data[name] = self._data[name].apply(\n lambda x: str(int(x)) + ';' if not np.isnan(x) else np.NaN)\n return None\n elif org_type == 'string':\n # we assume we have a delimited set in the string variable\n # delimited with a semicolon agree;disagree;agree\n original_column = self._data[name]\n # we encapsulate each line with !; ;! 
so that the string\n # replacement works correctly\n if original_column.dropna().tolist()[0][-1] == \";\":\n original_column = \"!;\" + original_column + \"!\"\n else:\n original_column = \"!;\" + original_column + \";!\"\n\n original_column = original_column.replace(pd.np.nan,'')\n original_column = original_column.str.replace(\"; \",\";\")\n original_column = original_column.str.replace(\" ;\",\";\")\n\n all_values_split = [i.split(\";\") for i in original_column]\n flat = [i for sublist in all_values_split for i in sublist]\n trim = [i.strip() for i in flat]\n trim = [i for i in trim if len(i)>0]\n unique = list(set(trim))\n if \"!\" in unique:\n unique.remove(\"!\")\n unique.sort()\n value_map = []\n quantipy_values = []\n for k,item in enumerate(unique):\n value_map.append((k,item))\n quantipy_values.append({'text':{self.meta()['lib']['default text']:item},'value':k})\n original_column = original_column.str.replace(\";\" + re.escape(item) + \";\",\";\" + str(k) + \";\")\n original_column = original_column.str.replace(\"; \",\";\")\n # remove the ;! !; we placed at the beginning and end of each string\n original_column = original_column.str.replace(\"!;\",\"\")\n original_column = original_column.str.replace(\"!\",\"\")\n self._meta['columns'][name]['values'] = quantipy_values\n self._meta['columns'][name]['type'] = 'delimited set'\n self._data[name] = original_column", "def __init__(self, *args, **kwargs):\n TaxonSetLinked.__init__(self,\n taxon_set=kwargs.get(\"taxon_set\", None),\n label=kwargs.get(\"label\", None),\n oid=kwargs.get(\"oid\", None))\n self.taxon_seq_map = CharacterDataMap()\n self.character_types = []\n self.character_subsets = containers.OrderedCaselessDict()\n self.markup_as_sequences = True\n if len(args) > 1:\n raise error.TooManyArgumentsError(func_name=self.__class__.__name__, max_args=1, args=args)\n if len(args) == 1:\n if (\"stream\" in kwargs and kwargs[\"stream\"] is not None) \\\n or (\"schema\" in kwargs and kwargs[\"schema\"] is not None):\n raise error.MultipleInitializationSourceError(class_name=self.__class__.__name__, arg=args[0])\n if isinstance(args[0], self.__class__):\n self.clone_from(args[0])\n else:\n raise error.InvalidArgumentValueError(func_name=self.__class__.__name__, arg=args[0])\n else:\n self.process_source_kwargs(**kwargs)\n if \"oid\" in kwargs:\n self.oid = kwargs[\"oid\"]\n if \"label\" in kwargs:\n self.label = kwargs[\"label\"]", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def patch_set(self, *patch_tuples):\n return PatchSet(*patch_tuples)", "def getSet(unique_name):", "def getSet(unique_name):", "def create_setlike_dataset(py_obj,h_group,name,**kwargs):\n\n # set objects do not support indexing thus determination of item dtype has to\n # be handled specially. 
Call create_listlike_dataset for proper creation\n # of corresponding dataset\n if not py_obj:\n # dump empty set\n return h_group.create_dataset(\n name, data = list(py_obj), shape = None, dtype = int, **no_compression(kwargs)\n ),()\n set_iter = iter(py_obj)\n first_item = next(set_iter)\n item_dtype = check_iterable_item_type(first_item,set_iter)\n return create_listlike_dataset(\n py_obj, h_group, name, list_len = len(py_obj), item_dtype = item_dtype, **kwargs\n )", "def __init__(self):\n self.EntireSet = []", "def __add__(self, other):\n if not isinstance(other, (list, Set)):\n raise TypeError(\"sets can only be joined with sets\")\n new_set = self._clone()\n for element in other:\n new_set._insert(element)\n return new_set", "def clone(self):\n return _libsbml.MultiPkgNamespaces_clone(self)", "def set():", "def computable_ptr_set(\n rptr: irast.Pointer, *,\n unnest_fence: bool=False,\n same_computable_scope: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n ptrcls = rptr.ptrcls\n source_set = rptr.source\n source_scls = source_set.stype\n # process_view() may generate computable pointer expressions\n # in the form \"self.linkname\". To prevent infinite recursion,\n # self must resolve to the parent type of the view NOT the view\n # type itself. Similarly, when resolving computable link properties\n # make sure that we use rptr.ptrcls.derived_from.\n if source_scls.is_view(ctx.env.schema):\n source_set = new_set_from_set(\n source_set, preserve_scope_ns=True, ctx=ctx)\n source_set.stype = source_scls.peel_view(ctx.env.schema)\n source_set.shape = []\n\n if source_set.rptr is not None:\n schema = ctx.env.schema\n derived_from = source_set.rptr.ptrcls.get_derived_from(schema)\n if (derived_from is not None and\n not derived_from.generic(schema) and\n derived_from.get_derived_from(schema) is not None and\n ptrcls.is_link_property(schema)):\n source_set.rptr.ptrcls = derived_from\n\n try:\n qlexpr, qlctx, inner_source_path_id, path_id_ns = \\\n ctx.source_map[ptrcls]\n except KeyError:\n ptrcls_default = ptrcls.get_default(ctx.env.schema)\n if not ptrcls_default:\n ptrcls_sn = ptrcls.get_shortname(ctx.env.schema)\n raise ValueError(\n f'{ptrcls_sn!r} is not a computable pointer')\n\n if isinstance(ptrcls_default, s_expr.ExpressionText):\n qlexpr = astutils.ensure_qlstmt(qlparser.parse(ptrcls_default))\n else:\n qlexpr = qlast.BaseConstant.from_python(ptrcls_default)\n\n qlctx = None\n inner_source_path_id = None\n path_id_ns = None\n\n if qlctx is None:\n # Schema-level computable, completely detached context\n newctx = ctx.detached\n else:\n newctx = _get_computable_ctx(\n rptr=rptr,\n source=source_set,\n source_scls=source_scls,\n inner_source_path_id=inner_source_path_id,\n path_id_ns=path_id_ns,\n same_scope=same_computable_scope,\n qlctx=qlctx,\n ctx=ctx)\n\n if ptrcls.is_link_property(ctx.env.schema):\n source_path_id = rptr.source.path_id.ptr_path()\n else:\n source_path_id = rptr.target.path_id.src_path()\n\n path_id = source_path_id.extend(\n ptrcls,\n s_pointers.PointerDirection.Outbound,\n ptrcls.get_target(ctx.env.schema),\n ns=ctx.path_id_namespace,\n schema=ctx.env.schema)\n\n with newctx() as subctx:\n subctx.view_scls = ptrcls.get_target(ctx.env.schema)\n subctx.view_rptr = context.ViewRPtr(\n source_scls, ptrcls=ptrcls, rptr=rptr)\n subctx.anchors[qlast.Source] = source_set\n subctx.empty_result_type_hint = ptrcls.get_target(ctx.env.schema)\n\n if isinstance(qlexpr, qlast.Statement) and unnest_fence:\n subctx.stmt_metadata[qlexpr] = context.StatementMetadata(\n 
is_unnest_fence=True)\n\n comp_ir_set = dispatch.compile(qlexpr, ctx=subctx)\n\n if ptrcls in ctx.pending_cardinality:\n comp_ir_set_copy = copy.copy(comp_ir_set)\n specified_card, source_ctx = ctx.pending_cardinality[ptrcls]\n\n stmtctx.get_pointer_cardinality_later(\n ptrcls=ptrcls, irexpr=comp_ir_set_copy,\n specified_card=specified_card, source_ctx=source_ctx,\n ctx=ctx)\n\n def _check_cardinality(ctx):\n if ptrcls.singular(ctx.env.schema):\n stmtctx.enforce_singleton_now(comp_ir_set_copy, ctx=ctx)\n\n stmtctx.at_stmt_fini(_check_cardinality, ctx=ctx)\n\n comp_ir_set.stype = ptrcls.get_target(ctx.env.schema)\n comp_ir_set.path_id = path_id\n comp_ir_set.rptr = rptr\n\n rptr.target = comp_ir_set\n\n return comp_ir_set", "def parse_set(field, star_range):\n ranges = tuple(parse_range(r, star_range) for r in field.split(\",\"))\n return crontab.Set(ranges)", "def set_of(element: Type) -> SetType:\n return SetType(element)", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def copy(self):\n copy = TemporalGraph(self._start, self._end)\n copy.update(self)\n return copy", "def aspset(self):\n try:\n return pset([x.aspset() for x in self])\n except Exception:\n try:\n return frozenpset([x.aspset() for x in self])\n except Exception:\n pass\n return frozenpset([x for x in self])", "def create_intrusion_set(\n name: str,\n created_by: Optional[stix2.Identity] = None,\n created: Optional[datetime] = None,\n modified: Optional[datetime] = None,\n description: Optional[str] = None,\n aliases: Optional[List[str]] = None,\n first_seen: Optional[datetime] = None,\n last_seen: Optional[datetime] = None,\n goals: Optional[List[str]] = None,\n resource_level: Optional[str] = None,\n primary_motivation: Optional[str] = None,\n secondary_motivations: Optional[List[str]] = None,\n labels: Optional[List[str]] = None,\n confidence: Optional[int] = None,\n external_references: Optional[List[stix2.ExternalReference]] = None,\n object_markings: Optional[List[stix2.MarkingDefinition]] = None,\n) -> stix2.IntrusionSet:\n return stix2.IntrusionSet(\n id=IntrusionSet.generate_id(name),\n created_by_ref=created_by,\n created=created,\n modified=modified,\n name=name,\n description=description,\n aliases=aliases,\n first_seen=first_seen,\n last_seen=last_seen,\n goals=goals,\n resource_level=resource_level,\n primary_motivation=primary_motivation,\n secondary_motivations=secondary_motivations,\n labels=labels,\n confidence=confidence,\n external_references=external_references,\n object_marking_refs=object_markings,\n )", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def frozenset(self) -> frozenset:\n return frozenset(self)", "def __init__(self, name=None):\n self.id = id # Unique identifier for the set\n self._next_id = 0 # Holds unique ids for graphs\n self._graphs = {} # Holds graphs, keyed by unique id\n self.name = name # Holds description of graph", "def create_relation_superset(self):\n return filter(lambda x: x[0] != x[1],\n super().create_relation_superset())", "def ChangeEdgeSet(self, *args):\n return _BRepAlgo.BRepAlgo_DSAccess_ChangeEdgeSet(self, *args)", "def fresh_copy(self):\n return OrderedMultiDiGraph()", "def __init__(self, values=None):\n\n self.dict = {} # each instance of Set has its own dict 
property\n # which is what we'll use to track memnerships\n if values is not None:\n for value in values:\n self.add(value)", "def set(self, **variables):\n new = copy(self)\n new.__dict__.update(variables)\n return new", "def _build_ID_sets(self):\n raise NotImplementedError", "def load_set_by_id(set_id):\n return get_default_repo().get_set_by_id(set_id)", "def getSets(unique_name=None):", "def add_set(self): # TODO test\n self.set_tree.remove_node(self.adding_node)\n i = len(self.exercise.sets)\n self.exercise.sets.append(Set())\n item = TreeViewLabel(text=\"Set \" + str(i))\n set_node = TreeViewSet(exercise=self.exercise, set_id=i, session=self.session)\n self.set_tree.add_node(item)\n self.set_tree.add_node(set_node, item)\n self.set_tree.add_node(self.adding_node)\n print(\"add set\")", "def copy(self,\n oid=None,\n local_edges=None,\n foreign_edges=None,\n transaction_id=None):\n if oid is None:\n oid = self.oid\n if local_edges is None:\n local_edges = self.local_edges\n if foreign_edges is None:\n foreign_edges = self.foreign_edges\n if transaction_id is None:\n transaction_id = self._transaction_id\n\n return _GraphRow(oid, local_edges, foreign_edges, transaction_id)", "def __rsub__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__rsub__', other)", "def assign_ids(ast):\n def f_either(obj, *child_results):\n id_ = slast.SlAst.id_\n obj.id_ = id_[0]\n id_[0] += 1\n\n # def f_either(obj, *child_results):\n # _id_dict = slast.SlAst._id_dict\n # id_ = slast.SlAst.id_\n # # FIXME: Assign same id to all data predicate calls with the same root/stop-nodes\n # key = str(obj.to_sl_expr())\n # if key in _id_dict:\n # obj.id_ = _id_dict[key]\n # else:\n # obj.id_ = id_[0]\n # _id_dict[key] = id_[0]\n # id_[0] += 1\n\n astutils.fold(f_either, f_either, ast)", "def copy(self):\n new = self.__class__()\n do_not_copy_by_ref = {\"alleles\", \"strains\", \"base_cobra_model\", \"notes\",\n \"annotation\"}\n for attr in self.__dict__:\n if attr not in do_not_copy_by_ref:\n new.__dict__[attr] = self.__dict__[attr]\n new.notes = deepcopy(self.notes)\n new.annotation = deepcopy(self.annotation)\n\n new.alleles = DictList()\n do_not_copy_by_ref = {\"_strains\", \"_model\"}\n for allele in self.alleles:\n new_allele = allele.__class__()\n for attr, value in iteritems(allele.__dict__):\n if attr not in do_not_copy_by_ref:\n new_allele.__dict__[attr] = copy(\n value) if attr == \"formula\" else value\n new_allele._model = new\n new.alleles.append(new_allele)\n\n new.strains = DictList()\n do_not_copy_by_ref = {\"_model\", \"_alleles\", \"_base_cobra_model\"}\n for strain in self.strains:\n new_strain = strain.__class__()\n for attr, value in iteritems(strain.__dict__):\n if attr not in do_not_copy_by_ref:\n new_strain.__dict__[attr] = copy(value)\n new_strain._model = new\n new.strains.append(new_strain)\n # update awareness\n for allele, stoic in iteritems(strain._alleles):\n new_allele = new.alleles.get_by_id(allele.id)\n new_strain._alleles[new_allele] = stoic\n new_allele._strain.add(new_strain)\n # it doesn't make sense to retain the context of a copied model so\n # assign a new empty context\n new._contexts = list()", "def difference(self, rng_set: Union[Rangelike, Iterable[Rangelike]]) -> 'RangeSet':\n new_rng_set = self.copy()\n new_rng_set.difference_update(RangeSet(rng_set))\n return new_rng_set", "def __init__(self, elements_or_ids=None, uidoc=revit.uidoc):\r\n\r\n BaseObjectWrapper.__init__(self, uidoc.Selection)\r\n self.uidoc = uidoc\r\n\r\n if not elements_or_ids:\r\n # Is 
List of elements is not provided, uses uidoc selection\r\n elements_or_ids = [e for e in uidoc.Selection.GetElementIds()]\r\n\r\n ElementSet.__init__(self, elements_or_ids, doc=self.uidoc.Document)", "def _get_set(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'set', set(), decode=decode)", "def copy(self):\n return self.__class__(self.name, list(self.gRNAs))", "def duplicate(self):\n\t\treturn Graph(self.vertices[:], self.edges[:])", "def create_sets(self,FD_SET=['FinalD'],VA_SET=['VA']):\n \n self.m.S = Set(initialize=self.sectors, doc='sectors')\n self.m.P = Set(initialize=self.products, doc='sectors')\n self.m.row = Set(initialize=self.products, doc='products')\n self.m.col = Set(initialize=self.sectors+['FinalD'], doc='sectors and final demand')\n \n self.m.rROW = Set(initialize=self.countries,ordered=True, doc='regions including export')\n self.m.R = Set(initialize=self.countries,ordered=True, doc='regions')\n\n self.m.fdemand = Set(initialize=FD_SET, doc='Final Demand')\n\n self.m.VA = Set(initialize=VA_SET, doc='value added')", "def copy(self):\n cls = self.__class__\n new_graph = cls.__new__(cls)\n new_graph._nodes = self._nodes[:]\n new_graph._node_wip = self._node_wip[:]\n new_graph._edges = self._edges[:]\n if self._sorted_nodes:\n new_graph._sorted_nodes = self._sorted_nodes[:]\n else:\n new_graph._sorted_nodes = None\n new_graph.predecessors = {}\n for key, val in self.predecessors.items():\n new_graph.predecessors[key] = self.predecessors[key][:]\n new_graph.successors = {}\n for key, val in self.successors.items():\n new_graph.successors[key] = self.successors[key][:]\n return new_graph", "def import_set(dset, in_stream):\n dset.wipe()\n dict = to_dict(in_stream)\n dict = harmonize(dict)\n dset.dict = dict" ]
[ "0.6440217", "0.58299524", "0.56834817", "0.53686655", "0.5344023", "0.5315017", "0.52834666", "0.5264932", "0.5153018", "0.50790906", "0.5055263", "0.5012567", "0.49638426", "0.49290437", "0.49098164", "0.4880802", "0.48652387", "0.4844571", "0.48155996", "0.48035938", "0.47817656", "0.477516", "0.47622675", "0.47521996", "0.4746269", "0.47249737", "0.46983075", "0.46812952", "0.46733373", "0.46682075", "0.46395242", "0.46312666", "0.46204618", "0.46146473", "0.46067572", "0.4595296", "0.4550714", "0.45506534", "0.45406657", "0.45342878", "0.45218968", "0.45204055", "0.45116603", "0.45098263", "0.4502614", "0.45021406", "0.44919753", "0.44641063", "0.44477436", "0.44392678", "0.4428327", "0.44104052", "0.44084042", "0.4406673", "0.4401632", "0.44007856", "0.43878657", "0.43859968", "0.43782264", "0.4375806", "0.4375806", "0.43565103", "0.4338568", "0.4338568", "0.43363193", "0.43328252", "0.4330082", "0.43298435", "0.43224257", "0.4319166", "0.43186027", "0.4309196", "0.43081716", "0.4305811", "0.42941204", "0.42864016", "0.42842525", "0.42798576", "0.4279729", "0.42383277", "0.42366457", "0.42365825", "0.4234195", "0.42304686", "0.42292875", "0.42241278", "0.42225826", "0.42212874", "0.42180583", "0.42168665", "0.4215049", "0.4212075", "0.42093727", "0.420805", "0.4207993", "0.4206148", "0.4201236", "0.41982222", "0.41975042", "0.41965845" ]
0.85868055
0
Create an ir.Set representing the given EdgeQL path expression.
def compile_path(expr: qlast.Path, *, ctx: context.ContextLevel) -> irast.Set: anchors = ctx.anchors path_tip = None if expr.partial: if ctx.partial_path_prefix is not None: path_tip = ctx.partial_path_prefix else: raise errors.QueryError( 'could not resolve partial path ', context=expr.context) extra_scopes = {} computables = [] path_sets = [] for i, step in enumerate(expr.steps): if isinstance(step, qlast.Source): # 'self' can only appear as the starting path label # syntactically and is a known anchor path_tip = anchors[step.__class__] elif isinstance(step, qlast.Subject): # '__subject__' can only appear as the starting path label # syntactically and is a known anchor path_tip = anchors[step.__class__] elif isinstance(step, qlast.ObjectRef): if i > 0: raise RuntimeError( 'unexpected ObjectRef as a non-first path item') refnode = None if not step.module and step.name not in ctx.aliased_views: # Check if the starting path label is a known anchor refnode = anchors.get(step.name) if refnode is not None: path_tip = new_set_from_set( refnode, preserve_scope_ns=True, ctx=ctx) else: stype = schemactx.get_schema_type( step, item_types=(s_objtypes.ObjectType,), ctx=ctx) if (stype.get_view_type(ctx.env.schema) is not None and stype.get_name(ctx.env.schema) not in ctx.view_nodes): # This is a schema-level view, as opposed to # a WITH-block or inline alias view. stype = stmtctx.declare_view_from_schema(stype, ctx=ctx) path_tip = class_set(stype, ctx=ctx) view_set = ctx.view_sets.get(stype) if view_set is not None: path_tip = new_set_from_set(view_set, ctx=ctx) path_scope = ctx.path_scope_map.get(view_set) extra_scopes[path_tip] = path_scope.copy() view_scls = ctx.class_view_overrides.get( stype.get_name(ctx.env.schema)) if view_scls is not None: path_tip.stype = view_scls elif isinstance(step, qlast.Ptr): # Pointer traversal step ptr_expr = step ptr_target = None direction = (ptr_expr.direction or s_pointers.PointerDirection.Outbound) if ptr_expr.target: # ... link [IS Target] ptr_target = schemactx.get_schema_type( ptr_expr.target.maintype, ctx=ctx) if not isinstance(ptr_target, s_objtypes.ObjectType): raise errors.QueryError( f'invalid type filter operand: ' f'{ptr_target.get_name(ctx.env.schema)} ' f'is not an object type', context=ptr_expr.target.context) ptr_name = ptr_expr.ptr.name if ptr_expr.type == 'property': # Link property reference; the source is the # link immediately preceding this step in the path. 
source = path_tip.rptr.ptrcls else: source = path_tip.stype with ctx.newscope(fenced=True, temporary=True) as subctx: if isinstance(source, s_abc.Tuple): path_tip = tuple_indirection_set( path_tip, source=source, ptr_name=ptr_name, source_context=step.context, ctx=subctx) else: path_tip = ptr_step_set( path_tip, source=source, ptr_name=ptr_name, direction=direction, ptr_target=ptr_target, ignore_computable=True, source_context=step.context, ctx=subctx) ptrcls = path_tip.rptr.ptrcls if _is_computable_ptr(ptrcls, ctx=ctx): computables.append(path_tip) else: # Arbitrary expression if i > 0: raise RuntimeError( 'unexpected expression as a non-first path item') with ctx.newscope(fenced=True, temporary=True) as subctx: path_tip = ensure_set( dispatch.compile(step, ctx=subctx), ctx=subctx) if path_tip.path_id.is_type_indirection_path(ctx.env.schema): scope_set = path_tip.rptr.source else: scope_set = path_tip extra_scopes[scope_set] = subctx.path_scope for key_path_id in path_tip.path_id.iter_weak_namespace_prefixes(): mapped = ctx.view_map.get(key_path_id) if mapped is not None: path_tip = new_set( path_id=mapped.path_id, stype=path_tip.stype, expr=mapped.expr, rptr=mapped.rptr, ctx=ctx) break path_sets.append(path_tip) path_tip.context = expr.context pathctx.register_set_in_scope(path_tip, ctx=ctx) for ir_set in computables: scope = ctx.path_scope.find_descendant(ir_set.path_id) if scope is None: # The path is already in the scope, no point # in recompiling the computable expression. continue with ctx.new() as subctx: subctx.path_scope = scope comp_ir_set = computable_ptr_set(ir_set.rptr, ctx=subctx) i = path_sets.index(ir_set) if i != len(path_sets) - 1: path_sets[i + 1].rptr.source = comp_ir_set else: path_tip = comp_ir_set path_sets[i] = comp_ir_set for ir_set, scope in extra_scopes.items(): node = ctx.path_scope.find_descendant(ir_set.path_id) if node is None: # The path portion not being a descendant means # that is is already present in the scope above us, # along with the view scope. continue fuse_scope_branch(ir_set, node, scope, ctx=ctx) if ir_set.path_scope_id is None: pathctx.assign_set_scope(ir_set, node, ctx=ctx) return path_tip
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n path_id: typing.Optional[irast.PathId]=None,\n stype: typing.Optional[s_types.Type]=None,\n ctx: context.ContextLevel) -> irast.Set:\n if path_id is None:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n if stype is None:\n stype = ir_set.stype\n result = new_set(\n path_id=path_id,\n path_scope_id=ir_set.path_scope_id,\n stype=stype,\n expr=ir_set.expr,\n ctx=ctx\n )\n result.rptr = ir_set.rptr\n return result", "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def EdgesSetCreate(TrajectoryEdges):\n listOfEdges = []\n for edgesList in TrajectoryEdges:\n for edge in edgesList:\n listOfEdges.append(edge)\n setOfEdges = list(set(listOfEdges))\n return setOfEdges, listOfEdges", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def parse_set(field, star_range):\n ranges = tuple(parse_range(r, star_range) for r in field.split(\",\"))\n return crontab.Set(ranges)", "def ChangeEdgeSet(self, *args):\n return _BRepAlgo.BRepAlgo_DSAccess_ChangeEdgeSet(self, *args)", "def __init__(self, rf: bool=False, atts: Sctids=None, eq: bool=True, ecv: Sctids=None, query=None, _mt_instance=None):\n Set.__init__(self, Quads)\n _Instance.__init__(self, RF2_Quad)\n RF2_Substrate_Common.__init__(self)\n self._val = self\n self._type = Quads\n if _mt_instance:\n self._query = \"SELECT id AS rid, sourceId AS id, typeId, destinationId, gid FROM %s WHERE 0\" % \\\n (RelationshipDB.fname() + \"_ext\")\n self.rf = False\n self._len = 0\n else:\n self._len = None # number of elements\n if query:\n self._query = query\n else:\n self._query = \"SELECT id AS rid, sourceId\" + (\" AS id,\" if not rf else \",\")\n self._query += \" typeId, destinationId\" + (\" AS id,\" if rf else \",\")\n self._query += \" gid FROM %s\" % RelationshipDB.fname() + '_ext'\n self._query += \" WHERE \"\n if atts is not None:\n self._query += ((\"typeId IN (%s)\" % atts.as_sql()) if eq else\n (\"typeId NOT IN (%s)\" % atts.as_sql())) + \" AND \"\n if ecv is not None:\n self._query += ((\"sourceId IN (%s)\" % ecv.as_sql()) if rf else\n (\"destinationId IN (%s)\" % ecv.as_sql())) + \" AND \"\n self._query += \"active=1 AND locked=0\"\n self.rf = rf", "def path_conditions(self) -> [Exp]:\n raise NotImplementedError()", "def getSet(unique_name):", "def getSet(unique_name):", "def make_set(node):\n node.parent = node\n node.rank = 0", "def edges(self) -> Set[Tuple[int, int]] : \n edges : Set[Tuple[int, int]] = set()\n for node_id in self.nodes: # iterator over id's\n for adj_node in self.nodes[node_id]:\n edge = (node_id, adj_node)\n if self.directed:\n edges.add(edge)\n else:\n if edge[::-1] not in edges: # if reverse edge not in edges...\n edges.add(edge)\n return edges", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def _dag_dependents(db: Redis[bytes], dag_of: hash_t, op_from: hash_t) -> set[hash_t]:\n return __set_as_hashes(\n db, join(DAG_OPERATIONS, dag_of), join(OPERATIONS, op_from, \"children\")\n )", "def paths_set(self):\n return self._paths_set", "def make_iq_set(self, sub=None):\n iq = self.Iq()._set_stanza_values({'type': 'set'})\n if sub != None:\n iq.append(sub)\n return iq", "def _expand_synset(self, synset: str, cq: str) -> List[str]:\n 
expanded_variants = set()\n if re.search(synset, cq) is None:\n # given synset does not occur in a CQ\n return [cq] # nothing to expand\n else:\n for synonym in self.synonymes[synset]:\n expanded_variants.add(re.sub(re.escape(synset), synonym, cq))\n return expanded_variants", "def _makeEdges(self):\n self.edges = set()\n\n for i in range(self.size):\n self.edges.add(makePair(self.tour[i - 1], self.tour[i]))", "def build_set(self, s):\n comma = self.art_type([self.string_type(', ')], baseline=0)\n repr_elems = self.concatenate(s, comma)\n return self.build_container(\n repr_elems, self.left_curly_brace, self.right_curly_brace)", "def path_condition(self) -> Exp:\n return EAll(self.path_conditions())", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def add_set(self, elts):\n self.bloom.add_set([x.filter_type for x in elts])", "def path2edge(iterable,graph):\r\n return (graph.es[graph.get_eid(pair[0],pair[1])] for pair in pairwise(iterable))", "def set():", "def get_edges(self):\n return \\\n set({\n edge\n for node in self.nodeset\n for edge in node.get_incident_edges()\n })", "def cast_value_to_set(self, name: str, value: Iterable) -> Set:\n return set(self.get_object_from_name(elem, name) for elem in value)", "def get_extended_by(self, edge):\n return Path(self, edge)", "def make_query(self, qsrc):\n\n g = self.world.as_rdflib_graph()\n\n r = g.query_owlready(qsrc)\n res_list = []\n for elt in r:\n # ensure that here each element is a sequences of lenght 1\n assert len(elt) == 1\n res_list.append(elt[0])\n\n # drop duplicates\n return set(res_list)", "def test_pathop8(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.bar | xpb.foobar).parenthesize() & xpb.action.source\n exp = '(/foo/bar or /foobar) and /action/source'\n self.assertEqual(xp.tostring(), exp)", "def set(self) -> set:\n return set(self)", "def make_set_from_start_endpoint(start_endpoint: Text,\n endpoints: Sequence[Text]):\n if start_endpoint not in endpoints:\n return set()\n start_index = endpoints.index(start_endpoint)\n return set(endpoints[start_index:])", "def get_opcode_set(path):\n opcode_set = set()\n input_files = glob.glob(path + \"\\\\**\\\\*.txt\", recursive=True)\n for input_file in input_files:\n with open(input_file) as file_handler:\n for opcode in [line.rstrip('\\n') for line in file_handler.readlines()]:\n opcode_set.add(opcode)\n return opcode_set", "def build_set(ls, dsets):\n\n def noh(ls, dsets):\n \"\"\"\n This function remove hydrogens from the selection\n \"\"\"\n data_set = build_set(ls[1], dsets)\n\n noh_set = set()\n pred = oechem.OEIsHydrogen()\n\n for idx in data_set:\n atom = system.GetAtom(oechem.OEHasAtomIdx(idx))\n if not pred(atom):\n noh_set.add(idx)\n\n return noh_set\n\n def residues(ls):\n \"\"\"\n This function select residues based on the residue numbers. An example of\n selection can be:\n mask = 'resid A:16 17 19 B:1'\n \"\"\"\n # List residue atom index to be restrained\n res_atom_set = set()\n\n # Dictionary of lists with the chain residues selected to be restrained\n # e.g. 
{chainA:[res1, res15], chainB:[res19, res17]}\n chain_dic = {'': []}\n\n # Fill out the chain dictionary\n i = 0\n while i < len(ls):\n if ls[i].isdigit():\n chain_dic[''].append(int(ls[i]))\n i += 1\n else:\n try:\n chain_dic[ls[i]].append(int(ls[i + 2]))\n except:\n chain_dic[ls[i]] = []\n chain_dic[ls[i]].append(int(ls[i + 2]))\n i += 3\n\n # Loop over the molecular system to select the atom indexes to be selected\n hv = oechem.OEHierView(system, oechem.OEAssumption_BondedResidue + oechem.OEAssumption_ResPerceived)\n for chain in hv.GetChains():\n chain_id = chain.GetChainID()\n if chain_id not in chain_dic:\n continue\n for frag in chain.GetFragments():\n for hres in frag.GetResidues():\n res_num = hres.GetOEResidue().GetResidueNumber()\n if res_num not in chain_dic[chain_id]:\n continue\n for oe_at in hres.GetAtoms():\n res_atom_set.add(oe_at.GetIdx())\n\n return res_atom_set\n\n def around(dist, ls):\n \"\"\"\n This function select atom not far than the threshold distance from\n the current selection. The threshold distance is in Angstrom\n\n selection can be:\n mask = '5.0 around ligand'\n \"\"\"\n # at = system.GetAtom(oechem.OEHasAtomIdx(idx))\n\n # Atom set selection\n atom_set_around = set()\n\n # Create a OE bit vector mask for each atoms\n bv_around = oechem.OEBitVector(system.GetMaxAtomIdx())\n\n # Set the mask atom\n for at in system.GetAtoms():\n if at.GetIdx() in ls:\n bv_around.SetBitOn(at.GetIdx())\n\n # Predicate\n pred = oechem.OEAtomIdxSelected(bv_around)\n\n # Create the system molecule based on the atom mask\n molecules = oechem.OEMol()\n oechem.OESubsetMol(molecules, system, pred)\n\n # Create the Nearest neighbours\n nn = oechem.OENearestNbrs(system, float(dist))\n\n for nbrs in nn.GetNbrs(molecules):\n for atom in oechem.OEGetResidueAtoms(nbrs.GetBgn()):\n if atom.GetIdx() in ls:\n continue\n atom_set_around.add(atom.GetIdx())\n\n return atom_set_around\n\n # Start Body of the selection function by language\n\n # Terminal Literal return the related set\n if isinstance(ls, str):\n return dsets[ls]\n # Not or Noh\n if len(ls) == 2:\n if ls[0] == 'noh': # Noh case\n return noh(ls, dsets)\n elif ls[0] == 'not': # Not case\n return dsets['system'] - build_set(ls[1], dsets)\n else: # Resid case with one index\n return residues(ls[1])\n\n if len(ls) == 3:\n if ls[1] == 'or': # Or Case (set union)\n return build_set(ls[0], dsets) | build_set(ls[2], dsets)\n elif ls[1] == 'and': # And Case (set intersection)\n return build_set(ls[0], dsets) & build_set(ls[2], dsets)\n elif ls[1] == 'diff': # Diff case (set difference)\n return build_set(ls[0], dsets) - build_set(ls[2], dsets)\n elif ls[1] == 'around': # Around case\n return around(ls[0], build_set(ls[2], dsets))\n else:\n return residues(ls[1:]) # Resid case with one or two indexes\n else:\n if ls[0] == 'resid':\n return residues(ls[1:]) # Resid case with multiple indexes\n else:\n raise ValueError(\"The passed list have too many tokens: {}\".format(ls))", "def createAddressSet(self) -> ghidra.program.model.address.AddressSet:\n ...", "def build_set(x, y):\n # E_w[yy^T]\n y_y_t = la.inv(np.dot(y, y.transpose()))\n h_matrix = np.dot(np.dot(x, y), y_y_t)\n return h_matrix", "def test_pathop7(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar | xpb.foobar & xpb.action.source\n exp = '/foo/bar or /foobar and /action/source'\n self.assertEqual(xp.tostring(), exp)", "def build_keyset(self, field):\n keys = set()\n for r in self.__elements__:\n keys.add(getattr(r, field))\n return list(keys)", "def expand_paths_by_nodes(self, 
paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted", "def getSets(unique_name=None):", "def get_edges(self):\n return_set = set()\n for outer_index, outer_list in enumerate(self._adjmatrix):\n for inner_index, inner_item in enumerate(outer_list):\n if(inner_item):\n return_set.add(\n (self._name[outer_index],\n self._name[inner_index]))\n return return_set", "def getSets():", "def test_pathop3(self):\n xpb = XPathBuilder()\n # do not confuse with xpath's union op!\n xp = xpb.a.b.c | xpb.foo\n exp = '/a/b/c or /foo'\n self.assertEqual(xp.tostring(), exp)", "def copy(self):\n r = SubsSet()\n r.rewrites = self.rewrites.copy()\n for expr, var in self.items():\n r[expr] = var\n return r", "def get_query_set(self):\n return ArchiverQuerySet(self.model, using=self._db)", "def SetOperator(self, op):\n return _hypre.HypreGMRES_SetOperator(self, op)", "def iden(self, q):\n return self.append(IdGate(), [q], [])", "def SetOperator(self, op):\n return _hypre.HypreEuclid_SetOperator(self, op)", "def test_set_passed_as_iterable():\n tree = Tree([10, 5, 100])\n assert tree.root.value == 10\n assert tree.root.left.value == 5\n assert tree.root.right.value == 100", "def _subset(self, idxs):\n vertices = [self.vertices[i] for i in idxs]\n subset = Line(vertices, properties=self.properties, crs=self.crs)\n return subset", "def __eq__(self, values):\n set_values = [set_value(val) for val in values]\n self.filter.filter_domain.set.elements.extend(set_values)\n return self", "def aspset(self):\n try:\n return pset([x.aspset() for x in self])\n except Exception:\n try:\n return frozenpset([x.aspset() for x in self])\n except Exception:\n pass\n return frozenpset([x for x in self])", "def create_path_query(path, action, start=None):\n supported_actions = ['MATCH', 'CREATE UNIQUE']\n if action.upper() in supported_actions:\n if not start:\n query = 'START r=node:root(root_name = \"ndn\")\\n' +\\\n '%s (r)' % action.upper()\n else:\n query = 'START s=node(%s)\\n' % start + \\\n '%s (s)' % action.upper()\n else:\n raise UnsupportedQueryException(\"unsupported query\")\n\n assert(len(path) % 2 == 0)\n path_len = len(path) / 2\n items = ['-[%s]->(%s)'] * path_len\n query += ''.join(items)\n query = query % tuple(path)\n query += ' \\nRETURN (%s)' % path[-1].split(':')[0]\n\n return query", "def __init__(self):\n self.EntireSet = []", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def __init__(self, set):\n Rule.__init__(self)\n self.__set = set", "def extend_path(\n source_set: irast.Set,\n ptrcls: s_pointers.Pointer,\n direction: PtrDir=PtrDir.Outbound,\n target: typing.Optional[s_nodes.Node]=None, *,\n ignore_computable: bool=False,\n force_computable: bool=False,\n unnest_fence: bool=False,\n same_computable_scope: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n\n if ptrcls.is_link_property(ctx.env.schema):\n src_path_id = 
source_set.path_id.ptr_path()\n else:\n if direction != s_pointers.PointerDirection.Inbound:\n source = ptrcls.get_near_endpoint(ctx.env.schema, direction)\n if not source_set.stype.issubclass(ctx.env.schema, source):\n # Polymorphic link reference\n source_set = class_indirection_set(\n source_set, source, optional=True, ctx=ctx)\n\n src_path_id = source_set.path_id\n\n if target is None:\n target = ptrcls.get_far_endpoint(ctx.env.schema, direction)\n path_id = src_path_id.extend(ptrcls, direction, target,\n ns=ctx.path_id_namespace,\n schema=ctx.env.schema)\n\n target_set = new_set(stype=target, path_id=path_id, ctx=ctx)\n\n ptr = irast.Pointer(\n source=source_set,\n target=target_set,\n ptrcls=ptrcls,\n direction=direction\n )\n\n target_set.rptr = ptr\n\n if (not ignore_computable and _is_computable_ptr(\n ptrcls, force_computable=force_computable, ctx=ctx)):\n target_set = computable_ptr_set(\n ptr, unnest_fence=unnest_fence,\n same_computable_scope=same_computable_scope, ctx=ctx)\n\n return target_set", "def test_pathop10(self):\n xpb = XPathBuilder()\n xp = (xpb.foo & xpb.bar | xpb.baz).parenthesize() & xpb.foobar\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)", "def edge_set(maze, objects, someset, dictionary):\n # TODO: Write your code here\n #objs = maze.getObjectives()\n objs = objects\n objs_cached = copy.copy(objs)\n edge_set = someset\n # shortest_path_by_edge = dictionary\n length = len(objs)\n for i in range(0, length):\n for j in range(i+1, length):\n path = astar_set(maze, objs[i], objs[j])\n bisect.insort(edge_set, (len(path) - 1, (objs[i], objs[j])))\n # shortest_path_by_edge[(objs[i], objs[j])] = path\n return", "def get_id_set(id_set_path: str) -> dict:\n if id_set_path:\n id_set = open_id_set_file(id_set_path)\n else:\n id_set, _, _ = IDSetCreator(print_logs=False).create_id_set()\n return id_set", "def test_pathop12(self):\n xpb = XPathBuilder()\n # braces not needed\n xp = xpb.foo & (xpb.bar.foo).parenthesize() | xpb.foobar\n exp = '/foo and (/bar/foo) or /foobar'\n self.assertEqual(xp.tostring(), exp)", "def _create_edge_ist(self) -> EdgeList:\r\n return EdgeList(self)", "def createEdge(lines, list):\n res = lines.split('\\\\n')\n mains = res[0].split(' ')\n sid = mains[3]\n sid = sid[4:-1]\n ssource = mains[4]\n ssource = ssource[8:-1]\n starget = mains[5]\n starget = starget[8:-2]\n slabel = ''\n i = 2\n\n while ('key=' in res[i]):\n i = i + 1\n\n if ('EdgeLabel' in res[i + 4]):\n slabels = res[i + 4].split('>')\n slabel = slabels[1]\n slabel = slabel.split('<')[0]\n slabel = umlautHelper(slabel)\n\n source = findInList(ssource, list)\n target = findInList(starget, list)\n\n nline = Edge(sid, source, target)\n nline.setLabel(slabel)\n\n j = i + 1\n while ('Path' in res[j] or 'Point' in res[j]):\n j = j + 1\n\n allarrows = res[j + 1]\n if ('source=\"standard' in allarrows or 'source=\"delta' in allarrows):\n nline.setArrowSource(True)\n if ('target=\"standard' in allarrows or 'target=\"delta' in allarrows):\n nline.setArrowTarget(True)\n\n if (type(source) == Entity and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Entity and type(source) == Attribute):\n target.addAttribute(source)\n if (type(source) == Relation and type(target) == Attribute):\n source.addAttribute(target)\n if (type(target) == Relation and type(source) == Attribute):\n target.addAttribute(source)\n list.append(nline)", "def test_pathop1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar & xpb.bar.foo\n exp = '/foo/bar and 
/bar/foo'\n self.assertEqual(xp.tostring(), exp)", "def test_pandas_edge_set_to_cugraph_edge_set():\n dpr = mg.resolver\n pdf = pd.DataFrame({\"src\": (0, 0, 2, 1, 3), \"dst\": (1, 2, 0, 2, 2)})\n x = dpr.wrappers.EdgeSet.PandasEdgeSet(\n pdf, src_label=\"src\", dst_label=\"dst\", is_directed=True\n )\n\n sources = [0, 0, 1, 2, 3]\n destinations = [1, 2, 2, 0, 2]\n cdf = cudf.DataFrame({\"source\": sources, \"destination\": destinations})\n g = cugraph.DiGraph()\n g.from_cudf_edgelist(cdf, source=\"source\", destination=\"destination\")\n intermediate = dpr.wrappers.EdgeSet.CuGraphEdgeSet(g)\n y = dpr.translate(x, CuGraphEdgeSet)\n dpr.assert_equal(y, intermediate)\n assert len(dpr.plan.translate(x, CuGraphEdgeSet)) == 1", "def _build_ID_sets(self):\n raise NotImplementedError", "def test_pathop11(self):\n xpb = XPathBuilder()\n xp = (xpb.foo.log_and(xpb.bar)\n .log_or(xpb.baz).parenthesize()\n .log_and(xpb.foobar))\n exp = '(/foo and /bar or /baz) and /foobar'\n self.assertEqual(xp.tostring(), exp)\n # different notation but same xpath expression (no explicit braces!)\n xp = ((xpb.foo.log_and(xpb.bar.log_or(xpb.baz)))\n .parenthesize().log_and(xpb.foobar))", "def __pow__(self, other):\n if other == 2:\n # cartesian product\n new_set = Set()\n for s1 in self:\n for s2 in self:\n new_set += Set(List([[s1, s2]]))\n return new_set\n raise TypeError(\n f\"{other} must be 2 to compute cartesian product of a set with itself\")", "def get_special_paths(set_path: str, set_value, sp_to_label: Dict, label_to_ship: Dict):\n additions = set()\n if type(set_value) is not dict:\n # If this path is already labelled as special, then check that the ship matched with it does fit\n if set_path in sp_to_label:\n if label_to_ship[sp_to_label[set_path]].check_fit(set_value):\n return additions\n\n # If this is something a ship covers, add it as a special path\n for ship in label_to_ship.values():\n if ship.check_fit(set_value):\n additions.add( (set_path, ship.get_label()) )\n\n # If this is a dict, recursively build the set of additional paths\n else:\n for k, v in set_value.items():\n assert check_valid_key_name(k), \"Invalid Key: {}\".format(k)\n if set_path != Path.rootPath():\n child_path = \"{}.{}\".format(set_path, k)\n else:\n child_path = \".{}\".format(k)\n child_add = get_special_paths(child_path, v, sp_to_label, label_to_ship)\n additions = additions.union(child_add)\n\n return additions", "def filter_paths(self, paths):\n formatted_paths = set()\n for path in paths:\n formatted_path = []\n if self.include_entity:\n if len(path) == 3:\n continue\n formatted_path.append(self.idx_to_node[path[0]].get_name())\n for rdx in range(0, (len(path)-1)/2):\n formatted_path.append(self.idx_to_relation[path[rdx*2+1]])\n formatted_path.append(self.idx_to_node[path[rdx*2+2]].get_name())\n else:\n if len(path) == 1:\n continue\n for rel_idx in path:\n formatted_path.append(self.idx_to_relation[rel_idx])\n formatted_paths.add(tuple(formatted_path))\n return formatted_paths", "def as_relational(self, symbol):\n return And(*[set.as_relational(symbol) for set in self.args])", "def dstruc_from_edge_set(self, edge_set):\n\t\tself.edge_dict = {}\n\t\tself.vertex_dict = {}\n\t\tedge_list = edge_set[2:-2].split('},{')\n\t\tfor edge in edge_list:\n\t\t\tvertex_list = edge.split(',')\n\t\t\tvertex_list = map(int, vertex_list)\n\t\t\tvertex_list = (vertex_list[0], vertex_list[1])\n\t\t\tself.edge_dict[vertex_list] = 1\n\t\t\tvertex1 = vertex_list[0]\n\t\t\tvertex2 = 
vertex_list[1]\n\t\t\tself.vertex_pool.add(vertex1)\n\t\t\tself.vertex_pool.add(vertex2)\n\t\t\tif vertex1 not in self.vertex_dict:\n\t\t\t\tself.vertex_dict[vertex1] = 1\n\t\t\tif vertex2 not in self.vertex_dict:\n\t\t\t\tself.vertex_dict[vertex2] = 1", "def __and__(self, rs):\n revs = {}\n for r in self._revs.keys():\n if r in rs:\n revs[r] = 1\n return RevisionSet(revs)", "def _decode_v1(self, line: str) -> Set[str]:\n\n result = set()\n\n local_line = line.strip()\n\n if local_line.startswith(\"||\") and (\n local_line.endswith(\"^\") or local_line.endswith(\"$\")\n ):\n local_line = local_line.replace(\"||\", \"\", 1)\n\n if local_line.endswith(\"^\"):\n local_line = \"\".join(local_line.rsplit(\"^\", 1))\n elif local_line.endswith(\"$\"):\n local_line = \"\".join(local_line.rsplit(\"$\", 1))\n\n result.update(self._decode_multiple_subject(local_line))\n\n return {x for x in result if \".\" in x}", "def _eidset():\n\n dbrosters = set()\n with sqlite3.connect(DB) as db:\n cursor = db.cursor()\n cursor.execute(\"SELECT eid FROM players\")\n rows = cursor.fetchall()\n for row in rows:\n dbrosters.add(int(row[0]))\n # return set.\n return dbrosters", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def from_iterable (cls, iterable):\n\n iterable = tuple (iterable)\n return Path (iterable, 0, len (iterable))", "def parseSet(cmds):\n if len(cmds) != 0:\n first = str.strip(cmds[0])\n if first[0] == 'w':\n pass\n elif first[0] == 'r':\n pass\n else:\n parseExpr(first)\n parseSet(cmds[1:])", "def visited_set(self):\n return visited_set(self._dtrajs)", "def get_seen_statements_from(path: str) -> set:\n return set([int(s) for s in replace_multiple_chars(path, ['/', '-', '?'], ' ').split() if s.isdigit()])", "def getVertexInfluenceSet(self):\n return _osgAnimation.RigGeometry_getVertexInfluenceSet(self)", "def __init__(self, field = None, value_set = None, discard = False):\r\n super(SetSelectNode, self).__init__()\r\n self.field = field\r\n self.value_set = value_set\r\n self.discard = discard", "def power_set(self):\n if self._is_empty():\n return Set([Set()])\n\n copy_set = self._clone()\n\n element = copy_set.__from__()\n\n power_set = copy_set.power_set()\n\n result = Set()\n\n for item in power_set:\n result += Set([Set([element]) + item]) + Set([item])\n return result", "def to_set(self) -> Set[Tuple[int, int]]:\n return set(self.steps)", "def test_build_run_id_query(self):\n run_ids = [\"bar\"]\n query = build_run_id_query(run_ids)\n # (AND: (('filemetadata__metadata__runId', 'bar'))\n expected_query = Q(metadata__runId=\"bar\")\n self.assertEqual(query, expected_query)\n\n # run them through set then back to list to ensure ordering for testing\n run_ids = set([\"bar\", \"baz\"])\n query = build_run_id_query(run_ids)\n # query.__dict__\n # {'children': [('filemetadata__metadata__runId', 'baz'), ('filemetadata__metadata__runId', 'bar')], 'connector': 'OR', 'negated': False}\n\n # order not guaranteed due to usage of set inside build_run_id_query\n expected_query = Q(filemetadata__metadata__runId=\"bar\") | Q(filemetadata__metadata__runId=\"baz\")\n # (OR: ('filemetadata__metadata__runId', 'baz'), 
('filemetadata__metadata__runId', 'bar'))\n\n self.assertTrue((\"metadata__runId\", \"baz\") in query.__dict__[\"children\"])\n self.assertTrue((\"metadata__runId\", \"bar\") in query.__dict__[\"children\"])\n self.assertTrue(query.__dict__[\"connector\"] == \"OR\")\n self.assertTrue(query.__dict__[\"negated\"] == False)", "def convert_tree_as_set_to_adjacencies(tree):\n edges = {}\n for i, j in tree:\n if i not in edges:\n edges[i] = [j]\n else:\n edges[i].append(j)\n if j not in edges:\n edges[j] = [i]\n else:\n edges[j].append(i)\n return edges", "def expand (keys):\r\n finalkeys = set()\r\n\r\n for key in keys:\r\n returnkeyset = set()\r\n\r\n if SLASH in key:\r\n has_tags = True\r\n tag_tail = key.split(SLASH)[1]\r\n key = key.split(SLASH)[0]\r\n else:\r\n has_tags = False\r\n tag_tail = EMPTYCHAR\r\n if ATSIGN in key or PERIOD not in key or PERIOD+BLANK in key or key[0].isnumeric():\r\n all_keys = [key]\r\n else:\r\n key_parts = key.split(PERIOD)\r\n if len(key_parts)==2:\r\n all_keys = [key_parts[1],\r\n key_parts[0]+BLANK+key_parts[1],\r\n key_parts[0][0]+BLANK+key_parts[1]]\r\n else:\r\n abbreviated = EMPTYCHAR\r\n for x in key_parts[0:-1]:\r\n abbreviated += x[0].upper()\r\n\r\n\r\n all_keys = [key_parts[-1],\r\n key_parts[0]+BLANK+key_parts[-1],\r\n BLANK.join(key_parts),\r\n abbreviated+BLANK+key_parts[-1]]\r\n for k in all_keys:\r\n returnkeyset.add(k+SLASH*has_tags+tag_tail)\r\n\r\n if len(returnkeyset) > 1:\r\n if input('ADD '+', '.join(returnkeyset)+' AS EQUIVALENCES?') in YESTERMS:\r\n\r\n display.noteprint(('ADDING EQUIVALENTS',', '.join(returnkeyset)))\r\n self.default_dict['equivalences'].new_class(list(returnkeyset))\r\n finalkeys.add(key.replace('.',' '))\r\n else:\r\n finalkeys.update(returnkeyset)\r\n else:\r\n finalkeys.update(returnkeyset)\r\n\r\n return finalkeys", "def split_equation_set_v1(eqn_set):\n\n # used for tiebreaker of priority key\n nEq = len(eqn_set.eqns)\n\n solve_sets = set()\n underconstrained_set = EqnSet()\n\n # keep track of what has been visited\n unique_eqn_combos = set()\n unsolved_eqns = set(eqn_set.eqns)\n\n # Initialize priority queue with the equations in the input set\n pq = [EqnSet().add(eqn) for eqn in eqn_set.eqns]\n pq.sort(key=lambda p: p.key(nEq))\n\n while pq:\n eqn_set = pq.pop()\n\n if eqn_set.is_solvable():\n # set this equation set as solved\n solve_sets.add(eqn_set)\n eqn_set.set_solved()\n unsolved_eqns.difference_update(eqn_set.eqns)\n\n # discard this equation set from all sets in the pq\n for p in pq:\n p.discard(eqn_set)\n\n # delete any empty eqn sets and re-sort the pq\n pq = filter(lambda p: not p.is_empty(), pq)\n pq.sort(key=lambda p: p.key(nEq))\n\n unique_eqn_combos = set(frozenset(eqs.eqns | eqs.vars) for eqs in pq)\n\n else:\n # add the frontier to the pq\n for eqs in eqn_set.frontier():\n eqn_combo = frozenset(eqs.eqns | eqs.vars)\n if eqn_combo not in unique_eqn_combos:\n unique_eqn_combos.add(eqn_combo)\n pq.add(eqs)\n\n pq.sort(key=lambda p: p.key(nEq))\n\n # create eqn set(s) of underconstrained systems\n underconstrained_set = EqnSet()\n for eqn in unsolved_eqns:\n underconstrained_set.add(eqn)\n\n underconstrained_set.set_solved()\n\n return solve_sets, underconstrained_set", "def str_to_productionset(string):\r\n return strlist_to_production_set(string.split('\\n'))", "def path_to_edges(path):\n return list((u, v) for u, v in zip(path[:-1], path[1:]))", "def transitive_closure(self, term_id_list):\n\n edges = set()\n visited = set(term_id_list)\n\n for term_id in term_id_list:\n current_term = 
term_id\n\n while current_term != 'KEGG Pathway':\n next_term = self.term2parent[current_term]\n edges.add((current_term, 'is_a', next_term))\n visited.add(current_term)\n current_term = next_term\n\n return visited, edges", "def derive_path(self, path):\n next_node = self\n for identifier in path:\n next_node = next_node.derive_one(identifier)\n\n return next_node", "def test_path8(self):\n xpb = XPathBuilder()\n xp_1 = xpb.foo.baz\n xp_2 = xpb.bar.abc.join(xp_1)\n exp = '/bar/abc/foo/baz'\n self.assertEqual(xp_1, xp_2)\n self.assertEqual(xp_2.tostring(), exp)", "def split_equation_set_v2(eqn_set):\n\n # used for tiebreaker of priority key\n nEq = len(eqn_set.eqns)\n\n solve_sets = set()\n underconstrained_set = EqnSet()\n\n # keep track of what has been visited\n unique_eqn_combos = set()\n unsolved_eqns = set(eqn_set.eqns)\n\n # Initialize priority queue with the equations in the input set\n pq = [EqnSet().add(eqn) for eqn in eqn_set.eqns]\n pq.sort(key=lambda p: p.key(nEq))\n\n while pq:\n # try to solve as many eqn sets as possible\n eqn_set = None\n while pq and pq[-1].is_solvable():\n eqn_set = pq.pop()\n\n # set this equation set as solved\n solve_sets.add(eqn_set)\n eqn_set.set_solved()\n unsolved_eqns.difference_update(eqn_set.eqns)\n\n # discard this equation set from all sets in the pq\n for p in pq:\n p.discard(eqn_set)\n\n # then sort the pq if solves happened, otherwise add to the pq\n if eqn_set:\n # delete any empty eqn sets and re-sort the pq\n pq = filter(lambda p: not p.is_empty(), pq)\n pq.sort(key=lambda p: p.key(nEq))\n\n unique_eqn_combos = set(frozenset(eqs.eqns | eqs.vars) for eqs in pq)\n\n else:\n eqn_set = pq.pop()\n\n # add the frontier to the pq\n for eqs in eqn_set.frontier():\n eqn_combo = frozenset(eqs.eqns | eqs.vars)\n if eqn_combo not in unique_eqn_combos:\n unique_eqn_combos.add(eqn_combo)\n pq.add(eqs)\n\n pq.sort(key=lambda p: p.key(nEq))\n\n # create eqn set(s) of underconstrained systems\n underconstrained_set = EqnSet()\n for eqn in unsolved_eqns:\n underconstrained_set.add(eqn)\n\n underconstrained_set.set_solved()\n\n return solve_sets, underconstrained_set", "def convert_formula(formula):\n return [set(clause) for clause in formula]", "def get_nodeset(self):\n return set(self.nodeset) # return the nodeset", "def query_set(self):\n return self._query_tag_bitmask.masked_select_from_list(\n self.api_workflow_client.filenames_on_server\n )", "def GetEdgesQuad(self):\n\n p = self.InferPolynomialDegree()\n\n # DO NOT COMPUTE IF ALREADY COMPUTED\n if isinstance(self.all_edges,np.ndarray):\n if self.all_edges.shape[0] > 1:\n # IF LINEAR VERSION IS COMPUTED, DO COMPUTE HIGHER VERSION\n if self.all_edges.shape[1]==2 and p > 1:\n pass\n else:\n return self.all_edges\n\n node_arranger = NodeArrangementQuad(p-1)[0]\n\n # GET ALL EDGES FROM THE ELEMENT CONNECTIVITY\n edges = np.concatenate((self.elements[:,node_arranger[0,:]],self.elements[:,node_arranger[1,:]],\n self.elements[:,node_arranger[2,:]],self.elements[:,node_arranger[3,:]]),axis=0).astype(np.uint64)\n\n # REMOVE DUPLICATES\n edges, idx = unique2d(edges,consider_sort=True,order=False,return_index=True)\n\n edge_to_element = np.zeros((edges.shape[0],2),np.int64)\n edge_to_element[:,0] = idx % self.elements.shape[0]\n edge_to_element[:,1] = idx // self.elements.shape[0]\n\n self.edge_to_element = edge_to_element\n\n # DO NOT SET all_edges IF THE CALLER FUNCTION IS GetBoundaryEdgesHex\n import inspect\n curframe = inspect.currentframe()\n calframe = inspect.getouterframes(curframe, 2)[1][3]\n\n if 
calframe != \"GetBoundaryEdgesHex\":\n self.all_edges = edges\n\n return edges", "def sat_apply_assignment(self, assignment):\n # YOUR CODE HERE\n o = set()\n print(s)\n print({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})\n for x in s.clauses:\n if not isinstance(x.simplify(assignment), bool):\n o.add(x.simplify(assignment))\n print(\"ASSIGN SET\", o)\n\n return SAT(o)\n # return SAT({x.simplify(assignment) for x in self.clauses if not isinstance(x.simplify(assignment), bool)})", "def xpath_as_xml(self, expr=''):\n results = []\n for result in self.xpath(expr):\n if result:\n results.append(result.toxml())\n \n return results", "def load_rep_word(et_rel_path):\n rep_word_set = set()\n\n with open(et_rel_path) as f:\n for edge in f:\n entry = edge.strip().split()\n rep_word_set.add(entry[1])\n\n return rep_word_set" ]
[ "0.5790698", "0.5771053", "0.5408858", "0.5098475", "0.50746995", "0.49623165", "0.4907437", "0.48602664", "0.48467776", "0.48467776", "0.48342997", "0.4825272", "0.48136362", "0.4735757", "0.47344112", "0.47056362", "0.47041735", "0.46564844", "0.46460888", "0.4642975", "0.46206197", "0.46159732", "0.46038043", "0.4586158", "0.45848596", "0.4583798", "0.45677322", "0.4562677", "0.45515785", "0.45423552", "0.45332563", "0.45316088", "0.45193663", "0.45155537", "0.45143923", "0.45132774", "0.45122856", "0.4504969", "0.44996712", "0.44969845", "0.4486553", "0.44681317", "0.44672546", "0.44461608", "0.44359374", "0.44284707", "0.4428444", "0.44284424", "0.4422148", "0.4420771", "0.44152924", "0.44151515", "0.44110885", "0.44061223", "0.44061223", "0.44056457", "0.43995994", "0.43977606", "0.43941", "0.43851867", "0.43787572", "0.43690196", "0.43566242", "0.43546823", "0.43540826", "0.4347318", "0.43450662", "0.43414003", "0.43395856", "0.43392763", "0.43295828", "0.43285218", "0.4317298", "0.43161634", "0.4315666", "0.43104166", "0.43020666", "0.42999545", "0.4297152", "0.42933413", "0.42890126", "0.4285418", "0.4285286", "0.42812264", "0.42776522", "0.42725053", "0.42650837", "0.42640096", "0.42624402", "0.4260529", "0.4258935", "0.42564285", "0.42529982", "0.4246861", "0.42456058", "0.42410222", "0.4231742", "0.4230255", "0.4229587", "0.42293975" ]
0.5753197
2
Return a Set node representing the new path tip.
def extend_path( source_set: irast.Set, ptrcls: s_pointers.Pointer, direction: PtrDir=PtrDir.Outbound, target: typing.Optional[s_nodes.Node]=None, *, ignore_computable: bool=False, force_computable: bool=False, unnest_fence: bool=False, same_computable_scope: bool=False, ctx: context.ContextLevel) -> irast.Set: if ptrcls.is_link_property(ctx.env.schema): src_path_id = source_set.path_id.ptr_path() else: if direction != s_pointers.PointerDirection.Inbound: source = ptrcls.get_near_endpoint(ctx.env.schema, direction) if not source_set.stype.issubclass(ctx.env.schema, source): # Polymorphic link reference source_set = class_indirection_set( source_set, source, optional=True, ctx=ctx) src_path_id = source_set.path_id if target is None: target = ptrcls.get_far_endpoint(ctx.env.schema, direction) path_id = src_path_id.extend(ptrcls, direction, target, ns=ctx.path_id_namespace, schema=ctx.env.schema) target_set = new_set(stype=target, path_id=path_id, ctx=ctx) ptr = irast.Pointer( source=source_set, target=target_set, ptrcls=ptrcls, direction=direction ) target_set.rptr = ptr if (not ignore_computable and _is_computable_ptr( ptrcls, force_computable=force_computable, ctx=ctx)): target_set = computable_ptr_set( ptr, unnest_fence=unnest_fence, same_computable_scope=same_computable_scope, ctx=ctx) return target_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xpathNewNodeSet(self):\n ret = libxml2mod.xmlXPathNewNodeSet(self._o)\n if ret is None:raise xpathError('xmlXPathNewNodeSet() failed')\n return xpathObjectRet(ret)", "def make_set(node):\n node.parent = node\n node.rank = 0", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def NodesSetCreate(TrajectoryPoints):\n listOfNodes = []\n for dictChord in TrajectoryPoints:\n for node in dictChord.values():\n listOfNodes.append(node)\n setOfNodes = list(set(listOfNodes))\n return setOfNodes", "def __repr__(self):\n return 'TreeSet([{0}])'.format(','.join(str(item) for item in self))", "def nodeset(self):\n return self._nodeset", "def ipset_one_x_new():\n x = np.linspace(-3, 3, 11)\n return IPSet(x=x, y=x ** 2, x_new=1)", "def create_path_new(self):\n\n \n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n nodes_visited = []\n nodes_visited.append([])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n prev_gps = (-1.0,-1.0)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n new_edges = self.find_edges((lat,lon),prev_gps)\n for add_edge in new_edges:\n edge_sets[matrices_index][add_edge] = 1\n else:\n edge_sets[matrices_index][edge_num] = 1\n\n if coords[0] == -1:\n matrices.append([[[0 for i in range(self.graph.cols)] for i in range(self.graph.rows)],0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n nodes_visited.append([])\n matrices_index += 1\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n normalized = normalize_simple(self.graph.lines[cur_line])\n prev_gps = (lat,lon)\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n #for coords in nodes_visited[best_index]:\n # self.graph.node_visit(self.trip_id,coords)\n\n #if self.trip_id not in self.graph.trip_id2line_num:\n # self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],first_lasts[best_index]", "def __str__(self):\n # string representation includes values of all inner fields\n return \"Nodeset: \" + 
\"\\n\".join([node.__str__() for node in self.nodeset]) + \"\\n\"", "def add_set(self): # TODO test\n self.set_tree.remove_node(self.adding_node)\n i = len(self.exercise.sets)\n self.exercise.sets.append(Set())\n item = TreeViewLabel(text=\"Set \" + str(i))\n set_node = TreeViewSet(exercise=self.exercise, set_id=i, session=self.session)\n self.set_tree.add_node(item)\n self.set_tree.add_node(set_node, item)\n self.set_tree.add_node(self.adding_node)\n print(\"add set\")", "def xpointerNewLocationSetNodes(self, end):\n if end is None: end__o = None\n else: end__o = end._o\n ret = libxml2mod.xmlXPtrNewLocationSetNodes(self._o, end__o)\n if ret is None:raise treeError('xmlXPtrNewLocationSetNodes() failed')\n return xpathObjectRet(ret)", "def ipset():\n return IPSet(x=np.linspace(0, 10, 11), y=np.random.randn(11), x_new=np.linspace(3, 9, 20))", "def __init__(self):\n\n self.nodes = set()", "def ipset_below():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(-2, 2, 5))", "def get_nodeset(self):\n return set(self.nodeset) # return the nodeset", "def XCAFDoc_GraphNode_Set(*args):\n return _XCAFDoc.XCAFDoc_GraphNode_Set(*args)", "def create_tip_index(tree):\r\n if hasattr(tree, '_tip_index'):\r\n return\r\n else:\r\n tree._tip_index = {n.Name: n for n in tree.tips()}", "def Set(*args):\n return _XCAFDoc.XCAFDoc_GraphNode_Set(*args)", "def differentNodesForNode(ntupleSet,nodeList,verbose=False):\n nodesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n for nodeInTuple in ntuple:\n nodesPerNode[nodeInTuple].extend(ntuple)\n \n for a,v in nodesPerNode.iteritems():\n nodesPerNode[a] = set(v)\n \n return nodesPerNode", "def differentNTuplesForNode(ntupleSet,nodeList,verbose=False):\n ntuplesPerNode = dict(zip(nodeList,[[] for n in range(len(nodeList))]))\n for ntuple in ntupleSet:\n ntuple.sort()\n joinedTuple = \"\".join(ntuple)\n for nodeInTuple in ntuple:\n ntuplesPerNode[nodeInTuple].append(joinedTuple)\n \n for a,v in ntuplesPerNode.iteritems():\n ntuplesPerNode[a] = set(v)\n \n return ntuplesPerNode", "def ipset_above():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(8, 12, 5))", "def initial_nodes_to_merge(tree):\r\n to_process = set([])\r\n for n in tree.tips():\r\n sibs_are_tips = [s.istip() for s in n.siblings()]\r\n if all(sibs_are_tips):\r\n to_process.add(n.Parent)\r\n return to_process", "def ipset_same_x():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(0, 10, 11))", "def root(self, node):\n\n if self.set[node] == node:\n return node\n\n self.set[node] = self.root(self.set[node])\n return self.set[node]", "def get_settemp(self):\n return self.settemp", "def createGraphPoint(self, cls, newId):\n def getUniqueId(container, base):\n ids = set(container.objectIds())\n new = base\n i = 2\n while new in ids:\n new = '%s%s' % (base, i)\n i += 1\n return new\n newId = getUniqueId(self.graphPoints, newId)\n gp = cls(newId)\n # Set sequence\n if gp.isThreshold:\n gp.sequence = -1\n else:\n gp.sequence = len(self.graphPoints())\n # Set legend for graph points on multigraph reports\n if self.report() and hasattr(gp, 'legend'):\n # For MultiGraphReports we use a fancier legend\n # to differentiate when you have multiple devices/graphpoints\n # on a single graph\n gp.legend = gp.DEFAULT_MULTIGRAPH_LEGEND\n self.graphPoints._setObject(gp.id, gp)\n gp = self.graphPoints._getOb(gp.id)\n if gp.sequence == -1:\n self.manage_resequenceGraphPoints()\n 
return gp", "def get_newick(self):\n tree_list = self.get_cluster_elements_labels\n newick = \"(\"\n \n \n newick = _get_newick_intermediate(tree_list,newick,False)\n\n newick = newick[0] + \");\"\n\n return newick", "def get_new_node(self):\n return TrieNode()", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def _get_set_dot1p(self):\n return self.__set_dot1p", "def get_special_paths(set_path: str, set_value, sp_to_label: Dict, label_to_ship: Dict):\n additions = set()\n if type(set_value) is not dict:\n # If this path is already labelled as special, then check that the ship matched with it does fit\n if set_path in sp_to_label:\n if label_to_ship[sp_to_label[set_path]].check_fit(set_value):\n return additions\n\n # If this is something a ship covers, add it as a special path\n for ship in label_to_ship.values():\n if ship.check_fit(set_value):\n additions.add( (set_path, ship.get_label()) )\n\n # If this is a dict, recursively build the set of additional paths\n else:\n for k, v in set_value.items():\n assert check_valid_key_name(k), \"Invalid Key: {}\".format(k)\n if set_path != Path.rootPath():\n child_path = \"{}.{}\".format(set_path, k)\n else:\n child_path = \".{}\".format(k)\n child_add = get_special_paths(child_path, v, sp_to_label, label_to_ship)\n additions = additions.union(child_add)\n\n return additions", "def new_TaskSet(self, taskset):\n if not self.has_TaskSet(taskset.metadata): \n self.add_TaskSet(taskset)\n return self.get_TaskSet(taskset.metadata)", "def add_node(self, n):\r\n keys = self.d.keys()\r\n #check for node in graph\r\n if n not in keys:\r\n self.d.update({str(n): set()})", "def copy(self):\n return PathPoint(self.species.new_species(), deepcopy(self.constraints))", "def plot_path(self, current_path):\n full_path = current_path.copy()\n full_path.insert(0, self.root)\n\n path = Marker()\n id = 1\n\n # edge between nodes\n path = Marker()\n path.header.frame_id = \"map\"\n path.header.stamp = rospy.get_rostime()\n path.ns = \"markers\"\n path.id = 1\n path.type = path.LINE_STRIP\n path.action = path.ADD\n path.scale.x = self.rviz_tuning_plt\n path.color.a = 1.0\n path.color.r = 0.0\n path.color.g = 1.0\n path.color.b = 0.0\n\n path.lifetime = rospy.Duration()\n path.pose.orientation.w = 1.0\n\n for node in full_path:\n p1 = Point()\n p1.x = node.x\n p1.y = node.y\n p1.z = 0.03\n path.points.append(p1)\n\n self.pub_path.publish(path)", "def addPath(newList, refnode):\n ele = inkex.etree.Element('{http://www.w3.org/2000/svg}path')\n ele.set('d', simplepath.formatPath(newList))\n refnode.xpath('..')[0].append(ele)\n return ele", "def belongs_to_set(self, node, set_nodes):\r\n rep = set_nodes[0]\r\n if rep.op.as_while != node.op.as_while:\r\n return False\r\n\r\n nsteps = node.inputs[0]\r\n try:\r\n nsteps = int(get_scalar_constant_value(nsteps))\r\n except tensor.NotScalarConstantError:\r\n pass\r\n\r\n rep_nsteps = rep.inputs[0]\r\n try:\r\n rep_nsteps = int(get_scalar_constant_value(rep_nsteps))\r\n except tensor.NotScalarConstantError:\r\n pass\r\n\r\n # Check to see if it is an input of a different node\r\n can_add = True\r\n for nd in set_nodes:\r\n if find_up(node, nd) or find_up(nd, node):\r\n can_add = False\r\n\r\n can_add = can_add and (node.op.truncate_gradient ==\r\n rep.op.truncate_gradient)\r\n can_add = can_add and 
(node.op.mode == rep.op.mode)\r\n if not node.op.as_while:\r\n return nsteps == rep_nsteps and can_add\r\n cond = node.op.outputs[-1]\r\n rep_cond = rep.op.outputs[-1]\r\n same_cond = scan_utils.equal_computations([cond], [rep_cond],\r\n node.op.inputs,\r\n rep.op.inputs)\r\n return same_cond and (nsteps == rep_nsteps) and can_add", "def get_new_nodes(self):\n\n return self._new_nodes", "def addManualTip(self,A):\n #obsolete?\n profbox()\n self.fiducialNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\n self.fiducialNode.Initialize(slicer.mrmlScene)\n self.fiducialNode.SetName('tip')\n self.fiducialNode.SetFiducialCoordinates(A)\n fd=self.fiducialNode.GetDisplayNode()\n fd.SetVisibility(1)\n fd.SetColor([0,1,0])", "def set_new_loc(self, line):\n return Gumtree.gumtree.setNewLoc(line)", "def show_path(self):\n\n node = self.goal\n\n while node.parent:\n node.parent.value = 1\n node = node.parent", "def visited_set(self):\n return visited_set(self._dtrajs)", "def get_tips(self):\n result = VGroup()\n if hasattr(self, \"tip\"):\n result.add(self.tip)\n if hasattr(self, \"start_tip\"):\n result.add(self.start_tip)\n return result", "def node(self):\n return self._changeset.get('node', [])", "def paths_set(self):\n return self._paths_set", "def EdgesSetCreate(TrajectoryEdges):\n listOfEdges = []\n for edgesList in TrajectoryEdges:\n for edge in edgesList:\n listOfEdges.append(edge)\n setOfEdges = list(set(listOfEdges))\n return setOfEdges, listOfEdges", "def put_node_set(self, node_set_id, node_set_node_list):\n ierr = exolib.py_expns(self.exoid, node_set_id,\n node_set_node_list + self._o)\n if ierr:\n raise ExodusIIWriterError(\"Error putting node set\")", "def get_shortest_path(self, other: \"Cell\") -> set:\n result = {self, other}\n if other in self.neighbours:\n return result\n # Recurse through shortest paths from neighbours to other.\n paths = []\n for neighbour in self.neighbours:\n paths.append(result.union(neighbour.get_shortest_path(other)))\n return min(paths)", "def new_taxon_set(ntax=10, label_func=None):\n taxon_set = TaxonSet()\n if label_func is None:\n label_idx_length = int(math.log(ntax, 10)) + 1\n label_template = \"T%%0%dd\" % (label_idx_length)\n label_func = lambda x: label_template % x\n for i in range(ntax):\n taxon_set.new_taxon(label=label_func(i+1))\n return taxon_set", "def ipset_num_x_y_different():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 3), x_new=np.linspace(2, 5, 4))", "def new_node(name):\n\n return name, []", "def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)", "def select_patch(self, pset, name):\n new_pset = {}\n local = []\n for p in pset.pop(name, []):\n if p.path:\n new_pset[p.pop()] = [p]\n else:\n local = p.slist\n return (local, new_pset)", "def addManualTip(self, A):\r\n # obsolete?\r\n profbox()\r\n self.fiducialNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n self.fiducialNode.Initialize(slicer.mrmlScene)\r\n self.fiducialNode.SetName('tip')\r\n self.fiducialNode.SetFiducialCoordinates(A)\r\n fd = self.fiducialNode.GetDisplayNode()\r\n fd.SetVisibility(1)\r\n fd.SetColor([0, 1, 0])", "def test_create_tip_index(self):\r\n t = DndParser(\"((a,b)c,(d,e)f)g;\")\r\n qiime.simsam.create_tip_index(t)\r\n 
self.assertEqual({'a':t.getNodeMatchingName('a'),\r\n 'b':t.getNodeMatchingName('b'),\r\n 'd':t.getNodeMatchingName('d'),\r\n 'e':t.getNodeMatchingName('e')}, t._tip_index)", "def test_install_set_new(self):\n expected = copy.deepcopy(test_xdata)\n newtext = lxml.etree.SubElement(expected, \"NewText\")\n newtext.text = \"new content\"\n self._install([lxml.etree.Element(\"Set\", path=\"Test/NewText/#text\",\n value=\"new content\")],\n expected)", "def nodes(self):\n return set(self.values())", "def set_node_tooltip_bypass(node_names, new_tooltip, network=None, base_url=DEFAULT_BASE_URL):\n res = set_node_property_bypass(node_names, new_tooltip, 'NODE_TOOLTIP', network=network, base_url=base_url)\n return res", "def add_tip(self, tip_length=None, at_start=False):\n tip = self.create_tip(tip_length, at_start)\n self.reset_endpoints_based_on_tip(tip, at_start)\n self.asign_tip_attr(tip, at_start)\n self.add(tip)\n return self", "def see(self, cut):\n newptree = PTree()\n newptree._root = self._root.see(cut)\n return newptree", "def get_set(css_class_name, set_num=0):\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set", "def duplicateNode(self):\n\n try:\n self.__duplicate(nuke.selectedNode())\n except:\n nuke.message(\"Error - no node selected\")", "def get_set(self, which_set):\n return (getattr(self, 'x_' + which_set),\n getattr(self, 'y_' + which_set))", "def __repr__(self: 'StarTree') -> str:\n return 'StarTree({})'.format(repr(self.children[0]))", "def setGoalNode(self, newGoal):\r\n\t\tself.goalNode = newGoal", "def __call__(self):\n new_node = Node()\n new_node.op = self\n return new_node", "def create_tip(self, tip_length=None, at_start=False):\n tip = self.get_unpositioned_tip(tip_length)\n self.position_tip(tip, at_start)\n return tip", "def nodesInTuples(setOfTuples):\n nodesList = []\n for tupl in setOfTuples:\n nodesList.extend(tupl)\n return list(set(nodesList))", "def createSplineWarpNodeMI():\n return gt()", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def get_setpoint(self):\n svc = \"urn:upnp-org:serviceId:TemperatureSetpoint1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n\n return self.get_variable(svc, \"CurrentSetpoint\")", "def update_tip_names(tree, taxdict):\n\n list_nodes = []\n uniprot_mapping = pd.DataFrame(columns=['taxid', 'name', 'uniprot'])\n\n counter = 0\n for node in tree.traverse(\"postorder\"):\n current_name = node.name\n\n if 'NMR' in current_name:\n new_name = \"Heterocephalus_glaber\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\" \n uniprot_mapping.loc[counter] = (taxid, new_name, \"UP000006813\")\n counter += 1\n\n elif 'Nfurzer' in current_name:\n new_name = \"Nothobranchius_furzeri\"\n node.name = new_name\n list_nodes.append(node.name)\n taxid = \"NA\"\n uniprot_mapping.loc[counter] = (taxid, new_name, new_name)\n counter += 1\n\n elif 'TAX' in current_name:\n taxid = current_name[3:].split('x')[0]\n new_name 
= taxdict.get(taxid, taxid) \n node.name = new_name \n list_nodes.append(node.name)\n unip = get_uniprot(taxid, accession)\n uniprot_mapping.loc[counter] = (taxid, new_name, unip)\n counter += 1\n\n\n \n tree.write(outfile=\"../../data/tree/tree.nw\")\n\n nodes_df = pd.DataFrame(list_nodes)\n nodes_df.to_csv(\"../../data/tree/tree_list_nodes.txt\", index=False, header=False)\n\n uniprot_mapping.to_csv(\"../../data/tree/tree_uniprot.txt\", sep='\\t', index=False, header=True)\n\n return tree, list_nodes", "def create_nodes(self):", "def get_fan_set_point(self):\n return self.__fan_set_point", "def to_set(self) -> Set[Tuple[int, int]]:\n return set(self.steps)", "def ipset_x_repeating():\n x = np.linspace(0, 10, 11)\n x[5] = x[4]\n return IPSet(x=x, y=np.linspace(-1, 1, 11), x_new=np.linspace(2, 5, 7))", "def __str__(self):\n return self.root_node", "def setnx(self, key, value):\n return self.set(key, value, nx=True)", "def __str__(self):\n return (\"Element set with name {0} containing elements with the \"\n \"following ids {1}\".format(self.name, self.ids))", "def get_nodes(self):\n return_set = set()\n for value in self._name:\n return_set.add(value)\n return return_set", "def reroot(tree, tipnames, tmp_nodename=\"TEMPORARY_ROOT_NODE_NAME\"):\n node = tree.lowestCommonAncestor(tipnames)\n\n # make a new node that sits inbetween LCA and parent\n parent = node.Parent\n parent.removeNode(node)\n node.Parent = None\n new_node = parent.__class__()\n new_node.Name = tmp_nodename\n\n if hasattr(new_node, 'Length') and new_node.Length:\n new_node.Length = node.Length / 2.0\n node.Length = node.Length / 2.0\n\n # add node back to tree and reconnect LCA\n parent.append(new_node)\n new_node.append(node)\n\n # root at the new node, unset its temporary name\n new_tree = tree.rootedAt(tmp_nodename)\n new_root = new_tree.getNodeMatchingName(tmp_nodename)\n new_root.Name = None\n\n # remove the stupid edge names that rootedAt -> unrootedDeepcopy adds in\n for n in new_tree.nontips():\n if n.Name and n.Name.startswith('edge.'):\n edge_split = n.Name.split('.')\n \n # verify the edge name is of the form we expect\n try:\n tmp = int(edge_split[1])\n n.Name = None\n except:\n pass\n\n # collapse single descendents if they exist\n new_tree.prune()\n\n return new_tree", "def post_nodeset(body): # noqa: E501\n if connexion.request.is_json:\n body = NodeSet.from_dict(connexion.request.get_json()) # noqa: E501\n return NodesetController.post_nodeset(body)", "def __call__(self):\r\n new_node = Node()\r\n new_node.op = self\r\n return new_node", "def pset(self):\n return plist([pset(x) for x in self], root=self.__root__)", "def ChooseNodePath(self):\n nodePath = ChooseContainerNode()\n if nodePath:\n self.nodePathEdit.setText(nodePath)", "def nodes(self):\n\n return list(set(self._graph.keys() + [x for x in itertools.chain.from_iterable(self._graph.values())]))", "def add_nodes(list_of_ids, G, singleGraph):\r\n road_set=set()\r\n for id, pm, dir, coords, hwy in list_of_ids:\r\n id_dict=dict(lat=coords[0], lon=coords[1], dire=dir, mile=pm, road=hwy)\r\n G.add_node(id, id_dict)\r\n singleGraph.add_node(id)\r\n singleGraph.position[id]=(coords[1], coords[0])\r\n road_set.add(int(hwy))\r\n print 'road set: ', road_set\r\n return road_set, G, singleGraph", "def clone(self):\n return XLNodeID(self._node_id)", "def set_node_positions(self):", "def __init__(self):\n self.EntireSet = []", "def produce_duplicate_disconnected_node(self):\n # retain the original name and attributes, but clear all outgoing and incoming edges\n 
return \\\n Node(\n self.get_name(),\n dict(self.get_attributes()),\n set()\n )", "def produce_duplicate_disconnected_node(self):\n # retain the original name and attributes, but clear all outgoing and incoming edges\n return \\\n Node(\n self.get_name(),\n dict(self.get_attributes()),\n set()\n )", "def Nodelocation(self, Tract_pop, Tractx, Tracty, longitude, latitude, cnum):\n import annealsimulation\n \n self.latl, self.lonl = [], []\n \n while(len(self.latl) != self.nodenum):\n lat = np.random.randint(len(self.Geoy) - 1)\n lon = np.random.randint(len(self.Geox) - 1)\n if(lat not in self.latl or lon not in self.lonl):\n self.latl.append(lat)\n self.lonl.append(lon) \n \n self.latl, self.lonl = np.array(self.latl), np.array(self.lonl)\n \n self.demandlat, self.demandlon = self.latl[self.demandseries], self.lonl[self.demandseries]\n self.tranlat, self.tranlon = self.latl[self.transeries], self.lonl[self.transeries]\n self.supplylat, self.supplylon = self.latl[self.supplyseries], self.lonl[self.supplyseries]\n \n self.demandloc = np.stack((self.demandlat, self.demandlon)).transpose()\n self.tranloc = np.stack((self.tranlat, self.tranlon)).transpose()\n self.supplyloc = np.stack((self.supplylat, self.supplylon)).transpose()\n \n #Demand node\n Geox1 = sf.FeatureScaling(self.Geox)\n Geoy1 = sf.FeatureScaling(self.Geoy)\n Tract_pop1 = sf.FeatureScaling(Tract_pop)\n Tractx1 = sf.FeatureScaling(Tractx)\n Tracty1 = sf.FeatureScaling(Tracty)\n \n self.demandloc, self.demandc, self.popuassign = ans.anneal2(self.demandloc, 'Population', Geox1, Geoy1, Tract_pop1, Tractx1, Tracty1, Tract_pop, cnum)\n self.demandy1 = Geoy1[self.demandloc[:, 0]]\n self.demandx1 = Geox1[self.demandloc[:, 1]]\n self.demandy = self.Geoy[self.demandloc[:, 0]]\n self.demandx = self.Geox[self.demandloc[:, 1]]\n #Transmission node\n self.tranloc, self.tranc, temp = ans.anneal2(self.tranloc, 'Facility', Geox1, Geoy1, Tract_pop1, self.demandx1, self.demandy1, Tract_pop, cnum)\n self.trany1 = Geoy1[self.tranloc[:, 0]]\n self.tranx1 = Geox1[self.tranloc[:, 1]]\n self.trany = self.Geoy[self.tranloc[:, 0]]\n self.tranx = self.Geox[self.tranloc[:, 1]]\n\n #Supply node\n self.supplyloc, self.supplyc, temp = ans.anneal2(self.supplyloc, 'Facility', Geox1, Geoy1, Tract_pop1, self.tranx1, self.trany1, Tract_pop, cnum)\n self.supplyy1 = Geoy1[self.supplyloc[:, 0]]\n self.supplyx1 = Geox1[self.supplyloc[:, 1]] \n self.supplyy = self.Geoy[self.supplyloc[:, 0]]\n self.supplyx = self.Geox[self.supplyloc[:, 1]]\n \n ##Coordinates of nodes\n self.y = np.concatenate((self.supplyy, self.trany, self.demandy))\n self.x = np.concatenate((self.supplyx, self.tranx, self.demandx))\n \n ##Latitudes and longitudes of nodes\n self.demandlatitude, self.demandlongitude = latitude[self.demandloc[:, 0]], longitude[self.demandloc[:, 1]]\n self.tranlatitude, self.tranlongitude = latitude[self.tranloc[:, 0]], longitude[self.tranloc[:, 1]]\n self.supplylatitude, self.supplylongitude = latitude[self.supplyloc[:, 0]], longitude[self.supplyloc[:, 1]]\n \n self.latitude = np.concatenate((self.supplylatitude, self.tranlatitude, self.demandlatitude))\n self.longitude = np.concatenate((self.supplylongitude, self.tranlongitude, self.demandlongitude))", "def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]", "def svn_client_diff_summarize_t_node_kind_set(svn_client_diff_summarize_t_self, svn_node_kind_t_node_kind): # real signature unknown; restored from __doc__\n pass", "def 
dset_h5path(dset_node):\n h5path = [dset_node.attrib['name']]\n for a in dset_node.iterancestors():\n if a.tag == DAP4_GROUP:\n h5path.append(a.attrib['name'])\n elif a.tag == DAP4_DATASET:\n h5path.append('/')\n else:\n raise ValueError('Unexpected XML element: {}'.format(a.tag))\n h5path.reverse()\n return str(PurePosixPath(*h5path))" ]
[ "0.62189937", "0.5731682", "0.5512014", "0.55116665", "0.5465013", "0.5457394", "0.5436884", "0.5389255", "0.53820276", "0.5347002", "0.52735144", "0.52636284", "0.5250073", "0.5236279", "0.51947445", "0.51905155", "0.5184115", "0.5166615", "0.5112369", "0.510633", "0.5062499", "0.50607634", "0.5045453", "0.50444025", "0.50401163", "0.5039246", "0.50226706", "0.49769354", "0.49731883", "0.49731883", "0.49731883", "0.49731883", "0.49731883", "0.49731883", "0.492238", "0.49093485", "0.49023917", "0.48881856", "0.48752612", "0.484891", "0.48417723", "0.48314902", "0.48293608", "0.4828976", "0.48111096", "0.4802781", "0.4793345", "0.47898984", "0.4786304", "0.4753983", "0.47538874", "0.47508073", "0.47384638", "0.4737162", "0.47248143", "0.47149327", "0.4704042", "0.47032714", "0.4694689", "0.46857786", "0.46790886", "0.46764186", "0.46751112", "0.46702224", "0.46621954", "0.466174", "0.4644656", "0.46334797", "0.46225008", "0.46087393", "0.46036363", "0.459297", "0.45847222", "0.4584014", "0.4574784", "0.45732108", "0.45711735", "0.457101", "0.45700067", "0.4569138", "0.45605645", "0.4560325", "0.4559893", "0.4550407", "0.45457512", "0.45443112", "0.45388514", "0.45381972", "0.45349234", "0.45325378", "0.4525134", "0.45186478", "0.4514534", "0.4508602", "0.45060343", "0.45060343", "0.45052528", "0.4504109", "0.44996214", "0.4499293" ]
0.46827704
60
Return ir.Set for a pointer defined as a computable.
def computable_ptr_set( rptr: irast.Pointer, *, unnest_fence: bool=False, same_computable_scope: bool=False, ctx: context.ContextLevel) -> irast.Set: ptrcls = rptr.ptrcls source_set = rptr.source source_scls = source_set.stype # process_view() may generate computable pointer expressions # in the form "self.linkname". To prevent infinite recursion, # self must resolve to the parent type of the view NOT the view # type itself. Similarly, when resolving computable link properties # make sure that we use rptr.ptrcls.derived_from. if source_scls.is_view(ctx.env.schema): source_set = new_set_from_set( source_set, preserve_scope_ns=True, ctx=ctx) source_set.stype = source_scls.peel_view(ctx.env.schema) source_set.shape = [] if source_set.rptr is not None: schema = ctx.env.schema derived_from = source_set.rptr.ptrcls.get_derived_from(schema) if (derived_from is not None and not derived_from.generic(schema) and derived_from.get_derived_from(schema) is not None and ptrcls.is_link_property(schema)): source_set.rptr.ptrcls = derived_from try: qlexpr, qlctx, inner_source_path_id, path_id_ns = \ ctx.source_map[ptrcls] except KeyError: ptrcls_default = ptrcls.get_default(ctx.env.schema) if not ptrcls_default: ptrcls_sn = ptrcls.get_shortname(ctx.env.schema) raise ValueError( f'{ptrcls_sn!r} is not a computable pointer') if isinstance(ptrcls_default, s_expr.ExpressionText): qlexpr = astutils.ensure_qlstmt(qlparser.parse(ptrcls_default)) else: qlexpr = qlast.BaseConstant.from_python(ptrcls_default) qlctx = None inner_source_path_id = None path_id_ns = None if qlctx is None: # Schema-level computable, completely detached context newctx = ctx.detached else: newctx = _get_computable_ctx( rptr=rptr, source=source_set, source_scls=source_scls, inner_source_path_id=inner_source_path_id, path_id_ns=path_id_ns, same_scope=same_computable_scope, qlctx=qlctx, ctx=ctx) if ptrcls.is_link_property(ctx.env.schema): source_path_id = rptr.source.path_id.ptr_path() else: source_path_id = rptr.target.path_id.src_path() path_id = source_path_id.extend( ptrcls, s_pointers.PointerDirection.Outbound, ptrcls.get_target(ctx.env.schema), ns=ctx.path_id_namespace, schema=ctx.env.schema) with newctx() as subctx: subctx.view_scls = ptrcls.get_target(ctx.env.schema) subctx.view_rptr = context.ViewRPtr( source_scls, ptrcls=ptrcls, rptr=rptr) subctx.anchors[qlast.Source] = source_set subctx.empty_result_type_hint = ptrcls.get_target(ctx.env.schema) if isinstance(qlexpr, qlast.Statement) and unnest_fence: subctx.stmt_metadata[qlexpr] = context.StatementMetadata( is_unnest_fence=True) comp_ir_set = dispatch.compile(qlexpr, ctx=subctx) if ptrcls in ctx.pending_cardinality: comp_ir_set_copy = copy.copy(comp_ir_set) specified_card, source_ctx = ctx.pending_cardinality[ptrcls] stmtctx.get_pointer_cardinality_later( ptrcls=ptrcls, irexpr=comp_ir_set_copy, specified_card=specified_card, source_ctx=source_ctx, ctx=ctx) def _check_cardinality(ctx): if ptrcls.singular(ctx.env.schema): stmtctx.enforce_singleton_now(comp_ir_set_copy, ctx=ctx) stmtctx.at_stmt_fini(_check_cardinality, ctx=ctx) comp_ir_set.stype = ptrcls.get_target(ctx.env.schema) comp_ir_set.path_id = path_id comp_ir_set.rptr = rptr rptr.target = comp_ir_set return comp_ir_set
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSet(unique_name):", "def getSet(unique_name):", "def set():", "def visit_Set(self, node):\n self.generic_visit(node)\n return to_call(to_attribute(self.operator, '__set__'), node.elts)", "def set_of(element: Type) -> SetType:\n return SetType(element)", "def __rxor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__rxor__', other)", "def owningSet(self) -> ghidra.util.graph.KeyIndexableSet:\n ...", "def set(self) -> set:\n return set(self)", "def set(x):\n pass", "def make_set(g, nodes):\n s = Set()\n names = nodes['names']\n for ii,name in enumerate(names):\n \"\"\" \n We will assume node is entirely contained\n in group if they have one atom in common\n \"\"\" \n atoms = mdn.dic2list(nodes[name]['atoms'])\n atom0 = atoms[0]\n if (atom0 in mdn.dic2list(g['atoms'])):\n s.add(ii)\n return s", "def force_frozenset(obj): \n # make it a set/tuple of 1 if it is a scalar and not a set already\n return tuple(force_hashable(obj))", "def sat_generate_candidate_assignments(self):\n # YOUR CODE HERE\n short = min(len(c) for c in self.clauses)\n for c in self.clauses:\n if len(c) == short:\n return set(c.literals)\n # return (set(x.literals) for x in self.clauses if len(x) == min(len(c) for c in self.clauses))", "def create_C1(data_set):\r\n C1 = set()\r\n for t in data_set:\r\n for item in t:\r\n item_set = frozenset([item])\r\n C1.add(item_set)\r\n return C1", "def cfset_to_set(cfset):\n count = cf.CFSetGetCount(cfset)\n buffer = (c_void_p * count)()\n cf.CFSetGetValues(cfset, byref(buffer))\n return set([cftype_to_value(c_void_p(buffer[i])) for i in range(count)])", "def create_C1(data_set):\n C1 = set()\n for t in data_set:\n for item in t:\n item_set = frozenset([item])\n C1.add(item_set)\n return C1", "def to_set(self):\n\n return frozenset(\n (i, j, self[i][j]) for i, j in self.cell_index_iter if self[i][j] is not None\n )", "def set():\n pass", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def __or__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__or__', other)", "def get_from_set(set_):\n for e in set_: return e", "def find_set(self):\n return self._set_set(self._find_set())", "def set(self):\n return self.cdb.code_to_card_set[self.set_code]", "def chain_set(mixed_chains):\n return set([i[0] for i in mixed_chains])", "def define_set():\n set_1 = set([1, 2, 3])\n print type(set_1)\n print set_1\n\n set_2 = {2, 3, 2}\n print type(set_2)\n # <type 'set'>\n print set_2\n # set([2, 3])\n\n a = set((1, 2, 3, 4))\n b = set([3, 4, 5, 6])\n print a | b # Union\n # {1, 2, 3, 4, 5, 6}\n print a & b # Intersection\n # {3, 4}\n print a < b # Subset\n # False\n print a - b # Difference\n # {1, 2}\n print a ^ b # Symmetric Difference\n # {1, 2, 5, 6}", "def valueSet(rbt):\n try:\n vlist = lt.newList('SINGLE_LINKED', rbt['cmpfunction'])\n vlist = valueSetTree(rbt['root'], vlist)\n return vlist\n except Exception as exp:\n error.reraise(exp, 'RBT:valueSet')", "def make_set(node):\n node.parent = node\n node.rank = 0", "def unique(self):\n return frozenset(self)", "def __getitem__(self, name: str) -> Set[BaseAssignment]:\n ...", "def cast_value_to_set(self, name: str, value: Iterable) -> Set:\n return set(self.get_object_from_name(elem, name) for elem in value)", "def commonSetElementPredicate(field_set: Sequence[Any]) -> FrozenSet[str]:\n\n return frozenset(str(item) for item in field_set)", "def getSets(unique_name=None):", "def get_inputs(self):\n return set()", "def mkset(item):\n if isinstance(item, set):\n return item\n elif item is None:\n 
return set()\n elif isIterable(item):\n return set(item)\n else:\n return set([item])", "def new_set_from_set(\n ir_set: irast.Set, *,\n preserve_scope_ns: bool=False,\n path_id: typing.Optional[irast.PathId]=None,\n stype: typing.Optional[s_types.Type]=None,\n ctx: context.ContextLevel) -> irast.Set:\n if path_id is None:\n path_id = ir_set.path_id\n if not preserve_scope_ns:\n path_id = path_id.merge_namespace(ctx.path_id_namespace)\n if stype is None:\n stype = ir_set.stype\n result = new_set(\n path_id=path_id,\n path_scope_id=ir_set.path_scope_id,\n stype=stype,\n expr=ir_set.expr,\n ctx=ctx\n )\n result.rptr = ir_set.rptr\n return result", "def __call__(self, uind: Set[Ind]) -> FrozenSet[Ind]:\n ks = len(uind)\n try:\n start_arity, G = self.generate_graph(uind)\n except StopIteration:\n return frozenset()\n\n _logger.info('Looking for hypercliques')\n H = find_hypercliques(G)\n _logger.info('Validating %d hypercliques', len(H))\n I = self._validate_all(H)\n\n result = set(filter(lambda i: len(i) == 1, I))\n for m in range(start_arity + 1, ks):\n _logger.info('Iteration %d (%d candidates)', m, len(I))\n _logger.info('Iteration %d (%d positives)', m, len(result))\n C = set()\n for c in I:\n if c.valid and len(c) >= m - 1:\n result.add(c)\n if not c.valid and len(c) >= m:\n C.add(c)\n k_ary = gen_k_ary_ind_from_cliques(m, C)\n _logger.info('%d %d-ary generated from %d', len(k_ary), m, len(C))\n Gm = Graph()\n Gm.E = self._validate_all(k_ary)\n Gm.E = set(filter(lambda e: e.valid, Gm.E))\n if Gm.empty() or True:\n return frozenset(map(Edge.to_ind, result))\n result.update(gen_sub_inds(m, Gm, result))\n Gm.V = frozenset(reduce(frozenset.union, map(lambda e: e.set, Gm.E), frozenset()))\n H = find_hypercliques(Gm)\n I = self._validate_all(H)\n\n # Convert candidates back to Ind\n return frozenset(map(Edge.to_ind, result))", "def __xor__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__xor__', other)", "def strict(cls):\n return frozenset()", "def get_complete_set(self):\n return self.__symbol_set", "def __init__(self):\n self.set = set()", "def distinct(self):\n memory = set()\n\n def _distinct(iterator):\n while True:\n item = next(iterator)\n if item in memory:\n continue\n memory.add(item)\n return item\n return self.__class__(self, _distinct)", "def __iter__(self):\n new_set = self._clone()\n new_set.tree.iterator = self.tree.traverse()\n return new_set", "def copy(self):\n return set(self)", "def parse_set(field, star_range):\n ranges = tuple(parse_range(r, star_range) for r in field.split(\",\"))\n return crontab.Set(ranges)", "def Set(self) -> None:", "def _mappingGetValueSet(mapping, keys):\n setUnion = set()\n for k in keys:\n setUnion = setUnion.union(mapping[k])\n return setUnion", "def aspset(self):\n try:\n return pset([x.aspset() for x in self])\n except Exception:\n try:\n return frozenpset([x.aspset() for x in self])\n except Exception:\n pass\n return frozenpset([x for x in self])", "def __init__(self, set_ptr=None):\n\n if set_ptr is None:\n self.set = ipset.ipset_new()\n else:\n self.set = set_ptr", "def get_object_references(self, value):\n return set()", "def union(stack):\n assertArity(stack, 2)\n rhs, lhs = stack.pop(), stack.pop()\n assertType(lhs, Set)\n assertType(rhs, Set)\n return Set(lhs | rhs)", "def get_nodeset(self):\n return set(self.nodeset) # return the nodeset", "def objects_in_use(self):\n return set()", "def atoms(self):\n return set(self.array_form)", "def pset(self):\n return plist([pset(x) for x in self], root=self.__root__)", 
"def _get_set(self, key, operation, create=False, decode=False):\n return self._get_by_type(key, operation, create, b'set', set(), decode=decode)", "def test_allocator_single_confilicting_sets():\n indexSets = [set([1]), set([1])]\n allocator = Allocator(indexSets)\n assert len(allocator.slots) == 2\n allocation = allocator.allocate()\n assert not allocation", "def union_sets(S):\n res = set()\n for s in S:\n res |= s\n return res", "def __ror__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__ror__', other)", "def to_set(elem_sort, *elems):\n res = LambdaSet.get_empty(elem_sort)\n for elem in elems:\n res = res.insert(elem)\n return res", "def getOneItemSet(self, transListSet):\n itemSet = set()\n for line in transListSet:\n for item in line:\n itemSet.add(frozenset([item]))\n return itemSet", "def copySet(_session, _set_src, _set_dst, _segment):\n it = _session.create_iterator(_session.sc_constraint_new(sc_constants.CONSTR_3_f_a_a,\n _set_src,\n sc.SC_ARC,\n 0), True)\n \n while not it.is_over():\n# s_el = it.value(2)\n# _idtf = _session.get_idtf(s_el)\n# el = s_el\n# if isSystemId(_idtf):\n# el = _session.create_el(_segment, _session.get_type(s_el))\n createPair(_session, _segment, _set_dst, it.value(2), _session.get_type(it.value(1)))\n it.next()", "def getSets():", "def __sub__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__sub__', other)", "def create_all_compatible(self, atomic_signature: dict, structure_signature: dict):\n return set.union(*[self.lhs.create_all_compatible(atomic_signature, structure_signature) |\n self.rhs.create_all_compatible(atomic_signature, structure_signature)])", "def __and__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__and__', other)", "def get_complement(seta):\n\n complement_set = set()\n\n for elem in seta:\n new_elem_tuple = (elem[0], float(D('1.0') - D(str(elem[1]))))\n complement_set.add(new_elem_tuple)\n\n return complement_set", "def __init__(self):\n self.s = set()", "def local_setsubtensor_of_allocs(node):\r\n if isinstance(node.op, IncSubtensor) and node.op.set_instead_of_inc:\r\n x = node.inputs[0]\r\n y = node.inputs[1]\r\n replace_x = None\r\n replace_y = None\r\n\r\n try:\r\n replace_x = get_scalar_constant_value(x)\r\n except NotScalarConstantError:\r\n pass\r\n\r\n try:\r\n replace_y = get_scalar_constant_value(y)\r\n except NotScalarConstantError:\r\n pass\r\n\r\n if (replace_x == replace_y and\r\n replace_x is not None and\r\n replace_y is not None):\r\n return [x]\r\n else:\r\n return False", "def __rand__(self, other: t.Any) -> InspectableSet[_C]:\n return self._op_copy('__rand__', other)", "def sat_solve(self):\n # YOUR CODE HERE\n o = frozenset()\n if self.isfalse:\n return False\n elif self.istrue:\n return set()\n l = self.generate_candidate_assignments()\n print(\"assignments,\", l)\n for i in l:\n st = sat_apply_assignment(self, i)\n print(\"i:\", i, \"new set\", st)\n\n if st.istrue:\n return {i}\n elif not st.isfalse:\n sat_solve(st)\n\n return {i}", "def __init__(self):\n self.EntireSet = []", "def __init__(self):\n self.container = set()", "def set_or_callable(value) -> frozenset[str] | Callable:\n if value is None:\n return frozenset()\n if callable(value):\n return value\n if isinstance(value, (frozenset, set, list)):\n return frozenset(value)\n return frozenset([str(value)])", "def list_to_set(llist : LinkedList) -> set:\n current_node = llist.head\n lset = set()\n while current_node is not None:\n lset.add(current_node.value)\n current_node = 
current_node.next\n \n return lset", "def list_to_set(l): \r\n s = { item for item in l }\r\n return s", "def intersect_sets(S):\n res = S[0]\n for s in S:\n res &= s\n return res", "def _variable_or_iterable_to_set(x):\n if x is None:\n return frozenset([])\n\n if isinstance(x, str):\n return frozenset([x])\n\n if not isinstance(x, Iterable) or not all(isinstance(xx, str) for xx in x):\n raise ValueError(\n f\"{x} is expected to be either a string, set of strings, or an iterable of strings\"\n )\n\n return frozenset(x)", "def extend_path(\n source_set: irast.Set,\n ptrcls: s_pointers.Pointer,\n direction: PtrDir=PtrDir.Outbound,\n target: typing.Optional[s_nodes.Node]=None, *,\n ignore_computable: bool=False,\n force_computable: bool=False,\n unnest_fence: bool=False,\n same_computable_scope: bool=False,\n ctx: context.ContextLevel) -> irast.Set:\n\n if ptrcls.is_link_property(ctx.env.schema):\n src_path_id = source_set.path_id.ptr_path()\n else:\n if direction != s_pointers.PointerDirection.Inbound:\n source = ptrcls.get_near_endpoint(ctx.env.schema, direction)\n if not source_set.stype.issubclass(ctx.env.schema, source):\n # Polymorphic link reference\n source_set = class_indirection_set(\n source_set, source, optional=True, ctx=ctx)\n\n src_path_id = source_set.path_id\n\n if target is None:\n target = ptrcls.get_far_endpoint(ctx.env.schema, direction)\n path_id = src_path_id.extend(ptrcls, direction, target,\n ns=ctx.path_id_namespace,\n schema=ctx.env.schema)\n\n target_set = new_set(stype=target, path_id=path_id, ctx=ctx)\n\n ptr = irast.Pointer(\n source=source_set,\n target=target_set,\n ptrcls=ptrcls,\n direction=direction\n )\n\n target_set.rptr = ptr\n\n if (not ignore_computable and _is_computable_ptr(\n ptrcls, force_computable=force_computable, ctx=ctx)):\n target_set = computable_ptr_set(\n ptr, unnest_fence=unnest_fence,\n same_computable_scope=same_computable_scope, ctx=ctx)\n\n return target_set", "def intersection(seq: Iterable[AbstractSet[VT]]) -> Optional[Set[VT]]:\n it = iter(seq)\n try:\n ret = set(next(it))\n except StopIteration:\n return None\n for elem in it:\n ret &= elem\n return ret", "def test_set(self):\n a = set()\n a.add('b')\n a.add('c')\n a.add('a')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'b', 'c'])\n a.remove('b')\n b = list(a)\n b.sort()\n self.assertEqual(b, ['a', 'c'])\n\n a.discard('d')\n\n b = set(['r', 's'])\n d = a.union(b)\n b = list(d)\n b.sort()\n self.assertEqual(b, ['a', 'c', 'r', 's'])", "def handle_set(self, agent) -> Tuple[Optional[str], Any]:\n ref_obj_d = {\"filters\": self.action_dict[\"filters\"]}\n ref_objs = self.subinterpret[\"reference_objects\"](\n self, self.speaker_name, ref_obj_d, extra_tags=[\"_physical_object\"]\n )\n if len(ref_objs) == 0:\n raise ErrorWithResponse(\"I don't know what you're referring to\")\n\n triples_d = self.action_dict[\"upsert\"][\"memory_data\"].get(\"triples\")\n if len(triples_d) == 1 and triples_d[0][\"pred_text\"] == \"has_name\":\n # the set has a name; check to see if one with that name exists,\n # if so add to it, else create one with that name\n name = triples_d[0][\"obj_text\"]\n set_memids, _ = self.memory.basic_search(\n \"SELECT MEMORY FROM Set WHERE (has_name={} OR name={})\".format(name, name)\n )\n if not set_memids:\n # make a new set, and name it\n set_memid = SetNode.create(self.memory)\n self.memory.add_triple(subj=set_memid, pred_text=\"has_name\", obj_text=name)\n else:\n # FIXME, which one\n set_memid = set_memids[0]\n else:\n # an anonymous set, assuming its new, 
and defined to hold the triple(s)\n set_memid = SetNode.create(self.memory)\n for t in triples_d:\n self.memory.add_triple(\n subj=set_memid, pred_text=t[\"pred_text\"], obj_text=t[\"obj_text\"]\n )\n for r in ref_objs:\n self.memory.add_triple(subj=r.memid, pred_text=\"member_of\", obj=set_memid)\n\n # FIXME point to the objects put in the set, otherwise explain this better\n self.memory.dialogue_stack_append_new(Say, \"OK made those objects into a set \")\n return None, None", "def __reduce_ex__(self, protocol=None):\n return (\n sm.copyreg._reconstructor,\n (type(self), set, list(self)),\n self.__getstate__(),\n )", "def prepare(self, conn): # real signature unknown; restored from __doc__\n return set(*(), **{})", "def pointsets_mod_automorphism(self, pointsets):\n points = set()\n for ps in pointsets:\n points.update(ps)\n points = tuple(points)\n Aut = self.lattice_automorphism_group(points,\n point_labels=tuple(range(len(points))))\n indexsets = set([ frozenset([points.index(p) for p in ps]) for ps in pointsets ])\n orbits = []\n while len(indexsets)>0:\n idx = indexsets.pop()\n orbits.append(frozenset([points[i] for i in idx]))\n for g in Aut:\n g_idx = frozenset([g(i) for i in idx])\n indexsets.difference_update([g_idx])\n return tuple(orbits)", "def makeAtomSet(guiName,atoms,chemAtomSet,mappingType):\n \n # RHFogh 3/12/09 - refactored to reduce getAtomSet calls\n \n atom0 = list(atoms)[0]\n project = atom0.root\n \n atomSets = [x.atomSet for x in atoms]\n atomSet0 = atomSets[0]\n aSet = set(atomSets)\n if len(aSet) != 1:\n for atomSet in aSet:\n if atomSet and not atomSet.resonanceSets:\n atomSet.delete()\n \n nmrProject = project.currentNmrProject\n \n if atomSet0 is None:\n atomSet = nmrProject.newAtomSet(atoms=atoms)\n else:\n atomSet = atomSet0\n \n residue = atom0.residue\n \n residueMapping = getResidueMapping(residue)\n if not residueMapping.findFirstAtomSetMapping(name=guiName):\n makeAtomSetMapping(residueMapping,guiName,(atomSet,),chemAtomSet,mappingType)\n\n atomSet.name = guiName\n return atomSet", "def to_set(self):\n return set(self._items)", "def to_set(self):\n return set(self._items)", "def _get_update_set(self, var):\n update_set = set()\n cvar = self.arch.expand_reg_expr(var)\n for wrt in self.syncinfo.wrt_set:\n if var.find(wrt) != -1:\n # not in write set: comparison in a lexical form\n update_set.add(wrt)\n elif AsmParser.is_register(wrt):\n # not in write set: comparison in a normalized form\n cwrt = self.arch.expand_reg_expr(wrt)\n if self._overlap_cvars(cvar, cwrt) != None:\n update_set.add(wrt)\n return update_set", "def __init__(self, name: unicode, set: ghidra.util.graph.KeyIndexableSet):\n ...", "def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n res.update(self.get_outputs())\n return res", "def new_set(*, ctx: context.ContextLevel, **kwargs) -> irast.Set:\n ir_set = irast.Set(**kwargs)\n ctx.all_sets.append(ir_set)\n return ir_set", "def get_coref_set(self,\n step_idxs: List[int],\n use_entity: bool = False,\n ) -> Set[Tuple[int, str, int, str]]:\n if len(step_idxs) <= 1:\n return []\n\n coref_participants = defaultdict(set)\n for i, step_idx in enumerate(step_idxs):\n participants = self.steps[step_idx].participants\n for participant in participants:\n if use_entity:\n for value in participant.values:\n coref_participants[value.entity].add(\n (i, participant.role))\n else:\n if participant.refvar:\n coref_participants[participant.refvar].add(\n (i, participant.role))\n\n coref_set = set()\n for participants in 
coref_participants.values():\n for (i, role_1), (j, role_2) in combinations(participants, 2):\n if i == 0:\n continue\n if i > j:\n i, j, role_1, role_2 = j, i, role_2, role_1\n coref_set.add((i, role_1, j, role_2))\n\n return coref_set", "def copy(self):\n return self.__class__(*self.sets)", "def _getitem_from_self_or_parent(self, name: str) -> Set[BaseAssignment]:\n return self[name]", "def keySet(rbt):\n try:\n klist = lt.newList('SINGLE_LINKED', rbt['cmpfunction'])\n klist = keySetTree(rbt['root'], klist)\n return klist\n except Exception as exp:\n error.reraise(exp, 'RBT:KeySet')", "def get_set(css_class_name, set_num=0):\r\n if not root:\r\n return None\r\n item = root.xpath('//dl[@class=\"%s\"]/dd' % css_class_name)\r\n if len(item) <= set_num:\r\n return None\r\n sets_node = item[set_num]\r\n item_set = set([ut.unicodeanyway(node.text).replace('\\n', '')\r\n for node\r\n in sets_node.xpath('.//a') if node.text is not None])\r\n \r\n \r\n \r\n return item_set", "def free_symbols(self) -> set[Basic]:\n empty: set[Basic] = set()\n return empty.union(*(a.free_symbols for a in self.args))", "def int_to_set(i):\n\ts = set()\n\tj = 1\n\twhile i != 0:\n\t\tif i & 1:\n\t\t\ts.add(j)\n\t\ti >>= 1\n\t\tj += 1\n\treturn s", "def ipset_same_x():\n return IPSet(x=np.linspace(0, 10, 11), y=np.linspace(-1, 1, 11), x_new=np.linspace(0, 10, 11))", "def _op_copy(self, op: str, other: t.Any) -> InspectableSet[_C]:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n retval = getattr(self.__members__, op)(other)\n if retval is not NotImplemented:\n return InspectableSet(retval)\n return NotImplemented", "def make_iq_set(self, sub=None):\n iq = self.Iq()._set_stanza_values({'type': 'set'})\n if sub != None:\n iq.append(sub)\n return iq" ]
[ "0.63821733", "0.63821733", "0.6101638", "0.60650355", "0.5940626", "0.5939873", "0.5794654", "0.5785868", "0.5782201", "0.5721857", "0.5620131", "0.55960476", "0.559237", "0.55909944", "0.55824697", "0.5576209", "0.55492747", "0.5549152", "0.55308235", "0.551", "0.5487735", "0.5482171", "0.5480994", "0.54633623", "0.5419847", "0.54120576", "0.54119426", "0.540239", "0.535908", "0.53522295", "0.5337359", "0.53269404", "0.53150254", "0.53025705", "0.52839327", "0.5274996", "0.5250706", "0.52460426", "0.522057", "0.52142763", "0.52044946", "0.51959866", "0.5191204", "0.5169438", "0.5156627", "0.5154034", "0.51407874", "0.51356554", "0.51299304", "0.512183", "0.51215965", "0.5103428", "0.509035", "0.50894356", "0.50670666", "0.5065282", "0.50468504", "0.5045791", "0.5042717", "0.5039894", "0.5038762", "0.50321096", "0.50279295", "0.50225616", "0.5019947", "0.50099355", "0.50033313", "0.5002383", "0.49905127", "0.499012", "0.4988735", "0.49885607", "0.4982719", "0.49809033", "0.49757728", "0.49727064", "0.49705723", "0.49650702", "0.49589062", "0.4948216", "0.49437463", "0.4936704", "0.49178693", "0.49152377", "0.491409", "0.491409", "0.48997492", "0.48958284", "0.48878184", "0.48691675", "0.48642293", "0.4860968", "0.48579746", "0.48500896", "0.48436195", "0.48412022", "0.4840728", "0.4833105", "0.48267898", "0.48262593" ]
0.69799614
0
Requests should have a section of the config file and variable/field in that section to be returned in the response body.
def on_get(self, req, resp, section, field): resp.content_type = 'text/text' resp.body = self.direct_get(field, section)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, request, format=None):\n return Response({k: getattr(config, k) for k in list(dir(config))})", "def output_config() -> Response:\n c = dict(config)\n c['password'] = \"*********\"\n return jsonify(c)", "def get_config(req):\n #try:\n # user_id = req.user\n #except KeyError as e:\n # msg = req.get_error_msg(e)\n # return send_error_response(msg)\n try:\n config = tools_config_get_config(req)\n except Exception:\n raise http_exc.HTTPClientError()\n else:\n return Response(json_body=json.dumps(config), content_type='application/json')", "def get(request_url, **_):\n # Checks input parameters\n assert '/configuration' in request_url\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n return response", "def configs(self, request, *args, **kwargs):\n response = self.retrieve(request, *args, **kwargs)\n response.data = response.data['configures']\n return response", "def get(url, **_):\n # Checks input parameters\n assert '/configuration/%s' % dummy_id in url\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n response.status_code = 200\n return response", "def get(self, session: Session = None) -> Response:\n with open(self.manager.config_path, encoding='utf-8') as f:\n raw_config = base64.b64encode(f.read().encode(\"utf-8\"))\n return jsonify(raw_config=raw_config.decode('utf-8'))", "def post(self, request):\n section = request.DATA.get('section', None)\n section_name = request.DATA.get('section_name', None)\n configuration = request.DATA.get('configuration', None)\n\n named_sections = settings.HAPROXY_CONFIG_NAMED_SECTIONS\n if section in named_sections and not all([x is not None for x in [section, section_name, configuration]]):\n raise core_exceptions.InvalidRequestException()\n else:\n try:\n json.loads(configuration) # Should raise ValueError if configuration data are invalid\n config = HaProxyConfigModel(section=section, section_name=section_name, configuration=configuration)\n config.save()\n except IntegrityError:\n raise core_exceptions.DuplicateEntryException()\n except ValueError:\n raise core_exceptions.InvalidRequestException()\n\n return Response({'checksum': config.checksum}, status=HTTP_201_CREATED)", "def post(self, request):\n result = HaProxyConfigModel.objects.all()\n result.query.group_by = ['section', 'section_name']\n\n if not result:\n raise core_exceptions.DoesNotExistException()\n\n result = sorted(result, key=methodcaller('get_section_weight'))\n config = \"\"\n try:\n with open(settings.HAPROXY_CONFIG_DEV_PATH, 'w') as f:\n for res in result:\n config += \"{0} {1}\\n\".format(str(res.section), (res.section_name or \"\"))\n for key, value in res.configuration.iteritems():\n config += \" {0} {1}\\n\".format(str(key), (value or \"\"))\n config += \"\\n\"\n f.write(config)\n except IOError as e:\n raise_500_error(e.errno, e.strerror + settings.HAPROXY_CONFIG_DEV_PATH)\n\n return Response({'created': True}, status=HTTP_201_CREATED)", "def config():\n return {\n \"CLEAN_OUTBOX\": \"TRUE\",\n \"COMPONENT_NAME\": \"testing-unpacker\",\n \"DEST_SITE\": \"WIPAC\",\n \"FILE_CATALOG_REST_TOKEN\": \"fake-file-catalog-token\",\n \"FILE_CATALOG_REST_URL\": \"http://kVj74wBA1AMTDV8zccn67pGuWJqHZzD7iJQHrUJKA.com/\",\n \"HEARTBEAT_PATCH_RETRIES\": \"3\",\n \"HEARTBEAT_PATCH_TIMEOUT_SECONDS\": \"30\",\n \"HEARTBEAT_SLEEP_DURATION_SECONDS\": \"60\",\n \"INPUT_STATUS\": \"unpacking\",\n \"LTA_REST_TOKEN\": \"fake-lta-rest-token\",\n \"LTA_REST_URL\": 
\"http://RmMNHdPhHpH2ZxfaFAC9d2jiIbf5pZiHDqy43rFLQiM.com/\",\n \"OUTPUT_STATUS\": \"completed\",\n \"PATH_MAP_JSON\": \"/tmp/lta/testing/path_map.json\",\n \"RUN_ONCE_AND_DIE\": \"False\",\n \"SOURCE_SITE\": \"NERSC\",\n \"UNPACKER_OUTBOX_PATH\": \"/tmp/lta/testing/unpacker/outbox\",\n \"UNPACKER_WORKBOX_PATH\": \"/tmp/lta/testing/unpacker/workbox\",\n \"WORK_RETRIES\": \"3\",\n \"WORK_SLEEP_DURATION_SECONDS\": \"60\",\n \"WORK_TIMEOUT_SECONDS\": \"30\",\n }", "def test_request(self):\n self.assertIn('list', self.api.request('sys.settings.get').data,\n msg=\"request() doesn't work properly. 'list' is not found in the response\")", "def config(self) -> Dict[str, Any]:", "def handle_cluster_config(self, request):\n \"\"\"\n @api {get} /cluster/config/:key Get cluster parameter\n @apiName GetClusterConfig\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiParam {string} :key Name of the parameter to get\n \"\"\"\n \"\"\"\n @api {put} /cluster/config/:key Set cluster parameter\n @apiName SetClusterConfig\n @apiGroup Cluster\n @apiVersion 1.0.0\n\n @apiParam {string} :key Name of the parameter to set\n \"\"\"\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n match = re.match('/cluster/config/(.+)', request.uri_path)\n name = match.group(1)\n\n if request.method == \"GET\":\n try:\n return HTTPReply(body = json.dumps(self.cluster.config.get(name)), headers = headers)\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"PUT\":\n try:\n self.cluster.config.set(name, json.loads(request.body))\n return HTTPReply(code = 204, headers = {'Access-Control-Allow-Origin': '*'})\n except (ValueError, TypeError) as error:\n return HTTPReply(code = 400, message = str(error), headers = {'Access-Control-Allow-Origin': '*'})\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"DELETE\":\n try:\n self.cluster.config.clear(name)\n return HTTPReply(code = 204, headers = {'Access-Control-Allow-Origin': '*'})\n except KeyError:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})", "def prepare_config_request(self, req):\n\t\tself.content_type = 'text/javascript'\n\t\tself.template = 'fckconfig-custom.js.tmpl'", "def get(self, session: Session = None) -> Response:\n return jsonify(self.manager.config)", "def _create_config(self, body=None):\n request = Request.blank(self.uri_config, headers=[self.auth_header])\n request.method = 'POST'\n request.body = body\n return request.get_response(application)", "def test_specific_default_body_sent_with_request(self):\n req = self.httpbin.get_my_headers(dry_run=True)\n def_body = self.httpbin.client[\"get_my_headers\"][\"data\"]\n self.assertIn(urlencode(def_body), req.prepared_request.body)", "def api_response():\n return load_fixture(\"smhi.json\", DOMAIN)", "def process_data(req):\n try:\n config_name = req.json_body['config_name']\n except KeyError:\n from iap.data_loading.data_loader import Loader\n import logging\n settings = \"\"\n loader = Loader()\n loader.run_processing(config_name)\n return send_success_response()", "def get_page_configuration(req):\n # Get parameters from request.\n #try:\n user_id = req.user\n page_name = req.json_body['data']['page']\n #except KeyError as e:\n #msg = req.get_error_msg(e)\n #return send_error_response(msg)\n #try:\n state = rt.get_state(user_id)\n tool_id = state.tool_id\n language = state.language\n config = 
get_page_config(tool_id, page_name, language)\n return send_success_response(config)\n #except Exception as e:\n #msg = req.get_error_msg(e, language)\n #return send_error_response(msg)", "def get_config_verify(self,\n raw_response: Any,\n *args,\n **kwargs) -> bool:\n pass", "def config_section_data():\n config_data = u\"\"\"[feeds]\n# comma separated section names. ex. sqlserver_feed,file_feed\nfeed_names=<your feeds>\nreload=true\n# use reload_types to limit the types of objects when reload=true.\n# Ex: incident,task,note,artifact,attachment,<data_table_api_name>\nreload_types=\n# set to true if ElasticSearch errors occur during reload=true\nreload_query_api_method=false\n\n# feed_data is the default message destination that will be listened to\nqueue=feed_data\n\n# set to true if attachment data should be part of payload send to plugins\ninclude_attachment_data=false\n# if necessary, specify the supported workspace (by label, case sensitive) and the list of feeds associated with it\n# ex: 'Default Workspace': ['sqlserver_feed'], 'workspace A': ['kafka_feed', 'resilient_feed']\nworkspaces=\n\"\"\"\n return config_data", "def test_get_request_output(self):\n pass", "def test_api_section(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load sections from url specified in api base\n r = requests.get(r['sections']).json()\n r = requests.get(r['sections'][0]['url']).json()\n self.assertIn('id', r)\n self.assertIn('name', r)\n self.assertIn('html', r)\n self.assertIn('url', r)\n self.assertIn('regions', r)\n self.assertIn('sensors', r)\n self.assertIn('gages', r)\n self.assertIn('description', r)\n self.assertIn('access', r)\n self.assertIn('location', r)\n self.assertIn('in_latitude', r)\n self.assertIn('in_longitude', r)\n self.assertIn('out_latitude', r)\n self.assertIn('out_longitude', r)", "def config_get():\n server_config = db.get().server_config_get()\n\n if not server_config:\n return flask.jsonify({\n \"message\": \"Netmet server has not been setup yet\"}), 404\n\n return flask.jsonify(server_config), 200", "def get(self, request):\n result = HaProxyConfigModel.objects.all()\n result.query.group_by = ['section', 'section_name']\n\n if not result:\n raise core_exceptions.DoesNotExistException()\n\n result = sorted(result, key=methodcaller('get_section_weight'))\n serializer = HaProxyConfigModelSerializer(result, many=True)\n return Response(serializer.data)", "def get_content(toUrl):\n\n cf = ConfigParser.ConfigParser()\n cf.read(\"config.ini\")\n cookie = cf.get(\"cookie\",\"cookie\")\n cookdic = dict(Cookie=cookie)\n\n try:\n req = requests.get(toUrl,cookies = cookdic, timeout=100)\n except:\n return None\n if req.status_code != requests.codes.ok:\n print \"haven't get 200, status_code is: \"+str(req.status_code);\n # sys.exit(-1)\n return None\n return req", "def post(self):\n p = json.loads(self.request.body)\n\n if not p or 'config' not in p:\n self.NotFound('Unable to find pipeline config in json request.')\n else:\n logging.info('config is:\\n%r', p['config'])\n variable_names = GetVariableAttributes(p['config'])\n logging.info('var names is %r', variable_names)\n variables = p.get('variables', [])\n variables = dict([(v.get('name', ''), v) for v in variables])\n\n for v in set(variables.keys()) - variable_names:\n del variables[v] # remove vars not in variable_names\n for v in variable_names:\n variables.setdefault(v, {'name': v}) # add missing variables\n p['variables'] = variables.values()\n 
logging.info('returning variables %r from %r', variables, variable_names)\n self.SendJson(p)", "def request_extras(self):\n conf = {}\n if self.api_token:\n conf['headers'] = {\n 'Authorization': 'Token {}'.format(self.api_token),\n }\n\n if self.credentials:\n conf['auth'] = self.credentials\n\n return conf", "def get_config_on_json(self):\n # load section CONFIG from data\n try:\n return self.json_data[\"CONFIG\"]\n except:\n constant.get_error(constant.ERROR_004)", "def do_GET(self):\n if \"mock_configurations\" in self.path:\n resource = self.path.replace(\"/mock_configurations\", \"\")\n print resource\n self.recover_request(resource)\n\n else:\n \"\"\"Otherwise, serve the previously uploaded content.\"\"\"\n self.store_request(self.path)\n self.serve_response()", "def config_section_data():\n config_data = u\"\"\"[fn_sep]\nsep_base_path=/sepm/api/v1\nsep_auth_path=/sepm/api/v1/identity/authenticate\nsep_host=<SEPM server dns name or ip address>\nsep_port=8446\nsep_username=<username>\nsep_password=<password>\nsep_domain=<SEP domain name>\n# Optional settings for access to SEPM via a proxy.\n#http_proxy=http://proxy:80\n#https_proxy=http://proxy:80\n# Limit result sent to Resilient, add full result as an attachment.\nsep_results_limit=200\n# Period of time (seconds) to wait for all endpoints to return a scan result.\nsep_scan_timeout=1800\n\"\"\"\n return config_data", "async def dashboard(request):\n return [\n {'name': 'application config', 'value': {k: str(v) for k, v in app.cfg}},\n {'name': 'request headers', 'value': dict(request.headers)},\n ]", "def get(self, request, *args, **kwargs):\n timer = Timer()\n section_id = kwargs.get('section_id')\n try:\n return self.make_http_resp(timer, request, section_id)\n except Exception as ex:\n return handle_exception(logger, timer, traceback)", "def requested_config_vals():\n return {} # no extra values needed", "def get_config_file_content(self):\n\n config_content: List[str] = [\n 'server {',\n\t ' listen {};'.format(self.port),\n '',\n ' ##',\n ' # PHP-FPM',\n ' ##',\n ' #location ~ \\.php$ {',\n \t ' #include /etc/nginx/fastcgi_params;',\n\t\t ' #root /var/www/src;',\n ' #fastcgi_split_path_info ^(.+?\\.php)(/.*)$;',\n ' #fastcgi_pass\tphpfpm:3002;',\n\t\t ' #fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name;',\n ' #}',\n '',\n ' location / {',\n\t\t ' root /var/www/src;',\n ' index index.html;'\n\t\t ' #index index.php;',\n\t\t ' #rewrite ^ /index.php?$args last; break;',\n\t ' }',\n '}'\n ]\n return config_content", "def config_request(self) -> ConfigFilesRequest:\n\n return ConfigFilesRequest(\n discovery=True,\n check_existence=[self.config] if self.config else [],\n check_content={\"pyproject.toml\": b\"[tool.pytype\"},\n )", "def fusion_api_get_configuration(self, uri=None, param='', api=None, headers=None):\n return self.configuration.get(uri=uri, api=api, headers=headers, param=param)", "def response_parameters_from_config(response_config):\n h = response_config.get(\"headers\", {})\n headers = [(k, v) for k, v in six.iteritems(h)]\n status = response_config.get(\"status\", None)\n status_code = response_config.get(\"status_code\", None)\n body = response_config.get(\"body\", None)\n json_body = response_config.get(\"json_body\", None)\n return status, status_code, headers, body, json_body", "def request_vars(self):", "def get_configuration() -> Response: # noqa: E501\n config = rabbitMQ_manager.get_configuration()\n if config is not None:\n return Response(\n json.dumps(config),\n status=200\n )\n return 
Response(\n status=500\n )", "def json_bytes(request) -> bytes:\n return get_test_data(request, __name__, \"config.json\")", "def parse_response(self, response, case):\n request = response.request\n parsed = {\n 'request': {\n 'method': request.method,\n 'url': request.url,\n 'body': request.body,\n },\n 'response': {\n 'headers': OrderedDict(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n }\n }\n\n # Re-assemble request line\n url_parts = urlparse(request.url)\n parsed['request']['request_line'] = '%s %s%s%s HTTP/1.1' % (\n request.method, url_parts.path, '?' if url_parts.query else '',\n url_parts.query)\n\n # Process request headers\n if self.mode == 'display':\n hostname = url_parts.hostname\n else:\n hostname = self.doc_hostname\n parsed['request']['headers'] = OrderedDict((('Host', hostname),))\n for header in sorted([h.title() for h in request.headers]):\n raw_value = request.headers[header]\n value = self.parse_header(header, raw_value, 'request')\n if value:\n parsed['request']['headers'][header.title()] = value\n\n # Re-assemble response line\n parsed['response']['response_line'] = 'HTTP/1.1 %s %s' % (\n response.status_code, response.reason)\n\n # Process response headers\n for header in sorted([h.title() for h in response.headers]):\n raw_value = response.headers[header]\n value = self.parse_header(header, raw_value, 'response')\n if value:\n fixed_header = header.title().replace('Www', 'WWW')\n parsed['response']['headers'][fixed_header] = value\n\n # Process response body\n response.encoding = 'utf-8'\n body = response.text\n if self.standardize:\n body = body.replace(api, self.doc_base_url)\n for key, value in case.get('standardize', {}).items():\n assert key in ('created', 'modified', 'date')\n pattern = r\"\"\"(?x)(?s) # Be verbose, . 
include newlines\n \"%s\":\\s\" # Key and quote\n \\d{4}-\\d{2}-\\d{2} # Date\n T\\d{2}:\\d{2}:\\d{2} # Time\n \\.\\d{0,6}Z # Microseconds and UTC timezone\n \", # End quote and comma\n \"\"\" % key\n replace = '\"%s\": \"%s\",' % (key, value)\n body = re.sub(pattern, replace, body)\n parsed['response']['body'] = body\n\n return parsed", "def config_section_data():\n config_data = None\n\n config_data = u\"\"\"[fn_rate_limit]\n# MySQL Database settings\ndb_user = USER\ndb_password = PASSWORD\ndb_host = 127.0.0.1\ndb_port = 3306\ndatabase = Rate_limit\n\"\"\"\n return config_data", "def test_get_section_by_crn(self):\n response = self.client.open(\n '/pablokvitca/classdeck-api/1.0.0/section/{crn}'.format(crn=99999),\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def on_get(self, request, response, psa_id, conf_id):\n try:\n self.logger.info(request.method+\" \"+request.uri)\n #TODO: define other formats, e.g., zipped\n conf_type = None\n #logging.info(self.confsPath+\"/\"+psa_id+\"/\"+conf_id)\n if self.config.USE_LOCAL_FILE:\n self.logger.info(\"-PSAConf Load PSA config from local file.\")\n try:\n fp = open(self.confsPath+\"/\"+psa_id+\"/\"+conf_id, 'rb')\n conf = fp.read()\n fp.close()\n except IOError as exc:\n logging.error(\"Unable to read file \"+self.confsPath+\"/\"+psa_id+\"/\"+conf_id)\n raise exc\n conf_type = \"text\"\n else:\n token = self.instantiator.TokenIP[self.get_client_address(request.env)]\n self.logger.info(\"-PSAConf Load PSA config from UPR for user:\" +str(token) + \", psa_id:\" + str(psa_id))\n r = self.upr_client.get_user_psaconf(token, psa_id)\n ##self.logger.info(\"response:\" + str(r.json()))\n conf = r.json()[\"conf\"]\n conf_type = \"base64\"\n\n #new JSON and IP\n\t token = self.instantiator.TokenIP[self.get_client_address(request.env)]\n\t userTVD = self.instantiator.userTVDs[token]\n configuration = {}\n configuration['conf_type'] = conf_type\n configuration['conf'] = conf\n if psa_id in userTVD.psaIPaddresses.keys():\n self.logger.info(\"PSAConf: PSA %s requires IP\" % (str(psa_id)))\n configuration[\"IP\"] = userTVD.psaIPaddresses[psa_id]\n configuration[\"gateway\"] = self.config.GATEWAY_IP\n configuration[\"dns\"] = self.config.DNS_IP\n configuration[\"netmask\"] = self.config.NETMASK\n configuration[\"userIP\"] = userTVD.interfaceIP\n configuration[\"mobility\"] = userTVD.mobility\n configuration[\"firstPSA\"] = True if psa_id == userTVD.psaID_first else False\n configuration[\"lastPSA\"] = True if psa_id == userTVD.psaID_last else False\n self.logger.info(\"\\n\\n---> PSAConfiguration: %s\" % (str(configuration)))\n else:\n self.logger.info(\"PSAConf: PSA doesn't require IP\")\n response.data = json.dumps(configuration)\n response.status = falcon.HTTP_200\n self.logger.info(\"PSA \"+psa_id+\" configuration \"+conf_id+\" sent to PSC \"+self.get_client_address(request.env))\n\n except Exception as e:\n self.logger.exception(sys.exc_info()[0])\n response.status = falcon.HTTP_501", "def get_configuration():\r\n if not hasattr(CURRENT_REQUEST_CONFIGURATION, 'data'):\r\n return {}\r\n\r\n return CURRENT_REQUEST_CONFIGURATION.data", "def api_response_lack_data():\n return load_fixture(\"smhi_short.json\", DOMAIN)", "def configRequest(cat,set,val):\n config.read('config.ini')\n if cat not in config:\n config.add_section(cat)\n config.set(cat,set,val)\n with open('config.ini','w') as update:\n config.write(update)", "def _serve_runs(self, request, query_params):\n 
request.respond(list(self.configs.keys()), 'application/json')", "def ez_config(auth_token, filename, options = None):\n status_code = 500\n try:\n API_REQUEST_URL = API_URL + \"/ez_config\"\n payload = {\n \"options\": json.dumps(options)\n }\n files = [(\"filename\", open(filename, \"rb\"))]\n headers = {\n \"Authorization\": \"Bearer \" + str(auth_token),\n }\n response = requests.request(\n \"POST\", API_REQUEST_URL, headers = headers, data = payload, files = files\n )\n status_code = response.status_code\n try:\n response_json = response.json()\n except Exception as e:\n response.raise_for_status()\n response_json[\"status_code\"] = status_code\n return response_json\n except Exception as e:\n print((traceback.print_exc()))\n return exception_return(e, status_code)", "def get(self):\n if self.file:\n self._read()\n config = self.client_file.parseString(self.content)\n return config", "def read_section(self, configuration_file=\"./conf.txt\", section=\"\"):\n parser = ConfigParser.ConfigParser()\n parser.read(configuration_file)\n\n sec = {}\n if parser.has_section(section):\n items = parser.items(section)\n for item in items:\n sec[item[0]] = item[1]\n else:\n raise ConfException(\"{0} not found in the {1} file\".format(section, configuration_file))\n return sec", "def updateSectionWithReq(self, section):\n dat = section['dat']\n req = section['req']\n if 1 == self.updateSectionReq(section):\n exit(1)\n # combined elements\n dat['URL'] = req['URL']\n dat['request-line'] = req['request-line']\n dat['request-body'] = req['request-body']\n # host\n m = r'Host\\s*:\\s*([^\\s]*)'\n for index, header in enumerate(dat['request-headers']):\n res = re.findall(m, header)\n if len(res) > 0:\n dat['request-headers'][index] = 'Host: ' + req['host']\n return True", "def ReadConfigFileSection( config, section ):\n dict1 = {}\n dict1['config'] = section\n options = config.options(section)\n for option in options:\n try:\n dict1[option] = config.get(section, option)\n except:\n print >> sys.stderr, (\"Exception on %s!\" % option)\n dict1[option] = None\n return dict1", "def build_response_dict(self):\n return {\n \"release\": self.settings['bookstore'][\"release\"],\n \"features\": self.settings['bookstore'][\"features\"],\n }", "def post(url, data=None, **_):\n # Checks input parameters\n assert '/configuration' in url\n if has_src:\n stream = data.fields['datafile'][1]\n stream.seek(0)\n assert stream.read() == file_content\n else:\n assert 'datafile' not in data.fields\n excepted = deepcopy(client._configuration_parameters)\n excepted['app']['reset'] = True\n excepted['app']['reload'] = True\n excepted['env']['apyfal_version'] = apyfal.__version__\n assert json.loads(data.fields['parameters']) == excepted\n\n # Returns fake response\n response = requests.Response()\n response._content = response_json\n response.status_code = 200\n return response", "def GET(self, req):\r\n\r\n max_keys = req.get_validated_param('max-keys', CONF.max_corerule_listing)\r\n # TODO: Separate max_corerule_listing and default_corerule_listing\r\n max_keys = min(max_keys, CONF.max_corerule_listing)\r\n resp = req.get_response(self.app)\r\n\tif 'x-oss-meta-access-control-allow-origin' not in resp.headers:\r\n\t raise NoSuchCORSConfiguration()\r\n allowed_origins=resp.headers['x-oss-meta-access-control-allow-origin']\r\n allowed_headers=resp.headers['x-oss-meta-access-control-allow-headers']\r\n allowed_methods=resp.headers['x-oss-meta-access-control-allow-methods']\r\n 
expose_headers=resp.headers['x-oss-meta-access-control-expose-headers']\r\n max_age_seconds=resp.headers['x-oss-meta-access-control-max-age']\r\n elem = Element('CORSConfiguration')\r\n rule_node =SubElement(elem, 'CORSRule')\r\n if rule_node is None and rule_node =='':\r\n raise NoSuchCORSConfiguration\r\n _add_node_list(rule_node, 'AllowedOrigin', _str_list(allowed_origins))\r\n _add_node_list(rule_node, 'AllowedMethod', _str_list(allowed_methods))\r\n _add_node_list(rule_node, 'AllowedHeader', _str_list(allowed_headers))\r\n _add_node_list(rule_node, 'ExposeHeader', _str_list(expose_headers))\r\n if max_age_seconds is not None:\r\n _add_text_child(rule_node, 'MaxAgeSeconds', str(max_age_seconds))\r\n body = tostring(elem)\r\n\r\n return HTTPOk(body=body, content_type='application/xml')", "def _read_section_config(self, req, section_name, default_values, custom_options = None):\n def _assemble_option(option_name, stored_value):\n option = self._gather_option_data(req, section_name, option_name, section_default_values)\n stored_value = self._convert_value(stored_value, option['option_info'])\n\n does_exist, value = self._get_session_value(req, section_name, option_name)\n if does_exist:\n option['value'] = value\n else:\n option['value'] = stored_value\n \n option['stored_value'] = stored_value\n return option\n \n options = {}\n section_default_values = default_values.get(section_name, None)\n\n for option_name, stored_value in self.config.options(section_name):\n options[option_name] = _assemble_option(option_name, stored_value)\n \n if custom_options is None:\n custom_options = self._get_session_custom_options(req, section_name)\n \n if section_name in custom_options:\n for option_name in custom_options[section_name].keys():\n if option_name in options:\n continue\n \n options[option_name] = _assemble_option(option_name, None)\n \n return options", "def print_ofpt_get_config_request(msg):\n pass", "def get_config():\n\n return json.loads(CONFIG_FILE.read_text())", "def config(self) -> pulumi.Output['outputs.ConfigResponse']:\n return pulumi.get(self, \"config\")", "def test_api_sections(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load sections from url specified in api base\n r = requests.get(r['sections']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('sections', r)\n section = r['sections'][0]\n self.assertIn('html', section)\n self.assertIn('url', section)\n self.assertIn('id', section)", "def endpoint_config(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s' % endpoint_name, 'GET')\n return body", "def backend_configs(collection, doc_id=None):\n\n print (\"\")\n log_app.debug(\"config app route\")\n log_app.debug(\"config app route / method : %s\", request.method )\n log_app.debug(\"config app route / collection : %s\", collection )\n log_app.debug(\"config app route / doc_id : %s\", doc_id )\n\n ### target right config collection\n allowedCollections = [\"global\" , \"footer\", \"navbar\", \"tabs\", \"endpoints\" , \"styles\" , \"routes\", \"socials\" ]\n if collection in allowedCollections :\n mongoColl = mongoConfigColls[collection] ### imported from . 
(and from there from .api.__init__ )\n else :\n log_app.warning(\"error : -%s- is not a valid config collection (redirect)\", collection)\n return redirect( \"/error/400\" )\n\n ### get request args if any\n apiviz_uuid = request.args.get('uuid', default=\"\", type=str)\n log_app.debug(\"config app route / apiviz_uuid : %s\", apiviz_uuid )\n\n ### is_log_route expected as booelan\n is_log_route_raw = request.args.get('log_route', default=\"false\", type=str)\n is_log_route = formatEnvVar(is_log_route_raw, format_type='boolean', is_arg=True)\n log_app.debug(\"config app route / is_log_route : %s\", is_log_route )\n\n ### get request payload (json) if any\n req_json = request.get_json()\n log_app.debug(\"config app route / req_json : \\n%s\", pformat(req_json) )\n\n\n ### check if uuid is authorized\n\n apiviz_front_auth_mode = request.args.get('auth_mode', default=None, type=str)\n ### retrieve access token \n token = request.args.get('token', default='', type=str)\n if req_json : \n # overide token from args with token from payload if any\n token = req_json.get('token', '')\n log_app.debug(\"config app route / token : %s\", token )\n\n uuid_auth = checkUuidAuth(apiviz_uuid, apiviz_front_auth_mode, user_token=token, is_log_route=is_log_route)\n log_app.debug(\"config app route / uuid_auth : %s\", uuid_auth )\n \n ### build basic query\n query = {'apiviz_front_uuid' : apiviz_uuid}\n\n if uuid_auth : \n\n field \t= request.args.get('field', default='field', type=str)\n\n # as_list = request.args.get('as_list', default=False, type=bool)\n # log_app.debug(\"config app route / as_list : %s\", as_list )\n as_list_raw = request.args.get('as_list', default=\"False\", type=str)\n as_list = formatEnvVar(as_list_raw, format_type='boolean', is_arg=True)\n log_app.debug(\"config app route / as_list : %s\", as_list )\n\n # role_to_check = request.args.get('role', default='admin', type=str)\n roles_to_check = COLLECTIONS_AUTH_MODIFICATIONS[collection][request.method]\n log_app.debug(\"config app route / roles_to_check : %s\", roles_to_check )\n\n\n ### example of access token :\n # eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE1NTcwODI3OTQsIm5iZiI6MTU1NzA4Mjc5NCwianRpIjoiNjA4YWRhMDktMzA4My00ZmE1LTg1NDMtNjRkNDJmM2E4ZmZhIiwiZXhwIjoxNTU3MTI1OTk0LCJpZGVudGl0eSI6IjVjY2YzMmExODYyNmEwM2MzNmY1MzYzNCIsImZyZXNoIjpmYWxzZSwidHlwZSI6ImFjY2VzcyIsInVzZXJfY2xhaW1zIjp7Il9pZCI6IjVjY2YzMmExODYyNmEwM2MzNmY1MzYzNCIsImluZm9zIjp7Im5hbWUiOiJFbGlub3IiLCJzdXJuYW1lIjoiT3N0cm9tIiwiZW1haWwiOiJlbGlub3Iub3N0cm9tQGVtYWlsbmEuY28iLCJwc2V1ZG8iOiJBbm9ueW1vdXMgVXNlciJ9LCJhdXRoIjp7InJvbGUiOiJndWVzdCIsImNvbmZfdXNyIjpmYWxzZX0sInByb2ZpbGUiOnsibGFuZyI6ImVuIiwiYWdyZWVtZW50IjpmYWxzZSwidXNyX3ZpZXciOiJtaW5pbWFsIiwidXNyX3Byb2ZpbGVzIjpbXX19fQ.Iux2Grzvv-6VBXzKME5ub31iLtl-LHYea_0JSdQ22eM\n\n ### filter out field arg to unique identifiers fields in documents\n if field not in ['_id', 'field'] :\n field = 'field'\n\n ### precising query\n if doc_id :\n query[\"_id\"] = ObjectId(doc_id)\n if is_log_route and collection == 'endpoints' : \n query[\"data_type\"] = \"user\"\n\n log_app.debug(\"config app route / query : \\n%s\", query )\n\n if request.method != 'GET':\n\n if request.method == 'POST':\n\n log_app.debug(\"config app route / POST\" )\n\n query[\"_id\"] = ObjectId(req_json['doc_id']) \n log_app.debug(\"config app route / POST / query : \\n%s\", query )\n\n ### retrieve original document\n configDoc = mongoColl.find_one(query)\n log_app.debug(\"config app route / posT / configDoc : \\n%s\", pformat(configDoc) )\n \n auth_mode = 
req_json.get('auth_mode', None)\n is_authorized = checkJWT(token, roles_to_check, uuid=apiviz_uuid, auth_mode=auth_mode)\n\n if is_authorized and configDoc is not None :\n\n ### retrieve editionn config \n doc_config = req_json['doc_config']\n doc_data = req_json['doc_data']\n log_app.debug(\"config app route / posT / doc_config : \\n%s\", pformat(doc_config) )\n \n ### not editable fields\n notAllowedFields = ['_id', 'apiviz_front_uuid', 'app_version', 'is_default']\n\n ### check if need for nested field update / f.i. navbar links\n editSubfield = False\n if doc_config['type'] == 'blocs_list' : \n editSubfield = req_json['doc_subfield'].split('.')\n\n ### config edit specifics\n canAddKey = doc_config.get('canAddKeys', False) \n canAddToList = doc_config.get('canAddToList', False) \n canModifyKey = doc_config.get('canModifKeys', False) \n\n ### target fields to update\n print() \n update_query = {'$set' : {} }\n for k, v in doc_data.items() :\n # log_app.debug(\"config app route / posT / k:v : \\n%s\", pformat({k:v}) )\n # directly update field : for type == blocs || docs_list\n if canAddKey == False :\n if k not in notAllowedFields and k in [*configDoc] : \n update_query['$set'][k] = v\n\n if canAddKey == False :\n if k not in notAllowedFields : \n update_query['$set'][k] = v\n # print() \n\n ### update version\n update_query['$set']['app_version'] = version\n log_app.debug(\"config app route / posT / update_query : \\n%s\", pformat(update_query) )\n\n ### save updated doc\n mongoColl.update_one(query, update_query)\n\n ### get back doc as updated\n updatedDoc = mongoColl.find_one(query)\n # log_app.debug(\"config app route / posT / updatedDoc : \\n%s\", pformat(updatedDoc) )\n\n formatedUpdatedConfig = DocOidToString(updatedDoc)\n # log_app.debug(\"config app route / posT / DocOidToString(updatedDoc) : \\n%s\", pformat( formatedUpdatedConfig ))\n # return \"hello config master / POST ... praise be\"\n return jsonify({\n 'msg' : \"the doc was updated\",\n 'query' : DocOidToString(query),\n 'doc_updated' : formatedUpdatedConfig,\n 'request' : req_json,\n })\n\n elif configDoc is None :\n return jsonify({ \n \"msg\" : \"noooope... can't find doc dammit....\",\n 'query' : DocOidToString(query),\n 'request' : req_json,\n })\n\n else :\n return jsonify({ \n \"msg\" : \"noooope... you can't edit this ... mate\",\n 'query' : DocOidToString(query),\n 'request' : req_json,\n })\n\n\n elif request.method == 'DELETE':\n\n log_app.debug(\"config app route / DELETE\" )\n\n allowedCollsForDelete = [ \"endpoints\" , \"routes\" ]\n\n ### retrieve token from request and check it \n req_data = json.loads(request.data)\n log_app.debug(\"config app route / req_data : \\n%s\", pformat(req_data) )\n token = req_data.get('token', '')\n auth_mode = req_data.get('auth_mode', None)\n is_authorized = checkJWT(token, roles_to_check, uuid=apiviz_uuid, auth_mode=auth_mode)\n\n if is_authorized and collection in allowedCollsForDelete :\n\n ### retrieve doc to delete to add to returned message\n configDoc = mongoColl.find_one(query)\n deletedDoc = DocOidToString(configDoc)\n\n ### delete doc \n mongoColl.delete_one(query)\n\n return jsonify({\n 'msg' : 'this doc was deleted',\n 'query' : DocOidToString(query),\n 'request' : req_json,\n 'deleted_doc' : deletedDoc\n })\n\n else :\n return jsonify({ \n 'msg' : \"noooope... not authorized to delete this ... 
mate ...\",\n 'query' : DocOidToString(query),\n 'request' : req_json,\n })\n\n\n elif request.method == 'GET':\n\n app_config_dict = getDocuments(mongoColl, query=query, as_list=as_list, field=field)\n\n return jsonify( {\n \"msg\" \t\t\t\t: \"this is the results from your query on the '%s' config collection\" % collection,\n \"query\"\t\t\t\t: query,\n \"request\"\t\t\t: {\n \"url\" \t\t\t\t: request.url,\n \"args\" \t\t\t\t: request.args,\n \"method\"\t\t\t: request.method,\n \"collection\"\t: collection,\n \"doc_id\"\t\t\t: doc_id,\n },\n \"app_config\" \t: app_config_dict\n } )\n\n else : \n ### uuid is not authorized\n return jsonify({ \n \"msg\" : \"this uuid is not authorized, please contact Apiviz team to unlock it\",\n \"query\"\t\t\t\t: query,\n \"request\"\t\t\t: {\n \"url\" \t\t\t\t: request.url,\n \"args\" \t\t\t\t: request.args,\n \"method\"\t\t\t: request.method,\n },\n })", "def getCampaignConfig(docName, url=reqmgr_url):\n headers = {\"Content-type\": \"application/json\", \"Accept\": \"application/json\"}\n conn = make_x509_conn(url)\n url = '/reqmgr2/data/campaignconfig/%s' % docName\n conn.request(\"GET\", url, headers=headers)\n r2 = conn.getresponse()\n data = json.loads(r2.read())\n return data['result']", "def perform_http_get(self, system_name, variable_config=None,\n request_id=None, url=None,\n params=None, expected_response=None,\n headers=None, user=None, password=None,\n allow_redirects=None, timeout=None,\n json=None, cookies=None, files=None, proxies=None,\n verify=None, stream=None, cert=None, var_sub=None):\n arguments = {'system_name': system_name, 'variable_config': variable_config,\n 'request_id': request_id, 'url': url, 'params': params,\n 'expected_response': expected_response, 'headers': headers,\n 'user': user, 'password': password, 'allow_redirects': allow_redirects,\n 'timeout': timeout, 'json': json, 'cookies': cookies,\n 'files': files, 'proxies': proxies, 'verify': verify,\n 'stream': stream, 'cert': cert, 'var_sub': var_sub}\n wdesc = \"Perform a http get to the url\"\n pSubStep(wdesc)\n pNote(system_name)\n output_dict = {}\n result = True\n\n for element in arguments:\n if element in [\"json\", \"data\", \"variable_config\"] and \\\n arguments[element] and arguments[element] is not None:\n arguments[element] = Utils.rest_Utils.\\\n check_ext_get_abspath(arguments[element], self.tc_path)\n\n credentials = Utils.data_Utils.\\\n get_user_specified_tag_values_in_tc(self.datafile, **arguments)\n\n if credentials[\"variable_config\"] and \\\n credentials[\"variable_config\"] is not None:\n credentials[\"variable_config\"] = Utils.rest_Utils.\\\n check_ext_get_abspath(credentials[\"variable_config\"],\n os.path.dirname(self.datafile))\n\n for element in credentials:\n credentials = Utils.rest_Utils.\\\n resolve_credentials_for_rest(credentials, element,\n self.datafile, system_name)\n credentials[\"auth\"] = (credentials['user'], credentials['password'])\n\n credentials, popped_args = Utils.rest_Utils.\\\n remove_invalid_req_args(credentials, [\"user\", \"password\",\n \"request_id\",\n \"variable_config\",\n \"var_sub\", \"json\"])\n\n pNote(\"url is: {0}\".format(credentials['url']))\n for i in range(0, len(popped_args[\"json\"])):\n if popped_args[\"json\"][i] != \"Error\":\n credentials[\"json\"] = popped_args[\"json\"][i]\n for key in credentials:\n pNote(\"Sending argument '{0}': {1}\"\n .format(key, credentials[key]))\n status, api_response = self.rest_object.get(**credentials)\n result = result and status\n 
output_dict.update(self.rest_object.\n update_output_dict(system_name,\n api_response, request_id, status, i+1))\n else:\n pNote(\"Request not sent.\", \"error\")\n status = False\n result = result and status\n if result:\n msg = \"http get successful\"\n else:\n msg = \"http get failed\"\n pNote(msg)\n report_substep_status(result)\n return result, output_dict", "def requested_config_vals():\n return {'transfer_stats_per_file':'opt'}", "def processGetConfig(self, msg):\r\n resp = MsgHelper.createResponse(Messages.RSP_GET_CONFIG, msg)\r\n resp[RunInto] = self.runInto\r\n resp[ExecDelay] = self.execDelay\r\n resp[ByStep] = self.stepByStep\r\n return resp", "def get_sections(self, request: SectionSrvRequest) -> SectionSrvResponse:\n response = SectionSrvResponse()\n response.sections = self.groundtruth.get_section_msgs()\n rospy.logdebug(f\"Answering section request {response.sections}\")\n return response", "def _short_response(self, request): # pylint: disable=no-self-use\n # @TODO: Should check reqeust header using custom header for setting override\n # @TODO: revisit this approach, still performance imppact in serialization of uneeded data\n return settings.POST_PUT_REQUEST_SHORT_RESPONSE", "def fusion_api_email_config(self, body, api=None, headers=None):\n param = \"/email-config\"\n return self.email.post(body, api, headers, param)", "def get(self, request, cluster_id, service_id): # pylint: disable=arguments-differ\n obj = get_obj_conf(cluster_id, service_id)\n cl = self.get_queryset().filter(obj_ref=obj.config).order_by('-id')\n serializer = self.serializer_class(cl, many=True, context={'request': request})\n return Response(serializer.data)", "def format_response_for_docs(self, response, case):\n parsed = self.parse_response(response, case)\n formatted = {\n 'request': {'body': parsed['request']['body']},\n 'response': {'body': parsed['response']['body']}\n }\n\n request = parsed['request']\n headers = request['request_line']\n if request['headers']:\n headers += '\\n' + '\\n'.join(\n '%s: %s' % pair for pair in request['headers'].items())\n formatted['request']['headers'] = headers\n\n response = parsed['response']\n headers = response['response_line']\n if response['headers']:\n headers += '\\n' + '\\n'.join(\n '%s: %s' % pair for pair in response['headers'].items())\n formatted['response']['headers'] = headers\n\n for phase in ('request', 'response'):\n for part in ('headers', 'body'):\n if (formatted[phase][part] and\n not formatted[phase][part].endswith('\\n')):\n formatted[phase][part] += '\\n'\n\n return formatted", "def test_request(self):\n self.processor.request_values(\"\", read_file=\"test_request.txt\", year=2016, style=\"ololo\")\n self.assertEqual(self.processor.values_list(),\n [[\"Australia\", 34], [\"Congo\", 9], [\"Ukraine\", 37]])", "def read_config_dict(config_data_dict):\n global template_test_file\n global test_interface_template_file\n global test_variable_template_file\n global report_expression_template_file\n global variable_name_in_template\n global variable_original_name_in_template\n global variable_default_value_in_template\n global test_path\n\n global api_url\n global api_1_0_url\n global bitbucket_repository_url\n global default_domain\n\n global sql_server\n global db_name\n\n global exec_server_address\n global exec_server_username\n global exec_server_password\n global exec_server_working_directory\n global robot_tests_directory\n global archive_output_directory\n global local_working_directory\n\n global cloudshell_server_address\n global 
cloudshell_server_port\n global cloudshell_server_username\n global cloudshell_server_password\n global cloudshell_server_domain\n global cloudshell_shared_robots_folder\n\n if 'template_test_file' in config_data_dict:\n template_test_file = config_data_dict['template_test_file']\n if 'test_interface_template' in config_data_dict:\n test_interface_template_file = config_data_dict['test_interface_template']\n if 'test_variable_template' in config_data_dict:\n test_variable_template_file = config_data_dict['test_variable_template']\n if 'report_expression_template' in config_data_dict:\n report_expression_template_file = config_data_dict['report_expression_template']\n if 'variable_name_in_template' in config_data_dict:\n variable_name_in_template = config_data_dict['variable_name_in_template']\n variable_original_name_in_template = variable_name_in_template + '_Original'\n if 'variable_default_value_in_template' in config_data_dict:\n variable_default_value_in_template = config_data_dict['variable_default_value_in_template']\n if 'test_path' in config_data_dict:\n test_path = config_data_dict['test_path']\n if not test_path.endswith('\\\\'):\n test_path += '\\\\'\n\n if 'api_url' in config_data_dict:\n api_url = config_data_dict['api_url']\n if 'api_1_0_url' in config_data_dict:\n api_1_0_url = config_data_dict['api_1_0_url']\n if 'bitbucket_repository_url' in config_data_dict:\n bitbucket_repository_url = config_data_dict['bitbucket_repository_url']\n if 'default_domain' in config_data_dict:\n default_domain = config_data_dict['default_domain']\n\n if 'sql_server' in config_data_dict:\n sql_server = config_data_dict['sql_server']\n if 'db_name' in config_data_dict:\n db_name = config_data_dict['db_name']\n\n if 'exec_server_address' in config_data_dict:\n exec_server_address = config_data_dict['exec_server_address']\n if 'exec_server_username' in config_data_dict:\n exec_server_username = config_data_dict['exec_server_username']\n if 'exec_server_password' in config_data_dict:\n exec_server_password = config_data_dict['exec_server_password']\n if 'exec_server_working_directory' in config_data_dict:\n exec_server_working_directory = config_data_dict['exec_server_working_directory']\n if 'robot_tests_directory' in config_data_dict:\n robot_tests_directory = config_data_dict['robot_tests_directory']\n if 'archive_output_directory' in config_data_dict:\n archive_output_directory = config_data_dict['archive_output_directory']\n if 'local_working_directory' in config_data_dict:\n local_working_directory = config_data_dict['local_working_directory']\n\n if 'cloudshell_server_address' in config_data_dict:\n cloudshell_server_address = config_data_dict['cloudshell_server_address']\n if 'cloudshell_server_port' in config_data_dict:\n cloudshell_server_port = config_data_dict['cloudshell_server_port']\n if 'cloudshell_server_username' in config_data_dict:\n cloudshell_server_username = config_data_dict['cloudshell_server_username']\n if 'cloudshell_server_password' in config_data_dict:\n cloudshell_server_password = config_data_dict['cloudshell_server_password']\n if 'cloudshell_server_domain' in config_data_dict:\n cloudshell_server_domain = config_data_dict['cloudshell_server_domain']\n if 'cloudshell_shared_robots_folder' in config_data_dict:\n cloudshell_shared_robots_folder = config_data_dict['cloudshell_shared_robots_folder']", "def do_config(self, request):\n try:\n config_file = request['file']\n except KeyError:\n config_file = None\n self._target.load_device_config(request['device'], 
config_file)\n return None", "def get_data(self, section, option_name):\n data = None\n\n #Check if file exists to prevent exceptions trying to reach it.\n if self.file[\"exists\"]:\n log.debug(\"File exists, setting up ConfigParser.\")\n config = ConfigParser()\n config.read(self.file[\"file_name\"])\n\n #Check if the section exists.\n if config.has_section(section):\n log.debug(\"Section '\" + section + \"' found.\")\n #Check if the option exists.\n if config.has_option(section, option_name):\n log.debug(\"Option '\" + option_name + \"' found.\")\n data = config.get(section, option_name)\n log.info(\"Data obtained correctly.\")\n else:\n log.warning(\"Option requested not found!\")\n else:\n log.warning(\"Section requested not found!\")\n else:\n log.error(\"Can't set token because the settings file is missing. Last error: \" +\n self.file[\"error\"])\n return data", "def get_download_params(self, example_id):\n\n config_filename = self.get_local_path(example_id)\n\n parser = SafeConfigParser()\n parser.read(config_filename)\n config_section = dict(parser.items('config')) if 'config' in parser else {}\n other_sections = {key: value for key, value in parser.items() if key != 'config' and key != 'DEFAULT'}\n return config_section, other_sections", "def test_default_default_body_sent_with_request(self):\n req = self.httpbin.get_my_ip(dry_run=True)\n def_body = self.httpbin.client[\"default_data\"]\n self.assertIn(urlencode(def_body), req.prepared_request.body)", "async def getConfigurations(self, ):\n payload = {}\n \n\n # Parameter validation\n schema = CatalogValidator.getConfigurations()\n schema.dump(schema.load(payload))\n \n\n url_with_params = await create_url_with_params(self._conf.domain, f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/\", \"\"\"{\"required\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}],\"optional\":[],\"query\":[],\"headers\":[],\"path\":[{\"in\":\"path\",\"name\":\"company_id\",\"description\":\"A `company_id` is a unique identifier for a particular seller account.\",\"schema\":{\"type\":\"string\"},\"required\":true},{\"in\":\"path\",\"name\":\"application_id\",\"description\":\"A `application_id` is a unique identifier for a particular sale channel.\",\"schema\":{\"type\":\"string\"},\"required\":true}]}\"\"\", )\n query_string = await create_query_string()\n headers = {\n \"Authorization\": \"Bearer \" + await self._conf.getAccessToken()\n }\n for h in self._conf.extraHeaders:\n headers.update(h)\n exclude_headers = []\n for key, val in headers.items():\n if not key.startswith(\"x-fp-\"):\n exclude_headers.append(key)\n return await AiohttpHelper().aiohttp_request(\"GET\", url_with_params, headers=get_headers_with_signature(self._conf.domain, \"get\", await create_url_without_domain(f\"/service/platform/catalog/v1.0/company/{self._conf.companyId}/application/{self.applicationId}/product-configuration/\", ), query_string, headers, \"\", exclude_headers=exclude_headers), data=\"\")", "def load_response(self, category):\n self.response = requests.get(f\"{self.settings.BASE_URL}/{category}\")\n if self.response.status_code == 200:\n self.response_info = 
self.response.json()", "def getYamlInstructions():\n with open('role_file_template.yaml', 'r') as yamlfile:\n output = yamlfile.read()\n if request.headers['Accept'] == 'application/json':\n return output, 200\n else:\n return render_template(\"output.html\", output=output)", "def get_config():\n return {'address': ADDRESS, 'https': HTTPS == 'https',\n 'password': PASSWORD, 'username': USERNAME,\n 'port': PORT, 'version': VERSION}", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def test_retrieve_json(self):\n setting_name = 'project_json_setting'\n url = reverse(\n 'projectroles:api_project_setting_retrieve',\n kwargs={'project': self.project.sodar_uuid},\n )\n get_data = {'app_name': EX_APP_NAME, 'setting_name': setting_name}\n response = self.request_knox(url, data=get_data)\n\n self.assertEqual(response.status_code, 200, msg=response.content)\n response_data = json.loads(response.content)\n expected = {\n 'app_name': EX_APP_NAME,\n 'project': str(self.project.sodar_uuid),\n 'user': None,\n 'name': setting_name,\n 'type': 'JSON',\n 'value': self.project_json_setting['value'],\n 'user_modifiable': True,\n }\n self.assertEqual(response_data, expected)", "def _get_config():\n resp = requests.get(TRAEFIK_API_URL)\n if not resp.ok:\n raise Exception(\n \"Bad traefik response: %s %s\" % (resp.status_code, resp.text)\n )\n return resp.json()", "def get_settings(self):\n return self.request({\n \"path\": \"/\" + UUID + \"/setting\"\n })", "def get(self, request, cluster_id, service_id, version): # pylint: disable=arguments-differ\n obj = get_obj_conf(cluster_id, service_id)\n cl = get_config_version(obj.config, version)\n if self.for_ui(request):\n try:\n cl.config = cm.adcm_config.ui_config(obj, cl)\n except AdcmEx as e:\n raise AdcmApiEx(e.code, e.msg, e.http_code)\n serializer = self.serializer_class(cl, context={'request': request})\n return Response(serializer.data)", "def read_config(file_location=None):\n _file_location = './settings.cfg'\n if file_location:\n _file_location = file_location\n config = configparser.ConfigParser()\n try:\n # read file\n config.read(_file_location)\n _return_dict = {\n 'prefix': config.get('SITE_INFO', '_prefix'),\n 'save_file_prefix': config.get('SITE_INFO', '_save_file_prefix'),\n 'test1': config.get('TEST_LINK', '_cno_url'),\n 'test2': config.get('TEST_LINK', '_article_url')\n }\n print()\n return _return_dict\n except configparser.NoSectionError:\n print('[ERROR] No section Error in {}'.format(_file_location))\n return {}", "def service_config():\n global _service_config\n if not _service_config:\n r = requests.get('https://tech.lds.org/mobile/ldstools/config.json')\n r.raise_for_status()\n _service_config = r.json()\n return _service_config", "def test_get_rule_settings(self):\n # Basic passing test\n rule_settings_params = {'agency_code': '097', 'file': 'B'}\n response = self.app.get('/v1/rule_settings/', rule_settings_params, headers={'x-session-id': self.session_id})\n\n self.assertEqual(response.status_code, 200)\n assert {'errors', 'warnings'} <= set(response.json.keys())", "def _get_conf():\n configs = [\"mds_cache_memory_limit\",\n \"mds_cache_reservation\",\n \"mds_health_cache_threshold\"]\n holder = {}\n for config in configs:\n cmd = \"sudo ceph daemon mds.\" \\\n \"$HOSTNAME config show | 
grep {}\".format(config)\n conf = model.run_on_unit(self.TESTED_UNIT, cmd)\n for i in (conf['Stdout'].replace('\"', '')\n .replace(',', '')\n .strip()\n .split(\"\\n\")):\n key, val = i.split(\":\")\n holder[key] = val.strip()\n return holder", "def discovery_data(request):\n file = request.param\n p = Path(file)\n if not p.is_absolute():\n p = Path(__file__).parent / \"fixtures\" / file\n\n with open(p) as f:\n return json.load(f)", "def case_sections(self, case, response):\n formatted = self.format_response_for_docs(response, case)\n base_path = os.path.join(self.raw_dir, case['name'])\n for phase in ('request', 'response'):\n is_json = (\n 'Content-Type: application/vnd.api+json' in\n formatted[phase].get('headers', ''))\n for section_type in ('headers', 'body'):\n ext = 'json' if (section_type == 'body' and is_json) else 'txt'\n path = base_path + '-%s-%s.%s' % (phase, section_type, ext)\n section = formatted[phase][section_type]\n yield phase, section_type, path, section", "def helper_config(request, def_type):\n pymodbus_apply_logging_config()\n _logger.setLevel(\"DEBUG\")\n datablock = ModbusSequentialDataBlock(0x00, [17] * 100)\n context = ModbusServerContext(\n slaves=ModbusSlaveContext(\n di=datablock, co=datablock, hr=datablock, ir=datablock, unit=1\n ),\n single=True,\n )\n cwd = os.getcwd().split(\"/\")[-1]\n path = \"../examples\" if cwd == \"test\" else \"examples\"\n cfg = {\n \"serial\": {\n \"srv_args\": {\n \"context\": context,\n \"framer\": ModbusRtuFramer,\n \"port\": \"socket://127.0.0.1:5020\",\n },\n \"cli_args\": {\n \"framer\": ModbusRtuFramer,\n \"port\": \"socket://127.0.0.1:5020\",\n \"timeout\": 0.2,\n },\n \"async\": {\n \"srv\": server.StartAsyncSerialServer,\n \"cli\": client.AsyncModbusSerialClient,\n },\n \"sync\": {\n \"srv\": server.StartSerialServer,\n \"cli\": client.ModbusSerialClient,\n },\n },\n \"tcp\": {\n \"srv_args\": {\n \"context\": context,\n \"framer\": ModbusSocketFramer,\n \"address\": (\"127.0.0.1\", 5020),\n \"allow_reuse_address\": True,\n },\n \"cli_args\": {\n \"framer\": ModbusSocketFramer,\n \"host\": \"127.0.0.1\",\n \"port\": 5020,\n \"timeout\": 0.2,\n },\n \"async\": {\n \"srv\": server.StartAsyncTcpServer,\n \"cli\": client.AsyncModbusTcpClient,\n },\n \"sync\": {\n \"srv\": server.StartTcpServer,\n \"cli\": client.ModbusTcpClient,\n },\n },\n \"tls\": {\n \"srv_args\": {\n \"context\": context,\n \"framer\": ModbusTlsFramer,\n \"address\": (\"127.0.0.1\", 5020),\n \"allow_reuse_address\": True,\n \"certfile\": f\"{path}/certificates/pymodbus.crt\",\n \"keyfile\": f\"{path}/certificates/pymodbus.key\",\n },\n \"cli_args\": {\n \"framer\": ModbusTlsFramer,\n \"host\": \"127.0.0.1\",\n \"port\": 5020,\n \"certfile\": f\"{path}/certificates/pymodbus.crt\",\n \"keyfile\": f\"{path}/certificates/pymodbus.key\",\n \"server_hostname\": \"localhost\",\n \"timeout\": 2,\n },\n \"async\": {\n \"srv\": server.StartAsyncTlsServer,\n \"cli\": client.AsyncModbusTlsClient,\n },\n \"sync\": {\n \"srv\": server.StartTlsServer,\n \"cli\": client.ModbusTlsClient,\n },\n },\n \"udp\": {\n \"srv_args\": {\n \"context\": context,\n \"framer\": ModbusSocketFramer,\n \"address\": (\"127.0.0.1\", 5020),\n },\n \"cli_args\": {\n \"framer\": ModbusSocketFramer,\n \"host\": \"127.0.0.1\",\n \"port\": 5020,\n \"timeout\": 0.2,\n },\n \"async\": {\n \"srv\": server.StartAsyncUdpServer,\n \"cli\": client.AsyncModbusUdpClient,\n },\n \"sync\": {\n \"srv\": server.StartUdpServer,\n \"cli\": client.ModbusUdpClient,\n },\n },\n }\n\n cur = cfg[request]\n cur_m 
= cur[def_type]\n return cur_m[\"srv\"], cur[\"srv_args\"], cur_m[\"cli\"], cur[\"cli_args\"]", "def get_section_config_ini(self, cfile, section, dict_format=False):\r\n\r\n config = self.get_config_ini(cfile)\r\n if dict_format:\r\n # Retorno um dicionario\r\n return dict(config.items(section.upper()))\r\n else:\r\n # Retorna um objeto config\r\n return config.items(section.upper())", "def configuration():", "def load_config(self, config_file, usage):\n config = configparser.ConfigParser()\n config.read(config_file)\n auth_id = config.get('SMARTY STREETS', 'auth_id' )\n auth_token = config.get('SMARTY STREETS', 'auth_token')\n api_credentials = StaticCredentials(auth_id, auth_token)\n client_builder = ClientBuilder(api_credentials)\n if usage == 'batch': \n client_builder.with_custom_header( {'Connection':'keep-alive'} )\n \n self.client = client_builder.build_us_street_api_client()", "def get_fields(request):\n\n json_resp = {}\n json_resp['fields'] = []\n json_resp['fields_to_ann'] = []\n all = request.GET.get('all',None)\n workpath = os.path.dirname(os.path.abspath(__file__)) # Returns the Path your .py file is in\n auto_request = request.GET.get('ns_id', None)\n report = request.GET.get('report', None)\n # print(request.session['report_type'])\n if report is not None or all == 'all':\n if report is not None:\n if report.startswith('PUBMED_'):\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n json_resp = get_fields_from_json()\n if all == 'all':\n # All the possible fields for every usecase (MANUAL CONFIGURATION)\n json_resp = get_fields_from_json()\n if Report.objects.filter(institute = 'PUBMED').exists():\n json_resp['all_fields'].extend(['title','abstract','volume','journal','year','authors']) #aggiungo pubmed solo in coda!\n else:\n if request.session['report_type'] == 'pubmed':\n json_resp['fields'] = ['volume','authors','year','journal']\n json_resp['fields_to_ann'] = ['title','abstract']\n else:\n # Fileds related exclusively to a usecase\n json_resp = get_fields_from_json_configuration(request.session['usecase'],request.session['institute'],request.session['language'])\n if request.session['mode'] == 'Robot' or auto_request == 'Robot':\n with open(os.path.join(workpath, './automatic_annotation/auto_fields/auto_fields.json')) as out:\n data = json.load(out)\n json_resp['fields_to_ann'] = data['extract_fields'][request.session['usecase']]\n for el in json_resp['fields_to_ann']:\n if el in json_resp['fields']:\n json_resp['fields'].remove(el)\n # print('FIELDS', json_resp)\n return JsonResponse(json_resp)" ]
[ "0.6424139", "0.610865", "0.6059112", "0.5978963", "0.5865252", "0.577705", "0.56624496", "0.55671096", "0.5566648", "0.55476516", "0.5478806", "0.54480094", "0.5438941", "0.5410219", "0.54032665", "0.5333807", "0.5330522", "0.53133196", "0.5285215", "0.5283924", "0.5283706", "0.5277842", "0.52734745", "0.5241377", "0.52363706", "0.5236073", "0.5196588", "0.51946366", "0.5180977", "0.5177833", "0.5177188", "0.51687914", "0.5166722", "0.51513016", "0.5136007", "0.51168996", "0.51131403", "0.51076263", "0.5106055", "0.5105266", "0.50959516", "0.50909555", "0.5088023", "0.5080207", "0.5064092", "0.5059339", "0.5052521", "0.5045766", "0.5045112", "0.5034787", "0.5026183", "0.50256395", "0.5019637", "0.50189924", "0.5006026", "0.5005644", "0.5003835", "0.49996504", "0.49986246", "0.499205", "0.49813455", "0.49769983", "0.49489018", "0.49453446", "0.49435484", "0.49235347", "0.49079055", "0.49044916", "0.49039528", "0.48997158", "0.48995143", "0.4894174", "0.48888323", "0.48852557", "0.48813584", "0.48791966", "0.48782203", "0.48778743", "0.4876095", "0.4875612", "0.48639184", "0.48618498", "0.48573714", "0.48531294", "0.48454672", "0.48452953", "0.4845116", "0.4840872", "0.48401916", "0.4831333", "0.48261324", "0.4825273", "0.48216358", "0.48189408", "0.48054788", "0.4795581", "0.47925475", "0.4792324", "0.47914013", "0.47870058" ]
0.5230246
26
read the maps and get the set of unique rs
def getLCD(lbase=[]):
    listmf = []
    rsdict = {}
    for i,basename in enumerate(lbase): # for each basename to be included
        mf = file('%s.map' % basename,'r').readlines()
        lmap = [x.strip().split() for x in mf]
        rslist = [x[1] for x in lmap] # chrom rs gendist physdist
        for x in lmap:
            rsdict[x[1]] = (x[0],int(x[3]),x[1]) # key by chrom,offset,rs
        setrs = set(rslist)
        listmf.append(setrs) # list of map lines for processing
    lcd = listmf.pop(0) # start with first - order doesn't matter
    for setrs in listmf:
        lcd = lcd & setrs # intersection
    lcd = list(lcd) # now have lowest common denom as a list of rs
    lcdmap = [rsdict[rs] for rs in lcd] # restore chrom,offset,rs for rs to keep
    lcdmap.sort() # now in genomic order
    print 'got lcdmap=',lcdmap[:10]
    return lcdmap # sorted common map
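A minimal usage sketch, assuming Python 2 (the function relies on the print statement and file()) and that each basename has a matching whitespace-delimited PLINK-style <basename>.map file (chrom, rs, gendist, physdist) on disk; the basenames below are hypothetical.

if __name__ == '__main__':
    # hypothetical basenames; each one must have a <basename>.map file next to the script
    lcdmap = getLCD(lbase=['study1', 'study2'])
    # each entry is (chrom, physical offset, rs), already sorted into genomic order
    for chrom, offset, rs in lcdmap[:5]:
        print chrom, offset, rs

Sorting the (chrom, offset, rs) tuples at the end is what restores genomic order after the set intersection has discarded any ordering of the shared rs IDs.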
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getSet(unique_name):", "def getSet(unique_name):", "def getSets(unique_name=None):", "def _findUniqueMappingValues(mapping):\n uniqueMappingValues = set()\n for entries in viewvalues(mapping):\n if len(entries) == 1:\n uniqueMappingValues.update(entries)\n return uniqueMappingValues", "def components(self) -> Iterable[Mapping[T, Set[T]]]:", "def uniqueResults( self, results ):\n rid_map = {}\n for r in results:\n rid_map[r.getRID()] = r\n return rid_map.values()", "def _findUniqueMappingKeys(mapping):\n\n uniqueMappingKeys = set()\n for key, entries in viewitems(mapping):\n if len(entries) == 1:\n uniqueMappingKeys.add(key)\n return uniqueMappingKeys", "def find_unique_elements(molecule_map):\n atoms = []\n for molec_name in molecule_map.keys():\n atoms += [subst['atom'] for subst in molecule_map[molec_name]]\n return set(atoms)", "def filter_otus_from_otu_map(input_otu_map_fp,\r\n output_otu_map_fp,\r\n min_count,\r\n min_sample_count=1):\r\n results = set()\r\n output_otu_map_f = open(output_otu_map_fp, 'w')\r\n for line in open(input_otu_map_fp, 'U'):\r\n fields = line.strip().split('\\t')\r\n sample_ids = set([e.split('_')[0] for e in fields[1:]])\r\n # only write this line if the otu has more than n sequences (so\r\n # greater than n tab-separated fields including the otu identifier)\r\n if (len(fields) > min_count) and (len(sample_ids) >= min_sample_count):\r\n output_otu_map_f.write(line)\r\n results.add(fields[0].split('\\t')[0])\r\n output_otu_map_f.close()\r\n return results", "def unique(self):\n # variables for uniques \n self._currentSet = 1\n self._uniqueValue = {}\n\n pd = self._dataTable\n for col in pd:\n arr = pd[col].unique()\n for i in arr:\n unique_entry = ((col,i),)\n self._uniqueValue[unique_entry] = 0 \n\n self._sets[self._currentSet] = self._uniqueValue", "def getSets():", "def keySet (map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['key']!=None and entry['key']!='__EMPTY__'):\n lt.addLast (ltset, entry['key'])\n return ltset", "def fastLoad(f_list):\n\n data_list = []\n t_1 = datetime.now()\n for i, f in enumerate(f_list):\n t_data = loadFile(f)\n data_list.extend(t_data)\n data_list = [dict(r) for r in set([tuple(d.items()) for d in data_list])]\n print i, datetime.now() - t_1, \"removing duplicates...\"\n print \"Done removing duplicates.\"\n return data_list", "def get_available_temporal_mapsets():\n global c_library_interface\n global message_interface\n\n mapsets = c_library_interface.available_mapsets()\n \n tgis_mapsets = {}\n\n for mapset in mapsets:\n mapset = mapset\n driver = c_library_interface.get_driver_name(mapset)\n database = c_library_interface.get_database_name(mapset)\n\n message_interface.debug(1, \"get_available_temporal_mapsets: \"\\\n \"\\n mapset %s\\n driver %s\\n database %s\"%(mapset,\n driver, database))\n if driver and database:\n # Check if the temporal sqlite database exists\n # We need to set non-existing databases in case the mapset is the current mapset\n # to create it\n if (driver == \"sqlite\" and os.path.exists(database)) or mapset == get_current_mapset() :\n tgis_mapsets[mapset] = (driver, database)\n\n # We need to warn if the connection is defined but the database does not\n # exists\n if driver == \"sqlite\" and not os.path.exists(database):\n message_interface.warning(\"Temporal database connection defined as:\\n\" + \\\n database + \"\\nBut database file does not exist.\")\n return tgis_mapsets", "def _mappingGetValueSet(mapping, 
keys):\n setUnion = set()\n for k in keys:\n setUnion = setUnion.union(mapping[k])\n return setUnion", "def mapper_get_items_init(self):\n if int(self.options.iteration) > 1:\n with open(self.options.f, 'r') as fh:\n self.frequent_items = set(fh.read().splitlines())\n else:\n self.frequent_items = {}", "def _GetStudyUIDMaps(has_study_uid=None):\n\n # Download UIDs for breast density 2 and 3.\n http = httplib2.Http(timeout=60, disable_ssl_certificate_validation=True)\n study_uid_to_series_uid = {}\n study_uid_to_label = {}\n for path in _LABEL_PATHS:\n resp, content = http.request(path, method=\"GET\")\n assert resp.status == 200, \"Failed to download label files from: \" + path\n r = csv.reader(content.decode(\"utf-8\").splitlines(), delimiter=\",\")\n header = next(r)\n breast_density_column = -1\n image_file_path_column = -1\n for idx, h in enumerate(header):\n if h in _BREAST_DENSITY_COLUMN:\n breast_density_column = idx\n if h in _IMAGE_FILE_PATH_COLUMN:\n image_file_path_column = idx\n assert breast_density_column != -1, \"breast_density column not found\"\n assert image_file_path_column != -1, \"image file path column not found\"\n for row in r:\n density = row[breast_density_column]\n if density != \"2\" and density != \"3\":\n continue\n dicom_uids = row[image_file_path_column].split(\"/\")\n study_instance_uid, series_instance_uid = dicom_uids[1], dicom_uids[2]\n if study_instance_uid in _EXCLUDED_STUDY_UIDS:\n continue\n if has_study_uid and has_study_uid != study_instance_uid:\n continue\n study_uid_to_series_uid[study_instance_uid] = series_instance_uid\n study_uid_to_label[study_instance_uid] = density\n return study_uid_to_series_uid, study_uid_to_label", "def load_srumid_lookups(database):\n id_lookup = {}\n #Note columns 0 = Type, 1 = Index, 2 = Value\n lookup_table = database.get_table_by_name('SruDbIdMapTable')\n column_lookup = dict([(x.name,index) for index,x in enumerate(lookup_table.columns)]) \n for rec_entry_num in range(lookup_table.number_of_records):\n bin_blob = smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdBlob'])\n if smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdType'])==3:\n bin_blob = BinarySIDtoStringSID(bin_blob)\n elif not bin_blob == \"Empty\":\n bin_blob = blob_to_string(bin_blob)\n id_lookup[smart_retrieve(lookup_table,rec_entry_num, column_lookup['IdIndex'])] = bin_blob\n return id_lookup", "def _get_unique_genres(connection):\n print('---Getting unique genres---')\n genreDict = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM shared_genres;\")\n res = cursor.fetchall()\n num_genres = 0\n for genre in res:\n if genre[1] not in genreDict:\n genreDict[genre[1]] = num_genres\n num_genres += 1\n return genreDict", "def download_all_maps(self):\n return self._download_all_maps_recur()", "def getObjectMaps(self,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n subset = {}\n for key in self.objectMaps.keys():\n if key[1] == toMod:\n subset[key[0]] = self.objectMaps[key]\n return subset", "def unique_rp(db):\n for rp in sorted(db['rp'].keys()):\n print(rp)", "def valueSet(map):\n ltset = lt.newList()\n for pos in range(lt.size(map['table'])):\n entry = lt.getElement (map['table'], pos+1)\n if (entry['value']!=None and entry['value']!='__EMPTY__'):\n lt.addLast (ltset, entry['value'])\n return ltset", "def ana_merge_senzory_map(datas):\n#TODO: improve senzory map merging\n return iter(datas.viewvalues()).next()['senzory_map']", "def unique_rows(self):\n return list(set([coord[0] for coord in 
self.landscape]))", "def uniquewords(self):\n vas = set({})\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.add(s_i)\n l_i = list(vas)\n self.print(l_i)\n self.write(l_i)\n logging.debug(\"Starting with to\")\n return l_i", "def read_all():\n return [pp_dict[key] for key in sorted(pp_dict.keys())]", "def get_sid_set(sources):\n sid_list = []\n for source_dict in sources:\n sid = source_dict['SID']\n sid_list.append(sid)\n sid_set = set(sid_list)\n\n assert len(sid_set) == len(sid_set), \"Duplicate SID detected\"\n return sid_set", "def load_mapping():\n return [l.strip() for l in open(ALL_URL_LIST)]", "def unique_values(self):\n for key in self.metadb.unique_values():\n yield key, self.datadb[key]", "def keys(self):\n return sorted(self._local_unique_map.keys())", "def load_unique_kmers(n, k):\t\n\thg38, hiv1 = load_kmer_data(k)\n\n\tkmers = set()\n\tif len(hg38)+len(hiv1) < n:\n\t\tprint(\"Not enough sequences! {} < {}!\".format(len(hg38)+len(hiv1), n))\n\telse:\n\t\ti = 0\n\t\twhile len(kmers) < n:\n\t\t\tkmers.add(hg38[i][0].upper())\n\t\t\tif len(kmers) < n:\n\t\t\t\tkmers.add(hiv1[i][0].upper())\n\t\t\ti += 1\n\treturn list(kmers)", "def get_cursor_values(self, keys: Set[str]) -> Mapping[str, str]:", "def record_sets_fetcher(record):\n return record.get(\"_oai\", {}).get(\"sets\", [])", "def init_sets(fastapath):\n st = set()\n with open (fastapath, 'r') as f:\n\n for rec in SeqIO.parse(f, 'fasta'):\n sq = str(rec.seq)\n st.add(sq)\n\n return st", "def createset(inputfile):\n\n movie = set()\n user = set()\n count = 0\n #open and read in data from file\n f = open(inputfile)\n for line in f:\n #adding all unique keys to setMovie\n movie.add(line.split()[1])\n #adding all unique users to setUser\n user.add(line.split()[0])\n count+=1\n f.close()\n return movie,user,count", "def import_nwgc_uuid_map(nwgc_uuid_file):\n nwgc_uuid = pd.read_excel(nwgc_uuid_file,\n usecols=[\"sample\", \"uuid\"],\n dtype={\"sample\": \"str\"}) \\\n .drop_duplicates(subset=\"sample\")\n nwgc_uuid.set_index(\"sample\", inplace=True)\n nwgc_uuid_map = nwgc_uuid.to_dict()[\"uuid\"]\n return nwgc_uuid_map", "def available_maps():\r\n existing_maps = os.listdir(\"maps\")\r\n # remove file extensions\r\n temp = []\r\n for map in existing_maps:\r\n map = re.sub(r\"(.+)\\..+\", r\"\\1\", map)\r\n temp.append(map)\r\n existing_maps = temp\r\n for i, map in enumerate(existing_maps):\r\n print(\"{} - {}\".format(i,map))\r\n return existing_maps", "def final_repset_from_iteration_repsets(repset_fasta_fs):\r\n observed = {}\r\n for repset_fasta_f in repset_fasta_fs:\r\n for otu_id, seq in parse_fasta(repset_fasta_f):\r\n o = otu_id.split()[0]\r\n if not o in observed:\r\n yield (otu_id, seq)\r\n observed[o] = None\r\n else:\r\n # we already have a representative for this otu id\r\n pass", "def tempmap():\n rand_number = [random.randint(0, 9) for i in range(6)]\n rand_number_str = ''.join(map(str, rand_number))\n mapname = 'temp_' + rand_number_str\n maplist = grass.read_command('g.list', type='vector', mapset='.').split()\n while mapname in maplist:\n rand_number = [random.randint(0, 9) for i in range(6)]\n rand_number_str = ''.join(map(str, rand_number))\n mapname = 'temp_' + rand_number_str\n maplist = grass.read_command('g.list', type='vector', mapset='.').split()\n return mapname", "def get_plwn2sumo_dict(self):\n if not os.path.exists(self.resources().mapping_sumo_file()):\n raise IOError(\n \"%s 
file not found!\" % \\\n self.resources().mapping_sumo_file()\n )\n\n plwn2sumo_dict = defaultdict(set)\n\n with open(self.resources().mapping_sumo_file()) as sumofile:\n next(sumofile)\n for line in sumofile:\n synset_id = int(line.strip().split(';')[0])\n sumo = line.strip().split(';')[-2]\n plwn2sumo_dict[sumo].add(synset_id)\n \n return plwn2sumo_dict", "def missing_mappings(self):\n return [ mapping for mapping in self.mapping_names() if not config.file_in_cache(self.name, self.observatory) ]", "def all_colormaps():\n maps = [name\n for name in cm.datad.keys()\n if not name.endswith(\"_r\")]\n maps.sort()\n return maps", "def scan(self):\n for fn in self.map:\n coords = list(self.map[fn].keys())\n coords.sort()\n for coord in coords:\n yield fn, coord, self.map[fn][coord]", "def pullSerializedAll(*keys):", "def getGIs(fileBlast6,uniquekeep = 1, fileout = \"gis.txt\"):\n mylist = open(fileBlast6,\"r\").readlines()\n querys = {}\n giset = set()\n fout = open(fileout,\"w\")\n for ele in mylist:\n gi = ele.split()[1].split(\"|\")[1]\n query = ele.split()[0]\n if query not in querys:\n querys[query] = 1\n else:\n querys[query] += 1\n if querys[query] <= uniquekeep:\n giset.add(gi)\n for gi in giset:\n fout.write(gi+\"\\n\")\n fout.close()", "def unique_rationales(smiles_list):\n visited = set()\n unique = []\n for smiles in smiles_list:\n mol = Chem.MolFromSmiles(smiles)\n root_atoms = 0\n for atom in mol.GetAtoms():\n if atom.GetAtomMapNum() > 0:\n root_atoms += 1\n atom.SetAtomMapNum(1)\n\n smiles = Chem.MolToSmiles(mol)\n if smiles not in visited and root_atoms > 0:\n visited.add(smiles)\n unique.append(smiles)\n return unique", "def get_resources(self):\n res = set()\n res.update(self.get_inputs())\n res.update(self.get_outputs())\n return res", "def unique_ssh_results(results):\n r = {}\n for k in results:\n r[results[k][0]] = True\n return r.keys()", "def get_dict(cleaned_docs):\n data = []\n for doc in cleaned_docs:\n data += doc\n return list(set(data))", "def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + 
\"_\" + position].update({\"unique\":False})\n\n return", "def completeMap(partialMapArr, setLen=-1):\n a = time.clock()\n fullMap = partialMapArr.copy()\n count = 0\n seen = {}\n for key in partialMapArr.keys():\n thisNumSets = partialMapArr[key]\n removals = []\n for i, setItem in enumerate(thisNumSets):\n tupleVersion = tuple(list(setItem))\n if(tupleVersion in seen):\n break\n else:\n seen[tupleVersion] = True\n #conditional to filter out all sets that are not a certain length\n if(len(setItem)==setLen or setLen==-1):\n for num in setItem:\n count+=1\n if(num==key):\n continue\n if(setItem not in fullMap[num]):\n fullMap[num].append(setItem)\n else:\n removals.append(i)\n removals.reverse()\n for item in removals: thisNumSets.pop(item)\n b = time.clock()\n\n #returns the map with duplicated values, the number of values seen (n for\n #O(n)), and the time elapsed to run the function\n return fullMap, count, (b-a)", "def LoadMapping(self, fname):\n\n M = [{} for i in range(N_ChanUIDS)]\n\n # Load Map:\n with open(fname, \"r\") as f:\n pass", "def getMasterMap(self,masterInfo):\n masterMap = [0]\n #--Map'em\n for mmName in masterInfo.masterNames:\n if mmName not in self.masterNames: \n raise MoshError(_(\"Misordered esm: %s should load before %s\") % (mmName, masterInfo.name))\n masterMap.append(self.masterNames.index(mmName)+1)\n #--Done\n return masterMap", "def get_sensor_dict():\n\n with open('last_seen.json') as json_file:\n stored_dict = json.load(json_file)\n\n new_list = []\n for dev in stored_dict['devices']:\n new_list.append(dev['id'])\n unique_list = list(set(new_list))\n\n return stored_dict, unique_list", "def _strip_map(mols):\n for m in mols:\n [a.ClearProp('molAtomMapNumber')\n for a in m.GetAtoms() if a.HasProp('molAtomMapNumber')]\n return mols", "def get_unique_hashes():\n return list( set( [ filename.split(\"_\")[0] for filename in os.listdir(CACHE_DIRECTORY) ] ) )", "def get_seqs_to_keep_lookup_from_fasta_file(fasta_f):\r\n return (\r\n set([seq_id.split()[0] for seq_id, seq in parse_fasta(fasta_f)])\r\n )", "def test_get_sam_ids(self):\r\n map_file = StringIO.StringIO(\"\"\"#SampleID\tCountry\tAgeYears\tFamily\tAgeCat\r\n h208A.1\tMalawi\t0.032854209\th208\tChild\r\n h301A.1\tMalawi\t0.05\th301\tChild\r\n h301B.1\tMalawi\t0.05\th301\tChild\r\n USinfTw20.1\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw20.2\tUSA\t0.083333333\tUSinfTw20\tChild\r\n USinfTw1.1\tUSA\t0.083333333\tUSinfTw1\tChild\r\n h10M\tMalawi\t26\th10\tAdult\r\n h68M\tMalawi\t26\th68\tAdult\r\n TS25\tUSA\t26\tUSts9\tAdult\r\n TS26\tUSA\t26\tUSts9\tAdult\"\"\")\r\n\r\n map_data, map_header, comments = parse_mapping_file(map_file)\r\n colorby = 'Country'\r\n cat = 'USA'\r\n primary_state = 'AgeCat:Child'\r\n ids1, ids2 = get_sam_ids(map_data, map_header, colorby, cat,\r\n primary_state, secondary_state=None)\r\n self.assertEqual(set(ids1),\r\n set(['USinfTw20.1', 'USinfTw20.2', 'USinfTw1.1']))\r\n self.assertEqual(set(ids2), set(['TS25', 'TS26']))", "def traverse_uris(uri):\n seen = set()\n uris_to_check = [uri]\n while len(uris_to_check) > 0: \n uri = uris_to_check.pop()\n if uri not in seen:\n seen.add(uri)\n for key in keys_for_uri[uri]:\n for uri2 in uris_for_key[key]:\n if uri2 not in seen:\n uris_to_check.append(uri2)\n \n return seen", "def read_all(self):\n def is_data(i):\n \"\"\"\n It checks if given key is different than added by system\n \"\"\"\n keys = ['_id', '_time']\n return all(i != k for k in keys)\n\n self.logger.log_reading()\n return simplejson.dumps([{i: x[i] for i in x if 
is_data(i)} for x in self.json_collection.find()])", "def geo_pull():\r\n with open('output_got.csv', 'r') as fin:\r\n tweet_data = list(csv.reader(fin, delimiter=';'))\r\n geo_set = {(row[GEOCOL],row[DATECOL]) for row in tweet_data[1:] if len(row[GEOCOL])<50 and len(row[GEOCOL])>1}\r\n #added row[DATECOL]^^\r\n # print(str(geo))\r\n return geo_set\r\n #set of locations\r", "def dataFromFile(fname):\n file_iter = open(fname, 'rU')\n for line in file_iter:\n line = line.strip().rstrip(',') # Remove trailing comma\n record = frozenset(line.split(','))\n yield record", "def get_all_jsons():\r\n res = get_all_mps_ids()\r\n for id in res.keys():\r\n get_mp_json_from_file(id)", "def uniqueDicts(obj):\n return [json.loads(d) for d in set(json.dumps(r, sort_keys=True) for o in obj)]", "def read_flat_map(filename,i_map=0) :\n hdul=fits.open(filename)\n w=WCS(hdul[0].header)\n\n maps=hdul[i_map].data\n ny,nx=maps.shape\n\n return w,maps", "def read_gzip_file_lines_into_set(filename):\n with gzip.open(filename, 'r') as file:\n return set([line.strip() for line in file.readlines()])", "def get_ROIs(self, base):\n locs3d = self.locs3d\n #print loc3d\n base_locs = locs3d[base]\n ROI_dic = dict((i, [Id]) for i,Id in enumerate(base))\n for i, loc in enumerate(locs3d):\n if i not in base:\n dist = np.sqrt(np.sum((base_locs - loc)**2, 1))\n min_i = np.argmin(dist)\n ROI_dic[min_i].append(i)\n out = ROI_dic.values()\n return out", "def map( self ) :\n\n self.readMap( )\n\n return( self.__map )", "def test_check_map_primer_pool(self):\r\n s = \"\"\"#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tX\\tDescription\r\n#fake data\r\nx\\tAA\\tAC\\t3\\tsample_x\r\ny\\t\"AC\"\\tAT,DC\\t4\\t\"sample_y\"\r\nz\\tGG\\tGC\\t5\\tsample_z\"\"\"\r\n f = StringIO(s)\r\n f.name = 'test.xls'\r\n headers, id_map, barcode_to_sample_id, warnings, errors, \\\r\n primer_seqs_lens, all_primers = check_map(f,\r\n disable_primer_check=False)\r\n\r\n self.assertEqual(\r\n barcode_to_sample_id,\r\n {'AA': 'x',\r\n 'AC': 'y',\r\n 'GG': 'z'})\r\n self.assertEqual(errors, [])\r\n self.assertEqual(warnings, [])\r\n\r\n # Returns all possible primers with lengths associated.\r\n expected_all_primers = {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}\r\n self.assertEqual(all_primers, expected_all_primers)\r\n\r\n # Returns all primers associated with each barcode.\r\n expected_primer_seqs_lens = {'AA': {'AC': 2}, 'GG': {'GC': 2},\r\n 'AC': {'AC': 2, 'GC': 2, 'AT': 2, 'TC': 2}}\r\n\r\n self.assertEqual(primer_seqs_lens, expected_primer_seqs_lens)", "def unique(fname):\n addresses = []\n with gzip.open(fname, \"rb\") as f:\n lines = f.readlines()\n for line in lines:\n #print(\"[\"+line.split()[1]+\"]\")\n if line.split()[0] not in addresses:\n addresses.append(line.split()[0])\n return addresses", "def map():", "def extract_map(infile, outfile):\n subprocess.check_call(['mudraw', '-w', '1800', '-h', '1800', '-o', outfile, infile, '1'])", "def get_all_strains_per_unique_layer(self, strain_map):\n # get the position indices of all unique layers in the sample structure\n positions = self.S.get_all_positions_per_unique_layer()\n strains = []\n\n for value in positions.values():\n strains.append(np.sort(np.unique(strain_map[:, value].flatten())))\n\n return strains", "def _map_invarioms(self):\n self.map = {}\n for invariom in self.invariom_list:\n kill = False\n for molecule in self.sorted_molecules:\n for atom in molecule.atoms:\n if invariom in atom.invarioms:\n self.map[invariom] = molecule.name\n kill = True\n break\n if kill:\n break", "def 
get_same_mapping(self):\n sames = {}\n for clue in self.clueset:\n if clue[\"type\"] == SAME:\n sames[clue[\"vals\"][0]] = clue[\"vals\"][1]\n sames[clue[\"vals\"][1]] = clue[\"vals\"][0]\n\n return sames", "def mapdata():\n return getmapdata(db, MyTable)", "def unique_residue_ids(self): \n # Convenience abbreviations.\n identifiers = self.identifiers\n res_ids = self.res_ids\n res_cnt = self.res_cnt \n # Preparing the list of unique residue identifiers.\n # In the end it should be: res_cnt == len(res_ids)-1.\n # The 'elif' line is controlling that only unique\n # identifiers are collected.\n for identifier in identifiers:\n if len(res_ids) == 0:\n # Require 'deepcopy', otherwise constant change\n # of 'res_ids[res_cnt]' with 'identifier'.\n res_ids.append(deepcopy(identifier))\n elif identifier[1] == res_ids[res_cnt][1]: \n pass\n else:\n res_ids.append(deepcopy(identifier))\n res_cnt += 1 \n # Return assignments to object scope.\n self.res_ids = res_ids\n self.res_cnt = res_cnt", "def get_used_strings(file):\n\n result = set()\n with open(file, 'r') as src:\n for line in src.readlines():\n find_strings(line, result)\n return result", "def read_mapfiles():\n mappings = []\n\n # matches stuff like\n # \"/GLOW/*\" glow\n # \"/cms/Role=pilot/Capability=NULL\" cmspilot\n # and extracts the stuff between the quotes, and the username in the second field\n regex = re.compile(r'^\\s*[\"](/[^\"]+)[\"]\\s+([A-Za-z0-9_]+)\\s*(?:$|[#])')\n for filepath in [DEFAULT_VOMS_MAPFILE, VOMS_MAPFILE]:\n try:\n with open(filepath, \"r\", encoding=\"latin-1\") as filehandle:\n for line in filehandle:\n match = regex.match(line)\n if not match:\n continue\n else:\n mappings.append(Mapping(match.group(1), match.group(2)))\n except EnvironmentError as err:\n if err.errno == errno.ENOENT:\n continue\n else:\n raise\n\n return mappings", "def resource_map(self):", "def _read_names_file(self):\n filename = os.path.join(self.path, 'names.csv')\n lookup = collections.defaultdict(list)\n with open(filename) as f:\n reader = csv.reader(f)\n for line in reader:\n matches = set(line)\n for match in matches:\n lookup[match].append(matches)\n return lookup", "def read_file_keys(fname):\n with open(fname, 'r') as infile:\n fkeys = infile.read().split('\\n')\n return set(fkeys)", "def get_existing_hashes(results):\n hashes = list(set(iter_existing_hashes(results)))\n return hashes", "def list_imdbs():\n return __sets.keys()", "def list_imdbs():\n return __sets.keys()", "def list_imdbs():\n return __sets.keys()", "def _map___iter__(self):\n return self.iterkeys()", "def keySet (self) -> StringSet:\n\n Logging.trace(\">>\")\n result = set(self._keyToValueMap.keys())\n Logging.trace(\"<<: %r\", result)\n return result", "def sets(self):\n\n return self._collection.distinct('set')", "def select_unique_ids(self):\n utk = self.metadata\n utk_ids = []\n for gg in set(utk['gender']):\n for rg in set(utk['race']):\n for ag in set(utk['age']):\n try:\n intersection_ids = list(utk[np.logical_and(utk['gender'] == gg,\n np.logical_and(utk['race'] == rg,\n utk['age'] == ag))]['filename'])\n if len(intersection_ids) <= CAP:\n utk_ids += intersection_ids\n else:\n x = list(np.random.choice(intersection_ids, CAP, replace=False))\n utk_ids += x\n\n except:\n continue\n self.unique_ids = utk_ids\n return utk_ids", "def test_write_otu_map(self):\r\n write_otu_map(self.otu_map1, self.tmp_fp1)\r\n actual = fields_to_dict(open(self.tmp_fp1))\r\n self.files_to_remove.append(self.tmp_fp1)\r\n self.assertEqual(actual, dict(self.otu_map1))", "def 
getTransListSet(self, filePath):\n transListSet = []\n with open(filePath, 'r') as file:\n reader = csv.reader(file, delimiter=',')\n for line in reader:\n transListSet.append(set(line)) \n return transListSet", "def read_set_from_file(filename):\n collection = set()\n with open(filename, \"r\", encoding=\"utf-8\") as file_:\n for line in file_:\n collection.add(line.rstrip())\n return collection", "def read_cuis(file_path):\n\n file_as_string = open(file_path).read()\n return set(file_as_string.split())", "def getUniChemData(self, inchiKeyList):\n mapD = {\n 1: {\"name\": \"chembl\", \"baseUrl\": \"https://www.ebi.ac.uk/chembl/\", \"entryUrl\": \"https://www.ebi.ac.uk/chembldb/compound/inspect/\"},\n 3: {\"name\": \"pdb\", \"baseUrl\": \"http://www.ebi.ac.uk/pdbe/\", \"entryUrl\": \"http://www.ebi.ac.uk/pdbe-srv/pdbechem/chemicalCompound/show/\"},\n 2: {\"name\": \"drugbank\", \"baseUrl\": \"http://drugbank.ca/\", \"entryUrl\": \"http://www.drugbank.ca/drugs/\"},\n 5: {\"name\": \"pubchem_dotf\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov/sources/sources.cgi\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 4: {\"name\": \"gtopdb\", \"baseUrl\": \"http://www.guidetopharmacology.org\", \"entryUrl\": \"http://www.guidetopharmacology.org/GRAC/LigandDisplayForward?ligandId=\"},\n 11: {\"name\": \"ibm\", \"baseUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/\", \"entryUrl\": \"http://www-935.ibm.com/services/us/gbs/bao/siip/nih/?sid=\"},\n 6: {\"name\": \"kegg_ligand\", \"baseUrl\": \"http://www.genome.jp/kegg/ligand.html\", \"entryUrl\": \"http://www.genome.jp/dbget-bin/www_bget?\"},\n 9: {\"name\": \"zinc\", \"baseUrl\": \"http://zinc15.docking.org\", \"entryUrl\": \"http://zinc15.docking.org/substances/\"},\n 8: {\"name\": \"nih_ncc\", \"baseUrl\": \"http://nihsmr.evotec.com/evotec/\", \"entryUrl\": \"\"},\n 10: {\"name\": \"emolecules\", \"baseUrl\": \"https://www.emolecules.com/\", \"entryUrl\": \"https://www.emolecules.com/cgi-bin/more?vid=\"},\n 12: {\"name\": \"atlas\", \"baseUrl\": \"http://www.ebi.ac.uk/gxa/home\", \"entryUrl\": \"http://www.ebi.ac.uk/gxa/query?conditionQuery=\"},\n 7: {\"name\": \"chebi\", \"baseUrl\": \"http://www.ebi.ac.uk/chebi/downloadsForward.do\", \"entryUrl\": \"http://www.ebi.ac.uk/chebi/searchId.do?chebiId=CHEBI%3A\"},\n 14: {\n \"name\": \"fdasrs\",\n \"baseUrl\": \"http://fdasis.nlm.nih.gov/srs/srs.jsp\",\n \"entryUrl\": \"http://fdasis.nlm.nih.gov/srs/ProxyServlet?mergeData=true&objectHandle=DBMaint&APPLICATION_NAME=fdasrs&actionHandle=default&nextPage=jsp/srs/ResultScreen.jsp&TXTSUPERLISTID=\",\n },\n 15: {\"name\": \"surechembl\", \"baseUrl\": \"https://www.surechembl.org/search/\", \"entryUrl\": \"https://www.surechembl.org/chemical/\"},\n 21: {\"name\": \"pubchem_tpharma\", \"baseUrl\": \"http://www.thomson-pharma.com/\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/substance/\"},\n 22: {\"name\": \"pubchem\", \"baseUrl\": \"http://pubchem.ncbi.nlm.nih.gov\", \"entryUrl\": \"http://pubchem.ncbi.nlm.nih.gov/compound/\"},\n 27: {\"name\": \"recon\", \"baseUrl\": \"https://vmh.uni.lu\", \"entryUrl\": \"https://vmh.uni.lu/\"},\n 28: {\"name\": \"molport\", \"baseUrl\": \"https://www.molport.com/shop/index\", \"entryUrl\": \"https://www.molport.com/shop/molecule-link/\"},\n 31: {\n \"name\": \"bindingdb\",\n \"baseUrl\": \"https://www.bindingdb.org/bind/index.jsp\",\n \"entryUrl\": \"http://www.bindingdb.org/bind/chemsearch/marvin/MolStructure.jsp?monomerid=\",\n },\n 41: {\"name\": \"swisslipids\", \"baseUrl\": 
\"http://www.swisslipids.org/\", \"entryUrl\": \"http://www.swisslipids.org/\"},\n 29: {\"name\": \"nikkaji\", \"baseUrl\": \"http://jglobal.jst.go.jp/en/\", \"entryUrl\": \"http://jglobal.jst.go.jp/en/redirect?Nikkaji_No=\"},\n 32: {\"name\": \"comptox\", \"baseUrl\": \"https://comptox.epa.gov/dashboard/\", \"entryUrl\": \"https://comptox.epa.gov/dashboard/\"},\n 33: {\"name\": \"lipidmaps\", \"baseUrl\": \"http://www.lipidmaps.org\", \"entryUrl\": \"http://www.lipidmaps.org/data/LMSDRecord.php?LMID=\"},\n 35: {\"name\": \"carotenoiddb\", \"baseUrl\": \"http://carotenoiddb.jp/index.html\", \"entryUrl\": \"http://carotenoiddb.jp/Entries/\"},\n 36: {\"name\": \"metabolights\", \"baseUrl\": \"http://www.ebi.ac.uk/metabolights/\", \"entryUrl\": \"http://www.ebi.ac.uk/metabolights/\"},\n 37: {\"name\": \"brenda\", \"baseUrl\": \"https://www.brenda-enzymes.org/index.php\", \"entryUrl\": \"https://www.brenda-enzymes.org/ligand.php?brenda_ligand_id=\"},\n 17: {\"name\": \"pharmgkb\", \"baseUrl\": \"https://www.pharmgkb.org\", \"entryUrl\": \"https://www.pharmgkb.org/drug/\"},\n 18: {\"name\": \"hmdb\", \"baseUrl\": \"http://www.hmdb.ca\", \"entryUrl\": \"http://www.hmdb.ca/metabolites/\"},\n 24: {\n \"name\": \"nmrshiftdb2\",\n \"baseUrl\": \"http://nmrshiftdb.nmr.uni-koeln.de/portal/media-type/html/user/anon/page/default.psml/js_pane/P-Home\",\n \"entryUrl\": \"http://nmrshiftdb.org/molecule/\",\n },\n 25: {\"name\": \"lincs\", \"baseUrl\": \"http://www.lincsproject.org/\", \"entryUrl\": \"http://identifiers.org/lincs.smallmolecule/\"},\n 39: {\"name\": \"chemicalbook\", \"baseUrl\": \"https://www.chemicalbook.com\", \"entryUrl\": \"https://www.chemicalbook.com/ChemicalProductProperty_EN_\"},\n 20: {\"name\": \"selleck\", \"baseUrl\": \"http://www.selleckchem.com\", \"entryUrl\": \"http://www.selleckchem.com/products/\"},\n 23: {\"name\": \"mcule\", \"baseUrl\": \"https://mcule.com\", \"entryUrl\": \"https://mcule.com/\"},\n 26: {\"name\": \"actor\", \"baseUrl\": \"https://actor.epa.gov\", \"entryUrl\": \"http://actor.epa.gov/actor/chemical.xhtml?casrn=\"},\n 34: {\"name\": \"drugcentral\", \"baseUrl\": \"http://drugcentral.org\", \"entryUrl\": \"http://drugcentral.org/drugcard/\"},\n 38: {\"name\": \"rhea\", \"baseUrl\": \"http://www.rhea-db.org\", \"entryUrl\": \"http://www.rhea-db.org/searchresults?q=CHEBI:\"},\n }\n oD = {}\n try:\n for ky in inchiKeyList:\n unc = unichem_client # pylint: disable=no-member\n # unc.set_format(\"json\")\n uDL = unc.get(ky)\n if uDL:\n qD = {}\n for uD in uDL:\n if \"src_id\" in uD and int(uD[\"src_id\"]) in mapD:\n qD[mapD[int(uD[\"src_id\"])][\"name\"]] = uD[\"src_compound_id\"]\n if qD:\n oD[ky] = qD\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n return oD", "def create_city_map(n: int) -> set:\n return set((row, col) for row in range(0, n) for col in range(0, n))", "def extract_set(file):\n s = set([])\n\n f = open(file, 'r')\n for line in f:\n flist = extract_words(line)\n\n for word in flist:\n if(word not in s):\n s.add(word)\n\n f.close()\n\n return s", "def read_map(self, map_path):\n with open(map_path, mode='rb') as f:\n index_id_map = pickle.load(f)\n return index_id_map", "def unique(self):\n return self.d_series.map_partitions(\n lambda s: s.list.unique(), meta=self.d_series._meta\n )", "def findUniqueResults(ids, results):\n ordered = OrderedDict(sorted(data.items(), key=lambda t: t[0]))\n return list(ordered.values())" ]
[ "0.65541935", "0.65541935", "0.63683355", "0.6343676", "0.60978246", "0.6096743", "0.6089678", "0.595537", "0.58867276", "0.5861434", "0.577347", "0.57008076", "0.56791395", "0.5647053", "0.56417656", "0.5591158", "0.5569142", "0.5543664", "0.5527229", "0.5526804", "0.550279", "0.5468678", "0.5454197", "0.5446342", "0.5445241", "0.5442133", "0.54127926", "0.5408065", "0.5390716", "0.5382519", "0.5375124", "0.5348258", "0.5343608", "0.5335556", "0.5333381", "0.5332004", "0.5319507", "0.5305272", "0.53022087", "0.5297897", "0.52944416", "0.52863795", "0.52773786", "0.5270312", "0.5251218", "0.52481854", "0.5238983", "0.52372366", "0.5236583", "0.5218227", "0.5215704", "0.52156323", "0.5204224", "0.5203454", "0.51999", "0.5196438", "0.51854134", "0.5178634", "0.5171269", "0.51694226", "0.51634556", "0.51582754", "0.515427", "0.51384753", "0.51223", "0.5116891", "0.5116538", "0.5115481", "0.5109489", "0.5106255", "0.5098785", "0.50984883", "0.50939405", "0.5081391", "0.5074487", "0.50728905", "0.5069962", "0.50699556", "0.50683594", "0.50662565", "0.50524896", "0.5048816", "0.50446606", "0.5036293", "0.5035912", "0.5035912", "0.5035912", "0.5026482", "0.5020548", "0.5016942", "0.50126636", "0.50040674", "0.50024056", "0.5002258", "0.5002011", "0.500048", "0.49981755", "0.4995577", "0.49946877", "0.49907586", "0.49896818" ]
0.0
-1
return founder and offspring subset of basename.ped containing only the markers in lcd
lcd contains a sorted list of (chrom,offset,rs) for the common snps in all maps
we need to keep genotypes all in the same column order
def subsetPed(basename="",lcdmap = [],faff='1', ofaff='2'):
    mf = file('%s.map' % basename,'r').readlines()
    lmap = [x.strip().split() for x in mf]
    rscols = {} # lookup marker table
    colrs = [] # lookup rs from column
    for i,m in enumerate(lmap): # get columns to keep in the order we want them
        rscols[m[1]] = i # keep track of where each rs is in this map
        colrs.append(m[1]) # and keep the list of rs for tracking alleles
    wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep
    print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \
        (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)
    pf = file('%s.ped' % basename,'r')
    ogeno = [] # offspring new lines
    fgeno = [] # founders
    oped = [] # for pedigrees
    fped = []
    rsadict = {} # keep a count of alleles - seems to be a problem
    for i,l in enumerate(pf):
        if (i+1) % 500 == 0:
            print '%s at line %d' % (basename,i+1)
        ll = l.strip().split()
        ped = ll[:6]
        founder = (ll[2] == '0' and ll[3] == '0')
        aff = faff
        if not founder:
            aff = ofaff
        ped[5] = aff # adjust as needed
        if founder:
            fped.append(ped)
        else:
            oped.append(ped)
        gt = ll[6:]
        geno = []
        for snp in wewant: # columns in order
            thisrs = colrs[snp]
            base = snp*2
            g1 = gt[base]
            g2 = gt[base+1]
            geno.append(g1)
            geno.append(g2)
            if not rsadict.get(thisrs,None):
                rsadict[thisrs] = {}
            if g1 <> '0':
                if not rsadict[thisrs].get(g1,None):
                    rsadict[thisrs][g1] = 1
                else:
                    rsadict[thisrs][g1] += 1
            if g2 <> '0':
                if not rsadict[thisrs].get(g2,None):
                    rsadict[thisrs][g2] = 1
                else:
                    rsadict[thisrs][g2] += 1
        keepgt = array.array('c',geno)
        if founder:
            fgeno.append(keepgt)
        else:
            ogeno.append(keepgt)
    print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))
    return fped,oped,fgeno,ogeno,rsadict
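For context, a minimal self-contained sketch of the operation this query/document pair describes: intersect several plink-style .map files and trim each .ped line to the shared markers in one fixed column order. It is not part of the dataset record; the function names, and the assumption that .map rows are "chrom rs gendist physdist" and .ped rows are 6 pedigree fields plus two allele columns per marker, are illustrative only.

def common_map(basenames):
    # Intersect the rs IDs of every <basename>.map and return them as a sorted
    # list of (chrom, offset, rs) tuples - the role lcdmap plays above.
    shared, info = None, {}
    for bn in basenames:
        rows = [line.split() for line in open('%s.map' % bn)]
        info.update(dict((r[1], (r[0], int(r[3]), r[1])) for r in rows))
        rs_here = set(r[1] for r in rows)
        shared = rs_here if shared is None else shared & rs_here
    return sorted(info[rs] for rs in shared)

def subset_ped_line(ped_line, map_rows, lcdmap):
    # Trim one .ped line to the markers in lcdmap, emitting genotype columns in
    # lcdmap order so every merged file ends up with the same column layout.
    col_of = dict((row[1], i) for i, row in enumerate(map_rows))  # rs -> marker index
    fields = ped_line.split()
    pedigree, genotypes = fields[:6], fields[6:]
    kept = []
    for chrom, offset, rs in lcdmap:
        k = col_of[rs]
        kept.extend(genotypes[2 * k:2 * k + 2])  # both alleles for this marker
    return pedigree, kept

With these two helpers, a merge step reduces to writing out pedigree + kept genotypes for every line of every input .ped, which is what the larger subsetPed/mergePed pair does.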
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getLCD(lbase=[]):\r\n listmf = []\r\n rsdict = {}\r\n for i,basename in enumerate(lbase): # for each basename to be included\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf] \r\n rslist = [x[1] for x in lmap] # chrom rs gendist physdist\r\n for x in lmap:\r\n rsdict[x[1]] = (x[0],int(x[3]),x[1]) # key by chrom,offset,rs\r\n setrs = set(rslist)\r\n listmf.append(setrs) # list of map lines for processing\r\n lcd = listmf.pop(0) # start with first - order doesn't matter\r\n for setrs in listmf:\r\n lcd = lcd & setrs # intersection\r\n lcd = list(lcd) # now have lowest common denom as a list of rs\r\n lcdmap = [rsdict[rs] for rs in lcd] # restore chrom,offset,rs for rs to keep\r\n lcdmap.sort() # now in genomic order\r\n print 'got lcdmap=',lcdmap[:10]\r\n return lcdmap # sorted common map\r", "def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0):\r\n lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files\r\n print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5])\r\n cfped = []\r\n coped = []\r\n cfgeno = []\r\n cogeno = []\r\n allrsa = {}\r\n ignorers = {}\r\n for i,basename in enumerate(bnlist):\r\n fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i])\r\n print '%s gave %d fgeno' % (basename,len(fgeno))\r\n for rs in trsadict.keys():\r\n tk = trsadict[rs].keys()\r\n if len(tk) > 2:\r\n print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs])\r\n if not allrsa.get(rs,None):\r\n allrsa[rs] = {}\r\n for a in tk:\r\n if not allrsa[rs].get(a,None):\r\n allrsa[rs][a] = trsadict[rs][a]\r\n else:\r\n allrsa[rs][a] += trsadict[rs][a]\r\n tk = allrsa[rs].keys()\r\n if len(tk) > 2 and not ignorers.get(rs,None): # new\r\n #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs])\r\n ignorers[rs] = rs\r\n cfped += fped\r\n coped += oped\r\n cfgeno += fgeno\r\n cogeno += ogeno\r\n print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno))\r\n # now have offspring and founder rows in lcdmap order\r\n # write map file\r\n print '### found %d markers > 2 alleles' % (len(ignorers.keys()))\r\n keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)]\r\n newmap = ['\\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs\r\n f = file('%s.map' % newbasename,'w')\r\n f.write('%s\\n' % '\\n'.join(newmap))\r\n f.close()\r\n for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno[i] = g # replace\r\n print 'cfgeno converted'\r\n if not fo: # not founders only - note arrays are not lists!\r\n cfped += copy.copy(coped) #\r\n del coped\r\n for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers\r\n gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers])\r\n g = array.array('c',gs) # good ones\r\n cfgeno.append(g) # extend founders\r\n del cogeno\r\n print 'after if not fo now have %d cfgeno' % (len(cfgeno))\r\n f = file('%s.ped' % newbasename,'w')\r\n for n,ped in enumerate(cfped):\r\n l = ' '.join(ped + list(cfgeno[n]))\r\n if n % 100 == 0 and n > 0:\r\n print 'writing line %d' % n\r\n f.write(l)\r\n f.write('\\n')\r\n f.close()\r\n print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)", "def getStartEndCoords(fileName1, fileName2):\n 
uniqueNames = dict()\n with open(fileName1, \"r\", encoding=\"utf8\") as f1:\n f1 = csv.reader(f1, delimiter='\\t')\n for ls in f1:\n start = ls[0][4:].strip()\n normStart = norm.normalize_alphabet(start)\n start_reg = ls[1]#.strip().split(\",\")\n startKey = ','.join([normStart] + start_reg.strip().split(\",\"))\n startKey_orig = ','.join([start] + start_reg.strip().split(\",\"))\n end = ls[2][4:].strip()\n normEnd = norm.normalize_alphabet(end)\n end_reg = ls[3]#.strip().split(\",\")\n endKey = ','.join([normEnd] + end_reg.strip().split(\",\"))\n endKey_orig = ','.join([end] + end_reg.strip().split(\",\"))\n\n with open(fileName2, \"r\", encoding=\"utf8\") as jsonFile: \n allData = json.load(jsonFile)\n for d in allData[\"features\"]:\n # populates the uniqueNames dictionary for start and end toponyms\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n uniqueNames.update(populateDict(start, start_reg, d))\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n uniqueNames.update(populateDict(end, end_reg, d))\n if not any(x in uniqueNames for x in [startKey, startKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [startKey, startKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat'] = \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if startKey_orig not in uniqueNames:\n tmp = {}\n tmp[startKey_orig] = {}\n tmp[startKey_orig]['lat']= \"null\"\n tmp[startKey_orig]['lon'] = \"null\"\n tmp[startKey_orig]['region'] = start_reg\n tmp[startKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n\n if not any(x in uniqueNames for x in [endKey, endKey_orig]):\n for uri in gv.found_URIs:\n if any(x in uri for x in [endKey, endKey_orig])\\\n and re.match(r'\\d', gv.found_URIs[uri]) == None:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat'] = \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = gv.found_URIs[uri]\n uniqueNames.update(tmp)\n if endKey_orig not in uniqueNames:\n tmp = {}\n tmp[endKey_orig] = {}\n tmp[endKey_orig]['lat']= \"null\"\n tmp[endKey_orig]['lon'] = \"null\"\n tmp[endKey_orig]['region'] = end_reg\n tmp[endKey_orig]['cornuUri'] = \"null\"\n uniqueNames.update(tmp)\n return uniqueNames", "def getORFs(catFile, queryName, geneDir):\n\n\toutORFraw = geneDir+catFile.split(\"/\")[-1].split(\".\")[0]+\"_allORFs.fasta\"\n\tlogger = logging.getLogger(\"main.orf\")\n\t\n\tlogger.debug(\"getorf -sequence {:s} -outseq {:s} -table 0 -find 3 -noreverse\".format(catFile, outORFraw))\n\tcmd(\"getorf -sequence {:s} -outseq {:s} -table 0 -find 3 -noreverse\".format(catFile, outORFraw), False)\n\t\n\tdId2ORFs = defaultdict(list)\n\tf = SeqIO.parse(open(outORFraw),'fasta')\n\tfor fasta in f:\n\t\tfname, fseq = fasta.id, str(fasta.seq)\n\t\tif len(fname.split(\"_\")) > 2:\n\t\t\tfname2 = \"_\".join(fname.split(\"_\")[0:-1])\n\t\telse:\n\t\t\tfname2 = fname.split(\"_\")[0]\n\t\tdId2ORFs[fname2].append(fseq)\n\t\n\tdId2Longest = {}\n\tfor k, v in dId2ORFs.items():\n\t\tdId2Longest[k] = max(v, key=len)\n\t\t\n\t# delete duplicate sequences\n\tdRev = {}\n\tfor k, v in dId2Longest.items():\n\t\tdRev.setdefault(v, set()).add(k)\n\t\t\n\tAllDupl = [values for key, values in dRev.items() if len(values) > 1]\n\tn = 0\n\tfor dupl in AllDupl:\n\t\tspecies = set([x.split(\"_\")[0] for x in 
dupl])\n\t\t\n\t\tfor sp in species:\n\t\t\tif queryName in dupl:\n\t\t\t\tfirstOcc = queryName\n\t\t\telse:\n\t\t\t\tlOcc = [x for x in dupl if sp in x]\n\t\t\t\t\n\t\t\t\tif len(lOcc) > 0:\n\t\t\t\t\tfirstOcc = lOcc[0]\n\t\t\t\telse:\n\t\t\t\t\tfirstOcc = str(lOcc)\n\t\t\t\t\t\n\t\t\tdupl.remove(firstOcc)\n\t\t\n\t\tfor i in dupl:\n\t\t\tdId2Longest.pop(i, None)\n\t\t\tn += 1\n\t\t\tlogger.debug(\"Deleted sequence {:s} (duplicate)\".format(i))\n\t\t\n\tlogger.info(\"Deleted {} sequences as duplicates\".format(n))\n\t\n\toutORF = outORFraw.replace(\"_allORFs.fasta\",\"_longestORFs.fasta\")\n\n\twith open(outORF, \"w\") as outO:\n\t outO.write(FastaResFunc.dict2fasta(dId2Longest))\n\t outO.close()\n\t \n\tlogger.info(\"Extracted longest ORFs: {:s}\".format(outORF))\n\n\treturn(outORF)", "def getReadOnGeneFile(rnameList, len_param):\n log.info(\"Select reads that are on genes\")\n for ch in rnameList:\n tcount = 0\n \n geneS = {}#gene start\n geneE = {}#gene end\n g_direct = {}#gene direction\n readS = {}#read start\n readE = {}#read End\n readDic = {}#readDic[id] = read\n sortGeneId = {}\n sortReadId = {}\n genefile = os.path.join(working_dir, 'removeOverlap.'+ch+'.gff')\n readfile = os.path.join(working_dir, 'MappedRead.'+ch+'.sam')\n rgfile = os.path.join(working_dir, 'ReadOnGeneList.'+ch+'.tab')\n log.info(\"Generate \" + rgfile)\n f=open(rgfile, \"w\") \n \n geneS, geneE, g_direct = getGFFStartEnd(genefile, len_param)\n sortGeneId = sortId(geneS)\n \n readS, readE,readDic = getSAMStartEnd(readfile)\n sortReadId = sortId(readS)\n ys = 0\n \n for x in range(len(sortGeneId)):\n \n gID = sortGeneId[x]#gene id\n gs = geneS.get(gID)#gene start\n ge = geneE.get(gID)#gene end\n gd = g_direct.get(gID)\n glineList = []\n sameG = False\n \n for y in range(ys,len(sortReadId)):\n rID = sortReadId[y]\n rs = readS.get(rID)\n re = readE.get(rID)\n if rs >= gs:\n if re <= ge:\n f.write(gID)\n f.write('\\t')\n f.write(str(gs))\n f.write('\\t')\n f.write(str(ge))\n f.write('\\t')\n f.write(gd)\n f.write('\\t')\n f.write(rID)\n f.write('\\t')\n f.write(str(rs))\n f.write('\\t')\n f.write(str(re))\n f.write('\\t')\n f.write(readDic.get(rID))\n elif re > ge:\n ys = y\n break\n elif rs > ge:\n ys = y\n break\n f.close()", "def get_reps_filenames(celltype): \n prefix = os.path.join(os.getcwd(),'peaks',celltype,'MACS2')\n reps = os.listdir(prefix)\n return [os.path.join(prefix,rep) for rep in reps if rep.endswith('sorted.bdg')]", "def split_per(folderin, folderout, split_col='ECO_ID', colNms=['i_h100','i_cd',\n 'doy','i_wflen','i_acqdate','b1','vcf','ECO_NAME','ECO_ID','BIOME','geometry']):\n\n split_files = glob.glob(folderin + '*.shp')\n\n for filename in split_files:\n print(filename)\n basename = os.path.splitext(os.path.basename(filename))[0]\n dfa = gpd.read_file(filename)\n df = dfa.astype({split_col: 'int32'}) \n ecoNames = list(np.unique(df[split_col]))#get list of unique ecoregions \n \n for eco in ecoNames:\n #create new df with just columns I want\n df2 = gpd.GeoDataFrame(df, columns=colNms)\n ID = str(eco)\n df_eco = df.loc[df2[split_col]==eco, colNms]\n df_eco.to_file(folderout + '/{}_eco_{}.shp'.format(basename, ID))", "def F_subset_OMHCHO(self,path):\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n 
days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('OMI-Aura_L2-OMHCHO_'+DATE.strftime(\"%Ym%m%d\")+'t*.he5')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n maxMDQF = self.maxMDQF\n maxEXTQF = self.maxEXTQF\n \n data_fields = ['AMFCloudFraction','AMFCloudPressure','AirMassFactor','Albedo',\\\n 'ReferenceSectorCorrectedVerticalColumn','ColumnUncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n data_fields_l2g = ['cloud_fraction','cloud_pressure','amf','albedo',\\\n 'column_amount','column_uncertainty','MainDataQualityFlag',\\\n 'PixelCornerLatitudes','PixelCornerLongitudes','FittingRMS']\n geo_fields = ['Latitude','Longitude','TimeUTC','SolarZenithAngle',\\\n 'TerrainHeight','XtrackQualityFlagsExpanded']\n geo_fields_l2g = ['latc','lonc','TimeUTC','SolarZenithAngle',\\\n 'terrain_height','XtrackQualityFlagsExpanded']\n swathname = 'OMI Total Column Amount HCHO'\n \n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading'+fn_dir)\n outp_he5 = self.F_read_he5(fn_dir,swathname,data_fields,geo_fields,data_fields_l2g,geo_fields_l2g)\n f1 = outp_he5['SolarZenithAngle'] <= maxsza\n f2 = outp_he5['cloud_fraction'] <= maxcf\n f3 = outp_he5['MainDataQualityFlag'] <= maxMDQF \n f4 = outp_he5['latc'] >= south\n f5 = outp_he5['latc'] <= north\n tmplon = outp_he5['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_he5['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_he5['UTC_matlab_datenum'] <= self.end_matlab_datenum\n f10 = outp_he5['XtrackQualityFlagsExpanded'] <= maxEXTQF\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9 & f10\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n \n l2g_data0 = {}\n \n Lat_lowerleft = outp_he5['PixelCornerLatitudes'][0:-1,0:-1][validmask]\n Lat_upperleft = outp_he5['PixelCornerLatitudes'][1:,0:-1][validmask]\n Lat_lowerright = outp_he5['PixelCornerLatitudes'][0:-1,1:][validmask]\n Lat_upperright = outp_he5['PixelCornerLatitudes'][1:,1:][validmask] \n Lon_lowerleft = outp_he5['PixelCornerLongitudes'][0:-1,0:-1][validmask]\n Lon_upperleft = outp_he5['PixelCornerLongitudes'][1:,0:-1][validmask]\n Lon_lowerright = outp_he5['PixelCornerLongitudes'][0:-1,1:][validmask]\n Lon_upperright = outp_he5['PixelCornerLongitudes'][1:,1:][validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_he5.keys():\n if key not in {'MainDataQualityFlag','PixelCornerLatitudes',\\\n 'PixelCornerLongitudes','TimeUTC','XtrackQualityFlagsExpanded'}:\n l2g_data0[key] = outp_he5[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def split_decode_file():\n # split files by chromosome\n header = []\n current_chrom = 'chr1'\n # file_template = decode_folder + '/{}.deCODE_2019.GRCh38.txt'\n file_template = decode_folder + '/{}.deCODE_2019_hg19.txt'\n decode_file = decode_folder + '/aau1043_DataS3_hg19_liftOver.bed'\n w = open(file_template.format(current_chrom), 
'a')\n print('NOTE: appending to map files, not overwriting. may cause duplicates')\n with open(decode_file, 'r') as f:\n for line in f:\n # save the header info\n if line.startswith('#'):\n header.append(line)\n # save the column labels\n elif line.startswith('Chr'):\n header.append('# ' + line)\n # write header to first file now\n w.write(''.join(header))\n # the remaining lines are data\n else:\n # get the chromosome for the current line\n ch = line.split()[0]\n # if the chromosome matches the open file, write to it\n if ch == current_chrom:\n w.write(line)\n # if a new chromosome arises, switch to a new writefile\n else:\n w.close()\n current_chrom = ch\n w = open(file_template.format(current_chrom), 'a')\n # write header to file\n w.write(''.join(header))\n w.write(line)\n\n # close the last open file\n w.close()", "def subFarms(self,partition,full_name=None):\n if self.inUse.value() is None: self.load()\n got = []\n for i in xrange(len(self.inUse.data)):\n if self.inUse.data[i]==partition:\n if full_name:\n got.append(self.name+'_'+self.subfarms.data[i])\n else:\n got.append(self.subfarms.data[i])\n return got", "def F_subset_S5PHCHO(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_OFFL_L2__HCHO___'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n # not sure about cloud fraction\n # the time_utc string is empty?! why are you doing this to the user!\n data_fields = ['/PRODUCT/SUPPORT_DATA/INPUT_DATA/cloud_fraction_crb',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column',\\\n '/PRODUCT/formaldehyde_tropospheric_vertical_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def get_orfs(genome, min_num_aa):\n allowed = \"ATGC\"\n if not isinstance(genome, str) or len(genome) == 0 or not all(c in allowed for c in genome):\n raise TypeError\n start_codon = \"ATG\"\n stop_codon = ['TAA', 'TAG', 'TGA']\n ref_dict = {\"T\" : \"A\", \"A\" : \"T\", \"G\" : \"C\", \"C\" : \"G\"}\n amino_dict = {\n 'L' : ['CTC', 'CTT', 'CTA', 'CTG', 'TTA', 'TTG'],\n 'S' : ['TCA', 'TCT', 'TCC', 'TCG', 'AGC', 'AGT'],\n 'R' : ['CGA', 'CGC', 'CGT', 'CGG', 'AGA', 'AGG'],\n 'V' : ['GTA', 'GTG', 'GTC', 'GTT'],\n 'P' : ['CCC', 'CCA', 'CCG', 'CCT'],\n 'T' : ['ACC', 'ACG', 'ACT', 'ACA'],\n 'A' : ['GCA', 'GCC', 'GCG', 'GCT'],\n 'G' : ['GGA', 'GGC', 'GGT', 'GGG'],\n 'I' : ['ATA', 'ATC', 'ATT'],\n 'F' : ['TTT', 'TTC'],\n 'Y' : ['TAT', 'TAC'],\n 'H' : ['CAC', 'CAT'],\n 'Q' : ['CAG', 'CAA'],\n 'N' : ['AAC', 'AAT'],\n 'K' : ['AAA', 'AAG'],\n 'D' : ['GAC', 'GAT'],\n 'E' : ['GAA', 'GAG'],\n 'C' : ['TGC', 'TGT'],\n 'M' : ['ATG'],\n 'W' : ['TGG']\n\n }\n comp_genome = \"\"\n for stra in genome:\n comp_genome += ref_dict[stra]\n main_orfs = find_orfs(genome, start_codon, stop_codon, min_num_aa, amino_dict, False)\n comp_orfs = find_orfs(comp_genome[::-1], start_codon, stop_codon, min_num_aa, amino_dict, True)\n circular_orfs = find_cir_orfs(genome, main_orfs, start_codon, stop_codon, min_num_aa, amino_dict, False)\n \n circular_orfs_comp = find_cir_orfs(comp_genome[::-1], comp_orfs, start_codon, stop_codon, min_num_aa, amino_dict, True)\n \n for main_orf in main_orfs:\n for cir_orf in circular_orfs:\n if main_orf[0] <= cir_orf[1] and main_orf[1] <= cir_orf[1] or len(main_orf) == 5:\n main_orfs.remove(main_orf)\n for comp_orf in comp_orfs:\n for cir_orf in circular_orfs_comp:\n if comp_orf[1] == cir_orf[1] or len(comp_orf) == 5:\n comp_orfs.remove(comp_orf)\n\n final_orf = main_orfs + comp_orfs + circular_orfs + circular_orfs_comp\n #print(len(comp_orfs))\n 
\n \n \n return final_orf", "def create_file_list_popdiag(case,workdir):\n indir = os.path.join('/',workdir)\n allfiles = os.listdir(indir)\n\n suffix = ('-01.nc','-02.nc','-03.nc','-04.nc','-05.nc','-06.nc', \\\n '-07.nc','-08.nc','-09.nc','-10.nc','-11.nc','-12.nc')\n\n filelist = [os.path.join(indir,file) for file in allfiles\n if file.startswith(case) and file.endswith(suffix)]\n\n filelist.sort()\n return filelist", "def F_subset_S5PNO2(self,path): \n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_RPRO_L2__NO2____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n maxsza = self.maxsza\n maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/DETAILED_RESULTS/cloud_fraction_crb_nitrogendioxide_window',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_albedo_nitrogendioxide_window',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time_utc',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column',\\\n '/PRODUCT/nitrogendioxide_tropospheric_column_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['cloud_fraction','latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','albedo','latc','lonc','qa_value','time_utc',\\\n 'column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n f1 = outp_nc['SolarZenithAngle'] <= maxsza\n f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f1 & f2 & f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def read_files():\n with open(\"CvixLerC9.loc\") as loc, open(\"CvixLerC9.qua\") as qua:\n qua_file = (qua.read().split('\\n'))\n qua_file = qua_file[8:-1]\n new_qua = []\n for q in qua_file:\n new_qua.append(q.split('\\t')) # [['1', '1.279502474'], ['3', '0.303712231']....]\n\n new_loc = {}\n header = ''\n read = False\n for i in loc:\n i = i.replace(\"\\n\", '')\n if read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n if \"(a,b)\" in i:\n header = i\n read = True\n else:\n read = False\n\n elif read:\n for j in i:\n if \" \" != j:\n if header in new_loc.keys():\n new_loc[header].append(j)\n else:\n new_loc[header] = [j]\n\n return new_loc, new_qua", "def mkglob(fullpaths: list, trim=False) -> str:\n string_list = []\n glob = None\n for fname in fullpaths:\n if trim:\n fname = re.sub(r\"^.*/(.*)$\", r\"\\1\", fname)\n # fname = re.sub(r\"^(.*)\\.fits?(\\.fz)*$\", r\"\\1\", fname)\n fname = re.sub(r\"^([^\\.]*)\\..*$\", r\"\\1\", fname) # trim suffix\n string_list.append(fname)\n logging.debug(\"string_list[]={}\".format(string_list))\n if len(string_list) == 1:\n glob = string_list[0]\n elif len(string_list) > 1:\n # title is longest common substring array\n # joined with *'s to look like a glob pattern\n ss_arr = []\n get_lcs_array(string_list, ss_arr, 0, \"\", 2)\n if ss_arr:\n glob = \"{}\".format(\"*\".join(ss_arr))\n if not re.match(ss_arr[0], string_list[0]):\n glob = \"*{}\".format(glob)\n if not re.search(r\"{}$\".format(ss_arr[-1]), string_list[0]):\n glob = \"{}*\".format(glob)\n return glob", "def getGFFStartEnd(file, len_param):\n dicS = {}\n dicE = {}\n direct = {}\n for line in open(file):\n itemList = line[:-1].split('\\t')\n start = int(itemList[3])-len_param\n if start <0:\n start = 0\n end = int(itemList[4])+len_param\n #id = 
getsubString(itemList[8][4:],';') # ToDo: need to check for other species\n id = itemList[8][itemList[8].find('=')+1:itemList[8].find(';')]\n dicS[id]= start\n dicE[id]= end\n direct[id] = itemList[6]\n return dicS,dicE,direct", "def generate_figures_and_xls_all_strains(outdir, cols_starts, region2data, ext, xls, group2pos, feature_names, samples):\n all_freqs = []\n # concatenate all pos and samples into one dataframe\n dframes = []\n for ri, (ref, pos) in enumerate(region2data.keys()): #regions): #[3]#; print(ref, pos, mt)\n mer, calls = region2data[(ref, pos)]\n for c, s in zip(calls, samples): \n df = pd.DataFrame(c, columns=feature_names)\n df[\"Strain\"] = s\n df[\"chr_pos\"] = \"%s:%s\"%(ref, pos)\n dframes.append(df)\n # read all tsv files\n df = pd.concat(dframes).dropna().reset_index()\n chr_pos, strains = df[\"chr_pos\"].unique(), df[\"Strain\"].unique() \n # compare individual methods\n for clf, method in (\n (KMeans(n_clusters=2), \"KMeans\"), \n (KNeighborsClassifier(), \"KNN\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"GMM+eIF\"), \n (GaussianMixture(random_state=0, n_components=2), \"GMM\"), \n (AgglomerativeClustering(n_clusters=2), \"AggClust\"), \n #(OneClassSVM(), \"OCSVM\"), \n (IsolationForest(random_state=0), \"IF\"), \n #(iso_new.iForest(ntrees=100, random_state=0), \"eIF\"), \n (RandomForestClassifier(), \"RF\"), \n ):\n fname = method\n for i, cols_start in enumerate(cols_starts, 1):\n results = []\n feat_name = \"_\".join(cols_start)\n fname = \"%s.%s\"%(method, feat_name); print(fname)\n outfn = os.path.join(outdir, \"%s.%s\"%(fname, ext))\n # narrow down the features to only signal intensity & trace\n cols = list(filter(lambda n: n.startswith(cols_start), feature_names))#; print(cols) #, \"DT\"\n # compare all samples to 0%\n s0 = samples[0]\n for s in samples[3:]: \n with np.errstate(under='ignore'):\n if \"+\" in method:\n clf2_name = method.split(\"+\")[-1]\n results += get_mod_freq_two_step(df, cols, chr_pos, [s0, s], feat_name, \n OFFSET=0.5, clf2_name=clf2_name, clf2=clf)\n elif method in (\"KNN\", \"RF\"):\n results += get_mod_freq_clf_train_test(df, cols, chr_pos, [s0, s], samples[1:3], clf, feat_name)\n else:\n results += get_mod_freq_clf(df, cols, chr_pos, [s0, s], clf, feat_name)\n \n # and store mod_freq predicted by various methods\n freqs = pd.DataFrame(results, columns=[\"chr_pos\", \"features\", \"mod_freq wt\", \"mod_freq strain\", \"strain\"])\n freqs[\"diff\"] = freqs.max(axis=1)-freqs.min(axis=1); freqs\n for name, pos in group2pos.items(): #((\"negative\", negatives), (\"pU\", pU_pos), (\"Nm\", Nm_pos)):\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"group\"] = name\n #freqs.to_csv(outfn, sep=\"\\t\"); freqs.head()\n freqs.to_excel(xls, fname, index=False)\n # plot differences between methods\n for group, pos in group2pos.items():\n freqs.loc[freqs[\"chr_pos\"].isin(pos), \"modification\"] = group\n #return freqs\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))#, sharey=\"all\")\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#f8786fff\", \"#7aae02ff\", \"#00bfc2ff\", \"#c67afeff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"pU\")], ax=ax1)\n sns.barplot(x=\"chr_pos\", y=\"mod_freq strain\", hue=\"strain\", edgecolor=\"white\", palette=[\"#ed823aff\", \"#1c6ca9ff\", \"#35d1bbff\", \"#c978fdff\"], \n data=freqs[(freqs[\"features\"]==feat_name)&(freqs[\"group\"]==\"Nm\")], ax=ax2)\n ax1.set_ylabel(\"Per-site stoichiometry\"); ax2.set_ylabel(\"\")\n 
ax1.get_legend().remove(); ax2.get_legend().remove()#ax1.legend([]); ax2.legend([])\n ax1.set_ylim(0, 1); ax2.set_ylim(0, 1); #ax2.set(aspect=1.7)\n ax1.set_title(\"pU modifications\"); ax2.set_title(\"Nm modifications\")\n fig.suptitle(fname)\n fig.savefig(outfn)\n plt.close() # clear axis\n freqs[\"name\"] = fname\n all_freqs.append(freqs)\n return all_freqs", "def get_keys(filen, flist): \n if (filen in flist[0]):\n key1 = 'PSTH_STIM'\n key2 = 'ELEC_'\n key3 = '_TRIAL_'\n elif (filen in flist[1]) or (filen in flist[2]):\n key1 = 'PSTH'\n key2 = ''\n key3 = '_'\n elif (filen in flist[3]) or (filen in flist[4]):\n key1 = 'Stim'\n key2 = 'Elec'\n key3 = 'Repet'\n return key1, key2, key3", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def get_unique_snps(self):\n\n for chromosome in self.snpsites.keys():\n\n for position in self.snpsites[chromosome].keys():\n for filenumber in range(len(self.vcffilenames)):\n\n if (\n self.snpsites[chromosome][position][filenumber] == True\n and sum(self.snpsites[chromosome][position]) == 1\n ): # First any(array) finds\n self.snp_positions[self.vcffilenames[filenumber]][chromosome][\n 
position\n ].update({\"unique\": True})\n elif (\n sum(self.snpsites[chromosome][position]) >= 2\n ): # there might be snp at same position but with different alt base\n\n snp_index = [\n i\n for i, j in enumerate(self.snpsites[chromosome][position])\n if j == True\n ]\n\n totalindex = len(snp_index)\n # Lets check the alt base in these vcf files using index\n # lets get array of alt bases from each file\n alt_snps = []\n for index in snp_index:\n alt_snps.append(\n self.snp_positions[self.vcffilenames[index]][\n chromosome\n ][position][\"alt\"]\n )\n\n # get the counts of the elements\n\n counts = self.count_list_elements_occurrences(alt_snps)\n\n for index in range(len(counts)):\n if counts[index] == 1:\n # this is unique, so occurred once\n self.snp_positions[self.vcffilenames[snp_index[index]]][\n chromosome\n ][position].update(\n {\"unique\": True}\n ) # vcffilenames[snp_index[index]] = this will be the filename\n # print(\"this is unique\", vcffilenames[snp_index[index]], chromosome, position, self.snp_positions[vcffilenames[snp_index[index]]][chromosome][position])\n\n # else:\n # \tvcf_database[\"self.snp_positions\"][chromosome + \"_\" + position].update({\"unique\":False})\n\n return", "def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks", "def full_chromosomes(reader):\n for line in reader.header.get_lines(\"contig\"):\n if line.id in CHROMS:\n name = line.id\n length = line.length or 1_000_000_000\n yield \"{}:{}-{}\".format(name, 1, length)", "def getReadSamFile(read_file,rnameList):\n size = len(rnameList)\n prev = 0\n ends = range(0, size, 20)\n ends += [size]\n ends.pop(0)\n \n \n \n for i in ends:\n chrs = rnameList[prev:i]\n f = []\n ch_p = ''\n jj = 0\n for j in range(0,i-prev):\n samfile = os.path.join(working_dir, 'MappedRead.'+chrs[j]+'.sam')\n log.info('Generating ' + samfile)\n f.append(open(samfile, \"w\"))\n for line in open(read_file, \"r\"):\n \n itemList = line[:-1].split('\\t')\n \n if len(itemList) < 11:\n continue\n #print itemList\n if itemList[0][0:1] == '@':\n continue\n line_ch = itemList[2]\n if line_ch == '*':\n continue\n if int(itemList[1]) & 0b100 != 0:\n continue\n \n if ch_p != line_ch:\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n jj = j\n ch_p = line_ch\n continue\n #end for j in range(0,i-prev):\n elif ch_p == line_ch:\n f[jj].write(line)\n '''\n for j in range(0,i-prev):\n if chrs[j] == line_ch:\n f[j].write(line)\n continue\n '''\n for fp in f:\n fp.close()\n prev = i", "def create_file_list(case):\n for server in ['bonaire','barbados','caiapo']:\n for basedir in ['data0/ivan/archive','data1/ivan/archive',\n 'data2/ivan/archive','data3/ivan/archive',\n '/bonaire/data2/data/SODA-POP','data0',\n '/barbados/data3/CCSM3-BGC']:\n if 'SODA-POP' in basedir:\n path = 
os.path.join('/',server,basedir,case)\n elif 'CCSM3-BGC' in basedir:\n path = os.path.join('/',server,basedir,case,'ocn/hist')\n else:\n path = os.path.join('/',server,basedir,case,'ocn2')\n\n if os.path.isdir(path):\n \t\tindir = path\n \t\tallfiles = os.listdir(indir)\n else:\n continue\n\n filelist = [os.path.join(indir,file) for file in allfiles\n if file.endswith('.nc')]\n filelist.sort()\n return filelist", "def _collect_reads(self, wildcards, _library_name, prefix):\n folder_name = get_ngs_library_folder_name(self.parent.sheets, wildcards.library_name)\n pattern_set_keys = (\"right\",) if prefix.startswith(\"right-\") else (\"left\",)\n seen = []\n for _, path_infix, filename in self.path_gen.run(folder_name, pattern_set_keys):\n path = os.path.join(self.base_path_in, path_infix, filename).format(**wildcards)\n if path in seen:\n print(\"WARNING: ignoring path seen before %s\" % path, file=sys.stderr)\n else:\n seen.append(path)\n yield path", "def processFiles(fileName):\n print fileName\n count_t1 = 0\n inFile=open(fileName,'r')\n all_angleList = Counter()\n rep_angleList = Counter()\n all_lengthsList = Counter()\n maxDist_List = Counter()\n global xCord, yCord, zCord\n aminoAcidName={}\n xCord={}\n yCord={}\n zCord={}\n seq_number={}\n counter=0\n for i in inFile:\n if (i[0:6].rstrip()==\"NUMMDL\"):\n numOfModels=i[10:14].rstrip()\n if ((i[0:6].rstrip()==\"ENDMDL\")or (i[0:6].rstrip()=='TER')):\n break\n if (i[0:6].rstrip()==\"MODEL\" and int(i[10:14].rstrip())>1):\n break\n \n if(i[0:4].rstrip())==\"ATOM\" and(i[13:15].rstrip())==\"CA\" and(i[16]=='A'or i[16]==' ')and i[17:20]!= \"UNK\" :\n aminoAcidName[counter]=int(aminoAcidLabel[i[17:20]])\n xCord[counter]=(float(i[30:38]))\n yCord[counter]=(float(i[38:46]))\n zCord[counter]=(float(i[46:54]))\n seq_number[counter]=str(i[22:27])\n counter+=1\n\n protLen=len(yCord)\n initialLabel=[]\n sortedLabel=[]\n sortedIndex=[]\n outDist={}\n for m in range(0,3):\n initialLabel.append(0)\n sortedLabel.append(0)\n sortedIndex.append(0)\n\n for i in range(0,protLen-2):\n for j in range(i+1,protLen-1):\n for k in range(j+1, protLen):\n global i1,j1,k1\n i1=i\n j1=j\n k1=k\n keepLabelIndex={}\n keepLabelIndex[aminoAcidName[i]]=i\n keepLabelIndex[aminoAcidName[j]]=j\n keepLabelIndex[aminoAcidName[k]]=k\n initialLabel[0]=aminoAcidName[i]\n initialLabel[1]=aminoAcidName[j]\n initialLabel[2]=aminoAcidName[k]\n sortedLabel=list(initialLabel)\n sortedLabel.sort(reverse=True)\n\n #Perform Rule- based labelling\n\n if (sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n dist1_2Temp=calcDist(i,j)\n dist1_3Temp=calcDist(i,k)\n dist2_3Temp=calcDist(j,k)\n if dist1_2Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=j\n indexOf2=k\n elif dist1_3Temp>=(max(dist1_2Temp,dist1_3Temp,dist2_3Temp)):\n indexOf0=i\n indexOf1=k\n indexOf2=j\n else:\n indexOf0=j\n indexOf1=k\n indexOf2=i\n elif(aminoAcidName[i]!=aminoAcidName[j])and(aminoAcidName[i]!=aminoAcidName[k]) and(aminoAcidName[j]!=aminoAcidName[k]): \n for index_ in range(0,3):\n sortedIndex[index_]=keepLabelIndex[sortedLabel[index_]]\n indexOf0=sortedIndex[0]\n indexOf1=sortedIndex[1]\n indexOf2=sortedIndex[2]\n elif(sortedLabel[0]==sortedLabel[1])and(sortedLabel[1]!=sortedLabel[2]):\n indexOf2=keepLabelIndex[sortedLabel[2]]\n indices=indexFind(indexOf2,i,j,k)\n a=indexOf2\n b=indices[0]\n c=indices[1]\n dist1_3Temp=calcDist(b,a)\n dist2_3Temp=calcDist(c,a)\n if dist1_3Temp>=dist2_3Temp:\n indexOf0=indices[0]\n indexOf1=indices[1] \n else:\n indexOf0=indices[1]\n 
indexOf1=indices[0]\n elif(sortedLabel[0]!=sortedLabel[1])and(sortedLabel[1]==sortedLabel[2]):\n indexOf0=keepLabelIndex[sortedLabel[0]]\n indices=indexFind(indexOf0,i,j,k)\n if calcDist(indexOf0,indices[0])>= calcDist(indexOf0,indices[1]):\n indexOf1=indices[0]\n indexOf2=indices[1] \n else:\n indexOf2=indices[0]\n indexOf1=indices[1]\n dist01=calcDist(indexOf0,indexOf1)\n s2=dist01/2\n dist02=calcDist(indexOf0,indexOf2)\n s1=dist02\n dist12=dist01\n dist03=calcDist(indexOf1,indexOf2)\n\n # All lengths calculation \n all_lengthsList[round(dist01,round_off_to)] += 1\n all_lengthsList[round(dist02,round_off_to)] += 1\n all_lengthsList[round(dist03,round_off_to)] += 1\n\n maxDist_List[round(max(dist01,dist02,dist03),round_off_to)] +=1\n\n s3=(((xCord[indexOf0]+xCord[indexOf1])/2-xCord[indexOf2])**2\n +((yCord[indexOf0]+yCord[indexOf1])/2-yCord[indexOf2])**2\n +((zCord[indexOf0]+zCord[indexOf1])/2-zCord[indexOf2])**2)**0.5\n \n \n Theta1=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14\n if Theta1<=90:\n all_angleList[round(Theta1,round_off_to)] +=1\n rep_angleList[round(Theta1,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta1),round_off_to)] +=1\n rep_angleList[round(abs(180-Theta1),round_off_to)] +=1\n \n #if Theta1>90: \n # Theta1=abs(180-Theta1)\n #print 'Second Theta1, ',Theta1\n #Theta 2\n dist02=calcDist(indexOf1,indexOf0)\n s1=dist02\n dist01=calcDist(indexOf1,indexOf2)\n s2=dist01/2\n s3=(((xCord[indexOf1]+xCord[indexOf2])/2-xCord[indexOf0])**2\n +((yCord[indexOf1]+yCord[indexOf2])/2-yCord[indexOf0])**2\n +((zCord[indexOf1]+zCord[indexOf2])/2-zCord[indexOf0])**2)**0.5\n \n Theta2=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta2 > 90:\n # Theta2 = abs(180-Theta2)\n if Theta2<=90:\n all_angleList[round(Theta2,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta2),round_off_to)] +=1\n\n #Theta 3\n dist02=calcDist(indexOf2,indexOf1)\n s1=dist02\n dist01=calcDist(indexOf2,indexOf0)\n s2=dist01/2\n s3=(((xCord[indexOf2]+xCord[indexOf0])/2-xCord[indexOf1])**2+\n ((yCord[indexOf2]+yCord[indexOf0])/2-yCord[indexOf1])**2+\n ((zCord[indexOf2]+zCord[indexOf0])/2-zCord[indexOf1])**2)**0.5\n \n Theta3=180*(math.acos((s1**2-s2**2-s3**2)/(2*s2*s3)))/3.14 \n #if Theta3 > 90:\n # Theta3 = abs(180-Theta3)\n if Theta3<=90:\n all_angleList[round(Theta3,round_off_to)] +=1\n else:\n all_angleList[round(abs(180-Theta3),round_off_to)] +=1\n # Either writting output to a file or using dictionary or \n # counter will save you from memory exceptions in this case.\n #all_angleList[round(Theta1,round_off_to)] +=1\n #all_angleList[round(Theta2,round_off_to)] +=1\n #all_angleList[round(Theta3,round_off_to)] +=1\n\n #rep_angleList[round(Theta1,round_off_to)] +=1\n\n count_t1 = count_t1+1\n\n print 'count_t1:',count_t1\n\n return [all_angleList,rep_angleList,all_lengthsList,maxDist_List]", "def _get_sensor_col_files(self, gas, loc):\n sub = os.path.join(self.GasNames[gas], self.Locs[loc])\n files = os.listdir(os.path.join(self.data_location, sub))\n files.sort()\n return (sub, files)", "def _build_found_list(self, filenames):\n return sorted(\n ('FOUND_FILENAME', os.path.normpath(f)) for f in filenames)", "def get_glob_strings(subdirglob):\n dirname = path.dirname(subdirglob)\n basename = path.basename(subdirglob)\n assert (((\"_M1_\" in subdirglob) or (\"_M2_\" in subdirglob)) or (\"_S_\" in subdirglob)), \\\n (\"_M1_ or _M2_ not in subdirglob, cant differentiate between M1 and M2, aborting.\"\n f\"glob: {subdirglob}\")\n if (\"*\" not in subdirglob) and (\"_S_\" not in 
basename):\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif (\"_M1_\" or \"_M2_\") in basename:\n newbasename = basename.replace(\"_M2_\", \"_M1_\"), basename.replace(\"_M1_\", \"_M2_\")\n return path.join(dirname, newbasename[0]), path.join(dirname, newbasename[1])\n elif \"_S_\" in basename:\n return basename", "def get_overlaps(file_name):\r\n\r\n place = {}\r\n size = {}\r\n sap = {}\r\n overlapping = []\r\n active_list = []\r\n max_width = 0\r\n\r\n with open(file_name + \".scl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if line.split()[0] == \"Sitespacing\":\r\n sitespacing = line.split()[2]\r\n if line.split()[0] == \"SubrowOrigin\":\r\n starting_x = line.split()[2]\r\n ending_x = int(starting_x) + int(sitespacing) * int(line.split()[5])\r\n if ending_x > max_width:\r\n max_width = ending_x\r\n\r\n divider = max_width // 10\r\n\r\n with open(file_name + \".nodes\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if len(line.split()) == 3:\r\n size[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n\r\n with open(file_name + \".pl\") as f:\r\n for i, line in enumerate(f):\r\n\r\n line = line.strip()\r\n if line:\r\n if re.match(r'[a-z]{1}[0-9]+', line.split()[0]):\r\n if line.split()[0] in size:\r\n place[line.split()[0]] = [line.split()[1], line.split()[2]]\r\n sap_num = int(line.split()[1]) // divider\r\n if sap_num not in sap.keys():\r\n sap[sap_num] = []\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]), int(line.split()[2]),\r\n \"start\"])\r\n\r\n sap[sap_num].append([line.split()[0], int(line.split()[1]),\r\n int(line.split()[1]) + int(size[line.split()[0]][0]),\r\n int(line.split()[2]) + int(size[line.split()[0]][1]), \"end\"])\r\n\r\n for lista in sap.values():\r\n lista.sort(key=lambda x: x[3])\r\n lista.sort(key=lambda x: x[4], reverse=True)\r\n for element in lista:\r\n if element[4] == \"start\":\r\n if len(active_list) == 0:\r\n active_list.append(element[0])\r\n else:\r\n for node in active_list:\r\n if int(place[node][0]) < int(place[element[0]][0]) + int(size[element[0]][0]) \\\r\n and int(place[node][0]) + int(size[node][0]) > int(place[element[0]][0]) \\\r\n and int(place[node][1]) < int(place[element[0]][1]) + int(size[element[0]][1]) \\\r\n and int(place[node][1]) + int(size[node][1]) > int(place[element[0]][1]):\r\n overlap = (node, element[0])\r\n overlapping.append(overlap)\r\n active_list.append(element[0])\r\n else:\r\n active_list.remove(element[0])\r\n return overlapping", "def get_files(start_str = \"sim_\"):\n n = len(start_str)\n file_list = [f for f in os.listdir(in_path) if f[0:n] == start_str]\n return file_list", "def gather_initial_fullnames():\n\n infullnames = []\n for (dirpath, _, filenames) in os.walk('.'):\n dpath = dirpath[2:]\n if dpath:\n dpath += '/'\n for fname in filenames:\n infullnames.append('%s%s' % (dpath, fname))\n\n if miscutils.fwdebug_check(6, 'PFWRUNJOB_DEBUG'):\n miscutils.fwdebug_print(\"initial infullnames=%s\" % infullnames)\n return infullnames", "def extract_strandsFromGff(gff_file, left, right, scale=True, downsample=0):\n\n # sometimes the first line is a comment which pandas can't handle\n skiprows = 0\n with open(gff_file, \"r\") as infile:\n if infile.read(1) == \"#\":\n skiprows = 1\n 
table = pandas.read_table(gff_file, header=None,\n usecols=[0, 2, 3, 4, 5, 6], comment=\"#\", skiprows=skiprows,\n names=[\"chromosome\", \"name\", \"leftpos\", \"rightpos\", \"reads\", \"strand\"])\n table = table[(table.rightpos >= left) & (table.leftpos <= right)]\n # TODO - detect if chromsome_plus and chromosome_minus\n if len(table.chromosome.unique()) > 1:\n raise Exception(\"multiple chromosomes not supported\")\n if (table.leftpos == table.rightpos).all(): # each line is one point\n table = table[[\"leftpos\", \"reads\", \"strand\"]]\n table_plus = table[table.strand == \"+\"].set_index(\"leftpos\")\n table_minus = table[table.strand == \"-\"].set_index(\"leftpos\")\n # fill missing values with 0\n filler = pandas.Series([range(left, right + 1)], [range(left, right + 1)])\n table_plus[\"filler\"] = 1\n table_minus[\"filler\"] = 1\n table_plus.fillna(0)\n table_minus.fillna(0)\n # extract only the series we need\n plus = table_plus.reads\n minus = table_minus.reads.abs() # in case stored negative\n if scale:\n plus *= 100. / plus.max()\n minus *= 100. / minus.max()\n # downsample\n collapse_factor = None;\n if downsample > 1:\n collapse_factor = int((right - left) / downsample)\n if collapse_factor and collapse_factor > 1:\n plus = plus.groupby(lambda x: x // collapse_factor).mean()\n plus.index *= collapse_factor\n minus = minus.groupby(lambda x: x // collapse_factor).mean()\n minus.index *= collapse_factor\n return plus,minus;", "def main():\n tot = []\n with open(intersect) as fil:\n for line in fil:\n data = []\n if len(line) > 1:\n rec = int_parse(line)\n win = find_winID(rec.wst, w)\n if int(rec.gst) != -1:\n imp = rec.info.rstrip().split(';')\n ID = imp[0].split('=')[1]\n Name = imp[1].split('=')[1]\n if imp[2].startswith('fullname'):\n fullname = imp[2].split('=')[1]\n if imp[3].startswith('Alias'):\n Alias = imp[3].split('=')[1]\n Ontology_term = imp[4].split('=')[1]\n Dbxref = imp[5].split('=')[1]\n GO, _ = split_ont(Ontology_term)\n _, SO = split_ont(Ontology_term)\n gst = rec.gst\n gen = rec.gen\n else:\n Alias = 'NA'\n Ontology_term = imp[3].split('=')[1]\n Dbxref = imp[4].split('=')[1]\n GO, _ = split_ont(Ontology_term)\n _, SO = split_ont(Ontology_term)\n gst = rec.gst\n gen = rec.gen\n else:\n fullname = 'NA'\n if imp[2].startswith('Alias'):\n Alias = imp[2].split('=')[1]\n Ontology_term = imp[3].split('=')[1]\n Dbxref = imp[4].split('=')[1]\n GO, _ = split_ont(Ontology_term)\n _, SO = split_ont(Ontology_term)\n gst = rec.gst\n gen = rec.gen\n else:\n Alias = 'NA'\n Ontology_term = imp[2].split('=')[1]\n Dbxref = imp[3].split('=')[1]\n GO, _ = split_ont(Ontology_term)\n _, SO = split_ont(Ontology_term)\n gst = rec.gst\n gen = rec.gen\n else:\n ID = 'NA'\n Name = 'NA'\n fullname = 'NA'\n gst = 'NA'\n gen = 'NA'\n Alias = 'NA'\n GO = 'NA'\n SO = 'NA'\n Dbxref = 'NA'\n data.append(ID)\n data.append(Name)\n data.append(fullname)\n data.append(gst)\n data.append(gen)\n data.append(Alias)\n data.append(GO)\n data.append(SO)\n data.append(Dbxref)\n data.append(rec.chrom)\n data.append(win)\n tot.append(tuple(data))\n writeTable(tot)", "def get_annot_cfpath_list(ibs, aid_list, suffix=None):\n #utool.assert_all_not_None(aid_list, 'aid_list')\n _cfname_fmt = get_chip_fname_fmt(ibs=ibs, suffix=suffix)\n cfname_iter = (None if aid is None else _cfname_fmt % aid for aid in iter(aid_list))\n cfpath_list = [None if cfname is None else join(ibs.chipdir, cfname) for cfname in cfname_iter]\n return cfpath_list", "def _separate_file_list( file_list, target_locus ):\n 
log.info(\"Parsing locus-specific subread FOFN\")\n target_fasta = None\n other_fasta = []\n print file_list, target_locus\n for filename in file_list:\n basename = filename.split('.')[0]\n locus = basename.split('_')[-1]\n if locus == target_locus and target_fasta is None:\n target_fasta = filename\n elif locus == target_locus:\n msg = 'Multiple files for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n else:\n other_fasta.append( filename )\n if target_fasta is None:\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n return ( target_fasta, other_fasta )", "def filenames(root, combined=None):\n segments = segment_paths(root)\n globname = \"*.nc*\" if combined is None else \"*.nc\"\n files = []\n for segment in segments:\n globstring = os.path.join(segment, globname)\n segment_files = sorted(glob.glob(globstring))\n segment_files = [os.path.basename(file) for file in segment_files]\n files.append(set(segment_files))\n return list(set.intersection(*files))", "def process_all_leading_genes(f_path):\n with open(f_path, 'r') as f:\n contents = f.read()\n parts = contents.strip().split('\\t')\n genes = parts[2:]\n return genes", "def loci_parsed(loci_file):\n #\n ga_list = [\"Ang_30\",\"Ang_29\"]\n\n gb_list = [\"Ang_67\", \"Ang_21\"]\n\n cc_list = [\"Cg12063\", \"Cg125212\", \"Cg126212\", \"Cg12758\", \"Cg_432\"]\n\n loci_dic = {}\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n\n\n for files in loci_file:\n\n name= files.strip().split (\"/\")\n name_loci = name[12].split(\"_\")\n name_loci_1 = name_loci[1].split(\".\")\n real_name_loci = name_loci_1[0]\n\n loci_file = open(files)\n\n\n for line in loci_file:\n\n if line[:1] in \"0123456789\":\n pass\n else:\n\n line_information = line.strip().split()\n isolate = line_information[0]\n sequence = line_information [1]\n\n # if \"-\" in sequence:\n # sequence = sequence.replace (\"-\", \"\")\n\n if isolate in ga_list and loci_list[\"ga\"] == None:\n loci_list[\"ga\"] = sequence\n if isolate in gb_list and loci_list[\"gb\"] == None:\n loci_list[\"gb\"] = sequence\n if isolate in cc_list and loci_list[\"cc\"] == None:\n loci_list[\"cc\"] = sequence\n loci_dic[real_name_loci] = loci_list\n\n\n\n loci_list = {\"ga\": None, \"gb\": None, \"cc\": None}\n\n return loci_dic", "def get_files_prefix(prefixes, dirname, Lshow=None, Ldir=None):\n matched_files=[]\n for pref in prefixes:\n print(f\"prefix: {pref} in {whereami()} of module {__name__}\")\n for fname in os.listdir(dirname):\n # re.match finds only prefix\n if re.match(pref, fname):\n if not Ldir and os.path.isdir(fname):\n continue\n matched_files.append(fname)\n #print (pref, fname)\n return matched_files", "def getMatchingMotifs(fname_test):\n global gTrie\n res = []\n\n with open(fname_test) as f:\n motifs = f.readlines() \n for m in motifs[1:]:\n m = m.split(',')[0]\n if gTrie.has_key(m):\n res.append(m)\n #print(m)\n return res", "def parse_geno_file(folder,return_flag):\n\n perc_alt = defaultdict(list)\n perc_ref = defaultdict(list)\n abs_alt = defaultdict(list)\n abs_ref = defaultdict(list)\n\n perc_alt_inv = defaultdict(dict)\n perc_ref_inv = defaultdict(dict)\n abs_alt_inv = defaultdict(dict)\n abs_ref_inv = defaultdict(dict)\n\n for geno_file in glob.glob(folder+'*_test_summary.tsv'):\n strain = geno_file.split('/')[-1].split('_')[0]\n #print strain\n prev_coordinate = \"0\"\n count = 0\n alt_allele = {}\n amb_allele = {}\n ref_allele = {}\n flag = 0 \n\n TEMP_HANDLE = open(geno_file,'r')\n for line in 
TEMP_HANDLE:\n line = line.rstrip('\\n')\n\n if(line[0]!='v'): ## Skip the header\n coordinate = line.split('\\t')[0].split('::')[-1]\n if(coordinate != prev_coordinate):\n #prev_coordinate = coordinate\n count = count + 1\n if(count == 1):\n if(line.split('\\t')[-3]!='alt'): ## No reads supporting the alternate allele\n flag = 1 \n alt_allele[coordinate] = 0\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n #print line\n else:\n alt_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 2):\n amb_allele[coordinate] = int(line.split('\\t')[-1])\n if(count == 3):\n if(line.split('\\t')[-3]!='ref'): ## No reads supporting the reference allele (all are ambiguous)\n ref_allele[coordinate] = 0\n else:\n ref_allele[coordinate] = int(line.split('\\t')[-1])\n prev_coordinate = coordinate\n count = 0\n if(flag == 1): ## The case where there are no alternate allele reads, counter is incremented to account for changed numbering\n count = count + 1 \n flag = 0 \n\n \n for key in alt_allele:\n if(alt_allele[key]+ref_allele[key]!= 0): ## Check to see if the denominator is not zero\n abs_alt[strain].append(float(alt_allele[key]))\n abs_ref[strain].append(float(ref_allele[key]))\n perc_alt[strain].append(float(alt_allele[key])/(alt_allele[key]+ref_allele[key]))\n perc_ref[strain].append(float(ref_allele[key])/(alt_allele[key]+ref_allele[key]))\n\n\n abs_alt_inv[strain][key] = float(alt_allele[key])\n abs_ref_inv[strain][key] = float(ref_allele[key])\n perc_alt_inv[strain][key] = float(alt_allele[key])/(alt_allele[key]+ref_allele[key])\n perc_ref_inv[strain][key] = float(ref_allele[key])/(alt_allele[key]+ref_allele[key])\n \n \n\n ## Keep only the common inversions, i.e. those between MC and the rest \n all_inversions = []\n common_inversions = []\n abs_alt_set = defaultdict(list)\n perc_alt_set = defaultdict(list)\n\n abs_alt_inv_set = defaultdict(dict)\n perc_alt_inv_set = defaultdict(dict)\n abs_ref_inv_set = defaultdict(dict)\n perc_ref_inv_set = defaultdict(dict)\n\n Rock = ['AC', 'CL','CM','CN','TI','PN','MC']\n Sand = ['MZ','DC','LF','MP','MS','CV']\n\n\n sand_inversions = []\n rock_inversions = []\n\n for strain in abs_alt_inv.keys():\n for inversion in abs_alt_inv[strain].keys():\n if(strain in Rock):\n rock_inversions.append(inversion)\n else:\n sand_inversions.append(inversion)\n all_inversions.append(inversion)\n \n \n common_inversions_sand = Counter(sand_inversions)\n common_inversions_rock = Counter(rock_inversions)\n #count_sand = 0\n common_inversions = Counter(all_inversions)\n return_inversions = []\n \n \n #print common_inversions\n for inversion in common_inversions.keys():\n if(common_inversions[inversion]==13):\n return_inversions.append(inversion)\n for strain in abs_alt_inv.keys():\n abs_alt_set[strain].append(abs_alt_inv[strain][inversion])\n perc_alt_set[strain].append(perc_alt_inv[strain][inversion])\n\n abs_alt_inv_set[strain][inversion] = abs_alt_inv[strain][inversion]\n perc_alt_inv_set[strain][inversion] = perc_alt_inv[strain][inversion]\n abs_ref_inv_set[strain][inversion] = abs_ref_inv[strain][inversion]\n perc_ref_inv_set[strain][inversion] = perc_ref_inv[strain][inversion]\n\n\n for inversion in abs_alt_inv_set['MC']:\n alternate_allele_sum_rock = 0\n reference_allele_sum_rock = 0\n alternate_allele_sum_sand = 0\n reference_allele_sum_sand = 0 \n for strain in Rock:\n alternate_allele_sum_rock = alternate_allele_sum_rock + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_rock = reference_allele_sum_rock + abs_ref_inv_set[strain][inversion]\n\n for 
strain in Sand:\n alternate_allele_sum_sand = alternate_allele_sum_sand + abs_alt_inv_set[strain][inversion]\n reference_allele_sum_sand = reference_allele_sum_sand + abs_ref_inv_set[strain][inversion]\n\n abs_alt_set['Rock'].append(alternate_allele_sum_rock)\n perc_alt_set['Rock'].append(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock)))\n \n abs_alt_set['Sand'].append(alternate_allele_sum_sand)\n perc_alt_set['Sand'].append(float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand)))\n \n with open('log_file.txt','a') as LOG_FILE:\n if(float((alternate_allele_sum_rock)/(alternate_allele_sum_rock + reference_allele_sum_rock))>float(sys.argv[2]) or float((alternate_allele_sum_sand)/(alternate_allele_sum_sand + reference_allele_sum_sand))>float(sys.argv[2])):\n print >> LOG_FILE,inversion \n \n\n print \"Sand : \"+str(count_sand)\n\n if return_flag == True:\n #print len([abs_alt_inv_set,abs_ref_inv_set,perc_alt_inv_set,perc_ref_inv_set])\n return perc_alt_inv_set\n else:\n return [abs_alt_set,perc_alt_set]", "def get_cds_start_end_locations_genbank_file(filename):\n # Loop over the features\n genes = defaultdict(list)\n cds = 0\n for seq_record in SeqIO.parse(filename, \"genbank\"):\n print(f'Dealing with GenBank record {seq_record.id}')\n for seq_feature in seq_record.features:\n if seq_feature.type == \"CDS\" and 'protein_id' in seq_feature.qualifiers:\n cds += 1\n prot_id = seq_feature.qualifiers['protein_id'][0]\n start, end = int(seq_feature.location.start), int(seq_feature.location.end)\n genes[prot_id] = genes.get(prot_id, []) + [start, end]\n print(f'There are {cds} CDS and {len(genes)} genes annoted for this genbank record')\n return genes", "def readDocuments(docs, prefix):\n\n fmap = open(\"mapping.txt\", \"w\")\n\n\n i = -1\n for folder in pressrelease_folders_txt:\n i += 1\n fullpath = path.join(prefix, folder)\n totFilesInFolder = len(fnmatch.filter(os.listdir(fullpath),\n '*.txt'))\n countFiles = 0\n for f in listdir(path.join(prefix, folder)):\n fmap.write(\"{0}\\t {1:5d}\\n\".format(f, countFiles))\n countFiles += 1\n fullname = fullpath + f\n # text = open(fullname).readlines()\n ff = open(fullname)\n docs.append(ff.read())\n\n print(\"{0:5d}/{1:5d} :: Reading file {2:10s} \".format(countFiles,\n totFilesInFolder, f))\n\n # if countFiles > 4:\n # return\n\n\n fmap.close()", "def test_get_suffixes(self):\n\n ans = self.short_sf.get_suffixes()\n\n self.assertEqual(ans, [(0, 0), (1, 1), (0, 1), (0, 2), (1, 0), (1, 2)])", "def F_subset_S5PCH4(self,path,if_trop_xch4=False,s5p_product='RPRO'): \n from scipy.interpolate import interp1d\n # find out list of l2 files to subset\n if os.path.isfile(path):\n self.F_update_popy_with_control_file(path)\n l2_list = self.l2_list\n l2_dir = self.l2_dir\n else:\n import glob\n l2_dir = path\n l2_list = []\n cwd = os.getcwd()\n os.chdir(l2_dir)\n start_date = self.start_python_datetime.date()\n end_date = self.end_python_datetime.date()\n days = (end_date-start_date).days+1\n DATES = [start_date + datetime.timedelta(days=d) for d in range(days)]\n for DATE in DATES:\n flist = glob.glob('S5P_'+s5p_product+'_L2__CH4____'+DATE.strftime(\"%Y%m%d\")+'T*.nc')\n l2_list = l2_list+flist\n \n os.chdir(cwd)\n self.l2_dir = l2_dir\n self.l2_list = l2_list\n \n #maxsza = self.maxsza \n #maxcf = self.maxcf\n west = self.west\n east = self.east\n south = self.south\n north = self.north\n min_qa_value = self.min_qa_value\n \n # absolute path of useful variables in the nc file\n 
data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n if if_trop_xch4:\n # absolute path of useful variables in the nc file\n data_fields = ['/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/latitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/longitude_bounds',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/solar_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/GEOLOCATIONS/viewing_zenith_angle',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/dry_air_subcolumns',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/surface_pressure',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/pressure_interval',\\\n '/PRODUCT/SUPPORT_DATA/INPUT_DATA/methane_profile_apriori',\\\n '/PRODUCT/latitude',\\\n '/PRODUCT/longitude',\\\n '/PRODUCT/qa_value',\\\n '/PRODUCT/time',\\\n '/PRODUCT/delta_time',\\\n '/PRODUCT/methane_mixing_ratio',\\\n '/PRODUCT/methane_mixing_ratio_bias_corrected',\\\n '/PRODUCT/methane_mixing_ratio_precision'] \n # standardized variable names in l2g file. should map one-on-one to data_fields\n data_fields_l2g = ['latitude_bounds','longitude_bounds','SolarZenithAngle',\\\n 'vza','dry_air_subcolumns','surface_pressure','pressure_interval',\n 'methane_profile_apriori','latc','lonc','qa_value','time','delta_time',\\\n 'column_amount_no_bias_correction','column_amount','column_uncertainty']\n self.logger.info('Read, subset, and store level 2 data to l2g_data')\n self.logger.info('Level 2 data are located at '+l2_dir)\n l2g_data = {}\n for fn in l2_list:\n fn_dir = l2_dir+fn\n self.logger.info('Loading '+fn)\n outp_nc = self.F_read_S5P_nc(fn_dir,data_fields,data_fields_l2g)\n if if_trop_xch4:\n sounding_interp = F_interp_geos_mat(outp_nc['lonc'],outp_nc['latc'],outp_nc['UTC_matlab_datenum'],\\\n geos_dir='/mnt/Data2/GEOS/s5p_interp/',\\\n interp_fields=['TROPPT'])\n outp_nc['TROPPT'] = sounding_interp['TROPPT']\n #f1 = outp_nc['SolarZenithAngle'] <= maxsza\n #f2 = outp_nc['cloud_fraction'] <= maxcf\n # ridiculously, qa_value has a scale_factor of 0.01. 
so error-prone\n f3 = outp_nc['qa_value'] >= min_qa_value \n f4 = outp_nc['latc'] >= south\n f5 = outp_nc['latc'] <= north\n tmplon = outp_nc['lonc']-west\n tmplon[tmplon < 0] = tmplon[tmplon < 0]+360\n f6 = tmplon >= 0\n f7 = tmplon <= east-west\n f8 = outp_nc['UTC_matlab_datenum'] >= self.start_matlab_datenum\n f9 = outp_nc['UTC_matlab_datenum'] <= self.end_matlab_datenum\n validmask = f3 & f4 & f5 & f6 & f7 & f8 & f9\n self.logger.info('You have '+'%s'%np.sum(validmask)+' valid L2 pixels')\n l2g_data0 = {}\n if np.sum(validmask) == 0:\n continue\n # yep it's indeed messed up\n Lat_lowerleft = np.squeeze(outp_nc['latitude_bounds'][:,:,0])[validmask]\n Lat_upperleft = np.squeeze(outp_nc['latitude_bounds'][:,:,3])[validmask]\n Lat_lowerright = np.squeeze(outp_nc['latitude_bounds'][:,:,1])[validmask]\n Lat_upperright = np.squeeze(outp_nc['latitude_bounds'][:,:,2])[validmask]\n Lon_lowerleft = np.squeeze(outp_nc['longitude_bounds'][:,:,0])[validmask]\n Lon_upperleft = np.squeeze(outp_nc['longitude_bounds'][:,:,3])[validmask]\n Lon_lowerright = np.squeeze(outp_nc['longitude_bounds'][:,:,1])[validmask]\n Lon_upperright = np.squeeze(outp_nc['longitude_bounds'][:,:,2])[validmask]\n l2g_data0['latr'] = np.column_stack((Lat_lowerleft,Lat_upperleft,Lat_upperright,Lat_lowerright))\n l2g_data0['lonr'] = np.column_stack((Lon_lowerleft,Lon_upperleft,Lon_upperright,Lon_lowerright))\n for key in outp_nc.keys():\n if key not in {'latitude_bounds','longitude_bounds','time_utc','time','delta_time'}:\n l2g_data0[key] = outp_nc[key][validmask]\n if if_trop_xch4:\n # calculate trop xch4 using l2g_data0\n l2g_data0['air_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['air_column_total'] = np.zeros(l2g_data0['latc'].shape)\n l2g_data0['methane_ap_column_strat'] = np.zeros(l2g_data0['latc'].shape)\n for il2 in range(len(l2g_data0['latc'])):\n cum_air = np.concatenate(([0.],np.cumsum(l2g_data0['dry_air_subcolumns'][il2,].squeeze())))\n cum_methane = np.concatenate(([0.],np.cumsum(l2g_data0['methane_profile_apriori'][il2,].squeeze())))\n # model top is 10 Pa, 12 layers, 13 levels\n plevel = 10.+np.arange(0,13)*l2g_data0['pressure_interval'][il2]\n tropp = l2g_data0['TROPPT'][il2]\n l2g_data0['air_column_total'][il2] = np.sum(l2g_data0['dry_air_subcolumns'][il2,])\n f = interp1d(plevel,cum_air)\n l2g_data0['air_column_strat'][il2] = f(tropp)\n f = interp1d(plevel,cum_methane)\n l2g_data0['methane_ap_column_strat'][il2] = f(tropp)\n del l2g_data0['dry_air_subcolumns']\n del l2g_data0['methane_profile_apriori'] \n l2g_data = self.F_merge_l2g_data(l2g_data,l2g_data0)\n self.l2g_data = l2g_data\n if not l2g_data:\n self.nl2 = 0\n else:\n self.nl2 = len(l2g_data['latc'])", "def new_resolve_unique_contigs(scaffold_list, unique_contigs_list):\n \n contig_location = {}\n s_l = copy.deepcopy(scaffold_list)\n \n #first deal with any scaffolds that have more than one copy of a unique contig\n to_remove = []\n for scaf in s_l: \n for contig in unique_contigs_list:\n if scaf.count(contig) > 1:\n scaffold_parts = split_siamese(contig, scaf)\n to_remove.append(scaf)\n s_l.extend(scaffold_parts)\n break \n for scaf in to_remove:\n s_l.remove(scaf) \n\n\n for contig in unique_contigs_list:\n #if contig[:4] == \"five\": \n finds = find_unique_contig(contig, s_l)\n\n if len(finds) > 1:\n contig_location[contig] = finds\n\n sc_ov = {}\n sc_ov = make_scaff_overlap_dict(contig_location)\n\n #This is the new bit that takes just the first conflicted contig \n first_k = list(sc_ov.items())[0:1]\n first_sc_ov = dict(first_k)\n 
new_scaffold_list = combine_overlapping_contigs(first_sc_ov, s_l)\n\n #Split off unique scaffolds attached by their 3' ends to multiple scaffolds\n \n for contig in contig_location:\n if contig[:5] == \"three\":\n for scaf in contig_location[contig]:\n conflict = False\n if scaf.index(contig) == 1:\n conflict = True\n new_left_scaf = scaf[:3]\n new_right_scaf = scaf[3:]\n if scaf.index(contig) == len(scaf) - 2:\n conflict = True\n new_left_scaf = scaf[:-3]\n new_right_scaf = scaf[-3:]\n if conflict:\n new_left_scaf.append(\"link_conflict6\")\n new_right_scaf.insert(0,\"link_conflict6\")\n if len(new_left_scaf) >= 4: \n new_scaffold_list.append(new_left_scaf)\n if len(new_right_scaf) >= 4:\n new_scaffold_list.append(new_right_scaf)\n if scaf in new_scaffold_list:\n new_scaffold_list.remove(scaf)\n\n return new_scaffold_list", "def filter_otus_from_otu_map(input_otu_map_fp,\r\n output_otu_map_fp,\r\n min_count,\r\n min_sample_count=1):\r\n results = set()\r\n output_otu_map_f = open(output_otu_map_fp, 'w')\r\n for line in open(input_otu_map_fp, 'U'):\r\n fields = line.strip().split('\\t')\r\n sample_ids = set([e.split('_')[0] for e in fields[1:]])\r\n # only write this line if the otu has more than n sequences (so\r\n # greater than n tab-separated fields including the otu identifier)\r\n if (len(fields) > min_count) and (len(sample_ids) >= min_sample_count):\r\n output_otu_map_f.write(line)\r\n results.add(fields[0].split('\\t')[0])\r\n output_otu_map_f.close()\r\n return results", "def write_target_regions(out_f, args, chrom_list, combined_files, snp_files):\n\n for chrom in chrom_list: \n node_name = \"/%s\" % chrom.name\n if node_name not in snp_files.snp_index_h5:\n continue\n if node_name not in snp_files.snp_tab_h5:\n continue\n \n sys.stderr.write(\" %s\\n\" % chrom.name)\n\n sys.stderr.write(\" getting genotype counts\\n\")\n ref_geno_count = combined_files.ref_count_h5.get_node(node_name)[:]\n het_geno_count = combined_files.het_count_h5.get_node(node_name)[:]\n\n ref_allele_count = ref_geno_count * 2 + het_geno_count\n # free memory as it is no longer needed\n del ref_geno_count\n\n alt_geno_count = combined_files.alt_count_h5.get_node(node_name)[:]\n alt_allele_count = alt_geno_count * 2 + het_geno_count\n\n del alt_geno_count\n\n sys.stderr.write(\" getting minor allele counts\\n\")\n\n minor_count = np.amin(np.vstack([ref_allele_count, alt_allele_count]),\n axis=0)\n \n idx = np.where((minor_count >= args.min_minor_allele_count) &\n (het_geno_count >= args.min_het_count))[0]\n\n del het_geno_count\n del minor_count\n \n sys.stderr.write(\" %d possible test SNPs\\n\" % idx.shape[0])\n\n read_counts = combined_files.read_count_h5.get_node(node_name)[:]\n as_read_counts = combined_files.as_count_h5.get_node(node_name)[:]\n\n snp_idx = snp_files.snp_index_h5.get_node(node_name)\n snp_tab = snp_files.snp_tab_h5.get_node(node_name)\n \n n_region = 0\n \n for i in idx:\n start = max(1, i+1 - args.target_region_size/2)\n end = min(chrom.length, i+1 + args.target_region_size/2)\n\n n_reads = np.sum(read_counts[start-1:end])\n n_as_reads = np.sum(as_read_counts[start-1:end])\n\n snp_row = snp_tab[snp_idx[i]]\n\n if (n_reads >= args.min_read_count) and (n_as_reads >= args.min_as_count):\n # keep this target region\n\n # NOTE: currently this filter just uses total count of AS reads in region.\n # Would be better to take into account genotypes of each individual, since AS reads\n # are only useful for test in individuals that are heterozygous for test SNP\n out_f.write(\"%s %d %d %s %s + 
%s.%d %d %d\\n\" % \n (chrom.name, i+1, i+2, snp_row['allele1'],\n snp_row['allele2'], chrom.name, start+1,\n start, end))\n\n n_region += 1\n\n sys.stderr.write(\" wrote %d test SNP / target region pairs\\n\" % n_region)", "def print_file(chr_list,filename):\n infile = open(filename)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n for chr_i,chr_l in enumerate(chr_list):\n for loc in chr_l:\n if chr==chr_i and loci==loc:\n print line\n return", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())", "def build_basenames():\r\n dict = {}\r\n with open(STREETS_FILE) as file:\r\n for line in file:\r\n dict[line.strip()] = True\r\n return dict", "def get_filenames_strains(self, file_path_template_newick_tree):\n\t\tassert self.validate_file(file_path_template_newick_tree)\n\t\tlist_of_filenames_strains = []\n\t\ttree = Phylo.read(file_path_template_newick_tree, 'newick')\n\t\tfor leaf in tree.get_terminals():\n\t\t\tprefix = leaf.name\n\t\t\tif prefix.lower() == \"ancestor\":\n\t\t\t\tcontinue\n\t\t\tlist_of_filenames_strains.append(\"{prefix}.fasta\".format(prefix=prefix))\n\t\treturn list_of_filenames_strains", "def get_fnams(start = '', dir_base = './', end = ''):\r\n fnams = os.listdir(dir_base)\r\n fnams_out = []\r\n for i, fnam in enumerate(fnams):\r\n if fnam[:len(start)] == start :\r\n if fnam[-len(end):] == end or len(end) == 0 :\r\n temp = os.path.join( dir_base, fnam)\r\n if os.path.isfile( temp ) :\r\n fnams_out.append(temp)\r\n return fnams_out", "def _processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), 
file=outf)", "def main():\n\tdb, cursor = connect()\n\t#chroms = ['1','22']\n\t#chroms = ['2','21']\n\t#chroms = ['3','20']\n\t#chroms = ['4','19']\n\t#chroms = ['5','18']\n\t#chroms = ['6','17']\n\t#chroms = ['7','16']\n\t#chroms = ['8','15']\n\t#chroms = ['9','14']\n\t#chroms = ['10','13']\n\tchroms = ['11','12']\n\t#chroms = [str(i) for i in range(10,23)]\n\t#chroms = ['X','Y']\n\tchroms.reverse()\n\tfor chrom in chroms:\n\t\tt0 = time()\n\t\ttable = \"gnomad_freqs_chr_\" + chrom\n\t\tprint\n\t\tprint \"*\"*20\n\t\tprint table\n\t\tprint \"number of variants:\", search_db(cursor, \"select count(1) from %s\" % table)[0][0]\n\t\tqry = \"select count(1) from %s \" % table\n\t\tqry += \"where char_length(reference)=1 and char_length(variant)=1\"\n\t\tprint \"simple SNPs\", search_db(cursor, qry)[0][0]\n\n\t\tcandidates, long_vars_ct = find_complex_variants(cursor, table)\n\t\tprint\n\t\tprint \"Complex variants with reference<30:\", len(candidates),\n\t\tprint \" long variants: \", long_vars_ct\n\n\t\tclusters = find_clusters_of_candidates(candidates)\n\t\tprint\n\t\tprint \"Done clustering. Max pos:\", max([cluster[0][0] for cluster in clusters])\n\t\tprint \"Number of hotspot regions:\", len(clusters)\n\n\n\t\tnumber_of_vars_in_clusters = 0\n\t\tnumber_of_clusters_with_periodic_motifs = 0\n\t\tfor cluster in clusters:\n\t\t\t# no varaints: cluster is just the number of positions here, not the number of\n\t\t\t# vars repoted for each\n\t\t\t[start,end, number_of_variants] = characterize_region(cluster)\n\t\t\tif number_of_variants<2: continue\n\t\t\tnumber_of_vars_in_clusters += number_of_variants\n\t\t\tfixed_fields = {'chrom':chrom, 'start':start, 'end':end}\n\t\t\tstore_without_checking(cursor, 'gnomad_hotspots', fixed_fields)\n\t\tprint\n\t\tprint \"Number of variants with clusters:\", number_of_vars_in_clusters\n\t\tprint \"Number of clusters with periodic motifs:\", number_of_clusters_with_periodic_motifs\n\t\tprint\n\t\tprint \"time taken %.2f min\" % ((time() - t0) / 60.0)\n\t\tprint\n\tcursor.close()\n\tdb.close()\n\n\treturn", "def collect_names(files):\n names = set()\n routes = defaultdict(dict)\n for file in files:\n gene_name = file.split('/')[-1].split('.')[0]\n for record in SeqIO.parse(file, 'fasta'):\n if record.name.count('_') >= 3:\n name = record.name.split('_')[0]\n route = record.name.split('_')[2]\n routes[gene_name][name] = route\n elif '_' in record.name:\n name = record.name.split('_')[0]\n else:\n name = record.name\n names.add(name)\n return sorted(list(names)), routes", "def test_prefilter_exact_prefixes_all_to_one_filtering(self):\r\n # maps to first when all are same length\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1 comment', 'ACGTAA'),\r\n ('s2', 'ACGTAC'),\r\n ('s3', 'ACGTAG'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s1', 'ACGTAA')], {'s1':\r\n ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)\r\n\r\n # maps to longest seq\r\n app = CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTACA'),\r\n ('s3', 'ACGTAG'),\r\n ('s4', 'ACGTAT'),\r\n ('s5', 'ACGTCA'),\r\n ('s6', 'ACGTCC')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s2', 'ACGTACA')], {'s2':\r\n ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)\r\n\r\n # maps to longest seq\r\n app = 
CdHitOtuPicker(params={})\r\n seqs = [('s1', 'ACGTAA'),\r\n ('s2', 'ACGTACA'),\r\n ('s3', 'ACGTAGAA'),\r\n ('s4', 'ACGTATAAA'),\r\n ('s5', 'ACGTCAAAAA'),\r\n ('s6', 'ACGTCCAAAAA')]\r\n\r\n prefix_length = 4\r\n actual = app._prefilter_exact_prefixes(seqs, prefix_length)\r\n actual[0].sort()\r\n expected = [('s6', 'ACGTCCAAAAA')\r\n ], {'s6': ['s1', 's2', 's3', 's4', 's5', 's6']}\r\n self.assertEqual(actual, expected)", "def get_my_mutations(quality_cutoff, coverage_cutoff):\n\n # my_mutations = {}\n # with open('/home/perry/Projects/loh/working/murim.exome.aa_chg.vars') as f:\n # for line in f:\n # my_mutations[line.strip()] = True\n # return my_mutations\n\n bed_file = 'data/nimblegen/2.1M_Human_Exome_Annotation/2.1M_Human_Exome.bed'\n bed_chr2st2end, bed_chr2posLs = bed_tools.load_bed(bed_file, \n 'NimbleGen Tiled Regions')\n # NimbleGen Tiled Regions\n # Target Regions\n\n use_data_dir = '/home/perry/Projects/loh/data/all_non_ref_hg18/'\n all_somatic = {}\n all_inherited = {}\n cancer_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanT.ann')\n normal_qualities = mutations.get_consensus_qualities(use_data_dir + 'yusanN.ann')\n for exome in global_settings.exome_types:\n data_file = use_data_dir + exome\n inherited, somatic, murim = mutations.get_mutations(data_file, normal_qualities,\n cancer_qualities, quality_cutoff,\n False, coverage_cutoff)\n # only use the bed_tools NimbleGen\n # restriction for hg18 data\n for s in somatic['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_somatic[s] = True\n for i in inherited['yusan']: \n chr, pos = s.split(':')\n if bed_tools.find_location_in_bed(chr, int(pos), \n bed_chr2posLs,\n bed_chr2st2end):\n all_inherited[i] = True\n return (set(all_somatic.keys()) & set(get_murim_covered(quality_cutoff)), set(all_inherited.keys()) & set(get_murim_covered(quality_cutoff)))", "def files_to_map(\n files,\n despike_l1b=False,\n only_long_exposures=False,\n only_short_exposures=False,\n only_short_flare_exposures=False,\n):\n # Avoid circular imports\n from sunkit_instruments.suvi.suvi import despike_l1b_array\n\n if isinstance(files, str):\n files = [files]\n files = sorted(files)\n if any(fn in os.path.basename(files[0]) for fn in COMPOSITE_MATCHES):\n composites = True\n elif any(fn in os.path.basename(files[0]) for fn in L1B_MATCHES):\n composites = False\n else:\n raise ValueError(\n f\"First file {files[0]} does not look like a SUVI L1b file or L2 HDR composite.\"\n )\n\n datas = []\n headers = []\n for afile in files:\n logging.debug(f\"Reading {afile}\")\n if composites:\n if any(fn in os.path.basename(afile) for fn in COMPOSITE_MATCHES):\n header, data, _ = read_suvi(afile)\n datas.append(data)\n headers.append(header)\n else:\n warn_user(\n f\"File {afile} does not look like a SUVI L2 HDR composite. 
Skipping.\"\n )\n else:\n if any(fn in os.path.basename(afile) for fn in L1B_MATCHES):\n header, data, dqf_mask = read_suvi(afile)\n if despike_l1b:\n data = despike_l1b_array(data, dqf_mask)\n if only_long_exposures:\n if \"long_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_exposures:\n if \"short_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_flare_exposures:\n if \"short_flare_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n else:\n datas.append(data)\n headers.append(header)\n else:\n warn_user(f\"File {afile} does not look like a SUVI L1b file. Skipping.\")\n if len(datas) == 1:\n return sunpy.map.Map(datas[0], headers[0])\n elif len(datas) > 1:\n return sunpy.map.Map(list(zip(datas, headers)), sequence=True)\n else:\n warn_user(\"List of data/headers is empty.\")", "def build_messy_lookup(source,dest,ref_col):\n la = QuickGrid().open(source)\n od = QuickGrid().open(join(\"source_files\",\"local_authority_data_names.csv\"))\n\n lookup = QuickGrid()\n lookup.header = [\"la name\",ref_col]\n\n possible = [\"official-name\",\"alt-name-1\",\"alt-name-2\",\"alt-name-3\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n lookup.add([r[p],r[ref_col]])\n \n current_names = [x[0] for x in lookup]\n\n for r in od:\n if r[\"name\"] not in current_names:\n code = r[\"local-authority\"].split(\":\")[1]\n lookup.add([r[\"name\"],code])\n \n lookup.save(dest,force_unicode=True)", "def findgene(fname, dbpaths=dbpaths):\n scaf = []\n gbeg = []\n gend = []\n gfor = []\n gsta = []\n gdif = []\n cuffgenes = {}\n\n fobj = open(fname)\n for line in fobj:\n col = line.split()\n scaf.append( re.search('[sCcafold]*[0-9]+', col[3]).group() )\n gbeg.append( int(re.search(':(.*)-', col[3]).groups()[0]) )\n gend.append( int(re.search('-(.*)', col[3]).groups()[0]) )\n gfor.append(float(col[7]))\n gsta.append(float(col[8]))\n gdif.append(float(col[9]))\n\n fobj.close()\n print \"Significant transcripts read\"\n\n\n for result in range(len(scaf)):\n cur_scaf = scaf[result]\n cur_gbeg = gbeg[result]\n cur_gend = gend[result]\n cur_gfor = gfor[result]\n cur_gsta = gsta[result]\n cur_gdif = gdif[result]\n fobj = open(dbpaths['gff'])\n for line in fobj:\n col = line.split()\n if col[2] == \"mRNA\":\n if col[0] == cur_scaf:\n if float(col[3]) <= cur_gend and float(col[4]) >= cur_gbeg:\n try:\n cuffgenes[(cur_scaf, cur_gbeg)] = (re.search('ID=([^;]*);', col[8]).groups()[0], cur_scaf, cur_gbeg, cur_gend, cur_gfor, cur_gsta, cur_gdif)\n except AttributeError:\n print col[8]\n fobj.close()\n\n return cuffgenes", "def _get_pathless_file_name_prefixes(model_name, grid_id=None):\n\n grid_id = nwp_model_utils.check_grid_name(\n model_name=model_name, grid_name=grid_id)\n\n if model_name == nwp_model_utils.NARR_MODEL_NAME:\n return [NARR_ID_FOR_FILE_NAMES]\n\n if model_name == nwp_model_utils.RAP_MODEL_NAME:\n return ['{0:s}_{1:s}'.format(model_name, grid_id)]\n\n return ['ruc2_{0:s}'.format(grid_id), 'ruc2anl_{0:s}'.format(grid_id)]", "def test_get_suffixes_random(self):\n\n ans = self.random_sf.get_suffixes()\n\n # check there are the right number of suffixes\n expected_num = sum([len(chstring) for chstring in self.rand_gs.values()])\n actual_num = len(ans)\n self.assertEqual(actual_num, expected_num)\n\n # check that the order is correct\n last_glidx, last_tidx = ans[0]\n last = self.random_sf.data[last_glidx][last_tidx:]\n for glyph_idx, 
tok_idx in ans[1:]:\n current = self.random_sf.data[glyph_idx][tok_idx:]\n self.assertTrue(last <= current)", "def test_paths_to_plates():\n output = filelister_yoko.paths_to_plates(TEST_PATH_YOKO)\n prefix = os.path.abspath(TEST_PATH_YOKO)\n plate_names = [\"screen-name-batch1_20190213_095340/A000002-PC\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def annotateFilesAfterHeurAndSelection(inputFolderPath, outputFolderPath, dumpSP=True):\n # add a slash if needed\n if inputFolderPath[-1] != u'/':\n inputFolderPath = u'{0}/'.format(inputFolderPath)\n if outputFolderPath[-1] != u'/':\n outputFolderPath = u'{0}/'.format(outputFolderPath)\n # get the selected reference file lines\n with open(u'{0}sampleReference.Paths'.format(inputFolderPath)) as refPathsFile:\n referenceLines = refPathsFile.readlines()\n # get the en and fr input lines\n with open(u'{0}sample.en'.format(inputFolderPath)) as enFile:\n enLns = enFile.readlines()\n with open(u'{0}sample.fr'.format(inputFolderPath)) as frFile:\n frLns = frFile.readlines()\n with open(u'{0}scores.tsv'.format(inputFolderPath)) as scFile:\n scLns = scFile.readlines()\n # get rid of the files we have already annotated\n if utilsOs.theFileExists(u'{0}sampleReference.tsv'.format(outputFolderPath)):\n # get the already seen lines\n referencePathLine = utilsOs.readAllLinesFromFile(u'{0}sampleReference.tsv'.format(outputFolderPath),\n noNewLineChar=True)\n listOfAnnotations = utilsOs.readAllLinesFromFile(u'{0}sampleAnnotation.tsv'.format(outputFolderPath),\n noNewLineChar=True)\n # maintain only what we haven't seen\n annotatedFiles = set(referencePathLine)\n newRefLines = []\n for ind, file in enumerate(referenceLines):\n if file.replace(u'\\n', u'') not in annotatedFiles:\n newRefLines.append( [ind, file.replace(u'\\n', u'')] )\n referenceLines = newRefLines\n # print(referenceLines)\n else:\n referencePathLine = []\n listOfAnnotations = []\n referenceLines = [(ind, file.replace(u'\\n', u'')) for ind, file in enumerate(referenceLines)]\n # print the annotator cheat sheet\n printCheatSheet()\n # open each file in EN and FR and show it in the terminal\n for tupleRef in referenceLines:\n indRef, refLn = tupleRef[0], tupleRef[1]\n print(u'############# {0} ##############'.format(refLn.replace(u'\\n', u'')))\n # get the path for the source and target\n lnsSource = enLns if u'en-fr' in refLn else frLns\n lnsTarget = frLns if u'en-fr' in refLn else enLns\n # get the correct terminal line length\n lineLength = 137-len(str(len(listOfAnnotations)+1))\n # color in red the during lines\n redDuringSource = u'\\033[1;31m{0}\\033[0m'.format(lnsSource[indRef])\n # print the sentences\n print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations), lnsTarget[indRef]))\n print()\n # count the lines that take the space of 2 lines\n longLines = getNbLongLines([lnsSource[indRef], lnsTarget[indRef]], lineLength)\n # get the first part of the annotation (aligned or not)\n annotatorGeneralInput = input(u'Aligned-Misaligned annotation: ')\n # make sure to have the right general annotation\n while True:\n if annotatorGeneralInput in [u'0', u'1', u'0.0', u'0.1', u'0.2',\n u'1.0', u'1.1', u'1.2', u'1.3', u'1.4', u'c', u'correction']:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorGeneralInput = input(u'Repeat annotation: ')\n if annotatorGeneralInput in [u'c', u'correct']:\n 
annotatorGeneralInput, listOfAnnotations = correctionToAnnotation(listOfAnnotations)\n # save to the list of annotations\n listOfAnnotations.append(float(annotatorGeneralInput))\n # remove the lines from the terminal before getting to the next pair\n utilsOs.moveUpAndLeftNLines(7+longLines, slowly=False)\n # erase all remainder of the previous sentences and go back up again\n for e in range(14+longLines):\n print(u' '*(lineLength+4))\n utilsOs.moveUpAndLeftNLines(7 + longLines, slowly=False)\n # append the reference to the file\n referencePathLine.append(refLn)\n # dump the file line by line, to be sure in case of error\n # dump the reference\n utilsOs.dumpRawLines(referencePathLine, u'{0}sampleReference.tsv'.format(outputFolderPath),\n addNewline=True, rewrite=True)\n # dump the annotation\n utilsOs.dumpRawLines(listOfAnnotations, u'{0}sampleAnnotation.tsv'.format(outputFolderPath),\n addNewline=True, rewrite=True)\n # dump the SP\n if dumpSP is True:\n enSent = lnsSource[indRef] if u'en-fr' in refLn else lnsTarget[indRef]\n frSent = lnsTarget[indRef] if u'en-fr' in refLn else lnsSource[indRef]\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(outputFolderPath), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(outputFolderPath), addNewLine=False)\n utilsOs.appendLineToFile(scLns[indRef], u'{0}scores.tsv'.format(outputFolderPath), addNewLine=False)\n # clear part of terminal\n utilsOs.moveUpAndLeftNLines(7, slowly=False)", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def fetch_basenames(engine, form_factor):\n for key in ['current', 'm_mother', 'm_daughter', 'm_spectator', 'momentum']:\n if key not in form_factor:\n raise KeyError(f\"Required key '{key}' is missing.\")\n\n def abspath(dirname):\n return os.path.join(pathlib.Path(__file__).parent.absolute(), dirname)\n\n # 2pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000'\n mother = \"%_RW_RW_d_d_m{m_mother}_m{m_spectator}_p000%fine\"\n daughter = \"%_RW_RW_d_d_m{m_daughter}_m{m_spectator}_{momentum}%fine\"\n if form_factor['m_daughter'] < form_factor['m_spectator']:\n daughter = \"%_RW_RW_d_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n # 3pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000',\n corr3 = \"%_{current}_T%_m{m_mother}_RW_RW_x_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n params = {\n 'mother': mother.format(**form_factor),\n 'daughter': daughter.format(**form_factor),\n 'corr3': corr3.format(**form_factor)}\n queries = aiosql.from_path(abspath(\"sql/\"), \"sqlite3\")\n with db.connection_scope(engine) as conn:\n corrs = queries.postgres.get_correlator_names(conn, **params)\n \n return np.squeeze(np.array(corrs))", "def gene_names(filepath, complete=True):\n if complete:\n df_ucsc = pd.read_csv(filepath, sep='\\t', header=None)\n df_ucsc.columns = (\n ['number', 'gene_name', 'locus_link',\n 'ref_seq_num', 'genbank', 'uniprot', 'taxon']\n )\n gene_ucsc = set(\n [str(name).lower() for name in df_ucsc[\"gene_name\"]\n if len(str(name)) >1]\n )\n return gene_ucsc\n else:\n df_syn = pd.read_csv(filepath, sep='\\t', header=None)\n df_syn.columns = ['number', 'gene_name']\n gene_ucsc = set(\n [str(name).lower() for name in df_syn[\"gene_name\"]\n 
if len(str(name)) >1]\n )\n return gene_ucsc", "def main():\n tl = TwoLocus(in_path='/csbiodata/public/www.csbio.unc.edu/htdocs/sgreens/pairwise_origins/')\n # tl = TwoLocus()\n # tl.preprocess(glob.glob('OR_ss_origins/*.hap'))\n print len(tl.list_available_strains())\n exit()\n # print len(tl.list_available_strains())\n # tl.preprocess(['cc_origins.csv'])\n # tl.preprocess(['ccv_origins.csv'])\n classical = [s for s in\n [\"129P1/ReJ\", # \"129P3/J\", \"129S1SvlmJ\", \"129S6\", \"129T2/SvEmsJ\", \"129X1/SvJ\", \"A/J\", \"A/WySnJ\",\n \"AEJ/GnLeJ\", \"AEJ/GnRk\", \"AKR/J\", \"ALR/LtJ\", \"ALS/LtJ\", \"BALB/cByJ\", \"BALB/cJ\", \"BDP/J\", \"BPH/2J\",\n # \"BPL/1J\", \"BPN/3J\", \"BTBR T<+>tf/J\", \"BUB/BnJ\", \"BXSB/MpJ\", \"C3H/HeJ\", \"C3HeB/FeJ\", \"C57BL/10J\",\n # \"C57BL/10ScNJ\", \"C57BL/10SAAAJ\", \"C57BL/6CR\", \"C57BL/6J\", \"C57BL/6NCI\", \"C57BL/6Tc\", \"C57BLKS/J\",\n # \"C57BR/cdJ\", \"C57L/J\", \"C58/J\", \"CBA/CaJ\", \"CBA/J\", \"CE/J\", \"CHMU/LeJ\", \"DBA/1J\", \"DBA/1LacJ\",\n # \"DBA/2DeJ\", \"DBA/2HaSmnJ\", \"DBA/2J\", \"DDK/Pas\", \"DDY/JclSidSeyFrkJ\", \"DLS/LeJ\", \"EL/SuzSeyFrkJ\",\n # \"FVB/NJ\", \"HPG/BmJ\", \"I/LnJ\", \"IBWSP2\", \"IBWSR2\", \"ICOLD2\", \"IHOT1\", \"IHOT2\", \"ILS\", \"ISS\", \"JE/LeJ\",\n # \"KK/HlJ\", \"LG/J\", \"LP/J\", \"LT/SvEiJ\", \"MRL/MpJ\", \"NOD/ShiLtJ\", \"NON/ShiLtJ\", \"NONcNZO10/LtJ\",\n # \"NONcNZO5/LtJ\", \"NOR/LtJ\", \"NU/J\", \"NZB/BlNJ\", \"NZL/LtJ\", \"NZM2410/J\", \"NZO/HlLtJ\", \"NZW/LacJ\", \"P/J\",\n # \"PL/J\", \"PN/nBSwUmabJ\", \"RF/J\", \"RHJ/LeJ\", \"RIIIS/J\", \"RSV/LeJ\", \"SB/LeJ\", \"SEA/GnJ\", \"SEC/1GnLeJ\",\n # \"SEC/1ReJ\", \"SH1/LeJ\", \"SI/Col Tyrp1 Dnahc11/J\", \"SJL/Bm\", \"SJL/J\", \"SM/J\", \"SSL/LeJ\", \"ST/bJ\",\n \"STX/Le\", ] # \"SWR/J\", \"TALLYHO/JngJ\", \"TKDU/DnJ\", \"TSJ/LeJ\", \"YBR/EiJ\", \"ZRDCT Rax<+>ChUmdJ\"]\n if tl.is_available(s)]\n wild_derived = [s for s in\n ['22MO',\n # 'BIK/g', 'BULS', 'BUSNA', 'BZO', 'CALB/RkJ', 'CASA/RkJ', 'CAST/EiJ', 'CIM', 'CKN', 'CKS',\n 'CZECHI/EiJ', 'CZECHII/EiJ', 'DCA', 'DCP', 'DDO', 'DEB', 'DGA', 'DIK', 'DJO', 'DKN', 'DMZ', 'DOT',\n # 'IS/CamRkJ', 'JF1/Ms', 'LEWES/EiJ', 'MBK', 'MBS', 'MCZ', 'MDG', 'MDGI', 'MDH', 'MGA', 'MH',\n # 'MOLD/RkJ', 'MOLF/EiJ', 'MOLG/DnJ', 'MOR/RkJ', 'MPB', 'MSM/Ms', 'PERA/EiJ', 'PERC/EiJ', 'POHN/Deh',\n # 'PWD/PhJ', 'PWK/PhJ', 'RBA/DnJ', 'RBB/DnJ', 'RBF/DnJ', 'SF/CamEiJ', 'SKIVE/EiJ', 'SOD1/EiJ',\n # 'STLT', 'STRA', 'STRB', 'STUF', 'STUP', 'STUS', 'TIRANO/EiJ', 'WLA', 'WMP', 'WSB/EiJ',\n 'ZALENDE/EiJ'] if tl.is_available(s)]\n tl.contingency_table(classical, wild_derived, '/csbiohome01/sgreens/Projects/intervals/contingency.csv')\n exit()\n x = TwoLocus(chrom_sizes=[20e6, 20e6])\n x.preprocess([\"test2.csv\"])\n x.unique_combos(['A', 'B', 'D'], ['C', 'E'])\n x.sources_at_point_pair('1', 1, '1', 10000000, ['A'])\n # x.interlocus_dependence([chr(c) for c in xrange(ord('A'), ord('J')+1)])\n # exit()\n\n x = TwoLocus(chrom_sizes=[20 * 10 ** 6, 20 * 10 ** 6])\n x.preprocess([\"test.csv\"])\n rez = x.pairwise_frequencies([\"A\"])\n\n areas = x.calculate_genomic_area(rez[0], rez[1])\n total = 0.0\n\n for combo in subspecies.iter_combos():\n print \"\\t{:15s}({:4d}):{:1.5f}\".format(subspecies.to_string(combo), combo,\n areas[str(subspecies.to_string(combo))])\n total += areas[str(subspecies.to_string(combo))]\n print \"\\t{:21s}:{:1.5f}\".format(\"Total\", total)\n\n sys.exit(1)\n # for code, combo in combos.iteritems():\n # print \"\\n\", rez[1]\n # print \"\\t{} ({}):\\n{}\".format(combo, code, rez[0][code])", "def 
makeRecombMapBooker(snplist, rholist, chrom):\n if chrom == \"X\":\n mapsize = 44.7\n elif chrom == \"3R\":\n mapsize = 90.9\n elif chrom == \"3L\":\n mapsize = 89.1\n elif chrom == \"2L\":\n mapsize = 63.2\n elif chrom == \"2R\":\n mapsize = 94.8\n elif chrom == \"2RL\":\n mapsize = 158\n elif chrom == \"3RL\":\n mapsize = 180\n poslist = []\n rhocum = []\n cMlist = []\n cMMblist = []\n for i, pos in enumerate(snplist):\n if i == 0:\n rhoTemp = (rholist[i] * (pos))\n else:\n rhoTemp = (rholist[i] * (pos - snplist[i-1]))\n if i == 0:\n rhocum.append(rhoTemp)\n else:\n rhocum.append(rhocum[-1] + rhoTemp)\n poslist.append(pos)\n for i, j in enumerate(rhocum):\n cMperSNP = (j / rhocum[-1])\n cMlist.append(cMperSNP)\n cMMblist.append(((cMlist[i] - cMlist[i-1])*mapsize) / ((snplist[i] - snplist[i-1])/1E6))\n return(poslist, cMMblist, cMlist)", "def getSplitLibrariesMappingFileData(self, study_id):\n\n try:\n con = self.getMetadataDatabaseConnection()\n results = con.cursor()\n result_sets = {}\n con.cursor().callproc('qiime_assets.get_split_libarary_data', [study_id, results])\n\n mapping_file_header = '#SampleID\\tBarcodeSequence\\tLinkerPrimerSequence\\tRunPrefix\\tDescription'\n #for column in results.description:\n # mapping_file_header += column[0] + '\\t'\n\n for row in results:\n linker = row[2]\n primers = row[3]\n run_prefix = row[4]\n linker_primer_list = ''\n \n # handles null linkers\n if linker is None:\n linker=''\n if primers is None:\n primers=''\n # Create a comma-separated list of linker+primer sequences\n if ',' in primers:\n primer_list = primers.split(',')\n for primer in primer_list:\n linker_primer_list += '{0}{1},'.format(linker, primer)\n \n # Strip the trailing comma\n linker_primer_list = linker_primer_list[:-1]\n else:\n linker_primer_list = linker + primers\n\n # Adjust the row contents\n newrow = (row[0], row[1], linker_primer_list, row[4], row[5])\n\n # If this is the first time we've seen this run_prefix, create a new list \n # to hold the rows\n if run_prefix not in result_sets:\n result_sets[run_prefix] = []\n\n # Add the row to the right run_prefix heading\n result_sets[run_prefix].append(newrow)\n\n #raise Exception(str(result_sets))\n\n return mapping_file_header, result_sets\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception(str(e))", "def get_top_k_docs():\n# doc_res_dicts_path = base_path+\"\\\\docs_norm_scores_dicts\"\n# sen_ret_input_path = base_path+\"\\\\claimLM_senLM_sen_ret_input\"\n doc_res_dicts_path = linux_base_path+\"/docs_norm_scores_dicts\"\n sen_ret_input_path = linux_base_path+\"/claimLM_senLM_sen_ret_input\"\n claims_no_SW_dict = read_pickle(\"claims_no_SW_dict\")\n k_values = [100]\n for filename in os.listdir(doc_res_dicts_path):\n if not \"clm_key_ranked_list_of_docs_\" in filename:\n curr_dict = read_pickle(doc_res_dicts_path+\"/\"+filename)\n curr_claim_num = filename.split(\"_clm_\")[1].split(\"_dict_sorted\")[0]\n curr_alpha = filename.split(\"_alpha_\")[1].split(\"_beta_\")[0]\n curr_beta = filename.split(\"_beta_\")[1].split(\"_clm_\")[0]\n for k_val in k_values:\n top_k_docs = [key[1] for key in curr_dict.keys()][0:k_val]\n # write to sen query file with workingSetDocno\n sen_ret_docno_file = open(sen_ret_input_path+\"/claimLM_senLM_sen_ret_docno_alpha_\"+curr_alpha+\"_beta_\"+curr_beta+\"_top_k_docs_\"+str(k_val)+\"_clm_\"+curr_claim_num,\"wb\")\n sen_ret_docno_file.write(\"<parameters>\\n\")\n 
sen_ret_docno_file.write(\"<query><number>\"+curr_claim_num+\"</number><text>\"+claims_no_SW_dict[curr_claim_num][0].strip()+\"|\"+claims_no_SW_dict[curr_claim_num][1].strip()+\"</text>\")\n for workingDoc in top_k_docs:\n sen_ret_docno_file.write(\"<workingSetDocno>\"+workingDoc+\"</workingSetDocno>\")\n sen_ret_docno_file.write(\"</query>\\n\")\n sen_ret_docno_file.write(\"</parameters>\")\n sen_ret_docno_file.close()", "def Overtopping(self):\n\n #sort files\n leftOverTop = list()\n RightOverTop = list()\n # get names of files that has _left or _right at its end\n All1DFiles = os.listdir(self.OneDResultPath)\n for i in range(len(All1DFiles)) :\n if All1DFiles[i].endswith(self.leftOvertopping_Suffix):\n leftOverTop.append(All1DFiles[i])\n if All1DFiles[i].endswith(self.RightOvertopping_Suffix):\n RightOverTop.append(All1DFiles[i])\n\n # two dictionaries for overtopping left and right\n OverToppingSubsLeft = dict()\n OverToppingSubsRight = dict()\n # the _left and _right files has all the overtopping discharge\n # but sometimes the sum of all the overtopping is less than a threshold specified\n # and then the 2D algorithm does not run so these cross sections you will not find\n # any inundation beside it in the maps but you will find it in the _left or _right maps\n\n # for each sub-basin that has overtopping from the left dike\n for i in range(len(leftOverTop)):\n\n try:\n # open the file (if there is no column sthe file is empty)\n data = pd.read_csv(self.OneDResultPath + leftOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]] = dict()\n except:\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsLeft[leftOverTop[i][:-len(self.leftOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n for i in range(len(RightOverTop)):\n\n try:\n # open the file\n data = pd.read_csv(self.OneDResultPath + RightOverTop[i],header =None,delimiter = r'\\s+')\n # add the sub basin to the overtopping dictionary of sub-basins\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]] = dict()\n except :\n continue\n # get the XS that overtopping happened from\n XSs = list(set(data.loc[:,2]))\n # for each XS get the days\n for j in range(len(XSs)):\n OverToppingSubsRight[RightOverTop[i][:-len(self.RightOvertopping_Suffix)]][XSs[j]] = list(set(data[0][data[2] == XSs[j]].tolist()))\n\n self.OverToppingSubsLeft = OverToppingSubsLeft\n self.OverToppingSubsRight = OverToppingSubsRight", "def read_prnu_files():\n file_path = r'C:\\Users\\nmishra\\Workspace\\TEMPO\\PRNU_map\\PRNU_map_Median'\n prnu_mask_a = np.genfromtxt(file_path +'/' + 'Quad A_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_b = np.genfromtxt(file_path +'/' + 'Quad B_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_c = np.genfromtxt(file_path +'/' + 'Quad C_Final_PRNU.csv',\n delimiter=',')\n prnu_mask_d = np.genfromtxt(file_path +'/' + 'Quad D_Final_PRNU.csv',\n delimiter=',')\n prnu_mask = [prnu_mask_a, prnu_mask_b, prnu_mask_c, prnu_mask_d]\n return prnu_mask", "def _get_files(\n self,\n data_root,\n data_subset=\"full/*0\",\n signal_subset=\"*\",\n noise_subset=\"*\",\n data_type=\"raw\",\n noise_type=\"stationary\",\n noise_type_sim=None,\n mask_type=\"hitsmask_tailored\",\n signal_type=\"r0p03\",\n signal_type_sim=None,\n signal_transfer_type=None,\n suffix=\"\",\n 
foreground_type_sim=None,\n template_type=None,\n sub_planck=False,\n ):\n\n if signal_transfer_type is None:\n signal_transfer_type = signal_type\n\n # regularize data root\n if not os.path.exists(data_root):\n raise OSError(\"Missing data root {}\".format(data_root))\n\n # find all map files\n map_root = os.path.join(data_root, \"data_{}\".format(data_type))\n map_files = []\n data_subset = data_subset.split(\",\")\n for f in np.atleast_1d(data_subset):\n files = glob.glob(os.path.join(map_root, \"{}.fits\".format(f)))\n if not len(files):\n raise OSError(\"Missing files in data subset {}\".format(f))\n map_files.extend(files)\n data_subset = \",\".join(data_subset)\n map_files = sorted(map_files)\n map_files = [f for f in map_files if os.path.basename(f).startswith(\"map_\")]\n map_tags = [\n os.path.splitext(os.path.basename(f))[0].split(\"_\", 1)[1] for f in map_files\n ]\n map_freqs = []\n for t in map_tags:\n # if map tag is not a plain frequency, extract plain frequency\n map_freqs.append(self.dict_freqs[t])\n self.log(\"Found {} map files in {}\".format(len(map_files), map_root), \"info\")\n self.log(\"Map files: {}\".format(map_files), \"debug\")\n self.log(\"Map freqs: {}\".format(map_freqs), \"debug\")\n\n raw_root = None\n raw_files = None\n # find all corresponding signal sims\n signal_root = os.path.join(data_root, \"signal_{}\".format(signal_type))\n num_signal = None\n signal_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal is None:\n num_signal = nsims1\n else:\n if nsims1 != num_signal:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal\n )\n )\n num_signal = min(num_signal, nsims1)\n signal_files.append(sfiles)\n signal_files = np.asarray([x[:num_signal] for x in signal_files])\n self.log(\"Found {} signal sims in {}\".format(num_signal, signal_root), \"info\")\n self.log(\n \"First signal sim files: {}\".format(signal_files[:, 0].tolist()), \"debug\"\n )\n\n # find all corresponding signal transfer function sims\n signal_transfer_root = os.path.join(\n data_root, \"signal_{}\".format(signal_transfer_type)\n )\n num_signal_transfer = None\n signal_transfer_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_transfer_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal_transfer is None:\n num_signal_transfer = nsims1\n else:\n if nsims1 != num_signal_transfer:\n raise OSError(\n \"Found {} signal_transfer sims for map {}, expected {}\".format(\n nsims1, f, num_signal_transfer\n )\n )\n num_signal_transfer = min(num_signal_transfer, nsims1)\n signal_transfer_files.append(sfiles)\n signal_transfer_files = np.asarray(\n [x[:num_signal_transfer] for x in signal_transfer_files]\n )\n self.log(\n \"Found {} signal transfer sims in {}\".format(\n num_signal_transfer, signal_transfer_root\n ),\n \"info\",\n )\n self.log(\n \"First signal transfer sim files: {}\".format(\n signal_transfer_files[:, 0].tolist()\n ),\n \"debug\",\n )\n\n # find all corresponding noise sims\n if noise_type is not None:\n noise_root = os.path.join(data_root, \"noise_{}\".format(noise_type))\n num_noise = None\n noise_files = []\n for f in 
map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise is None:\n num_noise = nsims1\n else:\n if nsims1 != num_noise:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise\n )\n )\n num_noise = min(num_noise, nsims1)\n noise_files.append(nfiles)\n noise_files = np.asarray([x[:num_noise] for x in noise_files])\n self.log(\"Found {} noise sims in {}\".format(num_noise, noise_root), \"info\")\n self.log(\n \"First noise sim files: {}\".format(noise_files[:, 0].tolist()), \"debug\"\n )\n else:\n noise_root = None\n noise_files = None\n\n # find all corresponding noise sims for sim_index run\n if noise_type_sim is not None:\n noise_root_sim = os.path.join(data_root, \"noise_{}\".format(noise_type_sim))\n num_noise_sim = None\n noise_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root_sim).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise_sim is None:\n num_noise_sim = nsims1\n else:\n if nsims1 != num_noise_sim:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise_sim\n )\n )\n num_noise_sim = min(num_noise_sim, nsims1)\n noise_files_sim.append(nfiles)\n noise_files_sim = np.asarray(noise_files_sim)\n self.log(\n \"Found {} noise sims in {}\".format(num_noise_sim, noise_root_sim),\n \"info\",\n )\n self.log(\n \"First noise sim files: {}\".format(noise_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n noise_root_sim = noise_root\n noise_files_sim = noise_files\n\n # find all corresponding signal sims for sim_index run\n if signal_type_sim is not None:\n signal_root_sim = os.path.join(\n data_root, \"signal_{}\".format(signal_type_sim)\n )\n num_signal_sim = None\n signal_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root_sim).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal_sim is None:\n num_signal_sim = nsims1\n else:\n if nsims1 != num_signal_sim:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal_sim\n )\n )\n num_signal_sim = min(num_signal_sim, nsims1)\n signal_files_sim.append(nfiles)\n signal_files_sim = np.asarray(signal_files_sim)\n self.log(\n \"Found {} signal sims in {}\".format(num_signal_sim, signal_root_sim),\n \"info\",\n )\n self.log(\n \"First signal sim files: {}\".format(signal_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n signal_root_sim = signal_root\n signal_files_sim = signal_files\n\n # find all corresponding foreground sims for sim_index run\n if foreground_type_sim is not None:\n foreground_root = os.path.join(\n data_root, \"foreground_{}\".format(foreground_type_sim)\n )\n num_foreground_sim = None\n foreground_files = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, foreground_root).replace(\".fits\", \"_*.fits\")\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing foreground sims for {}\".format(f))\n if num_foreground_sim is None:\n num_foreground_sim = nsims1\n else:\n if nsims1 != num_foreground_sim:\n 
raise OSError(\n \"Found {} foreground sims for map {}, expected {}\".format(\n nsims1, f, num_foreground_sim\n )\n )\n num_foreground_sim = min(num_foreground_sim, nsims1)\n foreground_files.append(nfiles)\n foreground_files = np.asarray(\n [x[:num_foreground_sim] for x in foreground_files]\n )\n self.log(\n \"Found {} foreground sims in {}\".format(\n num_foreground_sim, foreground_root\n ),\n \"info\",\n )\n self.log(\n \"First foreground sim files: {}\".format(\n foreground_files[:, 0].tolist()\n ),\n \"debug\",\n )\n else:\n foreground_root = None\n foreground_files = None\n\n # find all corresponding masks\n if mask_type is None:\n raise ValueError(\"Argument mask_type required\")\n # If mask is a fits file, use the same mask for all maps\n if os.path.splitext(mask_type)[1] == \".fits\":\n if os.path.exists(mask_type):\n # it's an absolute path\n mask_files = np.tile(mask_type, len(map_tags))\n mask_root = os.path.dirname(mask_type)\n else:\n # it's relative to base directory structure\n mask_files = np.tile(os.path.join(data_root, mask_type), len(map_tags))\n mask_root = os.path.dirname(os.path.join(data_root, mask_type))\n else:\n mask_root = os.path.join(data_root, \"masks_{}\".format(mask_type))\n # XXX Do this smarter\n mask_files = [\n os.path.join(mask_root, \"mask_map_{}.fits\".format(tag))\n for tag in map_tags\n ]\n for f in mask_files:\n if not os.path.exists(f):\n raise OSError(\"Missing mask file {}\".format(f))\n self.log(\"Found {} masks in {}\".format(len(mask_files), mask_root), \"info\")\n self.log(\"Mask files: {}\".format(mask_files), \"debug\")\n\n # Also need a list of unique map tags for populating dictionaries\n # in data structures\n map_tags_orig = list(map_tags) # copy\n map_tags = pt.unique_tags(map_tags)\n\n # make a list of names corresponding to the order of the cross spectra\n map_pairs = pt.tag_pairs(map_tags)\n map_pairs_orig = pt.tag_pairs(map_tags, index=map_tags_orig)\n\n # make a dictionary of map freqs for each unique map tag\n map_freqs_dict = {}\n for im0, m0 in enumerate(map_tags):\n map_freqs_dict[m0] = map_freqs[im0]\n map_freqs = map_freqs_dict\n\n fields = [\n \"data_root\",\n \"data_subset\",\n \"map_root\",\n \"map_files\",\n \"map_tags\",\n \"map_pairs\",\n \"map_tags_orig\",\n \"map_pairs_orig\",\n \"map_freqs\",\n \"raw_root\",\n \"raw_files\",\n \"signal_root\",\n \"signal_files\",\n \"signal_root_sim\",\n \"signal_files_sim\",\n \"signal_transfer_root\",\n \"signal_transfer_files\",\n \"noise_root\",\n \"noise_files\",\n \"noise_root_sim\",\n \"noise_files_sim\",\n \"mask_root\",\n \"mask_files\",\n \"foreground_root\",\n \"foreground_files\",\n ]\n out = dict()\n local = locals()\n for f in fields:\n out[f + suffix] = local[f]\n return out", "def fragment_length_filter(fragment_anno_dic):\n out_list = []\n total_fragment = 0\n for key in fragment_anno_dic.keys():\n #print fragment_anno_dic[key]\n fragments_flag = []\n fragments_length = []\n fragments_region = []\n total_fragment += int(fragment_anno_dic[key][0][-3])\n reads_coverage = [x[-3] for x in fragment_anno_dic[key]]\n if len(list(set(reads_coverage))) != 1:\n print (fragment_anno_dic[key])\n if len(fragment_anno_dic[key]) == 1:\n fragment_anno_dic[key][0] = list(fragment_anno_dic[key][0])\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n for i in range(0,len(fragment_anno_dic[key])):\n fragment_anno_dic[key][i] = list(fragment_anno_dic[key][i])\n iso = fragment_anno_dic[key][i]\n 
iso_length = sum([int(x) for x in iso[10].split(',')])\n fragments_length.append(iso_length)\n fragments_flag.append(iso[-2])\n fragments_region.append(iso[8])\n #print fragment_anno_dic[key]\n#---------------------------------------------------------------- complete fragments (Set region preference)\n region_complete = [''] * len(fragments_flag)\n max_flag = max(fragments_flag)\n #print fragments_length,fragments_region,fragments_flag\n if max_flag == 3:\n for x in range(0,len(fragments_flag)):\n if fragments_flag[x] == max_flag:\n fragment_anno_dic[key][x][-2] = str(fragment_anno_dic[key][x][-2])\n region_complete[x] = fragments_region[x]\n # Set preference\n if 'CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS')]))\n elif '5UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR')]))\n elif '3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('3UTR')]))\n elif '5UTR-CDS' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('CDS-3UTR')]))\n elif 'intron' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('Null')]))\n else:\n print (fragment_anno_dic[key])\n print ('Gene type error!')\n#----------------------------------------------------------------- incomplete fragments (choose the longest fragments)\n elif max_flag == 2:\n max_length_list = [0] * len(fragments_length)\n max_region_list = [''] * len(fragments_length)\n for y in range(0,len(fragments_flag)):\n if fragments_flag[y] == max_flag:\n max_length_list[y] = fragments_length[y]\n #print max_length_list\n max_length = max(max_length_list)\n #print max_length\n for z in range(0,len(max_length_list)):\n if max_length_list[z] == max_length:\n fragment_anno_dic[key][z][-2] = str(fragment_anno_dic[key][z][-2])\n max_region_list[z] = fragments_region[z]\n #print max_region_list\n # Set preference\n if 'CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS')]))\n elif '5UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR')]))\n elif '3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('3UTR')]))\n elif '5UTR-CDS' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('5UTR-CDS')]))\n elif 'CDS-3UTR' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('CDS-3UTR')]))\n elif 'intron' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('intron')]))\n elif 'intron-containing' in region_complete:\n out_list.append('\\t'.join(fragment_anno_dic[key][region_complete.index('intron-containing')]))\n elif 'Null' in max_region_list:\n out_list.append('\\t'.join(fragment_anno_dic[key][max_region_list.index('Null')]))\n elif max_flag == 1: #Not annotated to exon region\n fragment_anno_dic[key][fragments_flag.index(1)][-2] = 
str(fragment_anno_dic[key][fragments_flag.index(1)][-2])\n # print (fragment_anno_dic[key])\n out_list.append('\\t'.join(fragment_anno_dic[key][fragments_flag.index(1)]))\n elif max_flag == 0: #Not annotated to intragenic region\n fragment_anno_dic[key][0][-2] = str(fragment_anno_dic[key][0][-2])\n out_list.append('\\t'.join(fragment_anno_dic[key][0]))\n else:\n print (fragment_anno_dic[key])\n print ('Please check flag information')\n print ('Total fragments after filtering 1: ' + str(total_fragment))\n return out_list", "def _get_sensor_col_files(self, gas, loc):\n sub = os.path.join('WTD_upload', self.GasNames[gas], self.Locs[loc])\n with zipfile.ZipFile(self.data_location, mode='r') as zf:\n files = ['/'.join(info.filename.split('/')[1:]) for info in zf.infolist()]\n files.sort()\n return sub, files", "def get_final_main_cgovtype_ori_agency(file_path):\n final_main_df = pd.read_csv(file_path)\n final_main_fips_ori_agency = final_main_df[['ORI', 'AGENCY', 'CGOVTYPE', 'FIPS_STATE', 'FIPS_PLACE']]\n\n \"\"\"\n 1. Obtain only unique records from the final main file - key: fips place + fips state\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency.drop_duplicates(['FIPS_STATE', 'FIPS_PLACE']) # --> 11,602 rows\n\n \"\"\"\n 2. Rename CGOVTYPE, FIPS_STATE, FIPS_PLACE to Govt_level, 'STATEFP', 'place_fips' to match national census file\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.rename(\n {'CGOVTYPE': 'Govt_level', 'FIPS_STATE': 'STATEFP', 'FIPS_PLACE': 'place_fips'}, axis='columns')\n\n \"\"\"\n 3. Get only those records from 90 final main file whose cgovtype is 1,2 or 3\n \"\"\"\n final_main_fips_ori_agency_unique = final_main_fips_ori_agency_unique.loc[final_main_fips_ori_agency_unique['Govt_level'].isin([1, 2, 3])]\n\n return final_main_fips_ori_agency_unique", "def read_GFF(gff_filename):\n gff_info = {} # loci --> LocusInfo\n tmp = {} # loci PB.X --> list of GFF records for PB.X.Y\n\n for r in collapseGFFReader(gff_filename):\n m = rex_pbid.match(r.seqid)\n if m is None:\n raise Exception(f\"Expected PBID format PB.X.Y but saw {r.seqid}\")\n locus = m.group(1) # ex: PB.1\n if locus not in tmp:\n tmp[locus] = [r]\n gff_info[locus] = LocusInfo(\n chrom=r.chr, strand=r.strand, regions=None, isoforms=None\n )\n else:\n if gff_info[locus].chrom != r.chr:\n logger.warning(\n f\"WARNING: Expected {r.seqid} to be on {gff_info[locus].chrom} but saw {r.chr}. Could be minimap2 multi-mapping inconsistency for repetitive genes. 
Check later.\\n\"\n )\n tmp[locus].append(r)\n\n # now figure out the exonic regions for each gene PB.X\n for locus, records in tmp.items():\n c = ClusterTree(0, 0)\n for r in records:\n for e in r.ref_exons:\n c.insert(\n max(0, e.start - extra_bp_around_junctions),\n e.end + extra_bp_around_junctions,\n 1,\n )\n\n regions = [(a, b) for (a, b, junk) in c.getregions()]\n regions[0] = (max(0, regions[0][0] - __padding_before_after__), regions[0][1])\n regions[-1] = (\n max(0, regions[-1][0]),\n regions[-1][1] + __padding_before_after__,\n )\n gff_info[locus] = LocusInfo(\n chrom=gff_info[locus].chrom,\n strand=gff_info[locus].strand,\n regions=regions,\n isoforms=[r.seqid for r in records],\n )\n\n return gff_info", "def rest_of_ORF(dna):\n for h in range(0, len(dna), 3):\n curr_triplet = dna[h : h + 3] \n #for n in range(0, len(amino_acids.codons),1):\n if curr_triplet in amino_acids.codons[10]:\n return dna[0 : h] \n return dna", "def get_all_down_profile(th_object, start, end, filename, path):\n get_down_profile(th_object, start, end, filename, '20', '10', path + \"_20_10.csv\")\n get_down_profile(th_object, start, end, filename, '20', '30', path + \"_20_30.csv\")\n get_down_profile(th_object, start, end, filename, '22', '11', path + \"_22_11.csv\")\n get_down_profile(th_object, start, end, filename, '22', '31', path + \"_22_31.csv\")\n get_down_profile(th_object, start, end, filename, '22', '10', path + \"_22_10.csv\")\n get_down_profile(th_object, start, end, filename, '20', '31', path + \"_20_31.csv\")\n get_down_profile(th_object, start, end, filename, '20', '70', path + \"_20_70.csv\")\n get_down_profile(th_object, start, end, filename, '20', '90', path + \"_20_90.csv\")\n get_down_profile(th_object, start, end, filename, '22', '71', path + \"_22_71.csv\")\n get_down_profile(th_object, start, end, filename, '22', '91', path + \"_22_91.csv\")\n get_down_profile(th_object, start, end, filename, '22', '70', path + \"_22_70.csv\")\n get_down_profile(th_object, start, end, filename, '20', '91', path + \"_20_91.csv\")", "def collapse_genotypes(pL,gL):\n if len(gL) < 2:\n return gL\n else:\n uniqueL = [] # list of unique genotypes relative to ploidy\n for g in gL:\n s = ''\n for i in xrange(len(pL)):\n s += ''.join(sorted(g[0:pL[i]]))\n g = g[pL[i]:]\n if s not in uniqueL:\n uniqueL.append(s)\n return uniqueL", "def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant 
larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', '289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 
'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]", "def list_sorted_files(uuid, basepath=None):\n if basepath is None:\n basepath = get_basepath()\n if 's3://' in basepath:\n return s3wrangler.list_objects(basepath + 'ephys/' + uuid + '/derived/kilosort2/')\n else:\n # return glob.glob(os.path.join(basepath, f'ephys/{uuid}/derived/kilosort2/*'))\n return glob.glob(basepath + f'ephys/{uuid}/derived/kilosort2/*')", "def order_chromosomal_contigs(chr_blast_output):\n ordered_chr_contigs = []\n current_contig = \"null\"\n current_contig_direction = 0\n current_contig_hits = 0\n\n with open(chr_blast_output) as blast_matches:\n for hit in blast_matches:\n hit_data = hit.rstrip(\"\\n\").split(\"\\t\")\n core_gene_dir = int(hit_data[0].split(\"|\")[1])\n if float(hit_data[2]) >= 90.0:\n new_contig = hit_data[1]\n new_contig_direction = core_gene_dir*np.sign(int(hit_data[9])-int(hit_data[8]))\n \n if new_contig == current_contig and new_contig_direction == current_contig_direction:\n current_contig_hits += 1\n else: \n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n current_contig = new_contig\n current_contig_direction = new_contig_direction\n current_contig_hits = 1\n\n contig_tuple = (current_contig, current_contig_direction, current_contig_hits)\n ordered_chr_contigs.append(contig_tuple)\n ordered_chr_contigs.pop(0)\n\n #If hits to a contig are not contiguous, keep only the longest run \n chr_contig_dict = {} #stores the longest run for each contig\n remove_list = [] #stores the shorter runs for deletion\n n = -1\n for entry in ordered_chr_contigs:\n n += 1\n contig = entry[0]\n hits = entry[2]\n if contig not in chr_contig_dict:\n chr_contig_dict[contig] = (n, entry)\n elif hits > chr_contig_dict[contig][1][2]:\n 
remove_list.append(chr_contig_dict[contig])\n chr_contig_dict[contig] = (n, entry)\n else:\n remove_list.append((n, entry))\n\n #The first contig will usually also be the last - both should be kept \n for item in remove_list:\n \n if int(item[0]) == 0 or int(item[0]) == len(ordered_chr_contigs)-1:\n remove_list.remove(item)\n \n remove_list.sort(reverse = True)\n for item in remove_list:\n position = item[0]\n ordered_chr_contigs.pop(position)\n \n return ordered_chr_contigs", "def merge_root_histos(run, seqno, slices):\n inset = {\"hists\": \"hd_root.root\",\n \"tree_TS_scaler\": \"tree_TS_scaler.root\",\n \"tree_bcal_hadronic_eff\": \"tree_bcal_hadronic_eff.root\",\n \"tree_fcal_hadronic_eff\": \"tree_fcal_hadronic_eff.root\",\n \"tree_tof_eff\": \"tree_tof_eff.root\",\n \"tree_sc_eff\": \"tree_sc_eff.root\",\n \"tree_PSFlux\": \"tree_PSFlux.root\",\n \"tree_TPOL\": \"tree_TPOL.root\",\n }\n outset = {\"hists\": \"hd_root_{0:06d}_{1:03d}.root\",\n \"tree_TS_scaler\": \"tree_TS_scaler_{0:06d}_{1:03d}.root\",\n \"tree_bcal_hadronic_eff\": \"tree_bcal_hadronic_eff_{0:06d}_{1:03d}.root\",\n \"tree_fcal_hadronic_eff\": \"tree_fcal_hadronic_eff_{0:06d}_{1:03d}.root\",\n \"tree_tof_eff\": \"tree_tof_eff_{0:06d}_{1:03d}.root\",\n \"tree_sc_eff\": \"tree_sc_eff_{0:06d}_{1:03d}.root\",\n \"tree_PSFlux\": \"tree_PSFlux_{0:06d}_{1:03d}.root\",\n \"tree_TPOL\": \"tree_TPOL_{0:06d}_{1:03d}.root\",\n }\n badslices = []\n slicepatt = re.compile(r\"([1-9][0-9]*),([1-9][0-9]*)/\")\n for iset in inset:\n ofile = outset[iset].format(run, seqno)\n ifiles = [\"{0},{1}/\".format(sl[0], sl[1]) +\n inset[iset].format(run, seqno, sl[0], sl[1])\n for sl in slices\n ]\n cmd = subprocess.Popen([\"hadd\", ofile] + ifiles,\n stderr=subprocess.PIPE)\n elog = cmd.communicate()\n if cmd.returncode != 0:\n for eline in elog[1].decode(\"ascii\").split('\\n'):\n badslice = slicepatt.search(eline)\n if badslice:\n badslices.append(\"{0},{1}\".format(badslice.group(1),\n badslice.group(2)))\n sys.stderr.write(eline + '\\n')\n sys.stderr.write(\"Error on output file {0}\".format(ofile) +\n \" - root file merging failed!\\n\")\n sys.stderr.flush()\n continue\n odir = output_area + \"/\" + iset + \"/{0:06d}\".format(run)\n upload(ofile, odir)\n return badslices", "def mapping_names(self):\n return sorted([self.basename] + [name for selector in self.selections.normal_values() for name in selector.mapping_names()])", "def gene_finder(dna):\n threshold = longest_ORF_noncoding(dna, 1500)\n l = []\n for i in find_all_ORFs_both_strands(dna):\n \tif len(i)>=threshold:\n \t\tl.append(coding_strand_to_AA(i))\n print l\n return l", "def test_paths_to_plates():\n output = filelister_ix.paths_to_plates(TEST_PATH_IX)\n prefix = os.path.abspath(TEST_PATH_IX)\n plate_names = [\"test-plate-1\", \"test-plate-2\",\n \"test-plate-3\", \"test-plate-4\"]\n make_own = [os.path.join(prefix, name) for name in plate_names]\n assert len(output) == len(plate_names)\n for ans in output:\n assert ans in make_own", "def build_messy_lookup_lad(source,dest):\n la = QuickGrid().open(source)\n\n lookup = QuickGrid()\n lookup.header = [\"gss-code\",\"local-authority-code\"]\n\n possible = [\"gss-code\",\"archaic-gss-code\"]\n possible = [p for p in possible if p in la.header]\n for r in la:\n for p in possible:\n if r[p]:\n values = r[p].split(\",\")\n for v in values:\n lookup.add([v,r[\"local-authority-code\"]])\n \n lookup.save(dest,force_unicode=True)", "def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n 
# parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()", "def get_files_suffix_list(suffixes, flist, Lshow=False, Ldir=False):\n matched_files=[]\n dirs=[]\n files=[]\n for fname in flist:\n if os.path.isdir(fname):\n dirs.append(fname)\n else:\n files.append(fname)\n for suff in suffixes:\n for fname in files:\n #print(f\" {suff} in {fname} ?\")\n if fname.endswith(suff):\n matched_files.append(fname)\n matched_files.extend(dirs) \n return matched_files", "def temp_(code):\n\n # Does the partial match criteria include at least one OLC code?\n if centroid or northwest or southeast:\n # Is the specified UBID code valid?\n if buildingid.v2.isValid(code):\n # Separate the UBID code into three OLC codes.\n openlocationcodes = code.split(buildingid.v2.SEPARATOR_)\n\n # Extract the OLC codes.\n centroid_openlocationcode = openlocationcodes[buildingid.v2.INDEX_CENTROID_]\n northwest_openlocationcode = openlocationcodes[buildingid.v2.INDEX_NORTHWEST_]\n southeast_openlocationcode = openlocationcodes[buildingid.v2.INDEX_SOUTHEAST_]\n\n # Initialize new list of OLC codes.\n new_openlocationcodes = []\n\n if centroid:\n if drop_suffix_centroid > 0:\n # If the \"--centroid\" flag is set and the \"--drop-suffix-centroid\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode[:(-1 * drop_suffix_centroid)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(centroid_openlocationcode)\n\n if northwest:\n if drop_suffix_northwest > 0:\n # If the \"--northwest\" flag is set and the \"--drop-suffix-northwest\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n 
new_openlocationcodes.append(northwest_openlocationcode[:(-1 * drop_suffix_northwest)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(northwest_openlocationcode)\n\n if southeast:\n if drop_suffix_southeast > 0:\n # If the \"--southeast\" flag is set and the \"--drop-suffix-southeast\"\n # option is non-zero, then drop the required number of\n # characters, and append the new OLC code to the list.\n new_openlocationcodes.append(southeast_openlocationcode[:(-1 * drop_suffix_southeast)])\n else:\n # Otherwise, append the unmodified OLC code to the list.\n new_openlocationcodes.append(southeast_openlocationcode)\n\n if len(new_openlocationcodes) > 0:\n # If the new list of OLC codes is non-empty, then join\n # the OLC codes, and then return the result.\n return buildingid.v2.SEPARATOR_.join(new_openlocationcodes)\n else:\n # No result.\n return None\n else:\n # No result.\n return None\n else:\n # No result.\n return None", "def find_emoji_partial_multiFiles(self,bound_tuple):\n count_2_save=self.count_to_save\n save_period=count_2_save\n start=bound_tuple[0]\n limit=bound_tuple[1]\n emoji_hdf5_Info_File_address = '{}/info_{}_to_{}.hdf'.format(self.result_dir, start+1, start+limit)\n emoji_hdf5_Mat_File_address = '{}/matrix_{}_to_{}.hdf'.format(self.result_dir, start+1, start+limit)\n trace_working_file = '{}/taceWorking_{}_to_{}.txt'.format(self.result_dir, start+1, start+limit)\n \n my_col=self.get_collection()\n part_DB=my_col.find().skip(start).limit(limit)\n \n emojiList=self.emoji_list\n adjMat = np.zeros((len(emojiList), len(emojiList)), dtype = int) # The matrix containing the edges\n emojiCount=np.zeros((len(emojiList)), dtype = int) # The number of emoji in the tweet dataset\n heap_mat = np.zeros((len(emojiList), len(emojiList)), dtype = int) # The matrix containing the edges\n last_emoji_netIndex=0\n df_emoji_info = pd.DataFrame()\n df_emoji_heap = pd.DataFrame()\n count_tweet=0\n count_tweet_emoji=0\n count_total_seen_emoji=0\n count_new_emoji=0\n ####------------------------------------------------------######\n ####------------------------------------------------------######\n #### This is the part that the emoji extractor works.\n #### It reads each tweet and matches teh emoji unicodes.\n #### If the emoji unicode is in the text, it will be appended to the \"mentionedTogether\" list.\n print 'Start to extract emojis.....'\n for mytweet in part_DB:\n mentionedTogether=[] ## It stores the emojis detected from the current tweet (i.e. mytweet).\n mentionedTogether_index_in_Net=[] ## It stores the index of emojis. 
The indeices are defined based on the emojiList.\n mentionedTogether_position_in_Text=[] ## It stores the posision of emoji in the text for future work.\n count_tweet+=1\n if 'text' in mytweet:\n #count_tweet+=1\n for emoji in emojiList:\n emoji_str=emoji.replace('\\n','')\n match_all=re.finditer(emoji_str.decode('unicode-escape'),mytweet['text'])\n for match in match_all:\n count_total_seen_emoji+=1\n mentionedTogether.append(emoji)\n mentionedTogether_index_in_Net.append(emojiList.index(emoji))\n mentionedTogether_position_in_Text.append(int(match.start()))\n emojiCount[emojiList.index(emoji)]+=1\n\n \n if len(mentionedTogether)>0:\n ## Yoiu can uncomment the followings to see the tweets detected:\n #print 'tweet #', count_tweet, ': ', mytweet['text']\n #print mentionedTogether\n #print '-----------------------------------------------------'\n ##\n count_tweet_emoji+=1\n emoji_dict=emojiFunction.create_Emoji_info_Dictionary(mytweet,mentionedTogether, mentionedTogether_index_in_Net, \n mentionedTogether_position_in_Text)## creating the dictionary of info\n df_emoji_info = df_emoji_info.append(emoji_dict, ignore_index=True)## updating dataframe for info by emoji_info dictionary\n emoji_heap_dict=emojiFunction.create_Emoji_heap_Dictionary(count_tweet, count_tweet_emoji, count_total_seen_emoji,\n count_new_emoji, mytweet['lang'])## creating the dictionary for heap\n df_emoji_heap=df_emoji_heap.append(emoji_heap_dict, ignore_index=True)## updating dataframe for heap by heap dictionary\n \n if (len(mentionedTogether)>1):####### 2 Mentioned - If they are mentioned together they should be in this list\n #print count_tweet,': ',mentionedTogether_index_in_Net, '(NET) is/are mentioned in: ', mytweet['text']\n #print (mentionedTogether_position_in_Text, ' TEXT is/are mentioned in: ', mytweet['text'])\n adjMat=emojiFunction.update_adj_matrix(adjMat, mentionedTogether_index_in_Net, mentionedTogether_position_in_Text)\n if self.concat_tweet and count_tweet_emoji>1:\n mentionedTogether_index_in_Net.insert(0,last_emoji_netIndex)\n heap_mat=emojiFunction.update_heap_mat(heap_mat, mentionedTogether_index_in_Net)\n if len(mentionedTogether)>0:\n last_emoji_netIndex=mentionedTogether_index_in_Net.pop()\n \n if count_tweet>count_2_save:\n count_2_save+=save_period\n print 'total number of tweets: ',count_tweet, ' saving files .............'\n #print (mentionedTogether_index_in_Net, '(NET) is/are mentioned in: ', mytweet['text'])\n df_emoji_count= pd.DataFrame(data=emojiCount, index=emojiList)\n \n df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n \n #df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList))) ## create data frame for adjacency matrix\n #df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList))) ## create dataframe for the heap matrix\n print 'Saving df_info .........'\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_info, hdf_key='df_info', my_mode='a')\n print 'Saving df_heap ..........'\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_heap, hdf_key='df_heap', my_mode='a')\n del df_emoji_info\n df_emoji_info = pd.DataFrame()\n del df_emoji_heap\n df_emoji_heap = pd.DataFrame()\n \n print 'Saving df_count .........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, 
hdf_struct=df_emoji_count, hdf_key='df_count', my_mode='w')\n print 'Saving df_adjMat ..........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_adjMatrix, hdf_key='df_adjMat', my_mode='a')\n print 'Saving df_heapMat ..........'\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_heapMatrix, hdf_key='df_heapMat', my_mode='a') \n \n with open(trace_working_file, 'a') as the_file:\n temp='\\t'+str(count_tweet)+',\\t'+str(mytweet['created_at'])+',\\t'+str(mytweet['id'])\n the_file.write(temp)\n the_file.write('\\n')\n print 'After tweet #{}, the {}_to_{} part was saved'.format(count_tweet, start+1, start+limit)\n print 'Working on the rest........'\n if self.stop:\n break\n\n print 'Saving files of the part {}_to{} for the last time...............'.format(start+1, start+limit)\n df_emoji_count= pd.DataFrame(data=emojiCount, index=emojiList)\n df_emoji_adjMatrix=pd.DataFrame(data=adjMat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n df_emoji_heapMatrix=pd.DataFrame(data=heap_mat, index=np.arange(len(emojiList)), columns=np.arange(len(emojiList)))\n \n #df_emoji_info.to_hdf(emoji_hdf5_Mat_File_address, where='df_info, df_heap, df_count, df_adjMat, df_heapMat', mode='w')\n \n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_info, hdf_key='df_info', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Info_File_address, hdf_struct=df_emoji_heap, hdf_key='df_heap', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_count, hdf_key='df_count', my_mode='w')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_adjMatrix, hdf_key='df_adjMat', my_mode='a')\n self.write_on_hdf(emoji_hdf5_Mat_File_address, hdf_struct=df_emoji_heapMatrix, hdf_key='df_heapMat', my_mode='a') \n\n with open(trace_working_file, 'a') as the_file:\n temp='\\t'+str(count_tweet)+',\\t'+str(mytweet['created_at'])+',\\t'+str(mytweet['id'])\n the_file.write(temp)\n the_file.write('\\n')\n print \"total emoji: \", count_total_seen_emoji\n # return {'df_emoji_info':df_emoji_info, 'df_emoji_heap':df_emoji_heap, 'df_emoji_count':df_emoji_count, 'df_emoji_adjMatrix':df_emoji_adjMatrix, 'df_emoji_heapMatrix':df_emoji_heapMatrix}", "def lookup_ifproc_file(obsnum, path='/data_lmt/ifproc/', debug=False):\n paths = [path]\n\n if 'ifproc' not in path:\n paths += ['/data_lmt/ifproc/']\n if 'lmtttpm' not in path:\n paths += ['/data_lmt/lmttpm/']\n if 'tel' not in path:\n paths += ['/data_lmt/tel/']\n\n if debug:\n print(paths)\n\n for path in paths:\n filenames = glob.glob(os.path.join(path, '*_%06d_*.nc' % obsnum))\n if len(filenames) > 0:\n if debug:\n print('found %s' % (filenames[0]))\n return filenames[0]\n return ''\n #filename = ''\n #for file in os.listdir(path):\n # if fnmatch.fnmatch(file,'*_%06d_*.nc'%(obsnum)):\n # print('found %s'%(file))\n # filename = path+file\n #if filename == '':\n #print('lookup_ifproc_file: no file for obsnum ', obsnum)\n #if 'lmttpm' not in path:\n # print('look in lmttpm')\n # return lookup_ifproc_file(obsnum,path='/data_lmt/lmttpm/')\n #return(filename)", "def get_nsite_DMRfind(inputf,output,samples,path_to_allc=\"\",mc_type=[\"C\"],num_procs=1,use_mc_status=True,min_cov=0):\n #dictionary of sample_name -> file handle\n allc_files = {}\n allc_lines = {}\n allc_fields = {}\n allc_prevbyte = {} #sample_name -> prevbyte (started from) in the file\n with open(inputf,'r') as f, open(output,'w') as g:\n line = f.readline()\n line = line.rstrip(\"\\n\")\n fields = line.split(\"\\t\")\n prefix_len 
= len(fields) #number of fields in original file\n mc_type = expand_nucleotide_code(mc_type)\n g.write(\"\\t\".join(fields[:prefix_len])+\"\\t\"+\"\\t\".join([\"nsite_\"+sample for sample in samples])+\"\\n\")\n prev_chrom = \"\"\n prev_end = \"\"\n dmr_lines=[]\n methylation_levels = {}\n for line in f:\n line = line.rstrip(\"\\n\")\n dmr_lines.append(line)\n if num_procs == 1:\n for sample in samples:\n methylation_levels[sample]=get_nsite_DMRfind_worker(dmr_lines,mc_type,sample,path_to_allc,output,min_cov,use_mc_status=False)\n else:\n pool = Pool(num_procs)\n results = {}\n for sample in samples:\n results[sample]=pool.apply_async(get_nsite_DMRfind_worker,(dmr_lines,mc_type,sample,path_to_allc,output,min_cov),{\"use_mc_status\":False})\n pool.close()\n pool.join()\n for sample in results:\n methylation_levels[sample]=results[sample].get()\n temp_files = {}\n for sample in samples:\n temp_files[sample]=open(output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\",'r')\n\n for index,line in enumerate(dmr_lines):\n g.write(line)\n for sample in samples:\n #g.write(\"\\t\"+methylation_levels[sample][index])\n g.write(\"\\t\"+temp_files[sample].readline().rstrip(\"\\n\"))\n g.write(\"\\n\")\n for sample in samples:\n temp_files[sample].close()\n subprocess.check_call(shlex.split(\"rm \"+output.replace(\".tsv\",\"\")+\"_\"+sample+\"_temp_nsite.tsv\"))", "def extract_strings_from_i(incarnato_fragments, genome, param):\r\n i_dict = {}\r\n i_fragment_regions = \"\"\r\n\r\n with open(incarnato_fragments) as f:\r\n for line in f:\r\n start, end = line.strip().split(\"_\")[1].split(\"-\")\r\n seq = next(f).strip()\r\n ss = next(f).strip()\r\n i_dict[(int(start), int(end))] = [seq,ss]\r\n\r\n for start, end in sorted(i_dict.keys()):\r\n temp = start - len(i_fragment_regions)\r\n gaps = \"\".join([\"_\" for x in range(0, temp)])\r\n i_fragment_regions += gaps\r\n i_fragment_regions += i_dict[(start, end)][param]\r\n \r\n tail = \"\".join([\"_\" for x in range(len(i_fragment_regions), genome+1)])\r\n i_fragment_regions += tail\r\n return i_fragment_regions", "def filter_overlapping_files(files):\n keys = list(files.keys())\n base = min([key.replace(\"M\", \"\") for key in files.keys()])\n base = str(base) + \"M\"\n keys.remove(base)\n base_files = files[base]\n\n dict_files_all = {}\n for key in keys:\n file_keys = files[key]\n for file_key in file_keys:\n for file_base in base_files:\n dates_overlapping = filter_overlapping_dates(file_base, file_key)\n if len(dates_overlapping) > 0:\n list_files = [file_base, file_key]\n combination = base + \"_\" + key\n if combination in dict_files_all.keys():\n dict_files_all[combination].append(list_files)\n else:\n dict_files_all[combination] = [list_files]\n return dict_files_all" ]
[ "0.6327156", "0.61232364", "0.56347793", "0.56296253", "0.5618694", "0.5515025", "0.54627013", "0.5458853", "0.538327", "0.53801644", "0.53717375", "0.52709544", "0.5267916", "0.52162147", "0.5175573", "0.5166157", "0.51569754", "0.5155961", "0.51448816", "0.5137253", "0.51114327", "0.51104504", "0.5105954", "0.50756", "0.5073835", "0.50372237", "0.50094825", "0.4972083", "0.4970715", "0.4952365", "0.49423096", "0.49407315", "0.49331123", "0.49330524", "0.49312112", "0.49224436", "0.4920015", "0.49143833", "0.49139616", "0.49049374", "0.49041584", "0.49037442", "0.48978916", "0.48912138", "0.48894522", "0.48693928", "0.486669", "0.48574406", "0.48532793", "0.48506132", "0.48461044", "0.48439103", "0.48437777", "0.48402077", "0.48351824", "0.48301232", "0.48214963", "0.4820676", "0.48084423", "0.48076835", "0.48041043", "0.48027486", "0.47992563", "0.4796253", "0.47960895", "0.47925168", "0.47923946", "0.47819084", "0.47808933", "0.47802576", "0.47801992", "0.47779778", "0.4777417", "0.47750917", "0.47730654", "0.47656795", "0.47501853", "0.47451076", "0.47346994", "0.47338232", "0.47333273", "0.47314057", "0.47247973", "0.47200182", "0.47190553", "0.4718846", "0.47180367", "0.47150645", "0.4715036", "0.47123307", "0.47055906", "0.47034025", "0.47008175", "0.46906427", "0.46888307", "0.4687923", "0.4686869", "0.4686306", "0.46854836", "0.46849057" ]
0.675231
0
take a list of basenames, get lcd and merge set founder affection according to faff flag and offspring according to ofaff flag
def mergePed(bnlist=[],faff=[],ofaff=[],newbasename='newped',fo=0): lcdmap = getLCD(bnlist) # list of chr,offset,rs for all snp common to all files print 'got %d lcd snps-%s' % (len(lcdmap),lcdmap[:5]) cfped = [] coped = [] cfgeno = [] cogeno = [] allrsa = {} ignorers = {} for i,basename in enumerate(bnlist): fped,oped,fgeno,ogeno,trsadict = subsetPed(basename,lcdmap,faff[i],ofaff[i]) print '%s gave %d fgeno' % (basename,len(fgeno)) for rs in trsadict.keys(): tk = trsadict[rs].keys() if len(tk) > 2: print 'for %s, rs %s has alleles %s' % (basename, rs, trsadict[rs]) if not allrsa.get(rs,None): allrsa[rs] = {} for a in tk: if not allrsa[rs].get(a,None): allrsa[rs][a] = trsadict[rs][a] else: allrsa[rs][a] += trsadict[rs][a] tk = allrsa[rs].keys() if len(tk) > 2 and not ignorers.get(rs,None): # new #print 'After merge basename %s, rs %s has alleles %s' % (basename, rs,allrsa[rs]) ignorers[rs] = rs cfped += fped coped += oped cfgeno += fgeno cogeno += ogeno print 'after merge all have %d fgeno and %d ogeno' % (len(cfgeno),len(cogeno)) # now have offspring and founder rows in lcdmap order # write map file print '### found %d markers > 2 alleles' % (len(ignorers.keys())) keepmarkers = [x for x in range(len(lcdmap)) if not ignorers.get(lcdmap[x][2],None)] newmap = ['\t'.join((lcdmap[x][0],lcdmap[x][2],'0','%d' % lcdmap[x][1])) for x in keepmarkers] # chrom,offset,rs f = file('%s.map' % newbasename,'w') f.write('%s\n' % '\n'.join(newmap)) f.close() for i,geno in enumerate(cfgeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno[i] = g # replace print 'cfgeno converted' if not fo: # not founders only - note arrays are not lists! cfped += copy.copy(coped) # del coped for i,geno in enumerate(cogeno): # convert each array into a list and keep the good markers gs = ''.join(['%s%s' % (geno[2*x],geno[2*x + 1]) for x in keepmarkers]) g = array.array('c',gs) # good ones cfgeno.append(g) # extend founders del cogeno print 'after if not fo now have %d cfgeno' % (len(cfgeno)) f = file('%s.ped' % newbasename,'w') for n,ped in enumerate(cfped): l = ' '.join(ped + list(cfgeno[n])) if n % 100 == 0 and n > 0: print 'writing line %d' % n f.write(l) f.write('\n') f.close() print 'wrote %d map rows and %d ped rows to %s' % (len(newmap),len(cfped),newbasename)
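For readers scanning this record, a minimal usage sketch of the mergePed document above, assuming the function (and the getLCD/subsetPed helpers it calls) is importable and that cohortA.ped/.map and cohortB.ped/.map exist on disk; the basenames, affection codes and output name are illustrative assumptions, not values taken from the dataset:

    # Hypothetical call: merge two ped/map sets on their common (lcd) markers,
    # writing merged.map and merged.ped. faff/ofaff give the per-file affection
    # codes applied to founders and offspring; fo=1 would keep founder rows only.
    basenames = ['cohortA', 'cohortB']
    mergePed(bnlist=basenames, faff=['1', '1'], ofaff=['2', '2'],
             newbasename='merged', fo=0)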
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def autocontext_forests(dirname):\n rf_files = []\n for filename in os.listdir(dirname):\n fullname = os.path.join(dirname, filename)\n if os.path.isfile(fullname) and len(filename) >= 8:\n base, middle, end = filename[:3], filename[3:-4], filename[-4:]\n if base == \"rf_\" and end ==\".ilp\":\n rf_files.append((int(middle), fullname))\n rf_files = sorted(rf_files)\n rf_indices, rf_files = zip(*rf_files)\n assert rf_indices == tuple(xrange(len(rf_files))) # check that there are only the indices 0, 1, 2, ... .\n return rf_files", "def subsetPed(basename=\"\",lcdmap = [],faff='1', ofaff='2'):\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf]\r\n rscols = {} # lookup marker table\r\n colrs = [] # lookup rs from column\r\n for i,m in enumerate(lmap): # get columns to keep in the order we want them\r\n rscols[m[1]] = i # keep track of where each rs is in this map\r\n colrs.append(m[1]) # and keep the list of rs for tracking alleles\r\n wewant = [rscols[x[2]] for x in lcdmap] # columns we want to keep\r\n print '#Subsetped faff=%s ofaff=%s keeping %d (%s) of potential lcd %d for %s' % \\\r\n (faff,ofaff,len(wewant),wewant[:20],len(lcdmap),basename)\r\n pf = file('%s.ped' % basename,'r')\r\n ogeno = [] # offspring new lines\r\n fgeno = [] # founders\r\n oped = [] # for pedigrees\r\n fped = []\r\n rsadict = {} # keep a count of alleles - seems to be a problem\r\n for i,l in enumerate(pf):\r\n if (i+1) % 500 == 0:\r\n print '%s at line %d' % (basename,i+1)\r\n ll = l.strip().split()\r\n ped = ll[:6]\r\n founder = (ll[2] == '0' and ll[3] == '0') \r\n aff = faff\r\n if not founder:\r\n aff = ofaff\r\n ped[5] = aff # adjust as needed\r\n if founder:\r\n fped.append(ped)\r\n else:\r\n oped.append(ped)\r\n gt = ll[6:]\r\n geno = []\r\n for snp in wewant: # columns in order\r\n thisrs = colrs[snp]\r\n base = snp*2\r\n g1 = gt[base]\r\n g2 = gt[base+1]\r\n geno.append(g1)\r\n geno.append(g2)\r\n if not rsadict.get(thisrs,None):\r\n rsadict[thisrs] = {}\r\n if g1 <> '0':\r\n if not rsadict[thisrs].get(g1,None):\r\n rsadict[thisrs][g1] = 1\r\n else:\r\n rsadict[thisrs][g1] += 1 \r\n if g2 <> '0':\r\n if not rsadict[thisrs].get(g2,None):\r\n rsadict[thisrs][g2] = 1\r\n else:\r\n rsadict[thisrs][g2] += 1\r\n keepgt = array.array('c',geno)\r\n if founder:\r\n fgeno.append(keepgt)\r\n else:\r\n ogeno.append(keepgt)\r\n print '#Subsetped %s %d fgeno %d ogeno' % (basename,len(fgeno),len(ogeno))\r\n return fped,oped,fgeno,ogeno,rsadict", "def getLCD(lbase=[]):\r\n listmf = []\r\n rsdict = {}\r\n for i,basename in enumerate(lbase): # for each basename to be included\r\n mf = file('%s.map' % basename,'r').readlines()\r\n lmap = [x.strip().split() for x in mf] \r\n rslist = [x[1] for x in lmap] # chrom rs gendist physdist\r\n for x in lmap:\r\n rsdict[x[1]] = (x[0],int(x[3]),x[1]) # key by chrom,offset,rs\r\n setrs = set(rslist)\r\n listmf.append(setrs) # list of map lines for processing\r\n lcd = listmf.pop(0) # start with first - order doesn't matter\r\n for setrs in listmf:\r\n lcd = lcd & setrs # intersection\r\n lcd = list(lcd) # now have lowest common denom as a list of rs\r\n lcdmap = [rsdict[rs] for rs in lcd] # restore chrom,offset,rs for rs to keep\r\n lcdmap.sort() # now in genomic order\r\n print 'got lcdmap=',lcdmap[:10]\r\n return lcdmap # sorted common map\r", "def buildfilelist():\r\n for files in filelist:\r\n if os.path.splitext(files)[1]=='.dxf': #查找目录下的dxf文件,加入到readfilelist文件列表中 \r\n readfilelist.append(files)\r\n #feilin=file('feilin(ph).dxf','w') 
#新建一个文件,名字先占位用,后续改成由配置文件中读入名称。 \r", "def maf2vcf_mrefs(maf):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"a\"):\n ancallele = ''\n refout = ''\n line = next(maf)\n while line.startswith(\"s\"):\n if \"Wb\" in line:\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n chrom = aa[1].split(\".\")[1]\n if \"-\" in aa[4]:\n if aa[6] == 'A':\n rallele = 'T'\n elif aa[6] == 'T':\n rallele = 'A'\n elif aa[6] == 'C':\n rallele = 'G'\n elif aa[6] == 'G':\n rallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n pos_1 = size - pos\n else:\n pos_1 = pos\n rallele = aa[6]\n else:\n # read in other refs\n aa = line.split()\n refout += aa[1][0]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele += 'T'\n elif aa[6] == 'T':\n ancallele += 'A'\n elif aa[6] == 'C':\n ancallele += 'G'\n elif aa[6] == 'G':\n ancallele += 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n ancallele += aa[6]\n line = next(maf)\n if ancallele:\n f.write(\"{}\\t{}\\t{}\\t{}\\t{}\\n\".format(chrom, pos_1 + 1,\n rallele, ancallele,\n refout))\n else:\n pass\n return(None)", "def get_files_suffix_list(suffixes, flist, Lshow=False, Ldir=False):\n matched_files=[]\n dirs=[]\n files=[]\n for fname in flist:\n if os.path.isdir(fname):\n dirs.append(fname)\n else:\n files.append(fname)\n for suff in suffixes:\n for fname in files:\n #print(f\" {suff} in {fname} ?\")\n if fname.endswith(suff):\n matched_files.append(fname)\n matched_files.extend(dirs) \n return matched_files", "def _separate_file_list( file_list, target_locus ):\n log.info(\"Parsing locus-specific subread FOFN\")\n target_fasta = None\n other_fasta = []\n print file_list, target_locus\n for filename in file_list:\n basename = filename.split('.')[0]\n locus = basename.split('_')[-1]\n if locus == target_locus and target_fasta is None:\n target_fasta = filename\n elif locus == target_locus:\n msg = 'Multiple files for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n else:\n other_fasta.append( filename )\n if target_fasta is None:\n msg = 'No fasta file for target locus found!'\n log.error( msg )\n raise ValueError( msg )\n return ( target_fasta, other_fasta )", "def get_annot_cfpath_list(ibs, aid_list, suffix=None):\n #utool.assert_all_not_None(aid_list, 'aid_list')\n _cfname_fmt = get_chip_fname_fmt(ibs=ibs, suffix=suffix)\n cfname_iter = (None if aid is None else _cfname_fmt % aid for aid in iter(aid_list))\n cfpath_list = [None if cfname is None else join(ibs.chipdir, cfname) for cfname in cfname_iter]\n return cfpath_list", "def make_master_flats(dc):\n\n\t## Make EXTcheck: is there always the same number of extensions in each file\n\tprint \"Making master flats\"\n\t\n\t## Choose extensions you are using\n\t\n\tfor flat_type in ['FFS']: # Currently FFD is unsupported. If you have FFDs, add them to the list but you must have ONLY FFDs or ONLY FFSs in the dir. 
Otherwise the first element in the list will get overwritten!\n\t\t#~ print \"\\n\", flat_type, \"\\n\"\n\t\tfor i in dc:\n\t\t\tTRIM, TRIM1, VR, PS, PS1, OS, OS1 = CCD_sections((i[0], i[1]))\n\t\t\tfilelist = []\n\t\t\tfor f in glob.glob(RAW+'*'+flat_type+'*fits'):\n\t\t\t\tccd_conf = []\n\t\t\t\theader0 = fits.getheader(f)\n\t\t\t\theader1 = fits.getheader(f, ext=1)\n\t\t\t\tif header0['OBSMODE']==flat_type:\n\t\t\t\t\tfor KW in ['BINX', 'BINY']:\n\t\t\t\t\t\tccd_conf.append(header0[KW])\n\t\t\t\t\tfor KW in ['NAXIS1', 'NAXIS2']:\n\t\t\t\t\t\tccd_conf.append(header1[KW])\n\t\t\t\t\t\tif tuple(ccd_conf)==i:\n\t\t\t\t\t\t\tfilelist.append(f)\n\t\t\tlfl = len(filelist)\n\t\t\tif lfl > 0:\n\t\t\t\tBIN=CD+'/'+str(i[0])+'x'+str(i[1])+'/'\n\t\t\t\tWD=BIN+str(i[-2])+'x'+str(i[-1])+'/' # Bottom level dir with calibrated and master frames\n\t\t\t\tB=check_exist(WD, 'MF.fits', i)\n\t\t\t\tif B=='n':\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\thdul = fits.HDUList()\n\t\t\t\t\thdul.append(fits.ImageHDU())\n\t\t\t\t\t#~ MB = fits.open(WD+'MB.fits')\n\t\t\t\t\tx = np.array(range(0,i[-1]))\n\t\t\t\t\tfor EXT in (extensions):\n\t\t\t\t\t\tprint \"##################################################\"\n\t\t\t\t\t\tprint \"Stacking \"+`lfl`+' '+`i[-2]`+'x'+`i[-1]`+' channel '+`EXT`+' flat frames!'\n\t\t\t\t\t\tif EXT==1:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS1\n\t\t\t\t\t\t\tTR=TRIM1\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tPSC=PS1\n\t\t\t\t\t\t\tOSC=OS\n\t\t\t\t\t\t\tTR=TRIM\n\t\t\t\t\t\tsc = -1 # counts how many flats have mean>limit\n\t\t\t\t\t\tfor n, fn in enumerate(filelist):\n\t\t\t\t\t\t\tprint \"Files left:\",`lfl-n`+'/'+`lfl`\n\t\t\t\t\t\t\tim = fits.getdata(fn, ext=EXT)\n\t\t\t\t\t\t\tmeanval = np.mean(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t#~ maxval = np.max(im[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\tmaxval = stats.scoreatpercentile(im[VR[0]:VR[1], TR[0]:TR[1]], 90)\n\t\t\t\t\t\t\texptime = fits.getheader(fn)['EXPTIME']\n\t\t\t\t\t\t\t#~ if meanval > 15000. and meanval < 40000. and maxval < 50000. and exptime>5.:\n\t\t\t\t\t\t\tif meanval > 16000. and meanval < 40000. 
and exptime>=5.:\n\t\t\t\t\t\t\t\tsc+=1\n\t\t\t\t\t\t\t\t#~ im[im<1]=1\n\t\t\t\t\t\t\t\tmscrow, sigmarow = median_row(OSC, PSC, TR, im)\n\t\t\t\t\t\t\t\tsh = np.shape(im)\n\t\t\t\t\t\t\t\tfor y in range(0, sh[0]):\n\t\t\t\t\t\t\t\t\tim[y] = im[y]-mscrow[y]\n\t\t\t\t\t\t\t\tF=im\n\t\t\t\t\t\t\t\tnorm = np.median(F[VR[0]:VR[1], TR[0]:TR[1]])\n\t\t\t\t\t\t\t\tF = F/norm #+np.min(F)+0.0001\n\t\t\t\t\t\t\t\tif sc==0:\n\t\t\t\t\t\t\t\t\tstack_arr = F\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tstack_arr = np.dstack((stack_arr, F))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\t\t\tprint \"Rejected\", fn, \"AVG =\", meanval, \"EXPTIME =\", exptime\n\t\t\t\t\t\t\t\tprint \"****************************************\"\n\t\t\t\t\t\tprint 'Will stack a total of', np.shape(stack_arr)[2], 'flats'\n\t\t\t\t\t\tMF = np.median(stack_arr, axis=2)\n\t\t\t\t\t\thdul.append(fits.ImageHDU(MF))\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS1\", np.shape(MF)[1])\n\t\t\t\t\t\thdul[EXT].header.set(\"NAXIS2\", np.shape(MF)[0])\n\t\t\t\t\thdul[0].header.set(\"CALIBR\", \"T\")\n\t\t\t\t\thdul[0].header.set(\"INSTRUME\", \"MAIA\")\n\t\t\t\t\thdul[0].header.set(\"BINX\", i[0])\n\t\t\t\t\thdul[0].header.set(\"BINY\", i[1])\n\t\t\t\t\thdul[0].header.set(\"CALMODE\", \"MASTER FLAT\")\n\t\t\t\t\thdul.writeto(WD+\"MF.fits\", clobber=True)\n\t\t\t\t\tprint \"############################################################\"\n\tprint \"Completed master flats\"", "def get_light_sbc(filenames, onoff=True):\n if onoff:\n param = \"on\"\n else:\n param = \"off\"\n return filter_filenames(filenames, [param])", "def getPrefices(fileList):\n # Format:\n # prefix_dictionary[surl] = [oldPrefix, newPrefix]\n # Note: this function returns oldPrefix, newPrefix, prefix_dictionary\n # old/newPrefix are the fixed prefices defined in copysetup[in]\n # In case copyprefix[in] can be used, ie if it is set, it may contain a list of copyprefices that can sort out\n # more complicated cases\n\n prefix_dictionary = {}\n\n # get the file access info (only old/newPrefix are needed here)\n useCT, oldPrefix, newPrefix, useFileStager, directIn = getFileAccessInfo()\n\n # get the copyprefices\n copyprefix = readpar('copyprefixin')\n if copyprefix == \"\":\n copyprefix = readpar('copyprefix')\n\n # should we fall back to copyprefix or use the faxredirector? 
(this is the case for FAX test jobs since they reset old/newPrefix)\n if oldPrefix == \"\" or newPrefix == \"\" or not (oldPrefix and newPrefix):\n\n # special case for FAX on sites that are not setup for direct i/o in the normal way\n if (readpar('copytoolin').lower() == \"fax\") or (readpar('copytoolin') == \"\" and readpar('copytool').lower() == \"fax\"):\n if \"dummy\" in copyprefix:\n # try to construct the TURL using the copyprefix and the faxredirector\n prefix, dummy = copyprefix.split(\"^\")\n faxredirector = readpar('faxredirector')\n if faxredirector != \"\":\n tolog(\"Using copyprefix and faxredirector for old/newPrefix\")\n oldPrefix = prefix\n newPrefix = faxredirector\n else:\n tolog(\"WARNING: faxredirector not set, do not know how to construct old/newPrefix\")\n else:\n if not \"^\" in copyprefix:\n tolog(\"WARNING: Will default to using lcg-getturls\")\n \n # in case of less complex copyprefix\n if \"^\" in copyprefix and not \",\" in copyprefix and not \"dummy\" in copyprefix:\n prefices = copyprefix.split(\"^\")\n oldPrefix = prefices[0]\n newPrefix = prefices[1]\n\n # in case of more complex copyprefix (the case of copyprefix lists)\n if \"^\" in copyprefix and \",\" in copyprefix and not \"dummy\" in copyprefix:\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n\n else: # old/newPrefix are set\n\n # handle copyprefix lists\n pfroms, ptos = getCopyprefixLists(copyprefix)\n tolog(\"Copyprefix lists: %s, %s\" % (str(pfroms), str(ptos)))\n\n if not \"\" in pfroms and not \"dummy\" in pfroms and not \"\" in ptos and not \"dummy\" in ptos:\n # create a prefix dictionary for all the files\n for surl in fileList:\n # first get the proper old/newPrefices\n oldPrefix, newPrefix = matchCopyprefixReplica(surl, pfroms, ptos)\n # then fill the dictionary\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n else:\n if oldPrefix != \"\" and newPrefix != \"\":\n # Use the same prefices for all surls\n for surl in fileList:\n prefix_dictionary[surl] = [oldPrefix, newPrefix]\n \n if oldPrefix != \"\" and newPrefix != \"\":\n tolog(\"Will use oldPrefix=%s and newPrefix=%s for SURL to TURL conversion\" % (oldPrefix, newPrefix))\n else:\n tolog(\"WARNING: old/newPrefix not known\")\n\n return oldPrefix, newPrefix, prefix_dictionary", "def get_files(file_list, mode):\n\tfile_set = set(file_list)\n\tif \"band\" in mode:\n\t\tband_all_set = set([\"bands.dat.gnu\", \"freq.plot\"])\n\t\tband_file_set = file_set & band_all_set ; remain_file_set = file_set - band_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{b}\\033[0m\" for b in band_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tif mode == \"dos\":\n\t\tdos_all_set = set([\"S.dos\"])\n\t\tdos_file_set = file_set & dos_all_set ; remain_file_set = file_set - dos_all_set\n\t\tfiles_str = \", \".join([f\"\\033[32m{d}\\033[0m\" for d in dos_file_set]) + \"; \" + \", \".join(remain_file_set)\n\tprint(f\"Files in the 
directory: {files_str}\"); file = input(\"Please choose your file (type one only): \")\n\treturn file", "def Parse_folder_to_multi_faa(target_dir,faa_filename):\n os.chdir(target_dir)\n output_handle = open(faa_filename, \"w\")\n for gbk_filename in FileGen(target_dir):\n with open(gbk_filename, \"r\") as input_handle:\n for seq_record in SeqIO.parse(input_handle, \"genbank\") :\n print(\"Dealing with GenBank record %s\" % seq_record.id)\n for seq_feature in seq_record.features :\n if seq_feature.type==\"CDS\" :\n assert len(seq_feature.qualifiers['translation'])==1\n try:\n name = seq_feature.qualifiers['locus_tag'][0]\n except KeyError:\n name = seq_feature.qualifiers['product'][0]\n output_handle.write(\">%s from %s\\n%s\\n\" % (\n name,\n gbk_filename.split(\"/\")[-1],\n seq_feature.qualifiers['translation'][0])) \n output_handle.close()", "def get_keys(filen, flist): \n if (filen in flist[0]):\n key1 = 'PSTH_STIM'\n key2 = 'ELEC_'\n key3 = '_TRIAL_'\n elif (filen in flist[1]) or (filen in flist[2]):\n key1 = 'PSTH'\n key2 = ''\n key3 = '_'\n elif (filen in flist[3]) or (filen in flist[4]):\n key1 = 'Stim'\n key2 = 'Elec'\n key3 = 'Repet'\n return key1, key2, key3", "def masterFlat(flat_list, master_dark_fname, normalize = 'median', local_sig_bad_pix = 3, \\\n global_sig_bad_pix = 9, local_box_size = 11, hotp_map_fname = None, verbose=False,\n output_dir = None,min_flux=1000):\n\n #Open the master dark\n master_dark_hdu = f.open(master_dark_fname)\n master_dark = master_dark_hdu[0].data\n dark_shape = np.shape(master_dark)\n\n if verbose:\n print((\"Subtracting {} from each flat file\".format(master_dark_fname)))\n dark_exp_time = master_dark_hdu[0].header['EXPTIME']\n\n #Open all files into a 3D array\n #foo = np.empty((dark_shape[0],dark_shape[1],len(flat_list)))\n foo = []\n\n #Open first flat file to check exposure time and filter\n first_flat_hdu = f.open(flat_list[0])\n flat_exp_time = first_flat_hdu[0].header['EXPTIME']\n\n\n\n if dark_exp_time != flat_exp_time:\n print(\"The master dark file doesn't have the same exposure time as the flats. We'll scale the dark for now, but this isn't ideal\", UserWarning)\n factor = flat_exp_time/dark_exp_time\n else:\n factor = 1.\n\n #We've already read it, so we'll stick it in foo\n\n print(\"Combining flat files\")\n for i in range(0,len(flat_list)):\n try: \n #subtract dark for each file, then normalize by mode\n hdu = f.open(flat_list[i],ignore_missing_end=True)\n d_sub = hdu[0].data - factor*master_dark\n if np.nanmedian(d_sub) < min_flux:\n #print(\"Skipping file {}, because its flux is lower than {}\".format(flat_list[i],min_flux))\n continue\n #normalize\n if normalize == 'mode':\n d_sub = d_sub/mode(d_sub, axis = None, nan_policy = 'omit')\n elif normalize == 'median':\n d_sub = d_sub/np.nanmedian(d_sub)\n #foo[:,:,i] = d_sub\n foo.append(d_sub)\n except:\n print(\"Some error. Skipping file {}\".format(i)) \n #Median combine frames\n flat = np.median(foo, axis = 0)\n\n #Filter bad pixels\n #bad_px = sigma_clip(flat, sigma = sig_bad_pix) #old and bad\n ###Major update here: do sigma clipping on the pix-to-pix flat with the large scale vignette removed\n ###Also add local sigma clipping\n def stddevFilter(img, box_size):\n \"\"\" from\n https://stackoverflow.com/questions/28931265/calculating-variance-of-an-image-python-efficiently/36266187#36266187\n This function compute the standard deviation of an image in a\n moving box of a given size. 
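        A quick, illustrative check of the identity this relies on,
        std = sqrt(E[x^2] - E[x]^2), assuming numpy is imported as np as in the
        rest of this module:

        >>> x = np.array([1.0, 2.0, 3.0])
        >>> round(float(np.sqrt(np.mean(x * x) - np.mean(x) ** 2)), 3)
        0.816
        >>> round(float(np.std(x)), 3)
        0.816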
The pixel i,j of the output is the\n standard deviation of the pixel value in the box_size x box_size box\n around the i,j pixel in the original image.\n \"\"\"\n wmean, wsqrmean = (cv2.boxFilter(x, -1, (box_size, box_size), \\\n borderType=cv2.BORDER_REFLECT) for x in (img, img*img))\n return np.sqrt(wsqrmean - wmean*wmean)\n\n #median flat\n median_flat = median_filter(flat, local_box_size) #arbitrary size, shouldn't matter as long as it's big enough\n #standard deviation image\n stddev_im = stddevFilter(flat, local_box_size)\n\n #Local clipping\n local_bad_pix = np.abs(median_flat - flat) > local_sig_bad_pix*stddev_im\n\n #Global clipping here to reject awful pixels and dust, bad columns, etc\n pix_to_pix = flat/median_flat\n global_bad_px = sigma_clip(pix_to_pix, sigma = global_sig_bad_pix).mask #9 seems to work best\n\n #also set all 0 and negative pixels in flat as bad\n non_positive = flat <= 0\n\n #logic combine\n bad_px = np.logical_or(global_bad_px, local_bad_pix)\n\n #also add non_positive pixels\n bad_px = np.logical_or(bad_px, non_positive)\n\n #Normalize good pixel values\n if normalize == 'median':\n norm_flat = flat/np.nanmedian(flat[~bad_px])\n elif normalize == 'mode':\n norm_flat = flat/mode(flat, axis = None, nan_policy = 'omit')\n #Stick it back in the last hdu\n hdu[0].data = norm_flat\n\n #Add pipeline version and history keywords\n vers = version.get_version()\n hdu[0].header.set('PL_VERS',vers,'Version of pipeline used for processing')\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created master flat by median combining the following:\"\n for i in range(len(flat_list)):\n hdu[0].header['HISTORY'] = flat_list[i]\n hdu[0].header['HISTORY'] = \"Normalized to the median of the master flat\"\n hdu[0].header['HISTORY'] = \"Performed bad pixel local and global sigma clipping with {}, {}sigmas\".format(local_sig_bad_pix, global_sig_bad_pix)\n hdu[0].header['HISTORY'] = \"############################\"\n\n #Parse the last fileanme\n if output_dir is not None:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n flat_outname = flat_outname.rsplit('/',1)[-1]\n flat_outname = output_dir+flat_outname\n else:\n flat_outname = flat_list[-1].rsplit('.',1)[0]+\"_master_flat.fits\"\n\n #Write the fits file\n if verbose:\n print((\"Writing master flat to {}\".format(flat_outname)))\n hdu.writeto(flat_outname, overwrite=True)\n\n #If there's already a hot pixel map then we'll add to it.\n if hotp_map_fname != None:\n #read in the existing bp map\n #hdu = f.open(hotp_map_fname)\n #hdu[0].data += np.array(bad_px.mask, dtype=float)\n #hdu[0].data = np.logical_or(hdu[0].data.astype(bool), bad_px) #use logical or to combine bad pixel maps\n #bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n print(\"Will deal with hot pixel map from dark frames in the calibrate function\")\n\n #else:\n #Parse the last fileanme\n if output_dir is not None:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n bp_outname = bp_outname.rsplit('/',1)[-1]\n bp_outname = output_dir+bp_outname\n else:\n bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n ##### Now write the bad pixel map\n hdu[0].data = bad_px.astype(int)#np.array(bad_px.mask, dtype=float)\n #Parse the last fileanme\n # bp_outname = flat_list[-1].rsplit('.',1)[0]+\"_bp_map.fits\"\n\n #Add history keywords\n hdu[0].header['HISTORY'] = \"############################\"\n hdu[0].header['HISTORY'] = \"Created bad pixel map by sigma clipping on 
pixel-to-pixel flat{}\".format(flat_outname)\n hdu[0].header['HISTORY'] = \"Bad pixel cutoffs: local sigma = {} and global sigma = {} for clipping\".format(local_sig_bad_pix, global_sig_bad_pix)\n #hdu[0].header['HISTORY'] = \"Bad pixel cutoff of {}sigma\".format(sig_bad_pix)\n hdu[0].header['HISTORY'] = \"A pixel value of 1 indicates a bad pixel\"\n hdu[0].header['HISTORY'] = \"############################\"\n\n if verbose:\n print((\"Writing bad pixel map to {}\".format(bp_outname)))\n #Write the fits file\n hdu.writeto(bp_outname, overwrite=True)\n\n return flat_outname, bp_outname", "def cat_sff_files(list_of_file_handles):\r\n # mimicks lazy_parse_sff_handle on multiple files\r\n # Move to cogent???\r\n if (list_of_file_handles == []):\r\n return [], None\r\n try:\r\n flowgrams_and_headers = map(\r\n lazy_parse_sff_handle,\r\n list_of_file_handles)\r\n except ValueError:\r\n raise FileFormatError('Wrong flogram file format. Make sure you pass the sff.txt format ' +\r\n 'produced by sffinfo. The binary .sff will not work here.')\r\n\r\n flowgram_iterators = [a for a, b in flowgrams_and_headers]\r\n return chain(*flowgram_iterators), flowgrams_and_headers[0][1]", "def create_F1_F2_cols(col_base_list, output='both'):\n F12_cols = []\n for x in col_base_list:\n pref = x[:3]\n if output == 'both':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n F12_cols.append('FM_F2_' + x[3:])\n else:\n F12_cols.append('F1_' + x)\n F12_cols.append('F2_' + x)\n elif output =='F1':\n if pref =='FM_':\n F12_cols.append('FM_F1_'+ x[3:])\n else:\n F12_cols.append('F1_' + x)\n elif output =='F2':\n if pref =='FM_':\n F12_cols.append('FM_F2_'+ x[3:])\n else:\n F12_cols.append('F2_' + x)\n return F12_cols", "def parse_flt_files(files=[], info=None, uniquename=False, use_visit=False,\n get_footprint = False, \n translate = {'AEGIS-':'aegis-', \n 'COSMOS-':'cosmos-', \n 'GNGRISM':'goodsn-', \n 'GOODS-SOUTH-':'goodss-', \n 'UDS-':'uds-'}): \n \n if info is None:\n if not files:\n files=glob.glob('*flt.fits')\n \n if len(files) == 0:\n return False\n \n info = get_flt_info(files)\n else:\n info = info.copy()\n \n for c in info.colnames:\n if not c.islower(): \n info.rename_column(c, c.lower())\n\n if 'expstart' not in info.colnames:\n info['expstart'] = info['exptime']*0.\n\n so = np.argsort(info['expstart'])\n info = info[so]\n\n #pa_v3 = np.round(info['pa_v3']*10)/10 % 360.\n pa_v3 = np.round(info['pa_v3']) % 360.\n \n target_list = []\n for i in range(len(info)):\n #### Replace ANY targets with JRhRmRs-DdDmDs\n if info['targname'][i] == 'ANY': \n if use_visit:\n new_targname=info['file'][i][:6]\n else:\n new_targname = 'par-'+radec_to_targname(ra=info['ra_targ'][i],\n dec=info['dec_targ'][i])\n \n target_list.append(new_targname.lower())\n else:\n target_list.append(info['targname'][i])\n \n target_list = np.array(target_list)\n\n info['progIDs'] = [file[1:4] for file in info['file']]\n\n progIDs = np.unique(info['progIDs'])\n visits = np.array([os.path.basename(file)[4:6] for file in info['file']])\n dates = np.array([''.join(date.split('-')[1:]) for date in info['date-obs']])\n \n targets = np.unique(target_list)\n \n output_list = [] #OrderedDict()\n filter_list = OrderedDict()\n \n for filter in np.unique(info['filter']):\n filter_list[filter] = OrderedDict()\n \n angles = np.unique(pa_v3[(info['filter'] == filter)]) \n for angle in angles:\n filter_list[filter][angle] = []\n \n for target in targets:\n #### 3D-HST targname translations\n target_use = target\n for key in translate.keys():\n target_use 
= target_use.replace(key, translate[key])\n \n ## pad i < 10 with zero\n for key in translate.keys():\n if translate[key] in target_use:\n spl = target_use.split('-')\n try:\n if (int(spl[-1]) < 10) & (len(spl[-1]) == 1):\n spl[-1] = '{0:02d}'.format(int(spl[-1]))\n target_use = '-'.join(spl)\n except:\n pass\n\n for filter in np.unique(info['filter'][(target_list == target)]):\n angles = np.unique(pa_v3[(info['filter'] == filter) & \n (target_list == target)])\n \n for angle in angles:\n exposure_list = []\n exposure_start = []\n product='{0}-{1:05.1f}-{2}'.format(target_use, angle, filter) \n\n visit_match = np.unique(visits[(target_list == target) &\n (info['filter'] == filter)])\n \n this_progs = []\n this_visits = []\n \n for visit in visit_match:\n ix = (visits == visit) & (target_list == target) & (info['filter'] == filter)\n #this_progs.append(info['progIDs'][ix][0])\n #print visit, ix.sum(), np.unique(info['progIDs'][ix])\n new_progs = list(np.unique(info['progIDs'][ix]))\n this_visits.extend([visit]*len(new_progs))\n this_progs.extend(new_progs)\n \n for visit, prog in zip(this_visits, this_progs):\n visit_list = []\n visit_start = []\n visit_product = '{0}-{1}-{2}-{3:05.1f}-{4}'.format(target_use, prog, visit, angle, filter) \n \n use = ((target_list == target) & \n (info['filter'] == filter) & \n (visits == visit) & (pa_v3 == angle) &\n (info['progIDs'] == prog))\n \n if use.sum() == 0:\n continue\n\n for tstart, file in zip(info['expstart'][use],\n info['file'][use]):\n \n f = file.split('.gz')[0]\n if f not in exposure_list:\n visit_list.append(str(f))\n visit_start.append(tstart)\n \n exposure_list = np.append(exposure_list, visit_list)\n exposure_start.extend(visit_start)\n \n filter_list[filter][angle].extend(visit_list)\n \n if uniquename:\n print(visit_product, len(visit_list))\n so = np.argsort(visit_start)\n exposure_list = np.array(visit_list)[so]\n #output_list[visit_product.lower()] = visit_list\n \n d = OrderedDict(product=str(visit_product.lower()),\n files=list(np.array(visit_list)[so]))\n output_list.append(d)\n \n if not uniquename:\n print(product, len(exposure_list))\n so = np.argsort(exposure_start)\n exposure_list = np.array(exposure_list)[so]\n #output_list[product.lower()] = exposure_list\n d = OrderedDict(product=str(product.lower()),\n files=list(np.array(exposure_list)[so]))\n output_list.append(d)\n \n ### Get visit footprint from FLT WCS\n if get_footprint:\n from shapely.geometry import Polygon\n \n N = len(output_list)\n for i in range(N):\n for j in range(len(output_list[i]['files'])):\n flt_file = output_list[i]['files'][j]\n if (not os.path.exists(flt_file)) & os.path.exists('../RAW/'+flt_file):\n flt_file = '../RAW/'+flt_file\n \n flt_j = pyfits.open(flt_file)\n h = flt_j[0].header\n if (h['INSTRUME'] == 'WFC3') & (h['DETECTOR'] == 'IR'):\n wcs_j = pywcs.WCS(flt_j['SCI',1])\n else:\n wcs_j = pywcs.WCS(flt_j['SCI',1], fobj=flt_j)\n \n fp_j = Polygon(wcs_j.calc_footprint())\n if j == 0:\n fp_i = fp_j\n else:\n fp_i = fp_i.union(fp_j)\n \n output_list[i]['footprint'] = fp_i\n \n return output_list, filter_list", "def chk_chng(src_flist,dst_flist):\n uc_flist = []\n c_flist = []\n for files in src_flist:\n if files in dst_flist:\n uc_flist.append(files)\n else:\n c_flist.append(files)\n return uc_flist,c_flist", "def concatenate_detected_verified(fasta_name, PATH_FASTA_DETECTED, PATH_FASTA_VERIFIED, INFO_folder, PATH_FASTA_CONCATENATED):\n\n\tprint \"\\n#################\"\n\tprint \"# Concatetaned file\"\n\tprint \"#################\\n\"\n\n\t# 
NOTE Dictionaire avec en clef l'id espèce/système et en value une liste\n\t# NOTE [\"l'id espèce/système du verifié qui correspond\", [liste des sequences ATPase, IM ...]]\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tlist_seq_verified = list(SeqIO.parse(verified_fasta, \"fasta\"))\n\t\tlist_id_verified = [seq.id for seq in list_seq_verified]\n\t\tlist_seq_verified = [seq.seq for seq in list_seq_verified]\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\t# IDEA Il faut tester au moins une fois pour voir si lors de la concatenation, je ne me retrouve pas avec des systems ou je n'ai pas tous enlevé. Exemple l'ATPase de X n'est pas la même que celle de Y mais l'IMplatform l'ai si c'est le cas X est a enlevé aussi pour son ATPase\n\t\t# IDEA Si idea précédente vrai alors il faut faire des fichiers temporaires des sequences que l'on garde et concatener par \"cat\" à la fin le fichier temporaire et son homonyme en verifié.\n\n\t\t# NOTE Il y avait un problème : le nom/id de l'epèce + système ne doit pas contenir le _NumX_ car ce Num fait référence au nombre de duplicat de la protéine (exemple deux ATPase gspE)\n\t\t# NOTE Quelques systèmes on des sequences qui sont similaire pour toutes les protéines sauf une exemple ESCO3 et NC_011993 qui sont identique pour tous sauf ATPase (98% seulement)\n\n\t\tfor seq in seq_parser :\n\n\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\tsys.stdout.flush()\n\t\t\tprogression += 1\n\n\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\tif id_seq in dict_remove :\n\t\t\t\tcontinue\n\n\t\t\telif seq.seq in list_seq_verified :\n\t\t\t\tindex=list_seq_verified.index(seq.seq)\n\n\t\t\t\tid_seq_verif = list_id_verified[index].split(\"_\")\n\t\t\t\tid_seq_verif = re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq_verif[:id_seq_verif.index(\"V\")]))\n\n\t\t\t\t# NOTE dans le dictionnaire je met le système vérifié en premier, toutes les séquences du système identitique en deuxième et la séquence qui en est la cause en troisème\n\t\t\t\tdict_remove[id_seq]=[id_seq_verif,[], seq.id]\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\tprint \"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor fasta_file in fasta_name :\n\t\tverified_fasta=os.path.join(PATH_FASTA_VERIFIED, fasta_file)\n\t\tdetected_fasta=os.path.join(PATH_FASTA_DETECTED, fasta_file)\n\t\tconcatenated_fasta=os.path.join(PATH_FASTA_CONCATENATED, fasta_file)\n\n\t\tos.system('cat \"{}\" > \"{}\"'.format(verified_fasta, concatenated_fasta))\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\t\tnumber_seq = len(list(seq_parser))\n\t\tprogression = 1\n\n\t\tseq_parser = SeqIO.parse(detected_fasta, \"fasta\")\n\n\t\twith open(concatenated_fasta, \"a\") as w_file :\n\t\t\tfor seq in seq_parser 
:\n\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences detected read\\r\".format(fasta_file, progression/float(number_seq)*100, progression,number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, w_file, \"fasta\")\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(fasta_file))\n\n\t# NOTE Dict remove complete and all concatenate write\n\twrite_remove_concatenate(dict_remove, INFO_folder)\n\n\treturn", "def batch_fuc(CCD):\n batch_q()\n default_path = os.getcwd()\n PATH = os.path.join(os.getcwd(), list_subdir()[0])\n folder_name = list_subdir()[0]\n # print(\"default_path :\", default_path)\n print(\"folder_name :\", folder_name)\n # print(\"PATH :\", PATH)\n\n A = True\n\n while A is True:\n message = \"Select function\"\n choices = ['Bias correction', 'Cosmic-ray correction',\n 'Flat correction', 'Wavelength calibration',\n 'Flux calibration', 'Plot tools', 'Backup',\n 'Restore', 'Header correction', 'Quit']\n input = options(message, choices)\n\n if input == 'Bias correction':\n b_bias(folder_name, PATH, CCD)\n elif input == 'Cosmic-ray correction':\n b_cosmic(folder_name, PATH, CCD)\n elif input == 'Flat correction':\n b_flat(folder_name, PATH, CCD)\n elif input == 'Wavelength calibration':\n b_wave(folder_name, PATH, CCD, default_path)\n elif input == 'Flux calibration':\n b_flux(folder_name, PATH, CCD, default_path)\n elif input == 'Plot tools':\n b_plots(folder_name, PATH, default_path)\n elif input == 'Backup':\n b_backup(pathloc=PATH)\n elif input == 'Restore':\n b_restore(pathloc=PATH)\n elif input == 'Header correction':\n b_headercorr(folder_name)\n elif input == 'Quit':\n A = False\n sys.exit()", "def prep_cum_data(self, list_of_concat_files):\n\n cf = ''.join(list_of_concat_files)\n cf = cf.replace('\\n', '\\t')\n cf = cf.split('\\t')\n cf = filter(lambda x: 'GO:' in x, cf)\n return cf", "def lfp_extract(files):\r\n \r\n if 'lfpdata' in locals():\r\n del lfpdata\r\n \r\n for i, file in enumerate(files):\r\n \r\n ### load data\r\n matdat = sio.loadmat(file, variable_names = ['lfpsegs', 'lfpdata', 'fs', 'chnAreas'], \r\n struct_as_record = False, squeeze_me = True) \r\n \r\n \r\n \r\n ### extract the noused channels, only calculate once\r\n if i == 0:\r\n \r\n # chnAreas\r\n chnAreas = matdat['chnAreas'].tolist()\r\n \r\n # fs: sample rate\r\n fs = matdat['fs'] \r\n \r\n \r\n\r\n ### dealing lfp data\r\n \r\n # lfp (np.ndarray): nareas * ntemp * ntrials or ntemp * nareas * ntrials\r\n if 'lfpdata' in matdat.keys():\r\n lfpdata_1file = matdat['lfpdata']\r\n elif 'lfpsegs' in matdat.keys():\r\n lfpdata_1file = matdat['lfpsegs']\r\n\r\n n1, n2, n3 = lfpdata_1file.shape\r\n if n1 > n2: # ntemp * nareas * ntrials\r\n lfpdata_1file = np.transpose(lfpdata_1file, (1, 0, 2))\r\n \r\n # concatenate to lfpdata for all files\r\n if 'lfpdata' not in locals():\r\n lfpdata = lfpdata_1file\r\n else:\r\n lfpdata = np.concatenate((lfpdata, lfpdata_1file), axis = 2)\r\n \r\n \r\n return lfpdata, chnAreas, fs", "def add_reffile_overrides(self):\n all_obs_info, unique_obs_info = self.info_for_all_observations()\n\n # Add empty placeholders for reference file entries\n empty_col = np.array([' ' * 500] * len(self.info['Instrument']))\n superbias_arr = deepcopy(empty_col)\n linearity_arr = deepcopy(empty_col)\n 
saturation_arr = deepcopy(empty_col)\n gain_arr = deepcopy(empty_col)\n distortion_arr = deepcopy(empty_col)\n photom_arr = deepcopy(empty_col)\n ipc_arr = deepcopy(empty_col)\n transmission_arr = deepcopy(empty_col)\n badpixmask_arr = deepcopy(empty_col)\n pixelflat_arr = deepcopy(empty_col)\n\n # Loop over combinations, create metadata dict, and get reffiles\n for status in unique_obs_info:\n updated_status = deepcopy(status)\n (instrument, detector, filtername, pupilname, readpattern, exptype) = status\n\n if instrument == 'FGS':\n if detector in ['G1', 'G2']:\n detector = detector.replace('G', 'GUIDER')\n updated_status = (instrument, detector, filtername, pupilname, readpattern, exptype)\n\n # If the user entered reference files in self.reffile_defaults\n # use those over what comes from the CRDS query\n #sbias, lin, sat, gainfile, dist, ipcfile, pam = self.reffiles_from_dict(status)\n manual_reffiles = self.reffiles_from_dict(updated_status)\n for key in manual_reffiles:\n if manual_reffiles[key] == 'none':\n manual_reffiles[key] = 'crds'\n\n # Identify entries in the original list that use this combination\n match = [i for i, item in enumerate(all_obs_info) if item==status]\n\n # Populate the reference file names for the matching entries\n superbias_arr[match] = manual_reffiles['superbias']\n linearity_arr[match] = manual_reffiles['linearity']\n saturation_arr[match] = manual_reffiles['saturation']\n gain_arr[match] = manual_reffiles['gain']\n distortion_arr[match] = manual_reffiles['distortion']\n photom_arr[match] = manual_reffiles['photom']\n ipc_arr[match] = manual_reffiles['ipc']\n transmission_arr[match] = manual_reffiles['transmission']\n badpixmask_arr[match] = manual_reffiles['badpixmask']\n pixelflat_arr[match] = manual_reffiles['pixelflat']\n\n self.info['superbias'] = list(superbias_arr)\n self.info['linearity'] = list(linearity_arr)\n self.info['saturation'] = list(saturation_arr)\n self.info['gain'] = list(gain_arr)\n self.info['astrometric'] = list(distortion_arr)\n self.info['photom'] = list(photom_arr)\n self.info['ipc'] = list(ipc_arr)\n self.info['transmission'] = list(transmission_arr)\n self.info['badpixmask'] = list(badpixmask_arr)\n self.info['pixelflat'] = list(pixelflat_arr)", "def conRFMixAndMaskToBeagle(indfile_name, rephasedhaps_pref, em_iters, win_size, chroms):\n\t### First get individual information\n\twindow_id = 0\n\tem_iter = em_iters\n\tindfile = open(indfile_name, \"r\")\t\n\tinds = []\n\tfor line in indfile:\n\t\tsplits = line.strip(\"\\r\\n\").split()\n\t\tinds.append(splits[1] + \"_A\")\n\t\tinds.append(splits[1] + \"_B\")\n\n\tallloci = []\n\toutfilename = rephasedhaps_pref + \"_w\" + str(win_size) + \".beagle\"\n\toutfile = open(outfilename, \"w\")\n\toutfile.write(\"I\\tid\\t\" + \"\\t\".join(inds) + \"\\n\")\n\t## Write genotype data out to file\n\n\tvitout = open(rephasedhaps_pref + \".vit\", \"w\")\n\twinout = open(rephasedhaps_pref + \".windows\", \"w\")\n\tfbkout = rephasedhaps_pref + \".fbk\"\n\tif os.path.exists(fbkout):\n\t\tos.remove(fbkout)\n\tvitlist = []\n\tfor chrom in chroms:\n\t\tprint chrom\n\t\tshapeitfilename = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.allelesRephased\" + str(em_iters) + \".txt\"\n\t\tshapeitfile = open(shapeitfilename, \"rb\")\n\t\tfbkin_name = rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".ForwardBackward.txt\"\n\t\tos.system('cat ' + fbkin_name + \" >> \" + fbkout) # Concatenate files together\n\t\tmarkerin = rephasedhaps_pref + \"_chr\" + str(chrom) + 
\"_shapeout.amaps\"\n\t\tmarkerfile = open(markerin, \"r\")\n\t\tloci=[]\n\t\talleles = {}\n\t\tfor mline in markerfile:\n\t\t\tmsplit = mline.strip().split()\n\t\t\tloci.append(msplit[1])\n\t\t\talleles[msplit[1]] = [msplit[3], msplit[4] ]\n\n\t\tallloci.extend(loci)\n\t\tfor j,line in enumerate(shapeitfile):\n\t\t\tsline = line.strip(\"\\r\\n\")\n\t\t\tzero, ones = alleles[loci[j]]\n\t\t\tfixed = [ recodeAllele(k, zero, ones) for k in sline ]\n\t\t\toutfile.write(\"M\\t\" + loci[j] + \"\\t\" + \"\\t\".join(fixed) + \"\\n\")\n\t\tvitfile = open(rephasedhaps_pref + \"_chr\" + str(chrom) + \"_shapeout.\" + str(em_iters) + \".Viterbi.txt\", \"r\")\n\t\tvitlist.extend([x.strip().split() for x in vitfile])\n\t\tshapeitfile.close()\n\t\tvitfile.close()\n\t\t\n\t# This will transpose the whole Viterbi file\n\t# Yikes this may take a lot of memory\n\tfor i,x in enumerate(zip(*vitlist)):\n\t\tvitout.write(inds[i] + \"\\t\")\n\t\tfor y in x:\n\t\t\tvitout.write(y+\"\\t\")\n\t\tvitout.write(\"\\n\")\n\t\t### This doesn't quite work yet so make sure to fix it next time\n\tfor l in allloci:\n\t\twinout.write(\"window\" + str(window_id) + \"\\t\" + l + \"\\n\")\n\t\twindow_id += 1\n\treturn([outfile.name, vitout.name, winout.name, fbkout])", "def test_outpath_multi(tmpdir):\n base = glob.glob(\"%s/dummy/mm0\" % DATA_DIR)[0]\n paths = sorted(glob.glob(base + \"/*.ufo\"))\n # the reference font is modified in-place, make a temp copy first\n referenceSrc = py.path.local(paths[0])\n referenceDst = tmpdir / referenceSrc.basename\n referenceSrc.copy(referenceDst)\n reference = str(referenceDst)\n inpaths = paths[1:]\n outpaths = [str(tmpdir / basename(p)) for p in inpaths]\n\n psautohint(inpaths + ['-o'] + outpaths + ['-r', reference])", "def correct_naming(obsid, inst):\n cobsid = str(int(float(obsid)))\n if len(cobsid) == 5:\n return \n\n lobsid = mcf.add_leading_zero(obsid, 5)\n \n for sdir in ['secondary', 'analysis']:\n\n cmd = 'ls /data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/hrcf* >' + zspace\n os.system(cmd)\n\n data = mcf.read_data_file(zspace, remove=1)\n for ent in data:\n atemp = re.split('\\/', ent)\n fname = atemp[-1]\n mc = re.search(lobsid, fname)\n if mc is not None:\n continue\n else:\n atemp = re.split('hrcf', fname)\n btemp = re.split('_', atemp[1])\n sobs = btemp[0]\n new = fname.replace(sobs, lobsid)\n full = '/data/hrc/' + inst + '/' + lobsid + '/' + sdir + '/' + new\n\n cmd = 'mv ' + ent + ' ' + full\n os.system(cmd)", "def fetch_basenames(engine, form_factor):\n for key in ['current', 'm_mother', 'm_daughter', 'm_spectator', 'momentum']:\n if key not in form_factor:\n raise KeyError(f\"Required key '{key}' is missing.\")\n\n def abspath(dirname):\n return os.path.join(pathlib.Path(__file__).parent.absolute(), dirname)\n\n # 2pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000'\n mother = \"%_RW_RW_d_d_m{m_mother}_m{m_spectator}_p000%fine\"\n daughter = \"%_RW_RW_d_d_m{m_daughter}_m{m_spectator}_{momentum}%fine\"\n if form_factor['m_daughter'] < form_factor['m_spectator']:\n daughter = \"%_RW_RW_d_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n # 3pt correlators like 'P5-P5_RW_RW_d_d_m0.002426_m0.002426_p000',\n corr3 = \"%_{current}_T%_m{m_mother}_RW_RW_x_d_m{m_spectator}_m{m_daughter}_{momentum}%fine\"\n\n params = {\n 'mother': mother.format(**form_factor),\n 'daughter': daughter.format(**form_factor),\n 'corr3': corr3.format(**form_factor)}\n queries = aiosql.from_path(abspath(\"sql/\"), \"sqlite3\")\n with db.connection_scope(engine) as conn:\n corrs = 
queries.postgres.get_correlator_names(conn, **params)\n \n return np.squeeze(np.array(corrs))", "def _SetAnatNames(self, anat_tgt):\n# Define links to structural image in each output directory.\n for entry in self.entry_map['epi'] + self.entry_map['fmap'] + \\\n self.entry_map['dti'] + self.entry_map['asl']:\n self.info[entry]['anat_link'] = anat_tgt\n\n# Name the normalization source image T1High. Number the rest.\n anat_entries = self.entry_map['anat'][:]\n anat_entries.remove(anat_tgt)\n n_t1high = 1\n for entry in anat_entries:\n if self.info[entry]['type'] == 'T1High':\n# High res T1-weighted, not normalization target. Rename it.\n fname = 'T1High_%d' % n_t1high\n fullname = '%s/%s' % (self.info[entry]['outdir'], fname)\n self.info[entry]['imgfile'] = fullname\n self.info[entry]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[entry]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = False\n n_t1high += 1\n fname = 'T1High'\n fullname = '%s/%s' % (self.info[anat_tgt]['outdir'], fname)\n self.info[anat_tgt]['imgfile'] = fullname\n self.info[anat_tgt]['imgfile_skstrip'] = '%s_skstrip' % fullname\n self.info[anat_tgt]['matfile'] = '%s_matfile.aff12.1D' % fullname\n self.info[anat_tgt]['norm_src'] = True\n\n self.anatomical = '%s%s' % (self.info[anat_tgt]['imgfile'], \\\n self.info[anat_tgt]['suffix'])\n# The target for motin correction is the source for spatial normalization.\n self.norm_src = anat_tgt", "def set_fnames(subj, decondir):\n fnames = dict()\n outpref = 'decon_out.ramps_wav.%s_concat.Powered.cleanEPI' % subj\n sfx = 'Powered.cleanEPI.uncensored.txt'\n wm_name = 'wm_v8.%s_all.%s' % (subj, sfx)\n fnames['wm_file'] = os.path.join(os.environ['avp'], 'nii',\n '%s_CNR.anat' % subj, wm_name)\n vent_name = 'vent_v8.%s_all.%s' % (subj, sfx)\n fnames['vent_file'] = os.path.join(os.environ['avp'], 'nii',\n '%s_CNR.anat' % subj, vent_name)\n fnames['cf'] = os.path.join(os.environ['avp'], 'nii',\n 'all_ts.%s.Powered.censor.1D' % subj)\n fnames['outpref'] = os.path.join(decondir, outpref)\n\n return fnames", "def f2suff(forfile, opath, suff):\n import os\n\n idir = os.path.dirname(forfile)\n ifile = os.path.basename(forfile)\n odir = idir + '/' + opath\n ofile = ifile[0:ifile.rfind('.')] + '.' 
+ suff\n\n return odir + '/' + ofile", "def lfcSet(file_list):\n # Just out of paranoia, order the LFC files\n pseudo_time = np.empty_like(file_list,dtype='float')\n file_date = np.empty_like(file_list,dtype='float')\n file_obsn = np.empty_like(file_list,dtype='float')\n for i,file_name in enumerate(file_list):\n file_id = os.path.basename(file_name).split('_')[-1][:-5]\n pseudo_time[i] = file_id\n file_date[i], file_obsn[i] = file_id.split('.')\n time_sort = np.argsort(pseudo_time)\n file_list = np.array(file_list)[time_sort]\n file_date = file_date[time_sort]\n file_obsn = file_obsn[time_sort]\n \n sets = []\n consecutive = []\n date = file_date[0]\n for i in range(1,len(file_list)):\n if date != file_date[i]:\n date = file_date[i]\n sets.append(consecutive)\n consecutive=[file_list[i]]\n elif file_obsn[i] != file_obsn[i-1]+1:\n sets.append(consecutive)\n consecutive=[file_list[i]]\n else:\n consecutive.append(file_list[i])\n return sets", "def luke_filewalker(Malware_bin):\n file_analyses=[]\n for (dirpath, dirname, filenames) in os.walk(Malware_bin):\n for f in filenames:\n file_analyses.append(os.path.join(Malware_bin,f))\n #print \"Added : \"+'\\033[1m'+f+'\\033[0m'\n return file_analyses", "def _process_utterance(lf0_dir, mgc_dir, bap_dir, cmp_dir, linear_dir, basename, wav_path, text, hparams):\n\n\tif hparams.trim_silence:\n\t\ttar_wavfile = wav_path[:-4] + \"_trim.wav\"\n\t\tprint(\"raw wav path:%s\" % wav_path)\n\t\twav_raw, fs = sf.read(wav_path)\n\t\twav_trim = audio.trim_silence(wav_raw, hparams)\n\t\tsf.write(tar_wavfile, wav_trim, fs)\n\n\t\twav_path = tar_wavfile\n\n\tnFFTHalf, alpha, bap_dim = audio.get_config(hparams.sample_rate)\n\n\tmcsize = hparams.num_mgc - 1\n\n\tfilename = basename #os.path.basename(wav_path).split(\".\")[0]\n\n\tprint('extract feats for %s' % wav_path)\n\n\t# extract f0,sp,ap\n\tos.system(\"analysis %s %s/%s.f0 %s/%s.sp %s/%s.bapd\" %\n\t\t\t\t (wav_path, lf0_dir, filename,\n\t\t\t\t mgc_dir, filename, bap_dir, filename)) # get float64???\n\n # interpolate f0\n\tf0 = np.fromfile(\"%s/%s.f0\" % (lf0_dir, filename),dtype=np.float64)\n\tcontinuous_f0 = interp1d(f0, kind=\"slinear\")\n\tcontinuous_f0.tofile(\"%s/%s.f0c\" % (lf0_dir, filename))\n\n\t# convert f0 to lf0\n\tos.system(\"x2x +da %s/%s.f0c > %s/%s.f0a\" % (lf0_dir, filename, lf0_dir, filename))\n\tos.system(\"x2x +af %s/%s.f0a | sopr -magic 0.0 -LN -MAGIC -1.0E+10 > %s/%s.lf0\" % (\n\t\tlf0_dir, filename, lf0_dir, filename))\n\n\t# convert sp to mgc\n\tos.system(\"x2x +df %s/%s.sp | sopr -R -m 32768.0 | \"\n\t\t\t \"mcep -a %f -m %d -l %d -e 1.0E-8 -j 0 -f 0.0 -q 3 \"\n\t\t\t \"> %s/%s.mgc\" % (mgc_dir, filename, alpha, mcsize, nFFTHalf, mgc_dir, filename))\n\n\t# convert ap to bap\n\tos.system(\"x2x +df %s/%s.bapd > %s/%s.bap\" %\n\t\t\t (bap_dir, filename, bap_dir, filename))\n\n\t# merge mgc,lf0 and bap to cmp\n\tos.system(\"merge +f -s 0 -l 1 -L %d %s/%s.mgc < %s/%s.lf0 > %s/%s.ml\" %\n\t\t\t((mcsize+1), mgc_dir, filename, lf0_dir, filename, cmp_dir, filename))\n\tos.system(\"merge +f -s 0 -l %d -L %d %s/%s.ml < %s/%s.bap > %s/%s.cmp\" %\n\t\t\t(bap_dim, (mcsize+2), cmp_dir, filename, bap_dir, filename, cmp_dir, filename))\n\n\t#if mel_frames > hparams.max_mel_frames and hparams.clip_mels_length:\n\t#\treturn None\n\n\t#Compute the linear scale spectrogram from the wav\n\twav = audio.load_wav(wav_path, hparams.sample_rate)\n\tlinear_spectrogram = audio.linearspectrogram(wav, hparams).astype(np.float32)\n\tlinear_frames = linear_spectrogram.shape[1]\n\n\t#sanity check\n\t#assert 
linear_frames == mel_frames\n\n\tlf0 = np.fromfile(\"%s/%s.lf0\" % (lf0_dir, filename), dtype=np.float32)\n\tmgc = np.fromfile(\"%s/%s.mgc\" % (mgc_dir, filename), dtype=np.float32)\n\tbap = np.fromfile(\"%s/%s.bap\" % (bap_dir, filename), dtype=np.float32)\n\tcmp = np.fromfile(\"%s/%s.cmp\" % (cmp_dir, filename), dtype=np.float32)\n\n\tcmp_dim = mcsize + 1 + 1 + bap_dim\n\tcmp_frames = cmp.shape[0] / cmp_dim\n\t#print(f0[:100])\n\t#print(continuous_f0[:100])\n\tprint(lf0.shape)\n\tprint(continuous_f0.shape)\n\tprint(mgc.shape)\n\tprint(bap.shape)\n\tprint(cmp_frames)\n\tprint(continuous_f0.dtype)\n\tprint(mgc.dtype)\n\tprint(bap.dtype)\n\tassert (mgc.shape[0]/(mcsize+1)) == (continuous_f0.shape[0]/1) == (bap.shape[0]/bap_dim) == cmp_frames\n\tassert cmp_dim == hparams.num_mels\n\t#assert len(out) >= cmp_frames * audio.get_hop_size(hparams)\n\n\t#time resolution adjustement\n\t#ensure length of raw audio is multiple of hop size so that we can use\n\t#transposed convolution to upsample\n\t#out = out[:mel_frames * audio.get_hop_size(hparams)]\n\t#assert len(out) % audio.get_hop_size(hparams) == 0\n\t#time_steps = len(out)\n\n\t# Write the spectrogram and audio to disk\n\t#audio_filename = 'audio-{}.npy'.format(index)\n\tcmp_mat = cmp.reshape(-1, cmp_dim)\n\tcmp_filename = 'cmp-{}.npy'.format(basename)\n\tlinear_filename = 'linear-{}.npy'.format(basename)\n\t#np.save(os.path.join(wav_dir, audio_filename), out.astype(out_dtype), allow_pickle=False)\n\tnp.save(os.path.join(cmp_dir, cmp_filename), cmp_mat, allow_pickle=False)\n\tnp.save(os.path.join(linear_dir, linear_filename), linear_spectrogram.T, allow_pickle=False)\n\t# Return a tuple describing this training example\n\treturn (cmp_filename, linear_filename, cmp_frames, text)", "def cut_seq_fasta_file(listOfFasta, PATH_FASTA_CUTOFF, INFO_folder, file_cutoff=None) :\n\n\tif file_cutoff == True :\n\t\tDICT_CUTOFF=set_dict_cutoff_init(listOfFasta, INFO_folder)\n\telse :\n\t\tDICT_CUTOFF=set_dict_cutoff(cutoff_file)\n\n\n\tprint \"\\n#################\"\n\tprint \"# Cutoff file\"\n\tprint \"#################\\n\"\n\n\tcreate_folder(PATH_FASTA_CUTOFF)\n\n\tdict_remove = {}\n\n\tprint \"\\n------------------------------------------\"\n\tprint \"| First read : Creation of the dictionnary\"\n\tprint \"------------------------------------------\\n\"\n\n\tfor my_file in listOfFasta :\n\t\tcurrent_file = os.path.basename(my_file)\n\t\tif current_file in DICT_CUTOFF:\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tnumber_seq = len(list(seqiter))\n\t\t\tprogression = 1\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\n\t\t\tfor seq in seqiter :\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences read\\r\".format(current_file, progression/float(number_seq)*100, progression, number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\n\t\t\t\tif \"_D_\" in seq.id :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\t\t\t\telse :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"V\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tcontinue\n\t\t\t\telif len(seq) > DICT_CUTOFF[current_file][1] or len(seq) < DICT_CUTOFF[current_file][0] :\n\t\t\t\t\tif len(seq) > DICT_CUTOFF[current_file][1] :\n\t\t\t\t\t\tdict_remove[id_seq]=[seq.id,[], \"long\"]\n\t\t\t\t\telse :\n\t\t\t\t\t\tdict_remove[id_seq]=[seq.id,[], \"short\"]\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(current_file))\n\n\tprint 
\"\\n-----------------------------\"\n\tprint \"| Second read : Writing files\"\n\tprint \"-----------------------------\\n\"\n\n\tfor my_file in listOfFasta :\n\t\tcurrent_file = os.path.basename(my_file)\n\t\twith open(os.path.join(PATH_FASTA_CUTOFF, current_file), \"w\") as writing_file :\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tnumber_seq = len(list(seqiter))\n\t\t\tprogression = 1\n\n\t\t\tseqiter = SeqIO.parse(my_file, 'fasta')\n\t\t\tfor seq in seqiter :\n\t\t\t\tsys.stdout.write(\"File : {} -> {:.2f}% : {}/{} sequences read\\r\".format(current_file, progression/float(number_seq)*100, progression, number_seq))\n\t\t\t\tsys.stdout.flush()\n\t\t\t\tprogression += 1\n\n\t\t\t\tid_seq=seq.id.split(\"_\")\n\n\t\t\t\tif \"_D_\" in seq.id :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"D\")]))\n\t\t\t\telse :\n\t\t\t\t\tid_seq=re.sub(\"Num[0-9]_\", \"\", \"_\".join(id_seq[:id_seq.index(\"V\")]))\n\n\t\t\t\tif id_seq in dict_remove :\n\t\t\t\t\tdict_remove[id_seq][1].append(seq)\n\t\t\t\telse :\n\t\t\t\t\tSeqIO.write(seq, writing_file,\"fasta\")\n\n\t\tprint\n\t\tprint(\"File : {} -> Done!\".format(current_file))\n\n\twrite_remove_cutoff(dict_remove, INFO_folder)\n\n\treturn", "def annotateFilesAfterHeurAndSelection(inputFolderPath, outputFolderPath, dumpSP=True):\n # add a slash if needed\n if inputFolderPath[-1] != u'/':\n inputFolderPath = u'{0}/'.format(inputFolderPath)\n if outputFolderPath[-1] != u'/':\n outputFolderPath = u'{0}/'.format(outputFolderPath)\n # get the selected reference file lines\n with open(u'{0}sampleReference.Paths'.format(inputFolderPath)) as refPathsFile:\n referenceLines = refPathsFile.readlines()\n # get the en and fr input lines\n with open(u'{0}sample.en'.format(inputFolderPath)) as enFile:\n enLns = enFile.readlines()\n with open(u'{0}sample.fr'.format(inputFolderPath)) as frFile:\n frLns = frFile.readlines()\n with open(u'{0}scores.tsv'.format(inputFolderPath)) as scFile:\n scLns = scFile.readlines()\n # get rid of the files we have already annotated\n if utilsOs.theFileExists(u'{0}sampleReference.tsv'.format(outputFolderPath)):\n # get the already seen lines\n referencePathLine = utilsOs.readAllLinesFromFile(u'{0}sampleReference.tsv'.format(outputFolderPath),\n noNewLineChar=True)\n listOfAnnotations = utilsOs.readAllLinesFromFile(u'{0}sampleAnnotation.tsv'.format(outputFolderPath),\n noNewLineChar=True)\n # maintain only what we haven't seen\n annotatedFiles = set(referencePathLine)\n newRefLines = []\n for ind, file in enumerate(referenceLines):\n if file.replace(u'\\n', u'') not in annotatedFiles:\n newRefLines.append( [ind, file.replace(u'\\n', u'')] )\n referenceLines = newRefLines\n # print(referenceLines)\n else:\n referencePathLine = []\n listOfAnnotations = []\n referenceLines = [(ind, file.replace(u'\\n', u'')) for ind, file in enumerate(referenceLines)]\n # print the annotator cheat sheet\n printCheatSheet()\n # open each file in EN and FR and show it in the terminal\n for tupleRef in referenceLines:\n indRef, refLn = tupleRef[0], tupleRef[1]\n print(u'############# {0} ##############'.format(refLn.replace(u'\\n', u'')))\n # get the path for the source and target\n lnsSource = enLns if u'en-fr' in refLn else frLns\n lnsTarget = frLns if u'en-fr' in refLn else enLns\n # get the correct terminal line length\n lineLength = 137-len(str(len(listOfAnnotations)+1))\n # color in red the during lines\n redDuringSource = u'\\033[1;31m{0}\\033[0m'.format(lnsSource[indRef])\n # print the sentences\n 
print(u'{0} - {1}'.format(len(listOfAnnotations), redDuringSource))\n print(u'{0} - {1}'.format(len(listOfAnnotations), lnsTarget[indRef]))\n print()\n # count the lines that take the space of 2 lines\n longLines = getNbLongLines([lnsSource[indRef], lnsTarget[indRef]], lineLength)\n # get the first part of the annotation (aligned or not)\n annotatorGeneralInput = input(u'Aligned-Misaligned annotation: ')\n # make sure to have the right general annotation\n while True:\n if annotatorGeneralInput in [u'0', u'1', u'0.0', u'0.1', u'0.2',\n u'1.0', u'1.1', u'1.2', u'1.3', u'1.4', u'c', u'correction']:\n break\n else:\n utilsOs.moveUpAndLeftNLines(1, slowly=False)\n annotatorGeneralInput = input(u'Repeat annotation: ')\n if annotatorGeneralInput in [u'c', u'correct']:\n annotatorGeneralInput, listOfAnnotations = correctionToAnnotation(listOfAnnotations)\n # save to the list of annotations\n listOfAnnotations.append(float(annotatorGeneralInput))\n # remove the lines from the terminal before getting to the next pair\n utilsOs.moveUpAndLeftNLines(7+longLines, slowly=False)\n # erase all remainder of the previous sentences and go back up again\n for e in range(14+longLines):\n print(u' '*(lineLength+4))\n utilsOs.moveUpAndLeftNLines(7 + longLines, slowly=False)\n # append the reference to the file\n referencePathLine.append(refLn)\n # dump the file line by line, to be sure in case of error\n # dump the reference\n utilsOs.dumpRawLines(referencePathLine, u'{0}sampleReference.tsv'.format(outputFolderPath),\n addNewline=True, rewrite=True)\n # dump the annotation\n utilsOs.dumpRawLines(listOfAnnotations, u'{0}sampleAnnotation.tsv'.format(outputFolderPath),\n addNewline=True, rewrite=True)\n # dump the SP\n if dumpSP is True:\n enSent = lnsSource[indRef] if u'en-fr' in refLn else lnsTarget[indRef]\n frSent = lnsTarget[indRef] if u'en-fr' in refLn else lnsSource[indRef]\n utilsOs.appendLineToFile(enSent, u'{0}sample.en'.format(outputFolderPath), addNewLine=False)\n utilsOs.appendLineToFile(frSent, u'{0}sample.fr'.format(outputFolderPath), addNewLine=False)\n utilsOs.appendLineToFile(scLns[indRef], u'{0}scores.tsv'.format(outputFolderPath), addNewLine=False)\n # clear part of terminal\n utilsOs.moveUpAndLeftNLines(7, slowly=False)", "def ffi(ccd=1, camera=1, sector=1, size=150, local_directory='', producing_mask=False):\n input_files = glob(f'{local_directory}ffi/*{camera}-{ccd}-????-?_ffic.fits')\n print('camera: ' + str(camera) + ' ccd: ' + str(ccd) + ' num of files: ' + str(len(input_files)))\n time = []\n quality = []\n cadence = []\n flux = np.empty((len(input_files), 2048, 2048), dtype=np.float32)\n for i, file in enumerate(tqdm(input_files)):\n try:\n with fits.open(file, mode='denywrite', memmap=False) as hdul:\n quality.append(hdul[1].header['DQUALITY'])\n cadence.append(hdul[0].header['FFIINDEX'])\n flux[i] = hdul[1].data[0:2048, 44:2092]\n time.append((hdul[1].header['TSTOP'] + hdul[1].header['TSTART']) / 2)\n\n except:\n print(f'Corrupted file {file}, download again ...')\n response = requests.get(\n f'https://mast.stsci.edu/api/v0.1/Download/file/?uri=mast:TESS/product/{os.path.basename(file)}')\n open(file, 'wb').write(response.content)\n with fits.open(file, mode='denywrite', memmap=False) as hdul:\n quality.append(hdul[1].header['DQUALITY'])\n cadence.append(hdul[0].header['FFIINDEX'])\n flux[i] = hdul[1].data[0:2048, 44:2092]\n time.append((hdul[1].header['TSTOP'] + hdul[1].header['TSTART']) / 2)\n time_order = np.argsort(np.array(time))\n time = np.array(time)[time_order]\n flux = 
flux[time_order, :, :]\n quality = np.array(quality)[time_order]\n cadence = np.array(cadence)[time_order]\n # mask = np.array([True] * 2048 ** 2).reshape(2048, 2048)\n # for i in range(len(time)):\n # mask[np.where(flux[i] > np.percentile(flux[i], 99.95))] = False\n # mask[np.where(flux[i] < np.median(flux[i]) / 2)] = False\n\n if producing_mask:\n median_flux = np.median(flux, axis=0)\n mask = background_mask(im=median_flux)\n mask /= ndimage.median_filter(mask, size=51)\n np.save(f'{local_directory}mask/mask_sector{sector:04d}_cam{camera}_ccd{ccd}.npy', mask)\n return\n # load mask\n mask = pkg_resources.resource_stream(__name__, f'background_mask/median_mask.fits')\n mask = fits.open(mask)[0].data[(camera - 1) * 4 + (ccd - 1), :]\n mask = np.repeat(mask.reshape(1, 2048), repeats=2048, axis=0)\n bad_pixels = np.zeros(np.shape(flux[0]))\n med_flux = np.median(flux, axis=0)\n bad_pixels[med_flux > 0.8 * np.nanmax(med_flux)] = 1\n bad_pixels[med_flux < 0.2 * np.nanmedian(med_flux)] = 1\n bad_pixels[np.isnan(med_flux)] = 1\n\n x_b, y_b = np.where(bad_pixels)\n for i in range(len(x_b)):\n if x_b[i] < 2047:\n bad_pixels[x_b[i] + 1, y_b[i]] = 1\n if x_b[i] > 0:\n bad_pixels[x_b[i] - 1, y_b[i]] = 1\n if y_b[i] < 2047:\n bad_pixels[x_b[i], y_b[i] + 1] = 1\n if y_b[i] > 0:\n bad_pixels[x_b[i], y_b[i] - 1] = 1\n\n mask = np.ma.masked_array(mask, mask=bad_pixels)\n mask = np.ma.masked_equal(mask, 0)\n\n for i in range(10):\n hdul = fits.open(input_files[np.where(np.array(quality) == 0)[0][i]])\n wcs = WCS(hdul[1].header)\n if wcs.axis_type_names == ['RA', 'DEC']:\n break\n\n exposure = int((hdul[0].header['TSTOP'] - hdul[0].header['TSTART']) * 86400)\n\n # 95*95 cuts with 2 pixel redundant, (22*22 cuts)\n # try 77*77 with 4 redundant, (28*28 cuts)\n os.makedirs(f'{local_directory}source/{camera}-{ccd}/', exist_ok=True)\n for i in trange(14): # 22\n for j in range(14): # 22\n source_path = f'{local_directory}source/{camera}-{ccd}/source_{i:02d}_{j:02d}.pkl'\n source_exists = exists(source_path)\n if source_exists and os.path.getsize(source_path) > 0:\n # print(f'{source_path} exists. ')\n pass\n else:\n with open(source_path, 'wb') as output:\n source = Source(x=i * (size - 4), y=j * (size - 4), flux=flux, mask=mask, sector=sector,\n time=time, size=size, quality=quality, wcs=wcs, camera=camera, ccd=ccd,\n exposure=exposure, cadence=cadence)\n pickle.dump(source, output, pickle.HIGHEST_PROTOCOL)", "def populateFileList(self):\n\n self.m_fileList.SetForegroundColour(wx.NullColour)\n\n # We'll need to track which file names are modified and which\n # file names duped.\n applicable, dupes = set(), set()\n\n if not self.m_validPatterns:\n # Regex's don't compile yet, just use the raw filename list.\n newNames = self.m_diskNames\n\n else:\n # Apply the substitution to the filename list to produce a\n # destination-name list, and identify whether the patterns\n # actually affect anything.\n #\n newNames, modifiedIndexes = [], []\n\n matcher = re.compile(self.m_reFromCtl.Value).subn\n subs = self.m_reToCtl.Value\n\n for filename in self.m_diskNames:\n # Perform the sub\n (filename, numChanges) = matcher(subs, filename)\n\n # Was there a modification?\n if numChanges:\n # Record the affected name.\n applicable.add(filename)\n if filename in newNames:\n dupes.add(filename)\n\n # Add to the primary list\n newNames.append(filename)\n\n # Does this produce a different list than we already had? 
If so,\n # clear the file list and replace it with the new one.\n #\n if newNames != self.m_newNames:\n\n self.m_fileList.Clear()\n\n # Figure out the longest name so we can create a cleanly-formatted\n # set of prefix/suffix characters for the modified/duped annotation.\n #\n maxLen = max(map(len, newNames))\n decorate = '{m} {fn:<{ml}} {m}'.format\n\n # Now build a list of display elements.\n for filename in newNames:\n mark = ' ' if filename not in applicable else '|'\n if filename in dupes:\n mark = '*'\n self.m_fileList.Append(decorate(m=mark, fn=filename, ml=maxLen))\n\n # Keep the list.\n self.m_newNames[:] = newNames\n\n # Update the apply button, we only want it enabled when the user\n # has a valid set of patterns that affect any files and have no\n # dupes produced as a result.\n #\n self.m_applyBtn.Enabled = bool(applicable) and not dupes\n\n if dupes:\n # Emphasize the presence of dupes.\n self.m_fileList.SetForegroundColour(wx.RED)\n\n # Draw the list.\n self.m_fileList.Refresh()", "def add_detectors(self, detect_list):\n if self.barrier is None:\n raise RuntimeError(\"You need to call setup_processes() first\")\n try:\n if 'KoopaTroopaBeach' not in self.variables[0]['course']:\n # Find SHORTCUT and remove it\n for detector in detect_list:\n if isinstance(detector, detection.Shortcut):\n detect_list.remove(detector)\n break\n except:\n # Assume phase 0\n pass\n\n self.manager.set_detectors(detect_list)\n self.manager.start_workers()", "def load_files(self, filenames, ffc_correction):\n self.filenames = filenames\n self.ffc_correction = ffc_correction\n\n proj, flat, dark, theta = dx.read_aps_32id(filenames, proj=(0, 1))\n\n #self.slider.setRange(0, len(theta) - 1)\n self.slider.setRange(0, util.get_dx_dims(str(filenames), 'data')[0] - 1)\n self.slider.setSliderPosition(0)\n self.update_image()", "def maf2vcf(maf, ref):\n f = open(maf + \".aa\", 'w')\n with open(maf, 'r') as maf:\n for line in maf:\n if line.startswith(\"s\"):\n if ref in line:\n aa = line.split()\n ancallele = aa[6]\n if \"-\" in aa[4]:\n # flip to opposite base\n if aa[6] == 'A':\n ancallele = 'T'\n elif aa[6] == 'T':\n ancallele = 'A'\n elif aa[6] == 'C':\n ancallele = 'G'\n elif aa[6] == 'G':\n ancallele = 'C'\n else:\n print(\"ERROR allele not iupac\")\n else:\n pass\n line = next(maf)\n aa = line.split()\n pos = int(aa[2])\n size = int(aa[5])\n if \"-\" in aa[4]:\n pos_1 = size - pos\n else:\n pos_1 = pos\n f.write(\"{}\\t{}\\t{}\\n\".format(aa[1][3:], pos_1 + 1, ancallele))\n return(None)", "def load_and_join(LC_DIR):\n fnames = sorted(glob.glob(os.path.join(LC_DIR, \"*fits\")))\n hdulist = fits.open(fnames[0])\n t = hdulist[1].data\n time = t[\"TIME\"]\n flux = t[\"PDCSAP_FLUX\"]\n flux_err = t[\"PDCSAP_FLUX_ERR\"]\n q = t[\"SAP_QUALITY\"]\n m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \\\n (q == 0)\n x = time[m]\n med = np.median(flux[m])\n y = flux[m]/med - 1\n yerr = flux_err[m]/med\n for fname in fnames[1:]:\n hdulist = fits.open(fname)\n t = hdulist[1].data\n time = t[\"TIME\"]\n flux = t[\"PDCSAP_FLUX\"]\n flux_err = t[\"PDCSAP_FLUX_ERR\"]\n q = t[\"SAP_QUALITY\"]\n m = np.isfinite(time) * np.isfinite(flux) * np.isfinite(flux_err) * \\\n (q == 0)\n x = np.concatenate((x, time[m]))\n med = np.median(flux[m])\n y = np.concatenate((y, flux[m]/med - 1))\n yerr = np.concatenate((yerr, flux_err[m]/med))\n return x, y, yerr", "def make_fake_masters(hdul, Mtype, ccd_shape, TD):\n\t\n\tif Mtype == \"MB.fits\":\n\t\tfn = Mtype\n\t\tprint \"NO MASTER BIAS AVAILABLE -- CROPPING A 
REPLACEMENT from FF BIAS\"\n\telse:\n\t\tfn = Mtype\n\t\tprint \"NO MASTER FLAT AVAILABLE -- CROPPING A REPLACEMENT from FF FLAT\"\n\n\tTRIM, TRIM1, VR, PS, PS1, OS, OS1 = CCD_sections((ccd_shape[1], ccd_shape[1]))\n\tBIN = `ccd_shape[1]` + 'x' + `ccd_shape[2]`\n\tFFDIR = np.sort(glob.glob(CD + '/' + BIN + '/*'))[-1]\t # Dir with the FF for given bining\n\t\n\tM = fits.HDUList()\n\tM.append(fits.ImageHDU())\n\tFFM = fits.open(FFDIR+'/MF.fits')\n\t\n\tfor EXT in extensions:\n\t\tYSTART = int((hdul[0].header['YSTART']).strip('[,]')) # Window y start pixel, virtual rows included\n\t\tYEND = YSTART + int(hdul[0].header['WINY']) # End of window y direction, virtual rows included\n\t\tdata = FFM[EXT].data[YSTART:YEND]\n\t\tM.append(fits.ImageHDU(data))\n\t\tM[EXT].header.set(\"NAXIS1\", np.shape(M[EXT].data)[1])\n\t\tM[EXT].header.set(\"NAXIS2\", np.shape(M[EXT].data)[0])\n\t\tM[0].header.set(\"RECONSTR\", 'Y')\n\t\tM.writeto(TD+'/'+fn, clobber=True)\n\n\treturn M", "def addSuffixes(self, alist):\n for i, (word, filename) in enumerate(alist):\n withsuffix = self._findVideoFile(filename)\n alist[i] = (word, withsuffix)\n return alist", "def categorize_classifier_files(out_dir):\n\n #sort all of the classifier files into a dictionary\n class_files = glob.glob(\"feature_extraction_m*\")\n class_file_dict = {\"positive\":[], \"negative\":[]}\n class_cand_dict = {\"m1\":class_file_dict, \"m2\":class_file_dict, \"m3\":class_file_dict, \"m4\":class_file_dict, \"m5\":class_file_dict}\n\n for filename in class_files:\n split_name = filename.split(\"_\")[-1].split(\".\")\n model_num = split_name[0]\n det = split_name[-1]\n class_cand_dict[model_num][det].append(filename)\n\n #get all of the pfd files into a list\n class_file_m1 = glob.glob(\"feature_extraction_m1*\")\n pfd_files = []\n for afile in class_file_m1:\n f = open(afile, \"r\")\n for line in f.readlines():\n pfd_files.append(line)\n f.close()\n\n #fill a dictionary with pfds and a value for how many positive IDs each pfd has\n pulsar_pfds={}\n for key in pfd_files:\n pulsar_pfds[key]=0\n for model_num in class_cand_dict.keys():\n if class_cand_dict[model_num][\"positive\"]:\n print(class_cand_dict[model_num][\"positive\"])\n f = open(class_cand_dict[model_num][\"positive\"][0], \"r\")\n for line in f.readlines():\n pulsar_pfds[line]+=1\n f.close()\n\n #For each pfd with >=3 positive IDs, write that pfd to 'positive' file, else write to 'negative' file\n pos_f = open(os.path.join(out_dir, \"LOTAAS_positive_detections.txt\"), \"w+\")\n neg_f = open(os.path.join(out_dir, \"LOTAAS_negative_detections.txt\"), \"w+\")\n for pfd_key in pulsar_pfds.keys():\n if pulsar_pfds[pfd_key]>=3:\n print(\"detected pulsar: {}\".format(pfd_key))\n pos_f.write(pfd_key.split(\"/\")[-1])\n else:\n neg_f.write(pfd_key.split(\"/\")[-1])\n pos_f.close()\n neg_f.close()", "def load_joint_train_data_asvspoof(filelist_path, asv_feature_dir, cm_feature_dir):\n\n filelist = readlines_and_split_spaces(filelist_path)\n\n asv_features = []\n cm_features = []\n speaker_labels = []\n is_targets = []\n is_spoofs = []\n\n for speaker_id, filename, system_id, key in tqdm(filelist, desc=\"load\"):\n # ASV data\n filepath = os.path.join(asv_feature_dir, filename) + \".npy\"\n data = np.load(filepath).astype(np.float32)\n asv_features.append(data)\n\n # CM data\n filepath = os.path.join(cm_feature_dir, filename) + \".npy\"\n # Remember to transpose the features...\n # Great design there, dummy!\n data = np.load(filepath).astype(np.float32).T\n cm_features.append(data)\n\n 
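        # the per-trial labels below are appended in the same loop iteration, so
        # they stay index-aligned with asv_features and cm_features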
speaker_labels.append(speaker_id)\n\n # Target = target speaker sample and bona fide\n # Spoof samples have key = \"spoof\"\n is_targets.append(key == \"target\")\n # Also include information on if the trial is a spoof\n # sample or not\n is_spoofs.append(key == \"spoof\")\n\n return asv_features, cm_features, speaker_labels, is_targets, is_spoofs", "def DirFA():\n\n global Asm\n\n target.BoundarySync()\n\n families = ['TINY', 'AVR', 'MEGA', 'XMEGA', 'REDUCED', 'MINIMAL',\n 'CLASSIC8K']\n families += ['CLASSIC128K', 'ENHANCED8K', 'ENHANCED128K', 'ENHANCED4M']\n\n if dec.Asm.Parse_Pointer == 0:\n # No parameter given\n errors.DoError('missoper', False)\n return\n\n family = assem.GetWord().upper()\n if family not in families:\n errors.DoError('badoper', False)\n return\n\n dec.Asm.AVR_Family = families.index(family) + 1\n\n NoMore()", "def setfiles(self, filelist):\r\n self._filelist=filelist\r\n self._energy=self.readenergy(filelist)", "def _read_suffixes(lookup, suffixes):\n for uid in suffixes:\n d = suffixes[uid]\n s = lookup[uid] # suffixes keys are ids, so get suffix component\n for key in d: # set values from value dict\n try:\n kc = lookup[int(key)] # use int because json turn keys to string\n except KeyError:\n continue\n s[kc] = d[key]", "def mafft_multiple_alignment(path, id_protein, output_name):\n\n path_to_templates = path + 'Modeling/cleaned_template_fastas/'\n path_to_target = path + id_protein + '.fasta'\n with open('fastas_for_mafft', 'w') as fastas:\n\n # write target fasta in joint file\n\n target = open(path_to_target)\n for line in target:\n fastas.write(line)\n fastas.write(line)\n target.close()\n\n # write templates fastas in joint file\n\n number_of_fastas = 1 # 1 is for target\n templates = next(os.walk(path_to_templates))[2]\n print(templates)\n for i in templates:\n number_of_fastas += 1\n with open(path_to_templates + i) as template:\n for line in template:\n fastas.write(line)\n path_to_alignment = path + 'Modeling/fasta_alns_and_identities/'\n os.system('mafft --localpair --maxiterate 1000 fastas_for_mafft > ' + path_to_alignment + output_name)\n # os.remove('fastas_for_mafft')\n return number_of_fastas", "def cb_filelist(args):\n req = args[\"request\"]\n\n pyhttp = req.getHttp()\n config = req.getConfiguration()\n pathinfo = pyhttp[\"PATH_INFO\"]\n\n if not pathinfo.startswith(\"/\" + TRIGGER):\n return\n\n logger = tools.getLogger()\n\n data = req.getData()\n data[INIT_KEY] = 1\n datadir = config[\"datadir\"]\n data['root_datadir'] = config['datadir']\n wikidir = config.get(\"wikidir\", config['datadir'])\n\n # convert the / to os.sep so that we can use os.path stuff.\n wikidir = wikidir.replace(\"/\", os.sep)\n if not wikidir.endswith(os.sep):\n wikidir = wikidir + os.sep\n\n page_name = pathinfo[len(\"/\" + TRIGGER)+1:]\n\n if not page_name:\n return\n\n page_name = page_name.replace(\"/\", os.sep)\n\n if not page_name:\n return\n\n if page_name.endswith(os.sep):\n page_name = page_name[:-1]\n\n # if the page has a flavour, we use that. otherwise\n # we default to the wiki flavour\n page_name, flavour = os.path.splitext(page_name)\n if flavour:\n data[\"flavour\"] = flavour[1:]\n\n # wikifile should hold the absolute path on the file system to\n # the wiki file we're looking at. if it's in a parent directory\n # of wikidir, then we abort. 
\n wikifile = os.path.normpath(os.path.join(wikidir, page_name))\n if not wikifile.startswith(wikidir):\n logger.info(\"wiki file requested '%s' is not in wikidir.\" % wikifile)\n return []\n\n # we build our own config dict for the fileentry to kind of\n # fake it into loading this file correctly rather than\n # one of the entries.\n newdatadir = wikidir\n\n ext = tools.what_ext(data[\"extensions\"].keys(), wikifile)\n\n if not ext:\n logger.info(\"wiki file '%s' does not exist.\" % wikifile)\n return []\n\n data['root_datadir'] = page_name + '.' + ext\n data['bl_type'] = 'file'\n wikifile = wikifile + \".\" + ext\n\n if not os.path.isfile(wikifile):\n return []\n\n fe = FileEntry(req, wikifile, wikidir)\n\n # now we evaluate python code blocks\n body = fe.getData()\n body = eval_python_blocks(req, body)\n body = \"<!-- STATIC PAGE START -->\\n\\n%s\\n<!-- STATIC PAGE END -->\\n\" % body\n\n # now we evaluate for wikilinks\n body = connect_links(config[\"base_url\"],\n data[\"extensions\"].keys(),\n wikidir,\n body)\n\n fe.setData(body)\n\n fe[\"absolute_path\"] = TRIGGER\n fe[\"fn\"] = page_name\n fe[\"file_path\"] = TRIGGER + \"/\" + page_name\n fe[\"template_name\"] = \"wiki\"\n\n data['blog_title_with_path'] = \"%s : %s\" % \\\n (config.get(\"blog_title\", \"\"), fe.get(\"title_escaped\", \"\"))\n\n # set the datadir back\n config[\"datadir\"] = datadir\n\n return [fe]", "def features_combine():\n\n\n\t# PROCESSING AUDIO", "def main():\n namelst = open('cor_lamp.lst').readlines()\n namelst = ['awftbo'+i.strip() for i in namelst]\n for name in namelst:\n cor_keyword(name)\n func.set_airmass(name)", "def get_files_prefix(prefixes, dirname, Lshow=None, Ldir=None):\n matched_files=[]\n for pref in prefixes:\n print(f\"prefix: {pref} in {whereami()} of module {__name__}\")\n for fname in os.listdir(dirname):\n # re.match finds only prefix\n if re.match(pref, fname):\n if not Ldir and os.path.isdir(fname):\n continue\n matched_files.append(fname)\n #print (pref, fname)\n return matched_files", "def ModifyScenarioFiles(base_path):\n enumrealm = socket.gethostbyname('enum_realm')\n prirealm = socket.gethostbyname('prv_rsa')\n pubrealm = socket.gethostbyname('pub_rsa')\n\n strList = ['ENUM_REALM_IP','PRI_REALM_IP','PUB_REALM_IP']\n repList = [enumrealm,prirealm,pubrealm]\n fileName = ['Basic_Receiver_enum.xml','Basic_Receiver_pri.xml','Basic_Receiver_pub.xml']\n \n try:\n for i in range(len(strList)):\n zfile=open(base_path + fileName[i],\"r\")\n zList = zfile.readlines()\n zfile.close()\n\n for j in zList:\n if j.__contains__(strList[i]):\n str1 = j.replace(strList[i],repList[i])\n ind = zList.index(j)\n zList[ind] = str1\n break\n \n zfile=open(base_path + fileName[i],\"w\")\n zList = zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n log.error('error: %s' %str(e))", "def prep_filename_masks(mask:str)->(list,list):\n mask = mask.strip()\n if '\"' in mask:\n # Temporary replace all ' ' into \"\" to '·'\n re_binqu= re.compile(r'\"([^\"]+) ([^\"]+)\"')\n while re_binqu.search(mask):\n mask= re_binqu.sub(r'\"\\1·\\2\"', mask) \n masks = mask.split(' ')\n masks = [m.strip('\"').replace('·', ' ') for m in masks if m]\n else:\n masks = mask.split(' ')\n fi_masks= [m for m in masks if m and m[0]!='/']\n fo_masks= [m[1:] for m in masks if len(m)>1 and m[0]=='/']\n return (fi_masks, fo_masks)", "def get_effective_lumi(array_from_googledoc,_era,_skoutput ,data_skoutput, _skdatadir,_dirlist,_summary_path,skim_list, _workdir, RunFull):\n\n print_message(1,\"get_effective_lumi 
[\"+_era+\"]\")\n\n \"\"\" loop over dataset list on tamsa\"\"\"\n var_url = get_url_from_era(_era,False)\n\n arr_alias=[]\n arr_alias_torun=[]\n array_gd = array_from_googledoc\n\n for dsn in _dirlist:\n \n array_gd = array_from_googledoc\n\n\n var_alias = find_googledoc_var_from_dsn(array_gd,_era,\"alias\", dsn)\n\n if var_alias == \"NULL\" :\n print \"Skipping NULL [get_effective_lumi] \" +dsn\n continue\n\n if not os.path.exists(data_skoutput+\"/\"+ var_alias +\".txt\"):\n print \"get_effective_lumi: adding \" + var_alias + \" to processing list\"\n arr_alias_torun.append(var_alias)\n\n \n arr_alias.append(var_alias)\n if os.path.exists( _workdir+\"/MC\"+_era+\".txt\"):\n os.system(\"rm \" + _workdir + \"/MC\"+_era+\".txt\")\n\n w_list=open(_workdir+\"/MC\"+_era+\".txt\",\"w\")\n\n for x in arr_alias_torun:\n w_list.write(x.split()[0]+\"\\n\")\n w_list.close()\n \n return_list=[]\n\n if len(arr_alias_torun) > 0:\n currentdir = os.getenv(\"PWD\")\n print \"SKFlat.py -a GetEffLumi -l \"+currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt -n 50 --nmax 300 -e \"+_era\n\n os.chdir(os.getenv(\"SKFlat_WD\"))\n os.system(\"SKFlat.py -a GetEffLumi -l \"+currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt -n 50 --nmax 300 -e \"+_era )\n for x in arr_alias_torun:\n print \"SKFlat.py -a GetEffLumi -i \"+x.split()[0] +\" -n 50 --nmax 300 -e \"+_era \n #print ('cp ' + _skoutput + \"/GetEffLumi/\"+_era + \"/GetEffLumi_\"+ x.split()[0] +\".root \" + data_skoutput+\"/\")\n #os.system('cp ' + _skoutput + \"/GetEffLumi/\"+_era + \"/GetEffLumi_\"+ x.split()[0] +\".root \" + data_skoutput+\"/\")\n\n #GetEFf=False\n #while not GetEFf:\n # l_userinput= raw_input ('Check if Eff lumi job is ok? [y/ MCname]:') \n # if l_userinput == \"y\" : \n # print \"Good\"\n # GetEFf=True\n # else:\n # os.system(\"SKFlat.py -a GetEffLumi -i \"+l_userinput+\" -n 50 --nmax 300 -e \"+_era )\n \n\n for skim in skim_list:\n new_list=open(currentdir+\"/\"+_workdir+\"/MC\"+_era+\".txt\",\"r\")\n new_skimlist=open(currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt\",\"w\")\n runSkim=False\n for l in new_list:\n l = l.split()[0]\n #allowed_inputs=['y','n']\n #l_userinput ='NULL'\n #while not l_userinput in allowed_inputs:\n # l_userinput= raw_input ('Sample to update ['+l+']: make skim ' + skim + ' [y/n]:')\n\n \n if run_skim_from_googledoc(_era,l , skim,var_url) == \"Y\": #l_userinput == \"y\":\n print 'Sample to update ['+l+']: make skim ' + skim\n new_skimlist.write(l+'\\n')\n runSkim=True\n return_list.append(find_googledoc_var_from_alias(_era, \"dsn\", l.split()[0],var_url))\n\n new_list.close()\n new_skimlist.close()\n if runSkim:\n os.system(\"SKFlat.py -a \"+skim+\" -l \"+currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt -n 100 --nmax 300 -e \"+_era )\n \n \n os.system(\"rm \"+currentdir+\"/\"+_workdir+\"/MC_\"+skim+\"_\"+_era+\".txt\")\n os.chdir(currentdir)\n \n else:\n print \"get_effective_lumi: all samples proccessed previously\"\n\n ''' delete job submittion file '''\n os.system(\"rm \"+ _workdir+ \"/MC\"+_era+\".txt\") \n\n \n ''' run over ds list at tamsa and fill common samplefile'''\n\n print ('Fill CommonSampleFiles')\n\n update_array=[]\n\n for dsn in _dirlist:\n ''' access alias and xsec fmor google doc'''\n\n array_gd = array_from_googledoc\n\n var_alias = find_googledoc_var_from_dsn(array_gd,_era,\"alias\", dsn)\n if var_alias == \"NULL\" :\n continue\n\n\n if not RunFull:\n if not var_alias in arr_alias_torun:\n print (\"skipping \" + var_alias + \" since not running Full mode\")\n continue\n \n else:\n 
print 'Filling for ' + dsn\n\n var_xsec = find_googledoc_var_from_dsn(array_gd,_era,\"xsec\" , dsn)\n\n #''' get nevents from GetEffLumi job'''\n\n dirpath = _skoutput + \"/GetEffLumi/\"+_era + \"/\"\n _file = ROOT.TFile(dirpath + \"/GetEffLumi_\"+ var_alias + \".root\")\n hist = _file.Get(\"sumW\")\n nevents_w = hist.Integral()\n signhist = _file.Get(\"sumSign\")\n nevents_sign = signhist.Integral()\n _file.Close()\n nevents_no_w=0\n \n orig_xsec=\"\"\n orig_nevent_no_w=\"\"\n orig_nevent_sign=\"\"\n orig_nevent_w=\"\"\n\n \n print \"Reading : \" + dsn\n print \"Reading \" + _skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\"\n\n orig_common_list = open(_skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\",\"r\")\n\n for line in orig_common_list:\n if not \"#\" in line:\n if len(line.split()) < 1:\n continue\n orig_xsec=line.split()[2]\n orig_nevent_no_w=line.split()[3]\n orig_nevent_sign=line.split()[4]\n orig_nevent_w=line.split()[5]\n orig_common_list.close()\n\n print ('Filled for original values')\n update_file=False\n\n if not orig_xsec == var_xsec:\n update_file=True\n print \"CommonSampleInfo xsec updated for \" + var_alias\n\n #if not orig_nevent_no_w==str(nevents_no_w):\n #update_file=True\n #print \"CommonSampleInfo xsec updated for nevents_no_w \" + str(nevents_no_w)\n if not float(orig_nevent_w)==nevents_w:\n update_file=True\n nevents_no_w=nevents(_skdatadir + _era+ \"/Sample/ForSNU/\"+var_alias + \".txt\")\n print \"CommonSampleInfo updated for nevents_w \" + str(nevents_w)\n\n elif not float(orig_nevent_sign)==nevents_sign:\n update_file=True\n nevents_no_w=nevents(_skdatadir + _era+ \"/Sample/ForSNU/\"+var_alias + \".txt\")\n print \"CommonSampleInfo updated for nevents_sign \" + str(nevents_sign)\n else: \n nevents_no_w=orig_nevent_no_w\n\n if update_file:\n ''' make commonfile for alias'''\n common_list=open(_skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\",\"w\")\n common_list.write(\"# alias PD xsec nmc sumsign sumw\\n\")\n common_list.write( var_alias + \"\\t\" + dsn + \"\\t\" + var_xsec + \"\\t\" + str(nevents_no_w) +\"\\t\"+ str(nevents_sign) +\"\\t\"+ str(nevents_w)+\" \\n\") \n\n\n common_list.close()\n os.system(\"git diff \" + _skdatadir + _era+ \"/Sample/CommonSampleInfo/\"+var_alias+\".txt\")\n\n update_array.append([var_alias,var_xsec, nevents_no_w, nevents_sign , nevents_w])\n\n\n if len(update_array) > 0:\n update_summarymc_file(_era)\n\n return return_list", "def files_to_map(\n files,\n despike_l1b=False,\n only_long_exposures=False,\n only_short_exposures=False,\n only_short_flare_exposures=False,\n):\n # Avoid circular imports\n from sunkit_instruments.suvi.suvi import despike_l1b_array\n\n if isinstance(files, str):\n files = [files]\n files = sorted(files)\n if any(fn in os.path.basename(files[0]) for fn in COMPOSITE_MATCHES):\n composites = True\n elif any(fn in os.path.basename(files[0]) for fn in L1B_MATCHES):\n composites = False\n else:\n raise ValueError(\n f\"First file {files[0]} does not look like a SUVI L1b file or L2 HDR composite.\"\n )\n\n datas = []\n headers = []\n for afile in files:\n logging.debug(f\"Reading {afile}\")\n if composites:\n if any(fn in os.path.basename(afile) for fn in COMPOSITE_MATCHES):\n header, data, _ = read_suvi(afile)\n datas.append(data)\n headers.append(header)\n else:\n warn_user(\n f\"File {afile} does not look like a SUVI L2 HDR composite. 
Skipping.\"\n )\n else:\n if any(fn in os.path.basename(afile) for fn in L1B_MATCHES):\n header, data, dqf_mask = read_suvi(afile)\n if despike_l1b:\n data = despike_l1b_array(data, dqf_mask)\n if only_long_exposures:\n if \"long_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_exposures:\n if \"short_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n elif only_short_flare_exposures:\n if \"short_flare_exposure\" in header[\"SCI_OBJ\"]:\n datas.append(data)\n headers.append(header)\n else:\n datas.append(data)\n headers.append(header)\n else:\n warn_user(f\"File {afile} does not look like a SUVI L1b file. Skipping.\")\n if len(datas) == 1:\n return sunpy.map.Map(datas[0], headers[0])\n elif len(datas) > 1:\n return sunpy.map.Map(list(zip(datas, headers)), sequence=True)\n else:\n warn_user(\"List of data/headers is empty.\")", "def gather_initial_fullnames():\n\n infullnames = []\n for (dirpath, _, filenames) in os.walk('.'):\n dpath = dirpath[2:]\n if dpath:\n dpath += '/'\n for fname in filenames:\n infullnames.append('%s%s' % (dpath, fname))\n\n if miscutils.fwdebug_check(6, 'PFWRUNJOB_DEBUG'):\n miscutils.fwdebug_print(\"initial infullnames=%s\" % infullnames)\n return infullnames", "def add_files(self, *files):\n for f in files:\n # if file contains actual aperture magnitudes\n if \"mag_calib_unc\" in Table.read(f, format=\"ascii\").colnames:\n LightCurve.__mag_file_append(self, f)\n # if table contains limiting magnitudes\n else: \n LightCurve.__limmag_file_append(self, f)", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, j) / normalize_factor)", "def interpretor(file_list):\n for i in range(len(file_list)):\n l_seq = 0\n l_var = 0\n l_ind = 0\n inds = 0 #This variable is used to specify wheter there are more than 1 \"-\" in a row.\n with open(\"alignments/\"+file_list[i],'r') as f:\n regel = f.read().split() #Viewing each file as a whole.\n for item in regel[5:]: #Only from the 5th element there is relevant information present.\n if item.startswith(\"*\"):\n l_var += (len(item))\n elif item[0].isupper() or item[0] == \"-\": #only lines that starts with capital letters or - are sequence lines.\n for char in item: #Viewing individual character in list item.\n if char == \"-\" or char.isupper(): \n l_seq += 1\n if char == \"-\":\n inds+=1\n elif char.isupper() and inds != 0: # if inds > 1. 
This means there are more than 1 \"-\" in a row.\n l_ind+=1 # This is important because the program needs to reconginze this as 1 indel.\n inds = 0 # Reset the indel count.\n\n fill_var_calls(file_list[i],l_seq,l_var,l_ind) #After each iteration the the file_var_calls method is executed.", "def fusion(first_fh, fused_fh, compare_file):\r\n # initialize\r\n ha_seq = \"\"\r\n ha_header = \"\"\r\n # parse through file\r\n for line in first_fh:\r\n # if a > is found assume it is header\r\n if line[0] == \">\":\r\n # ha_header = line\r\n # if the header is found (length > 0)\r\n if len(ha_header) > 0:\r\n # pull needed information from header to make new one\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n # print(ha_header)\r\n # Call find_match function, input the file to search and the new header created.\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n # if return is equal then write to new file with two sequences fused\r\n if na_header == ha_header:\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n # reset variables\r\n ha_header = line\r\n ha_seq = \"\"\r\n\r\n else:\r\n # if it is part of the sequence\r\n ha_seq = ha_seq + line\r\n\r\n # To return/write the last entries in the files, won't get written in loop\r\n matches = re.findall(\"(Strain Name:[AB]\\/[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n subtype_match = re.findall(\"(Subtype:[A-Za-z0-9]+)\", ha_header)\r\n organ = re.findall(\"(Organism:[\\/A-Za-z 0-9()\\\\-_'.]+)\", ha_header)\r\n ha_header = \">\" + organ[0] + \"|\" + matches[0] + \"|\" + subtype_match[0]\r\n na_header, na_seq = find_match(compare_file, ha_header)\r\n if na_header == ha_header:\r\n # print(\"matches2\")\r\n # print(ha_header)\r\n write_data_2(fused_fh, ha_header, ha_seq.strip() + \"\\n\" + na_seq.strip())\r\n\r\n # Close Files\r\n first_fh.close()\r\n fused_fh.close()", "def generate_flare_set(self):\n\n # x_count = 10\n # m_count = 10\n files = []\n for file in os.listdir(self.flare_path):\n files.append(file)\n # if file[0] == 'M' and m_count >0:\n # files.append(file)\n # m_count-=1\n # elif file[0] == \"X\" and x_count >0:\n # files.append(file)\n # x_count-=1\n\n return files", "def batch_load(self, setup_list):\n players = 1\n default = (self.mission.enemy_faction.default_ship.id, 1, 'A')\n for s in setup_list:\n if not s: s = default\n action = s[2]\n init = s[1]\n chassis = s[0]\n self.mission.fgsetup_set.create(flight_group=self, chassis_id=chassis, action=action, players=players, init=init)", "def fetch_hst_calibs(flt_file, ftpdir='https://hst-crds.stsci.edu/unchecked_get/references/hst/', calib_types=['BPIXTAB', 'CCDTAB', 'OSCNTAB', 'CRREJTAB', 'DARKFILE', 'NLINFILE', 'PFLTFILE', 'IMPHTTAB', 'IDCTAB', 'NPOLFILE'], verbose=True):\n import os\n \n im = pyfits.open(flt_file)\n if im[0].header['INSTRUME'] == 'ACS':\n ref_dir = 'jref'\n \n if im[0].header['INSTRUME'] == 'WFC3':\n ref_dir = 'iref'\n \n if not os.getenv(ref_dir):\n print('No ${0} set! 
Put it in ~/.bashrc or ~/.cshrc.'.format(ref_dir))\n return False\n \n for ctype in calib_types:\n if ctype not in im[0].header:\n continue\n \n if verbose:\n print('Calib: {0}={1}'.format(ctype, im[0].header[ctype]))\n \n if im[0].header[ctype] == 'N/A':\n continue\n \n fetch_hst_calib(im[0].header[ctype], ftpdir=ftpdir, verbose=verbose)\n \n return True", "def _separate_amplicons( file_list, reference_fasta, locus):\n subread_file, other_files = _separate_file_list( file_list, locus )\n alignment = _align_subreads( subread_file, reference_fasta, locus )\n locations = _parse_alignment( alignment )\n os.remove( alignment )\n medians = _calculate_medians( locations )\n centroids = _identify_centroids( locations, medians )\n assignments = _assign_reads( medians, centroids )\n new_subread_files = _write_assigned_reads( subread_file, assignments )\n return new_subread_files + other_files", "def combine_mappings(fasta_fh, mapping_fh, denoised_seqs_fh,\r\n otu_picker_otu_map_fh, out_dir):\r\n\r\n # read in mapping from split_library file\r\n labels = imap(lambda a_b: a_b[0], parse_fasta(fasta_fh))\r\n # mapping from seq_id to sample_id\r\n sample_id_mapping = extract_read_to_sample_mapping(labels)\r\n\r\n denoiser_mapping = read_denoiser_mapping(mapping_fh)\r\n # read in cd_hit otu map\r\n # and write out combined otu_picker+denoiser map\r\n otu_fh = open(out_dir + \"/denoised_otu_map.txt\", \"w\")\r\n for otu_line in otu_picker_otu_map_fh:\r\n otu_split = otu_line.split()\r\n\r\n otu = otu_split[0]\r\n ids = otu_split[1:]\r\n\r\n get_sample_id = sample_id_mapping.get\r\n # concat lists\r\n # make sure the biggest one is first for pick_repr\r\n all_ids = sort_ids(ids, denoiser_mapping)\r\n all_ids.extend(sum([denoiser_mapping[id] for id in ids], []))\r\n try:\r\n otu_fh.write(\"%s\\t\" % otu +\r\n \"\\t\".join(map(get_sample_id, all_ids)) + \"\\n\")\r\n except TypeError:\r\n # get returns Null if denoiser_mapping id not present in\r\n # sample_id_mapping\r\n print \"Found id in denoiser output, which was not found in split_libraries \" +\\\r\n \"output FASTA file. 
Wrong file?\"\r\n exit()\r\n\r\n fasta_out_fh = open(out_dir + \"/denoised_all.fasta\", \"w\")\r\n for label, seq in parse_fasta(denoised_seqs_fh):\r\n id = label.split()[0]\r\n newlabel = \"%s %s\" % (sample_id_mapping[id], id)\r\n fasta_out_fh.write(BiologicalSequence(seq, id=newlabel).to_fasta())", "def set_in_files():\r\n\tindatadir = '/nobackup/ejblom/reddit'\r\n\tcom_dir = '/comments'\r\n\tsubm_dir = '/submissions'\r\n\tglob_end = '/filtered*'\r\n\tcom_glob_str = indatadir + com_dir + glob_end\r\n\tsubm_glob_str = indatadir + subm_dir + glob_end\r\n\tinfilenames = sorted(glob.glob(com_glob_str)) + sorted(glob.glob(subm_glob_str))\r\n\treturn infilenames", "def dir_resolution(self, src_path, frag_length=128):\n src_path = os.path.join(self.root_path, src_path)\n files = os.listdir(src_path)\n\n MFCCs = None\n labels = None\n cnt = 1\n total_num = len(files)\n for wav in files:\n wav_path = os.path.join(src_path, wav)\n MFCCs_each, labels_each = self.features_and_labels(wav_path, frag_length)\n if MFCCs is not None:\n MFCCs = torch.cat((MFCCs, MFCCs_each))\n labels = torch.cat((labels, labels_each))\n else:\n MFCCs, labels = MFCCs_each, labels_each\n\n if cnt % 1000 == 0:\n print('{} data pieces have been loaded in and {} are left'.format(cnt, total_num-cnt))\n cnt += 1\n\n np.save(self.feature_file, MFCCs.numpy()) \n np.save(self.label_file, labels.numpy())\n print('Loading into files finished!')", "def get_condid(filen, flist):\n if (filen in flist[0]) or (filen in flist[1]) or \\\n (filen in flist[2]) or (filen in flist[3]):\n blk_target = 19\n elif (filen in flist[4]):\n blk_target = 16\n return blk_target", "def get_all_fic_paths(path_list):\n fic_list = []\n for path in path_list:\n fics = [fic for fic in os.listdir(path) if re.search(r'.*\\.html', fic) is not \n None]\n fics = map(lambda x: '%s%s' %(path, x), fics)\n \n fic_list = fic_list + fics\n \n return fic_list", "def mel_gff_list():\n\tmod_gff3 = sys.argv[1]\n\twith open(mod_gff3, 'r') as f:\n\t\tgff = [line.strip().split('\\t') for line in f]\n\t\tf.close()\n\treturn gff\n\t#gff_list ex/:\n\t#['2L', 'FlyBase', 'gene', '7529', '9484', '.', '+', '.', 'ID=FBgn0031208;Name=CG11023;Ontology_term=SO:0000010,SO:0000087,GO:0016929,GO:0016926;Dbxref=FlyBase:FBan0011023,FlyBase_Annotation_IDs:CG11023,GB_protein:ACZ94128,GB_protein:AAO41164,GB:AI944728,GB:AJ564667,GB_protein:CAD92822,GB:BF495604,UniProt/TrEMBL:Q86BM6,INTERPRO:IPR003653,GB_protein:AGB92323,UniProt/TrEMBL:M9PAY1,OrthoDB7_Drosophila:EOG796K1P,OrthoDB7_Diptera:EOG7X1604,EntrezGene:33155,UniProt/TrEMBL:E1JHP8,UniProt/TrEMBL:Q6KEV3,OrthoDB7_Insecta:EOG7Q8QM7,OrthoDB7_Arthropoda:EOG7R5K68,OrthoDB7_Metazoa:EOG7D59MP,InterologFinder:33155,BIOGRID:59420,FlyAtlas:CG11023-RA,GenomeRNAi:33155;gbunit=AE014134;derived_computed_cyto=21A5-21A5'], ['2L', 'FlyBase', 'gene', '9839', '21376', '.', '-', '.', 'ID=FBgn0002121;Name=l(2)gl;fullname=lethal (2) giant larvae;Alias=Lgl,lgl,lethal giant larvae,lethal giant larve,lethal giant larva,lethal(2)giant larvae,Complementation group 2.1,Lethal Giant Larvae,dlgl,p127l(2)gl,LGL,l(2) giant larva,CG2671,L(2)GL,p127,l(2)giant larvae,D-LGL,l(2),gl,l[[2]]gl,l-gl,lethal-giant-larvae,Lethal giant larvae,Lethal (2) giant larvae,L(2)gl,Lethal (2) giant larva,Lethal-giant-larvae,MENE (2L)-B,lethal(2) giant larvae,p127[l(2)gl],lethal(2)-giant larvae,lethal-2-giant larvae,l(2) giant larvae,lethal- giant-larvae,Lethal(2)giant larvae,Lethal-2-giant 
larvae;Ontology_term=SO:0000010,SO:0000087,GO:0005578,GO:0005886,GO:0007269,GO:0016082,GO:0008021,GO:0008283,GO:0016334,GO:0016336,GO:0016333,GO:0016335,GO:0016327,GO:0005829,GO:0045175,GO:0016332,GO:0045184,GO:0007399,GO:0005938,GO:0005737,GO:0007179,GO:0045197,GO:0045196,GO:0002009,GO:0005918,GO:0008105,GO:0045167,GO:0008104,GO:0045746,GO:0007423,GO:0008285,GO:0001738,GO:0016323,GO:0007391,GO:0005856,GO:0030154,GO:0042127,GO:0005614,GO:0045159,GO:0035072,GO:0007559,GO:0045200,GO:0008360,GO:0019991,GO:0007406,GO:0051726,GO:0051668,GO:0007314,GO:0016325,GO:0030036,GO:0030863,GO:0035070,GO:0055059,GO:0035212,GO:0035293,GO:0090163,GO:0048730,GO:0000132,GO:0098725,GO:0060429,GO:0007293,GO:0045176,GO:0072697,GO:0000149,SO:0000548,GO:0005920,GO:0017022,GO:0004860,GO:0006469;Dbxref=FlyBase:FBan0002671,FlyBase_Annotation_IDs:CG2671,INTERPRO:IPR015943,GB_protein:AAN10503,GB_protein:AAG22256,GB_protein:AAN10502,GB_protein:AAN10501,GB_protein:AAF51570,GB_protein:AAG22255,INTERPRO:IPR017986,GB:AA246243,GB:AW942062,GB:AY051654,GB_protein:AAK93078,GB:BH809482,GB:CZ471313,GB:CZ482024,GB:CZ484691,GB:M17022,GB_protein:AAA28671,GB_protein:AAA28672,GB:X05426,GB_protein:CAA29007,UniProt/Swiss-Prot:P08111,INTERPRO:IPR000664,INTERPRO:IPR001680,INTERPRO:IPR013577,GB_protein:AGB92324,UniProt/TrEMBL:M9NCX1,UniProt/TrEMBL:M9PBJ2,OrthoDB7_Drosophila:EOG7CW2GT,OrthoDB7_Diptera:EOG7DRVK2,GB_protein:AFH03479,GB_protein:AFH03478,GB_protein:AFH03481,GB_protein:AFH03480,EntrezGene:33156,INTERPRO:IPR013905,BDGP_clone:PC00404,OrthoDB7_Insecta:EOG7SRGKH,OrthoDB7_Arthropoda:EOG7ZDD82,OrthoDB7_Metazoa:EOG79W94C,InterologFinder:33156,FlyAtlas:CG2671-RB,BIOGRID:59421,Fly-FISH:CG2671,GenomeRNAi:33156,INTERACTIVEFLY:/cytoskel/lethl2g1.htm;gbunit=AE014134;derived_computed_cyto=21A5-21A5'],\n\t# ['2L', 'FlyBase', 'ncRNA', '286383', '288292', '.', '+', '.', 'ID=FBtr0347595;Name=CR46263-RA;Parent=FBgn0267996;Dbxref=FlyBase_Annotation_IDs:CR46263-RA;score_text=Weakly Supported;score=0'], ['2L', 'FlyBase', 'gene', '287252', '289144', '.', '-', '.', 'ID=FBgn0025686;Name=Amnionless;fullname=Amnionless ortholog;Alias=FBgn0031246,CG11592,CK02467,BEST:CK02467,dAMN,Amnionless;Ontology_term=SO:0000010,SO:0000087,GO:0046331,GO:0097206,GO:0016021,GO:0097017;Dbxref=FlyBase:FBan0011592,FlyBase_Annotation_IDs:CG11592,GB_protein:AAF51514,GB:AA141784,GB:CZ468687,UniProt/TrEMBL:Q9VPN2,GB_protein:AGB92350,OrthoDB7_Drosophila:EOG7CGKJK,EntrezGene:33199,BDGP_clone:IP03221,OrthoDB7_Diptera:EOG774804,INTERPRO:IPR026112,OrthoDB7_Insecta:EOG7G266G,OrthoDB7_Arthropoda:EOG7P65FW,OrthoDB7_Metazoa:EOG7ZGX2W,InterologFinder:33199,FlyAtlas:CG11592-RA,GenomeRNAi:33199;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292419', '293222', '.', '+', '.', 'ID=FBgn0031247;Name=CG11562;Alias=FBgn0063011,BcDNA:RE44650;Ontology_term=SO:0000010,SO:0000087,GO:0005739,GO:0003674,GO:0008150;Dbxref=FlyBase:FBan0011562,FlyBase_Annotation_IDs:CG11562,GB_protein:AAF51513,GB:AI520524,GB:AI945841,GB:AY119645,GB_protein:AAM50299,GB:BE662187,GB:BI358003,UniProt/TrEMBL:Q9VPN3,OrthoDB7_Drosophila:EOG7HTW3H,OrthoDB7_Diptera:EOG7200K9,EntrezGene:33200,BDGP_clone:RE44650,OrthoDB7_Insecta:EOG7B9454,OrthoDB7_Arthropoda:EOG7RK278,OrthoDB7_Metazoa:EOG78H3X3,FlyAtlas:CG11562-RA,INTERPRO:IPR031568,Fly-FISH:CG11562,GenomeRNAi:33200;gbunit=AE014134;derived_computed_cyto=21B7-21B7'], ['2L', 'FlyBase', 'gene', '292959', '294681', '.', '-', '.', 'ID=FBgn0017457;Name=U2af38;fullname=U2 small nuclear riboprotein auxiliary factor 
38;Alias=FBgn0010626,U2AF38,U2AF,dU2AF38,DU2AF38,CG3582,dU2AF[38],l(2)06751,u2af38,U2AF 38;Ontology_term=GO:0089701,SO:0000010,SO:0000087,GO:0000398,GO:0008187,GO:0005681,GO:0005686,GO:0000381,GO:0005634,GO:0003729,GO:0007052,GO:0071011,GO:0008380,GO:0000166,GO:0046872;Dbxref=FlyBase:FBan0003582,FlyBase_Annotation_IDs:CG3582,GB_protein:AAF51512,GB:AA264081,GB:AA820431,GB:AC004115,GB:AC008371,GB:AI061776,GB:AI455418,GB:AI944553,GB:AQ026079,GB:AY058537,GB_protein:AAL13766,GB:U67066,GB_protein:AAB17271,UniProt/Swiss-Prot:Q94535,INTERPRO:IPR000504,INTERPRO:IPR000571,INTERPRO:IPR009145,INTERPRO:IPR012677,GB_protein:AGB92351,UniProt/TrEMBL:M9PBM1,OrthoDB7_Drosophila:EOG7FRM2M,OrthoDB7_Diptera:EOG700KS6,EntrezGene:33201,BDGP_clone:LD24048,OrthoDB7_Insecta:EOG76QSHP,OrthoDB7_Arthropoda:EOG7KMJ7T,OrthoDB7_Metazoa:EOG70089G,apodroso:10448-U2af38[k14504],InterologFinder:33201,FlyAtlas:CG3582-RA,BIOGRID:59457,Fly-FISH:CG3582,GenomeRNAi:33201;gbunit=AE014134;derived_computed_cyto=21B7-21B8']]", "def redisperse_list(files,dw,w1,w2,key='spec'):\r\n input_list = ','.join(files)\r\n disp_files = [f.replace(key, key+'-disp') for f in files]\r\n output_disp_list = ','.join(disp_files)\r\n iraf.unlearn('dispcor')\r\n iraf.dispcor.input = input_list\r\n iraf.dispcor.output = output_disp_list\r\n # keep existing wavelength endpoints\r\n iraf.dispcor.dw = dw\r\n iraf.dispcor.w1 = w1\r\n iraf.dispcor.w2 = w2\r\n iraf.dispcor.flux = 'no'\r\n iraf.dispcor()\r\n # write text files\r\n for output in disp_files:\r\n iraf.wspectext(output, output.replace('fits', 'txt'), header=\"no\")\r\n\r\n return disp_files", "def fix_seqname(sname):\r\n # protid is on each line of the FASTA file; splitting doesn't really do anything\r\n # protid = sname.split(' ')\r\n # TK 2020-07-22\r\n # Dictionary for filenames so that we know which CDS file to query for each\r\n # protein ID.\r\n lookups = {\r\n 'AET' : 'Aegilops_tauschii.Aet_v4.0.cds.all.fa',\r\n\t'PNS' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'PNT' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQJ' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'KQK' : 'Brachypodium_distachyon.Brachypodium_distachyon_v3.0.cds.all.fa',\r\n\t'Dr' : 'Dioscorea_rotundata.TDr96_F1_Pseudo_Chromosome_v1.0.cds.all.fa',\r\n\t'Et' : 'Eragrostis_tef.ASM97063v1.cds.all.fa',\r\n\t'HORVU' : 'Hordeum_vulgare.IBSC_v2.cds.all.fa',\r\n\t'LPERR' : 'Leersia_perrieri.Lperr_V1.4.cds.all.fa',\r\n\t'GSMUA' : 'Musa_acuminata.ASM31385v1.cds.all.fa',\r\n\t'OBART' : 'Oryza_barthii.O.barthii_v1.cds.all.fa',\r\n\t'ORGLA' : 'Oryza_glaberrima.Oryza_glaberrima_V1.cds.all.fa',\r\n\t'ONIVA': 'Oryza_nivara.Oryza_nivara_v1.0.cds.all.fa',\r\n\t'ORUFI' : 'Oryza_rufipogon.OR_W1943.cds.all.fa',\r\n\t'PVH' : 'Panicum_hallii_fil2.PHallii_v3.1.cds.all.fa',\r\n\t'Sspon' : 'Saccharum_spontaneum.Sspon.HiC_chr_asm.cds.all.fa',\r\n\t'KQL' : 'Setaria_italica.Setaria_italica_v2.0.cds.all.fa',\r\n\t'TraesCS' : 'Triticum_aestivum.IWGSC.cds.all.fa',\r\n\t'Zm' : 'Zea_mays.B73_RefGen_v4.cds.all.fa',\r\n\t'Zlat': 'Zlat_V1.cds.fa',\r\n 'FUN': 'rice.transcripts.fa',\r\n 'Os': 'Oryza_sativa.IRGSP-1.0.cds.all.fa'\r\n }\r\n # Get the filename based on what the sequence starts with.\r\n for id_start, cds_file in lookups.items():\r\n if sname.startswith(id_start):\r\n target_file = cds_file\r\n break\r\n # Return the protein name and CDS target file as a tuple\r\n return (target_file, sname)\r\n\r\n # Make a lookup table to get the species name based on the protein 
ID.\r\n # lookups = [('Zlat*','Zizania_latifolia'),('FUN*','Zizania_palustris'),('Os*','Oryza_sativa')]\r\n # Initialize an empty species dictionary to assist in connecting protid (gene name) to species name\r\n # species_dict = {}\r\n # # This for loop will populate the species dictionary so that we can get species name keyed on the protid (gene name)\r\n # for i in protid:\r\n # species = lookup(i, lookups)\r\n # return species.encode, i\r\n # species_dict[protid] = species.encode()\r\n # return None\r", "def list_of_sorted_calib_files_from_list_of_files(list_of_files) : \n list_of_calib_files = []\n for file in list_of_files :\n cfile = CalibFile(str(file))\n if cfile.is_calib_file() :\n list_of_calib_files.append(cfile)\n #cfile.print_member_data()\n\n return sorted(list_of_calib_files) # sorted() uses reimplemented method CalibFile.__cmp__()", "def mkglob(fullpaths: list, trim=False) -> str:\n string_list = []\n glob = None\n for fname in fullpaths:\n if trim:\n fname = re.sub(r\"^.*/(.*)$\", r\"\\1\", fname)\n # fname = re.sub(r\"^(.*)\\.fits?(\\.fz)*$\", r\"\\1\", fname)\n fname = re.sub(r\"^([^\\.]*)\\..*$\", r\"\\1\", fname) # trim suffix\n string_list.append(fname)\n logging.debug(\"string_list[]={}\".format(string_list))\n if len(string_list) == 1:\n glob = string_list[0]\n elif len(string_list) > 1:\n # title is longest common substring array\n # joined with *'s to look like a glob pattern\n ss_arr = []\n get_lcs_array(string_list, ss_arr, 0, \"\", 2)\n if ss_arr:\n glob = \"{}\".format(\"*\".join(ss_arr))\n if not re.match(ss_arr[0], string_list[0]):\n glob = \"*{}\".format(glob)\n if not re.search(r\"{}$\".format(ss_arr[-1]), string_list[0]):\n glob = \"{}*\".format(glob)\n return glob", "def pele_folders(input_, file_list, dir_=None):\r\n os.chdir(\"../\")\r\n if not dir_:\r\n base = basename(input_)\r\n base = base.replace(\".pdb\", \"\")\r\n else:\r\n base = dir_\r\n count = 0\r\n folder = []\r\n for files in file_list:\r\n name = basename(files)\r\n name = name.replace(\".pdb\", \"\")\r\n if not count:\r\n hold = \"bla\"\r\n count += 1\r\n if name != \"original\" and hold != name[:-1]:\r\n hold = name[:-1]\r\n folder.append(\"mutations_{}/{}\\n\".format(base, hold))\r\n with open(\"dirnames_{}.txt\".format(base), \"w\") as txt:\r\n txt.writelines(folder)", "def get_file_flag(self):\n flag_list = os.listdir(self.path)\n temp_flag_list = []\n for flag in flag_list[:5]:\n result = re.match('^(\\w{2}\\d{6}\\_)(\\d{8})', flag)\n if result:\n temp_flag_list.append(result[2])\n self.flag_list = list(set(temp_flag_list))", "def prep_files(app):\n smali_paths = []\n start = time.time()\n \n for root, dirs, files in os.walk(app, topdown=False):\n for name in files:\n if name[-6:] == \".smali\":\n smali_paths.append(str(os.path.join(root, name)))\n \n return smali_paths", "def find_FEfiles(self):\n \n\t\t# take each item in the list of files to eve.\n for i in self.files_to_find:\n filename1 = i[0] \n file1_dye = i[1]\n filename2 = i[2]\n file2_dye = i[3]\n out_file_prefix=i[4]\n\n # set filename as none to help identify when no match has been found below\n file1_filename = None\n file2_filename = None\n\n # search for a FE file which matches the filename pattern\n for afile in os.listdir(self.chosenfolder):\n # file 1\n if fnmatch.fnmatch(afile, filename1):\n file1_filename = afile\n \n # file 2\n if fnmatch.fnmatch(afile, filename2):\n file2_filename = afile\n \n # if both files have been identified add this to a new list\n if file1_filename and file2_filename:\n 
self.list_of_files.append((file1_filename, file1_dye, file2_filename, file2_dye,out_file_prefix))\n\t\t\t# if either file could not be identified report this.\n else:\n raise ValueError(\"no match for \" + filename1 + \" and \" + filename2)", "def makeUvcontsub(files='*.dat', fitorder=1, useFrequency=False):\n if files.find('*') >= 0:\n resultFiles = sorted(glob.glob(files))\n uids = []\n for resultFile in resultFiles:\n uid = resultFile.split('.')[0]\n if len(np.unique(uids)) > 1:\n print \"There are results for more than one OUS in this directory. Be more specific.\"\n return\n elif type(files) == str:\n resultFiles = sorted(files.split(','))\n else:\n resultFiles = sorted(files)\n freqRanges = {}\n spws = []\n for rf in resultFiles:\n spw = rf.split('spw')[1].split('.')[0]\n spws.append(spw)\n uid = rf.split('.')[0]\n field = rf[len(uid)+6:].split('_')[1]\n f = open(rf,'r')\n lines = f.readlines()\n f.close()\n for line in lines:\n tokens = line.split()\n if line == lines[0]:\n channelRanges = tokens[0]\n if len(tokens) == 2:\n uid = tokens[0]\n if uid not in freqRanges.keys():\n freqRanges[uid] = {}\n if field not in freqRanges[uid].keys():\n freqRanges[uid][field] = ''\n else:\n freqRanges[uid][field] += ','\n if useFrequency:\n freqRanges[uid][field] += '%s:%s' % (spw,tokens[1])\n else:\n freqRanges[uid][field] += '%s:%s' % (spw,channelRanges)\n spws = ','.join(np.unique(spws))\n if freqRanges == {} and useFrequency:\n print \"There are no frequency ranges in the *.dat files. You need to run findContinuum with the 'vis' parameter specified.\"\n return\n for uid in freqRanges.keys():\n for field in freqRanges[uid].keys():\n print \"uvcontsub('%s', field='%s', fitorder=%d, spw='%s', fitspw='%s')\\n\" % (uid, field, fitorder, spws, freqRanges[uid][field])", "def mafftoutput(wildcards):\n\tcheckpoint_output = checkpoints.query2hitseqfas.get(**wildcards)#.output[0] # I actually don't need anything from this, just to confirm it's a checkpoint\n\n\t# Get the names of all the final alignments\n\tfile = glob.glob(\"filtering/Podan2_1n.txt\")[0]\n\tnum_lines = sum(1 for line in open(file)) # How many lines in the file?\n\tfinalnames = []\n\tfor n in range(1, num_lines + 1):\n\t\tnumber = \"{0:04d}\".format(n)\n\t\tfname = \"alignments/orthologs%s.fas\" % (number)\n\t\tfinalnames.append(fname)\n\t\t\n\treturn finalnames", "def lr(*files):\n _ls('-aRF', *files)", "def handleFileNames(self):\n \n # expand the wild cards - but do not create the full directory path\n # as the work sub directories have yet to be created.\n if not os.path.exists(self.shareArea):\n m = 'Cannot set self.auxfiles due to non-existent share directory: %s' % self.shareArea\n self.logger.fatal(m)\n raise RTTCodingError(m)\n\n # resolve auxFile patterns to file names\n auxFiles = []\n for pattern in self.auxFilePatterns:\n base, fnpattern = os.path.split(pattern)\n srcDir = os.path.normpath(os.path.join(self.shareArea, base))\n filesInShare = os.listdir(srcDir)\n auxFiles.extend([os.path.join(base,file) for file in filesInShare if fnmatch.fnmatch(file, fnpattern)])\n\n self.auxFiles = unique(auxFiles)", "def mergefsl(log, file_list, outname):\n cmdargs = split('fslmerge -t {} {}'.format(outname, file_list))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def combine_modeloutputs(outputname='xxRENAMExx_Zcombined.txt',\n data='sfr',\n verbose=True):\n if data == 'sfr':\n filepath = '/Users/kschmidt/work/catalogs/NEOGALlines/nebular_emission/'\n modelfilestr = 
filepath+'nebular_emission_Z0*.txt'\n splitstr = 'emission_Z'\n elif data == 'agn':\n filepath = '/Users/kschmidt/work/catalogs/NEOGALlines/AGN_NLR_nebular_feltre16/'\n modelfilestr = filepath+'nlr_nebular_Z0*.txt'\n splitstr = 'nebular_Z'\n else:\n sys.exit('Inavlid value of data=\"'+data+'\"')\n\n output = filepath+outputname\n if verbose: print(' - Setting up output for:\\n '+output)\n modelfiles = glob.glob(modelfilestr)\n header = open(modelfiles[0]).readline().rstrip()\n if data == 'sfr':\n header = header.replace('##','# Zgas ')\n elif data == 'agn':\n header = header.replace('#','# Zgas ')\n header = header+'\\n'\n\n fout = open(output, 'w')\n fout.write(header)\n if verbose: print(' - Writing the following files to ouput:')\n for mf in modelfiles:\n if verbose: print(' '+mf)\n Zgasstring = mf.split('/')[-1].split(splitstr)[-1].split('.txt')[0]\n\n with open(mf, 'r') as f:\n linesall = f.readlines()\n\n for linestring in linesall:\n if linestring.startswith('#'):\n pass\n elif linestring == ' \\n':\n fout.write(linestring)\n else:\n fout.write('0.'+Zgasstring+' '+linestring)\n\n fout.close()", "def add_substring_match_features(self,lst1,lst2,normalize_factor = 1.):\n for i in range(self.RE_LOW,self.RE_HI+1):\n for j in range(self.WILD_LOW,self.WILD_HI+1):\n self.features.append(Measures.longest_substring_with_wildcards(lst1, lst2, i, j) / normalize_factor)", "def merge(df_list):\n df_final = pd.read_csv(df_list[0])\n for ind, df in enumerate(df_list):\n if ind >= 1:\n temp_df = pd.read_csv(df_list[ind])\n temp_df = temp_df.drop(['lbl'], axis=1)\n df_final = pd.merge(df_final, temp_df, on=['author_id'])\n final_path = os.path.join(os.path.expanduser(\"~/Desktop/Age-Detection\"), \"merged-feature-collection.csv\")\n df_final.to_csv(final_path, sep=',', index=False)\n return final_path", "def load_defects(self, val_dir):\n \n img_list_1 = os.listdir(val_dir+'/'+'1')\n img_list_2 = os.listdir(val_dir+'/'+'2')\n img_list_3 = os.listdir(val_dir+'/'+'3')\n img_list_4 = os.listdir(val_dir+'/'+'4')\n\n\n\n img_list_1 = self.make_imgs_list(val_dir + '/' + '1', img_list_1)\n img_list_2 = self.make_imgs_list(val_dir + '/' + '2', img_list_2)\n img_list_3 = self.make_imgs_list(val_dir + '/' + '3', img_list_3)\n img_list_4 = self.make_imgs_list(val_dir + '/' + '4', img_list_4)\n\n\n img_list_1 = self.load_imgsLabels(img_list_1)\n img_list_2 = self.load_imgsLabels(img_list_2)\n img_list_3 = self.load_imgsLabels(img_list_3)\n img_list_4 = self.load_imgsLabels(img_list_4)\n\n\n img_list_1 = self.features_to_np_array(img_list_1)\n img_list_2 = self.features_to_np_array(img_list_2)\n img_list_3 = self.features_to_np_array(img_list_3)\n img_list_4 = self.features_to_np_array(img_list_4)\n\n lbl_list_1 = img_list_1.shape[0]*[1]\n lbl_list_2 = img_list_2.shape[0]*[2]\n lbl_list_3 = img_list_3.shape[0]*[3]\n lbl_list_4 = img_list_4.shape[0]*[4]\n\n\n imgs = np.concatenate((img_list_1, img_list_2, img_list_3, img_list_4))\n lbls = lbl_list_1 + lbl_list_2 + lbl_list_3 + lbl_list_4\n\n\n lbls = np.array(lbls)\n \n lbls = lbls - 1\n \n lbls = to_categorical(lbls)\n \n return imgs, lbls", "def getFilenamesAndGuid(thisfile):\n\n pfn = str(thisfile.getElementsByTagName(\"pfn\")[0].getAttribute(\"name\"))\n filename = os.path.basename(pfn)", "def read_file_names(self):\n files_BIDMC = os.listdir(self.root_dir_BIDMC)\n masks_BIDMC = os.listdir(self.seg_dir_BIDMC)\n files_HK = os.listdir(self.root_dir_HK)\n masks_HK = os.listdir(self.seg_dir_HK)\n files_I2CVB = os.listdir(self.root_dir_I2CVB)\n masks_I2CVB = 
os.listdir(self.seg_dir_I2CVB)\n files_ISBI = os.listdir(self.root_dir_ISBI)\n masks_ISBI = os.listdir(self.seg_dir_ISBI)\n files_ISBI_15 = os.listdir(self.root_dir_ISBI_15)\n masks_ISBI_15 = os.listdir(self.seg_dir_ISBI_15)\n files_UCL = os.listdir(self.root_dir_UCL)\n masks_UCL = os.listdir(self.seg_dir_UCL)\n site_files = [files_BIDMC, files_HK, files_I2CVB, files_ISBI, files_ISBI_15, files_UCL]\n site_masks = [masks_BIDMC, masks_HK, masks_I2CVB, masks_ISBI, masks_ISBI_15, masks_UCL]\n return site_files, site_masks", "def main(masklist, degrade, outfolder, outfileroot):\n maplist = []\n nsidelist = []\n\n # 1) Operating on individual masks\n for mask in masklist:\n path = mask['path']\n cut = mask['cut']\n invert = mask['invert']\n unseen = mask['unseen']\n\n hpmap = hp.read_map(path)\n nside = hp.get_nside(hpmap)\n nsidelist.append(nside)\n hp_seen = (hpmap != hp.UNSEEN)\n\n if cut is not None:\n hpmap = np.where((hpmap < cut) & hp_seen, 1, 0)\n else:\n assert np.all((hpmap[hp_seen] == 0) | (hpmap[hp_seen] == 1)), (\"\"+\n \"If there is no cut, the map must consist of zeros and ones\")\n if invert:\n hpmap[hp_seen] = np.where((hpmap[hp_seen] == 1), 0, 1)\n if unseen is not None:\n hpmap[~hp_seen] = unseen\n\n maplist.append(hpmap)\n del hpmap, nside\n\n # 2) Upgrading to common high resolution and merging\n n_hires = max(nsidelist)\n mask_hires = np.ones(hp.nside2npix(n_hires))\n for hpmap, nside in zip(maplist, nsidelist):\n if nside < n_hires:\n hpmap = hp.ud_grade(hpmap, n_hires)\n mask_hires *= hpmap\n del hpmap\n\n # 3) Save full mask\n try:\n os.makedirs(outfolder)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n outfile = outfileroot + \"_nside\" + str(n_hires) + \".fits\"\n outpath = os.path.join(outfolder, outfile)\n hp.write_map(outpath, mask_hires)\n\n # 4) Degrade resolution down to Nside = 128 and saving\n if degrade:\n pow2_hires = int(np.log2(n_hires))\n nsideiter = (2**i for i in xrange(7, pow2_hires)) # 7 --> Nside = 128\n for nside in nsideiter:\n mask_lores = hp.ud_grade(mask_hires, nside)\n outfile = (outfileroot + \"_degraded_nside\" + str(n_hires) +\n \"_to_\" + str(nside) + \".fits\")\n outpath = os.path.join(outfolder, outfile)\n hp.write_map(outpath, mask_lores)\n del mask_lores\n\n return None", "def _read_motifs_from_filehandle(handle, fmt):\n if fmt.lower() == \"pwm\":\n motifs = _read_motifs_pwm(handle)\n if fmt.lower() == \"transfac\":\n motifs = _read_motifs_transfac(handle)\n if fmt.lower() == \"xxmotif\":\n motifs = _read_motifs_xxmotif(handle)\n if fmt.lower() == \"align\":\n motifs = _read_motifs_align(handle)\n if fmt.lower() == \"jaspar\":\n motifs = _read_motifs_jaspar(handle)\n \n if handle.name:\n base = os.path.splitext(handle.name)[0]\n map_file = base + \".motif2factors.txt\"\n if os.path.exists(map_file):\n m2f_direct = {}\n m2f_indirect = {}\n for line in open(map_file):\n try:\n motif,*factor_info = line.strip().split(\"\\t\")\n if len(factor_info) == 1:\n m2f_direct[motif] = factor_info[0].split(\",\")\n elif len(factor_info) == 3:\n if factor_info[2] == \"Y\":\n m2f_direct[motif] = m2f_direct.get(motif, []) + [factor_info[0]]\n else:\n m2f_indirect[motif] = m2f_indirect.get(motif, []) + [factor_info[0]]\n except:\n pass\n for motif in motifs:\n if motif.id in m2f_direct:\n motif.factors[DIRECT_NAME] = m2f_direct[motif.id]\n if motif.id in m2f_indirect:\n motif.factors[INDIRECT_NAME] = m2f_indirect[motif.id]\n for motif in motifs:\n for n in [DIRECT_NAME, INDIRECT_NAME]:\n motif.factors[n] = 
list(set(motif.factors[n]))\n return motifs", "def runLNFL(self):\n\n if 'allT5' not in dir(self):\n self.allT5 = sorted(glob.glob('%s/TAPE5_*' % self.dirT5))\n\n # set up the input directory\n self.makeLinks(self.pathLNFL, 'lnfl')\n tapeStrList = ['TAPE1', 'TAPE5']\n self.cleanUp()\n\n # loop through each HITRAN molecule and create an associated TAPE5\n for iMol, mol in enumerate(self.mols):\n base = os.path.basename(mol)\n print(base)\n tape5 = self.allT5[iMol]\n\n if self.isoH2O:\n # there are multiple line files to consider for H2O\n isoStr = ['01_h2o_161_only', '01_h2o_162_excl', \\\n '01_h2o_162_only', '01_h2o_171_only', '01_h2o_172_only', \\\n '01_h2o_181_only', '01_h2o_182_only']\n tape1List = ['%s/%s' % (mol, iso) for iso in isoStr]\n else:\n tape1List = ['%s/%s' % (mol, base)]\n # endif WV\n\n # loop really only exists for H2O\n for tape1 in tape1List:\n tapeList = [tape1, tape5]\n\n # grab the line coupling file if necessary\n if base in ['02_CO2', '06_CH4', '07_O2']:\n tape2 = '%s/lncpl_lines' % mol\n tapeList.append(tape2)\n tapeStrList.append('TAPE2')\n # endif line coupling\n\n # stage the files necessary for an LNFL run\n for source, target in zip(tapeList, tapeStrList):\n self.makeLinks(source, target)\n\n # call LNFL and save TAPE3 to unique name\n sub.call(['lnfl'])\n if self.isoH2O:\n tape3 = '%s/TAPE3_%s' % (mol, os.path.basename(tape1))\n else:\n tape3 = '%s/TAPE3_%s' % (mol, base)\n # endif wv\n if os.path.exists(tape3):\n print('WARNING: overwriting %s' % tape3)\n os.rename('TAPE3', tape3)\n\n # clean up\n self.cleanUp()\n # end TAPE1 loop\n\n #self.cleanUp()\n # if we're doing WV isotopologues, *only* do them\n if self.isoH2O: return\n # end molecule loop\n\n return", "def create_file_list(case):\n for server in ['bonaire','barbados','caiapo']:\n for basedir in ['data0/ivan/archive','data1/ivan/archive',\n 'data2/ivan/archive','data3/ivan/archive',\n '/bonaire/data2/data/SODA-POP','data0',\n '/barbados/data3/CCSM3-BGC']:\n if 'SODA-POP' in basedir:\n path = os.path.join('/',server,basedir,case)\n elif 'CCSM3-BGC' in basedir:\n path = os.path.join('/',server,basedir,case,'ocn/hist')\n else:\n path = os.path.join('/',server,basedir,case,'ocn2')\n\n if os.path.isdir(path):\n \t\tindir = path\n \t\tallfiles = os.listdir(indir)\n else:\n continue\n\n filelist = [os.path.join(indir,file) for file in allfiles\n if file.endswith('.nc')]\n filelist.sort()\n return filelist", "def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--num', action='store', type=int, required=True)\n num = parser.parse_args().num\n # load ing subset and usda ingredients\n ing_path = 'data/join/%d.pkl' % num\n ings = pickle.load(open(ing_path, 'rb'))\n usda_path = 'data/join/usda.pkl'\n usda = pickle.load(open(usda_path, 'rb'))\n names = usda['desc']\n processed = names.apply(fuzz._process_and_sort)\n usda_id_map = get_usda_id_map(processed)\n name_id_map = {ing: usda_id_map[(fuzzy_match(\n fuzz._process_and_sort(ing), ing_ser=processed, comp=fuzz.token_set_ratio))] for ing in ings}\n peak(name_id_map, names)\n # save name_id_map\n out_path = 'data/join/%d_map.pkl' % num\n pickle.dump(name_id_map)", "def add_names_to_output_set(fasta_dict_a,fasta_dict_b,output_set):\n out_fasta_list = []\n fasta_contig_name = \"\"\n \n for fasta_data in output_set:\n if fasta_data in fasta_dict_a:\n fasta_contig_name = fasta_dict_a[fasta_data]\n out_fasta_list.append(\"\\n\"+fasta_contig_name+\"\\n\") \n out_fasta_list.append(fasta_data) \n elif fasta_data in fasta_dict_b:\n fasta_contig_name = 
fasta_dict_b[fasta_data]\n out_fasta_list.append(\"\\n\"+fasta_contig_name+\"\\n\") \n out_fasta_list.append(fasta_data)\n\n return ''.join(out_fasta_list)", "def runCalFlat(lst, hband=False, darkLst=None, rootFolder='', nlCoef=None, satCounts=None, BPM=None, distMapLimitsFile='', plot=True, nChannel=32, nRowsAvg=0,rowSplit=1,nlSplit=32, combSplit=32,bpmCorRng=100, crReject=False, skipObsinfo=False,winRng=51, polyFitDegree=3, imgSmth=5,nlFile='',bpmFile='', satFile='',darkFile='',flatCutOff=0.1,flatSmooth=0, logfile=None, gain=1., ron=None, dispAxis=0,limSmth=20, ask=True, obsCoords=None,satSplit=32, centGuess=None, flatCor=False, flatCorFile=''):\n\n colorama.init()\n \n plt.ioff()\n\n t0 = time.time()\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n wifisIO.createDir('quality_control')\n\n if hband:\n print('*** WORKING ON H-BAND DATA ***')\n \n #create processed directory, in case it doesn't exist\n wifisIO.createDir('processed')\n\n if (plot):\n wifisIO.createDir('quality_control')\n\n procFlux = []\n procSigma = []\n procSatFrame = []\n\n #go through list and process each file individually\n #************\n #eventually need to add capability to create master flat from groups\n #************\n\n for lstNum in range(len(lst)):\n if (lst.ndim>1):\n folder = lst[lstNum,0]\n else:\n folder = lst[lstNum]\n\n t1 = time.time()\n\n savename = 'processed/'+folder\n\n #first check master flat and limits exists\n \n if(os.path.exists(savename+'_flat.fits') and os.path.exists(savename+'_flat_limits.fits') and os.path.exists(savename+'_flat_slices.fits') and os.path.exists(savename+'_flat_slices_norm.fits')):\n cont = 'n'\n if ask:\n cont = wifisIO.userInput('All processed flat field files already exists for ' +folder+', do you want to continue processing (y/n)?')\n else:\n cont = 'y'\n \n if (cont.lower() == 'y'):\n print('*** Working on folder ' + folder + ' ***')\n\n if (os.path.exists(savename+'_flat.fits')):\n cont = 'n'\n cont = wifisIO.userInput('Processed flat field file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading image '+savename+'_flat.fits instead')\n flatImgs, hdr= wifisIO.readImgsFromFile(savename+'_flat.fits')\n flatImg, sigmaImg, satFrame = flatImgs\n if (type(hdr) is list):\n hdr = hdr[0]\n contProc2=False\n else:\n contProc2=True\n else:\n contProc2=True\n \n if contProc2:\n flatImg, sigmaImg, satFrame, hdr = processRamp.auto(folder, rootFolder,savename+'_flat.fits', satCounts, nlCoef, BPM, nChannel=nChannel, rowSplit=rowSplit, nlSplit=nlSplit, combSplit=combSplit, crReject=crReject, bpmCorRng=bpmCorRng, skipObsinfo=skipObsinfo, nRows=nRowsAvg, rampNum=None, nlFile=nlFile, satFile=satFile, bpmFile=bpmFile, gain=gain, ron=ron, logfile=logfile, obsCoords=obsCoords, avgAll=True, satSplit=satSplit)\n \n #carry out dark subtraction\n if darkLst is not None and darkLst[0] is not None:\n print('Subtracting dark ramp')\n if len(darkLst)>1:\n dark, darkSig = darkLst[:2]\n sigmaImg = np.sqrt(sigmaImg**2 + darkSig**2)\n else:\n dark = darkLst[0]\n logfile.write('*** Warning: No uncertainty associated with dark image ***\\n')\n print(colorama.Fore.RED+'*** WARNING: No uncertainty associated with dark image ***'+colorama.Style.RESET_ALL)\n\n flatImg -= dark\n hdr.add_history('Dark image subtracted using file:')\n hdr.add_history(darkFile)\n if logfile is not None:\n logfile.write('Subtracted dark image using file:\\n')\n logfile.write(darkFile+'\\n')\n else:\n 
print(colorama.Fore.RED+'*** WARNING: No dark image provided, or file does not exist ***'+colorama.Style.RESET_ALL)\n if logfile is not None:\n logfile.write('*** WARNING: No dark image provided, or file ' + str(darkFile)+' does not exist ***')\n\n if os.path.exists(savename+'_flat_limits.fits'):\n cont = wifisIO.userInput('Limits file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n print('Reading limits '+savename+'_flat_limits.fits instead')\n finalLimits, limitsHdr= wifisIO.readImgsFromFile(savename+'_flat_limits.fits')\n shft = limitsHdr['LIMSHIFT']\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Finding slice limits and extracting slices')\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #find limits of each slice with the reference pixels, but the returned limits exclude them\n limits = slices.findLimits(flatImg, dispAxis=dispAxis, winRng=winRng, imgSmth=imgSmth, limSmth=limSmth, rmRef=True,centGuess=centGuess)\n\n if logfile is not None:\n logfile.write('Identified slice limits using the following parameters:\\n')\n logfile.write('dispAxis: '+str(dispAxis)+'\\n')\n logfile.write('winRng: ' + str(winRng)+'\\n')\n logfile.write('imgSmth: ' + str(imgSmth)+'\\n')\n logfile.write('limSmth: ' + str(limSmth)+'\\n')\n \n if hband:\n print('Using suitable region of detector to determine flat limits')\n if logfile is not None:\n logfile.write('Using suitable region of detector to determine flat limits:\\n')\n\n #only use region with suitable flux\n if dispAxis == 0:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=1)\n else:\n flatImgMed = np.nanmedian(flatImg[4:-4,4:-4], axis=0)\n \n flatImgMedGrad = np.gradient(flatImgMed)\n medMax = np.nanargmax(flatImgMed)\n lim1 = np.nanargmax(flatImgMedGrad[:medMax])\n lim2 = np.nanargmin(flatImgMedGrad[medMax:])+medMax\n\n if logfile is not None:\n logfile.write('Using following detector limits to set slice limits:\\n')\n logfile.write(str(lim1)+ ' ' + str(lim2)+'\\n')\n \n polyLimits = slices.polyFitLimits(limits, degree=2, sigmaClipRounds=2, constRegion=[lim1,lim2])\n else:\n #get smoother limits, if desired, using polynomial fitting\n polyLimits = slices.polyFitLimits(limits, degree=polyFitDegree, sigmaClipRounds=2)\n\n if logfile is not None:\n logfile.write('Fit polynomial to slice edge traces using:\\n')\n logfile.write('Polynomial degree: ' + str(polyFitDegree)+'\\n')\n logfile.write('sigmaClipRounds: ' + str(2)+'\\n')\n\n if hband:\n logfile.write('Only used pixels between ' + str(lim1) +' and ' + str(lim2)+'\\n')\n \n if os.path.exists(distMapLimitsFile):\n print('Finding slice limits relative to distortion map file')\n hdr.add_history('Slice limits are relative to the following file:')\n hdr.add_history(distMapLimitsFile)\n distMapLimits = wifisIO.readImgsFromFile(distMapLimitsFile)[0]\n if logfile is not None:\n logfile.write('Finding slice limits relative to distortion map file:\\n')\n logfile.write(distMapLimitsFile+'\\n')\n\n if hband:\n shft = int(np.nanmedian(polyLimits[1:-1,lim1:lim2+1] - distMapLimits[1:-1,lim1:lim2+1]))\n else:\n shft = int(np.nanmedian(polyLimits[1:-1,:] - distMapLimits[1:-1,:]))\n \n if logfile is not None:\n logfile.write('Median pixel shift using all inner edge limits is ' + str(shft)+'\\n')\n finalLimits = distMapLimits\n else:\n finalLimits = polyLimits\n shft = 0\n\n if logfile is not None:\n logfile.write('*** 
WARNING:No slice limits provided for distortion map. Finding independent slice limits ***\\n')\n logfile.write(distMapLimitsFile+'\\n')\n \n \n #write distMapLimits + shft to file\n hdr.set('LIMSHIFT',shft, 'Limits shift relative to Ronchi slices')\n hdr.add_comment('File contains the edge limits for each slice')\n\n wifisIO.writeFits(finalLimits.astype('float32'),savename+'_flat_limits.fits', hdr=hdr, ask=False)\n\n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n #save figures of tracing results for quality control purposes\n if (plot):\n print('Plotting results')\n plt.ioff()\n wifisIO.createDir('quality_control')\n \n pdfName = 'quality_control/'+folder+'_flat_slices_traces.pdf'\n with PdfPages(pdfName) as pdf:\n fig = plt.figure()\n #med1= np.nanmedian(flatImg)\n interval = ZScaleInterval()\n lims = interval.get_limits(flatImg[4:-4,4:-4])\n #plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=[0,2.*med1], origin='lower')\n plt.imshow(flatImg[4:-4,4:-4], aspect='auto', cmap='jet', clim=lims, origin='lower')\n \n plt.xlim=(0,2040)\n plt.colorbar()\n for l in range(limits.shape[0]):\n if dispAxis==0:\n plt.plot(limits[l], np.arange(limits.shape[1]),'k', linewidth=1) #drawn limits\n plt.plot(np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1), np.arange(limits.shape[1]),'r--', linewidth=1) #shifted ronchi limits, if provided, or polynomial fit\n else:\n plt.plot(np.arange(limits.shape[1]),limits[l],'k', linewidth=1) #drawn limits\n plt.plot(np.arange(limits.shape[1]),np.clip(finalLimits[l]+shft,0, flatImg[4:-4,4:-4].shape[0]-1),'r--', linewidth=1) #shifted ronchi limits\n\n if hband:\n if dispAxis==0:\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim1,lim1],'b:',linewidth=1)\n plt.plot([0,flatImg[4:-4,4:-4].shape[1]-1],[lim2,lim2],'b:',linewidth=1)\n else:\n plt.plot([lim1,lim1],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n plt.plot([lim2,lim2],[0,flatImg[4:-4,4:-4].shape[1]-1],'b:',linewidth=1)\n\n plt.tight_layout()\n pdf.savefig()\n plt.close(fig)\n\n #get rid of reference pixels\n flatImg = flatImg[4:-4, 4:-4]\n sigmaImg = sigmaImg[4:-4, 4:-4]\n satFrame = satFrame[4:-4,4:-4]\n\n if logfile is not None:\n logfile.write('Removing reference pixels\\n')\n \n if os.path.exists(savename+'_flat_slices.fits'):\n cont='n'\n cont = wifisIO.userInput('Flat slices file already exists for ' +folder+ ', do you want to continue processing (y/n)?')\n\n if (not cont.lower() == 'y'):\n print('Reading slices file '+savename+'_flat_slices.fits instead')\n flatSlices = wifisIO.readImgsFromFile(savename+'_flat_slices.fits')[0]\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Extracting slices') \n #now extract the individual slices\n flatSlices = slices.extSlices(flatImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in flatSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted flat slices\\n')\n \n #extract uncertainty slices\n sigmaSlices = slices.extSlices(sigmaImg, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in sigmaSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted uncertainty slices\\n')\n \n #extract saturation slices\n satSlices = slices.extSlices(satFrame, finalLimits, dispAxis=dispAxis, shft=shft)\n for slc in satSlices:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Extracted saturation info slices\\n')\n \n #write slices to 
file\n hdr.add_comment('File contains each slice image as separate extension')\n wifisIO.writeFits(flatSlices+sigmaSlices+satSlices,savename+'_flat_slices.fits',hdr=hdr, ask=False)\n \n #remove comment about contents of file\n hdrTmp = hdr[::-1]\n hdrTmp.remove('COMMENT')\n hdr = hdrTmp[::-1]\n \n if os.path.exists(savename+'_flat_slices_norm.fits'):\n cont = 'n'\n cont = wifisIO.userInput('Normalized flat slices file already exists for ' +folder+', do you want to continue processing (y/n)?')\n \n if (not cont.lower() == 'y'):\n contProc2 = False\n else:\n contProc2 = True\n else:\n contProc2= True\n \n if (contProc2):\n print('Getting normalized flat field')\n #now get smoothed and normalized response function\n flatNorm = slices.getResponseAll(flatSlices, flatSmooth, flatCutOff)\n for slc in flatNorm:\n slc = slc.astype('float32')\n \n hdr.add_comment('File contains the normalized flat-field response function')\n hdr.add_history('Smoothed using Gaussian with 1-sigma width of ' + str(flatSmooth) + ' pixels')\n hdr.add_history('Normalized cutoff threshold is ' + str(flatCutOff))\n\n if logfile is not None:\n logfile.write('Computed normalized response function from flat slices using the following parameters:\\n')\n logfile.write('flatSmooth: ' + str(flatSmooth)+'\\n')\n logfile.write('flatCutoff: ' + str(flatCutOff)+'\\n')\n \n sigmaNorm = slices.ffCorrectAll(sigmaSlices, flatNorm)\n for slc in sigmaNorm:\n slc = slc.astype('float32')\n \n if logfile is not None:\n logfile.write('Computed uncertainties for normalized response function for each slice\\n')\n\n if flatCor:\n print('Correcting flat field response function')\n logfile.write('Correcting flat field response function using file:\\n')\n logfile.write(flatCorFile+'\\n')\n \n flatCorSlices = wifisIO.readImgsFromFile(flatCorFile)[0]\n flatNorm = slices.ffCorrectAll(flatNorm, flatCorSlices)\n hdr.add_history('Corrected flat field response function using file:')\n hdr.add_history(flatCorFile)\n\n if len(flatCorSlices)>nSlices:\n hdr.add_history('Uncertainties include correction')\n sigmaNorm = wifisUncertainties.multiplySlices(flatNorm,sigmaNorm,flatCorSlices[:nSlices],flatCorSlices[nSlices:2*nSlices])\n\n else:\n hdr.add_history('Uncertainties do not include correction')\n logfile.write('*** WARNING: Response correction does not include uncertainties***\\n')\n\n else:\n #print(colorama.Fore.RED+'*** WARNING: Flat field correction file does not exist, skipping ***'+colorama.Style.RESET_ALL)\n \n #logfile.write('*** WARNING: Flat field correction file does not exist, skipping ***\\n')\n print('Flat field correction file off...skipping')\n \n logfile.write('Flat field correction file off...skipping\\n')\n \n #write normalized images to file\n wifisIO.writeFits(flatNorm + sigmaNorm + satSlices,savename+'_flat_slices_norm.fits',hdr=hdr, ask=False)\n print('*** Finished processing ' + folder + ' in ' + str(time.time()-t1) + ' seconds ***')\n \n return", "def get_hindustani_ragas(self, hind_list):\n self.hind = [h.name for h in hind_list if h.raga == self.name]" ]
[ "0.58278376", "0.569089", "0.5623684", "0.56120443", "0.5441088", "0.53539836", "0.5308381", "0.5256427", "0.52116", "0.51895374", "0.5147063", "0.5108449", "0.51060414", "0.5100264", "0.5047533", "0.5038564", "0.5021236", "0.5014878", "0.50056285", "0.49947405", "0.49672922", "0.4966864", "0.49275842", "0.4905078", "0.48935366", "0.48853895", "0.48839477", "0.48766556", "0.48747486", "0.48745847", "0.48663166", "0.48535794", "0.48196667", "0.48138747", "0.48130685", "0.481137", "0.48046204", "0.48033082", "0.4803167", "0.4802041", "0.47863787", "0.47836506", "0.4776202", "0.47679973", "0.4766645", "0.47649175", "0.4762341", "0.47612754", "0.47555676", "0.47518805", "0.47505596", "0.4738792", "0.4737747", "0.47357345", "0.4729013", "0.4725789", "0.4712497", "0.47108245", "0.47077924", "0.46958643", "0.4691763", "0.46862495", "0.4685312", "0.46836817", "0.46812335", "0.46689978", "0.4668346", "0.46628848", "0.46597", "0.4651725", "0.46333247", "0.4630924", "0.4629531", "0.4627038", "0.46244", "0.46227592", "0.4615381", "0.46141592", "0.46140748", "0.461208", "0.4610186", "0.46086422", "0.46034306", "0.460111", "0.45937636", "0.45930213", "0.4587917", "0.45875174", "0.4583426", "0.45819488", "0.45808715", "0.45701286", "0.45692322", "0.45604283", "0.45532602", "0.4547988", "0.4546344", "0.45375735", "0.4534257", "0.45313022" ]
0.64975625
0
Test list secrets when not connected to any cluster.
def test_secrets_list_server_not_reachable(): message = "REANA client is not connected to any REANA cluster." reana_token = "000000" runner = CliRunner() result = runner.invoke(cli, ["secrets-list", "-t", reana_token]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def list_secrets(self):\n pass", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def secrets(self): # pylint: disable=no-self-use\n return []", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def list_command(env: Optional[str], config: str) -> None:\n layer = Layer.load_from_yaml(config, env)\n amplitude_client.send_event(amplitude_client.LIST_SECRETS_EVENT)\n gen_all(layer)\n _raise_if_no_k8s_cluster_exists(layer)\n\n configure_kubectl(layer)\n load_kube_config()\n v1 = CoreV1Api()\n api_response = v1.read_namespaced_secret(\"secret\", layer.name)\n if api_response.data is None:\n print(\n \"No secrets found, you can make some by adding them in you opta file k8s service\"\n )\n return\n for key in api_response.data:\n print(key)", "def secrets(self):\n return self._secrets", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecretArgs']]]]:\n return pulumi.get(self, \"secrets\")", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n 
from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def pod_secrets(self) -> Optional[Sequence['outputs.DataBoxSecretResponse']]:\n return pulumi.get(self, \"pod_secrets\")", "def list_secrets(self, MaxResults: int = None, NextToken: str = None) -> Dict:\n pass", "def list_secrets_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n limit = arg_to_number(args.get('limit')) or DEFAULT_LIMIT\n offset = arg_to_number(args.get('offset')) or DEFAULT_OFFSET\n response = client.list_secrets_request(vault_name, limit, offset)\n outputs = copy.deepcopy(response)\n readable_response = []\n\n for secret in outputs:\n readable_response.append({\n 'secret_id': secret.get('id'), 'managed': secret.get('managed'),\n **convert_attributes_to_readable(secret.get('attributes', {}).copy())\n })\n secret[VAULT_NAME_CONTEXT_FIELD] = vault_name\n secret['attributes'] = convert_time_attributes_to_iso(secret['attributes'])\n\n readable_output = tableToMarkdown(\n f'{vault_name} Secrets List',\n readable_response,\n ['secret_id', 'enabled', 'create_time', 'update_time', 'expiry_time'], removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='id',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def list_vault_secrets(schedule_id):\n from mist.api.poller.models import ListVaultSecretsPollingSchedule\n sched = ListVaultSecretsPollingSchedule.objects.get(id=schedule_id)\n sched.owner.secrets_ctl.list_secrets(recursive=True)", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def get_db_secrets():\n secret_response = secrets_client.get_secret_value(SecretId=db_secret_name)\n secrets = json.loads(secret_response['SecretString'])\n return secrets", "def secrets(self):\n return self._secrets_store", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret", "def cabinet_pod_secrets(self) -> Sequence['outputs.DataBoxHeavySecretResponse']:\n return pulumi.get(self, \"cabinet_pod_secrets\")", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def 
test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def get_secrets():\n client = datastore.Client()\n query = client.query(kind='env_vars')\n entity = query.fetch()\n secrets = list(entity)[0]\n return secrets", "def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,\n boto3_resource, boto3_client, monkeypatch):\n # Call to the DynamoDB client to retrieve the encrypted secret\n monkeypatch.setattr(\"boto3.resource\", boto3_resource)\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n secret = lambdautils.utils.get_secret(key,\n namespace=namespace,\n environment=environment,\n stage=stage)\n assert secret == \"dummy\"\n boto3_client(\"dynamodb\").get_item.assert_called_with(\n TableName=table,\n Key={\"id\": {\"S\": nkey}})\n\n # Call to the KMS client to decrypt the secret\n boto3_client('kms').decrypt.assert_called_with(CiphertextBlob=\"encrypted\")", "def get_secrets(token):\n try:\n return get_keycloak_client().userinfo(token)['attributes'].get('secrets')\n except KeycloakError as ke:\n logger.error(\"Keycloak error: {0}\").format(ke)\n raise exceptions.TransferError\n except KeyError as ke:\n logger.error(\"Secrects not found in token.\")\n raise exceptions.TransferUnauthorized", "def inject_secrets(self, secrets: str) -> None:\n self.config.read(secrets)", "def __decrypt_secrets(self, secrets):\n assert self.decrypter != None\n return [self.decrypter.decrypt(secret) for secret in secrets]", "def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"secrets\")", "def secrets(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"secrets\")", "def _get_token_secrets(self, request):\n if self.secrets is None:\n return [self.secret]\n # Secrets are looked up by hostname.\n # We need to normalize some port information for this work right.\n node_name = request.host_url\n host_url = urlparse(request.host_url)\n if host_url.scheme == \"http\" and host_url.port == 80:\n assert node_name.endswith(\":80\")\n node_name = node_name[:-3]\n elif host_url.scheme == \"http\" and host_url.port == 443:\n assert node_name.endswith(\":443\")\n node_name = node_name[:-4]\n return self.secrets.get(node_name)", "def get_secrets(session, secret_id):\n secretsmanager = session.client('secretsmanager')\n secrets = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])\n formatted_secrets = []\n for (key, value) in secrets.items():\n skipped_secrets = [ # cloudformation doesn't need these, so skip them\n 'TABLE_NAME',\n 'DATATRUST_KEY',\n 'JWT_SECRET_KEY',\n 'DB_URL',\n 'RPC_PATH',\n 'LOG_LEVEL',\n 'CELERY_BROKER_URL',\n 'CELERY_RESULT_BACKEND',\n 'S3_DESTINATION'\n ]\n if key not in skipped_secrets:\n formatted_secrets.append(\n {\"ParameterKey\": key, \"ParameterValue\": value}\n )\n formatted_secrets.append(\n {\"ParameterKey\": \"StackName\", \"ParameterValue\": STACK_NAME}\n )\n return formatted_secrets", "def test_list_cluster_role(self):\n pass", "def 
getSecretKeys():\n secretKeys = dict()\n for (key, value) in os.environ.items():\n if( key.startswith(\"SM_\")):\n secretKeys[key] = value\n if(bool(secretKeys)):\n return secretKeys\n else:\n raise ValueError(\"Secrets Manager environment variable key not found, make sure there is atleast an env var with 'SM_' prefix for the init-container\")", "def test_run_cmd_simple_positive_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"echo -n hello 8bca8d2e-1cd6\"\n assert utils.run_cmd(cmd, secrets=secrets) == \"hello *****\"\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_list_cluster_policy(self):\n pass", "def test_display_all_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def secrets(self, secrets):\n\n self._secrets = secrets", "def get_secret(name):\n config = ConfigParser()\n config.read('/srv/oclubs/secrets.ini')\n return config.get('secrets', name)", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def secretstore():\n pass", "def test_list_config_roots(self):\n with self.override_role():\n self.config_client.list_config_roots()", "def test_list_config_nodes(self):\n with self.override_role():\n self.config_client.list_config_nodes()", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... 
==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def test_list_application_credentials(self):\n self.create_application_credential()\n self.create_application_credential()\n\n app_creds = self._list_app_creds()\n self.assertEqual(2, len(app_creds))", "def get_password_testing():\n if settings.CLOUD:\n return [os.environ.get('passwordtest')]\n with open('env.yaml') as file_name:\n data = yaml.safe_load(file_name)\n return (data['test_variables']['password'],)", "def secret_keys(self):\n return self._secret_keys", "def list(cls):\n\n db = get_db_handle()\n\n secret_basic_configs = []\n for secret in db.secret_table.select():\n secret_basic_configs.append(secret.get_detail_dict())\n\n return secret_basic_configs", "def test_list_cluster_role_binding(self):\n pass", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def _find_secrets(cls, *, pattern, secretsmanager_client):\n results = []\n for secret in cls._all_secrets(secretsmanager_client=secretsmanager_client):\n if re.search(pattern, secret['Name']):\n results.append(secret)\n return results", "def test_run_cmd_simple_negative_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls /tmp/this/file/683c08d7-bc07/isnotthere\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd, secrets=secrets)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that exception was sanitized\n for secret in secrets:\n assert secret not in str(excinfo.value)\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "async def read_secret(self, name: str):\n pass", "def get_secret(setting, secrets=secrets):\n return secrets[setting]", "def test_get_yggdrasil_vaults(self):\n pass", "def test_find_secret_locations():\n list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),\n random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]\n secrets = 
s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)", "def list_secrets_request(self, vault_name: str, limit: int, offset: int) -> list[dict]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets'\n response = self.http_request(\n 'GET', full_url=url, resource=self.get_vault_resource())\n\n return self.get_entities_independent_of_pages(response, limit, offset, self.get_vault_resource())", "def getsecret(path=None):\n\n if path:\n print(vault.read(path))\n\n else:\n for section in vault.list():\n for key in vault.list(section):\n print(f'{section}{key}')", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_list_cluster_policy_binding(self):\n pass", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def secret(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"secret\")", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_list_keys(self):\n with patch('iceit.crypto.gnupg.GPG') as mock_gpg:\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n encryptor.list_secret_keys()\n\n mock_gpg.list_keys.assert_called_once_with(True)", "def test_no_vault_secrets(mock_load, localhost_client, gen_input_config):\n mock_load.return_value = gen_input_config(vault_secrets={})\n\n localhost_client.load(\"in.json\")\n\n mock_load.assert_called_with(\"in.json\")", "def secret() -> None:\n pass", "def secret():\n pass", "def list_secret(self, **kwargs):\n kwargs['_return_http_data_only'] = True\n if kwargs.get('callback'):\n return self.list_secret_with_http_info(**kwargs)\n else:\n (data) = self.list_secret_with_http_info(**kwargs)\n return data", "def test_display_all_credentials(self):\n\n\n self.assertEqual(Credential.display_credentials(),Credential.credential_list)", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def test_get_secrets_does_not_retry_on_200(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=200, content=secret_data),\n self._mock_response(status=500, content=error_data)]\n self.client.get_secrets_data('fake/path')", "def disk_secrets(self) -> Sequence['outputs.DiskSecretResponse']:\n return pulumi.get(self, \"disk_secrets\")", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def 
test_vault_get_all_vault_items(self):\n pass", "def known(self) -> List[str]:\n return [k for k in self._config.get('auths', {}).keys()]", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_set_cipher_list_no_cipher_match(self, context):\n with pytest.raises(Error) as excinfo:\n context.set_cipher_list(b\"imaginary-cipher\")\n assert excinfo.value.args[0][0] in [\n # 1.1.x\n (\n \"SSL routines\",\n \"SSL_CTX_set_cipher_list\",\n \"no cipher match\",\n ),\n # 3.0.x\n (\n \"SSL routines\",\n \"\",\n \"no cipher match\",\n ),\n ]", "def read_namespaced_secret_list_secrets(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_secret_list_secrets\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_secret_list_secrets`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_secret_list_secrets`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/imagestreams/{name}/secrets'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n 
query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1SecretList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_listCheckers(self):\n expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]\n got = self.portal.listCredentialsInterfaces()\n self.assertEqual(sorted(got), sorted(expected))", "def test_client_key_secret(self):\n #this adds lti passports to system\n mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = \"lti_id\"\n key, secret = self.xmodule.get_client_key_secret()\n expected = ('test_client', 'test_secret')\n assert expected == (key, secret)", "def test_get_secrets_does_not_retry_on_4xx(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n\n mget.side_effect = [self._mock_response(status=403, content=error_data),\n self._mock_response(status=403, content=error_data)]\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets_data('fake/path')\n mget.assert_called_once_with(\n self.cerberus_url + '/v1/secret/fake/path',\n params={'versionId': 'CURRENT'},\n headers=self.client.HEADERS\n )", "def _decrypt_secret(\n self, \n encryption_key: str,\n secret_list: List,\n secret_name: str\n ):\n f = Fernet(\n bytes(encryption_key, \"utf-8\")\n )\n secret=None\n if 'secrets' in secret_list:\n if secret_name in secret_list['secrets']:\n secret = f.decrypt(\n bytes(secret_list['secrets'][secret_name], \"utf-8\")\n ).decode('UTF-8')\n #self.log.log_success(\n # f'{secret_name} : {secret}'\n #)\n return secret", "def list_credentials():\n creds = load_auth()\n max_username_len = max([len(c.username) for c in creds]) if len(creds) > 0 else 1\n long_format = f\"{{:{max_username_len}}} for {{}}\"\n for cred in creds:\n if len(cred.hostname) > 0:\n print(str.format(long_format, cred.username, cred.hostname))\n else:\n print(cred.username)\n if len(creds) == 0 and os.isatty(1):\n print(\"No credentials configured\")", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def secret(self) -> str:\n return pulumi.get(self, \"secret\")", "def test_display_cred(self):\n self.assertEqual(Credentials.display_cred(), Credentials.cred_list)", "def prepare_secrets(c, rebuild_venv=False, no_secret_cache=False):\n cli_tasks.prepare_secrets.run(c, rebuild_venv, no_secret_cache)", "def test_bad_client_key_secret(self):\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id_test_client_test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = 
Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = 'lti_id'\n with pytest.raises(LTIError):\n self.xmodule.get_client_key_secret()", "async def admin_secret(self, ctx: commands.Context, *token: str):\n the_token = await self.config.secret()\n token = ' '.join(token)\n if not token:\n await ctx.author.send(f'Team management secret: {the_token}')\n else:\n await self.config.secret.set(token)\n message = [display(ctx.author),\n f'set the team management secret to {token}.']\n if the_token:\n message.append(f'(was `{the_token}`)')\n await self.admin_msg(' '.join(message))", "def test_read_cluster_role(self):\n pass" ]
[ "0.7848596", "0.77356535", "0.73634505", "0.71850795", "0.7145914", "0.6926122", "0.674381", "0.655007", "0.64121014", "0.639309", "0.6267915", "0.6263949", "0.6144681", "0.61244977", "0.6104293", "0.6090162", "0.6053545", "0.60486376", "0.601324", "0.59840655", "0.59817785", "0.5971765", "0.5966492", "0.5962005", "0.595862", "0.59378386", "0.59016454", "0.5863159", "0.58138025", "0.58063555", "0.57929474", "0.5791032", "0.57886547", "0.5764904", "0.5759233", "0.5739729", "0.57203865", "0.5720164", "0.5720164", "0.5715832", "0.5698517", "0.5673045", "0.56721646", "0.5666193", "0.56597966", "0.56440055", "0.562933", "0.56133145", "0.5578109", "0.55739915", "0.55573106", "0.5553126", "0.55521035", "0.554845", "0.55394804", "0.5536658", "0.55300665", "0.5529654", "0.5514262", "0.5512417", "0.54819703", "0.54748446", "0.547203", "0.54620373", "0.5458834", "0.5457036", "0.5450419", "0.54343927", "0.54206765", "0.54201096", "0.54074824", "0.5400424", "0.5399408", "0.5388313", "0.5388042", "0.5372318", "0.53615135", "0.5324318", "0.53185534", "0.5315584", "0.5303501", "0.52934295", "0.5283719", "0.527627", "0.5268288", "0.5258733", "0.5250908", "0.52476525", "0.5247625", "0.5230263", "0.522935", "0.5223926", "0.52054936", "0.5204904", "0.5204904", "0.52039635", "0.5199486", "0.51984286", "0.5196262", "0.5184004" ]
0.77385426
1
Test list secrets when access token is not set.
def test_secrets_list_server_no_token(): message = "Please provide your access token" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) result = runner.invoke(cli, ["secrets-list"]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "async def list_secrets(self):\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_list_o_auth_access_token(self):\n pass", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def get_secrets(token):\n try:\n return get_keycloak_client().userinfo(token)['attributes'].get('secrets')\n except KeycloakError as ke:\n logger.error(\"Keycloak error: {0}\").format(ke)\n raise exceptions.TransferError\n except KeyError as ke:\n logger.error(\"Secrects not found in token.\")\n raise exceptions.TransferUnauthorized", "def secrets(self): # pylint: disable=no-self-use\n return []", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_get_invalid_secret(self):\n response = self.client.get(\n reverse(\n 'projectroles:api_remote_get', kwargs={'secret': build_secret()}\n )\n )\n self.assertEqual(response.status_code, 401)", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing 
bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_get_tokens():\n tokens = get_tokens()\n assert tokens[\"token_type\"] == \"Bearer\"\n assert tokens[\"access_token\"] is not None\n assert tokens[\"expires_at\"] is not None\n assert tokens[\"expires_in\"] is not None\n assert tokens[\"refresh_token\"] is not None\n\n assert \"token_type\" in tokens\n assert \"access_token\" in tokens\n assert \"expires_at\" in tokens\n assert \"expires_in\" in tokens\n assert \"refresh_token\" in tokens\n\n assert tokens[\"expires_at\"] > int(time.time())", "def test_token_was_blacklisted(self):\n\n revoked_token = RevokedToken('secret_token_blacklisted')\n revoked_token.save()\n\n self.assertTrue(\n RevokedToken.is_jti_blacklisted('secret_token_blacklisted'))", "def test_env_access_token(context):\n os.environ[config.FLOWSERV_ACCESS_TOKEN] = '0001'\n assert context.access_token() == '0001'\n del os.environ[config.FLOWSERV_ACCESS_TOKEN]\n with pytest.raises(err.MissingConfigurationError):\n context.access_token()", "def test_authenticated_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').read,\r\n token)", "def list_secrets(self, MaxResults: int = None, NextToken: str = None) -> Dict:\n pass", "def test_get_secrets_does_not_retry_on_200(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=200, content=secret_data),\n self._mock_response(status=500, content=error_data)]\n self.client.get_secrets_data('fake/path')", "def test_cannot_view_all_users_with_blacklisted_token(self):\n resp = self.admin_create_user()\n reply = self.admin_create_user2()\n resp = self.admin_login()\n token = resp['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/users',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_interactive_withdraw_no_token(client):\n response = client.get(WEBAPP_PATH)\n assert \"Missing authentication token\" in str(response.content)\n assert response.status_code == 403", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The 
root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def test_cannot_view_all_products_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def get_secrets(session, secret_id):\n secretsmanager = session.client('secretsmanager')\n secrets = json.loads(secretsmanager.get_secret_value(SecretId=secret_id)['SecretString'])\n formatted_secrets = []\n for (key, value) in secrets.items():\n skipped_secrets = [ # cloudformation doesn't need these, so skip them\n 'TABLE_NAME',\n 'DATATRUST_KEY',\n 'JWT_SECRET_KEY',\n 'DB_URL',\n 'RPC_PATH',\n 'LOG_LEVEL',\n 'CELERY_BROKER_URL',\n 'CELERY_RESULT_BACKEND',\n 'S3_DESTINATION'\n ]\n if key not in skipped_secrets:\n formatted_secrets.append(\n {\"ParameterKey\": key, \"ParameterValue\": value}\n )\n formatted_secrets.append(\n {\"ParameterKey\": \"StackName\", \"ParameterValue\": STACK_NAME}\n )\n return formatted_secrets", "def test_get_secrets_does_not_retry_on_4xx(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n\n mget.side_effect = [self._mock_response(status=403, content=error_data),\n self._mock_response(status=403, content=error_data)]\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets_data('fake/path')\n mget.assert_called_once_with(\n self.cerberus_url + '/v1/secret/fake/path',\n params={'versionId': 'CURRENT'},\n headers=self.client.HEADERS\n )", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_tenant_secret_page_on_root_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, 
HTTP_HOST=self.tenant_root_domain)\n self.assertEqual(response.status_code, 403)", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def test_access_token_empty(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def test_list_application_credentials(self):\n self.create_application_credential()\n self.create_application_credential()\n\n app_creds = self._list_app_creds()\n self.assertEqual(2, len(app_creds))", "def test_list_auth(self):\n self.api_client.logout()\n resp = self.api_client.get('/api/metadata/tracks/')\n self.assertEqual(resp.status_code, 403)", "def test_unauthenticated_get(self):\n url = reverse('edit-list')\n\n response = self.client.get(url)\n self.assertEqual(403, response.status_code)\n self.assertEqual('Forbidden', response.status_text)\n self.assertTrue(\n 'credentials were not provided.' in response.data.get('detail'))", "def test_anonymous_user_read(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Unauthorized,\r\n getattr(require, 'token').read,\r\n token)", "def secrets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['SecretArgs']]]]:\n return pulumi.get(self, \"secrets\")", "def list_secrets_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n limit = arg_to_number(args.get('limit')) or DEFAULT_LIMIT\n offset = arg_to_number(args.get('offset')) or DEFAULT_OFFSET\n response = client.list_secrets_request(vault_name, limit, offset)\n outputs = copy.deepcopy(response)\n readable_response = []\n\n for secret in outputs:\n readable_response.append({\n 'secret_id': secret.get('id'), 'managed': secret.get('managed'),\n **convert_attributes_to_readable(secret.get('attributes', {}).copy())\n })\n secret[VAULT_NAME_CONTEXT_FIELD] = vault_name\n secret['attributes'] = convert_time_attributes_to_iso(secret['attributes'])\n\n readable_output = tableToMarkdown(\n f'{vault_name} Secrets List',\n readable_response,\n ['secret_id', 'enabled', 'create_time', 'update_time', 'expiry_time'], removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='id',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def test_no_vault_secrets(mock_load, localhost_client, gen_input_config):\n mock_load.return_value = gen_input_config(vault_secrets={})\n\n localhost_client.load(\"in.json\")\n\n mock_load.assert_called_with(\"in.json\")", "def __decrypt_secrets(self, secrets):\n assert self.decrypter != None\n return [self.decrypter.decrypt(secret) for secret in secrets]", "def test_discover_tokens(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n found = CloudCredentials.objects._discover_tokens(self.cloud)\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)", "def _get_token_secrets(self, request):\n if self.secrets is None:\n return [self.secret]\n # Secrets are looked up by hostname.\n # We need to normalize some port information for this work right.\n node_name = request.host_url\n host_url = 
urlparse(request.host_url)\n if host_url.scheme == \"http\" and host_url.port == 80:\n assert node_name.endswith(\":80\")\n node_name = node_name[:-3]\n elif host_url.scheme == \"http\" and host_url.port == 443:\n assert node_name.endswith(\":443\")\n node_name = node_name[:-4]\n return self.secrets.get(node_name)", "def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def testAuthorizationMultipleClientSecret(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': [self._VALID_CLIENT.secret] * 2,\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MultipleParameterError('client_secret'),\n msg='Expected the token resource to reject a request with multiple client secrets.')", "def test_need_login_to_see_usagelist(self):\n response = self.client.get(reverse('api_v1:usage-list'), follow=True)\n self.assertEqual(response.status_code, 403)", "async def admin_secret(self, ctx: commands.Context, *token: str):\n the_token = await self.config.secret()\n token = ' '.join(token)\n if not token:\n await ctx.author.send(f'Team management secret: {the_token}')\n else:\n await self.config.secret.set(token)\n message = [display(ctx.author),\n f'set the team management secret to {token}.']\n if the_token:\n message.append(f'(was `{the_token}`)')\n await self.admin_msg(' '.join(message))", "async def test_existing_token_missing_scope(\n hass: HomeAssistant,\n token_scopes: list[str],\n component_setup: ComponentSetup,\n config_entry: MockConfigEntry,\n) -> None:\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_ERROR\n\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n assert flows[0][\"step_id\"] == \"reauth_confirm\"", "def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value", "def test_no_token_get_all(self):\n response = self.app.get('/api/v3/users')\n self.assertEqual(response.status_code, 401)", "def test_run_cmd_simple_negative_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls /tmp/this/file/683c08d7-bc07/isnotthere\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd, secrets=secrets)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that exception was sanitized\n for secret in secrets:\n assert secret not in str(excinfo.value)\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_get_secrets_retry_on_5xx(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = 
[self._mock_response(status=500, content=error_data),\n self._mock_response(status=502, content=error_data),\n self._mock_response(status=200, content=secret_data)]\n self.client.get_secrets_data('fake/path')", "def test_list_o_auth_authorize_token(self):\n pass", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def test_get_yggdrasil_vaults(self):\n pass", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def test_read_o_auth_access_token(self):\n pass", "def test_creds_not_found():\n assert_equal(find_credentials({'foo': 'bar'}), (None, None))", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_list_api_client_auth_invalid_credentials(self):\n api_client = APIClient.objects.create(\n name='test', accesskey='a' * 32, secretkey='s' * 32)\n\n headers = {'secretkey': 'INVALID-SECRETKEY'}\n response = self.client.get(\n '/api/products/?accesskey={}'.format('INVALID-ACCESSKEY'), **headers)\n\n expected = {'detail': 'Invalid APIClient credentials'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def sanity_check(secret):\n matches = list()\n for cur_file in CASSETTE_PATH.rglob(\"*.yaml\"):\n if secret in cur_file.read_text():\n matches.append(cur_file)\n if matches:\n click.secho(\"Found {0} cassettes that still mention auth token:\".format(len(matches)), fg=\"red\")\n for cur_match in matches:\n click.secho(\"\\t{0}\".format(cur_match.name), fg=\"red\")\n return False\n click.secho(\"Cassettes look clean - no mentions of auth tokens!\", fg=\"green\")\n return True", "def list_secrets_request(self, vault_name: str, limit: int, offset: int) -> list[dict]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets'\n response = self.http_request(\n 'GET', full_url=url, resource=self.get_vault_resource())\n\n return self.get_entities_independent_of_pages(response, limit, offset, self.get_vault_resource())", "def test_001_unauthorized_access(self):\n false_token = \"12345\"\n self.info(\"Will use token %s\", false_token)\n client = ComputeClient(self.clients.compute_url, false_token)\n client.CONNECTION_RETRY_LIMIT = self.clients.retry\n\n with self.assertRaises(ClientError) as cl_error:\n client.list_servers()\n self.assertEqual(cl_error.exception.status, 401)", "def test_get_all_tokens_anonymous_user(self):\r\n\r\n # Anonymoues users should be unauthorized, no matter which kind of token are requesting\r\n res = self.app.get('/api/token')\r\n err = json.loads(res.data)\r\n\r\n assert res.status_code == 401, err\r\n assert err['status'] == 'failed', err\r\n assert err['status_code'] == 401, err\r\n assert err['exception_cls'] == 'Unauthorized', err\r\n assert err['target'] == 'token', err", "def test_access_token_all_expired(self):\n exp = self.factory.create(access_token='expired', 
expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, None)\n exp.delete()", "def tokens():\n return ['access token', 'refresh token']", "def test_tenant_secret_page_on_marketing_domain_not_be_accessible(self):\n response = self.client.get(\n self.secret_url, HTTP_HOST=\"landingpage.com\")\n self.assertEqual(response.status_code, 403)", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def test_list_not_authenticated(self):\n response = self.client.get('/api/products/')\n expected = {'detail': 'Authentication credentials were not provided.'}\n self.assertEqual(response.status_code, 401)\n self.assertEqual(response.json(), expected)", "def test_cannot_view_a_product_with_blacklisted_token(self):\n resp = self.admin_register()\n reply = self.admin_login()\n token = reply['token']\n product = dict(\n prod_name='NY_denims',\n category='denims',\n stock=20,\n price=150\n )\n resp = self.client.post(\n '/api/v1/products',\n content_type='application/json',\n data=json.dumps(product),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Product successfully added to Inventory!')\n self.assertEqual(resp.status_code, 201)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/products/1',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_get_secrets_invalid_path(self, mget):\n data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n mget.return_value = self._mock_response(status=401, content=data)\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets('this/path/does/not/exist')", "def test__parse_access_token():\n for input_data, expected_output in (\n ({'access_token': ''}, ''),\n ({'access_token': 'a'}, 'a'),\n ):\n output = parse_access_token(input_data)\n vampytest.assert_eq(output, expected_output)", "def test_cannot_get_all_sale_records_with_blacklisted_token(self):\n reply = self.admin_add_product()\n\n resp = self.admin_create_user()\n reply = self.attendant_login()\n token = reply['token']\n sale = dict(products = [\n {\n \"prod_name\":\"NY_denims\", \n \"quantity\":10\n }\n\t ])\n resp = self.client.post(\n '/api/v1/sales',\n content_type='application/json',\n data=json.dumps(sale),\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Sale record created')\n self.assertEqual(resp.status_code, 200)\n \n reply = self.admin_login()\n token = reply['token']\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.get(\n '/api/v1/sales',\n headers={'Authorization': 'Bearer {}'.format(token)}\n 
)\n reply = json.loads(resp.data.decode())\n \n self.assertEqual(reply['message'], 'Invalid Authentication, Please Login!')\n self.assertEqual(resp.status_code, 401)", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "def _find_secrets(cls, *, pattern, secretsmanager_client):\n results = []\n for secret in cls._all_secrets(secretsmanager_client=secretsmanager_client):\n if re.search(pattern, secret['Name']):\n results.append(secret)\n return results", "def test_api_video_read_list_token_user(self):\n video = factories.VideoFactory()\n jwt_token = AccessToken()\n jwt_token.payload[\"resource_id\"] = str(video.id)\n jwt_token.payload[\"roles\"] = [random.choice([\"instructor\", \"administrator\"])]\n\n response = self.client.get(\n \"/api/videos/\", HTTP_AUTHORIZATION=f\"Bearer {jwt_token}\"\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(\n response.json(), {\"count\": 0, \"next\": None, \"previous\": None, \"results\": []}\n )", "def inject_secrets(self, secrets: str) -> None:\n self.config.read(secrets)", "def test_get_secrets_retry_stop_after_limit(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=500, content=error_data),\n self._mock_response(status=502, content=error_data),\n self._mock_response(status=500, content=error_data),\n self._mock_response(status=200, content=secret_data)]\n with self.assertRaises(CerberusClientException):\n self.client.get_secrets_data('fake/path')", "def get_db_secrets():\n secret_response = secrets_client.get_secret_value(SecretId=db_secret_name)\n secrets = json.loads(secret_response['SecretString'])\n return secrets", "def test_get_all_tokens_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n\r\n res = self.app.get('api/token?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n for provider in TokenAPI.oauth_providers:\r\n token_name = '%s_token' % provider\r\n assert data.get(token_name) is not None, data", "def test_cannot_logout_with_blacklisted_token(self):\n reply = self.admin_register()\n user = dict(\n username='jonnie',\n password='Andela8'\n )\n resp = self.client.post(\n '/api/v1/login',\n content_type='application/json',\n data=json.dumps(user)\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'Login sucessful!')\n self.assertTrue(reply['token'])\n self.assertEqual(resp.status_code, 200)\n\n token = reply['token']\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are successfully logged out!')\n self.assertEqual(resp.status_code, 200)\n\n resp = self.client.delete(\n '/api/v1/logout',\n headers={'Authorization': 'Bearer {}'.format(token)}\n )\n reply = json.loads(resp.data.decode())\n self.assertEqual(reply['message'], 'You are already logged out!')\n self.assertEqual(resp.status_code, 404)", "def read_namespaced_secret_list_secrets(self, namespace, name, **kwargs):\n\n all_params = ['namespace', 'name', 'pretty', 
'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method read_namespaced_secret_list_secrets\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'namespace' is set\n if ('namespace' not in params) or (params['namespace'] is None):\n raise ValueError(\"Missing the required parameter `namespace` when calling `read_namespaced_secret_list_secrets`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `read_namespaced_secret_list_secrets`\")\n\n resource_path = '/oapi/v1/namespaces/{namespace}/imagestreams/{name}/secrets'.replace('{format}', 'json')\n path_params = {}\n if 'namespace' in params:\n path_params['namespace'] = params['namespace']\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1SecretList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def test_authorization_multiple_accesses(self, mock_init, mock_get_token):\n creds = credentials.Credentials('file')\n # On real init we would have had access_token set to None\n creds.access_token = None\n\n auth = creds.authorization\n mock_get_token.reset_mock()\n # Second access to authorization property shouldn't call\n # get_access_token\n auth2 = creds.authorization\n self.assertEqual('Bearer access_token1', auth2)\n self.assertEqual(auth, auth2)\n self.assertFalse(mock_get_token.called)", "def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,\n boto3_resource, boto3_client, monkeypatch):\n # Call to the DynamoDB client to retrieve the encrypted secret\n monkeypatch.setattr(\"boto3.resource\", boto3_resource)\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n secret = lambdautils.utils.get_secret(key,\n namespace=namespace,\n environment=environment,\n stage=stage)\n assert secret == \"dummy\"\n boto3_client(\"dynamodb\").get_item.assert_called_with(\n TableName=table,\n Key={\"id\": {\"S\": nkey}})\n\n # Call to the KMS client to decrypt the secret\n 
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob=\"encrypted\")", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def test_get_request_on_bucketlist_resource(self):\n\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, 401)", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def test_get_all_accessible_by_hash_list_as_anonymous_with_access_right_does_not_return_user_template(\n self,\n ):\n mock_request = create_mock_request(user=self.anonymous_user)\n templates = template_api.get_all_accessible_by_hash_list(\n [self.fixture.user1_template.hash], request=mock_request\n )\n self.assertTrue(templates.count() == 0)", "def test_authenticated_user_update(self):\r\n with self.flask_app.test_request_context('/'):\r\n for token in self.auth_providers:\r\n assert_raises(Forbidden,\r\n getattr(require, 'token').update,\r\n token)", "def test_access_token_setting(self):\n client = Client()\n assert not client.is_access_token_set()\n client.set_client_access_token(\"FAKE-TOKEN\")\n assert client.is_access_token_set()", "def test_get_all_existing_tokens_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n del user.info['google_token']\r\n\r\n res = self.app.get('api/token?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n assert data.get('twitter_token') is not None, data\r\n assert data.get('facebook_token') is not None, data\r\n assert data.get('google_token') is None, data", "def testGetToken(self):\n # Token is base64 for a json object so always starts with '{\"'\n self.assertTrue(self.dl_object._access_token.startswith('eyJ'))\n self.assertTrue(len(self.dl_object._access_token) > 100)", "def check_if_token_in_blacklist(decrypted_token):\n return (\n decrypted_token[\"jti\"] in BLACKLIST\n ) # if True, go to revoked_token_callback", "def get_secrets(request):\n secret_keys = (\n 'neptune_sql_credentials',\n 'triton_sql_credentials',\n 'saturn_sql_credentials',\n 'qualtrics_credentials',\n 'rserve_service_account_credentials',\n )\n secrets = {s: json.loads(SecretValue.get(s, 'null'))\n for s in secret_keys}\n\n # Add the mandrill api key, which isn't a JSON string.\n if request.get('send_email', None) != 'false':\n secrets['mandrill_api_key'] = SecretValue.get(\n 'mandrill_api_key', '')\n\n return secrets" ]
[ "0.71583724", "0.697432", "0.6764023", "0.6734302", "0.657616", "0.6429628", "0.63544613", "0.6260767", "0.6180775", "0.61721295", "0.61655974", "0.6159696", "0.6127158", "0.6099549", "0.60978544", "0.60681677", "0.60320014", "0.60044026", "0.59975713", "0.59924906", "0.5940487", "0.59056437", "0.5865818", "0.58229667", "0.5821601", "0.57871646", "0.5784048", "0.5770556", "0.57658297", "0.5753944", "0.5732295", "0.5730329", "0.57186913", "0.5715738", "0.57096624", "0.5691288", "0.56798583", "0.5668266", "0.56679326", "0.5656549", "0.56321025", "0.56184304", "0.5603674", "0.56036335", "0.55972767", "0.5587615", "0.5580879", "0.5571337", "0.5532255", "0.5509228", "0.55000865", "0.54998547", "0.5481851", "0.5468794", "0.54609025", "0.54576105", "0.54562163", "0.54552114", "0.54517275", "0.5451073", "0.5437259", "0.5433821", "0.543214", "0.5424843", "0.5405506", "0.54016954", "0.5399893", "0.5391104", "0.5390327", "0.5386687", "0.53848326", "0.536994", "0.53682375", "0.5359191", "0.5352296", "0.53509617", "0.5349142", "0.5345157", "0.5344645", "0.5342789", "0.53382564", "0.5336023", "0.5331889", "0.5326983", "0.5325415", "0.53146434", "0.531326", "0.530817", "0.5304614", "0.5304169", "0.52939624", "0.5292068", "0.52919555", "0.5291525", "0.52856475", "0.5281107", "0.52808", "0.52795327", "0.52771026", "0.5275088" ]
0.77104926
0
Test list secrets successful.
def test_secrets_list_ok(): status_code = 200 response = [{"name": "password", "type": "env"}] env = {"REANA_SERVER_URL": "localhost"} mock_http_response, mock_response = Mock(), Mock() mock_http_response.status_code = status_code mock_response = response reana_token = "000000" runner = CliRunner(env=env) with runner.isolation(): with patch( "reana_client.api.client.current_rs_api_client", make_mock_api_client("reana-server")(mock_response, mock_http_response), ): result = runner.invoke(cli, ["secrets-list", "-t", reana_token]) assert result.exit_code == 0 assert "password" in result.output assert "env" in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def list_secrets(self):\n pass", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "def list(**kwargs):\n cluster_call(\"secret_list\", **kwargs)", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def secrets(self): # pylint: disable=no-self-use\n return []", "def test_vault_get_all_vault_items(self):\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def list_secrets_command(client: KeyVaultClient, args: dict[str, Any]) -> CommandResults:\n vault_name = args['vault_name']\n limit = arg_to_number(args.get('limit')) or DEFAULT_LIMIT\n offset = arg_to_number(args.get('offset')) or DEFAULT_OFFSET\n response = client.list_secrets_request(vault_name, limit, offset)\n outputs = copy.deepcopy(response)\n readable_response = []\n\n for secret in outputs:\n readable_response.append({\n 'secret_id': secret.get('id'), 'managed': secret.get('managed'),\n **convert_attributes_to_readable(secret.get('attributes', {}).copy())\n })\n secret[VAULT_NAME_CONTEXT_FIELD] = vault_name\n secret['attributes'] = convert_time_attributes_to_iso(secret['attributes'])\n\n readable_output = tableToMarkdown(\n f'{vault_name} Secrets List',\n readable_response,\n ['secret_id', 'enabled', 'create_time', 'update_time', 'expiry_time'], removeNull=True,\n headerTransform=string_to_table_header)\n\n command_results = CommandResults(\n outputs_prefix='AzureKeyVault.Secret',\n outputs_key_field='id',\n outputs=outputs,\n raw_response=response,\n readable_output=readable_output,\n ignore_auto_extract=True\n )\n\n return command_results", "def list_secrets(self, MaxResults: int = None, NextToken: str = None) -> Dict:\n pass", "def _all_secrets(cls, *, secretsmanager_client):\n return secretsmanager_client.list_secrets()['SecretList']", "def test_display_all_credentials(self):\n\n\n self.assertEqual(Credential.display_credentials(),Credential.credential_list)", "def list_command(env: Optional[str], config: str) -> None:\n layer = Layer.load_from_yaml(config, env)\n amplitude_client.send_event(amplitude_client.LIST_SECRETS_EVENT)\n gen_all(layer)\n _raise_if_no_k8s_cluster_exists(layer)\n\n configure_kubectl(layer)\n load_kube_config()\n v1 = CoreV1Api()\n api_response = v1.read_namespaced_secret(\"secret\", layer.name)\n if api_response.data is None:\n print(\n \"No secrets found, you can make some by adding them in you opta file k8s service\"\n )\n return\n for key in api_response.data:\n print(key)", "def test_get_asgard_vaults(self):\n pass", "def test_get_list(self):\n pass", "def test_get_yggdrasil_vaults(self):\n pass", "def test_list(self):\n pass", "def test_list(self):\n pass", "def test_display_all_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(), Credentials.credentials_list)", "def test_list_keys(self):\n with patch('iceit.crypto.gnupg.GPG') as 
mock_gpg:\n mock_gpg.return_value = mock_gpg\n encryptor = self.test_init()\n encryptor.list_secret_keys()\n\n mock_gpg.list_keys.assert_called_once_with(True)", "def test_wallets_get_list(self):\n pass", "def test_find_secret_locations():\n list_of_random = [(random.Random(), random.Random()), (random.Random(), random.Random()), (random.Random(),\n random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random()),\n (random.Random(), random.Random()), (random.Random(), random.Random())]\n list_of_truth = [(-11, -3), (20, -13), (1, -3), (-2, -5), (10, 4), (6, -13), (2, -6)]\n secrets = s.find_secret_locations()\n assert type(secrets) == list\n for x in secrets:\n assert type(x) == tuple\n assert secrets == list_of_truth\n assert list_of_random != secrets\n assert len(list_of_random) == len(secrets)", "def list_vault_secrets(schedule_id):\n from mist.api.poller.models import ListVaultSecretsPollingSchedule\n sched = ListVaultSecretsPollingSchedule.objects.get(id=schedule_id)\n sched.owner.secrets_ctl.list_secrets(recursive=True)", "def test_list(self):\n responses.add(\n responses.Response(\n method='GET',\n url='https://connection.keboola.com/v2/storage/buckets',\n json=list_response\n )\n )\n buckets_list = self.buckets.list()\n assert isinstance(buckets_list, list)", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_display_credentials(self):\n\n self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)", "def test_display_all_credential(self):\n self.assertEqual(Credential.display_credential(),Credential.credential_list)", "def test_get_vault_pubkeys(self):\n pass", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n 
# Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_store_multiple_cred(self):\n self.new_cred.save_cred()\n test_cred = Credentials('stackoverflow','Lugaga', 'golfalpharomeo')\n test_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 2)", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def test_list_application_credentials(self):\n self.create_application_credential()\n self.create_application_credential()\n\n app_creds = self._list_app_creds()\n self.assertEqual(2, len(app_creds))", "def _list_known_secret_tokens():\n global _secret_token_map\n\n keys = list(_secret_token_map.keys())\n keys.sort()\n\n ret = ''\n for key in keys:\n if ret != '':\n ret += ', '\n ret += \"'\" + key + \"'\"\n return ret", "def test_list(self):\n response = self.app.get(self.url('tags.list'))\n # Test response...", "def test_display_cred(self):\n self.assertEqual(Credentials.display_cred(), Credentials.cred_list)", "def test_listCheckers(self):\n expected = [credentials.IUsernamePassword, credentials.IUsernameHashedPassword]\n got = self.portal.listCredentialsInterfaces()\n self.assertEqual(sorted(got), sorted(expected))", "def get_db_secrets():\n secret_response = secrets_client.get_secret_value(SecretId=db_secret_name)\n secrets = json.loads(secret_response['SecretString'])\n return secrets", "def test_list_auth(self):\n self.api_client.logout()\n resp = self.api_client.get('/api/metadata/tracks/')\n self.assertEqual(resp.status_code, 403)", "def getsecret(path=None):\n\n if path:\n print(vault.read(path))\n\n else:\n for section in vault.list():\n for key in vault.list(section):\n print(f'{section}{key}')", "def test_show_private_lists_valid(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user1.id\n \n res = c.get(\"/users/tester1/private-lists\")\n\n self.assertEqual(res.status_code, 200)", "def test_user_can_get_list_of_buckets(self):\n with self.client:\n response = self.client.get(\n '/bucketlists/',\n headers=dict(Authorization='Bearer ' + self.get_user_token())\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertIsInstance(data['buckets'], list)\n self.assertEqual(len(data['buckets']), 0)\n self.assertEqual(data['count'], 0)\n self.assertIsInstance(data['count'], int)\n self.assertEqual(data['previous'], None)\n self.assertEqual(data['next'], None)", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all 
the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,\n boto3_resource, boto3_client, monkeypatch):\n # Call to the DynamoDB client to retrieve the encrypted secret\n monkeypatch.setattr(\"boto3.resource\", boto3_resource)\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n secret = lambdautils.utils.get_secret(key,\n namespace=namespace,\n environment=environment,\n stage=stage)\n assert secret == \"dummy\"\n boto3_client(\"dynamodb\").get_item.assert_called_with(\n TableName=table,\n Key={\"id\": {\"S\": nkey}})\n\n # Call to the KMS client to decrypt the secret\n boto3_client('kms').decrypt.assert_called_with(CiphertextBlob=\"encrypted\")", "def test_client_verification_list(self):\n pass", "def test_list_all_bucektlists_for_authenticated_user(self):\n\n response = self.client.get(\n \"/bucketlists/\",\n headers={'Authorization': self.user_token}\n )\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.data, '[]\\n')", "def secretstore():\n pass", "def test_get_value_list_result(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n test_data.append(json.loads('{\"name\": \"Gwen\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(len(result_list) == 2)", "def test_get_list(self):\n result = self.runner.invoke(\n cli,\n [\n *CLI_LOG_OPTION,\n \"config\",\n \"get\",\n \"vendor.fetchai.connections.p2p_libp2p.config.entry_peers\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0\n assert result.output == \"[]\\n\"", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_get_bucketlist_items(self):\r\n email = \"test@test.com\"\r\n _pword = \"test\"\r\n user = User.query.filter_by(email=email).first()\r\n bucketlist = BucketList.query.filter_by(user_id=user.id, id=1).first()\r\n items_no = len(bucketlist.bucketlist_items)\r\n headers = self.authentication_headers(email=email, password=_pword)\r\n response = self.client.get(\r\n '/api/v1/bucketlist/1/items/',\r\n content_type=\"application/json\",\r\n headers=headers,\r\n follow_redirects=True\r\n )\r\n result = json.loads(response.data.decode('utf-8'))\r\n self.assertEqual(len(result), items_no)", "def test_api_get_all_bucketlists(self):\n res = self.client().post('/bucketlist', data=self.bucketlist)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/bucketlist')\n self.assertEqual(res.status_code, 200)\n self.assertIn('Go to vacation', str(res.data))", "def test_list_o_auth_client_authorization(self):\n pass", "def secrets(self):\n return self._secrets", "def test_list(self):\n self.userbase('create', 'alice', 'localhost', SECRET)\n self.userbase('create', 'bob', 'localhost', SECRET)\n output = self.userbase('list')\n self.assertEqual(output, ['alice@localhost', 'bob@localhost'])", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def test_vault_get_all_vault_sections(self):\n pass", "def tearDown(self):\n Credential.credential_list = []", "def 
test_run_cmd_simple_positive_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"echo -n hello 8bca8d2e-1cd6\"\n assert utils.run_cmd(cmd, secrets=secrets) == \"hello *****\"\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_get_request_on_bucketlist_resource(self):\n\n response = self.client.get(\"/bucketlists/\")\n self.assertEqual(response.status_code, 401)", "def test_get_startup_list(self):\n result = self.param_dict.get_startup_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"bar\" in result)", "def test_csc_authorization_request_list_authlist_user(self):\n # Arrange:\n self.client.credentials(\n HTTP_AUTHORIZATION=\"Token \" + self.token_user_authlist.key\n )\n\n # Act:\n url = reverse(\"authlistrequest-list\")\n response = self.client.get(url, format=\"json\")\n\n # Assert\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.data), 3)", "def test_list_o_auth_client(self):\n pass", "async def test_list(self, tmp_path):\n store = FilesystemStorageProvider(tmp_path)\n await store.store(\"example.com\", \"CERT\\n\", \"KEY\\n\")\n await store.store(\"step-3-profit.biz\", \"CERT\\n\", \"KEY\\n\")\n\n assert set(await store.list()) == {\"example.com\", \"step-3-profit.biz\"}", "def testGetRandomList():\n for n in range(1, 10):\n print(\"n: \", n, \" List:\", getRandomList(n))", "def test_kyc_get_legal_list(self):\n pass", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_get_secrets_does_not_retry_on_200(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=200, content=secret_data),\n self._mock_response(status=500, content=error_data)]\n self.client.get_secrets_data('fake/path')", "def test_list_o_auth_access_token(self):\n pass", "def test_get_server_list(self):\n self.assertEqual(sorted(self.checkredis.get_server_list(\"php\",\"qa\")), sorted(['aw1-php70-qa', 'aw1-php80-qa']))", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "async def test_list_user(hass: HomeAssistant, provider, capsys) -> None:\n data = provider.data\n data.add_auth(\"test-user\", \"test-pass\")\n data.add_auth(\"second-user\", \"second-pass\")\n\n await script_auth.list_users(hass, provider, None)\n\n captured = capsys.readouterr()\n\n assert captured.out == \"\\n\".join(\n [\"test-user\", \"second-user\", \"\", \"Total users: 2\", \"\"]\n )", "def test_list(self, env: yaenv.Env):\n _val = env.list('LIST_VAR', separator=':')\n _expect = ['item1', 'item2']\n assert _val == _expect and type(_val) == list\n _expect.append('item3')\n _val = env.list('MISSING', _expect)\n assert _val == _expect and type(_val) == list\n assert env.list('MISSING') is None", "def test_get_direct_access_list(self):\n result = self.param_dict.get_direct_access_list()\n self.assertTrue(isinstance(result, list))\n self.assertEquals(len(result), 2)\n self.assert_(\"foo\" in result)\n self.assert_(\"baz\" in result)", "def 
test_get_token_supply_all_using_get(self):\n pass", "def test_get_sdb_keys(self, mock_get):\n list_data = {\n \"lease_id\": \"\",\n \"renewable\": False,\n \"lease_duration\": 0,\n \"data\": {\"keys\": [\"magic\", \"princess\"]},\n \"wrap_info\": None,\n \"warnings\": None,\n \"auth\": None\n }\n\n mock_resp = self._mock_response(content=json.dumps(list_data))\n mock_get.return_value = mock_resp\n\n keys = self.client.get_sdb_keys('fake/path')\n\n assert_equals(keys[0], 'magic')\n assert_equals(keys[1], 'princess')\n assert_in('X-Cerberus-Client', self.client.HEADERS)\n mock_get.assert_called_with(\n self.cerberus_url + '/v1/secret/fake/path/?list=true',\n headers=self.client.HEADERS\n )", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def test_client_key_secret(self):\n #this adds lti passports to system\n mocked_course = Mock(lti_passports=['lti_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = \"lti_id\"\n key, secret = self.xmodule.get_client_key_secret()\n expected = ('test_client', 'test_secret')\n assert expected == (key, secret)", "def list_secrets_request(self, vault_name: str, limit: int, offset: int) -> list[dict]:\n url = f'https://{vault_name}{self.azure_cloud.suffixes.keyvault_dns}/secrets'\n response = self.http_request(\n 'GET', full_url=url, resource=self.get_vault_resource())\n\n return self.get_entities_independent_of_pages(response, limit, offset, self.get_vault_resource())", "def test_list(self):\n user = UserFactory.create()\n self.client.login(username=user.username, password=PASSWORD)\n response = self.client.get(self.path, content_type=JSON_CONTENT_TYPE)\n\n assert response.status_code == 200\n actual = json.loads(response.content.decode('utf-8'))\n expected = [self._serialize_course(self.course, [self.course_mode])]\n self.assertListEqual(actual, expected)", "def tearDown(self):\n Credentials.cred_list = []", "def test_save_multiple_credential(self):\n self.new_credential.save_credential()\n test_credential = Credential(\"winnie\",\"test\",\"login\",\"winnie\")\n test_credential.save_credential()\n self.assertEqual(len(Credential.credential_list),2)", "def __decrypt_secrets(self, secrets):\n assert self.decrypter != None\n return [self.decrypter.decrypt(secret) for secret in secrets]", "def test_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['lti_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = \"lti_id\"\r\n key, secret = self.xmodule.get_client_key_secret()\r\n expected = ('test_client', 'test_secret')\r\n self.assertEqual(expected, (key, secret))", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def check_for_list(check):", "def test_vault_get_vault_item(self):\n pass", "def tearDown(self):\n Credentials.credential_list = 
[]", "def test_list_roles(self):\n pass", "def test_list_alerts(self):\n pass", "def test_get_list8(self):\n pass", "def test_list(self):\n response = self.client.get('/exercises/')\n expected = {\n 'id': self.exer1.id,\n 'name': self.exer1.name,\n 'description': self.exer1.description,\n 'muscle_group': self.exer1.muscle_group\n }\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.data['results'][0], expected)", "def test_creating_and_getting_a_bucketlist_for_authenticated_user(self):\n\n # test all bucketlists\n response = self.client.post(\n \"/bucketlists/\",\n data=dict(name='test_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n bucketlist = json.loads(response.data)\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(bucketlist[\"name\"], 'test_bucketlist')\n\n # test single bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n single_bucketlist = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n one_bucketlist = json.loads(single_bucketlist.data)\n\n self.assertEqual(single_bucketlist.status_code, 200)\n self.assertEqual(one_bucketlist[\"name\"], 'test_bucketlist')\n\n # test all items in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n self.assertEqual(item.status_code, 200)\n self.assertEqual(one_item[\"name\"], 'test_item')\n\n # test single item in bucketlist\n self.item_id = one_item[\"item_id\"]\n single_item = self.client.get(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\" + str(self.item_id) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n created_item = json.loads(single_item.data)\n\n self.assertEqual(single_item.status_code, 200)\n self.assertEqual(created_item[\"name\"], 'test_item')\n\n # test for deletion of bucketlist\n second_bucketlist = self.client.post(\n \"/bucketlists/\",\n data=dict(name='second_bucketlist'),\n headers={'Authorization': self.user_token}\n )\n\n bucketlist_two = json.loads(second_bucketlist.data)\n\n self.assertEqual(second_bucketlist.status_code, 200)\n self.assertEqual(bucketlist_two[\"name\"], 'second_bucketlist')\n\n delete_response = self.client.delete(\n \"/bucketlists/\" + str(bucketlist_two[\"bucketlist_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n deletion = json.loads(delete_response.data)\n\n self.assertEqual(delete_response.status_code, 200)\n self.assertEqual(deletion[\"message\"], \"Deleted\")\n\n # test for deletion of an item in bucketlist\n delete_item = self.client.delete(\n \"/bucketlists/\" + str(bucketlist[\"bucketlist_id\"]) + \"/items/\" + str(one_item[\"item_id\"]) + \"\",\n headers={'Authorization': self.user_token}\n )\n\n item_deletion = json.loads(delete_item.data)\n\n self.assertEqual(delete_item.status_code, 200)\n self.assertEqual(item_deletion[\"message\"], \"Deleted\")\n\n # test for updating of bucketlist\n self.bucketlist_id = bucketlist[\"bucketlist_id\"]\n bucketlist_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"\",\n data=dict(name='bucketlist_test'),\n headers={'Authorization': self.user_token}\n )\n\n updated_bucketlist = json.loads(bucketlist_update.data)\n\n self.assertEqual(bucketlist_update.status_code, 
200)\n self.assertEqual(updated_bucketlist[\"name\"], 'bucketlist_test')\n\n # test update of item in bucketlist\n item = self.client.post(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\",\n data=dict(name=\"test_item\"),\n headers={'Authorization': self.user_token}\n )\n\n one_item = json.loads(item.data)\n\n item_update = self.client.put(\n \"/bucketlists/\" + str(self.bucketlist_id) + \"/items/\"+ str(one_item[\"item_id\"]) + \"\",\n data=dict(name=\"item_test\"),\n headers={'Authorization': self.user_token}\n )\n\n updated_item = json.loads(item_update.data)\n\n self.assertEqual(item_update.status_code, 200)\n self.assertEqual(updated_item[\"name\"], 'item_test')", "def test_get_value_list_value(self):\n test_data = []\n test_data.append(json.loads('{\"name\": \"Pat\"}'))\n test_data.append(json.loads('{\"last_name\": \"Nat\"}'))\n\n key = \"name\"\n result_list = get_value_list(test_data, key)\n self.assertTrue(result_list == ['Pat'])", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... ==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def test_list_role(self):\n pass", "def test_list_runs(self):\n pass", "def tearDown(self):\n Credentials.credentials_list = []" ]
[ "0.7842545", "0.7786088", "0.7250055", "0.7128142", "0.70077723", "0.69276756", "0.6661624", "0.6632881", "0.64265823", "0.640965", "0.64062554", "0.6279332", "0.6268154", "0.6260419", "0.6230832", "0.6193241", "0.6184802", "0.6167017", "0.6167011", "0.6167011", "0.6139364", "0.6109556", "0.6042342", "0.6037259", "0.6013731", "0.60115093", "0.5968101", "0.59222615", "0.5890465", "0.588985", "0.5881899", "0.58591366", "0.5842982", "0.5840477", "0.5835591", "0.583251", "0.58224", "0.5817169", "0.5814215", "0.5806021", "0.5798683", "0.5789648", "0.5781153", "0.5752322", "0.57464117", "0.57251877", "0.5720316", "0.5718448", "0.5710635", "0.57096577", "0.57085323", "0.5701218", "0.56980544", "0.5691868", "0.5681036", "0.5673859", "0.5639721", "0.5637702", "0.5632813", "0.56184876", "0.5605955", "0.5599569", "0.5594682", "0.55945086", "0.55856156", "0.5582709", "0.55805886", "0.5579902", "0.5573423", "0.55710423", "0.55697507", "0.5562688", "0.55521286", "0.5546033", "0.5545541", "0.5540511", "0.55368876", "0.5534495", "0.5534457", "0.5530773", "0.55251074", "0.55201", "0.55167085", "0.5507068", "0.5503729", "0.5503343", "0.5501008", "0.5499931", "0.54977393", "0.5496807", "0.54960364", "0.5485859", "0.5482838", "0.54774934", "0.5471168", "0.5465709", "0.5463395", "0.5461739", "0.54485047", "0.54479355" ]
0.74387324
2
Test adding secrets with wrong format.
def test_secrets_add_wrong_format(secret): reana_token = "000000" env = {"REANA_SERVER_URL": "localhost"} runner = CliRunner(env=env) message = 'For literal strings use "SECRET_NAME=VALUE" format' result = runner.invoke(cli, ["secrets-add", "-t", reana_token, "--env", secret]) assert result.exit_code == 1 assert message in result.output
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def testSecretKey(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n # It will always return a string, so we must compare to a string.\n assert parameters[\"secretKey\"] == \"12345\"\n # We can't predict what it will produce, so we just check to make sure that it's not null\n assert parameters[\"secretKeyGen\"] != \"null\"\n assert parameters[\"secretKeyGen\"] is not None", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n 
assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... ==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_fails_on_dict(self):\n invalid_credentials_dict_not_array_twine = \"\"\"\n {\n \"credentials\": {\n \"name\": \"MY_API_SECRET_KEY\",\n \"purpose\": \"Token for accessing a 3rd party API service\"\n }\n }\n \"\"\"\n\n with self.assertRaises(exceptions.InvalidTwine):\n Twine(source=invalid_credentials_dict_not_array_twine)", "def test_secrets_add_already_exist():\n status_code = 409\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n message = \"One of the secrets already exists. 
No secrets were added.\"\n mock_http_response = Mock(\n status_code=status_code,\n reason=\"Conflict\",\n json=Mock(return_value={\"message\": \"Conflict\"}),\n )\n rs_api_client_mock = Mock()\n rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response))\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\"reana_client.api.client.current_rs_api_client\", rs_api_client_mock):\n result = runner.invoke(\n cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", \"USER=reanauser\"]\n )\n assert message in result.output\n assert result.exit_code == 1", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_run_cmd_simple_positive_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"echo -n hello 8bca8d2e-1cd6\"\n assert utils.run_cmd(cmd, secrets=secrets) == \"hello *****\"\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def test_run_cmd_simple_negative_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls /tmp/this/file/683c08d7-bc07/isnotthere\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd, secrets=secrets)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that exception was sanitized\n for secret in secrets:\n assert secret not in str(excinfo.value)\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_create_rsa_container_w_invalid_key_names(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n secret_refs = [SecretRef(name='secret{0}'.format(i), ref=url)\n for i, url in enumerate(secret_urls)]\n container_resp = self.behaviors.create_container(\n 
'name', 'rsa', secret_refs)\n self.assertEqual(container_resp.status_code, 400)", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def _wrap_secret(self, val):\n return {\"SecretString\": val}", "def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be", "def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def test_bad_password_type(self):\n for val in [x for x in bad_data_typevals_list if not isinstance(x, basestring) and x is not None]:\n self.request.json_body = deepcopy(self.good_dict)\n self.request.json_body['password'] = val\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'password must be a string'))", "def test_plaintext_and_anoncrypt_raises_error(alice):\n with pytest.raises(ValueError):\n alice.pack({\"test\": \"test\"}, plaintext=True, anoncrypt=True)", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def test_mask_secret_simple_positive():\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls -lh /tmp/8bca8d2e /tmp/683c08d7-bc07 /1cd6-4ec0-8e55\"\n cmd_masked_expected = \"ls -lh /tmp/8bca8d2e /tmp/***** /1cd6-4ec0-8e55\"\n assert utils.mask_secrets(cmd, secrets) == cmd_masked_expected", "def test_password_is_okay():\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('qqqqqqqq') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\twith pytest.raises(Exception):\n\t\tassert password_is_ok('') == Exception('Password either has to contain a digit \\n or password has to contain uppercase \\n or password has to contain lowercase') \n\n\t\"\"\"test that valid passwords work\"\"\"\n\tassert password_is_ok('Q8qqqqqqqq') == True\n\tassert password_is_ok('q8qqqqqqqq') == True\n\tassert password_is_ok('Qqqqqqqqqq') == True\n\tassert password_is_ok('qqqqqqqqqq') == True", "async def add_secret(app: Sanic, secret: str, passphrase: str, ttl: Optional[int]) -> str:\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n secret_key = secrets.token_hex(16)\n\n cipher = fernet.Fernet(key)\n encrypted = cipher.encrypt(secret.encode()).decode()\n\n expires = None\n if ttl:\n expires = datetime.utcnow() + 
timedelta(seconds=ttl)\n\n await app.db.secrets.insert_one({\n 'secret': encrypted,\n 'secret_key': secret_key,\n 'signature': sign,\n 'expires': expires, # for mongo index\n 'ttl': ttl, # for fernet check\n })\n\n return secret_key", "def test_mask_secret_null():\n assert utils.mask_secrets(\"\", None) == \"\"", "def test_add_exchange_empty_secret(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "def test_secret_setting(self):\n url = '/%s/job-types/validation/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n config = copy.deepcopy(self.configuration)\n\n json_data = {\n 'manifest': manifest,\n 'configuration': config\n }\n\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 0)", "def test_args_secret_file(self):\n args = [self.service, self.env, \"--length\", \"10\", \"--secret_file\",\n \"test_data/parameters/test.cnf.parameters.json\", \"--match\", \"test\"]\n context = ef_password.handle_args_and_set_context(args)\n self.assertEqual(context.env, self.env)\n self.assertEqual(context.service, self.service)\n self.assertEqual(context.length, 10)\n self.assertEqual(context.secret_file, \"test_data/parameters/test.cnf.parameters.json\")\n self.assertEqual(context.match, \"test\")", "def test_validate_credentials(self):\n pass", "def test_args_without_secret_file(self):\n args = [self.service, self.env, \"--match\", \"test\"]\n with self.assertRaises(ValueError):\n ef_password.handle_args_and_set_context(args)", "def test_get_secret_from_env(monkeypatch):\n key = str(uuid.uuid4()).replace('-', '.')\n value = str(uuid.uuid4())\n monkeypatch.setenv(key.replace('.', '_').upper(), value)\n secret = lambdautils.utils.get_secret(key)\n assert secret == value", "def test_args_without_match(self):\n args = [self.service, self.env, \"--secret_file\", \"test_data/parameters/test.cnf.parameters.json\"]\n with self.assertRaises(ValueError):\n ef_password.handle_args_and_set_context(args)", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def test_validate_aead_cmp(self):\n secret = pyhsm.aead_cmd.YHSM_YubiKeySecret(self.yk_key, self.yk_uid)\n cleartext = secret.pack()\n self.assertTrue(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, self.aead, cleartext))\n wrong_cleartext = 'X' + cleartext[1:]\n self.assertFalse(self.hsm.validate_aead(self.yk_public_id, self.kh_validate, 
self.aead, wrong_cleartext))", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def test_invalid_json_dumpling(self):\n with pytest.raises(InvalidDumpling):\n validate_dumpling(\"{'invalid_single_quotes': 'value'}\")", "def test_credential_boolean_parsing_failure():\n init_dict = {\"url\": \"http://example.com\", \"ssl_verify\": \"bogus\"}\n with pytest.raises(CredentialError):\n Credentials(init_dict)", "def test_bad_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id_test_client_test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = 'lti_id'\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_client_key_secret()", "def test_bad_client_key_secret(self):\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id_test_client_test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = 'lti_id'\n with pytest.raises(LTIError):\n self.xmodule.get_client_key_secret()", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def secretstore():\n pass", "def test_invalid_password(self):\n self.request.json_body = deepcopy(self.good_dict)\n invalids = ['5horT']\n for val in invalids:\n self.request.json_body['password'] = val\n result = user_id_put_view(self.request)['d']\n self.assertEqual(result, error_dict('api_errors', 'password must be at least 8 characters'))", "def secret() -> None:\n pass", "def test_volumes_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes: 666\n \"\"\"\n )\n\n self._invalid_config(\"must be a dict\")", "def test_bad_input(alice):\n with pytest.raises(TypeError):\n alice.pack(\"blah\")", "def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def secret():\n pass", "def test_decrypt_format(self):\n with pytest.raises(EncryptionError):\n decrypt('message')", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert message in result.output", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n 
with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def test_docker_args_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n docker_args: 666\n \"\"\"\n )\n\n self._invalid_config(\"must be a string\")", "def test_badly_formatted_entry(self):\n with pytest.raises(AssertionError) as exc_info:\n list(parser.generate_commands(yaml.load(\"\"\"\n - key1: 1\n key2: 2\n \"\"\")))\n assert \"Command has multiple top-level keys: ['key1', 'key2']\" in str(exc_info.value)", "async def write_secret(self, name: str, value: str, content_type: str, tags: dict):\n pass", "def _verify_arguments(self):\n # if self.options.action == \"create\":\n # if self.options.encrypt_payload and not self.options.payload_secret:\n # self.parser.error('A secret must be supplied with --payload-secret option when the --encrypt-payload option is in use.')\n pass", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def test_invalid_password(self):\n pass", "def testBcrypt(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n expected = {\"user\": \"pass\"}\n assert parameters[\"bcrypt\"].keys() == expected.keys()\n # The hash isn't repeatable, so we just want to be certain that it's hashed.\n assert parameters[\"bcrypt\"][\"user\"] != expected[\"user\"]\n # We do know that the hash should begin with the following string\n beginningStr = b\"$2b$12$\"\n assert parameters[\"bcrypt\"][\"user\"][:len(beginningStr)] == beginningStr\n # We don't expect any users here.\n assert parameters[\"bcryptNoUser\"] == {}", "def create_secret(logger,namespace,body,v1=None):\n if v1 is None:\n v1 = client.CoreV1Api()\n logger.debug('new client - fn create secret')\n try:\n name = body['metadata']['name']\n except KeyError:\n logger.debug(\"No name in body ?\")\n raise kopf.TemporaryError(\"can not get the name.\")\n try:\n data = body.get('data')\n except KeyError:\n data = ''\n logger.error(\"Empty secret?? could not get the data.\")\n \n secret_type = 'Opaque'\n if 'type' in body:\n secret_type = body['type']\n\n metadata = {'name': name, 'namespace': namespace}\n api_version = 'v1'\n kind = 'Secret'\n body = client.V1Secret(api_version, data , kind, metadata, type = secret_type)\n # kopf.adopt(body)\n logger.info(f\"cloning secret in namespace {namespace}\")\n try:\n api_response = v1.create_namespaced_secret(namespace, body)\n except client.rest.ApiException as e:\n if e.reason == 'Conflict':\n logger.warning(f\"secret `{name}` already exist in namesace '{namespace}'\")\n return 0\n logger.error(f'Can not create a secret, it is base64 encoded? 
data: {data}')\n logger.error(f'Kube exception {e}')\n return 1\n return 0", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_no_vault_secrets(mock_load, localhost_client, gen_input_config):\n mock_load.return_value = gen_input_config(vault_secrets={})\n\n localhost_client.load(\"in.json\")\n\n mock_load.assert_called_with(\"in.json\")", "def test_valid_password_valid():\n assert valid_password(\"123456\")\n assert valid_password(\"abcdef\")", "def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})", "def add_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:\n return self.client.post(self._url(\"%s/secrets\" % id), data=body)", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def add(ctx, secret, name, issuer, period, oath_type, digits, touch, algorithm,\n counter, force):\n\n digits = int(digits)\n\n if not secret:\n while True:\n secret = click.prompt('Enter a secret key (base32)', err=True)\n try:\n secret = parse_b32_key(secret)\n break\n except Exception as e:\n click.echo(e)\n\n ensure_validated(ctx)\n\n _add_cred(ctx, CredentialData(secret, issuer, name, oath_type, algorithm,\n digits, period, counter, touch), force)", "def test_allowed_chars(self):\n hash_val = self.reverse_hash.get_hash('123')\n self.assertEqual(hash_val['error'], 'allowed chars {}'.format(self.reverse_hash.letters))", "async def test_create_bike_bad_value(database, key_name):\n with pytest.raises(ValueError):\n await register_bike(public_key=key_name, master_key=key_name)", "def test_random_password():\n output = sh.random_password()\n assert isinstance(output, str) is True\n assert len(output) == 16", "def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def test_volumes_invalid_volume_type(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n volumes:\n /foo:\n - a list makes no sense\n \"\"\"\n )\n\n self._invalid_config(\"must be string or dict\")", "def test_kms_decrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_decrypt(self.mock_kms, self.secret)", "def test_generate_secret_file(self, mock_context, mock_create_aws, mock_file_open, mock_json, mock_dump):\n context = ef_password.EFPWContext()\n context.env, context.service = self.env, self.service\n context.secret_file = self.secret_file\n context.match = 'password'\n mock_context.return_value = context\n mock_create_aws.return_value = {\"kms\": self.mock_kms}\n mock_json.return_value = {\"params\": {\"test\": {\"password\": \"mock_secret1\"}}}\n ef_password.main()\n 
self.mock_kms.decrypt.assert_not_called()\n self.mock_kms.encrypt.assert_called_once_with(\n KeyId='alias/{}-{}'.format(self.env, self.service),\n Plaintext=\"mock_secret1\".encode()\n )\n mock_file_open.assert_called_with(self.secret_file, 'w')\n handle = mock_file_open()\n mock_dump.assert_called_once_with({'params': {'test': {'password': '{{aws:kms:decrypt,Y2lwaGVyX2Jsb2I=}}'}}},\n handle, indent=2, separators=(',', ': '))\n handle.write.assert_called_with('\\n')", "def test_invalid_password():\r\n auth_register_v1(email='harrypotter@gmail.com',\r\n password='qw3rtyAppl3s@99',\r\n name_first='Harry',\r\n name_last='Potter')\r\n\r\n invalid_password = 'ffffffffF'\r\n with pytest.raises(InputError) as e:\r\n auth_login_v1(email='harrypotter@gmail.com',\r\n password=invalid_password) \r\n assert f'Password {invalid_password} is not correct.' in str(e.value)", "def test_secretbox_enc_dec(test_data, minion_opts):\n # Store the data\n with patch(\"salt.runners.nacl.__opts__\", minion_opts, create=True):\n ret = nacl.keygen()\n assert \"pk\" in ret\n assert \"sk\" in ret\n pk = ret[\"pk\"]\n sk = ret[\"sk\"]\n\n # Encrypt with pk\n encrypted_data = nacl.secretbox_encrypt(\n data=test_data,\n sk=sk,\n )\n\n # Decrypt with sk\n ret = nacl.secretbox_decrypt(\n data=encrypted_data,\n sk=sk,\n )\n assert test_data == ret", "def test_entrypoint_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n entrypoint: 666\n \"\"\"\n )\n\n self._invalid_config(\"must be a string\")", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_valid_password_invalid():\n assert not valid_password(\"\")\n assert not valid_password(\"1234567\")\n assert not valid_password(\"abcdefg\")", "def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,\n boto3_resource, boto3_client, monkeypatch):\n # Call to the DynamoDB client to retrieve the encrypted secret\n monkeypatch.setattr(\"boto3.resource\", boto3_resource)\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n secret = lambdautils.utils.get_secret(key,\n namespace=namespace,\n environment=environment,\n stage=stage)\n assert secret == \"dummy\"\n boto3_client(\"dynamodb\").get_item.assert_called_with(\n TableName=table,\n Key={\"id\": {\"S\": nkey}})\n\n # Call to the KMS client to decrypt the secret\n boto3_client('kms').decrypt.assert_called_with(CiphertextBlob=\"encrypted\")", "def is_secret_string(value):\n if not isinstance(value, basestring):\n return False\n return bool(_secret_string_pattern.match(value))", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def test_mismatched_quote(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('double=\"missing-closing')\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('double=missing-opening\"')\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"single='missing-closing\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"single=missing-opening'\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as 
err:\n _ = yaenv.core.EnvVar(\"both=\\\"mismatched'\")\n assert 'Mismatched quotes' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar(\"both='mismatched\\\"\")\n assert 'Mismatched quotes' in str(err.value)", "def test_long_password():\n expect_error(register, InputError,\n \"abcdef\", \"a\" * (MIN_PASSWORD - 1), \"a\", \"A\", \"a\")", "def test_good_values_for_validate_guid(good_value):\n bcvalidators.validate_guid(good_value)", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def setUp(self):\n \n ### Secret messages:\n text1 = r\"\"\"\n665555432 64 o42o4 o__ __o/4__o__32 __o__564<|\\4/|>32 /v3 |4/>2\\32 />2\\5 64/ \\`o3o'/ \\32/>3 / \\32 \\o42\\o5364\\o/ v\\2/v \\o/32\\32\\o/4v\\42v\\52 64 |2 <\\/>2 |4o32|42<\\42<\\5264/ \\4/ \\32 <\\__2/ \\3_\\o__</3 _\\o__</5264\\o/4\\o/5554 64 |42|32o54 o54 64/ \\4/ \\3<|>532_<|>_532 6542/ \\42\\o__ __o55654o/2 \\o4 |3 |>3 o54 6532 <|__ __|>32 / \\2 / \\3<|>546532 /32 \\32 \\o/2 \\o/3/ \\54653 o/4 \\o32|3 |3 \\o/54653/v42 v\\3/ \\2 / \\3 |54 652 />43 <\\5/ \\5465555432 65555432 6553o4 o5562\\o__ __o__ __o52 <|>32_<|>_52\\o__ __o362 |3 |3 |>32o__ __o/3< >5 o__ __o32|3 |>2 62/ \\2 / \\2 / \\3 /v3 |32|4 o32 /v3 v\\3/ \\2 / \\2 62\\o/2 \\o/2 \\o/3/>3 / \\3 o__/_3<|>3 />32 <\\2 \\o/2 \\o/2 62 |3 |3 |3 \\32\\o/3 |4/ \\3 \\4 /3|3 |362/ \\2 / \\2 / \\3 o32|32|4\\o/32o32 o3/ \\2 / \\2 6532<\\__2/ \\3 o4 |32 <\\__ __/>56553<\\__3 / \\5432 65555432 6655Acrobatic font by Randy Ransom via Figlet6552 '\n\"\"\"\n text2 = r\"\"\"\n62@@@@@@@3@@@@@@3@@@@@@@2@@@2@@@2 @@@@@@2 62@@@@@@@@2@@@@@@@@2@@@@@@@@2@@@2@@@2@@@@@@@2 62@@!2@@@2@@!2@@@2!@@32 @@!2!@@2!@@32 62!@!2@!@2!@!2@!@2!@!32 !@!2@!!2!@!32 62@!@!!@!2 @!@2!@!2!@!32 @!@@!@!2 !!@@!!362!!@!@!3!@!2!!!2!!!32 !!@!!!3 !!@!!!2 62!!: :!!2 !!:2!!!2:!!32 !!: :!!4!:!262:!:2!:!2:!:2!:!2:!:32 :!:2!:!32!:!2 62::2 :::2::::: ::2 ::: :::2 ::2:::2:::: ::2 62 :2 : :2 : :2:3:: :: :2 :2 :::2:: : :26 655 Poison font by Vinney Thai via Figlet6 \n \"\"\"\n\n def decode(str):\n \"\"\"This just decodes the above strings into something \n meaningful.\"\"\"\n s6 = re.sub('6','\\n',str)\n s5 = re.sub('5','44',s6)\n s4 = re.sub('4','33',s5)\n s3 = re.sub('3','22',s4)\n return re.sub('2',' ',s3)\n \n self.item = CurlTestBlobEntry(mytext=decode(text1), mytext2=decode(text2))\n self.item.put();", "def test_invalid_length_for_new_password():\n user = User(email=\"test@foo.bar\", user_type=0)\n user_password = \"ILoveHTML\"\n user.SetPassword(user_password)\n\n new_password1 = \"pwd\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password1)\n assert not user.VerifyPassword(new_password1)\n assert user.VerifyPassword(user_password)\n\n new_password2 = \"I love meatball and tuna.\"\n with pytest.raises(ValueError):\n user.SetPassword(new_password2)\n assert not user.VerifyPassword(new_password2)\n assert 
user.VerifyPassword(user_password)", "def test05_password_special(self):\n self.set_complexity(length=0, numeric=0, upper=0, lower=0, special=5)\n\n invalid = (\n \"A\",\n \"!!!!\",\n \"!A_B@C£D\",\n \"@@PASSWORD123!!\",\n \"ADMIN\",\n \"A1aB2bC3cD4dE5eF6fG7g\",\n )\n self.assertPasswordsInvalid(invalid)\n\n valid = (\n \"_____\",\n \"_!@£$\",\n \"A!B@C£D$F%\",\n \"Tr0ub4dor&3!@£$\",\n \"1234;.,/]1234\",\n \"a!A@0£b$B%0^c&C*0(d)D_0+e\",\n 'password1234\\'\"\"\"\"\"',\n \"p@$$w@*d\",\n )\n self.set_passwords(valid)" ]
[ "0.75044936", "0.707541", "0.70254934", "0.6977966", "0.67734826", "0.67173374", "0.6702236", "0.66481084", "0.6610407", "0.6495734", "0.642665", "0.6363775", "0.63510156", "0.6331783", "0.6269925", "0.6216668", "0.6207249", "0.61964357", "0.61894506", "0.6124794", "0.61163867", "0.6105825", "0.6076612", "0.60725117", "0.6063872", "0.60619366", "0.60597914", "0.5986713", "0.5979764", "0.5932464", "0.5924059", "0.59196836", "0.5915061", "0.58739257", "0.5857486", "0.5856137", "0.5825746", "0.58129334", "0.58064336", "0.580207", "0.57991505", "0.5797409", "0.5783716", "0.57291174", "0.57158524", "0.5715842", "0.5688074", "0.56866467", "0.5676883", "0.56664336", "0.56636024", "0.5660969", "0.56344616", "0.56276447", "0.56224144", "0.56092095", "0.56007344", "0.5594638", "0.5593574", "0.5585742", "0.5573266", "0.5572464", "0.557132", "0.5567149", "0.5564629", "0.5561352", "0.5558137", "0.5551626", "0.55416393", "0.5540442", "0.5531911", "0.55310977", "0.5530682", "0.55287874", "0.5527842", "0.55148506", "0.55125386", "0.55102587", "0.55091137", "0.5506597", "0.55016", "0.5496043", "0.54869217", "0.5485092", "0.54837364", "0.54642457", "0.54602987", "0.5457782", "0.54569924", "0.54563373", "0.5447462", "0.54396224", "0.5423268", "0.540951", "0.5408971", "0.540494", "0.5400119", "0.538521", "0.53827024", "0.5379264" ]
0.8482589
0
Test adding secrets when they already exist.
def test_secrets_add_already_exist():
    status_code = 409
    reana_token = "000000"
    env = {"REANA_SERVER_URL": "localhost"}
    message = "One of the secrets already exists. No secrets were added."
    mock_http_response = Mock(
        status_code=status_code,
        reason="Conflict",
        json=Mock(return_value={"message": "Conflict"}),
    )
    rs_api_client_mock = Mock()
    rs_api_client_mock.api.add_secrets = Mock(side_effect=HTTPError(mock_http_response))
    runner = CliRunner(env=env)
    with runner.isolation():
        with patch("reana_client.api.client.current_rs_api_client", rs_api_client_mock):
            result = runner.invoke(
                cli, ["secrets-add", "-t", reana_token, "--env", "USER=reanauser"]
            )
            assert message in result.output
            assert result.exit_code == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_secrets(self):\n secrets.check_secrets([], argparse.Namespace())", "def test_secret(self, env: yaenv.Env):\n assert env.secret() == 'notsosecret'\n assert 'NEW_SECRET_KEY' not in env\n _secret = env.secret('NEW_SECRET_KEY')\n assert _secret is not None\n assert _secret != env.secret('NEW_SECRET_KEY2')\n del env['NEW_SECRET_KEY'], env['NEW_SECRET_KEY2']", "def test_secret():\r\n try:\r\n straxen.get_secret('somethingnonexistent')\r\n except ValueError:\r\n # Good we got some message we cannot load something that does\r\n # not exist,\r\n pass", "def test_get_secret_4(self):\n self.assertIsNone(\n get_secret(\"plain text, no secrets here\")\n )", "def test_secrets_add_wrong_format(secret):\n reana_token = \"000000\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n message = 'For literal strings use \"SECRET_NAME=VALUE\" format'\n\n result = runner.invoke(cli, [\"secrets-add\", \"-t\", reana_token, \"--env\", secret])\n assert result.exit_code == 1\n assert message in result.output", "def test_create_container_w_duplicate_secret_refs(self):\n\n secret_resp = self.secret_behaviors.create_secret_from_config()\n secret_refs = [SecretRef(name='1', ref=secret_resp.ref),\n SecretRef(name='2', ref=secret_resp.ref)]\n\n container_resp = self.behaviors.create_container(\n 'name', 'generic', secret_refs)\n\n self.assertEqual(container_resp.status_code, 400)", "def test_generate_secret(self):\n random_secret = ef_password.generate_secret(24)\n self.assertEqual(len(random_secret), 24)\n assert not set('[~!@#$%^&*()_+{}\":;\\']+$').intersection(random_secret)", "def test_create_seed_secrets(self):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.COMPLETE_MANIFEST)\n name = 'job-type-post-test-secret'\n manifest['job']['name'] = name\n manifest['job']['interface']['settings'] = [\n {\n 'name': 'VERSION',\n 'secret': True\n },\n {\n 'name': 'DB_HOST',\n 'secret': True\n },\n {\n 'name': 'DB_PASS',\n 'secret': True\n }\n ]\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': False,\n 'max_scheduled': 1,\n 'docker_image': 'my-job-1.0.0-seed:1.0.0',\n 'manifest': manifest,\n 'configuration': self.configuration\n }\n\n with patch.object(SecretsHandler, '__init__', return_value=None), \\\n patch.object(SecretsHandler, 'set_job_type_secrets', return_value=None) as mock_set_secret:\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED, response.content)\n\n job_type = JobType.objects.filter(name=name).first()\n\n results = json.loads(response.content)\n self.assertEqual(results['id'], job_type.id)\n\n # Secrets sent to Vault\n secrets_name = '-'.join([results['name'], results['version']]).replace('.', '_')\n secrets = json_data['configuration']['settings']\n mock_set_secret.assert_called_once_with(secrets_name, secrets)\n\n #Secrets scrubbed from configuration on return\n self.assertEqual(results['configuration']['settings'], {})", "def test_invalid_secrets(self):\n s = SecretsChecker(stage='dev')\n # Override the email field obtained from terraform\n s.email = ['nonsense']\n with self.assertRaises(ValueError):\n s.run()", "def test_get_secret_5(self):\n\n # notice space between SECRET and immediately following\n # curly bracket.\n self.assertIsNone(\n get_secret(\"SECRET { ...}\")\n )\n\n # typo in keyword SECRET\n self.assertIsNone(\n get_secret(\"SECRIT { ...}\")\n )\n\n # missing closing bracket\n self.assertIsNone(\n 
get_secret(\"SECRET { ...\")\n )\n\n # curly brackets missing\n self.assertIsNone(\n get_secret(\"SECRET ...\")\n )", "def test_secrets() -> Secrets:\n from dotenv import load_dotenv\n from os import getenv\n from pathlib import Path\n env_path = Path('.') / '.env.testing'\n load_dotenv(dotenv_path=env_path)\n return Secrets(\n google_id_token=getenv(\"GOOGLE_ID_TOKEN\"),\n google_user_id=getenv(\"GOOGLE_USER_ID\")\n )", "def test_read_namespaced_secret_list_secrets(self):\n pass", "def test_secret():\n # TODO: split this up and write better tests\n\n @make_config()\n class Config:\n \"\"\"The test configuration for configurave.\"\"\"\n\n root_url: str = ce(\n comment=\"The root url configuration for the application\",\n description=\"A long ass multiline description goes here about all the options\"\n \" you could potentially decide upon using.\",\n )\n token: str = ce(\n comment=\"The discord token for your bot\",\n secret=True,\n )\n\n c = Config(\n sources=[ # in order of priority\n \"tests/test-config/secrets.toml\",\n ]\n )\n\n assert \"token\" in str(c._crve_configs)\n assert c.token == \"secret token\"\n\n default_toml = (\n \"# The test configuration for configurave.\\n\"\n \"# This is an autogenerated default configuration file written by Configurave\\n\\n\"\n \"# (str): The root url configuration for the application\\n\"\n \"# root_url = \\n\"\n \"# Description: A long ass multiline description goes here about all the\\n\"\n \"# options you could potentially decide upon using.\\n\"\n \"\\n\"\n \"# (str): The discord token for your bot\\n\"\n \"# Secret: value will not be exported\\n\"\n \"token =\\n\"\n )\n assert c.defaults_toml() == default_toml", "def secretstore():\n pass", "def apply_secrets():\n for name, value in Secrets.__dict__.items():\n if name[0] != '_':\n os.environ[name] = value", "def test_diff_is_not_shown_for_keys_in_secrets(tmp_path, monkeypatch, capsys):\n monkeypatch.chdir(\"examples/tutorial-secrets\")\n if os.path.exists(\"work\"):\n shutil.rmtree(\"work\")\n try:\n out, _ = cmd(\"./batou deploy tutorial\")\n finally:\n shutil.rmtree(\"work\")\n assert out == Ellipsis(\n \"\"\"\\\nbatou/2... (cpython 3...)\n================================== Preparing ===================================\nmain: Loading environment `tutorial`...\nmain: Verifying repository ...\nmain: Loading secrets ...\n================== Connecting hosts and configuring model ... 
==================\nlocalhost: Connecting via local (1/1)\n================================== Deploying ===================================\nlocalhost: Scheduling component hello ...\nlocalhost > Hello > File('work/hello/hello') > Presence('hello')\nlocalhost > Hello > File('work/hello/hello') > Content('hello')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Presence('other-secrets.yaml')\nlocalhost > Hello > File('work/hello/other-secrets.yaml') > Content('other-secrets.yaml')\nNot showing diff as it contains sensitive data,\nsee ...diff for the diff.\n=================================== Summary ====================================\nDeployment took total=...s, connect=...s, deploy=...s\n============================= DEPLOYMENT FINISHED ==============================\n\"\"\"\n ) # noqa: E501 line too long", "def _secret_not_in_order():\n pecan.abort(400, u._(\"Secret metadata expected but not received.\"))", "def test_add_exchange_empty_secret(self):\n exchange_name = \"Testing\"\n api_key = \"Testing\"\n secret = \"\"\n new_exchange = self.app.add_exchange(exchange_name, api_key, secret)\n self.assertIn(new_exchange[0], \"error\")", "def test_mask_secret_nomatch():\n secrets = [\n \"8bca8d2e-1cd6-4ec0-8e55-9614aa01cf88\",\n \"683c08d7-bc07-4d72-b098-46ef00b74aec\",\n ]\n assert utils.mask_secrets(\"ls -lh /tmp\", secrets) == \"ls -lh /tmp\"", "def secrets():\n click.echo(STEP_PATH / \"secrets\")", "def test_secrets_list_server_no_token():\n message = \"Please provide your access token\"\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n runner = CliRunner(env=env)\n result = runner.invoke(cli, [\"secrets-list\"])\n assert result.exit_code == 1\n assert message in result.output", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def verify_secret(prop_name, value):\n\n hashed = hashlib.sha256(value.encode('UTF-8')).hexdigest()\n has_must_be = RUN_CONFIG.get(prop_name)\n\n return hashed == has_must_be", "def testSecretKey(loggingMixin, yamlConfigForParsingPlugins):\n parameters = yamlConfigForParsingPlugins\n # It will always return a string, so we must compare to a string.\n assert parameters[\"secretKey\"] == \"12345\"\n # We can't predict what it will produce, so we just check to make sure that it's not null\n assert parameters[\"secretKeyGen\"] != \"null\"\n assert parameters[\"secretKeyGen\"] is not None", "def test_credential_exists(self):\n self.new_credentials.save_attributes()\n test_credential = Credentials(\"Instagram\", \"@zephonmakale\", \"123456\" )\n test_credential.save_attributes()\n\n credential_exist = Credentials.credentials_exist(\"Instagram\")\n self.assertTrue(credential_exist)", "def test_credentials(self):\n twine = Twine(source=self.VALID_CREDENTIALS_TWINE)\n with mock.patch.dict(\n os.environ,\n {\"SECRET_THE_FIRST\": \"a value\", \"SECRET_THE_SECOND\": \"another value\", \"SECRET_THE_THIRD\": \"value\"},\n ):\n twine.validate_credentials()\n self.assertEqual(os.environ[\"SECRET_THE_THIRD\"], \"value\")", "def test_credential_exist(self):\n self.new_credentials.save_creds()\n account_found = Credentials.search_by_account(\"Instagram\")\n\n self.assertTrue(account_found)", "def test_add_with_existing_key(self):\n self.client.login(username='admin', password='admin')\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 201\n self.client.login(user='admin', password='admin')\n response = 
self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 409", "def secret() -> None:\n pass", "def test_keyring_exists_without_keyring(self, mock_keyring):\n mock_keyring.get_keyring.return_value = False\n self.assertFalse(keyring_exists())", "def test_create_rsa_container_w_invalid_key_names(self):\n secret_urls = self.secret_behaviors.create_n_secrets(3)\n secret_refs = [SecretRef(name='secret{0}'.format(i), ref=url)\n for i, url in enumerate(secret_urls)]\n container_resp = self.behaviors.create_container(\n 'name', 'rsa', secret_refs)\n self.assertEqual(container_resp.status_code, 400)", "def check_secret_key(cls):\n logger.info(\"attempting to check the secret key...\")\n try:\n s = cls.objects.filter(is_active=True).first()\n if s and s.secret_key == \"not-a-very-good-secret\":\n chars = \"abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)\"\n s.secret_key = get_random_string(50, chars)\n s.save()\n except DBError:\n logger.warning(\"db not ready (error on {} model)\".format(cls.__name__))\n return", "def test_get_secret_from_env(monkeypatch):\n key = str(uuid.uuid4()).replace('-', '.')\n value = str(uuid.uuid4())\n monkeypatch.setenv(key.replace('.', '_').upper(), value)\n secret = lambdautils.utils.get_secret(key)\n assert secret == value", "def test_aiven_creds_exist(self):\n assert os.environ[\"AIVEN_API_URL\"] is not None\n assert os.environ[\"AIVEN_TOKEN\"] is not None", "def is_shared_secret_from_secret_name(soa_dir: str, secret_name: str) -> bool:\n secret_path = os.path.join(\n soa_dir, SHARED_SECRET_SERVICE, \"secrets\", f\"{secret_name}.json\"\n )\n return os.path.isfile(secret_path)", "def test_run_cmd_simple_negative_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"ls /tmp/this/file/683c08d7-bc07/isnotthere\"\n with pytest.raises(CommandFailed) as excinfo:\n utils.run_cmd(cmd, secrets=secrets)\n assert \"No such file or directory\" in str(excinfo.value)\n # check that exception was sanitized\n for secret in secrets:\n assert secret not in str(excinfo.value)\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args", "async def add_secret(app: Sanic, secret: str, passphrase: str, ttl: Optional[int]) -> str:\n\n key = get_fernet_key(app, passphrase)\n\n sign = hmac.digest(key=key, msg=passphrase.encode(), digest='sha512').hex()\n secret_key = secrets.token_hex(16)\n\n cipher = fernet.Fernet(key)\n encrypted = cipher.encrypt(secret.encode()).decode()\n\n expires = None\n if ttl:\n expires = datetime.utcnow() + timedelta(seconds=ttl)\n\n await app.db.secrets.insert_one({\n 'secret': encrypted,\n 'secret_key': secret_key,\n 'signature': sign,\n 'expires': expires, # for mongo index\n 'ttl': ttl, # for fernet check\n })\n\n return secret_key", "def test_no_vault_secrets(mock_load, localhost_client, gen_input_config):\n mock_load.return_value = gen_input_config(vault_secrets={})\n\n localhost_client.load(\"in.json\")\n\n mock_load.assert_called_with(\"in.json\")", "def test_secrets_list_server_not_reachable():\n message = \"REANA client is not connected to any REANA cluster.\"\n reana_token = \"000000\"\n runner = CliRunner()\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 1\n assert 
message in result.output", "def test_client_key_secret_not_provided(self):\r\n\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id:test_client:test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n #set another lti_id\r\n self.xmodule.lti_id = \"another_lti_id\"\r\n key_secret = self.xmodule.get_client_key_secret()\r\n expected = ('','')\r\n self.assertEqual(expected, key_secret)", "def test_client_key_secret_not_provided(self):\n\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id:test_client:test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n # set another lti_id\n self.xmodule.lti_id = \"another_lti_id\"\n key_secret = self.xmodule.get_client_key_secret()\n expected = ('', '')\n assert expected == key_secret", "def test_load_no_vault_with_secrets(mock_load, gen_input_config):\n mock_load.return_value = gen_input_config()\n\n input_config_edited = gen_input_config()\n del input_config_edited[\"vault_secrets\"]\n\n client = VaultAnyConfig()\n\n with pytest.warns(UserWarning):\n assert client.load(\"in.json\") == input_config_edited\n\n mock_load.assert_called_with(\"in.json\")", "def test_add_context_duplicate():\n\n with pytest.raises(DuplicateContextKeyError):\n application_services.add_context('context2', 'value2')\n application_services.add_context('context2', 'value4')", "def _secrets(self, credstash):\n if credstash == \"true\":\n return True\n else:\n return False", "def test_store_existing_cred(self):\n self.new_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 1)", "def secret():\n pass", "def populate_secrets_pre(vault_secret_keys, core_auth_cookies, extra_fns):\n\n for path in vault_secret_keys:\n vault.ensure_secret_key(path)\n\n for fn in extra_fns:\n if fn:\n fn(vault, config, random_secret)\n\n for name in core_auth_cookies:\n vault.ensure_secret(f'liquid/{name}/cookie', lambda: {\n 'cookie': random_secret(64),\n })", "def add(key, value, **kwargs):\n cluster_call(\"secret_add\", key=key, value=value, **kwargs, prefix=f\"Adding secret {key}...\", postfix=\"added.\")", "def test_mask_secret_nosecrets():\n assert utils.mask_secrets(\"ls -lh /tmp\", None) == \"ls -lh /tmp\"", "def test_run_cmd_simple_positive_with_secrets(caplog):\n caplog.set_level(logging.DEBUG)\n secrets = [\"8bca8d2e-1cd6\", \"683c08d7-bc07\"]\n cmd = \"echo -n hello 8bca8d2e-1cd6\"\n assert utils.run_cmd(cmd, secrets=secrets) == \"hello *****\"\n # check that logs were satinized as well\n for secret in secrets:\n assert secret not in caplog.text", "def test_resource_exists(self):\r\n\t\tself.assertTrue(self._configuration_.resources().has_key(\"AddWordTaskRepeat\") and self._configuration_.resources().has_key(\"RemoveWordTaskRepeat\"))", "def test_get_secret_3(self):\n\n text_subject = \"Important Message\"\n text_body = \"\"\"\n This is body of plain text message of some email\n \"\"\"\n self.assertIsNone(\n # no secret in the text\n get_secret([text_subject, text_body])\n )", "def prepare_secrets(c, rebuild_venv=False, no_secret_cache=False):\n cli_tasks.prepare_secrets.run(c, rebuild_venv, no_secret_cache)", "def setup_keys():\n if os.path.isfile(\"key.txt\"):\n message = \"Key already generated\"\n else:\n secret = secrets.token_urlsafe(64)\n message = \"Secret generated 
and saved in key.txt\"\n with open(\"key.txt\", \"w\") as fd:\n fd.write(secret)\n return json.dumps({'message': message})", "def create_secret(secret_name, secret_value, environment):\n environment.add_cleanup(\n environment.cfy.secrets.delete,\n kwargs={\n 'secret_name': secret_name,\n },\n )\n environment.cfy.secrets.create(\n secret_name=secret_name,\n secret_value=secret_value,\n )", "def test_keyring_exists_with_keyring(self, mock_keyring):\n mock_keyring.get_keyring.return_value = True\n self.assertTrue(keyring_exists())", "def inject_secrets(self, secrets: str) -> None:\n self.config.read(secrets)", "def test_vault_create_new_vault_item(self):\n pass", "def secrets(self): # pylint: disable=no-self-use\n return []", "def test_store_multiple_cred(self):\n self.new_cred.save_cred()\n test_cred = Credentials('stackoverflow','Lugaga', 'golfalpharomeo')\n test_cred.save_cred()\n self.assertEqual(len(Credentials.cred_list), 2)", "def test_create_application_credential(self):\n app_cred = self.create_application_credential()\n\n # Check that the secret appears in the create response\n secret = app_cred['secret']\n\n # Check that the secret is not retrievable after initial create\n app_cred = self.non_admin_app_creds_client.show_application_credential(\n user_id=self.user_id,\n application_credential_id=app_cred['id']\n )['application_credential']\n self.assertNotIn('secret', app_cred)\n\n # Check that the application credential is functional\n _, resp = self.non_admin_token.get_token(\n app_cred_id=app_cred['id'],\n app_cred_secret=secret,\n auth_data=True\n )\n self.assertEqual(resp['project']['id'], self.project_id)", "def test_check_keys_exist_for_provider_string(self):\n\n secret_key = None\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def test_new_config_already_exists(self, context):\n\n context.config_exists.return_value = True\n\n runner = CliRunner()\n result = runner.invoke(cli_node_new_configuration, [\n \"--name\", \"some-name\",\n \"--environment\", \"application\"\n ])\n\n # check that error is produced\n self.assertEqual(result.output[:7], \"[error]\")\n\n # check non-zero exit code\n self.assertEqual(result.exit_code, 1)", "def test_search_duplicate(self):\n self.new_credential.credential_create()\n test_credentials = Credentials(\"MySpace\", \"Ghostke99\", \"daimaMkenya001\")\n test_credentials.credential_create()\n search_duplicate = Credentials.search_duplicate(\"MySpace\")\n self.assertTrue(search_duplicate)", "async def test_create_bike_bad_value(database, key_name):\n with pytest.raises(ValueError):\n await register_bike(public_key=key_name, master_key=key_name)", "def test_create_container_w_null_secret_name(self):\n responses = self.behaviors.create_container_with_secret(\n name='name', secret_name=None)\n secret_resp, container_resp = responses\n self._check_container_create_response(container_resp)\n\n get_resp = self.container_client.get_container(container_resp.ref)\n self._check_container_get_resp(get_resp, ref=container_resp.ref,\n name='name', type='generic',\n num_secrets=1)\n\n # verify the secret's name is returned correctly\n secret_ref = get_resp.entity.secret_refs[0]\n self.assertEqual(secret_ref.name, None)", "async def test_invalid_insert_user_duplicate_key(database):\n await database.setup_database(reset=True)\n await database.insert_user(\"\")\n for 
user_id in zip([\"1\" for _ in range(0,10)]):\n try:\n await database.insert_user(user_id=user_id)\n assert False\n except:\n assert True\n await database.close_pool()", "def test_app_is_testing(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertFalse(app.config['TESTING'])", "def test_unauthorized_add(self):\n response = self.client.post('/add/', {'url': 'http://example.com', 'key': 'example'})\n # TODO status 403", "def test_save_creds(self):\n self.new_credentials.save_creds()\n self.assertEqual(len(Credentials.credential_list),1)", "def test_app_is_testing(self):\n self.assertFalse(app.config['SECRET_KEY'] == 'my_precious')\n self.assertTrue(app.config['DEBUG'])\n self.assertTrue(app.config['TESTING'])", "def test_secrets_list_ok():\n status_code = 200\n response = [{\"name\": \"password\", \"type\": \"env\"}]\n env = {\"REANA_SERVER_URL\": \"localhost\"}\n mock_http_response, mock_response = Mock(), Mock()\n mock_http_response.status_code = status_code\n mock_response = response\n reana_token = \"000000\"\n runner = CliRunner(env=env)\n with runner.isolation():\n with patch(\n \"reana_client.api.client.current_rs_api_client\",\n make_mock_api_client(\"reana-server\")(mock_response, mock_http_response),\n ):\n result = runner.invoke(cli, [\"secrets-list\", \"-t\", reana_token])\n assert result.exit_code == 0\n assert \"password\" in result.output\n assert \"env\" in result.output", "def cryptsetup_add_password(config, slot):\n\n (password, mainslot) = config.first_password()\n\n pwfile = os.path.join(iotests.test_dir, \"passwd.txt\")\n with open(pwfile, \"w\") as fh:\n fh.write(config.passwords[slot])\n\n try:\n args = [\"luksAddKey\", config.image_path(),\n \"--key-slot\", slot,\n \"--key-file\", \"-\",\n \"--iter-time\", \"10\",\n pwfile]\n\n cryptsetup(args, password)\n finally:\n os.unlink(pwfile)", "def check_key_exists(self) -> None:\n omitted_configs = self.necessary_config_names - set(self.config.keys())\n assert len(omitted_configs) == 0, omitted_configs", "def test_environment_credentials(main_container):\n # Check for credential variables.\n # These are not required for pre-built images.\n assert (\n \"FOUNDRY_USERNAME\" in os.environ\n ), \"FOUNDRY_USERNAME was not in the environment\"\n assert (\n \"FOUNDRY_PASSWORD\" in os.environ\n ), \"FOUNDRY_PASSWORD was not in the environment\"", "def test_add_with_existing_item(self):\n settings.TEST_SETTING_LIST = ['item1']\n wrapper = SettingListWrapper('TEST_SETTING_LIST', 'test setting list')\n wrapper.add('item1')\n\n self.assertEqual(settings.TEST_SETTING_LIST, ['item1'])\n self.assertEqual(wrapper.ref_counts.get('item1'), 2)", "def test_get_secrets_does_not_retry_on_200(self, mget):\n error_data = json.dumps({\"error_id\": \"123\", \"errors\": []})\n secret_data = json.dumps({\n \"data\": {\n \"sushi\": \"ikenohana\",\n \"ramen\": \"yuzu\"\n }\n })\n\n mget.side_effect = [self._mock_response(status=200, content=secret_data),\n self._mock_response(status=500, content=error_data)]\n self.client.get_secrets_data('fake/path')", "def add_secrets(self, id: str, body: dict[str, Any]) -> dict[str, Any]:\n return self.client.post(self._url(\"%s/secrets\" % id), data=body)", "def add_ipsec_secrets(self, **kwargs):\r\n\r\n if 'auth_type' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"Mandatory Argument 'auth_type' is missing\")\r\n raise Exception(\"Mandatory Argument 'auth_type' is missing\")\r\n auth_type = kwargs.get('auth_type')\r\n 
ipsec_secret_file = self.conf_dir + '/ipsec.secrets'\r\n result = self.linux_handle.shell(command='ls ' + ipsec_secret_file).response()\r\n if not re.search(r'No such file or directory', result):\r\n self.linux_handle.log(\"Moving existing %s to %s.orig\" % (ipsec_secret_file, ipsec_secret_file))\r\n cmd = \"mv -f %s %s.orig\" % (ipsec_secret_file, ipsec_secret_file)\r\n self.linux_handle.shell(command=cmd)\r\n line = ''\r\n if auth_type.lower() == 'PSK'.lower():\r\n if 'preshared_key' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\"For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n raise Exception(\"Missing argument: For auth_type=psk, argument 'preshared_key' is mandatory\")\r\n if 'host_id' in kwargs:\r\n line = kwargs.get('host_id') + ' '\r\n if 'peer_id' in kwargs:\r\n line = line + ' ' + kwargs.get('peer_id') + ' '\r\n line = line + ' : PSK \"' + kwargs.get('preshared_key') + '\"'\r\n else:\r\n if 'local_cert' not in kwargs:\r\n self.linux_handle.log(level='ERROR', message=\" 'local_cert' is mandatory argument\")\r\n raise Exception(\"'local_cert' is mandatory argument\")\r\n line = ' : ' + auth_type.upper() + ' ' + kwargs.get('local_cert')\r\n if 'passphrase' in kwargs:\r\n line = line + ' ' + kwargs.get('passphrase')\r\n self.linux_handle.log('Adding %s into secrets file' %line)\r\n\r\n xauth = None\r\n if 'xauth_user' in kwargs and 'xauth_pwd' in kwargs:\r\n xauth = kwargs.get('xauth_user') + ' : XAUTH ' + kwargs.get('xauth_pwd')\r\n\r\n with open('ipsec.secrets', 'w') as out:\r\n out.write(line + \"\\n\")\r\n if xauth is not None:\r\n out.write(xauth + \"\\n\")\r\n out.close()\r\n\r\n if not self.linux_handle.upload(local_file='ipsec.secrets', remote_file=ipsec_secret_file,\r\n protocol='scp'):\r\n self.linux_handle.log(\"Uploading ipsec.secrets file failed\")\r\n raise Exception(\"Uploading ipsec.secrets file failed\")\r\n\r\n self.linux_handle.log(\"Updating ipsec.secrets file successfull\")\r\n return True", "async def test_create_bike_bad_master(database):\n with pytest.raises(BadKeyError):\n await register_bike(random_key(32), \"BADBAD\")", "def test_bad_client_key_secret(self):\n # this adds lti passports to system\n mocked_course = Mock(lti_passports=['test_id_test_client_test_secret'])\n modulestore = Mock()\n modulestore.get_course.return_value = mocked_course\n runtime = Mock(modulestore=modulestore)\n self.xmodule.runtime = runtime\n self.xmodule.lti_id = 'lti_id'\n with pytest.raises(LTIError):\n self.xmodule.get_client_key_secret()", "def test_wallets_put(self):\n pass", "def secrets(self, secrets):\n\n self._secrets = secrets", "def create_secrets(file):\n with open(file, 'w') as secfile:\n secfile.write((\n '# _credentials: Maintain your credentials below. Do not remove unused fields.\\n'\n 'USER = \\'\\'\\nPASSWORD = \\'\\'\\n# _courses: Define which courses should be crawled\\nCOURSES = []\\n\\n'\n '# local: Required if you want to download files and store them in a local folder'\n ' (for example in the Dropbox client folder)\\n'\n 'PATH = \\'\\' # Path to the destination folder\\n\\n'\n '# dropbox (-d): Required if you want to download files and upload them to Dropbox\\n'\n 'DROPBOX_TOKEN = \\'\\' # Personal Dropbox API token\\n'\n 'PATH_IN_DB = \\'\\' # Destination path of downloaded files within Dropbox\\n'))\n print('File app_secrets.py was created. 
Please maintain your credentials.')\n sys.exit(1)", "def _delete_all_secrets(self):\n for secret_ref in self.created_entities['secret']:\n self.barbicanclient.secrets.delete(secret_ref, True)", "def test_bad_client_key_secret(self):\r\n #this adds lti passports to system\r\n mocked_course = Mock(lti_passports = ['test_id_test_client_test_secret'])\r\n modulestore = Mock()\r\n modulestore.get_course.return_value = mocked_course\r\n runtime = Mock(modulestore=modulestore)\r\n self.xmodule.descriptor.runtime = runtime\r\n self.xmodule.lti_id = 'lti_id'\r\n with self.assertRaises(LTIError):\r\n self.xmodule.get_client_key_secret()", "def test_creation(sqlite_db):\n new_pass = \"TheNewPassword\"\n site = \"www.example.com\"\n response = smm.create_passwd(site, new_pass)\n assert response\n # Make sure we can't create twice.\n bad_response = smm.create_passwd(site, new_pass)\n assert not bad_response", "def test_get_yggdrasil_vaults(self):\n pass", "def mock_secret_config(\n ) -> SecretConfig:\n \n \n object_properties = [\n attr_name for attr_name in dir(SecretConfig)\n if not attr_name.startswith(\"_\")\n ]\n \n mock_entity = SecretConfig()\n\n mock_entity.reddit_client_id = \"mockclientid\"\n # pragma: allowlist nextline secret\n mock_entity.reddit_client_secret = \"mockvalue\" \n # pragma: allowlist nextline secret\n mock_entity.reddit_password = \"mockvalue2\" \n mock_entity.reddit_username = \"mockvalue3\" \n\n for object_property in object_properties:\n assert getattr(\n mock_entity, object_property\n ) is not None, (\n f\"mock_secret_config = fixture missing {object_property}\"\n )\n \n return(deepcopy(mock_entity))", "def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,\n boto3_resource, boto3_client, monkeypatch):\n # Call to the DynamoDB client to retrieve the encrypted secret\n monkeypatch.setattr(\"boto3.resource\", boto3_resource)\n monkeypatch.setattr(\"boto3.client\", boto3_client)\n secret = lambdautils.utils.get_secret(key,\n namespace=namespace,\n environment=environment,\n stage=stage)\n assert secret == \"dummy\"\n boto3_client(\"dynamodb\").get_item.assert_called_with(\n TableName=table,\n Key={\"id\": {\"S\": nkey}})\n\n # Call to the KMS client to decrypt the secret\n boto3_client('kms').decrypt.assert_called_with(CiphertextBlob=\"encrypted\")", "def get_secret(setting, secrets=secrets):\n return secrets[setting]", "def register_secret(self, secret):\n hashlock = sha3(secret)\n\n if not self.is_known(hashlock):\n raise ValueError('secret does not correspond to any hashlock')\n\n if self.is_locked(hashlock):\n pendinglock = self.hashlocks_to_pendinglocks[hashlock]\n del self.hashlocks_to_pendinglocks[hashlock]\n\n self.hashlocks_to_unclaimedlocks[hashlock] = UnlockPartialProof(\n pendinglock.lock,\n pendinglock.lockhashed,\n secret,\n )", "def test_kyc_put_legal_share_holder(self):\n pass", "def test_get_key_not_defined_yet(self):\n storage = SessionStorage()\n\n self.assertNotIn('key1', storage)\n s1 = storage['key1']\n self.assertIn('key1', storage)\n\n self.assertNotIn('key2', storage)\n s2 = storage['key2']\n self.assertIn('key2', storage)\n\n self.assertIsNot(s1, s2)", "def test_06_put(self, mock_readall, mock_writeall, mock_shred,\n mock_config, mock_verks):\n self._init()\n udocker.Config = mock_config\n udocker.Config.tmpdir = \"/tmp\"\n kstore = udocker.KeyStore(\"filename\")\n self.assertFalse(kstore.put(\"\", \"\", \"\"))\n mock_readall.return_value = dict()\n kstore.put(self.url, self.auth, self.email)\n 
mock_writeall.assert_called_once_with(self.credentials)", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "async def list_secrets(self):\n pass", "def test_check_keys_exist_for_provider_list_no_keys(self):\n\n secret_key = [None, None]\n provider_id = 'asu'\n\n serializer = serializers.CreditProviderCallbackSerializer()\n with pytest.raises(PermissionDenied):\n serializer._check_keys_exist_for_provider(secret_key, provider_id) # lint-amnesty, pylint: disable=protected-access", "def with_secrets(self, kind, source):\n\n if kind == \"vault\" and isinstance(source, list):\n source = {\"project\": self.metadata.project, \"secrets\": source}\n\n self.spec.secret_sources.append({\"kind\": kind, \"source\": source})\n return self" ]
[ "0.7291631", "0.7147517", "0.6715984", "0.6684597", "0.66473454", "0.66376173", "0.6494984", "0.6443271", "0.6437272", "0.6393369", "0.629985", "0.6255083", "0.62139803", "0.61440545", "0.602208", "0.5996874", "0.5979041", "0.5953571", "0.59042215", "0.58320487", "0.58229697", "0.58182454", "0.5770143", "0.575439", "0.57336324", "0.57244754", "0.57242984", "0.57100505", "0.5703069", "0.5698558", "0.5694211", "0.569121", "0.5689121", "0.5686997", "0.56844294", "0.5668394", "0.5641086", "0.5639702", "0.5632741", "0.5624648", "0.5618119", "0.5614075", "0.56123406", "0.56104416", "0.56031644", "0.55983824", "0.55952275", "0.5589923", "0.5569582", "0.5566865", "0.5558933", "0.55541426", "0.55464184", "0.55343294", "0.55216616", "0.550183", "0.5500729", "0.5499009", "0.54959136", "0.54869133", "0.54828435", "0.5476078", "0.54438263", "0.5423932", "0.5420098", "0.5404057", "0.5398302", "0.5390691", "0.53877985", "0.5375573", "0.53524894", "0.5351889", "0.5348731", "0.5342433", "0.53415984", "0.5341554", "0.5340386", "0.53364915", "0.5334549", "0.5331153", "0.53311044", "0.53298116", "0.53282624", "0.5316626", "0.53124213", "0.53083295", "0.5295734", "0.529316", "0.52794677", "0.52777535", "0.52687466", "0.52650875", "0.5263785", "0.5262378", "0.5258504", "0.5253482", "0.52271044", "0.52256095", "0.5214471", "0.520548" ]
0.7203451
1
Optimized version of the generic paginate_query_across_partitioned_databases for case schedules. queue_schedule_instances uses a lock to ensure that the same case_id cannot be queued within one hour of another instance. The celery tasks handle_case_alert_schedule_instance and handle_case_timed_schedule_instance both use locks to ensure only one task is operating on a case at one time. Each task also checks if the schedule is still valid on this case before processing it further. Assumes that q_expression includes active = True.
def _paginate_query_across_partitioned_databases(model_class, q_expression, load_source):
    from corehq.messaging.scheduling.scheduling_partitioned.models import (
        CaseAlertScheduleInstance,
        CaseTimedScheduleInstance,
    )

    if model_class not in (CaseAlertScheduleInstance, CaseTimedScheduleInstance):
        raise TypeError("Expected CaseAlertScheduleInstance or CaseTimedScheduleInstance")

    db_names = get_db_aliases_for_partitioned_query()
    for db_name in db_names:
        for row in _paginate_query(db_name, model_class, q_expression, load_source):
            yield row
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scheduling_method(self, cur_time, es, es_dict):\n dispatching_plan = []\n\n resource_types = self.resource_manager.resource_types\n avl_resources = self.resource_manager.current_availability\n system_capacity = self.resource_manager.system_capacity('nodes')\n\n # =======================================================================\n # Considered queued jobs: Jobs can be fitted in the current system state and less or equal than q_length\n # If a job_obj cannot be fitted or exceed the q_length is directly loaded in the dispatching decision using the no-solution dispatching tuple\n # =======================================================================\n priorized_jobs = SortedListWithKey(key=lambda job_tuple: job_tuple[1])\n\n current_qjobs = SortedList()\n\n cons_qjobs = {}\n for node in self.resource_manager.node_names:\n avl_res = avl_resources[node]\n # avl_res = system_capacity[node]\n for idx, job_obj in enumerate(es):\n job_id = job_obj.id\n\n if not (job_id in cons_qjobs):\n current_qjobs.add(job_id)\n cons_qjobs[job_id] = [False, 0, {}, None]\n priorized_jobs.add((job_id, self._job_priority_slowdown(job_obj, cur_time)))\n if self._reduced_model:\n possibilities = self._joint_nodes(job_obj, avl_res)\n if possibilities > 0:\n cons_qjobs[job_id][2][node] = min(possibilities, job_obj.requested_nodes)\n cons_qjobs[job_id][1] += possibilities\n if cons_qjobs[job_id][1] >= job_obj.requested_nodes:\n cons_qjobs[job_id][0] = True\n if not cons_qjobs[job_id][3]:\n cons_qjobs[job_id][3] = job_obj\n else:\n cons_qjobs[job_id][0] = True\n cons_qjobs[job_id][1] = None\n cons_qjobs[job_id][2] = None\n cons_qjobs[job_id][3] = job_obj\n\n qjobs = 0\n wc_makespan = 0\n makespans = []\n\n selected_priorized_jobs = []\n\n # Job of the dispatching decision\n decision_jobs = {}\n\n if self._reduced_model:\n for job_id, _ in priorized_jobs:\n t = cons_qjobs[job_id]\n if not t[0] or qjobs > self._cur_q_length - 1:\n decision_jobs[job_id] = self.dispatching_tuple(job_id)\n cons_qjobs.pop(job_id)\n else:\n exp_duration = max(1, t[-1].expected_duration)\n wc_makespan += exp_duration\n makespans.append(exp_duration)\n qjobs += 1\n selected_priorized_jobs.append(job_id)\n else:\n cannot_start_selected = 0\n for job_id, _ in priorized_jobs:\n t = cons_qjobs[job_id]\n if (not t[0] and cannot_start_selected >= self._considered_cannot_start) or (\n qjobs > self._cur_q_length - 1):\n decision_jobs[job_id] = self.dispatching_tuple(job_id)\n cons_qjobs.pop(job_id)\n else:\n if not t[0]:\n cons_qjobs[job_id][3] = es_dict[job_id]\n cannot_start_selected += 1\n exp_duration = max(1, t[-1].expected_duration)\n wc_makespan += exp_duration # , self.get_queue(t[-1].queue)) # exp_duration\n makespans.append(exp_duration)\n qjobs += 1\n selected_priorized_jobs.append(job_id)\n # =======================================================================\n # There are no jobs to dispatch at the current system state.\n # Then a no solution list is returned.\n # =======================================================================\n if not cons_qjobs:\n # Job Dispatching skip\n return decision_jobs.values(), []\n\n solved = False\n self.priorized_jobs = None\n\n if self._safe:\n manager = mp_dill.Manager()\n schedule_plan = manager.dict()\n process_class = mp_dill.Process\n\n p = process_class(target=getattr(self, 'cp_model'),\n args=(\n schedule_plan, cur_time, cons_qjobs, selected_priorized_jobs, es_dict, resource_types,\n avl_resources),\n kwargs={'timelimit': timelimit}\n )\n p.start()\n p.join()\n\n if p.exitcode 
!= 0:\n schedule_plan.pop('solver_state', None)\n schedule_plan.pop('limit_reached', None)\n return list(decision_jobs.values()) \\\n + [self.dispatching_tuple(job_id, start_time, nodes) for (start_time, job_id, nodes) in\n schedule_plan.values()] \\\n + [self.dispatching_tuple(job_id, None, []) for job_id in cons_qjobs if\n not (job_id in schedule_plan)], []\n else:\n schedule_plan = {}\n args = (\n schedule_plan, cur_time, cons_qjobs, selected_priorized_jobs, es_dict, resource_types, avl_resources)\n kwargs = {'max_timelimit': self._max_timelimit}\n function = getattr(self, 'cp_model')\n function(*args, **kwargs)\n\n solved = schedule_plan.pop('solved')\n of_value = schedule_plan.pop('of_value')\n walltime = schedule_plan.pop('walltime')\n proc_time = schedule_plan.pop('proc_time')\n incurred_time = walltime + proc_time\n failures = schedule_plan.pop('failures')\n branches = schedule_plan.pop('branches')\n p = None\n\n self.priorized_jobs = None\n dispatching_plan = list(schedule_plan.values())\n self.__instance_data = (\n solved, of_value, walltime, incurred_time, failures, branches,\n dispatching_plan + list(decision_jobs.values()),)\n\n # This is useful for print and also to create the unsuccessful data\n dispatched_jobs = 0\n queued_job_ids = []\n for a in dispatching_plan:\n if a[2]:\n dispatched_jobs += 1\n if dispatched_jobs == 0:\n queued_job_ids.append(a[1])\n\n if self._reduce_job_length:\n # ===================================================================\n # The considered number of jobs in the next scheduling decision are reduced to the half\n # if the current problem instance was not solved, if the current usage is\n # leq of the previous time point. After a successful dispatching this value is reset.\n # The minimum is 1, otherwise there will be nothing to dispatch\n # ===================================================================\n if not solved:\n self._cur_q_length = max(1, min(self._cur_q_length,\n len(schedule_plan)) // 2) # max(1, self._cur_q_length // 2)\n else:\n self._cur_q_length = self._q_length\n\n print('{} - {}: Queued {}, Dispatched {}, Running {}. {}'.format(self._counter, cur_time,\n len(es) - dispatched_jobs, dispatched_jobs,\n len(self.resource_manager.current_allocations),\n self.resource_manager.current_usage))\n return dispatching_plan + list(decision_jobs.values()), []", "def schedule_metadata_tasks():\n # Some metadata tasks will abort if higher precedence tasks are in\n # progress. Avoid scheduling these tasks. 
The priority here is to\n # get the result of an in-progress metadata operation if one exists.\n for instance in models.Instance.query():\n queue = None\n if instance.active_metadata_update:\n if instance.active_metadata_update.url:\n # Enqueue task to check the in-progress metadata operation.\n queue = 'check-instance-metadata-operation'\n else:\n # Enqueue task to start a metadata operation.\n queue = 'update-instance-metadata'\n elif instance.pending_metadata_updates:\n # Enqueue task to compress a list of desired metadata updates.\n queue = 'compress-instance-metadata-updates'\n if queue:\n utilities.enqueue_task(queue, instance.key)", "def execute( self ):\n\n # This allows dynamic changing of the throughput timescale\n self.throughputTimescale = self.am_getOption( 'ThroughputTimescale', 3600 )\n self.throughputTimescale = 60 * 60 * 1\n #print 'ThroughputTimescale:',self.throughputTimescale\n ######################################################################################\n #\n # Obtain information on the current state of the channel queues\n #\n\n res = self.TransferDB.getChannelQueues()\n if not res['OK']:\n errStr = \"ReplicationScheduler._execute: Failed to get channel queues from TransferDB.\"\n gLogger.error( errStr, res['Message'] )\n return S_OK()\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No active channels found for replication.\" )\n return S_OK()\n channels = res['Value']\n\n res = self.TransferDB.getChannelObservedThroughput( self.throughputTimescale )\n if not res['OK']:\n errStr = \"ReplicationScheduler._execute: Failed to get observed throughput from TransferDB.\"\n gLogger.error( errStr, res['Message'] )\n return S_OK()\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No active channels found for replication.\" )\n return S_OK()\n bandwidths = res['Value']\n\n self.strategyHandler = StrategyHandler( bandwidths, channels, self.section )\n\n processedRequests = []\n requestsPresent = True\n while requestsPresent:\n\n ######################################################################################\n #\n # The first step is to obtain a transfer request from the RequestDB which should be scheduled.\n #\n\n gLogger.info( \"ReplicationScheduler._execute: Contacting RequestDB for suitable requests.\" )\n res = self.RequestDB.getRequest( 'transfer' )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get a request list from RequestDB.\", res['Message'] )\n continue\n if not res['Value']:\n gLogger.info( \"ReplicationScheduler._execute: No requests found in RequestDB.\" )\n requestsPresent = False\n return S_OK()\n requestString = res['Value']['RequestString']\n requestName = res['Value']['RequestName']\n gLogger.info( \"ReplicationScheduler._execute: Obtained Request %s from RequestDB.\" % ( requestName ) )\n\n ######################################################################################\n #\n # The request must then be parsed to obtain the sub-requests, their attributes and files.\n #\n\n logStr = 'ReplicationScheduler._execute: Parsing Request %s.' % ( requestName )\n gLogger.info( logStr )\n oRequest = RequestContainer( requestString )\n res = oRequest.getAttribute( 'RequestID' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to get requestID.', res['Message'] )\n return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' 
)\n requestID = res['Value']\n if requestID in processedRequests:\n # Break the loop once we have iterated once over all requests\n res = self.RequestDB.updateRequest( requestName, requestString )\n if not res['OK']:\n gLogger.error( \"Failed to update request\", \"%s %s\" % ( requestName, res['Message'] ) )\n return S_OK()\n\n processedRequests.append( requestID )\n\n res = oRequest.getNumSubRequests( 'transfer' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to get number of sub-requests.', res['Message'] )\n return S_ERROR( 'ReplicationScheduler._execute: Failed to get number of sub-requests.' )\n numberRequests = res['Value']\n gLogger.info( \"ReplicationScheduler._execute: '%s' found with %s sub-requests.\" % ( requestName, numberRequests ) )\n\n ######################################################################################\n #\n # The important request attributes are the source and target SEs.\n #\n\n for ind in range( numberRequests ):\n gLogger.info( \"ReplicationScheduler._execute: Treating sub-request %s from '%s'.\" % ( ind, requestName ) )\n attributes = oRequest.getSubRequestAttributes( ind, 'transfer' )['Value']\n if attributes['Status'] != 'Waiting':\n # If the sub-request is already in terminal state\n gLogger.info( \"ReplicationScheduler._execute: Sub-request %s is status '%s' and not to be executed.\" % ( ind, attributes['Status'] ) )\n continue\n\n sourceSE = attributes['SourceSE']\n targetSE = attributes['TargetSE']\n \"\"\" This section should go in the transfer request class \"\"\"\n if type( targetSE ) in types.StringTypes:\n if re.search( ',', targetSE ):\n targetSEs = targetSE.split( ',' )\n else:\n targetSEs = [targetSE]\n \"\"\"----------------------------------------------------- \"\"\"\n operation = attributes['Operation']\n reqRepStrategy = None\n if operation in self.strategyHandler.getSupportedStrategies():\n reqRepStrategy = operation\n\n ######################################################################################\n #\n # Then obtain the file attribute of interest are the LFN and FileID\n #\n\n res = oRequest.getSubRequestFiles( ind, 'transfer' )\n if not res['OK']:\n gLogger.error( 'ReplicationScheduler._execute: Failed to obtain sub-request files.' 
, res['Message'] )\n continue\n files = res['Value']\n gLogger.info( \"ReplicationScheduler._execute: Sub-request %s found with %s files.\" % ( ind, len( files ) ) )\n filesDict = {}\n for file in files:\n lfn = file['LFN']\n if file['Status'] != 'Waiting':\n gLogger.debug( \"ReplicationScheduler._execute: %s will not be scheduled because it is %s.\" % ( lfn, file['Status'] ) )\n else:\n fileID = file['FileID']\n filesDict[lfn] = fileID\n if not filesDict:\n gLogger.info( \"ReplicationScheduler._execute: No Waiting files found for request\" )\n continue\n notSched = len( files ) - len( filesDict )\n if notSched:\n gLogger.info( \"ReplicationScheduler._execute: %d files found not Waiting\" % notSched )\n\n ######################################################################################\n #\n # Now obtain replica information for the files associated to the sub-request.\n #\n\n lfns = filesDict.keys()\n gLogger.info( \"ReplicationScheduler._execute: Obtaining replica information for %d sub-request files.\" % len( lfns ) )\n res = self.rm.getCatalogReplicas( lfns )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replica information.\", res['Message'] )\n continue\n for lfn, failure in res['Value']['Failed'].items():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replicas.\", '%s: %s' % ( lfn, failure ) )\n replicas = res['Value']['Successful']\n if not replicas.keys():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get replica information for all files.\" )\n continue\n\n ######################################################################################\n #\n # Now obtain the file sizes for the files associated to the sub-request.\n #\n\n lfns = replicas.keys()\n gLogger.info( \"ReplicationScheduler._execute: Obtaining file sizes for %d sub-request files.\" % len( lfns ) )\n res = self.rm.getCatalogFileMetadata( lfns )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to get file size information.\", res['Message'] )\n continue\n for lfn, failure in res['Value']['Failed'].items():\n gLogger.error( 'ReplicationScheduler._execute: Failed to get file size.', '%s: %s' % ( lfn, failure ) )\n metadata = res['Value']['Successful']\n if not metadata.keys():\n gLogger.error( \"ReplicationScheduler._execute: Failed to get metadata for all files.\" )\n continue\n\n ######################################################################################\n #\n # For each LFN determine the replication tree\n #\n\n for lfn in sortList( metadata.keys() ):\n fileSize = metadata[lfn]['Size']\n lfnReps = replicas[lfn]\n fileID = filesDict[lfn]\n\n targets = []\n for targetSE in targetSEs:\n if targetSE in lfnReps.keys():\n gLogger.debug( \"ReplicationScheduler.execute: %s already present at %s.\" % ( lfn, targetSE ) )\n else:\n targets.append( targetSE )\n if not targets:\n gLogger.info( \"ReplicationScheduler.execute: %s present at all targets.\" % lfn )\n oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Done' )\n continue\n if not lfnReps:\n gLogger.error( \"ReplicationScheduler.execute: The file has no replicas.\", lfn )\n continue\n res = self.strategyHandler.determineReplicationTree( sourceSE, targets, lfnReps, fileSize, strategy = reqRepStrategy )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler.execute: Failed to determine replication tree.\", res['Message'] )\n continue\n tree = res['Value']\n\n 
######################################################################################\n #\n # For each item in the replication tree obtain the source and target SURLS\n #\n\n for channelID, dict in tree.items():\n gLogger.info( \"ReplicationScheduler.execute: processing for channel %d %s\" % ( channelID, str( dict ) ) )\n hopSourceSE = dict['SourceSE']\n hopDestSE = dict['DestSE']\n hopAncestor = dict['Ancestor']\n\n # Get the sourceSURL\n if hopAncestor:\n status = 'Waiting%s' % ( hopAncestor )\n res = self.obtainLFNSURL( hopSourceSE, lfn )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( errStr )\n return S_ERROR( errStr )\n sourceSURL = res['Value']\n else:\n status = 'Waiting'\n res = self.resolvePFNSURL( hopSourceSE, lfnReps[hopSourceSE] )\n if not res['OK']:\n sourceSURL = lfnReps[hopSourceSE]\n else:\n sourceSURL = res['Value']\n\n # Get the targetSURL\n res = self.obtainLFNSURL( hopDestSE, lfn )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( errStr )\n return S_ERROR( errStr )\n targetSURL = res['Value']\n\n ######################################################################################\n #\n # For each item in the replication tree add the file to the channel\n #\n res = self.TransferDB.addFileToChannel( channelID, fileID, hopSourceSE, sourceSURL, hopDestSE, targetSURL, fileSize, fileStatus = status )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to add File to Channel.\" , \"%s %s\" % ( fileID, channelID ) )\n return S_ERROR( errStr )\n res = self.TransferDB.addFileRegistration( channelID, fileID, lfn, targetSURL, hopDestSE )\n if not res['OK']:\n errStr = res['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to add File registration.\" , \"%s %s\" % ( fileID, channelID ) )\n result = self.TransferDB.removeFileFromChannel( channelID, fileID )\n if not result['OK']:\n errStr += result['Message']\n gLogger.error( \"ReplicationScheduler._execute: Failed to remove File.\" , \"%s %s\" % ( fileID, channelID ) )\n return S_ERROR( errStr )\n oRequest.setSubRequestFileAttributeValue( ind, 'transfer', lfn, 'Status', 'Scheduled' )\n res = self.TransferDB.addReplicationTree( fileID, tree )\n\n if oRequest.isSubRequestEmpty( ind, 'transfer' )['Value']:\n oRequest.setSubRequestStatus( ind, 'transfer', 'Scheduled' )\n\n ################################################\n # Generate the new request string after operation\n requestString = oRequest.toXML()['Value']\n res = self.RequestDB.updateRequest( requestName, requestString )\n if not res['OK']:\n gLogger.error( \"ReplicationScheduler._execute: Failed to update request\", \"%s %s\" % ( requestName, res['Message'] ) )", "def reserved_compare(options):\n running_instances = defaultdict(dict)\n reserved_purchases = defaultdict(dict)\n regions = boto.ec2.regions()\n good_regions = [r for r in regions if r.name not in ['us-gov-west-1',\n 'cn-north-1']]\n for region in good_regions:\n if options.trace:\n print \" Scanning region {0}\".format(region.name)\n conn = region.connect()\n filters = {'instance-state-name': 'running'}\n zones = defaultdict(dict)\n\n if options.trace:\n print \" Fetching running instances\"\n reservations = conn.get_all_instances(filters=filters)\n for reservation in reservations:\n for inst in reservation.instances:\n if options.debug:\n print instance_string(inst, options, verbose=True)\n if inst.state != 'running':\n if options.debug:\n print \"Skip {0.id} state {0.state}\".format(inst)\n continue\n if 
inst.spot_instance_request_id:\n if options.debug:\n print \"Skip {0.id} has spot id {0.spot_instance_request_id}\".format(inst)\n continue\n if 'aws:autoscaling:groupName' in inst.tags:\n if options.debug:\n print \"Skip {0.id} is an autoscale instance\".format(inst)\n continue\n if inst.platform == 'Windows' or inst.platform == 'windows':\n if options.debug:\n print \"Skip {0.id} has platform {0.platform}\".format(inst)\n continue\n if inst.instance_type not in zones[inst.placement]:\n zones[inst.placement][inst.instance_type] = []\n zones[inst.placement][inst.instance_type].append(inst)\n\n if zones:\n running_instances[region.name] = zones\n\n purchased = defaultdict(dict)\n if options.trace:\n print \" Fetching reservations\"\n\n reserved = conn.get_all_reserved_instances()\n for r in reserved:\n if options.debug:\n print reservation_string(r, verbose=True)\n if r.state != 'active':\n continue\n if r.instance_tenancy != 'default':\n print 'WARNING: Non-default tenancy %s: %s' % (r.instance_tenancy, reservation_string(r))\n continue\n if r.instance_type not in purchased[r.availability_zone]:\n purchased[r.availability_zone][r.instance_type] = [r]\n else:\n purchased[r.availability_zone][r.instance_type].append(r)\n\n if purchased:\n reserved_purchases[region.name] = purchased\n\n return check_reservation_use(options, running_instances,\n reserved_purchases)", "def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)", "def instance_backup_schedule_update(self, context, instance_uuid,\n schedule):\n metadata = self._instance_metadata(context, instance_uuid)\n schedule_key = meta.BACKUP_SCHEDULE_KEY\n active_key = meta.BACKUP_ACTIVE_KEY\n if schedule and len(schedule) > 0:\n # Sort items by frequency\n sorted_schedule = sorted(schedule,\n key=lambda item: item[meta.SCHEDULE_FREQUENCY_KEY])\n metadata[schedule_key] = jsonutils.dumps(sorted_schedule)\n metadata[active_key] = True # This lingers forever, on purpose.\n self._instance_metadata_update(context, instance_uuid, metadata)\n return sorted_schedule\n else:\n metadata[schedule_key] = jsonutils.dumps([])\n self._instance_metadata_update(context, instance_uuid, metadata)\n return []", "async def get_scheduled_flow_runs(\n work_pool_name: str = Path(..., description=\"The work pool name\", alias=\"name\"),\n work_pool_queue_names: List[str] = Body(\n None, description=\"The names of work pool queues\"\n ),\n scheduled_before: DateTimeTZ = Body(\n None, description=\"The maximum time to look for scheduled flow runs\"\n ),\n scheduled_after: DateTimeTZ = Body(\n None, description=\"The minimum time to look for scheduled flow runs\"\n ),\n limit: int = dependencies.LimitBody(),\n worker_lookups: WorkerLookups = Depends(WorkerLookups),\n db: OrionDBInterface = Depends(provide_database_interface),\n) -> List[schemas.responses.WorkerFlowRunResponse]:\n async with db.session_context(begin_transaction=True) as session:\n work_pool_id = await worker_lookups._get_work_pool_id_from_name(\n session=session, work_pool_name=work_pool_name\n )\n\n if work_pool_queue_names is None:\n work_pool_queue_ids = None\n else:\n work_pool_queue_ids = []\n for qn in work_pool_queue_names:\n work_pool_queue_ids.append(\n await 
worker_lookups._get_work_pool_queue_id_from_name(\n session=session,\n work_pool_name=work_pool_name,\n work_pool_queue_name=qn,\n )\n )\n\n queue_response = await models.workers.get_scheduled_flow_runs(\n session=session,\n db=db,\n work_pool_ids=[work_pool_id],\n work_pool_queue_ids=work_pool_queue_ids,\n scheduled_before=scheduled_before,\n scheduled_after=scheduled_after,\n limit=limit,\n )\n\n return queue_response", "def run(delayed, concurrency, version_type=None, queue=None, raise_on_error=True):\n if delayed:\n celery_kwargs = {\n \"kwargs\": {\n \"version_type\": version_type,\n \"search_bulk_kwargs\": {\"raise_on_error\": raise_on_error},\n }\n }\n click.secho(\n \"Starting {0} tasks for indexing records...\".format(concurrency), fg=\"green\"\n )\n if queue is not None:\n celery_kwargs.update({\"queue\": queue})\n for c in range(0, concurrency):\n process_bulk_queue.apply_async(**celery_kwargs)\n else:\n click.secho(\"Indexing records...\", fg=\"green\")\n RecordIndexer(version_type=version_type).process_bulk_queue(\n search_bulk_kwargs={\"raise_on_error\": raise_on_error}\n )", "def _run_queries(self, queries: List[Query]) -> None:\n QUERY_TASK_LIMIT = 250\n\n while queries or self._running_queries:\n if queries:\n logger.debug(f\"Starting a new loop, {len(queries)} queries queued\")\n self._fill_query_slots(queries)\n query_tasks = self.get_running_query_tasks()[:QUERY_TASK_LIMIT]\n logger.debug(f\"Checking for results of {len(query_tasks)} query tasks\")\n for query_result in self._get_query_results(query_tasks):\n self._handle_query_result(query_result)\n time.sleep(0.5)", "def queue_fetch(model_admin, request, queryset):\n for locator in queryset:\n locator.queue_fetch()", "def instances_availability(self, lastsubmitedinstance, metrics):\n connection = self.connection\n instancesconfig = self.instancesconfigs\n\n cur = connection.cursor()\n harvesters = instancesconfig.keys()\n connection.row_factory = sqlite3.Row\n\n for harvesterid in harvesters:\n error_text = set()\n\n instanceisenable = self.__str_to_bool(instancesconfig[harvesterid]['instanceisenable'])\n del instancesconfig[harvesterid]['instanceisenable']\n ### Instance is enable ###\n if instanceisenable:\n for host in instancesconfig[harvesterid].keys():\n avaibility = []\n if self.__str_to_bool(instancesconfig[harvesterid][host]['hostisenable']):\n ### No submitted worker ###\n timedelta_submitted = timedelta(minutes=30)\n if host != 'none' and host in instancesconfig[harvesterid] \\\n and self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['enable']):\n timedelta_submitted = self.__get_timedelta(\n instancesconfig[harvesterid][host]['metrics']['lastsubmittedworker']['value'])\n if lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'] < datetime.utcnow() - timedelta_submitted:\n error = \"Last submitted worker was {0}\".format(\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host][\n 'harvesterhostmaxtime'])) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n if harvesterid in metrics:\n ### No heartbeat ###\n heartbeattime = metrics[harvesterid][host].keys()[0]\n contacts = instancesconfig[harvesterid][host]['contacts']\n timedelta_heartbeat = self.__get_timedelta(instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['value'])\n if self.__str_to_bool( instancesconfig[harvesterid][host]['metrics']['lastheartbeat']['enable']) and \\\n heartbeattime < datetime.utcnow() - timedelta_heartbeat:\n error = \"Last heartbeat was 
{0}\".format(\n str(heartbeattime)) + '\\n'\n error_text.add(error)\n avaibility.append(0)\n\n #### Metrics ####\n memory = instancesconfig[harvesterid][host]['memory']\n cpu_warning = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_warning']\n cpu_critical = instancesconfig[harvesterid][host]['metrics']['cpu']['cpu_critical']\n disk_warning = instancesconfig[harvesterid][host]['metrics']['disk']['disk_warning']\n disk_critical = instancesconfig[harvesterid][host]['metrics']['disk']['disk_critical']\n memory_warning = instancesconfig[harvesterid][host]['metrics']['memory']['memory_warning']\n memory_critical = instancesconfig[harvesterid][host]['metrics']['memory']['memory_critical']\n\n cpu_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['cpu']['enable'])\n disk_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['disk']['enable'])\n memory_enable = self.__str_to_bool(\n instancesconfig[harvesterid][host]['metrics']['memory']['enable'])\n\n #### Metrics DB ####\n for metric in metrics[harvesterid][host][heartbeattime]:\n #### CPU ####\n if cpu_enable:\n cpu_pc = int(metric['cpu_pc'])\n if cpu_pc >= cpu_warning:\n avaibility.append(50)\n error = \"Warning! CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n elif cpu_pc >= cpu_critical:\n avaibility.append(10)\n error = \"CPU utilization: {0}\".format(\n str(cpu_pc)) + '\\n'\n error_text.add(error)\n #### Memory ####\n if memory_enable:\n if 'memory_pc' in metric:\n memory_pc = int(metric['memory_pc'])\n else:\n memory_pc = int(self.__get_change(metric['rss_mib'], memory))\n if memory_pc >= memory_warning:\n avaibility.append(50)\n error = \"Warning! Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n elif memory_pc >= memory_critical:\n avaibility.append(0)\n error = \"Memory consumption: {0}\".format(\n str(memory_pc)) + '\\n'\n error_text.add(error)\n #### HDD&HDD1 ####\n if disk_enable:\n if 'volume_data_pc' in metric:\n volume_data_pc = int(metric['volume_data_pc'])\n else:\n volume_data_pc = -1\n if volume_data_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n elif volume_data_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk utilization: {0}\".format(\n str(volume_data_pc)) + '\\n'\n error_text.add(error)\n if 'volume_data1_pc' in metric:\n volume_data1_pc = int(metric['volume_data1_pc'])\n if volume_data1_pc >= disk_warning:\n avaibility.append(50)\n error = \"Warning! 
Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n elif volume_data1_pc >= disk_critical:\n avaibility.append(10)\n error = \"Disk 1 utilization: {0}\".format(\n str(volume_data1_pc)) + '\\n'\n error_text.add(error)\n try:\n cur.execute(\"insert into INSTANCES values (?,?,?,?,?,?,?,?,?)\",\n (str(harvesterid), str(host),\n str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n heartbeattime, 1, 0, min(avaibility) if len(avaibility) > 0 else 100, str(contacts), ', '.join(str(e) for e in error_text)))\n connection.commit()\n error_text = set()\n except:\n query = \\\n \"\"\"UPDATE INSTANCES \n SET lastsubmitted = '{0}', active = {1}, availability = {2}, lastheartbeat = '{3}', contacts = '{4}', errorsdesc = '{5}'\n WHERE harvesterid = '{6}' and harvesterhost = '{7}'\n \"\"\".format(str(lastsubmitedinstance[harvesterid]['harvesterhost'][host]['harvesterhostmaxtime']),\n 1, min(avaibility) if len(avaibility) > 0 else 100, heartbeattime, str(contacts), ', '.join(str(e) for e in error_text), str(harvesterid),\n str(host))\n cur.execute(query)\n connection.commit()\n error_text = set()\n else:\n cur.execute(\"DELETE FROM INSTANCES WHERE harvesterid = ?\", [str(harvesterid)])\n connection.commit()", "def run():\r\n num_workers = g.num_query_queue_workers\r\n wq = WorkQueue(num_workers = num_workers)\r\n wq.start()\r\n\r\n while True:\r\n job = None\r\n #limit the total number of jobs in the WorkQueue. we don't\r\n #need to load the entire db queue right away (the db queue can\r\n #get quite large).\r\n if len(running) < 2 * num_workers:\r\n with running_lock:\r\n iden, pickled_cr = get_query()\r\n if pickled_cr is not None:\r\n if not iden in running:\r\n running.add(iden)\r\n job = make_query_job(iden, pickled_cr)\r\n wq.add(job)\r\n\r\n #if we didn't find a job, sleep before trying again\r\n if not job:\r\n time.sleep(1)", "def _reclaim_queued_deletes(self, context):\n interval = CONF.reclaim_instance_interval\n if interval <= 0:\n LOG.debug(\"CONF.reclaim_instance_interval <= 0, skipping...\")\n return\n\n # TODO(comstud, jichenjc): Dummy quota object for now See bug 1296414.\n # The only case that the quota might be inconsistent is\n # the cloud node died between set instance state to SOFT_DELETED\n # and quota commit to DB. When cloud node starts again\n # it will have no idea the reservation is committed or not or even\n # expired, since it's a rare case, so marked as todo.\n quotas = objects.Quotas.from_reservations(context, None)\n\n filters = {'vm_state': vm_states.SOFT_DELETED,\n 'task_state': None,\n 'host': self.host}\n instances = objects.InstanceList.get_by_filters(\n context, filters,\n expected_attrs=objects.instance.INSTANCE_DEFAULT_FIELDS,\n use_slave=True)\n for instance in instances:\n if self._deleted_old_enough(instance, interval):\n bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(\n context, instance.uuid)\n LOG.info(_LI('Reclaiming deleted instance'), instance=instance)\n try:\n self._delete_instance(context, instance, bdms, quotas)\n except Exception as e:\n LOG.warning(_LW(\"Periodic reclaim failed to delete \"\n \"instance: %s\"),\n e, instance=instance)", "def _schedule(self, context, spec_obj):\n elevated = context.elevated()\n\n # Find our local list of acceptable hosts by repeatedly\n # filtering and weighing our options. 
Each time we choose a\n # host, we virtually consume resources on it so subsequent\n # selections can adjust accordingly.\n\n # Note: remember, we are using an iterator here. So only\n # traverse this list once. This can bite you if the hosts\n # are being scanned in a filter or weighing function.\n\n # If the request is for a preemptible instace, take into account all\n # resources used on the host. However, if the request is for a normal\n # instance, do not take into account the preemptible instances. This\n # way we can schedule normal requests even when there is no room for\n # them without doing a retry cycle.\n\n if self._is_preemptible_request(spec_obj):\n hosts = self._get_all_host_states(elevated, partial=False)\n else:\n hosts = self._get_all_host_states(elevated, partial=True)\n\n hosts_full_state = self._get_all_host_states(elevated, partial=False)\n\n selected_hosts = []\n num_instances = spec_obj.num_instances\n for num in range(num_instances):\n # Filter local hosts based on requirements ...\n hosts = self.host_manager.get_filtered_hosts(hosts,\n spec_obj, index=num)\n if not hosts:\n # Can't get any more locally.\n break\n\n LOG.debug(\"Filtered %(hosts)s\", {'hosts': hosts})\n\n # Get the full host states for weighing. The filtered list of\n # hosts does not take into account preemptible instances, but we\n # need them for weighing\n\n hosts_full_state = list(hosts_full_state)\n\n filtered_hosts = {(h.host, h.nodename): h for h in hosts}\n hosts_aux = [h for h in hosts_full_state\n if (h.host, h.nodename) in filtered_hosts]\n weighed_hosts = self.host_manager.get_weighed_hosts(hosts_aux,\n spec_obj)\n\n LOG.debug(\"Weighed %(hosts)s\", {'hosts': weighed_hosts})\n\n scheduler_host_subset_size = CONF.scheduler_host_subset_size\n if scheduler_host_subset_size > len(weighed_hosts):\n scheduler_host_subset_size = len(weighed_hosts)\n if scheduler_host_subset_size < 1:\n scheduler_host_subset_size = 1\n\n chosen_host = random.choice(\n weighed_hosts[0:scheduler_host_subset_size])\n LOG.debug(\"Selected host: %(host)s\", {'host': chosen_host})\n selected_hosts.append(chosen_host)\n\n # Now consume the resources so the filter/weights\n # will change for the next instance.\n\n # First update the chosen host, that is from the full state list\n chosen_host.obj.consume_from_request(spec_obj)\n\n # Now consume from the partial state list\n host = chosen_host.obj.host\n node = chosen_host.obj.nodename\n state_key = (host, node)\n filtered_hosts[state_key].consume_from_request(spec_obj)\n\n # Now continue with the rest of the scheduling function\n if spec_obj.instance_group is not None:\n spec_obj.instance_group.hosts.append(chosen_host.obj.host)\n # hosts has to be not part of the updates when saving\n spec_obj.instance_group.obj_reset_changes(['hosts'])\n\n return selected_hosts", "def test_singleton_reschedule(self):\n dbpool = buildConnectionPool(self, jobSchema + schemaText)\n\n qpool = yield self._enqueue(dbpool, 1, 2, cl=DummyWorkSingletonItem, notBefore=datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n @inlineCallbacks\n def allWork(txn):\n jobs = yield JobItem.all(txn)\n work = [((yield job.workItem()), job) for job in jobs]\n returnValue(filter(lambda x: x[0], work))\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n def _reschedule_force(txn, force):\n txn._queuer = qpool\n return DummyWorkSingletonItem.reschedule(txn, 60, force=force)\n yield 
inTransaction(dbpool.connection, _reschedule_force, force=False)\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore == datetime.datetime(2014, 5, 17, 12, 0, 0))\n\n yield inTransaction(dbpool.connection, _reschedule_force, force=True)\n\n work = yield inTransaction(dbpool.connection, allWork)\n self.assertTrue(len(work) == 1)\n self.assertTrue(work[0][1].notBefore != datetime.datetime(2014, 5, 17, 12, 0, 0))", "def _plan_workorders(self, replan=False):\n self.ensure_one()\n\n if not self.workorder_ids:\n return\n # Schedule all work orders (new ones and those already created)\n qty_to_produce = max(self.product_qty - self.qty_produced, 0)\n qty_to_produce = self.product_uom_id._compute_quantity(qty_to_produce, self.product_id.uom_id)\n start_date = max(self.date_planned_start, datetime.datetime.now())\n if replan:\n workorder_ids = self.workorder_ids.filtered(lambda wo: wo.state in ['ready', 'pending'])\n # We plan the manufacturing order according to its `date_planned_start`, but if\n # `date_planned_start` is in the past, we plan it as soon as possible.\n workorder_ids.leave_id.unlink()\n else:\n workorder_ids = self.workorder_ids.filtered(lambda wo: not wo.date_planned_start)\n for workorder in workorder_ids:\n workcenters = workorder.workcenter_id | workorder.workcenter_id.alternative_workcenter_ids\n\n best_finished_date = datetime.datetime.max\n vals = {}\n for workcenter in workcenters:\n # compute theoretical duration\n if workorder.workcenter_id == workcenter:\n duration_expected = workorder.duration_expected\n else:\n duration_expected = workorder._get_duration_expected(alternative_workcenter=workcenter)\n\n from_date, to_date = workcenter._get_first_available_slot(start_date, duration_expected)\n # If the workcenter is unavailable, try planning on the next one\n if not from_date:\n continue\n # Check if this workcenter is better than the previous ones\n if to_date and to_date < best_finished_date:\n best_start_date = from_date\n best_finished_date = to_date\n best_workcenter = workcenter\n vals = {\n 'workcenter_id': workcenter.id,\n 'duration_expected': duration_expected,\n }\n\n # If none of the workcenter are available, raise\n if best_finished_date == datetime.datetime.max:\n raise UserError(_('Impossible to plan the workorder. 
Please check the workcenter availabilities.'))\n\n # Instantiate start_date for the next workorder planning\n if workorder.next_work_order_id:\n start_date = best_finished_date\n\n # Create leave on chosen workcenter calendar\n leave = self.env['resource.calendar.leaves'].create({\n 'name': workorder.display_name,\n 'calendar_id': best_workcenter.resource_calendar_id.id,\n 'date_from': best_start_date,\n 'date_to': best_finished_date,\n 'resource_id': best_workcenter.resource_id.id,\n 'time_type': 'other'\n })\n vals['leave_id'] = leave.id\n workorder.write(vals)\n self.with_context(force_date=True).write({\n 'date_planned_start': self.workorder_ids[0].date_planned_start,\n 'date_planned_finished': self.workorder_ids[-1].date_planned_finished\n })", "def domain_check_threading_manage(next_domain_info: typing.Callable[\n [],\n typing.Tuple[\n Domain,\n typing.List[typing.Tuple[LocationHint, Location]],\n typing.List[typing.Tuple[MeasurementResult, Location]]\n ]],\n increment_domain_type_count: typing.Callable[\n [DomainLocationType], None],\n increment_count_for_type: typing.Callable[\n [LocationCodeType], None],\n ripe_create_sema: mp.Semaphore,\n ripe_slow_down_sema: mp.Semaphore,\n bill_to_address: str,\n wo_measurements: bool,\n allowed_measurement_age: int,\n api_key: str,\n measurement_strategy: MeasurementStrategy,\n number_of_probes_per_measurement: int,\n buffer_time: float,\n packets_per_measurement: int,\n use_efficient_probes: bool,\n location_to_probes_dct: typing.Dict[\n str, typing.Tuple[RipeAtlasProbe, float, Location]],\n measurement_results_queue: queue.Queue,\n stop_without_old_results: bool):\n logger.debug('thread started')\n\n def get_domains() -> typing.Generator[typing.Tuple[Domain, typing.List[LocationHint]],\n None, None]:\n while True:\n domain_hints_tuple = next_domain_info()\n if domain_hints_tuple is not None:\n yield domain_hints_tuple\n else:\n break\n\n for domain, location_hints, measurement_result_tuples in get_domains():\n try:\n logger.debug('next domain %s', domain.name)\n ip_version = constants.IPV4_IDENTIFIER if domain.ipv4_address else \\\n constants.IPV6_IDENTIFIER\n check_domain_location_ripe(domain, location_hints, increment_domain_type_count,\n increment_count_for_type, ripe_create_sema,\n ripe_slow_down_sema, ip_version, bill_to_address,\n wo_measurements, allowed_measurement_age, api_key,\n measurement_strategy, number_of_probes_per_measurement,\n buffer_time, packets_per_measurement, use_efficient_probes,\n location_to_probes_dct, measurement_result_tuples,\n measurement_results_queue, stop_without_old_results)\n except Exception:\n logger.exception('Check Domain Error %s', domain.name)\n\n logger.debug('Thread finished')", "def start_scans_for_lists_who_are_up_for_scanning() -> Task:\n\n tasks = []\n\n for urllist in UrlList.objects.all().filter():\n # this also gets the lists that are not scanned. The scan date needs to progress, otherwise it will be\n # scanned instantly when the list will be enabled. 
This also goes for deleted lists.\n if urllist.enable_scans is False or urllist.is_deleted is True:\n urllist.renew_scan_moment()\n continue\n\n if urllist.is_due_for_scanning():\n tasks.append(initialize_scan.si(urllist))\n\n # placed here, as otherwise the list is never due for scanning as the date might be updated to something\n # new in the future.\n urllist.renew_scan_moment()\n\n # using this in create_function_job so a job is created, allowing for tracking this a bit\n return group(tasks)", "def consistency_function(p_message_queue,\n p_consistency_results,\n p_last_balanced,\n p_database_lock,\n p_continue_consistency,\n consistency_rate = 2, # queries per second\n query_timeout = 5,\n VERBOSE = False,\n worker_num = 0):\n \n print(\"w{}: Consistency thread started.\".format(worker_num))\n \n # path to worker's database\n data_file = os.path.join(\"databases\",\"worker_{}_database.csv\".format(worker_num))\n \n # wait until there are data results in database\n next_im_id = -1\n while next_im_id < 5:\n time.sleep(0.1)\n with p_last_balanced.get_lock():\n next_im_id = p_last_balanced.value\n \n prev_time = time.time()\n active_queries = {} \n \n # get continue_val\n with p_continue_consistency.get_lock():\n continue_val = p_continue_consistency.value\n \n # continue until the val of p_continue_consistency is changed by heartbeat thread exiting\n while continue_val:\n \n if time.time() > prev_time + 1/consistency_rate:\n \n # cycle backwards through im_ids\n next_im_id -= 1\n if next_im_id < 0:\n with p_last_balanced.get_lock():\n next_im_id = p_last_balanced.value\n # add query to dict of active queries\n active_queries[next_im_id] = {\"time_in\": time.time(),\n \"vals\": [get_im_data(data_file,next_im_id,p_database_lock)[0]]}\n \n # forward consistency query to all other workers via message queue\n # the True indicates that this is an internal request\n message = (\"query_request\", (next_im_id,worker_num,True))\n p_message_queue.put(message)\n #if VERBOSE: print(\"w{}: Consistency query for im {} requested.\".format(worker_num,next_im_id))\n \n # parse results from consistency results queue\n while True:\n try:\n (query_data,query_im_id) = p_consistency_results.get(timeout = 0) \n prev_time = time.time()\n # add if still active\n if query_im_id in active_queries.keys():\n active_queries[query_im_id][\"vals\"].append(query_data) \n #if VERBOSE: print(\"w{}: Parsed consistency response for im {}.\".format(worker_num,query_im_id))\n except queue.Empty:\n break\n \n # cycle through active queries and return result for all that have timed out\n timed_out = []\n for id_tag in active_queries:\n prev_time = time.time()\n query = active_queries[id_tag]\n if query['time_in'] + query_timeout < time.time():\n # get most common val by comparing unique hashes\n hash_dict = {}\n for data in query[\"vals\"]:\n if type(data) == np.ndarray: # make sure None doesn't become the most common value\n data_hash = hash(data.tostring())\n if data_hash in hash_dict.keys():\n hash_dict[data_hash][\"count\"] +=1\n else:\n hash_dict[data_hash] = {}\n hash_dict[data_hash][\"count\"] = 1\n hash_dict[data_hash][\"data\"] = data\n \n # count hashes\n most_common_data = None\n count = 0\n for data_hash in hash_dict:\n if hash_dict[data_hash][\"count\"] > count:\n count = hash_dict[data_hash][\"count\"]\n most_common_data = hash_dict[data_hash][\"data\"]\n \n # lastly, compare to own data and see if count is greater than\n # num_validators on previous data. 
If not, send own value and \n # don't update own value\n (own_data, own_num_validators) = get_im_data(data_file,id_tag,p_database_lock)\n if own_num_validators < count:\n assert len(most_common_data[0]) > 0, print(\"most_common_data isn't valid\")\n update_data(data_file,count,most_common_data,p_database_lock)\n if VERBOSE: print(\"w{}: Consistency update on im {} with {} validators.\".format(worker_num,id_tag,count))\n \n timed_out.append(id_tag)\n \n # remove all handled requests\n timed_out.reverse()\n for tag in timed_out:\n del active_queries[tag]\n \n # determine whether to continue using shared variable with heartbeat thread\n with p_continue_consistency.get_lock():\n continue_val = p_continue_consistency.value\n \n print(\"{}: Consistency thread exited.\".format(worker_num))", "def runTasks(self):\n\n self.logger.INFO(\n f\"STARTING TASKS FOR TRADER {self.user['Name']} - ACCOUNT ID: {self.account_id}\\n\")\n\n def selectSleep():\n \"\"\"\n PRE-MARKET(0400 - 0930 ET): 5 SECONDS\n MARKET OPEN(0930 - 1600 ET): 5 SECONDS\n AFTER MARKET(1600 - 2000 ET): 5 SECONDS\n\n WEEKENDS: 60 SECONDS\n WEEKDAYS(2000 - 0400 ET): 60 SECONDS\n\n EVERYTHING WILL BE BASED OFF CENTRAL TIME\n\n OBJECTIVE IS TO FREE UP UNNECESSARY SERVER USAGE\n \"\"\"\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekends = [\"Sat\", \"Sun\"]\n\n # IF CURRENT TIME GREATER THAN 8PM AND LESS THAN 4AM, OR DAY IS WEEKEND, THEN RETURN 60 SECONDS\n if tm > \"20:00\" or tm < \"04:00\" or day in weekends:\n\n return 60\n\n # ELSE RETURN 5 SECONDS\n return 5\n\n while self.isAlive:\n\n try:\n\n # RUN TASKS ####################################################\n self.killQueueOrder()\n\n self.updateAccountBalance()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n tm = dt_central.time().strftime(\"%H:%M\")\n\n if tm == \"08:30\": # set this based on YOUR timezone\n\n if not self.check_options:\n\n self.sellOptionsAtExpiration()\n\n self.check_options = True\n\n else:\n\n self.check_options = False\n\n # IF MIDNIGHT, ADD BALANCE, PROFIT/LOSS TO HISTORY\n if tm == \"23:55\":\n\n if not self.midnight:\n\n self.balanceHistory()\n\n self.profitLossHistory()\n\n self.midnight = True\n\n else:\n\n self.midnight = False\n\n except KeyError:\n\n self.isAlive = False\n\n except Exception:\n\n self.logger.ERROR(\n f\"ACCOUNT ID: {self.account_id} - TRADER: {self.user['Name']}\")\n\n finally:\n\n time.sleep(selectSleep())\n\n self.logger.INFO(f\"TASK STOPPED FOR ACCOUNT ID {self.account_id}\")", "def onetime_query_state_locks(config, acon_query, acon_pg, query, args={}, num_workers=0):\n\n\tcurs_query = acon_query.cursor()\n\tcurs_pg = acon_pg.cursor()\n\tcurs_query.execute(\"select pg_advisory_lock(1);\")\n\tcurs_pg.execute(\"select pg_advisory_lock(2);\")\n\twait(acon_query)\n\twait(acon_pg)\n\tcurs_pg.execute(\"select pg_advisory_lock(1);\")\n\tset_guc(acon_query, 'enable_mergejoin', 'off')\n\tset_guc(acon_query, 'max_parallel_workers_per_gather', num_workers)\n\tcurs_query.execute(query)\n\t# extract current state of query progress\n\tMAX_PG_QS_RETRIES = 10\n\tDELAY_BETWEEN_RETRIES = 0.1\n\tpg_qs_args = {\n\t\t\t'config': config,\n\t\t\t'pid': acon_query.get_backend_pid(),\n\t\t\t'conn': acon_pg\n\t\t\t}\n\tfor k, v in args.items():\n\t\tpg_qs_args[k] = v\n\tn_retries = 0\n\n\twait(acon_pg)\n\n\twhile 
True:\n\t\tresult, notices = pg_query_state_locks(**pg_qs_args)\n\t\tn_retries += 1\n\t\tif len(result) > 0:\n\t\t\tbreak\n\t\tif n_retries >= MAX_PG_QS_RETRIES:\n\t\t\t# pg_query_state callings don't return any result, more likely run\n\t\t\t# query has completed\n\t\t\tbreak\n\t\ttime.sleep(DELAY_BETWEEN_RETRIES)\n\n\tcurs_pg.execute(\"select pg_advisory_unlock(2);\")\n\twait(acon_pg)\n\twait(acon_query)\n\n\tset_guc(acon_query, 'enable_mergejoin', 'on')\n\tcurs_query.execute(\"select pg_advisory_unlock(2);\")\n\tcurs_pg.execute(\"select pg_advisory_unlock(1);\")\n\treturn result, notices", "def _schedule(self, context, topic, spec_obj, instance_uuids,\n return_alternates=False):\n\n elevated = context.elevated()\n hosts = self.hosts_up(elevated, topic)\n if not hosts:\n msg = _(\"Is the appropriate service running?\")\n raise exception.NoValidHost(reason=msg)\n\n hosts = self._filter_hosts(hosts, spec_obj)\n if not hosts:\n msg = _(\"Could not find another compute\")\n raise exception.NoValidHost(reason=msg)\n\n # Note that we don't claim in the chance scheduler\n num_instances = len(instance_uuids)\n # If possible, we'd like to return distinct hosts for each instance.\n # But when there are fewer available hosts than requested instances, we\n # will need to return some duplicates.\n if len(hosts) >= num_instances:\n selected_hosts = random.sample(hosts, num_instances)\n else:\n selected_hosts = [random.choice(hosts)\n for i in range(num_instances)]\n\n # This is the overall list of values to be returned. There will be one\n # item per instance, and that item will be a list of Selection objects\n # representing the selected host and zero or more alternates.\n # NOTE(edleafe): in a multi-cell environment, this can return\n # alternates from different cells. 
When support for multiple cells is\n # implemented in select_destinations, this will have to be updated to\n # restrict alternates to come from the same cell.\n selections_to_return = []\n\n # We can't return dupes as alternates, since alternates are used when\n # building to the selected host fails.\n if return_alternates:\n alts_per_instance = min(len(hosts), CONF.scheduler.max_attempts)\n else:\n alts_per_instance = 0\n for sel_host in selected_hosts:\n selection = objects.Selection.from_host_state(sel_host)\n sel_plus_alts = [selection]\n while len(sel_plus_alts) < alts_per_instance:\n candidate = random.choice(hosts)\n if (candidate not in sel_plus_alts) and (\n candidate not in selected_hosts):\n # We don't want to include a selected host as an alternate,\n # as it will have a high likelihood of not having enough\n # resources left after it has an instance built on it.\n alt_select = objects.Selection.from_host_state(candidate)\n sel_plus_alts.append(alt_select)\n selections_to_return.append(sel_plus_alts)\n return selections_to_return", "def check_queue(st):\n\n logging.info(\"Checking queue...\")\n check_time = time.time()\n n_waiting_jobs = BatchPlugin.poll_queue()\n\n if n_waiting_jobs is not None:\n\n # Correction factor\n corr = st['vms_allegedly_running'] * cf['elastiq']['n_jobs_per_vm']\n logging.info(\"Jobs: waiting=%d | allegedly running=%d | considering=%d\" % \\\n (n_waiting_jobs, corr, n_waiting_jobs-corr))\n n_waiting_jobs -= corr\n\n if n_waiting_jobs > cf['elastiq']['waiting_jobs_threshold']:\n if st['first_seen_above_threshold'] != -1:\n if (check_time-st['first_seen_above_threshold']) > cf['elastiq']['waiting_jobs_time_s']:\n # Above threshold time-wise and jobs-wise: do something\n logging.info(\"Waiting jobs: %d (above threshold of %d for more than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n list_ok = scale_up( math.ceil(n_waiting_jobs / float(cf['elastiq']['n_jobs_per_vm'])), valid_hostnames=st['workers_status'].keys(), vms_allegedly_running=st['vms_allegedly_running'] )\n for inst in list_ok:\n change_vms_allegedly_running(st, 1, inst)\n st['event_queue'].append({\n 'action': 'check_owned_instance',\n 'when': time.time() + cf['elastiq']['estimated_vm_deploy_time_s'],\n 'params': [ inst ]\n })\n st['first_seen_above_threshold'] = -1\n else:\n # Above threshold but not for enough time\n logging.info(\"Waiting jobs: %d (still above threshold of %d for less than %ds)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold'], cf['elastiq']['waiting_jobs_time_s']))\n else:\n # First time seen above threshold\n logging.info(\"Waiting jobs: %d (first time above threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = check_time\n else:\n # Not above threshold: reset\n logging.info(\"Waiting jobs: %d (below threshold of %d)\" % \\\n (n_waiting_jobs, cf['elastiq']['waiting_jobs_threshold']))\n st['first_seen_above_threshold'] = -1\n else:\n logging.error(\"Cannot get the number of waiting jobs this time, sorry\")\n\n return {\n 'action': 'check_queue',\n 'when': time.time() + cf['elastiq']['check_queue_every_s']\n }", "def _build_task_queue(self, dt: datetime.datetime, scheduled_tasks: List[ScheduledTask]):\r\n self.task_queue = tuple([task for task in scheduled_tasks if task.is_scheduled_to_run(dt)])\r\n logging.info(f\"Task queue built, {len(self.task_queue)} tasks scheduled\")", "def query_active(cls, schedule_id=None):\n q = 
Session.query(cls).outerjoin(Lesson).join(Schedule)\n if schedule_id is None:\n stmt = Schedule.query_current_id().subquery()\n q = q.join((stmt, Lesson.schedule_id == stmt.c.id))\n else:\n q = q.filter(Schedule.id == schedule_id)\n return q", "def query_active(cls, schedule_id=None):\n q = Session.query(cls).outerjoin(Lesson).join(Schedule)\n if schedule_id is None:\n stmt = Schedule.query_current_id().subquery()\n q = q.join((stmt, Lesson.schedule_id == stmt.c.id))\n else:\n q = q.filter(Schedule.id == schedule_id)\n return q", "def exec_query(collection,\n collection_name,\n granularity,\n queries,\n query_file_name,\n fig_dir,\n grid_dir):\n\n time_grid = [[None for i in range(granularity)] for j in range(granularity)]\n plan_grid = [[0 for i in range(granularity)] for j in range(granularity)]\n itr_count = 0\n fig_id = 0\n not_exists_marker = 'NULL'\n\n for (query, b_i, a_i) in queries:\n progress = round(float(itr_count) * 100 / len(queries), 2)\n print(\"Progress {}%\".format(progress))\n\n # display result\n if progress % 2 < 0.001:\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n query_file_name.replace(\".txt\", \"\")),\n granularity,\n id=\"fig_{:0>5d}\".format(fig_id))\n fig_id += 1\n\n # timeout\n # t_win, t_a, t_b, t_cover, t_tbl = timeout, timeout, timeout, timeout, timeout\n projection = {\"_id\": 0, \"a\": 1, \"b\": 1}\n\n # measure time consumption of executing each query plan\n print(\"Forcing collscan\")\n table_scan_explain = collection.find(query, projection).hint([(\"$natural\", 1)]).explain()\n t_tbl = table_scan_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing aIdx\")\n t_a = not_exists_marker\n if \"aIdx\" in collection.index_information():\n idx_a_explain = collection.find(query, projection).hint(\"aIdx\").explain()\n t_a = idx_a_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing bIdx\")\n t_b = not_exists_marker\n if \"bIdx\" in collection.index_information():\n idx_b_explain = collection.find(query, projection).hint(\"bIdx\").explain()\n t_b = idx_b_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing coverIdx\")\n t_cover = not_exists_marker\n if \"coverIdx\" in collection.index_information():\n idx_cover_explain = collection.find(query, projection).hint(\"coverIdx\").explain()\n t_cover = idx_cover_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n # NOTE: FORMAT a|b|coverIdx|collscan\n t_s = [str(t_a), str(t_b), str(t_cover), str(t_tbl)]\n time_grid[b_i][a_i] = \"|\".join(t_s)\n\n # run the query without hint\n print(\"Finding winner\")\n exec_explain = collection.find(query, projection).explain()\n # t_win = exec_explain[\"executionStats\"][\"executionTimeMillis\"]\n winning_plan = str(exec_explain['queryPlanner']['winningPlan'])\n\n if 'aIdx' in winning_plan:\n plan_grid[b_i][a_i] = 1\n elif 'bIdx' in winning_plan:\n plan_grid[b_i][a_i] = 2\n elif 'coverIdx' in winning_plan:\n plan_grid[b_i][a_i] = 3\n elif 'COLLSCAN' in winning_plan:\n plan_grid[b_i][a_i] = 4\n\n pprint(exec_explain['queryPlanner'])\n print(\"Time: a: {}, b: {}, cover: {} ,collscan: {}\".format(t_a, t_b, t_cover, t_tbl))\n print(\"=\" * 60)\n\n itr_count += 1\n\n save_grid(plan_grid, os.path.join(grid_dir, collection_name,\n \"plan_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n save_grid(time_grid, os.path.join(grid_dir, collection_name,\n \"time_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n 
query_file_name.replace(\".txt\", \"\")),\n granularity,\n id=\"fig_{:0>5d}\".format(fig_id))\n return", "def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' % schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request", "async def test_get_tasks(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # declare _scheduler task\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'get_tasks'\n interval_schedule.process_name = \"sleep5\"\n interval_schedule.repeat = datetime.timedelta(seconds=1)\n interval_schedule.exclusive = False\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(15)\n\n # Assert running tasks\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.INTERRUPTED)])\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"end_time\", \"=\", 'NULL'])\n assert tasks\n\n tasks = await scheduler.get_tasks(limit=50)\n states = [int(task.state) for task in tasks]\n\n assert len(tasks) > 1\n assert int(Task.State.RUNNING) in states\n assert int(Task.State.COMPLETE) in states\n\n tasks = await scheduler.get_tasks(1)\n assert len(tasks) == 1\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"]], offset=50)\n assert not tasks\n\n tasks = await scheduler.get_tasks(\n where=[\"state\", \"=\", int(Task.State.RUNNING)],\n sort=[[\"state\", \"desc\"], [\"start_time\", \"asc\"]])\n assert tasks\n\n tasks = await scheduler.get_tasks(or_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n tasks = await scheduler.get_tasks(and_where_list=[[\"state\", \"=\", int(Task.State.RUNNING)], \\\n [\"state\", \"=\", int(Task.State.RUNNING)]])\n assert tasks\n\n await self.stop_scheduler(scheduler)", "def GenerateRequestQueuing(self):\n url_to_requests = collections.defaultdict(list)\n for rq in self._request_track.GetEvents():\n url_to_requests[rq.url].append(rq)\n # Queuing events are organized by source id, which corresponds to a load of\n # a url. 
First collect timing information for each source id, then associate\n # with each request.\n timing_by_source_id = {}\n for source_id, events in self._queuing_events_by_id.iteritems():\n assert all(e.end_msec is None for e in events), \\\n 'Unexpected end_msec for nested async queuing events'\n ready_times = [e.start_msec for e in events if e.name == self.READY_NAME]\n if not ready_times:\n ready_msec = None\n else:\n assert len(ready_times) == 1, events\n ready_msec = ready_times[0]\n timing_by_source_id[source_id] = (\n min(e.start_msec for e in events),\n max(e.start_msec for e in events),\n ready_msec)\n queue_info = {}\n for request_url, requests in url_to_requests.iteritems():\n matching_source_ids = set(\n source_id for source_id, url in self._source_id_to_url.iteritems()\n if url == request_url)\n if len(matching_source_ids) > 1:\n logging.warning('Multiple matching source ids, probably duplicated'\n 'urls: %s', [rq.url for rq in requests])\n # Get first source id.\n sid = next(s for s in matching_source_ids) \\\n if matching_source_ids else None\n (throttle_start_msec, throttle_end_msec, ready_msec) = \\\n timing_by_source_id[sid] if matching_source_ids else (-1, -1, -1)\n\n blocking_requests = []\n for sid, (flight_start_msec,\n flight_end_msec, _) in timing_by_source_id.iteritems():\n if (flight_start_msec < throttle_start_msec and\n flight_end_msec > throttle_start_msec and\n flight_end_msec < throttle_end_msec):\n blocking_requests.extend(\n url_to_requests.get(self._source_id_to_url[sid], []))\n\n info = collections.namedtuple(\n 'QueueInfo', ['start_msec', 'end_msec', 'ready_msec', 'blocking'\n 'source_ids'])\n info.start_msec = throttle_start_msec\n info.end_msec = throttle_end_msec\n info.ready_msec = ready_msec\n current_request_ids = set(rq.request_id for rq in requests)\n info.blocking = [b for b in blocking_requests\n if b is not None and\n b.request_id not in current_request_ids]\n info.source_ids = matching_source_ids\n for rq in requests:\n queue_info[rq] = info\n return queue_info", "def _fill_query_slots(self, queries: List[Query]) -> None:\n while queries and self.query_slots > 0:\n logger.debug(\n f\"{self.query_slots} available query slots, creating query task\"\n )\n query = queries.pop(0)\n query_task_id = self.client.create_query_task(query.query_id)\n self.query_slots -= 1\n query.query_task_id = query_task_id\n self._query_by_task_id[query_task_id] = query\n self._running_queries.append(query)", "def RequeueWorkItems(instance_id):\n query = db.Query(run_log.RunLog)\n query.filter('status =', enum.CASE_STATUS.IN_PROGRESS)\n query.filter('client_id =', instance_id)\n\n logs = []\n for log in query:\n logs.append(log)\n\n # Ensure that the work item can be retried.\n if log.retry_count > 0:\n log.retry_count -= 1\n log.status = enum.CASE_STATUS.QUEUED\n log.client_id = ''\n log.priority -= 1\n else:\n log.status = enum.CASE_STATUS.UNKNOWN_ERROR\n\n db.put(logs)", "def compute_schedules(courses=None, excluded_times=(), free_sections_only=True, problem=None, generator=False, start=0):\n s = Scheduler(free_sections_only, problem)\n s.exclude_times(*tuple(excluded_times))\n return s.find_schedules(courses, generator, start)", "def load_schedules(self, from_date=None, to_date=None, supplier='All', day=None):\n logger.info('SchedulePurchase loading purchase schedules initiated')\n data = []\n try:\n with Transaction().start(DBNAME, 1):\n if not day:\n dataobj = self.ob.calculate_requirement(from_date, to_date)\n else:\n dataobj = self.ob.update_ingredients(day)\n 
for i, j in dataobj.iteritems():\n if j[1] <= 0:\n continue\n dictionary = {}\n # Product = Model.get('product.product')\n if supplier == 'All':\n product = self.Product.search([('name', '=', i),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])\n else:\n product = self.Product.search([('name', '=', i),\n ('product_suppliers', '=', supplier),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])\n product = product[-1] if product else None\n if product:\n dictionary['code'] = product.code\n dictionary['item'] = product.template.name\n dictionary['category'] = product.template.category.name\n dictionary['unit'] = j[0].name\n dictionary['quantity'] = j[1].quantize(Decimal('0.11')).to_eng()\n suppliers = product.template.product_suppliers\n if suppliers:\n dictionary['supplier'] = suppliers[0].party.name\n data.append(dictionary)\n else:\n pass\n return data\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return data", "def _queue_subtasks(self, create_subtask_fcn, items_per_query, items_per_task, initial_count, extra_count):\r\n\r\n task_id = str(uuid4())\r\n instructor_task = InstructorTaskFactory.create(\r\n course_id=self.course.id,\r\n task_id=task_id,\r\n task_key='dummy_task_key',\r\n task_type='bulk_course_email',\r\n )\r\n\r\n self._enroll_students_in_course(self.course.id, initial_count)\r\n task_queryset = CourseEnrollment.objects.filter(course_id=self.course.id)\r\n\r\n def initialize_subtask_info(*args): # pylint: disable=unused-argument\r\n \"\"\"Instead of initializing subtask info enroll some more students into course.\"\"\"\r\n self._enroll_students_in_course(self.course.id, extra_count)\r\n return {}\r\n\r\n with patch('instructor_task.subtasks.initialize_subtask_info') as mock_initialize_subtask_info:\r\n mock_initialize_subtask_info.side_effect = initialize_subtask_info\r\n queue_subtasks_for_query(\r\n entry=instructor_task,\r\n action_name='action_name',\r\n create_subtask_fcn=create_subtask_fcn,\r\n item_queryset=task_queryset,\r\n item_fields=[],\r\n items_per_query=items_per_query,\r\n items_per_task=items_per_task,\r\n )", "def queue_all_instances(self):\n if not self.is_job:\n return []\n\n tasks_list = []\n for job_instance in self.instances:\n tasks_list.append(job_instance.queue())\n\n self.status = 'QUEUED'\n return tasks_list", "def clear_not_launched_queued_tasks(self, session=None) -> None:\n self.log.debug(\"Clearing tasks that have not been launched\")\n if not self.kube_client:\n raise AirflowException(NOT_STARTED_MESSAGE)\n queued_tasks = session.query(TaskInstance).filter(TaskInstance.state == State.QUEUED).all()\n self.log.info('When executor started up, found %s queued task instances', len(queued_tasks))\n\n for task in queued_tasks:\n # pylint: disable=protected-access\n self.log.debug(\"Checking task %s\", task)\n dict_string = \"dag_id={},task_id={},execution_date={},airflow-worker={}\".format(\n pod_generator.make_safe_label_value(task.dag_id),\n pod_generator.make_safe_label_value(task.task_id),\n pod_generator.datetime_to_label_safe_datestring(task.execution_date),\n pod_generator.make_safe_label_value(str(self.scheduler_job_id)),\n )\n # pylint: enable=protected-access\n kwargs = dict(label_selector=dict_string)\n if self.kube_config.kube_client_request_args:\n for key, value in self.kube_config.kube_client_request_args.items():\n kwargs[key] = value\n pod_list = self.kube_client.list_namespaced_pod(self.kube_config.kube_namespace, **kwargs)\n if not pod_list.items:\n self.log.info(\n 
'TaskInstance: %s found in queued state but was not launched, rescheduling', task\n )\n session.query(TaskInstance).filter(\n TaskInstance.dag_id == task.dag_id,\n TaskInstance.task_id == task.task_id,\n TaskInstance.execution_date == task.execution_date,\n ).update({TaskInstance.state: State.NONE})", "def _sync_power_states(self, context):\n db_instances = objects.InstanceList.get_by_host(context, self.host,\n expected_attrs=[],\n use_slave=True)\n\n #num_vm_instances = self.driver.get_num_instances()\n vm_instances_stats = self.driver.list_instances_stats()\n num_vm_instances = len(vm_instances_stats)\n num_db_instances = len(db_instances)\n\n if num_vm_instances != num_db_instances:\n LOG.warning(_LW(\"While synchronizing instance power states, found \"\n \"%(num_db_instances)s instances in the database \"\n \"and %(num_vm_instances)s instances on the \"\n \"hypervisor.\"),\n {'num_db_instances': num_db_instances,\n 'num_vm_instances': num_vm_instances})\n\n def _sync(db_instance, state):\n # NOTE(melwitt): This must be synchronized as we query state from\n # two separate sources, the driver and the database.\n # They are set (in stop_instance) and read, in sync.\n @utils.synchronized(db_instance.uuid)\n def query_driver_power_state_and_sync():\n self._query_driver_power_state_and_sync(context, db_instance, state)\n\n try:\n query_driver_power_state_and_sync()\n except Exception:\n LOG.exception(_LE(\"Periodic sync_power_state task had an \"\n \"error while processing an instance.\"),\n instance=db_instance)\n\n self._syncs_in_progress.pop(db_instance.uuid)\n\n for db_instance in db_instances:\n # process syncs asynchronously - don't want instance locking to\n # block entire periodic task thread\n uuid = db_instance.uuid\n if uuid in self._syncs_in_progress:\n LOG.debug('Sync already in progress for %s' % uuid)\n else:\n LOG.debug('Triggering sync for uuid %s' % uuid)\n provider_instance_id = self._get_provider_instance_id(uuid)\n provider_instance_state = vm_instances_stats.get(provider_instance_id,\n power_state.NOSTATE)\n\n self._syncs_in_progress[uuid] = True\n self._sync_power_pool.spawn_n(_sync, db_instance, provider_instance_state)", "def task_refresh_all_stats_score(request):\n start = time.time()\n cls_name = request.POST.get('cls') or 'Day'\n destroy = int(request.POST.get('destroy', '0'))\n cursor = datastore_query.Cursor(urlsafe=request.POST.get('cursor'))\n task_count = int(request.POST.get('task_count', '0'))\n assert cls_name in ('Day', 'Multi'), cls_name\n cls = (\n models.AccountStatsDay\n if cls_name == 'Day' else models.AccountStatsMulti)\n\n # Task queues are given 10 minutes. 
Do it in 9 minutes chunks to protect\n # against most timeout conditions.\n timeout = 540\n updated = 0\n skipped = 0\n try:\n futures = []\n chunk_size = 10\n items = []\n more = True\n if destroy:\n options = ndb.QueryOptions(keys_only=True)\n else:\n options = ndb.QueryOptions()\n while more:\n batch, cursor, more = cls.query(default_options=options).fetch_page(\n 20, start_cursor=cursor)\n if destroy:\n futures.extend(ndb.delete_multi_async(batch))\n updated += len(batch)\n else:\n for i in batch:\n score = models.compute_score(i)\n if i.score != score:\n items.append(i)\n if len(items) == chunk_size:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n items = []\n futures = [f for f in futures if not f.done()]\n else:\n skipped += 1\n if time.time() - start >= timeout:\n break\n if items:\n futures.extend(ndb.put_multi_async(items))\n updated += chunk_size\n ndb.Future.wait_all(futures)\n if not more and cls_name == 'Day':\n # Move to the Multi instances.\n more = True\n cls_name = 'Multi'\n cursor = datastore_query.Cursor()\n if more:\n taskqueue.add(\n url=reverse(task_refresh_all_stats_score),\n params={\n 'cls': cls_name,\n 'cursor': cursor.urlsafe() if cursor else '',\n 'destroy': str(destroy),\n 'task_count': str(task_count+1),\n },\n queue_name='refresh-all-stats-score')\n result = 200\n except (db.Timeout, DeadlineExceededError):\n result = 500\n out = 'Index: %d\\nType = %s\\nStored %d items\\nSkipped %d\\nIn %.1fs\\n' % (\n task_count, cls.__name__, updated, skipped, time.time() - start)\n if result == 200:\n logging.info(out)\n else:\n logging.error(out)\n return HttpTextResponse(out, status=result)", "def schedule_run_instance(self, context, request_spec,\n admin_password, injected_files,\n requested_networks, is_first_time,\n filter_properties, legacy_bdm_in_spec):\n instance_uuids = request_spec.get('instance_uuids')\n for num, instance_uuid in enumerate(instance_uuids):\n request_spec['instance_properties']['launch_index'] = num\n try:\n #LOG.info(\"jach:context = %(context)s\" % {'context': context.__dict__})\n #LOG.info(\"jach:request_spec = %(request_spec)s\" % locals())\n #LOG.info(\"jach:filter_properties = %(filter_properties)s\" % locals())\n \n host = self._schedule(context, CONF.compute_topic,\n request_spec, filter_properties)\n updated_instance = driver.instance_update_db(context,\n instance_uuid)\n self.compute_rpcapi.run_instance(context,\n instance=updated_instance, host=host,\n requested_networks=requested_networks,\n injected_files=injected_files,\n admin_password=admin_password,\n is_first_time=is_first_time,\n request_spec=request_spec,\n filter_properties=filter_properties,\n legacy_bdm_in_spec=legacy_bdm_in_spec)\n except Exception as ex:\n # NOTE(vish): we don't reraise the exception here to make sure\n # that all instances in the request get set to\n # error properly\n driver.handle_schedule_error(context, ex, instance_uuid,\n request_spec)", "async def _put_records_from_page_in_queue(page, commons_url, lock, queue):\n index = Gen3Index(commons_url)\n async with lock:\n # default ssl handling unless it's explicitly http://\n ssl = None\n if \"https\" not in commons_url:\n ssl = False\n\n records = await index.async_get_records_on_page(\n page=page, limit=INDEXD_RECORD_PAGE_SIZE, _ssl=ssl\n )\n await queue.put(records)", "def queue_subtasks_for_query(entry, action_name, create_subtask_fcn, item_queryset, item_fields, items_per_query, items_per_task):\r\n task_id = entry.task_id\r\n total_num_items = item_queryset.count()\r\n\r\n # 
Calculate the number of tasks that will be created, and create a list of ids for each task.\r\n total_num_subtasks = _get_number_of_subtasks(total_num_items, items_per_query, items_per_task)\r\n subtask_id_list = [str(uuid4()) for _ in range(total_num_subtasks)]\r\n\r\n # Update the InstructorTask with information about the subtasks we've defined.\r\n TASK_LOG.info(\"Task %s: updating InstructorTask %s with subtask info for %s subtasks to process %s items.\",\r\n task_id, entry.id, total_num_subtasks, total_num_items) # pylint: disable=E1101\r\n progress = initialize_subtask_info(entry, action_name, total_num_items, subtask_id_list)\r\n\r\n # Construct a generator that will return the recipients to use for each subtask.\r\n # Pass in the desired fields to fetch for each recipient.\r\n item_generator = _generate_items_for_subtask(\r\n item_queryset,\r\n item_fields,\r\n total_num_items,\r\n total_num_subtasks,\r\n items_per_query,\r\n items_per_task\r\n )\r\n\r\n # Now create the subtasks, and start them running.\r\n TASK_LOG.info(\"Task %s: creating %s subtasks to process %s items.\",\r\n task_id, total_num_subtasks, total_num_items)\r\n num_subtasks = 0\r\n for item_list in item_generator:\r\n subtask_id = subtask_id_list[num_subtasks]\r\n num_subtasks += 1\r\n subtask_status = SubtaskStatus.create(subtask_id)\r\n new_subtask = create_subtask_fcn(item_list, subtask_status)\r\n new_subtask.apply_async()\r\n\r\n # Subtasks have been queued so no exceptions should be raised after this point.\r\n\r\n # Return the task progress as stored in the InstructorTask object.\r\n return progress", "def call(self, query_fn, args=(), kwargs={}):\n\n def wait_call(conn, ret_lst, query_fn, args, kwargs):\n\n time_called = time.time()\n\n cur = conn.execute(\n \"SELECT id, allowed_times_int, per_seconds_float, \"\n \"count_int, since_float \"\n \"FROM multi_proc_rate_limit;\"\n )\n ratelimits = cur.fetchall()\n cur.close()\n\n wait_time = 0.0\n ids_to_reset = []\n for rl_id, allowed_times, per_secs, cnt, t_since in ratelimits:\n\n if cnt < allowed_times and time_called < t_since + per_secs:\n conn.execute(\n \"UPDATE multi_proc_rate_limit SET count_int = \"\n \"%r WHERE id = %r;\" % (cnt + 1, rl_id)\n ).close()\n else:\n ids_to_reset.append(rl_id)\n # Cap sleep time to `per_secs` in case the user changed the\n # OS clock way forward to avoid waiting however long that\n # is because the max we should ever wait to be within the\n # rate limit is `per_secs`.\n new_wait_time = t_since + per_secs - time_called\n if new_wait_time > per_secs:\n new_wait_time = per_secs\n wait_time = max(wait_time, new_wait_time)\n\n time.sleep(wait_time)\n\n if not ret_lst:\n ret_lst.append(query_fn(*args, **kwargs))\n\n conn.execute(\n \"UPDATE multi_proc_rate_limit SET count_int = 1, since_float\"\n \" = %r WHERE id IN (%s);\" % (\n time.time(),\n \", \".join(map(lambda x: str(x), ids_to_reset))\n )\n ).close()\n\n return\n\n # And call it.\n return self.isolate_db_query(wait_call, (query_fn, args, kwargs))", "def _enqueue(self, schedule):\n updated_schedule = get_object(type(schedule), pk=schedule.pk)\n self.set.remove(schedule)\n if updated_schedule == None or updated_schedule.deleted:\n self.log.info('%s was removed.' 
% schedule)\n if updated_schedule != None:\n updated_schedule.scheduler = None\n updated_schedule.save()\n return\n schedule = updated_schedule\n \n if not schedule.scheduler == self:\n self.log.info(\"%s is no longer tied to this Scheduler.\" %\n schedule)\n # self.set.remove(schedule)\n return\n instance = Instance.objects.create(\n task=schedule.task, schedule=schedule)\n self.log.info('Enqueuing %s.' % instance)\n schedule.queue.push(instance)\n schedule.enqueued()\n if not schedule.finished():\n self.add(schedule)\n else:\n schedule.scheduler = None\n schedule.save()", "def appointments(resources_slots, from_date, to_date, resources=[], status_all=[], resources_all={}):\n\n query = \"\"\"\n SELECT A.STARTTIME, A.ENDTIME, V.APPOINTMENTTYPEID, V.TYPE, \\\n A.RESOURCEID, APPOINTMENTDATE, S.STATUS, S.APPOINTMENTSTATUSID\n FROM PATIENT P\n JOIN PATIENT_APPOINTMENTS AS A ON P.PATIENTID = A.PATIENTID\n JOIN APPOINTMENTTYPE AS V ON a.APPOINTMENTTYPEID = v.APPOINTMENTTYPEID\n LEFT OUTER JOIN APPOINTMENTSTATUS AS S ON A.APPOINTMENTSTATUSID = S.APPOINTMENTSTATUSID\n left join (PATIENTINSURANCE PAI\n join INSURANCE_TYPE IT on IT.INSURANCE_TYPE_ID=PAI.INSURANCE_TYPEID\n join INSURANCE_COMPANY IC on IC.INSURANCE_COMPANY_ID=PAI.INSURANCE_COMPANY_ID)\n on P.PatientID=PAI.PATIENTID and PAI.INSURANCE_TYPEID=1 and PAI.ACTIVE = 1\n WHERE V.APPOINTMENTTYPEID = A.APPOINTMENTTYPEID AND P.PATIENTID = A.PATIENTID\n AND A.ACTIVE = 1\n \"\"\"\n\n if from_date and to_date:\n query += \" AND APPOINTMENTDATE >= '%s' AND APPOINTMENTDATE <= '%s' \" % (from_date, to_date)\n\n if resources:\n query += \" AND A.RESOURCEID IN (%s)\" % ','.join([str(r) for r in resources])\n\n query += \" ORDER BY A.STARTTIME\"\n results = []\n if not EMRSQLServer.connection():\n return results\n\n rows = EMRSQLServer.execute_query(query)\n\n output = defaultdict(list)\n for row in rows:\n output[row['RESOURCEID']].append(row)\n for item, value in output.items():\n studies = defaultdict(list)\n for i, v in enumerate(output[item]):\n studies_start_date = v['APPOINTMENTDATE'].strftime('%Y-%m-%d')\n studies[item].append({\n 'name': v['TYPE'],\n 'start_time': v['STARTTIME'],\n 'end_time': v['ENDTIME'],\n 'studies_start_date': studies_start_date,\n 'status': v['STATUS'],\n 'APPOINTMENTSTATUSID': v['APPOINTMENTSTATUSID']\n })\n\n studies_by_date = defaultdict(list)\n studies_seen = defaultdict(list)\n for st in studies[item]:\n studies_by_date[st['studies_start_date']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n studies_seen[st['APPOINTMENTSTATUSID']].append({\n 'name': st['name'],\n 'start_time': st['start_time'].strftime('%H:%M:%S'),\n 'end_time': st['end_time'].strftime('%H:%M:%S'),\n 'status': st['status']\n })\n\n number_of_confirmed_studies = sum([len(studies_seen[int(i)]) for i in status_all])\n days_taken_for_studies = len(studies_by_date)\n total_slots_for_days = resources_slots[item] * days_taken_for_studies\n utilization = (number_of_confirmed_studies * 100) // total_slots_for_days\n\n if utilization <= 79:\n color_code, text_color = '#d9534f', 'white'\n elif (utilization >= 80) and (utilization <= 89):\n color_code, text_color = '#ffe14b', 'black'\n elif utilization >= 90:\n color_code, text_color = '#3c903d', 'white'\n\n results.append({\n 'ResourceID': item,\n 'ResourceName': resources_all[item],\n 'TotalStudies': len(value),\n 'Studies': studies[item],\n 'studies_by_date': studies_by_date,\n 'utilization': 
'{0}%'.format(utilization),\n 'scheduled_percentage': '{0}%'.format((len(value) * 100) // total_slots_for_days),\n 'number_of_confirmed_studies': number_of_confirmed_studies,\n 'seen_percentage': '{0}%'.format((number_of_confirmed_studies * 100) // len(value)),\n 'total_slots_in_a_day': total_slots_for_days,\n 'color_code': color_code,\n 'text_color': text_color\n })\n return results", "def allowed_instances(context, requested_instances, instance_type):\n project_id = context.project_id\n context = context.elevated()\n requested_cores = requested_instances * instance_type['vcpus']\n requested_ram = requested_instances * instance_type['memory_mb']\n usage = db.instance_data_get_for_project(context, project_id)\n used_instances, used_cores, used_ram = usage\n quota = get_project_quotas(context, project_id)\n allowed_instances = _get_request_allotment(requested_instances,\n used_instances,\n quota['instances'])\n allowed_cores = _get_request_allotment(requested_cores, used_cores,\n quota['cores'])\n allowed_ram = _get_request_allotment(requested_ram, used_ram, quota['ram'])\n allowed_instances = min(allowed_instances,\n allowed_cores // instance_type['vcpus'],\n allowed_ram // instance_type['memory_mb'])\n return min(requested_instances, allowed_instances)", "def _generate_items_for_subtask(item_queryset, item_fields, total_num_items, total_num_subtasks, items_per_query, items_per_task):\r\n num_queries = int(math.ceil(float(total_num_items) / float(items_per_query)))\r\n last_pk = item_queryset.order_by('pk')[0].pk - 1\r\n num_items_queued = 0\r\n available_num_subtasks = total_num_subtasks\r\n all_item_fields = list(item_fields)\r\n all_item_fields.append('pk')\r\n\r\n for query_number in range(num_queries):\r\n # In case total_num_items has increased since it was initially calculated\r\n # include all remaining items in last query.\r\n item_sublist = item_queryset.order_by('pk').filter(pk__gt=last_pk).values(*all_item_fields)\r\n if query_number < num_queries - 1:\r\n item_sublist = list(item_sublist[:items_per_query])\r\n else:\r\n item_sublist = list(item_sublist)\r\n\r\n last_pk = item_sublist[-1]['pk']\r\n num_items_this_query = len(item_sublist)\r\n\r\n # In case total_num_items has increased since it was initially calculated just distribute the extra\r\n # items among the available subtasks.\r\n num_tasks_this_query = min(available_num_subtasks, int(math.ceil(float(num_items_this_query) / float(items_per_task))))\r\n available_num_subtasks -= num_tasks_this_query\r\n\r\n chunk = int(math.ceil(float(num_items_this_query) / float(num_tasks_this_query)))\r\n for i in range(num_tasks_this_query):\r\n items_for_task = item_sublist[i * chunk:i * chunk + chunk]\r\n yield items_for_task\r\n\r\n num_items_queued += num_items_this_query\r\n\r\n # Because queueing does not happen in one transaction the number of items in the queryset may change\r\n # from the initial count. For example if the queryset is of the CourseEnrollment model students may\r\n # enroll or unenroll while queueing is in progress. 
The purpose of the original count is to estimate the\r\n # number of subtasks needed to perform the requested task.\r\n if num_items_queued != total_num_items:\r\n TASK_LOG.info(\"Number of items generated by chunking %s not equal to original total %s\", num_items_queued, total_num_items)", "def get_background_jobs(self):\n # TODO: need to be more dynamic here?\n return BackgroundJob.objects.filter(\n Q(variants_exportfilebgjob_related__case=self)\n | Q(cadd_submission_bg_job__case=self)\n | Q(distiller_submission_bg_job__case=self)\n | Q(spanr_submission_bg_job__case=self)\n | Q(filter_bg_job__case=self)\n )", "def runner(event, context):\n\n logger.info('Running available_ips...')\n for region in regions:\n for acct in accounts:\n logger.info(\n \"\"\"\n cidr-house-rules-{0}-available_ips on account {1} in region {2}\n \"\"\".format(environment, acct['id'], region)\n )\n invoke_process(function_name, acct['id'], region)", "def _periodically_process_pending_rows(self):\n while self._run_process_pending_rows:\n try:\n before = time.time()\n try:\n self.process_pending_rows()\n except ResourceExhaustedError:\n _handle_resource_exhausted_error()\n continue\n except WorkbookSpaceNeededError:\n self._handle_workbook_space_needed_error()\n continue\n except Exception as ex:\n _debug_print(f\"Exception in process_pending_rows(): {ex}\")\n continue\n\n if (\n self._add_rows_time > self.max_time_per_process_loop\n or self.rows_in_active_sheet > MAX_EVENTS_TO_SPLIT_TO_NEW_SHEET\n ):\n # its taking too long to add rows to the sheet. Rotate\n _debug_print(\n f\"triggering rotation as the add_rows_time was: {self._add_rows_time} and rows_in_active_sheet was {self.rows_in_active_sheet}\"\n )\n self._rotate_to_new_sheet_in_workbook()\n\n after = time.time()\n time.sleep(self._calculate_periodic_loop_sleep_time(after - before))\n except Exception as ex:\n _debug_print(\n f\"Exception made it to the top of the loop in _periodically_process_pending_rows(): {traceback.format_exc()}\"\n )\n _handle_resource_exhausted_error()\n continue", "def checkExpiredThread(q, results, db, cursor):\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")", "def execute_queries():\n fetch_job_listings(engine)\n update_job_listing(engine)", "def run(self, request, queryset):\n\n for settings in queryset:\n settings.run()\n\n self.message_user(\n request,\n _('Data synchronization started in background.'))", "def acquire(self):\n self.logger.debug(\"in JobQ acquire\")\n dataframe = pandas.DataFrame()\n (collector_host, secondary_collectors) = htcondor_query.split_collector_host(self.collector_host)\n for schedd in self.schedds:\n try:\n condor_q = htcondor_query.CondorQ(schedd_name=schedd, pool_name=self.collector_host)\n 
condor_q.load(\n constraint=self.constraint, format_list=self.classad_attrs, condor_config=self.condor_config\n )\n\n for eachDict in condor_q.stored_data:\n for key, value in self.correction_map.items():\n if eachDict.get(key) is None:\n eachDict[key] = value\n\n df = pandas.DataFrame(condor_q.stored_data)\n if not df.empty:\n # Add schedd name and collector host to job records\n df[\"ScheddName\"] = pandas.Series([schedd] * len(condor_q.stored_data))\n df[\"CollectorHost\"] = pandas.Series([collector_host] * len(condor_q.stored_data))\n dataframe = pandas.concat([dataframe, df], ignore_index=True)\n except htcondor_query.QueryError:\n self.logger.warning(\n f'Query error fetching job classads from schedd \"{schedd}\" in collector host(s) \"{collector_host}\".'\n )\n except Exception:\n msg = r'Unexpected error fetching job classads from schedd \"{}\" in collector host(s) \"{}\".'\n self.logger.warning(msg.format(schedd, collector_host))\n self.logger.error(msg.format(schedd, collector_host) + f\" Traceback: {traceback.format_exc()}\")\n return {\"job_manifests\": dataframe}", "def test_retrieve_instances_schedule_state(self):\n pass", "def do_query(ctx):\n\n # The local marker is a uuid of an instance in a cell that is found\n # by the special method instance_get_by_sort_filters(). It should\n # be the next instance in order according to the sort provided,\n # but after the marker instance which may have been in another cell.\n local_marker = None\n\n # Since the regular DB query routines take a marker and assume that\n # the marked instance was the last entry of the previous page, we\n # may need to prefix it to our result query if we're not the cell\n # that had the actual marker instance.\n local_marker_prefix = []\n\n if marker:\n # FIXME(danms): If we knew which cell we were in here, we could\n # avoid looking up the marker again. But, we don't currently.\n\n local_marker = db.instance_get_by_sort_filters(\n ctx, sort_keys, sort_dirs, global_marker_values)\n if local_marker:\n if local_marker != marker:\n # We did find a marker in our cell, but it wasn't\n # the global marker. Thus, we will use it as our\n # marker in the main query below, but we also need\n # to prefix that result with this marker instance\n # since the result below will not return it and it\n # has not been returned to the user yet. Note that\n # we do _not_ prefix the marker instance if our\n # marker was the global one since that has already\n # been sent to the user.\n local_marker_filters = copy.copy(filters)\n if 'uuid' not in local_marker_filters:\n # If a uuid filter was provided, it will\n # have included our marker already if this instance\n # is desired in the output set. If it wasn't, we\n # specifically query for it. If the other filters would\n # have excluded it, then we'll get an empty set here\n # and not include it in the output as expected.\n local_marker_filters['uuid'] = [local_marker]\n local_marker_prefix = db.instance_get_all_by_filters_sort(\n ctx, local_marker_filters, limit=1, marker=None,\n columns_to_join=columns_to_join,\n sort_keys=sort_keys,\n sort_dirs=sort_dirs)\n else:\n # There was a global marker but everything in our cell is\n # _before_ that marker, so we return nothing. 
If we didn't\n # have this clause, we'd pass marker=None to the query below\n # and return a full unpaginated set for our cell.\n return []\n\n main_query_result = db.instance_get_all_by_filters_sort(\n ctx, filters,\n limit=limit, marker=local_marker,\n columns_to_join=columns_to_join,\n sort_keys=sort_keys,\n sort_dirs=sort_dirs)\n\n return (InstanceWrapper(sort_ctx, inst) for inst in\n itertools.chain(local_marker_prefix, main_query_result))", "def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")", "def periodic_tasks(self, context, raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def periodic_tasks(self, context, 
raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def periodic_tasks(self, context, raise_on_error=False):\n return self.run_periodic_tasks(context, raise_on_error=raise_on_error)", "def apply_fixed_schedules(\n relay_mod: Union[RelayFunc, IRModule],\n target: Union[str, Target],\n params: Optional[Dict[str, NDArray]],\n schedule_fn: Callable[[ExtractedTask, Schedule], bool],\n):\n target = Target(target) if isinstance(target, str) else target\n extracted_tasks = extract_task_from_relay(relay_mod, target, params)\n\n database = DummyDatabase()\n\n for task in extracted_tasks:\n mod = Parse._mod(task.dispatched[0])\n sch = Schedule(mod)\n\n if schedule_fn(task, sch):\n workload = database.commit_workload(mod)\n tune_rec = TuningRecord(sch.trace, [0.0], workload, target, [])\n database.commit_tuning_record(tune_rec)\n\n return database", "def _manageWorkers(event=None):\n if _workerConfig is None:\n _loadWorkerConfig()\n if not isinstance(_workerConfig, dict):\n return\n activeJobs = Job().find({\n 'handler': 'celery_handler',\n 'status': {'$not': {'$in': [\n JobStatus.SUCCESS, JobStatus.ERROR, JobStatus.CANCELED\n ]}},\n 'updated': {'$gte': _workerConfig['start']}\n }).count()\n\n if activeJobs == _workerConfig['active']:\n if not activeJobs and len(_workerConfig['started']):\n _stopAllWorkers()\n return\n logger.info('Now have %d active job(s) (was %d)' % (activeJobs, _workerConfig['active']))\n _workerConfig['lastChange'] = time.time()\n startedConcurrency = sum(\n _workerConfig['workers'][idx].get('concurrency', 1) for idx in _workerConfig['started'])\n # Start a worker if we have more active jobs than workers and any workers\n # are unstarted.\n if (activeJobs > startedConcurrency and\n min(activeJobs, _workerConfig['maxConcurrency']) > startedConcurrency):\n _startWorker()\n elif not activeJobs:\n _stopAllWorkers()\n _workerConfig['active'] = activeJobs", "def query_current(cls, year_id=None, date=None, q=None):\n if q is None:\n q = Session.query(Schedule)\n\n if year_id is None:\n stmt = SchoolYear.query_started(date).limit(1).subquery()\n q = q.join((stmt, Schedule.year_id == stmt.c.id))\n else:\n q = q.filter_by(year_id=year_id)\n\n q = q.filter(Schedule.start <= func.date())\n return q", "def run(self):\n while True :\n try:\n instance_id = os.getenv(\"CF_INSTANCE_INDEX\")\n mydict = db.hgetall(application_name)\n if instance_id not in mydict :\n self.queue.put(instance_id)\n except :\n pass\n finally:\n pass", "def enqueue(self, content_object, start, end=None, batch_content_object=None,\n extra_params=None, send_expired=True):\n enqueued = []\n for ab_test in ABTest.objects.filter(stream=self):\n if not ab_test.is_enabled:\n continue\n message = ab_test.random_message()\n send_time = message.send_time(start, end)\n if send_time:\n if send_time <= datetime.datetime.now():\n if send_expired:\n message.send(content_object,\n blacklisted_emails=message.blacklisted_emails(),\n extra_params=extra_params)\n else:\n if batch_content_object:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time,\n batch_content_object=batch_content_object))\n else:\n enqueued.append(Queue.objects.create(message=message,\n content_object=content_object, send_time=send_time))\n return enqueued", "def _get_appointment_slots(self, timezone, employee=None):\n self.ensure_one()\n appt_tz = pytz.timezone(self.appointment_tz)\n requested_tz = pytz.timezone(timezone)\n first_day = 
requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))\n last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))\n\n # Compute available slots (ordered)\n slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)\n if not employee or employee in self.employee_ids:\n self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)\n\n # Compute calendar rendering and inject available slots\n today = requested_tz.fromutc(datetime.utcnow())\n start = today\n month_dates_calendar = cal.Calendar(0).monthdatescalendar\n months = []\n while (start.year, start.month) <= (last_day.year, last_day.month):\n dates = month_dates_calendar(start.year, start.month)\n for week_index, week in enumerate(dates):\n for day_index, day in enumerate(week):\n mute_cls = weekend_cls = today_cls = None\n today_slots = []\n if day.weekday() in (cal.SUNDAY, cal.SATURDAY):\n weekend_cls = 'o_weekend'\n if day == today.date() and day.month == today.month:\n today_cls = 'o_today'\n if day.month != start.month:\n mute_cls = 'text-muted o_mute_day'\n else:\n # slots are ordered, so check all unprocessed slots from until > day\n while slots and (slots[0][timezone][0].date() <= day):\n if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):\n today_slots.append({\n 'employee_id': slots[0]['employee_id'].id,\n 'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),\n 'hours': slots[0][timezone][0].strftime('%H:%M')\n })\n slots.pop(0)\n dates[week_index][day_index] = {\n 'day': day,\n 'slots': today_slots,\n 'mute_cls': mute_cls,\n 'weekend_cls': weekend_cls,\n 'today_cls': today_cls\n }\n\n months.append({\n 'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),\n 'weeks': dates\n })\n start = start + relativedelta(months=1)\n return months", "def process_router_floating_ip_ratelimit_rules(self, ex_gw_port=None):\n if ex_gw_port:\n interface_name = self.get_external_device_name(ex_gw_port['id'])\n LOG.info('router %s ,namespace %s ,start process_rate_limit in interface %s', self.router_id,\n self.ns_name, interface_name)\n self.process_rate_limit(self, interface_name)", "def runTasks(self):\n\n self.logger.INFO(\n f\"STARTING TASKS FOR TRADER {self.user['Name']} - ACCOUNT ID: {self.account_id}\\n\")\n\n def selectSleep():\n \"\"\"\n PRE-MARKET(0400 - 0930 ET): 5 SECONDS\n MARKET OPEN(0930 - 1600 ET): 5 SECONDS\n AFTER MARKET(1600 - 2000 ET): 5 SECONDS\n\n WEEKENDS: 60 SECONDS\n WEEKDAYS(2000 - 0400 ET): 60 SECONDS\n\n EVERYTHING WILL BE BASED OFF CENTRAL TIME\n\n OBJECTIVE IS TO FREE UP UNNECESSARY SERVER USAGE\n \"\"\"\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n day = dt_central.strftime(\"%a\")\n\n tm = dt_central.strftime(\"%H:%M:%S\")\n\n weekdays = [\"Sat\", \"Sun\"]\n\n # IF CURRENT TIME GREATER THAN 8PM AND LESS THAN 4AM, OR DAY IS WEEKEND, THEN RETURN 60 SECONDS\n if tm > \"20:00\" or tm < \"04:00\" or day in weekdays:\n\n return 60\n\n # ELSE RETURN 5 SECONDS\n return 5\n\n while self.isAlive:\n\n try:\n\n self.user = self.users.find_one({\"Name\": self.user[\"Name\"]})\n\n self.asset_type = self.user[\"Accounts\"][self.account_id][\"Asset_Type\"]\n\n self.updateAccountBalance()\n\n self.updateLastPrice()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n # IF MIDNIGHT, ADD 
BALANCE, OPEN POSITIONS PROFIT/LOSS, CLOSED POSITIONS PROFIT/LOSS.\n midnight = dt_central.time().strftime(\"%H:%M\")\n\n if midnight == \"23:55\":\n\n if not self.midnight:\n\n self.balanceHistory()\n\n self.openPositionHistory()\n\n self.closedPositionHistory()\n\n self.midnight = True\n\n else:\n\n self.midnight = False\n\n # RUN TASKS ####################################################\n\n if self.asset_type == \"OPTION\":\n\n self.sellOutOptions()\n\n self.checkTrailingStop()\n\n self.killQueueOrder()\n\n self.sellAtMarketOpen()\n\n dt = datetime.now(tz=pytz.UTC).replace(microsecond=0)\n\n dt_central = dt.astimezone(pytz.timezone('US/Central'))\n\n # SELL ALL SECONDARY_AGG, SEC_AGG_V2 POSITIONS AT END OF DAY\n if dt_central.strftime(\"%H:%M\") == \"14:55\" and self.asset_type == \"EQUITY\":\n\n if not self.market_close_check:\n\n self.sellOutStrategies([{\"Strategy\": \"Secondary_Agg\"},\n {\"Strategy\": \"Sec_Agg_v2\"}])\n\n self.market_close_check = True\n\n else:\n\n self.market_close_check = False\n\n # SELL ALL Sec_Agg_Daytrade AT 14:30\n if dt_central.strftime(\"%H:%M\") == \"14:30\" and self.asset_type == \"EQUITY\":\n\n if not self.eleven_check:\n\n self.sellOutStrategies([{\"Strategy\": \"Sec_Aggressive\"}])\n\n self.eleven_check = True\n\n else:\n\n self.eleven_check = False\n\n except KeyError:\n\n self.isAlive = False\n\n except Exception:\n\n self.logger.ERROR(\n f\"ACCOUNT ID: {self.account_id} - TRADER: {self.user['Name']}\")\n\n finally:\n\n time.sleep(selectSleep())\n\n self.logger.INFO(f\"TASK STOPPED FOR ACCOUNT ID {self.account_id}\")", "def __run_schedules():\n while True:\n __scheduler.run()", "def _run_sql_query_in_batches(self, sql_query_template: str, lookup_values: Set[str]) -> list:\n lookup_values_batches = self._divide_into_chunks(lookup_values, 5000)\n all_matching_rows = []\n for lookup_values_batch in lookup_values_batches:\n sql_query = sql_query_template.replace(self.placeholder_lookup_values_str,\n self._convert_to_str_format(lookup_values_batch))\n matching_rows = self._execute_sql_query(sql_query)\n all_matching_rows += matching_rows\n return all_matching_rows", "def checkSold(auto=False):\n\n #Create connection \n db = pymysql.connect(host=\"localhost\", user=\"testUser\", passwd=\"BorrisBulletDodger\", db=\"scraperdb\", charset='utf8')\n cursor = db.cursor()\n\n #SQL Query\n sql = \"SELECT url FROM motorcycles WHERE adExpiry IS NULL\"\n\n #Find data\n try: \n cursor.execute(sql)\n sqlResult = cursor.fetchall()\n urls = [i[0] for i in sqlResult]\n db.commit()\n except Exception as e:\n db.rollback()\n print(f\"Exception occured: {e}\")\n\n #User input to proceed if not auto\n while not auto:\n cont = input(f\"{len(urls)} stored listings found - Do you wish to check if sold?: \")\n if cont.lower() == 'y' or cont.lower() == 'yes':\n break\n elif cont.lower() == 'n' or cont.lower() == 'no':\n return\n else:\n print(\"Please enter y/n\")\n continue\n \n #Use threading to check if urls have expired\n maxThreads = 5\n urlsQ = Queue(maxsize=0)\n #Set number of threads\n numThreads = min(maxThreads, len(urls))\n #Create lock\n lock = Lock()\n #Create progress bar\n pbar = tqdm(total=len(urls))\n \n #Expired test\n def checkExpiredThread(q, results, db, cursor):\n \"\"\"\n Checks whether input url has expired\n Input: [\"url\"], {} - Keys=urls, vals=False\n \"\"\"\n\n while not q.empty():\n url = q.get()\n logger.debug(f\"{url} started - Tasks left: {q.unfinished_tasks}\")\n pbar.update(1)\n expired = None\n\n #Check if expired\n _, expired = 
getPage(url)\n results[url] = expired\n\n #Insert result into db\n if expired:\n logger.debug(f\"expired url: {url}\")\n #Record todays date\n curTime = datetime.now().strftime(\"%Y-%m-%d\")\n #Prepare sql string\n sql = \"\"\"UPDATE motorcycles\n SET adExpiry=%s\n WHERE url=%s\"\"\"\n #Get Lock - Prevent multiple db inserts simulataneously\n logger.debug(f\"{url} wants the lock\")\n with lock:\n logger.debug(f\"{url} has the lock\")\n try:\n cursor.execute(sql, (curTime, url))\n db.commit()\n except Exception as e:\n db.rollback()\n print(\"Exception occured: {}\".format(e))\n logger.debug(f\"{url} is finished with the lock\")\n\n q.task_done()\n logger.debug(f\"{url} finished\")\n\n\n #Load queue with urls, results dict keys = urls, vals = False - Ad default not expired\n results = {}\n for url in urls:\n urlsQ.put(url)\n results[url] = False\n\n #Create threads that execute checkExpiredThread function, updates data\n for _ in range(numThreads):\n worker = Thread(target=checkExpiredThread, args=(urlsQ, results, db, cursor))\n worker.setDaemon(True)\n worker.start()\n #Wait until the queue has been processed - All URLs checked\n urlsQ.join()\n pbar.close()\n\n #Remember to close database at the end \n db.close()\n \n #Count number of expired urls\n count = sum(1 for value in results.values() if value)\n logger.info(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")\n print(f\"{count}/{len(urls)} tracked listings have been sold since last processed\")", "def scheduledscansobjects():\n pass", "def job_gen(self, time_frame):\n start = time.time()\n end = start + time_frame\n jobs = []\n with self.db_lock:\n jobs = self.rcon.zrangebyscore(self.job_key, start, end)\n\n for job in jobs:\n task = self.task(job)\n yield (job, task)\n\n self._delete_job(jobs)", "def _api_query_paginated(\n self,\n options: dict[str, Any],\n case: Literal['trades', 'asset_movements'],\n ) -> Union[list[Trade], list[AssetMovement], list]:\n endpoint: Literal['trades', 'movements']\n case_: Literal['trades', 'asset_movements']\n if case == 'trades':\n endpoint = 'trades'\n case_ = 'trades'\n elif case == 'asset_movements':\n endpoint = 'movements'\n case_ = 'asset_movements'\n else:\n raise AssertionError(f'Unexpected {self.name} case: {case}')\n\n call_options = options.copy()\n limit = options['limit']\n results: Union[list[Trade], list[AssetMovement], list] = []\n processed_result_ids: set[int] = set()\n retries_left = API_REQUEST_RETRY_TIMES\n while retries_left >= 0:\n response = self._api_query(\n endpoint=endpoint,\n options=call_options,\n )\n if response.status_code != HTTPStatus.OK:\n try:\n error_response = json.loads(response.text)\n except JSONDecodeError:\n msg = f'{self.name} {case} returned an invalid JSON response: {response.text}.'\n log.error(msg, options=call_options)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n # Check if the rate limits have been hit (response JSON as dict)\n if isinstance(error_response, dict):\n if error_response.get('error', None) == API_RATE_LIMITS_ERROR_MESSAGE:\n if retries_left == 0:\n msg = (\n f'{self.name} {case} request failed after retrying '\n f'{API_REQUEST_RETRY_TIMES} times.'\n )\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n # Trigger retry\n log.debug(\n f'{self.name} {case} request reached the rate limits. 
Backing off',\n seconds=API_REQUEST_RETRY_AFTER_SECONDS,\n options=call_options,\n )\n retries_left -= 1\n gevent.sleep(API_REQUEST_RETRY_AFTER_SECONDS)\n continue\n\n # Unexpected JSON dict case, better to log it\n msg = f'Unexpected {self.name} {case} unsuccessful response JSON'\n log.error(msg, error_response=error_response)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n return self._process_unsuccessful_response(\n response=response,\n case=case_,\n )\n\n try:\n response_list = jsonloads_list(response.text)\n except JSONDecodeError:\n msg = f'{self.name} {case} returned invalid JSON response: {response.text}.'\n log.error(msg)\n self.msg_aggregator.add_error(\n f'Got remote error while querying {self.name} {case}: {msg}',\n )\n return []\n\n results_ = self._deserialize_api_query_paginated_results(\n case=case_,\n options=call_options,\n raw_results=response_list,\n processed_result_ids=processed_result_ids,\n )\n results.extend(cast(Iterable, results_))\n # NB: Copying the set before updating it prevents losing the call args values\n processed_result_ids = processed_result_ids.copy()\n # type ignore is due to always having a trade link for bitfinex trades\n processed_result_ids.update({int(result.link) for result in results_}) # type: ignore\n\n if len(response_list) < limit:\n break\n # Update pagination params per endpoint\n # NB: Copying the dict before updating it prevents losing the call args values\n call_options = call_options.copy()\n call_options.update({\n 'start': results[-1].timestamp * 1000,\n })\n\n return results", "def initdata(): # pylint: disable=too-many-statements\n\n # auth test data\n db.session.add(User(username='user1', active=True, roles=['user', 'operator', 'admin']))\n\n # scheduler test data\n db.session.add(Excl(family=ExclFamily.network, value='127.66.66.0/26', comment='blacklist 1'))\n db.session.add(Excl(family=ExclFamily.regex, value=r'^tcp://.*:22$', comment='avoid ssh'))\n\n queue = Queue(\n name='dev dummy',\n config=yaml_dump({'module': 'dummy', 'args': '--dummyparam 1'}),\n group_size=2,\n priority=10,\n active=True\n )\n db.session.add(queue)\n for target in range(3):\n db.session.add(Target(target=target, queue=queue))\n\n db.session.add(Queue(\n name='pentest full syn scan',\n config=yaml_dump({\n 'module': 'nmap',\n 'args': '-sS -A -p1-65535 -Pn --max-retries 3 --script-timeout 10m --min-hostgroup 20 --min-rate 900 --max-rate 1500'\n }),\n group_size=20,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_disco ack scan top10000',\n config=yaml_dump({'module': 'nmap', 'args': '-sA --top-ports 10000 -Pn', 'timing_perhost': 8}),\n group_size=1000,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_data version scan basic',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 4 -Pn', 'delay': 10}),\n group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_data version scan intense',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 8 -Pn', 'delay': 10}),\n group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_disco ipv6 dns discover',\n config=yaml_dump({'module': 'six_dns_discover', 'delay': 1}),\n group_size=1000,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_disco ipv6 enum discover',\n config=yaml_dump({'module': 'six_enum_discover', 'rate': 100}),\n group_size=5,\n priority=10,\n ))\n\n db.session.add(Queue(\n name='sner_data script scan basic',\n 
config=yaml_dump({\n 'module': 'manymap',\n 'args': '-sS --script default,http-headers,ldap-rootdse,ssl-cert,ssl-enum-ciphers,ssh-auth-methods --script-timeout 10m -Pn',\n 'delay': 10\n }),\n group_size=50,\n priority=15,\n ))\n\n db.session.add(Queue(\n name='sner_sweep ack scan portA',\n config=yaml_dump({'module': 'nmap', 'args': '-sA -p1099 -Pn', 'timing_perhost': 1}),\n group_size=4000,\n priority=50,\n ))\n\n db.session.add(Queue(\n name='sner_sweep version scan basic',\n config=yaml_dump({'module': 'manymap', 'args': '-sV --version-intensity 4 -Pn', 'delay': 10}),\n group_size=50,\n priority=55,\n ))\n\n # storage test data host1\n aggregable_vuln = {'name': 'aggregable vuln', 'xtype': 'x.agg', 'severity': SeverityEnum.medium}\n\n host = Host(\n address='127.4.4.4',\n hostname='testhost.testdomain.test<script>alert(1);</script>',\n os='Test Linux 1',\n comment='a some unknown service server'\n )\n db.session.add(host)\n\n db.session.add(Service(\n host=host,\n proto='tcp',\n port=12345,\n state='open:testreason',\n name='svcx',\n info='testservice banner',\n comment='manual testservice comment'\n ))\n\n db.session.add(Vuln(host=host, **aggregable_vuln))\n\n # storage test data host2\n host = Host(\n address='127.3.3.3',\n hostname='testhost1.testdomain.test',\n os='Test Linux 2',\n comment='another server'\n )\n db.session.add(host)\n\n db.session.add(Service(\n host=host,\n proto='tcp',\n port=12345,\n state='closed:testreason',\n name='svcx'\n ))\n\n db.session.add(Vuln(\n host=host,\n name='test vulnerability',\n xtype='testxtype.123',\n severity=SeverityEnum.critical,\n comment='a test vulnerability comment',\n refs=['ref1', 'ref2'],\n tags=['tag1', 'tag2']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='another test vulnerability',\n xtype='testxtype.124',\n severity=SeverityEnum.high,\n comment='another vulnerability comment',\n tags=None\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability1',\n xtype='testxtype.124',\n severity=SeverityEnum.medium,\n tags=['info']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability2',\n xtype='testxtype.124',\n severity=SeverityEnum.low,\n tags=['report']\n ))\n\n db.session.add(Vuln(\n host=host,\n name='vulnerability2',\n xtype='testxtype.124',\n severity=SeverityEnum.info,\n tags=['info']\n ))\n\n db.session.add(Vuln(\n host=host,\n service=Service.query.first(),\n name='vulnerability3',\n xtype='testxtype.124',\n severity=SeverityEnum.unknown,\n tags=['report']\n ))\n\n db.session.add(Vuln(host=host, **aggregable_vuln))\n\n db.session.add(Note(\n host=host,\n xtype='sner.testnote',\n data='testnote data',\n comment='test note comment'\n ))\n\n db.session.commit()", "def run_tasks(request):\r\n import os\r\n if not os.environ['SERVER_SOFTWARE'].startswith('Development'):\r\n logging.error(\"This URL is only valid in a development environment.\")\r\n raise Http404\r\n else:\r\n from datetime import datetime\r\n from google.appengine.api import apiproxy_stub_map\r\n stub = apiproxy_stub_map.apiproxy.GetStub('taskqueue')\r\n \r\n #get all the tasks for all the queues\r\n tasks = []\r\n for queue in stub.GetQueues():\r\n tasks.extend( stub.GetTasks(queue['name']) )\r\n \r\n #keep only tasks that need to be executed\r\n now = datetime.now()\r\n fn = lambda t: datetime.strptime(t['eta'],'%Y/%m/%d %H:%M:%S') < now\r\n tasks = filter(fn, tasks)\r\n\r\n from django.utils import simplejson as json\r\n result = '\\n'.join([json.dumps(t) for t in tasks])\r\n \r\n #remove tasks from queues\r\n for queue in 
stub.GetQueues():\r\n stub.FlushQueue(queue['name'])\r\n \r\n return HttpResponse(result)", "def enqueue(self, action, instance, sender, **kwargs):\n using_backends = self.connection_router.for_write(instance=instance)\n\n for using in using_backends:\n try:\n connection = self.connections[using]\n index = connection.get_unified_index().get_index(sender)\n except NotHandled:\n continue # Check next backend\n\n if isinstance(index, CelerySearchIndex):\n if action == 'update' and not index.should_update(instance):\n continue\n enqueue_task(action, instance)", "def running_celery_tasks(request):\n active_dict = CELERY_INSPECT.active()\n active_tasks = []\n if active_dict:\n for task_list in active_dict.values():\n active_tasks.extend(task_list)\n if active_tasks:\n active_tasks = [dikt.get(\"id\", \"\") for dikt in active_tasks]\n return Response({\"active_tasks\": active_tasks})", "def schedule():\n for profile in schedules['profiles']:\n instances = _get_instances(profile['instance_tags'], profile['region'])\n start_stop_instances(instances, profile['schedule'])\n reregister_elb_instances(profile)", "def execute(self):\n if not self._multiprocessing:\n for counter, subtasktuples in enumerate(self.task_scheduler):\n self._storegate.compile()\n result = self.execute_pipeline(subtasktuples, counter)\n self._history.append(result)\n\n logger.counter(counter + 1,\n len(self.task_scheduler),\n divide=1,\n message=f'metric={result.metric_value}')\n\n else: # multiprocessing\n if self._storegate.backend not in ('numpy', 'hybrid'):\n raise NotImplementedError(\n 'multiprocessing is supported for only numpy and hybrid backend'\n )\n\n ctx = mp.get_context('spawn')\n queue = ctx.Queue()\n args = []\n\n for counter, subtasktuples in enumerate(self.task_scheduler):\n args.append([subtasktuples, counter])\n\n if len(args) == self._num_workers:\n self.execute_jobs(ctx, queue, args)\n args = []\n logger.counter(counter + 1,\n len(self.task_scheduler),\n divide=1)\n\n self.execute_jobs(ctx, queue, args)", "def _get_monitor_tasks(self, desired_config):\n create_monitors = list()\n delete_monitors = list()\n update_monitors = list()\n\n for hm_type in ['http', 'https', 'tcp', 'icmp', 'udp']:\n existing = self._bigip.get_monitors(hm_type)\n config_key = \"{}_monitors\".format(hm_type)\n desired = desired_config.get(config_key, dict())\n\n (create_hm, update_hm, delete_hm) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n create_monitors += create_hm\n update_monitors += update_hm\n delete_monitors += delete_hm\n\n return (create_monitors, update_monitors, delete_monitors)", "def execute(self, task_status_queue=None):\n # List task does not need to report status information.\n del task_status_queue\n\n fields_scope = _translate_display_detail_to_fields_scope(\n self._display_detail, is_bucket_listing=self._cloud_url.is_provider())\n resources = plurality_checkable_iterator.PluralityCheckableIterator(\n wildcard_iterator.CloudWildcardIterator(\n self._cloud_url,\n all_versions=self._all_versions,\n error_on_missing_key=False,\n fields_scope=fields_scope,\n get_bucket_metadata=self._buckets_flag))\n\n if resources.is_empty():\n raise errors.InvalidUrlError('One or more URLs matched no objects.')\n if self._only_display_buckets:\n # Received a provider URL (\"gs://\") -> List all buckets.\n # Received buckets flag and bucket URL -> List matching buckets, ignoring\n # recursion.\n resources_wrappers = self._recursion_helper(resources, recursion_level=0)\n elif self._recursion_flag and '**' not in 
self._cloud_url.url_string:\n # \"**\" overrides recursive flag.\n resources_wrappers = self._recursion_helper(resources, float('inf'))\n elif not resources.is_plural() and resources.peek().is_container():\n # One container was returned by the query, in which case we show\n # its contents.\n resources_wrappers = self._get_container_iterator(\n resources.peek().storage_url, recursion_level=0)\n else:\n resources_wrappers = self._recursion_helper(resources, recursion_level=1)\n\n if self._display_detail == DisplayDetail.JSON:\n self._print_json_list(resources_wrappers)\n else:\n self._print_row_list(resources_wrappers)", "def _run_tasks(self, taskq_len, create_tasks, update_tasks, delete_tasks):\n # 'finished' indicates that the task queue is empty, or there is\n # no way to continue to make progress. If there are errors in\n # deploying any resource, it is saved in the queue until another\n # pass can be made to deploy the configuration. When we have\n # gone through the queue on a pass without shrinking the task\n # queue, it is determined that progress has stopped and the\n # loop is exited with work remaining.\n finished = False\n while not finished:\n LOGGER.debug(\"Service task queue length: %d\", taskq_len)\n\n # Iterate over the list of resources to create\n create_tasks = self._create_resources(create_tasks)\n\n # Iterate over the list of resources to update\n update_tasks = self._update_resources(update_tasks)\n\n # Iterate over the list of resources to delete\n delete_tasks = self._delete_resources(delete_tasks)\n\n tasks_remaining = (\n len(create_tasks) + len(update_tasks) + len(delete_tasks))\n\n # Did the task queue shrink?\n if tasks_remaining >= taskq_len or tasks_remaining == 0:\n # No, we have stopped making progress.\n finished = True\n\n # Reset the taskq length.\n taskq_len = tasks_remaining\n\n return taskq_len", "def get_reading_schedule_consistency(request):\n\t\n\tsigned_up_schedules = request.user.subscribed_sched.all()\n\t\n\tschedule_status = {}\n\tschedule_consistency = {}\n\tschedule_completion = {}\n\tfor sched in signed_up_schedules:\n\t\treading_day_num = datetime.date.today() - sched.start_date\n\t\tschedule_entries = ReadingScheduleEntry.objects.filter(schedule = sched, day_num__lte = reading_day_num.days + 1)\n\t\treadings = ReadingEntry.objects.filter(date__gte = sched.start_date, user = request.user)\n\t\t\n\t\tday_num_status = {}\n\t\tconsistency = 0\n\t\tcompletion = 0\n\t\ttotal_num_entries = 0\n\t\t\t\t\n\t\tfor entry in schedule_entries:\n\t\t\treading_status = 0\t#0 - not read, 1 - read but late, 2 - read and on time\n\t\t\tfor reading in readings:\n\t\t\t\tif(reading.reading == entry.reading):\n\t\t\t\t\t#decide whether or not a reading was finished, late, or on time\n\t\t\t\t\treading_deadline = sched.start_date + datetime.timedelta(entry.day_num)\n\t\t\t\t\n\t\t\t\t\tif(reading.date <= reading_deadline):\n\t\t\t\t\t\treading_status = 2\n\t\t\t\t\t\tconsistency += 1\n\t\t\t\t\t\tcompletion += 1\n\t\t\t\t\telif(reading.date > reading_deadline and reading_status == 0):\t#do not want to change an entry that was on time to late if the reading reads a reading twice\n\t\t\t\t\t\treading_status = 1\n\t\t\t\t\t\tcompletion += 1\n\t\t\t\t\t\t\n\t\t\t\t\ttotal_num_entries += 1\n\t\t\t\n\t\t\tif(total_num_entries > 0):\n\t\t\t\tconsistency = float(consistency) / total_num_entries\n\t\t\t\tcompletion = float(completion) / total_num_entries\n\t\t\telse:\n\t\t\t\tconsistency = 0\n\t\t\t\tcompletion = 0\n\t\t\t\n\t\t\tif(not entry.day_num in 
day_num_status):\n\t\t\t\tday_num_status[entry.day_num] = {}\n\t\t\tday_num_status[entry.day_num][entry.reading] = reading_status\n\t\t\t\n\t\tschedule_status[sched.title] = day_num_status\n\t\tschedule_consistency[sched.title] = consistency\n\t\tschedule_completion[sched.title] = completion\n\t\t\n\tcontext = RequestContext(request, {\"schedule_status\" : schedule_status, \"schedule_consistency\" : schedule_consistency, \"schedule_completion\" : schedule_completion})\n\treturn render_to_response(\"encourage/encourage_main.html\", context)", "async def _check_schedule(self, now, last):\n\n if self._schedule is None:\n return\n\n for event in self._schedule.events:\n if event.begin <= now:\n if event.begin > last:\n await self._announce_event(event)", "def _create_schedules(self):\n\n ''''''", "def makeLargeTracts(input_queue, output_queue, config, db_config):\n\n \n # capture the process name\n my_name = mp.current_process().name\n my_ip_address = socket.gethostbyname(socket.gethostname())\n\n while True:\n try:\n # get the next element out of the queue\n inputs = input_queue.get()\n try:\n if inputs[0] is None: break\n\n # extract the terms from the queue list\n numprov_path = inputs[0] \n blockm_df = inputs[1] \n out_tract_path = inputs[2] \n out_county_path = inputs[3] \n out_tract_df = inputs[4]\n out_county_df = inputs[5] \n start_time = inputs[6] \n worker_speed = inputs[7]\n config = inputs[8]\n geom = 'geoid%s' % config['census_vintage'][2:]\n\n continue_run, block_numprov = openNumprovFile(numprov_path, geom, \n my_name, my_ip_address, worker_speed, \n start_time, output_queue)\n\n if continue_run:\n continue_run, block_numprov = mergeWithDataFrame(\n block_numprov, blockm_df, geom, my_name, \n my_ip_address, worker_speed, start_time, \n output_queue) \n\n if continue_run:\n for geo in ['tract', 'county']:\n continue_run, out_df = findPerCapitaProviders(my_name, \n my_ip_address, geo, block_numprov, \n output_queue, start_time, config, \n worker_speed, eval('out_%s_df' % geo))\n \n if continue_run:\n continue_run = outputGeoData(out_df, \n eval('out_%s_path' % geo), my_name, \n my_ip_address, geo, worker_speed, \n start_time, output_queue)\n\n except:\n pass\n\n except:\n # nothing in the queue, wait and check again\n time.sleep(1)\n\n return True", "def run(self):\n assert self.queue is not None, \"Must specify queue or override run()\"\n\n while not self.terminated():\n qs = self.queue.objects.filter(status=self.queue.UNSUBMITTED,).order_by(\n \"-seq\"\n )[: django.conf.settings.DAEMONS_MAX_BATCH_SIZE]\n if not qs:\n self.sleep(django.conf.settings.DAEMONS_IDLE_SLEEP)\n continue\n\n for task_model in qs:\n try:\n self.do_task(task_model)\n task_model.status = self.queue.SUCCESS\n except AsyncProcessingIgnored:\n task_model.status = self.queue.IGNORED\n except Exception as e:\n if isinstance(e, AsyncProcessingRemoteError):\n # This is a bit messy. 
Do not log a trace when the\n # error is due to the remote service rejecting the request.\n # Such an error is still permanent for the task though.\n self.log.error(e)\n else:\n self.log.error('#' * 100)\n self.log.exception(f'Exception when handling task \"{task_model}\"')\n\n task_model.error = str(e)\n # if self.is_permanent_error(e):\n task_model.status = self.queue.FAILURE\n task_model.errorIsPermanent = True\n # raise\n else:\n task_model.submitTime = self.now_int()\n\n task_model.save()\n\n self.sleep(django.conf.settings.DAEMONS_BATCH_SLEEP)\n self.log.info(\"Exiting run loop.\")", "def check_engine_limits(current_rqmt, task):\n current_rqmt['time'] = min(168, current_rqmt.get('time', 1))\n return current_rqmt", "def manage_cache_tasks(self):\n outstanding_threads = self._has_outstanding_threads()\n try:\n # Get a task from the cache queue, waiting less if we have active threads.\n timeout = NONBLOCKING_TIMEOUT if outstanding_threads else BLOCKING_TIMEOUT\n\n # Toggle between refresh and update queues so as to prevent starvation.\n self._check_refresh_queue = not self._check_refresh_queue\n if self._check_refresh_queue:\n catalog, action = self._refresh_queue.get(timeout=timeout)\n else:\n catalog, action = self._update_queue.get(timeout=timeout)\n\n except Empty:\n # No task exists in the cache queue, proceed to check for thread execution\n pass\n\n else:\n # Create and start a thread for the task\n updater_thread = CacheUpdateWorker(\n self._component_cache,\n self._refresh_queue if self._check_refresh_queue else self._update_queue,\n catalog,\n action,\n )\n updater_thread.start()\n queue_clause = \"refreshing\" if self._check_refresh_queue else \"updating\"\n self.log.debug(f\"CacheUpdateWorker {queue_clause} catalog: '{updater_thread.name}', action: '{action}'...\")\n self._threads.append(updater_thread)", "def run(self):\n # Loop through all accounts that are marked as enabled\n accounts = list(AWSAccount.get_all(include_disabled=False).values())\n for account in accounts:\n self.log.debug('Updating VPC Flow Logs for {}'.format(account))\n\n self.session = get_aws_session(account)\n role_arn = self.confirm_iam_role(account)\n # region specific\n for aws_region in AWS_REGIONS:\n try:\n vpc_list = VPC.get_all(account, aws_region).values()\n need_vpc_flow_logs = [x for x in vpc_list if x.vpc_flow_logs_status != 'ACTIVE']\n\n for vpc in need_vpc_flow_logs:\n if self.confirm_cw_log(account, aws_region, vpc.id):\n self.create_vpc_flow_logs(account, aws_region, vpc.id, role_arn)\n else:\n self.log.info('Failed to confirm log group for {}/{}'.format(\n account,\n aws_region\n ))\n\n except Exception:\n self.log.exception('Failed processing VPCs for {}/{}.'.format(\n account,\n aws_region\n ))\n\n db.session.commit()", "def _equality_queries(self, field, dist):\n\n query_dicts = []\n for q in xrange(len(self.__queries)):\n query_dicts = []\n for count in xrange(self.__queries[q]['no_queries']*OVER_GENERATION_RATIO):\n self.__count += 1\n logger.info('EQ: Created %d out of %d queries' % (self.__count, self.__total))\n r_lower_cdf = self.__queries[q]['rss_lower_cdf']\n r_upper_cdf = self.__queries[q]['rss_upper_cdf'] \n value = self.__dists[field].generate_pdf(r_lower_cdf, r_upper_cdf, {})\n qid = qids.query_id()\n (value, where) = aqb.EqualityFishingQueryBatch.format_value_and_where(field, value)\n if qid != qids.full_where_has_been_seen(qid,where):\n continue\n query_dicts.append({qs.QRY_ENUM : qs.CAT.EQ, \n qs.QRY_QID : qid,\n qs.QRY_DBNUMRECORDS : self.__db_size,\n 
qs.QRY_DBRECORDSIZE : self.__row_width, \n qs.QRY_PERF : self.__perf,\n qs.QRY_CAT : self.__cat,\n qs.QRY_SUBCAT : '', \n qs.QRY_WHERECLAUSE : where,\n qs.QRY_FIELD : sv.sql_info[field][0],\n qs.QRY_NEGATE : False,\n qs.QRY_FIELDTYPE : sv.sql_info[field][1],\n qs.QRY_LRSS : self.__queries[q][qs.QRY_LRSS],\n qs.QRY_URSS : self.__queries[q][qs.QRY_URSS],\n qs.QRY_VALUE : value})\n \n self.__bobs.append(aqb.EqualityQueryBatch(query_dicts,count, \n int((count+1)/OVER_GENERATION_RATIO),\n True))", "def produce_queries(df, brokers, run_id, create_es_query, topic=mjolnir.kafka.TOPIC_REQUEST):\n\n def produce_partition(rows):\n producer = _make_producer(brokers)\n for row in rows:\n producer.send(topic, json.dumps({\n 'run_id': run_id,\n 'request': create_es_query(row),\n 'wikiid': row.wikiid,\n 'query': row.query,\n }))\n producer.close()\n\n mjolnir.spark.assert_columns(df, ['wikiid', 'query', 'hit_page_ids'])\n df.rdd.foreachPartition(produce_partition)\n\n # Send a sigil value to indicate this run is complete. The consumer will copy this\n # into TOPIC_COMPLETE so we know it's done.\n producer = _make_producer(brokers)\n partitions = producer.partitions_for(topic)\n for p in partitions:\n producer.send(topic, partition=p, value=json.dumps({\n 'run_id': run_id,\n 'complete': True,\n 'partition': p\n }))\n producer.close()\n return len(partitions)", "def _execute_wakeup_tasks(self):\n # Check the length of wakeup tasks first to avoid concurrent issues\n size = len(self.wakeup_tasks)\n for i in range(size):\n self.wakeup_tasks[i]()", "def test_query_to_tasks(self):\n Org(id='test1', status=CONNECTED).put()\n Org(id='test2', status=CONNECTED).put()\n Org(id='test3', status=DISCONNECTED).put()\n\n count = task_utils.query_to_tasks(\n query=Org.query(Org.status == CONNECTED),\n queue=Queue('adapter-update'),\n task_generator=lambda key: Task(url='/something/{}'.format(key.string_id()))\n )\n\n self.assertEqual(count, 2)\n task_count = len(self.taskqueue.get_filtered_tasks())\n self.assertEqual(task_count, 2)", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, 
msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def get_reschedules(cls):\n with managed_session() as session:\n requests = session.query(cls)\\\n .options(joinedload(cls.parametric_jobs)\n .joinedload(ParametricJobs.dirac_jobs))\\\n .filter_by(status=LocalStatus.FAILED)\\\n .join(cls.parametric_jobs)\\\n .filter_by(reschedule=True)\\\n .all()\n session.expunge_all()\n return requests", "def plan_asgs(asgs):\n asg_outdated_instance_dict = {}\n for asg in asgs:\n asg_name = asg['AutoScalingGroupName']\n logger.info('*** Checking autoscaling group {} ***'.format(asg_name))\n launch_type = \"\"\n asg_lc_name = \"\"\n asg_lt_name = \"\"\n asg_lt_version = \"\"\n if 'LaunchConfigurationName' in asg:\n launch_type = \"LaunchConfiguration\"\n asg_lc_name = asg['LaunchConfigurationName']\n elif 'LaunchTemplate' in asg:\n launch_type = \"LaunchTemplate\"\n asg_lt_name = asg['LaunchTemplate']['LaunchTemplateName']\n asg_lt_version = 
asg['LaunchTemplate']['Version']\n elif 'MixedInstancesPolicy' in asg:\n launch_type = \"LaunchTemplate\"\n asg_lt_name = asg['MixedInstancesPolicy']['LaunchTemplate']['LaunchTemplateSpecification'][\n 'LaunchTemplateName']\n asg_lt_version = asg['MixedInstancesPolicy']['LaunchTemplate']['LaunchTemplateSpecification'][\n 'Version']\n else:\n logger.error(f\"Auto Scaling Group {asg_name} doesn't have LaunchConfigurationName or LaunchTemplate\")\n\n instances = asg['Instances']\n # return a list of outdated instances\n outdated_instances = []\n for instance in instances:\n if launch_type == \"LaunchConfiguration\":\n if instance_outdated_launchconfiguration(instance, asg_lc_name):\n outdated_instances.append(instance)\n elif launch_type == \"LaunchTemplate\":\n if instance_outdated_launchtemplate(instance, asg_lt_name, asg_lt_version):\n outdated_instances.append(instance)\n logger.info('Found {} outdated instances'.format(\n len(outdated_instances))\n )\n asg_outdated_instance_dict[asg_name] = outdated_instances, asg\n\n return asg_outdated_instance_dict", "def jobs(self, time_frame):\n for name, content in self.connection.job_get(time_frame):\n task = self.task(name, content, self.connection)\n yield task", "def get_scheduler_jobs(self,\n ids=None,\n tenant_ids=None,\n all_under_hierarchy=None):\n try:\n self.logger.info('get_scheduler_jobs called.')\n\n # Prepare query URL\n self.logger.info('Preparing query URL for get_scheduler_jobs.')\n _url_path = '/public/scheduler'\n _query_builder = self.config.get_base_uri()\n _query_builder += _url_path\n _query_parameters = {\n 'ids': ids,\n 'tenantIds': tenant_ids,\n 'allUnderHierarchy': all_under_hierarchy\n }\n _query_builder = APIHelper.append_url_with_query_parameters(\n _query_builder, _query_parameters,\n Configuration.array_serialization)\n _query_url = APIHelper.clean_url(_query_builder)\n\n # Prepare headers\n self.logger.info('Preparing headers for get_scheduler_jobs.')\n _headers = {'accept': 'application/json'}\n\n # Prepare and execute request\n self.logger.info(\n 'Preparing and executing request for get_scheduler_jobs.')\n _request = self.http_client.get(_query_url, headers=_headers)\n AuthManager.apply(_request, self.config)\n _context = self.execute_request(_request, name='get_scheduler_jobs')\n\n # Endpoint and global error handling using HTTP status codes.\n self.logger.info('Validating response for get_scheduler_jobs.')\n if _context.response.status_code == 0:\n raise RequestErrorErrorException('Error', _context)\n self.validate_response(_context)\n\n # Return appropriate type\n return APIHelper.json_deserialize(\n _context.response.raw_body,\n SchedulerProto.from_dictionary)\n\n except Exception as e:\n self.logger.error(e, exc_info=True)\n raise" ]
[ "0.5548428", "0.5151464", "0.48518416", "0.47420847", "0.46796945", "0.46776414", "0.4667777", "0.4659418", "0.46430779", "0.46141425", "0.45946392", "0.45765543", "0.4571356", "0.45686328", "0.45587033", "0.45582697", "0.4556789", "0.45445147", "0.454312", "0.4534792", "0.45316252", "0.45129263", "0.45012805", "0.44768894", "0.44716182", "0.44716182", "0.44664705", "0.44520947", "0.44479093", "0.4439285", "0.44376296", "0.44124314", "0.44013712", "0.43964252", "0.43952206", "0.43790087", "0.4378465", "0.43769726", "0.4368282", "0.4361922", "0.434524", "0.43447426", "0.43376398", "0.43088272", "0.43074387", "0.4304389", "0.42976868", "0.42970908", "0.42970547", "0.42944664", "0.42881513", "0.428625", "0.42761806", "0.42755777", "0.42705142", "0.42637277", "0.42610732", "0.4256662", "0.4256662", "0.4256662", "0.42509362", "0.423194", "0.42241052", "0.42234176", "0.4215115", "0.4207083", "0.4206906", "0.4204927", "0.42026284", "0.42021018", "0.41960528", "0.41934732", "0.41922238", "0.41859403", "0.41804826", "0.4179935", "0.4177261", "0.41727903", "0.41586474", "0.41569954", "0.41498932", "0.41487008", "0.414578", "0.41357237", "0.41334996", "0.41305876", "0.41276917", "0.41272262", "0.41272032", "0.41256928", "0.41234398", "0.41207218", "0.41177362", "0.41151595", "0.41150525", "0.41126326", "0.41102338", "0.41090247", "0.4106848", "0.41006964" ]
0.62616307
0
Exponential Linear Unit function.
def elu(x, alpha=1.0):
    # https://github.com/muupan/chainer-elu
    # ELU(x) = x for x > 0, alpha * (exp(x) - 1) otherwise.
    return ELU(alpha=alpha)(x)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def exponential(value):\n return math.exp(value)", "def calculate_exponent():\n pass", "def exp(x):\n raise NotImplementedError", "def inverse_exponential(x):\n return math.exp(-x)", "def exponential(self) -> np.ndarray:\n if self.is_real():\n return np.array([1.0, 0.0, 0.0, 0.0])\n t = np.linalg.norm(self.v)\n u = self.v/t\n q_exp = np.array([np.cos(t), *u*np.sin(t)])\n if self.is_pure():\n return q_exp\n q_exp *= np.e**self.w\n return q_exp", "def Exp(num):\n return math.exp(float(num))", "def exp(self, X, U):\n raise NotImplementedError", "def exp_func(x, initial, lifetime):\n return initial * np.exp(-x/lifetime)", "def exponentialLearningRate(base):\n def function(t):\n return base ** (t-1)\n return function", "def exponential(input_dim,variance=1., lengthscale=None, ARD=False):\r\n part = parts.exponential.Exponential(input_dim,variance, lengthscale, ARD)\r\n return kern(input_dim, [part])", "def exp(t,tau):\n return np.exp(-t/tau)", "def exp(X):\n X = np.maximum(X,100)\n return np.exp(X)", "def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)", "def f(x):\n return -math.exp(x[0]**3/-3 + x[0] - x[1]**2)", "def exponential(t, eta_init, last_eta, d = 0.01):\n return eta_init*np.exp(-d*t)", "def exp_term(x, i):\n return x**i/math.factorial(i)", "def E_polynomial(self):\n\n from nodepy import stability_function\n p, q = self.stability_function()\n return stability_function.E_polynomial(p, q)", "def getExponent(self):\n return _libsbml.Unit_getExponent(self)", "def F(x):\n return math.exp(-0.5 * (x ** 2))", "def test_exp():\n l = Parameter('l', positive=True)\n x = Variable('x')\n\n new = l * sympy.exp(- l * x)\n assert isinstance(new, sympy.Expr)\n e = Exp(x, l)\n assert issubclass(e.__class__, sympy.Expr)\n assert new == e\n\n # A pdf should always integrate to 1 on its domain\n assert sympy.integrate(e, (x, 0, sympy.oo)) == 1", "def get_est_exp_discount_function(self,params):\n params = params[0:5]\n df = pd.DataFrame(self.maturity.apply(lambda x: x ** i) for i in range(1, 6)).T\n df.columns = ['M1', 'M2', 'M3', 'M4', 'M5']\n return np.exp(df.dot(params))", "def expdiff(x, a=a, n=5):\n return a**n * np.exp(a*x)", "def lam(E):\n return (12398.4/E)*1e-10", "def _getExponentialIndex(self, validQvals):\r\n validQvals = self._makePositive(validQvals)\r\n validQvals = self._getExponentialValues(validQvals)\r\n return self._getWeightedIndex(validQvals)", "def exp(data):\n return _make.exp(data)", "def e():\n print(math.e)", "def elu(x):\n return np.where(x < 0, np.exp(x) - 1, x)", "def lin_exp(min_iterations, i):\n\n\t# vary between the functions\n\tif i % 2 == 0:\n\t\treturn exponential(min_iterations, i)\n\telse:\n\t\treturn linear(min_iterations, i)", "def inv_efunc(z):\n return 1. / sqrt(omega_m * (1. 
+ z)**3 + omega_lam)", "def exp_fun(self, xs, *args, **kwargs):\n raise NotImplementedError", "def func_exp(x, a, b, c):\n return a * np.exp(b * x) + c", "def energy(e: float) -> float:\n\n return (1/np.sqrt(2))*(gamma(-e/2+1/2)/(gamma(-e/2+3/4)))", "def exponentialfcn(x: np.ndarray) -> np.ndarray:\n\n x2 = x**2\n scores = -np.exp(-0.5 * np.sum(x2, axis=1))\n return scores", "def setExponent(self, *args):\n return _libsbml.Unit_setExponent(self, *args)", "def test_exp():\n x = np.linspace(-3,3,13)\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._exp(x)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._exp(x)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr", "def exp(self):\n \n return Intervalo(math.exp(self.lo), math.exp(self.hi))", "def exponential(min_iterations, i, start = start_temp, final = final_temp):\n\n\ttemperature = (start * (final / start) ** (i / min_iterations))\n\n\treturn temperature", "def exponential(gp_link=None):\r\n if gp_link is None:\r\n gp_link = noise_models.gp_transformations.Log_ex_1()\r\n\r\n analytical_mean = False\r\n analytical_variance = False\r\n return noise_models.exponential_noise.Exponential(gp_link,analytical_mean,analytical_variance)", "def exp(self):\n return Factor().__build( VarSet(self.v) , np.exp(self.t) )", "def E(z, omega_m, omega_l):\n return 1 / np.sqrt(omega_m * (1 + z) ** 3 + omega_l)", "def fn(self, x):\n\n return math.exp(x*2) - math.exp(x) - 2", "def fn(self, x):\n\n return math.exp(x*2) - math.exp(x) - 2", "def ln(x):\n return log(x, const.e)", "def _getExponentialValues(self, arr):\r\n return [math.exp(val) for val in arr]", "def energy_function(self, x):\n \n return -T.dot(T.transpose(x), T.dot(self.W, x)) -\\\n T.dot(T.transpose(self.b), x)", "def E_StaticFromDynamic_Eissa_Kazi1988(Edyn):\n\n E = (0.74*Edyn)-0.82\n return E", "def Es_case_E(z, x, gamma):\n \n if z == 0 and x == 0:\n return 0\n \n beta2 = 1-1/gamma**2\n beta = sqrt(beta2)\n \n L = (z + beta*sqrt(x**2*(1-beta2) + z**2))/(1-beta2)\n \n S = sqrt(x**2 + L**2)\n N1 = L - beta * S\n D = S-beta*L\n \n return N1/D**3", "def erf(x):\n return 0.0", "def exp(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return self.__class__(self._diag.exp())", "def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly", "def _etaE(self,x):\n return self._etaE_cool(x) + self._etaE_hot(x)", "def test_call_function_ExponentialDecay():\n a = 0.4 # Decay constant\n u0 = 3.2 # Function value u(t) for some known time t\n der_u = -1.28 # Analytic value for the derivative of u at the known time t\n eps = 10**(-7)# Since we are dealing with floating point numbers,\n # we need a limit when checking that a difference is zero.\n decay_model = ExponentialDecay(a)\n assert(abs(decay_model(0, u0)-der_u) < eps)", "def erfi(x):\n a = 0.147 # MAGIC!!!\n a1 = math.log(1 - x * x)\n a2 = (2.0 / (math.pi * a) + a1 / 2.0)\n\n return (sign(x) * math.sqrt( math.sqrt(a2 * a2 - a1 / a) - a2 ))", "def schechter(l,alpha):\n return exp(-l)*(l**alpha)", "def lamda(Ei):\n Ej=Ei*1.6021*10**-22\n h_js=6.626*10**-34\n m_kg=1.674929*10**-27\n lam=h_js/np.sqrt(2*Ej*m_kg)\n return (lam)", "def exp_integral(x):\n gamma = 0.577215665\n return (-gamma - expn(x,1) - np.log(x))", "def nu_to_E(nu, ecc):\n E = 2 * np.arctan(np.sqrt((1 - ecc) / (1 + ecc)) * np.tan(nu / 2))\n return E", "def exp(module, x):\n _import_modules()\n 
if module in [np, ma]:\n return np.exp(x)\n elif module == torch:\n return torch.exp(x)\n elif module == jnp:\n return jnp.exp(x)\n elif module == tf:\n return tf.math.exp(x)\n raise UnknownModuleException(f\"Module {module.__name__} not supported.\")", "def getExponent(self):\n return _libsbml.ASTNode_getExponent(self)", "def negative_exponent():\n print(\"Problem: Negative exponent\")\n\n a = float(input())\n n = int(input())\n\n result = power(a, n)\n print(result)", "def iu_energy(self,val,units=\"1/cm\"):\n if units in self.units[\"energy\"]:\n x = conversion_facs_energy[units]\n i_val = x*val\n return i_val", "def expfit(self, x, y):\n n = 30 # default number of polynomials coeffs to use in fit\n a = numpy.amin(x)\n b = numpy.amax(x)\n d0 = self.chebftd(a, b, n, x, y) # coeffs for data trace...\n d1 = self.chebint(a, b, d0, n) # coeffs of integral...\n tau = -numpy.mean(d1[2:3] / d0[2:3])\n try:\n g = numpy.exp(-x / tau)\n except:\n g = 0.0\n # generate chebyshev polynomial for unit exponential function\n dg = self.chebftd(a, b, n, x, g)\n # now estimate the amplitude from the ratios of the coeffs.\n a1 = self.estimate(d0, dg, 1)\n a0 = (d0[0] - a1 * dg[0]) / 2.0 # get the offset here\n return (a0, a1, tau)", "def exp_env(N, sr, lam = 3):\n return np.exp(-lam*np.arange(N)/sr)", "def explorentzian(mu, wid, timeconstant, x): \n g = lorentzian( mu, wid, x )\n \n hly = np.round( len(g) / 2.0 )\n ey = np.r_[np.zeros(hly),g,np.zeros(hly)]\n fy = np.fft.fft(ey)\n a = np.exp(-(np.arange(len(fy))) / timeconstant )\n fa = np.fft.fft(a)\n fy1 = fy * fa\n ybz = np.real(np.fft.ifft(fy1)) / np.sum(a)\n yb = ybz[hly:len(ybz)-hly]\n \n return yb", "def elu(input, alpha=1.0, inplace=False):\n return FunctionLib.apply(\n 'Elu', input.device, [input],\n outputs=[input if inplace else None], alpha=float(alpha))", "def eval_expon(terms):\n pow_dex = terms.index('^')\n if terms[pow_dex + 1] == '-':\n terms[pow_dex + 1] = -1 * terms[pow_dex + 2]\n del terms[pow_dex + 2]\n\n terms[pow_dex - 1] = terms[pow_dex - 1] ** terms[pow_dex + 1]\n\n del terms[pow_dex: pow_dex + 2]\n\n return terms", "def col_ei(Te, nev, Zev = 1.0):\n return 2.91e-12 * Zev*nev*lnlambda(Te,nev)/Te**1.5", "def erf(t):\n P = 0.3275911\n A = [0.254829592, -0.284496736, 1.421413741, -1.453152027, 1.061405429]\n T = 1.0 / (1 + P * t)\n Tn = T\n Poly = A[0] * Tn\n for i in range(1, 5):\n Tn = Tn * T\n Poly = Poly * A[i] * Tn\n return 1.0 - Poly * np.exp(-t * t)", "def _fit_func(en, a, b, c, d, e, f, g, h):\n return a*(1./en**b)*(1./(c+d/(en**e))) + f*exp(-g*(en-h)**2)", "def exponential(base, multiplier, limit):\n def func():\n if base < 0:\n raise ValueError('base must be non-negative')\n\n if multiplier < 0:\n raise ValueError('multiplier must be non-negative')\n\n if limit < 0:\n raise ValueError('limit must be non-negative')\n\n delay = base\n for exp in range(limit):\n yield delay**exp * multiplier\n\n return func", "def free_energy_function(self, x):\n \n wx_b = T.dot(x, self.W) + self.bhid\n \n return -T.sum(T.log(1 + T.exp(wx_b)), axis=1) -T.dot(x, self.b)", "def constant_equation(funct):\n return funct + \"x\"", "def my_fn(x):\n return 0.4*(0.5*(np.exp(x*4) - np.exp(-x*4)) - 8*x + 0.3*x**2 - 2*x**3 + 0.8)", "def exponentiation():\n print(\"Problem: Exponentiation\")\n\n a = float(input())\n n = int(input())\n\n result = power_v2(a, n)\n print(result)", "def func(self, X, a, b):\n return a*np.exp(-b*X)", "def activation_func(x):\r\n a = -1\r\n return 1/(1+np.exp(-a*x))", "def Ernst_T1(TR, alpha_e):\n return -TR / 
np.log(np.cos(alpha_e))", "def exponential(self, data=[], init_lambdas=[1,0.75], max_iteration=500):\r\n xaxis = np.arange(1, len(data)+1)\r\n data = np.array(data)\r\n idx = 1\r\n lambdas = np.array(init_lambdas)\r\n while idx < max_iteration:\r\n y = [lmbda*np.exp(data*(-lmbda)) for lmbda in lambdas]\r\n weights = y/np.sum(y, axis=0)\r\n coefficients = np.mean(weights, axis=1)\r\n lambdas = np.sum(weights, axis=1)/np.sum(weights*data, axis=1)\r\n idx+=1 \r\n print lambdas, coefficients\r\n return lambdas, coefficients", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val -\n (T_m - self.Tamb.val_SI) * self.lkf_lin.val -\n self.lkf_quad.val * (T_m - self.Tamb.val_SI) ** 2))", "def function(self):\r\n lambd = 5*np.sin(2*np.pi*self.x_array) #The function in question\r\n return 3*np.pi*np.exp(-lambd)", "def lam2E(l):\n E=12398.4/(l*u['ang'])\n return E", "def exponential(image: np.ndarray) -> np.ndarray:\n return np.power(image, 0.75).astype('uint8')", "def general_exp(x, max_order=15):\n\n result = 1.0\n if max_order == 0:\n return result\n\n # scale by power of 2 so that its norm is < 1\n max_val = int(np.max(np.abs(x.value)))\n scale = 1\n if max_val > 1:\n max_val <<= 1\n while max_val:\n max_val >>= 1\n scale <<= 1\n\n scaled = x * (1.0 / scale)\n\n # taylor approximation\n tmp = 1.0 + 0.0*x\n for i in range(1, max_order):\n if np.any(np.abs(tmp.value) > _eps):\n tmp = tmp*scaled * (1.0 / i)\n result += tmp\n else:\n break\n\n # undo scaling\n while scale > 1:\n result *= result\n scale >>= 1\n return result", "def gem_exp(min_iterations, i):\n\n\t# vary between the functions\n\tif i % 2 == 0:\n\t\treturn geman(min_iterations, i)\n\telse:\n\t\treturn exponential(min_iterations, i)", "def exponential(self,*datas):\n\t\tdatas = list(datas)\n\t\tresult = datas.pop(0)\n\t\tfor data in datas:\n\t\t\tresult **= data\n\n\t\treturn result", "def get_alpha(self, error_rate, func='default'):\n return 0.5 * np.log((1. 
- error_rate) / error_rate)", "def exp(tensor):\n return _elementary_op(tensor, np.exp, np.exp)", "def evaluate(x, amplitude, x_0, alpha, beta):\n\n xx = x / x_0\n exponent = -alpha - beta * np.log(xx)\n return amplitude * xx ** exponent", "def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))", "def powerlaw(E,alpha,A):\n\n\treturn A*E**alpha", "def exponent(num,power=2):\n return num ** power", "def erf(x):\n return 1.-erfc(x)", "def exp(a: Decimal, b: Decimal) -> Decimal:\n return a ** b", "def explt(l, t): # pragma: no cover\n return cmath.exp((-1.0j * t) * l)", "def BernoulliExponentialLoss(lamb) :\n def bexl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = K.sum((-1./lamb) + K.log(lamb) - 1, axis=-1)\n return recon+dkl\n return bexl", "def A_EE(self, L):\n if L>2.*self.CMB.lMaxP:\n return 0.\n # integrand\n def integrand(x):\n theta = x[1]\n l1 = np.exp(x[0])\n l2 = self.l2(L, l1, theta)\n if l2<self.CMB.lMin or l2>self.CMB.lMaxP:\n return 0.\n phi = self.phi(L, l1, theta)\n result = self.f_EE(l1, l2, phi) * self.F_EE(l1, l2, phi)\n result *= l1**2\n result /= (2.*np.pi)**2\n result *= 2.\n return result\n \n # if first time, initialize integrator\n if not hasattr(self.A_EE.__func__, \"integ\"):\n self.A_EE.__func__.integ = vegas.Integrator([[np.log(self.CMB.lMin), np.log(self.CMB.lMaxP)], [0., np.pi]])\n self.A_EE.integ(integrand, nitn=8, neval=1000)\n result = self.A_EE.integ(integrand, nitn=1, neval=5000)\n result = L**2 / result.mean\n if not np.isfinite(result):\n result = 0.\n return result", "def as_exp(s: str) -> str:\n return s if \"e\" in s else \"{:1.0e}\".format(float(s))", "def __pow__(self, exp):\n # We have (p o Q)^e = p^e o Q\n coeff = (self._unit_simplex_polynomial**exp).coeff\n if isinstance(exp, numbers.Integral):\n r = self.degree() * exp\n else:\n r = 0\n for i in range(len(exp)):\n r += self[i].degree() * exp[i]\n return PolynomialBernsteinSimplex(coeff, self.vertices, r)", "def exp(self):\n try:\n return self.days/self.det\n except ZeroDivisionError:\n return float('NaN')", "def linear(e0, e1, t0, t1, e):\n alpha = max(0, min(1, (e - e0) / (e1 - e0))) # what fraction of the way through are we\n t = alpha * t1 + (1 - alpha) * t0 # interpolate accordingly\n return t" ]
[ "0.6901193", "0.68407923", "0.6819991", "0.6726578", "0.667672", "0.66451716", "0.66354614", "0.6521205", "0.6373172", "0.6362633", "0.6341832", "0.6321374", "0.6235829", "0.6235829", "0.6159893", "0.6137865", "0.61260146", "0.6114578", "0.61126935", "0.60865015", "0.6084545", "0.60739064", "0.60543025", "0.6049967", "0.6048178", "0.60462964", "0.6034846", "0.5998231", "0.5997587", "0.5966223", "0.5962638", "0.5962124", "0.5960999", "0.5936655", "0.5928634", "0.59048116", "0.5904809", "0.58990806", "0.5884155", "0.5868979", "0.5846907", "0.5846907", "0.5840778", "0.583929", "0.58381903", "0.5817179", "0.57841545", "0.5771364", "0.5770915", "0.57685107", "0.57666266", "0.57656485", "0.5764755", "0.57475597", "0.57448465", "0.5740946", "0.5738222", "0.5732984", "0.5701836", "0.5701084", "0.56961495", "0.56934434", "0.5693369", "0.5677329", "0.5676669", "0.56761", "0.5673591", "0.5673307", "0.5671367", "0.5667834", "0.56668395", "0.56575", "0.5654624", "0.56545985", "0.56473035", "0.5644973", "0.5643805", "0.5643078", "0.5641232", "0.56392926", "0.5631047", "0.5631026", "0.56209797", "0.56179905", "0.5617941", "0.560843", "0.5601007", "0.5595274", "0.5591013", "0.5586568", "0.5580942", "0.55684894", "0.5566762", "0.556416", "0.55604947", "0.55490774", "0.55472326", "0.5542335", "0.5540256", "0.55346054" ]
0.6120061
17
Rearrange Array Elements so as to form two numbers such that their sum is maximum.
def rearrange_digits(input_list):

    ## Corner cases:
    if len(input_list) == 0:
        return [0, 0]
    elif len(input_list) == 1:
        return [input_list[0], 0]

    # Sort the array using merge sort
    sorted_list = merge_sort(input_list)

    # Create two empty arrays and pop the largest number from the sorted_list,
    # pushing it into each array in turn.
    # This also ensures that the number of digits in the two numbers cannot differ by more than 1.
    first_num_list = list()
    second_num_list = list()
    while sorted_list:
        first_num_list.append(sorted_list.pop())

        # Break the while loop if the array is empty
        if not sorted_list:
            break

        second_num_list.append(sorted_list.pop())

    first_num = int("".join(str(i) for i in first_num_list))
    second_num = int("".join(str(i) for i in second_num_list))

    # Create an output array of the two nums
    out_list = []
    out_list.append(first_num)
    out_list.append(second_num)

    return out_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def max_pairwise_product_sort(array):\n if len(array) <= 1:\n return 0\n\n array.sort()\n\n return array[-1] * array[-2]", "def max_pairwise_product_linear(array):\n\n if len(array) <= 1:\n return 0\n\n two_biggest_values = [0, 0]\n\n for element in array:\n if element > two_biggest_values[0]:\n two_biggest_values[0] = element\n elif element > two_biggest_values[1]:\n two_biggest_values[1] = element\n\n return two_biggest_values[0] * two_biggest_values[1]", "def max_pairwise_product_sort(numbers):\n sorted_list = sorted(numbers)\n ans = sorted_list[-1]*sorted_list[-2]\n return ans", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def get_max_sum2(a):\n s = ms = a[0]\n n = len(a)\n for i in range(1, n):\n s = max(a[i], s + a[i])\n ms = max(s, ms)\n return ms", "def maxsum(A):\n\tmax_ending_here = max_so_far = A[0]\n\tfor x in A[1:]:\n\t\tmax_ending_here = max(x, max_ending_here + x)\n\t\tmax_so_far = max(max_so_far, max_ending_here)\n\treturn max_so_far", "def max_change(arr):\n return np.max(arr) - np.min(arr)", "def get_max_arr_sum(arr):\n if len(arr) < 2:\n return 0 if len(arr) == 0 else max(0, max(arr))\n\n max_dict = [None]*len(arr)\n max_dict[0] = max(0, arr[0])\n max_dict[1] = max(max_dict[0], arr[1])\n for i in range(2, len(arr)):\n max_dict[i] = max(max_dict[i-1], max_dict[i-2]+arr[i])\n\n return max_dict[-1]", "def max_pairwise_product_brute_force(array):\n\n if len(array) <= 1:\n return 0\n\n max_product = 0\n\n for i in range(len(array)):\n for j in range(len(array)):\n if i != j:\n if array[i] * array[j] > max_product:\n max_product = array[i] * array[j]\n\n return max_product", "def solve(arr):\n for i in range(len(arr) - 2, -1, -1):\n arr[i] = [max_subtriangle(arr, i, j) for j in range(len(arr[i]))]\n return arr[0][0]", "def bubble_optimized(array):\n for passes in range(len(array)-1, 0, -1):\n for i in range(passes):\n if array[i] > array[i+1]:\n array[i], array[i+1] = array[i+1], array[i]", "def highest_product(arr):\n\n product = 1\n\n for i in range(3):\n # find the max value in the list, get the index, pop it, and mulitply\n product *= arr.pop(arr.index(max(arr)))\n\n return product", "def pmax(\n *x: Iterable,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([max(elem, na_rm=na_rm) for elem in zip(*x)])", "def maximum_inplace(a, b):", "def maxSubArray(nums):\n #dynamic programming\n\n n = len(nums)\n curr_sum = max_sum = nums[0]\n \n for i in range(1,n):\n curr_sum = max(nums[i], curr_sum + nums[i])\n max_sum = max(max_sum, curr_sum)\n \n return max_sum", "def greatest_difference(num_list):", "def max_val_rec_2d(arr):\n return max_val_rec([max_val_rec(each_list) for each_list in arr])", "def rearrange_digits(input_list):\n\n # Edge cases treatment: Input array of size 0\n if len(input_list) == 0:\n return 0, 0\n\n # Edge cases treatment: Invalid array element\n for n in input_list:\n if type(n) != int or n < 0 or n > 
9:\n return 0, 0\n\n # Input array ordered (descending) \n ordered_list = mergesort(input_list)\n\n # Then it equally distributes the array number elements into the two \n # reaulting numbers. The higher the number in the array the more significant\n # digit it will represent in the final result.\n num1 = 0\n for i in range(0, len(ordered_list), 2):\n num1 = num1*10 + ordered_list[i]\n\n num2 = 0\n for i in range(1, len(ordered_list), 2):\n num2 = num2*10 + ordered_list[i]\n \n return num1, num2", "def max_subarray(array):\n\tmax_sum = 0\n\n\tmax_local_sum = 0\n\tfor i, value in enumerate(array):\n\t\tmax_local_sum += value\n\t\tif max_local_sum < 0:\n\t\t\tmax_local_sum = 0\n\t\telse:\n\t\t\tmax_sum = max(max_sum, max_local_sum)\n\n\treturn max_sum", "def selection_sort_max_version(arr):\n # No need to sort\n if arr is None:\n return arr\n\n n = len(arr)\n if n <= 1:\n return arr\n\n # i - range in order\n # j - range out of order\n for i in range(n - 1, 0, -1):\n max_index = i\n j = i - 1\n\n # select max element in range[0, j]\n while j >= 0:\n if arr[j] > arr[max_index]:\n max_index = j\n j -= 1\n\n arr[i], arr[max_index] = arr[max_index], arr[i]\n\n return arr", "def largest_number(a):\n\n array = new_quick_sort(a)\n\n largest_number = \"\"\n\n for number in array:\n largest_number += str(number)\n\n return largest_number", "def test_returns_largest_product_within_array(self):\n result = max_product([2,3,-2,4,10,-5,3,2,1])\n self.assertEqual(result, 14400)", "def ArrayAdditionI(arr):\n\n nums = sorted(arr)\n\n #Get highest num\n highestNum = max(arr)\n currentSum = 0 - highestNum\n\n for num in nums:\n currentSum += num\n\n if currentSum < highestNum:\n return 'false'\n else:\n return 'true'", "def maxProduct(data):\n maxval = float('-inf')\n for i in range(len(data)):\n for j in range(i+1, len(data)):\n if maxval < data[i]*data[j]:\n maxval = data[i]*data[j]\n a,b = (data[i],data[j])\n return tuple([a,b])", "def check_argmax(array):\n # Check which movements are the best, return it as a list where 1 = max of the list.\n res = [1 if i == max(array) else 0 for i in array]\n return list(compress([\"V\", \"H\", \"D\", \"X\"], res))", "def maximum_value(arr: [int], k: int) -> [int]:\n m = []\n res = []\n for i in range(len(arr)):\n if len(m) == k:\n res.append(max(m))\n if len(m) == k:\n m[i%k] = arr[i]\n else:\n m.append(arr[i])\n res.append(max(m))\n return res", "def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs", "def relu(arr):\n return column_bind(zeros(len(arr)), arr).max(axis=1)", "def max_pairwise_product_fast(numbers):\n num_list = numbers.copy()\n max_num_1 = max(num_list)\n num_list.remove(max_num_1)\n max_num_2 = max(num_list)\n ans = max_num_1*max_num_2\n return ans", "def get_max_sum4(a):\n return max(get_max_sum2(a), 0)", "def sort(numbers):\n # This will loop n times according to the size of the array starting at the\n # Last index and ending at 0\n for i in range(len(numbers) - 1, 0, -1):\n # Sets the biggest index to 0\n biggest_index = 0\n\n # This loops i times, so only unsorted indexes are looped through.\n for j in range(i):\n \n # Compares the next index value to the current value at biggest index\n if numbers[biggest_index] < numbers[j + 1]:\n\n # Sets a new biggest index\n biggest_index = (j + 1)\n\n # Swaps the current last index (i) with the biggest value's index\n numbers[i], numbers[biggest_index] = numbers[biggest_index], 
numbers[i]", "def max_pairwise_product(numbers):\n n = len(numbers)\n max_product = 0\n for first in range(n):\n for second in range(first + 1, n):\n max_product = max(max_product,\n numbers[first] * numbers[second])\n\n return max_product", "def reduce_array(\n array: np.ndarray,\n components: typing.Optional[typing.Collection[int]] = None,\n max_val: typing.Optional[int] = None,\n dtype=None,\n) -> np.ndarray:\n # this function minimal dtype is np.uint8 so there is no need to do calculation.\n if components is None:\n components = np.unique(array.flat)\n if max_val is None:\n max_val = np.max(components)\n\n if max_val is None:\n max_val = np.max(array)\n\n translate = np.zeros(max_val + 1, dtype=dtype or minimal_dtype(len(components) + 1))\n\n for i, val in enumerate(sorted(components), start=0 if 0 in components else 1):\n translate[val] = i\n\n return translate[array]", "def get_max_sum3(a):\n s = ms = a[0]\n n = len(a)\n mstart = 0\n mend = 0\n start = 0\n end = 0\n for i in range(1, n):\n if s + a[i] < a[i]:\n s = a[i]\n start = i\n end = i\n else:\n s = s + a[i]\n end = i\n if ms < s:\n ms = s\n mstart = start\n mend = end\n return mstart, mend", "def maximo(arr):\n maxVal = float('-inf')\n maxIdx = -1\n\n for i in range(len(arr)):\n if arr[i] > maxVal:\n maxVal = arr[i]\n maxIdx = i\n\n return maxVal, maxIdx", "def renumerate(arr):\n return zip(reversed(range(len(arr))), reversed(arr))", "def stooge_sort(arr):\r\n stooge(arr, 0, len(arr) - 1)", "def tred(t):\n\n t2 = t\n\n l = []\n l2 = t2.pop()\n l1 = t2.pop()\n\n for i in range(len(l1)):\n l.append(l1[i] + max(l2[i:i+2]))\n\n t2.append(l)\n return t2", "def mysort(arr):\n arr.sort(key=int)\n\n return arr", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def array_maximal_adjacent_difference( arr ):\n length = len(arr) - 1\n diffs = [ abs( arr[i] - arr[i+1] ) for i in range( length ) ]\n return max(diffs)", "def maxElem(A):\n n = len(A)\n AMax = 0.0\n for i in range(n):\n for j in range(i+1,n):\n if abs(A[i,j]) >= AMax:\n AMax = abs(A[i,j])\n k = i;l = j\n return AMax, k, l", "def largest(array, n):\n\n #set max as first array element\n max = array[0]\n\n #compare current max with next array element, replace max if next element is larger\n\n for i in range(1, n):\n if array[i] > max:\n max = array[i]\n return max", "def checkio(numbers_array):\n \n return sorted(numbers_array, key=abs)", "def max(self):\n max = 0\n a = self.array_form\n for i in xrange(len(a)):\n if a[i] != i and a[i] > max:\n max = a[i]\n return max", "def bubble_naive(array):\n length = len(array)-1\n for _ in range(length):\n for i in range(length):\n if array[i] > array[i+1]:\n array[i],array[i+1] = array[i+1],array[i]", "def sort_012(a):\n # lo keeps track of the running index coming from the beginning of the list\n # hi keeps track of the running index coming from the end of the list\n # m1 and m2 keep track where the subarray of 1's is located \n # (keeps track of the first and last index of the 1's subarray)\n assert(type(a) == list), \"Array has to be a list\"\n lo, m1 = 0, 0\n hi, m2 = len(a)-1, len(a)-1\n runtime = 0\n while lo <= hi:\n runtime += 1\n if a[lo] == 0:\n if m1 < lo:\n a[m1] = 0\n a[lo] = 1\n m1 += 1\n lo += 1\n elif a[hi] == 2:\n if m2 > hi:\n a[m2] = 2\n a[hi] = 1\n m2 -= 1\n hi -= 1\n elif a[lo] == 1:\n lo += 1\n elif a[hi] == 1:\n hi -= 1\n elif a[lo] == 2 and a[hi] == 0:\n if lo == m1:\n a[lo] = 0\n else:\n a[m1] = 0\n a[lo] = 1\n lo += 1\n m1 += 1\n if hi == m2:\n a[hi] = 2\n else:\n a[m2] = 2\n 
a[hi] = 1\n m2 -= 1\n hi -= 1\n else:\n print(\"Warning: Logic problem\") \n return a, runtime", "def max(x, y):\n x[:] = np.maximum(x[:], y[:])\n return x", "def max_non_adjacent_sum(a):\n if not a:\n return 0\n if len(a) == 1:\n return a[0]\n return max(a[0] + max_non_adjacent_sum(a[2:]),\n a[1] + max_non_adjacent_sum(a[3:]))", "def argmax(self, values: pdarray) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"argmax\")\n return k, cast(pdarray, v)", "def merge_max_value(old, new):\n assert isinstance(old, list) and isinstance(new, list)\n if old != []:\n assert len(old) == len(new)\n for i in range(len(old)):\n assert type(old[i]) == type(new[i])\n if isinstance(old[i], list):\n new[i] = merge_max_value(old[i], new[i])\n else:\n new[i] = old[i] if new[i] < old[i] else new[i]\n return new", "def getMax(array_list):\n m = array_list[0]\n m_index = 0\n for i,value in enumerate(array_list):\n if value > m:\n m = value\n m_index = i\n return (m_index,m)", "def solution(A):\n # Why 2? Add 1 because the length of the given array is missing a number.\n # Add another 1 because the range function stops one before the max number.\n full_array = range(1, len(A) + 2)\n return sum(full_array) - sum(A)", "def maxProduct2(nums):\n\n maxSubseq = nums[0]\n minSubseq = nums[0]\n res = nums[0]\n for i in range(1, len(nums)):\n if nums[i] < 0:\n minSubseq, maxSubseq = maxSubseq, minSubseq\n maxSubseq = max(nums[i], maxSubseq*nums[i])\n minSubseq = min(nums[i], minSubseq*nums[i])\n res = max(res, maxSubseq)\n return res", "def calc_max(data: list) -> float:\n acc = data[0]\n for n in data:\n if n > acc:\n acc = n\n return float(acc)", "def find_max(data):\n index = 0\n res = data[index]\n for i in range(1, len(data)):\n if data[i] > res:\n res = float(data[i])\n index = i\n else:\n break\n return res, index", "def min_sum(array):\n\n array.sort()\n\n x = y = 0\n for num in range(len(array)):\n if num % 2 != 0:\n x = x * 10 + array[num]\n else:\n y = y * 10 + array[num]\n\n print(f\"First number: {x}\")\n print(f\"Second number: {y}\")\n return x + y", "def maxSubArray(self, nums: List[int]) -> int:\n # O(n) solution\n # 我们定义函数 S(i) ,它的功能是计算以 0(包括 0)开始加到 i(包括 i)的值。\n # 那么 S(j) - S(i - 1) 就等于 从 i 开始(包括 i)加到 j(包括 j)的值\n # 我们进一步分析,实际上我们只需要遍历一次计算出所有的 S(i), 其中 i = 0,1,2....,n-1。\n # 然后我们再减去之前的 S(k),其中 k = 0,1,i - 1,中的最小值即可。 因此我们需要 用一个变量来维护这个最小值,还需要一个变量维护最大值。\n max_sum = nums[0]\n min_sum_from_start = curr_sum = 0\n for i in range(len(nums)):\n curr_sum = curr_sum + nums[i]\n if curr_sum - min_sum_from_start > max_sum:\n max_sum = curr_sum-min_sum_from_start\n if curr_sum < min_sum_from_start:\n min_sum_from_start = curr_sum\n return max_sum", "def splitArray(self, nums: List[int], m: int) -> int:\n low, high, res = max(nums), sum(nums), -1\n while low <= high:\n pivot=(low+high)//2\n if self.isValid(nums,m,pivot):\n res, high = pivot, pivot - 1\n else:\n low = pivot + 1\n return res", "def maximumProduct2(self, nums: List[int]) -> int:\n big_1 = big_2 = big_3 = -float(\"inf\")\n small_1 = small_2 = float(\"inf\")\n for n in nums:\n if n >= big_1:\n big_1, big_2, big_3 = n, big_1, big_2\n elif n >= big_2:\n big_2, big_3 = n, big_2\n elif n >= big_3:\n big_3 = n\n \n if n <= small_1:\n small_1, small_2 = n, small_1\n elif n <= small_2:\n small_2 = n\n \n return max(big_1 * big_2 * big_3, big_1 * small_1 * small_2)", "def maxBw(array, value):\n m = np.max(array)\n c = value\n width = len(array[0])\n height = len(array)\n new_array = np.array(np.zeros((height, width)))\n for row in range(height):\n for 
col in range(width):\n new_array[row,col] = (array[row,col]/float(m)) * c\n return new_array", "def robSingle_2(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end + 1])\n curMax = 0\n preMax = 0\n for num in nums[start:end + 1]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def compare_max(values, weights):\n return np.max(values.numpy())", "def radix_sort(arr):\n if len(arr) < 2:\n return arr\n\n for number in range(len(str(max(arr)))):\n # for the length of the biggest number\n buckets = [[] for i in range(10)]\n for item in arr:\n single_num = item % (10 ** (number + 1))\n \n index = single_num // (10 ** number)\n # print(single_num)\n # print(index)\n buckets[index].append(item)\n result = []\n for bucket in buckets:\n for item in bucket:\n result.append(item)\n \n return result", "def nextPermutation(self, nums: List[int]) -> None:\n '''\n 思路:下一个最大的数字,这个思想其实总是不明确,今天总结一下\n 原数组\n 1 2 7 4 3 1\n 下一个排列是\n 1 3 1 2 4 7\n 如何得到的?\n 观察原数组可以看出,如果从末尾往前看,数字逐渐变大,到了2时才减小,然后再从后往前找到第一个比2大的数字,3,然后交换2和3,再把此时3后面的数字转换一下即可\n\n '''\n\n # 交换元素\n def swap(i, j):\n print('%d %d' % (i, j))\n temp = nums[i]\n nums[i] = nums[j]\n nums[j] = temp\n\n # 反转列表\n def reverse(start):\n\n end = len(nums) - 1\n while start < end:\n swap(start, end)\n start += 1\n end -= 1\n\n i = len(nums) - 2\n while i >= 0 and nums[i + 1] <= nums[i]:\n i -= 1\n print(i)\n if i >= 0:\n j = len(nums) - 1\n while j >= 0 and nums[j] <= nums[i]:\n j -= 1\n if j >= 0:\n swap(i, j)\n reverse(i + 1)", "def get_maximum_value(dataset):\n d = [int(i) for i in dataset if i.isdigit()]\n op = [o for o in dataset if o in ['*', '-', '+']]\n n = len(d)\n d.insert(0, None)\n op.insert(0, None)\n m = [[0 for x in range(n+1)] for y in range(n+1)]\n M = [[0 for x in range(n+1)] for y in range(n+1)]\n for i in range(1, n+1):\n m[i][i] = d[i]\n M[i][i] = d[i]\n for s in range(1, n):\n for i in range(1, n-s+1):\n j = i + s\n m[i][j], M[i][j] = min_and_max(i, j, op, m, M)\n return M[1][n]", "def test_swap_complete(self):\n # Get random data\n data = [random.random() for _ in range(500)]\n nr_groups = random.randint(2, 15)\n # Figure out which group is the maximal-sum group\n groups = rpack.group(data, nr_groups)\n group_sums = [sum(g) for g in groups]\n max_duration = max(group_sums)\n group_id_max = group_sums.index(max_duration)\n max_group = groups[group_id_max]\n\n # For each element in max_group, try to swap element with other\n # elements in the other groups and check if the \"max-group\" can\n # be improved.\n for i in range(len(max_group)):\n for group_id, group in enumerate(groups):\n if group_id == group_id_max:\n # We don't want to swap within the same group\n continue\n for j in range(len(group)):\n # Swap\n group[j], max_group[i] = max_group[i], group[j]\n # Swap should not make any improvements\n group_sums = [sum(g) for g in groups]\n self.assertGreaterEqual(max(group_sums), max_duration)\n # Undo swap\n group[j], max_group[i] = max_group[i], group[j]", "def largestNumber(self, nums): \n def string_comp(item1, item2):\n return 1 if str(item1) + str(item2) < str(item2) + str(item1) else -1\n res_list = sorted(nums, key=cmp_to_key(string_comp))\n\n # Catch edge case where list of 0s will produce \"000..\" instead of a single \"0\"\n if set(res_list) == {0}:\n return \"0\"\n return \"\".join([str(i) for i in res_list])", "def maximumProduct1(self, nums: List[int]) -> int:\n s_nums = sorted(nums, reverse=True)\n return max(s_nums[0] 
* s_nums[1] * s_nums[2], s_nums[0] * s_nums[-1] * s_nums[-2])", "def bubble_sort_smart(array: list):\n size = len(array)\n\n for i in range(size):\n for j in range(size - i - 1):\n if array[j] > array[j + 1]:\n aux = array[j]\n array[j] = array[j + 1]\n array[j + 1] = aux\n #array[j], array[j + 1] = array[j + 1], array[j]", "def largestSquare(arr):\n c = arr # Cache for storing computations\n for i in c:\n print(i)\n # Won't touch arr[0] or arr[n][0] because they can't be bottom right corners\n for row in range(1, len(arr)):\n for col in range(1, len(arr[0])):\n if arr[row][col] > 0: # 0s can't make squares\n c[row][col] = min(c[row-1][col-1], c[row][col-1], c[row-1][col]) + arr[row][col]\n # Minimum of surrounding squares + current square = maximum size square\n print(\"-\" *20)\n for i in c:\n print(i)\n return max([max(i) for i in c])", "def find_max_val_unimodal_arr(unimodal_arr):\n arr = unimodal_arr\n maxfound = False\n if (len(arr) == 0):\n print('empty list')\n return -1\n\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n\n if (len(arr) == 1):\n print('maximum value = ' + str(arr[center]))\n return arr[center]\n\n if (len(arr) == 2):\n print('maximum value = ' + str(arr[left] if arr[left] > arr[right] else arr[right]))\n return arr[left] if arr[left] > arr[right] else arr[right]\n\n while (not maxfound):\n if (arr[left] > arr[center]):\n arr = arr[:center]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if (arr[right] > arr[center]):\n arr = arr[center:]\n center = math.floor(len(arr)/2)\n left = (math.floor(len(arr)/2)-1) if (math.floor(len(arr)/2)-1) >= 0 else 0\n right = (math.floor(len(arr)/2)+1) if (math.floor(len(arr)/2)+1) <= (len(arr)-1) else (len(arr)-1)\n if ((arr[right] <= arr[center]) and (arr[left] <= arr[center])):\n maxfound = True\n\n print('maximum value = ' + str(arr[center]))\n return arr[center]", "def bubble_final_position(array):\n swap_point = len(array)\n while swap_point:\n new_swap = 0\n for i in range(1, swap_point):\n if array[i-1] > array[i]:\n array[i-1], array[i] = array[i], array[i-1]\n new_swap = i\n swap_point = new_swap", "def second_largest(number_list):\n for i in range(len(number_list)):\n for j in range(len(number_list) - 1 - i):\n if number_list[j] > number_list[j+1]:\n number_list[j + 1], number_list[j] = number_list[j], number_list[j+1]\n\n return number_list[-2]", "def maximumSubarrayBF(self, array, low, high):\n max_sum = float(\"-inf\")\n for i in range(len(array)):\n temp_sum = 0\n for j in array[i:]:\n temp_sum += j\n if temp_sum > max_sum:\n max_sum = temp_sum\n left_index = i\n right_index = array.index(j)\n return (left_index, right_index, max_sum)", "def max_subarray(sequence=[-5, 20, -10, 30, 15]):\n\n sums = {}\n indices = []\n\n for i in range(len(sequence)):\n for j in range(i+1, len(sequence)):\n sub_seq = sequence[i:j+1]\n sub_seq_sum = sum(sub_seq)\n #print(sub_seq,'=>',sub_seq_sum)\n sums[sum(sub_seq)]=[i,j+1]\n\n i_indice = sums[max(sums)][0]\n j_indice = sums[max(sums)][1]\n return (max(sums), sequence[i_indice:j_indice])", "def GetMax(val, maximum):\n\tval = float(val)\n\tmaximum = float(maximum)\n\treturn max([val, maximum])", "def maxNumber(x):\n maxVal = x[0]\n for num in x:\n if maxVal 
<num:\n maxVal=num\n return maxVal", "def robSingle(self, nums, start, end):\n # print((start, end))\n # print(nums[start: end])\n curMax = 0\n preMax = 0\n for num in nums[start:end]:\n preMax, curMax = curMax, max(curMax, preMax + num)\n # print(curMax)\n # print(\"####################################\")\n return curMax", "def find_largest_element(num_1, num_2, num_3):\n\n return max([num_1, num_2, num_3])", "def arrayMaxConsecutiveSum(inputArray, k):\n if k == 1:\n return max(inputArray)\n \n sub = inputArray[0:k]\n largest = sum(sub)\n result = largest\n \n for val in inputArray[k:]:\n largest -= sub[0]\n sub.remove(sub[0])\n sub.append(val)\n largest += val\n \n if largest > result:\n result = largest\n\n return result", "def largest_sum(data: Iterator[str]) -> int:\n numbers = parse_input(data)\n return max(n.magnitude for n in possible_sums(numbers))", "def max_(lst: Iterable[int]) -> int:\n return reduce(lambda x, y: x if x > y else y, lst)", "def count_sort(arr: StaticArray) -> StaticArray:\n # finds the maximum element\n maximum = arr[0]\n for index in range(arr.size()):\n if abs(arr[index]) > maximum:\n maximum = abs(arr[index])\n\n # creates max+1 arrays for positives and negatives\n maximum += 1\n count_pos = StaticArray(maximum)\n count_neg = StaticArray(maximum)\n\n # records the number of iterations of an array element\n # by setting the corresponding index position of the count array to the number of iterations\n for index in range(arr.size()):\n current = arr[index]\n\n # positive numbers\n if current > 0:\n if count_pos[current] is None:\n count_pos.set(current, 1)\n else:\n count_pos[current] += 1\n\n # zero\n elif current == 0:\n if count_pos[0] is None:\n count_pos[0] = 1\n else:\n count_pos[0] += 1\n\n # negative numbers\n else:\n if count_neg[abs(current)] is None:\n count_neg.set(abs(current), 1)\n else:\n count_neg[abs(current)] += 1\n\n # sums non-empty spaces and sets empty spaces equal to zero\n length = 0\n # iterate through positive array\n for index in range(count_pos.size()):\n if count_pos[index] is None:\n count_pos[index] = 0\n else:\n length += count_pos[index]\n\n # iterate through negative array\n for index in range(count_neg.size()):\n if count_neg[index] is None:\n count_neg[index] = 0\n else:\n length += count_neg[index]\n\n # create array for the results\n result_array = StaticArray(length)\n\n # adds elements in positive array to results array from largest to smallest\n result_array_index = 0\n last = count_pos.size() - 1\n for index in range(count_pos.size()):\n while count_pos[last] > 0:\n result_array.set(result_array_index, last)\n result_array_index += 1\n count_pos[last] -= 1\n last -= 1\n\n # adds elements in negative array to results array from largest to smallest\n for index in range(count_neg.size()):\n while count_neg[index] > 0:\n result_array.set(result_array_index, -index)\n result_array_index += 1\n count_neg[index] -= 1\n\n return result_array", "def bubble_sort(input_array):\n for i in range(0, len(input_array)):\n for j in range(0,len(input_array) - i - 1):\n if input_array[j] > input_array[j+1]:\n tmp = input_array[j]\n input_array[j] = input_array[j+1]\n input_array[j+1] = tmp", "def highest_product_3(arr):\n # sort in place (this will take O(n), at least)\n arr.sort()\n\n # get the maximum positive solution (this only works if all three > 0)\n max_product = arr[-1] * arr[-2] * arr[-3]\n\n # check for better solutions involving negatives\n # the only solution involving negatives will have exactly two of them\n # check the two 
options manually and return the largest one. \n if arr[0] < 0 and arr[1] < 0:\n if arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3]) > max_product:\n max_product = arr[0] * arr[1] * max(arr[-1], arr[-2], arr[-3])\n\n return max_product", "def getminmax_pair_compare(arr):\n if len(arr) == 0:\n return None, None\n if len(arr) == 1:\n return arr[0], arr[0]\n\n min_num = None\n max_num = None\n index = 0\n if len(arr) % 2 == 0:\n if arr[0] > arr[1]:\n max_num = arr[0]\n min_num = arr[1]\n else:\n max_num = arr[1]\n min_num = arr[0]\n index = 2\n else:\n max_num = arr[0]\n min_num = arr[0]\n index = 1\n while index < len(arr) - 1:\n if arr[index] > arr[index + 1]:\n max_num = max(arr[index], max_num)\n min_num = min(arr[index + 1], min_num)\n else:\n max_num = max(arr[index + 1], max_num)\n min_num = min(arr[index], min_num)\n index += 2\n\n return min_num, max_num", "def largest_number_at_least_twice_of_others2(nums: [int]) -> int:\n if len(nums) == 1:\n return 0\n\n max_index = nums.index(max(nums))\n max_val = nums.pop(max_index)\n next_max = max(nums)\n\n if next_max * 2 <= max_val:\n return max_index\n return -1", "def max_rel_change(arr, neg=True):\n if neg:\n arr = arr - np.min(arr) + 1\n\n logret = np.diff(np.log10(arr))\n return np.max(logret) - np.min(logret)", "def r_max(nxs):\n largest = None\n for i,e in enumerate(nxs):\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if i == 0 or val > largest:\n largest = val\n\n return largest", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def getMinMax(self,arr):\n # not implemented for Template SED yet\n return arr[\"z\"], arr[\"z\"]", "def clean_double_values(self):\n trans_blue = self.blue_matrix().transpose()\n b_array = []\n for i in trans_blue:\n min_col = [i[0], i[1]]\n for j in trans_blue[0:]:\n if j[1] == min_col[1]:\n if j[0] < min_col[0]:\n min_col[0] = j[0]\n if min_col not in b_array:\n b_array.append(min_col)\n\n return sorted(b_array, key=lambda i: i[1])", "def list_max(numbers):\n maxnum = 0\n \n for num in numbers[0:]:\n if num > maxnum:\n maxnum = num\n return maxnum", "def generate_single_sorted_array(nums1, nums2):\r\n nums3= nums1+nums2\r\n # sort and add to resultant \r\n # add array and sort \r\n temp = 0\r\n print(\"nums3 at start %s :\" %nums3)\r\n for x in range(len(nums3)): \r\n y=x+1\r\n for y in range(y,len(nums3)): \r\n if nums3[x]> nums3[y]:\r\n print(\"nums3 before %s :\" %nums3)\r\n temp = nums3[x]\r\n nums3[x]= nums3[y]\r\n nums3[y]= temp\r\n print(\"nums3 after %s :\" %nums3)\r\n return(nums3)", "def max(input: list[int]) -> int:\n if len(input) == 0:\n raise ValueError(\"max() arg is an empty List\")\n else:\n input.sort()\n return input[-1]", "def solve_inplace(array1, array2):\n # append all b values to end of a and then sort\n pass", "def argmax(self, values):\n return self.aggregate(values, \"argmax\")", "def r_max(nxs):\n largest = None\n first_time = True\n for e in nxs:\n if type(e) == type([]):\n val = r_max(e)\n else:\n val = e\n\n if first_time or val > largest:\n largest = val\n first_time = False\n\n return largest", "def maxi(a, b):\n return max(a, b)" ]
[ "0.714988", "0.6825663", "0.65861666", "0.65528435", "0.6125746", "0.61184865", "0.611382", "0.60502493", "0.6019517", "0.5999914", "0.5950047", "0.5939359", "0.58605045", "0.5847725", "0.5821236", "0.58049685", "0.57983345", "0.576433", "0.57470614", "0.574322", "0.5742154", "0.5739261", "0.5696573", "0.5674384", "0.5671753", "0.5668747", "0.5667212", "0.5666495", "0.56568694", "0.5631188", "0.5627928", "0.5569365", "0.5551073", "0.55494833", "0.5544183", "0.5540818", "0.5539261", "0.5529703", "0.5527336", "0.55258036", "0.551618", "0.5513488", "0.5507579", "0.5502105", "0.5484819", "0.54567415", "0.5455871", "0.5450032", "0.5447331", "0.5443375", "0.54350215", "0.5427125", "0.54244554", "0.54169387", "0.5413216", "0.5403237", "0.5394733", "0.53857523", "0.5383123", "0.5378546", "0.5377179", "0.53766966", "0.53765935", "0.5375845", "0.53640103", "0.5353339", "0.5344067", "0.5325072", "0.5321031", "0.5318967", "0.5315995", "0.53122246", "0.5312086", "0.5312013", "0.5303348", "0.52881837", "0.5278881", "0.52718055", "0.5268815", "0.52612025", "0.52610517", "0.52568865", "0.52507395", "0.5249043", "0.52404654", "0.5231621", "0.52266604", "0.5222809", "0.52162415", "0.52157736", "0.51996005", "0.51903963", "0.5188429", "0.5186068", "0.51840234", "0.5179639", "0.5173798", "0.517125", "0.5170577", "0.5169942" ]
0.56220305
31
This method is used to terminate a job with the specified or a group of jobs job_id or job_name in a given cluster
def delete(cls, cluster, job, group=None): try: if group is not None: # get the job ids from the db arguments = {'cluster': cluster, 'group': group} db_jobs = cls.cm.find('batchjob', **arguments) list1 = [] for i in db_jobs: list1.append(db_jobs[i]['job_id']) # read active jobs active_jobs = json.loads(cls.queue(cluster)) list2 = [] for i in active_jobs: list2.append(active_jobs[i]['jobid']) # find intersection res = set(list1).intersection(set(list2)) if res is not None: for j in res: cmd = 'scancel {}'.format(str(j)) Shell.ssh(cluster, cmd) print("Deleted {}".format(j)) return "All jobs for group {} killed successfully".format(group) else: args = 'scancel ' if job.isdigit(): args += job else: args += "-n {}".format(job) Shell.ssh(cluster, args) return "Job {} killed successfully".format(job) except Exception as ex: print("in exceptio") print(ex) return ex
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate(ctx):\n ctl = ctx.ctl\n jobs = ctl('list-avail', '--partition', 'main', flatten=False)\n\n for job in jobs:\n jobid = job['id']\n click.echo('Terminating {}'.format(jobid))\n ctl('terminate', '--jobid', jobid)", "def kill_job(self, job):\n\n if job.status == Job.STATUS_QUEUED:\n # case 1: job is in QUEUED state\n # remove it from the queue and mark as killed\n\n job_queue = job_queue_name(job.model)\n logger.info(\n \"killing job {} by removing from queue {}\".\n format(job.uuid, job_queue))\n\n command_dict = {'command': 'PROCESS_JOB', 'job_uuid': job.uuid}\n remove_command(redis_connection(), job_queue, command_dict)\n job.status = Job.STATUS_KILLED\n # save it\n Job[job.uuid] = job\n elif job.status == Job.STATUS_RUNNING:\n # case 2: job is in RUNNING state\n # send message to worker to kill the job\n worker = worker_name(job.worker_url, job.model)\n worker_channel = node_channel_name(worker)\n logger.info(\"sending command to kill job on channel {}\".\n format(worker_channel))\n command_dict = {'command': \"KILL_JOB\", 'job_uuid': job.uuid}\n publish_command(redis_connection(), worker_channel, command_dict)\n else:\n logger.info(\"kill called on job {} in incompatible state {}\".\n format(job.uuid, job.status))", "def kill_job(self , index):\n job = self.jobs.__getitem__( index )\n if job:\n job.kill()", "def terminate():\n with open (f\"{CLUSTER_FOLDER}/uuid\", \"r\") as f:\n uuid = f.read().strip()\n\n start_time = time.time() \n cluster = delete_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], uuid) \n if(not cluster):\n log(\"Failed to terminate cluster via API.\")\n exit(1)\n\n log(f\"Started termination of cluster '{cluster['id']}'. Waiting for cluster to be terminated...\")\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n while(TIMEOUT_SECONDS > (time.time()-start_time) and cluster['status']['state'] != 'TERMINATED' and not cluster['status']['failed']):\n time.sleep(5)\n cluster = get_cluster(os.environ[\"T2_URL\"], os.environ[\"T2_TOKEN\"], cluster['id'])\n\n if(cluster['status']['failed']):\n log(\"Cluster termination failed.\")\n exit(1)\n\n if(TIMEOUT_SECONDS <= (time.time()-start_time)):\n log(\"Timeout while launching cluster.\")\n exit(1)\n\n log(f\"Cluster '{cluster['id']}' is terminated.\")", "def delete_job(api_instance, job_name):\n api_response = api_instance.delete_namespaced_job(\n name=job_name,\n namespace=\"default\",\n body=client.V1DeleteOptions(\n propagation_policy=\"Foreground\", grace_period_seconds=5\n ),\n )\n logger.info(\"Job deleted with status='%s'\" % str(api_response.status))", "def stop(self) -> None:\n self._client.terminate_job(jobId = self.id, reason = self.STOP_REASON)", "def stop_job(self):\n # DELETE /jobs/{job_id}/results\n pass", "def cleanup(self):\n cluster = self.client and self.client.cluster\n\n if self.client:\n self.client.close()\n self.client = None\n\n if cluster:\n try:\n cluster.close(timeout=60.0)\n except RuntimeError as ex:\n ## For some reason, sometimes the cluster can't be closed due to some\n ## problem with 'bkill', which fails with an error that looks like the following.\n ## If that happens, try to re-run bkill one more time in the hopes of really\n ## killing the cluster and not leaving lingering workers running.\n ## (This issue has been observed on the Janelia cluster for both dask and spark clusters.)\n ##\n # RuntimeError: Command exited with non-zero exit code.\n # Exit code: 255\n # Command:\n # bkill 54421878 54421872 54421877\n # stdout:\n 
#\n # stderr:\n # Job <54421878>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421872>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n # Job <54421877>: Failed in an LSF library call: Slave LIM configuration is not ready yet\n m = re.search(r'bkill( \\d+)+', str(ex))\n if not m:\n raise\n\n logger.warning(\"Failed to kill cluster with bkill, trying one more time...\")\n time.sleep(2.0)\n result = subprocess.run(m.group(), shell=True)\n if result.returncode != 0:\n logger.error(\"Second attempt to kill the cluster failed!\")\n raise", "def stop_batch_job(self, name, error_on_stopped=False):\n if name not in self.batch_jobs:\n raise ValueError(\"job {} doesn't exists\".format(name))\n if name not in self.jobs:\n if error_on_stopped:\n raise ValueError(\"job {} doesn't exists\".format(name))\n return\n self.remove_job(name)\n _,args,kwargs,cleanup=self._batch_jobs_args.pop(name)\n if cleanup:\n cleanup(*args,**kwargs)", "def cli(ctx, job_id):\n return ctx.gi.jobs.cancel_job(job_id)", "def _delete_job(self, job):", "def job_stop(self, job_id):\n resp = self.backend.job_stop(job_id)\n\n self.refresh_jobs()", "def _kill_canceling(self, job):\n pidrecord = os.path.join(job.output_dir, \"jobpid\")\n if os.path.exists(pidrecord):\n with open(pidrecord, 'r') as f:\n pgid = int(f.read())\n self.logger.info(\"Signalling SIGTERM to process group: %d\", pgid)\n try:\n os.killpg(pgid, signal.SIGTERM)\n except OSError as e:\n self.logger.info(\"Unable to kill process group %d: %s\", pgid, e)\n os.unlink(pidrecord)", "def stop_labeling_job(LabelingJobName=None):\n pass", "def killJob(appName, jobId):\n jobs = db.getJobs(jobId=jobId)\n job = None if len(jobs) == 0 else jobs[0]\n\n if job == None:\n return returnError (\"Job ID, %s, does not exist\" % jobId, 404)\n\n logging.info (\"[FLASKWEB] Asked to KILL job #%s. Current Job status is %s\" % (jobId, job['status']))\n # Separate check to kill orphaned jobs in Db\n # TODO: Merge Job with experiments to post updates to correct table\n if job['status'] == 'RUNNING' or job['status'] == 'SUBMITTED':\n db.updateJob(jobId, status='KILLED')\n\n if int(jobId) in dispatcher.getActiveJobs():\n status = 'KILLED'\n logging.debug('[FLASKWEB] Job %s is active. Signaling to kill in mesos.' % jobId)\n dispatcher.cancelJob(int(jobId), driverDispatch)\n else:\n status = 'ORPHANED and CLEANED'\n logging.debug('[FLASKWEB] Job # %s is ORPHANED and does not exist in current state. Cleaning up.' 
% jobId)\n\n ts = db.getTS_est() #datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S')\n thisjob = dict(jobId=jobId, time=ts, url=dispatcher.getSandboxURL(jobId), status=status)\n if 'application/json' in request.headers['Accept']:\n return jsonify(thisjob)\n else:\n return render_template(\"last.html\", appName=appName, lastjob=thisjob)", "def terminate_worker_groups(cls, args, config):\n logging.debug(\"MOLNSWorkerGroup.terminate_worker_groups(args={0})\".format(args))\n worker_obj = cls._get_workerobj(args, config)\n if worker_obj is None: return\n # Check for any instances are assigned to this worker group\n instance_list = config.get_all_instances(worker_group_id=worker_obj.id)\n # Check if they are running or stopped (if so, resume them)\n inst_to_stop = []\n if len(instance_list) > 0:\n for i in instance_list:\n status = worker_obj.get_instance_status(i)\n if status == worker_obj.STATUS_RUNNING or status == worker_obj.STATUS_STOPPED:\n print \"Terminating worker at {0}\".format(i.ip_address)\n inst_to_stop.append(i)\n if len(inst_to_stop) > 0:\n worker_obj.terminate_instance(inst_to_stop)\n else:\n print \"No workers running in the worker group\"", "def stop_training_job(TrainingJobName=None):\n pass", "def cluster_stop(r):\n cluster_id = request_get(r, \"cluster_id\")\n if not cluster_id:\n logger.warning(\"No cluster_id is given\")\n return make_fail_response(\"No cluster_id is given\")\n if cluster_handler.stop(cluster_id):\n return jsonify(response_ok), CODE_OK\n\n return make_fail_response(\"cluster stop failed\")", "def delete_jobs(self):\n jobs = self.get_jobs(self.age)\n print('Jobs queued for delete: ', jobs)\n for job in jobs:\n try: \n body = k_client.V1DeleteOptions(propagation_policy='Background')\n self.kube_v1_batch_client.delete_namespaced_job(job, body=body, namespace=self.project)\n self.kube_client.delete_namespaced_persistent_volume_claim(job+\"-storage-claim\", self.project, {})\n print('Deleted job: ', job)\n except ApiException as e:\n print(\"Exception when calling BatchV1Api -> delete_namespaced_job: %s\\n\" % e)\n exit(1)", "def delete_job(self, job):\n subprocess.call(self.cli + [PlatformJenkinsJavaCLI.DELETE_JOB, job.name])", "def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"", "def kill(self, job_id):\n if webtlsmdd.kill_job(job_id):\n x = ''\n x += '<center>'\n x += '<h3>Job %s has died ' % (job_id)\n x += 'or its associated pid has been manually killed.</h3>'\n x += '</center>'\n else:\n x = ''\n x += '<center>'\n x += '<h3>Error: Can not remove job %s.</h3>' % (job_id)\n x += '</center>'\n return x", "def cancel_job(self, job):\n try:\n self.jobs.remove(job)\n except ValueError:\n pass", "def terminate_job_run(\n self,\n ) -> Callable[\n [cloud_deploy.TerminateJobRunRequest], cloud_deploy.TerminateJobRunResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"terminate_job_run\" not in self._stubs:\n self._stubs[\"terminate_job_run\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/TerminateJobRun\",\n request_serializer=cloud_deploy.TerminateJobRunRequest.serialize,\n 
response_deserializer=cloud_deploy.TerminateJobRunResponse.deserialize,\n )\n return self._stubs[\"terminate_job_run\"]", "def cancel_vmware_protection_job(job_name):\n try:\n cohesity_client = _get_client()\n jobs = cohesity_client.protection_jobs.get_protection_jobs(\n is_deleted=False, names=job_name)\n if not jobs:\n return \"Job with name {} not available.\".format(job_name)\n for job in jobs:\n if job.name == job_name:\n job_id = job.id\n break\n if not job_id:\n return \"Job with name {} not available.\".format(job_name)\n\n # Get recent job run id and status.\n runs = cohesity_client.protection_runs.get_protection_runs(\n job_id=job_id)\n if not runs:\n return \"Job run details not available for job {}\".format(job_name)\n latest_run = runs[0]\n if latest_run.backup_run.status not in [\"kRunning\", \"kAccepted\"]:\n return \"No active job run available for job {}\".format(job_name)\n run_id = latest_run.backup_run.job_run_id\n body = CancelProtectionJobRunParam()\n body.job_run_id = run_id\n cohesity_client.protection_runs.create_cancel_protection_job_run(\n job_id, body)\n return \"Successfully cancelled the run for job {}\".format(job_name)\n except APIException as err:\n return \"Error while attempting to cancel the job {}, error : {}\".format(\n job_name, err)", "def delete(self):\n parser = reqparse.RequestParser()\n parser.add_argument(\"job_id\", type=str, location=\"form\")\n args = parser.parse_args()\n job_id = args[\"job_id\"]\n if job_id is None or job_id == \"\":\n return errors.all_errors(\n \"CLIENT_MISSING_PARAMETER\", \"job_id (str) parameter is required\"\n )\n\n get_job_info = get(\n config.Config.FLASK_ENDPOINT + \"/api/scheduler/job\",\n headers={\"X-SOCA-TOKEN\": config.Config.API_ROOT_KEY},\n params={\"job_id\": job_id},\n verify=False,\n ) # nosec\n\n if get_job_info.status_code != 200:\n return {\n \"success\": False,\n \"message\": \"Unable to retrieve this job. 
Job may have terminated\",\n }, 500\n else:\n job_info = get_job_info.json()[\"message\"]\n job_owner = job_info[\"Job_Owner\"].split(\"@\")[0]\n request_user = request.headers.get(\"X-SOCA-USER\")\n if request_user is None:\n return errors.all_errors(\"X-SOCA-USER_MISSING\")\n if request_user != job_owner:\n return errors.all_errors(\"CLIENT_NOT_OWNER\")\n try:\n qdel_command = config.Config.PBS_QDEL + \" \" + job_id\n try:\n delete_job = subprocess.check_output(shlex.split(qdel_command))\n return {\"success\": True, \"message\": \"Job deleted\"}\n except Exception as err:\n return {\n \"success\": False,\n \"message\": \"Unable to execute qdel command: \" + str(err),\n }, 500\n\n except Exception as err:\n return {\"success\": False, \"message\": \"Unknown error: \" + str(err)}, 500", "def killJob(job_id):\n \n # mark all of the Ready tasks as Killed\n with transaction() as t:\n t.cur.execute(\"\"\"update Hydra_rendertask set status = 'K' \n where job_id = '%d' and status = 'R'\"\"\" % job_id)\n \n # get hostnames for tasks that were already started\n tuples = None # @UnusedVariable\n with transaction() as t:\n t.cur.execute(\"\"\"select host from Hydra_rendertask \n where job_id = '%d' and status = 'S'\"\"\" % job_id)\n tuples = t.cur.fetchall()\n \n # make flat list out of single-element tuples fetched from db\n hosts = [t for (t,) in tuples]\n \n # send a kill request to each host, note if any failures occurred\n error = False\n for host in hosts:\n try:\n error = error or not sendKillQuestion(host)\n except socketerror:\n logger.debug(\"There was a problem communicating with {:s}\"\n .format(host))\n error = True\n \n return error", "def terminate_jobflow(self, jobflow_id):\r\n self.terminate_jobflows([jobflow_id])", "def delete(\n address: Optional[str],\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n client.delete_job(job_id)\n cli_logger.print(f\"Job '{job_id}' deleted successfully\")", "def cancel(self):\n if not self.parent_node.is_job:\n return\n\n # First perform clean operation\n self.clean()\n\n self.winstance.send_event('Cancelling job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.cancel',\n kwargs={\"name\": self.name})\n self.winstance.send_event('.. 
job canceled')\n result.task.wait_for_terminated()\n\n self._status = 'CANCELLED'", "def on_kill(self):\n if self.job_id:\n self.log.info(\"on_kill: cancel the airbyte Job %s\", self.job_id)\n self.hook.cancel_job(self.job_id)", "def __clear_jobs(self):\n namespace = self._config.cluster_config.namespace\n self.__logger.info(f'Clearing old jobs in current namespace: {namespace}')\n\n for job in self.__client.get(namespace=self._config.cluster_config.namespace)['items']:\n job_name = job['metadata']['name']\n self.__logger.info(f'Deleting: {job_name}')\n try:\n self.__client.custom_api.delete_namespaced_custom_object(\n PYTORCHJOB_GROUP,\n PYTORCHJOB_VERSION,\n namespace,\n PYTORCHJOB_PLURAL,\n job_name)\n except Exception as e:\n self.__logger.warning(f'Could not delete: {job_name}')\n print(e)", "def terminateCluster():\n try:\n # delete cluster\n redshift.delete_cluster(\n ClusterIdentifier=DWH_CLUSTER_IDENTIFIER,\n SkipFinalClusterSnapshot=True\n )\n\n # clear up role\n iam.detach_role_policy(\n RoleName=DWH_IAM_ROLE_NAME,\n PolicyArn=\"arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess\"\n )\n iam.delete_role(RoleName=DWH_IAM_ROLE_NAME)\n except Exception as e:\n print(e)", "def cancel_job(job_id: str, connection: Optional['Redis'] = None, serializer=None, enqueue_dependents: bool = False):\n Job.fetch(job_id, connection=connection, serializer=serializer).cancel(enqueue_dependents=enqueue_dependents)", "def stop(\n address: Optional[str],\n no_wait: bool,\n job_id: str,\n headers: Optional[str],\n verify: Union[bool, str],\n):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n cli_logger.print(f\"Attempting to stop job '{job_id}'\")\n client.stop_job(job_id)\n\n if no_wait:\n return\n else:\n cli_logger.print(\n f\"Waiting for job '{job_id}' to exit \" f\"(disable with --no-wait):\"\n )\n\n while True:\n status = client.get_job_status(job_id)\n if status in {JobStatus.STOPPED, JobStatus.SUCCEEDED, JobStatus.FAILED}:\n _log_job_status(client, job_id)\n break\n else:\n cli_logger.print(f\"Job has not exited yet. 
Status: {status}\")\n time.sleep(1)", "def delete_cluster(ctx, project_name, cluster_name):\n project = ctx.obj.groups.byName[project_name].get().data\n ctx.obj.groups[project.id].clusters[cluster_name].delete().data\n click.echo(\"DONE!\")", "def kill(self):\n return self._raw_execute(\"cancel\", {\"job_id\": self.job_id})", "def delete_job(self, jobid=None, squash=None):\n\n self.check_all_jobs()\n\n if jobid is None:\n if hasattr(self, 'current_job'):\n jobid = self.current_job\n\n if jobid:\n if hasattr(self, 'current_job'):\n if jobid == self.current_job:\n del self.current_job\n\n if self.job_dict[jobid] in ['COMPLETED', 'ERROR',\n 'ABORTED', 'PENDING']:\n result = self.session.delete(\n CosmoSim.QUERY_URL + \"/{}\".format(jobid),\n auth=(self.username, self.password), data={'follow': ''})\n\n else:\n warnings.warn(\"Can only delete a job with phase: \"\n \"'COMPLETED', 'ERROR', 'ABORTED', or 'PENDING'.\")\n return\n\n if not result.ok:\n result.raise_for_status()\n if squash is None:\n warnings.warn('Deleted job: {}'.format(jobid))\n\n return result", "async def job_kill(self, uid):\n self._require_running()\n await self._get_job(uid).kill()", "def post(self, job_id):\n try:\n if job_id == \"all\":\n log.info(\"Attempting to stop all jobs.\")\n self.runner_service.stop_all()\n log.info(\"Stopped all jobs!\")\n self.set_status(200)\n elif job_id:\n log.info(\"Attempting to stop job: {}\".format(job_id))\n self.runner_service.stop(job_id)\n self.set_status(200)\n else:\n ArteriaUsageException(\"Unknown job to stop\")\n except ArteriaUsageException as e:\n log.warning(\"Failed stopping job: {}. Message: \".format(job_id, e.message))\n self.send_error(500, reason=e.message)", "def cleanup(api_instance=None):\n api = api_instance or get_api()\n r = api.list_job_for_all_namespaces()\n delete_opts = kubernetes.client.V1DeleteOptions(\n propagation_policy=\"Background\")\n for job in r.items:\n if job.status.succeeded == job.spec.completions:\n print(job.metadata.name, \"finished!\")\n api.delete_namespaced_job(\n job.metadata.name, 'default', body=delete_opts)", "def terminate(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.terminate()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def cancel_job(self, job_number):\n raise NotImplementedError", "def DeleteJob(self, job_urn, token=None):\n aff4.FACTORY.Delete(job_urn, token=token)", "def killJobs(self, blTaskName, rng):\n return self._genericCommand('kill', blTaskName, rng)", "def deleteJob(self, jobId):\n params = {'id': jobId}\n try:\n return self.gc.delete(JobUtils.JOB_ID_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. 
invalid job id:', jobId)\n return {}\n raise", "def remove_job(job_id):\n subprocess.check_call(['atrm', str(job_id)])\n return job_id", "def delete_job(self, job, context=None):\n return self._client.call_method(\n 'UserAndJobState.delete_job',\n [job], self._service_ver, context)", "async def request_job_stop(self, job_id: str, *args, **kwargs) -> bool:\n # TODO: implement\n raise NotImplementedError('{} function \"request_job_stop\" not implemented yet'.format(self.__class__.__name__))", "def test_delete_job(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job/{jobId}'.format(jobId=1),\n method='DELETE')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def terminate_cluster(cluster_name: str, max_retry: int = 3) -> None:\n retry_cnt = 0\n while True:\n try:\n usage_lib.messages.usage.set_internal()\n sky.down(cluster_name)\n return\n except ValueError:\n # The cluster is already down.\n return\n except Exception as e: # pylint: disable=broad-except\n retry_cnt += 1\n if retry_cnt >= max_retry:\n raise RuntimeError('Failed to terminate the spot cluster '\n f'{cluster_name}.') from e\n logger.error('Failed to terminate the spot cluster '\n f'{cluster_name}. Retrying.'\n f'Details: {common_utils.format_exception(e)}')\n logger.error(f' Traceback: {traceback.format_exc()}')", "def delete_cluster(cluster_id: str, sg_id: str = None):\n print(\"INFO: Deleting cluster %s\" % cluster_id)\n emr = get_emr_client()\n emr.terminate_job_flows(JobFlowIds=[cluster_id])\n print(\"INFO: Cluster deleted.\")\n\n print(\"INFO: Waiting before deleting SG. . .\")\n sleep(300)\n if sg_id is not None:\n delete_sg(sg_id)\n\n os.remove(META_FILE)\n os.remove(\"connection.bash\")", "def killBatchJobs(self, jobIDs):\n raise NotImplementedError('Abstract method: killBatchJobs')", "def delete(job_id):\n job = JobModel.get_one_job(job_id)\n if not job:\n return custom_response({'Error':'Job Not Found'}, 404)\n\n JobModel.query.filter(JobModel.job_id == job_id).delete()\n\n return custom_response({'Message': 'Deleted'}, 204)", "def stop_transform_job(TransformJobName=None):\n pass", "def cancelJob(self, jobId):\n params = {'id': jobId}\n try:\n return self.gc.put(JobUtils.JOB_CANCEL_PATH, parameters=params)\n except HttpError as e:\n if e.status == 400:\n print('Error. 
invalid job id:', jobId)\n return {}\n raise", "def stop_text_translation_job(JobId=None):\n pass", "def remove(self, job_or_id):\n job_id = job_or_id.id if isinstance(job_or_id, Job) else job_or_id\n self.connection.lrem(self.key, 0, job_id)\n return defer.succeed(job_or_id)", "def stop(verbose_level=1, hostnames=[], servicenames=[]):\n # type: (int, List[str], List[str]) -> Job\n check_arg(verbose_level, u._('Verbose level'), int)\n check_arg(hostnames, u._('Host names'), list,\n empty_ok=True, none_ok=True)\n check_arg(servicenames, u._('Service names'), list,\n empty_ok=True, none_ok=True)\n\n check_kolla_args(hostnames=hostnames,\n servicenames=servicenames)\n\n hostnames = safe_decode(hostnames)\n servicenames = safe_decode(servicenames)\n action = KollaAction(verbose_level=verbose_level,\n playbook_name='site.yml')\n ansible_job = action.stop(hostnames, servicenames)\n return Job(ansible_job)", "def stop_hyper_parameter_tuning_job(HyperParameterTuningJobName=None):\n pass", "def terminateJobs(self, ids):\n #WARNING: terminateJobs modifies the running queue, which\n # fillJobQueue assumes can't happen\n queues = [self.__queue, self.__clientQueue, self.__running, self.__clientRunning]\n with self.__queueLock:\n for _, queue in enumerate(queues):\n toRemove = []\n for job in queue:\n if job is not None and job.identifier in ids:\n # this assumes that each uniqueHandle only exists once in any queue anywhere\n ids.remove(job.identifier)\n toRemove.append(job)\n for job in toRemove:\n # for fixed-spot queues, need to replace job with None not remove\n if isinstance(queue,list):\n job.kill()\n queue[queue.index(job)] = None\n # for variable queues, can just remove the job\n else:\n queue.remove(job)\n self.raiseADebug(f'Terminated job \"{job.identifier}\" by request.')\n if len(ids):\n self.raiseADebug('Tried to remove some jobs but not found in any queues:',', '.join(ids))", "def __init__(self,\n name: str = None,\n k8s_name: str = None,\n job_name: str = None):\n\n k8s_name = k8s_name or job_name\n if not k8s_name:\n raise ValueError(\"You need to provide a k8s_name or a job_name.\")\n\n super().__init__(\n k8s_resource={\n \"apiVersion\": \"databricks.microsoft.com/v1alpha1\",\n \"kind\": \"Djob\",\n \"metadata\": {\n \"name\": k8s_name\n }\n },\n action=\"delete\",\n name=name)", "def delete(self):\n # delete the named cluster\n # don't wait for operation to finish\n print(\"+ Deleting cluster {} (async).\".format(self.name_hyphenated))\n util.syscall(\"gcloud container clusters delete {} --quiet --async\".\n format(self.name))\n self.started = False\n self.deleted = True", "def terminate(self, _):\n self.execution_manager.terminate()\n self.menu_structure['terminate'] = ('main', [('Continue submitting jobs', self.enable_submission)])\n self.__back_to_main()", "def stopJob(self):\n if len(self.__jobQueue) > 0:\n _JobThread.stopJobThreadInstance(\n self.caller, self.__jobQueue[0].stopRun)", "def delete_vmware_protection_job(job_name, delete_snapshots=True):\n try:\n cohesity_client = _get_client()\n jobs = cohesity_client.protection_jobs.get_protection_jobs(\n is_deleted=False, names=job_name)\n if not jobs:\n return \"Job with name {} not available.\".format(job_name)\n for job in jobs:\n if job.name == job_name:\n job_id = job.id\n break\n if not job_id:\n return \"Job with name {} not available.\".format(job_name)\n # Get recent job run id and status.\n body = DeleteProtectionJobParam()\n body.delete_snapshots = delete_snapshots\n 
cohesity_client.protection_jobs.delete_protection_job(job_id, body)\n return \"Successfully deleted job {}\".format(job_name)\n except APIException as err:\n return \"Error while attempting to delete the job {}, error : {}\".format(\n job_name, err)", "def cancel_job(self, job_id):\n self.send(JobCommands.CANCEL_JOB, CancelJobPayload(job_id))", "def _reply_remove_job(self):\n self.remove_job_socket.linger = 0\n self.remove_job_socket.setsockopt(zmq.RCVTIMEO, remote_constants.HEARTBEAT_RCVTIMEO_S * 1000)\n while self.worker_is_alive and self.master_is_alive:\n try:\n message = self.remove_job_socket.recv_multipart()\n tag = message[0]\n assert tag == remote_constants.KILLJOB_TAG\n to_remove_job_address = to_str(message[1])\n logger.info(\"[Worker] A job requests the worker to stop this job.\")\n self._remove_job(to_remove_job_address)\n self.remove_job_socket.send_multipart([remote_constants.NORMAL_TAG])\n except zmq.error.Again as e:\n #detect whether `self.worker_is_alive` is True periodically\n pass", "def _delete_job(self, job):\n with self.db_lock:\n return self.rcon.zrem(job)", "def delete_dlp_job(project, job_name):\n\n # Import the client library.\n import google.cloud.dlp\n\n # Instantiate a client.\n dlp = google.cloud.dlp.DlpServiceClient()\n\n # Convert the project id and job name into a full resource id.\n name = dlp.dlp_job_path(project, job_name)\n\n # Call the API to delete job.\n dlp.delete_dlp_job(name)\n\n print('Successfully deleted %s' % job_name)", "def delete(self, customerguid, jobguid=\"\", executionparams=None):", "def delete_coe_cluster(self, name_or_id):\n\n cluster = self.get_coe_cluster(name_or_id)\n\n if not cluster:\n self.log.debug(\n \"COE Cluster %(name_or_id)s does not exist\",\n {'name_or_id': name_or_id},\n exc_info=True,\n )\n return False\n\n self.container_infrastructure_management.delete_cluster(cluster)\n self.list_coe_clusters.invalidate(self)\n return True", "def kill_ipcluster(name=None):\n clean_up_cmds = [\n \"qselect -u $USER | xargs qdel\",\n \"rm -f *.hpc05.hpc* ipengine* ipcontroller* pbs_*\",\n \"pkill -f hpc05_culler\",\n \"pkill -f ipcluster\",\n \"pkill -f ipengine\",\n \"pkill -f ipyparallel.controller\",\n \"pkill -f ipyparallel.engines\",\n \"scancel --name='ipy-engine-' --user=$USER\", # SLURM\n \"scancel --name='ipy-controller-' --user=$USER\", # SLURM\n ]\n\n if name is not None:\n clean_up_cmds.append(f\"scancel --name='{name}' --user=$USER\")\n\n clean_up_cmds = [cmd + \" 2> /dev/null\" for cmd in clean_up_cmds]\n\n for cmd in clean_up_cmds:\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)\n process.wait()", "def delete(profile, cluster, task_id):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"task\"] = task_id\n return client.stop_task(**params)", "def stop_slurm_jobs():\n for job_id in slurm_jobs_scheduled:\n logger.info(\"Canceling previously scheduled job %s...\", job_id)\n cancel_command = [\"scancel\", str(job_id)]\n print(\" \".join(shlex.quote(part) for part in cancel_command))\n run(cancel_command, check=False)", "def delete(self, job_id):\n # Only admin can delete any job\n if not current_user.is_admin():\n return get_message_json('删除任务需要管理员权限'), HTTPStatus.FORBIDDEN\n\n try:\n result = jobs.delete_job_by_id(job_id)\n if result == 1:\n return get_message_json('已删除该任务'), HTTPStatus.OK\n else:\n if jobs.find_job_by_id(job_id) is None:\n return get_message_json('任务不存在'), HTTPStatus.NOT_FOUND\n return get_message_json('未知的任务删除失败'), 
HTTPStatus.BAD_REQUEST\n except Exception as err:\n return handle_internal_error(str(err))", "def stop_job(self,\n ssh_client,\n name,\n job_options,\n is_singularity,\n logger,\n workdir=None):\n if not self._checkSshClient(ssh_client, logger):\n return False\n\n call = self._build_job_cancellation_call(name,\n job_options,\n logger)\n if call is None:\n return False\n\n return self._execute_shell_command(ssh_client,\n call,\n workdir=workdir)", "def terminate_controller(cls, args, config):\n logging.debug(\"MOLNSController.terminate_controller(args={0})\".format(args))\n controller_obj = cls._get_controllerobj(args, config)\n if controller_obj is None:\n return\n instance_list = config.get_all_instances(controller_id=controller_obj.id)\n logging.debug(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n print(\"\\tinstance_list={0}\".format([str(i) for i in instance_list]))\n # Check if they are running or stopped\n if len(instance_list) > 0:\n for i in instance_list:\n if i.worker_group_id is None:\n status = controller_obj.get_instance_status(i)\n if status == controller_obj.STATUS_RUNNING or status == controller_obj.STATUS_STOPPED:\n print \"Terminating controller running at {0}\".format(i.ip_address)\n controller_obj.terminate_instance(i)\n else:\n worker_name = config.get_object_by_id(i.worker_group_id, 'WorkerGroup').name\n worker_obj = cls._get_workerobj([worker_name], config)\n status = worker_obj.get_instance_status(i)\n if status == worker_obj.STATUS_RUNNING or status == worker_obj.STATUS_STOPPED:\n print \"Terminating worker '{1}' running at {0}\".format(i.ip_address, worker_name)\n worker_obj.terminate_instance(i)\n else:\n print \"No instance running for this controller\"", "def run(job=None, logger=None, **kwargs):\n environment = Environment.objects.get(id=ENV_ID)\n\n # Save cluster data on the resource so teardown works later\n create_required_parameters()\n resource = kwargs['resource']\n resource.create_gke_k8s_cluster_env = environment.id\n resource.create_gke_k8s_cluster_name = CLUSTER_NAME\n resource.name = CLUSTER_NAME\n resource.save()\n\n job.set_progress('Connecting to GKE...')\n builder = GKEClusterBuilder(environment, CLUSTER_NAME)\n\n job.set_progress('Sending request for new cluster {}...'.format(CLUSTER_NAME))\n builder.create_cluster(NODE_COUNT)\n\n job.set_progress('Waiting up to {} seconds for provisioning to complete.'\n .format(TIMEOUT))\n start = time.time()\n job.set_progress('Waiting for cluster IP address...')\n endpoint = builder.wait_for_endpoint(timeout=TIMEOUT)\n if not endpoint:\n return (\"FAILURE\",\n \"No IP address returned after {} seconds\".format(TIMEOUT),\n \"\")\n\n remaining_time = TIMEOUT - (time.time() - start)\n job.set_progress('Waiting for nodes to report hostnames...')\n nodes = builder.wait_for_nodes(NODE_COUNT, timeout=remaining_time)\n if len(nodes) < NODE_COUNT:\n return (\"FAILURE\",\n \"Nodes are not ready after {} seconds\".format(TIMEOUT),\n \"\")\n\n job.set_progress('Importing cluster...')\n cluster = builder.get_cluster()\n tech = ContainerOrchestratorTechnology.objects.get(name='Kubernetes')\n kubernetes = Kubernetes.objects.create(\n name=CLUSTER_NAME,\n ip=cluster['endpoint'],\n port=443,\n protocol='https',\n serviceaccount=cluster['masterAuth']['username'],\n servicepasswd=cluster['masterAuth']['password'],\n container_technology=tech,\n )\n resource.create_gke_k8s_cluster_id = kubernetes.id\n resource.save()\n url = 'https://{}{}'.format(\n PortalConfig.get_current_portal().domain,\n 
reverse('container_orchestrator_detail', args=[kubernetes.id])\n )\n job.set_progress(\"Cluster URL: {}\".format(url))\n\n job.set_progress('Importing nodes...')\n for node in nodes:\n # Generate libcloud UUID from GCE ID\n id_unicode = '{}:{}'.format(node['id'], 'gce')\n uuid = hashlib.sha1(id_unicode.encode('utf-8')).hexdigest()\n # Create a bbones server record. Other details like CPU and Mem Size\n # will be populated the next time the GCE handler is synced.\n Server.objects.create(\n hostname=node['name'],\n resource_handler_svr_id=uuid,\n environment=environment,\n resource_handler=environment.resource_handler,\n group=resource.group,\n owner=resource.owner,\n )\n\n job.set_progress('Waiting for cluster to report as running...')\n remaining_time = TIMEOUT - (time.time() - start)\n status = builder.wait_for_running_status(timeout=remaining_time)\n if status != 'RUNNING':\n return (\"FAILURE\",\n \"Status is {} after {} seconds (expected RUNNING)\".format(\n status, TIMEOUT),\n \"\")\n\n return (\"SUCCESS\",\n \"Cluster is ready and can be accessed at {}\".format(url),\n \"\")", "def stop_cluster_service(self, cluster_name, service_name):\n return self._post(endpoint=('{}/clusters/{}/services/{}/'\n 'commands/stop').format(self.api_version,\n cluster_name,\n service_name)).json()", "def fail_job( self, job_state ):\n self.stop_job( self.sa_session.query( self.app.model.Job ).get( job_state.job_wrapper.job_id ) )\n job_state.job_wrapper.fail( getattr( job_state, \"fail_message\", GENERIC_REMOTE_ERROR ) )", "def stop_compilation_job(CompilationJobName=None):\n pass", "def test_job_delete_completed_job(self):\n test_app = self._create_app()\n class_path = \"spark.jobserver.VeryShortDoubleJob\"\n job = self.client.jobs.create(test_app, class_path,\n ctx=self._get_functional_context())\n time.sleep(3)\n self._wait_till_job_is_done(job)\n self.assertRaises(exceptions.NotFoundException,\n self.client.jobs.delete, job.jobId)", "def finish_job(\n self, job_id, error_message=None, error_code=None, error=None, job_output=None\n ):\n\n if not job_id:\n raise ValueError(\"Please provide valid job_id\")\n\n job = self.get_mongo_util().get_job(job_id=job_id)\n self._test_job_permissions(job, job_id, JobPermissions.WRITE)\n self._check_job_is_running(job_id=job_id)\n\n if error_message:\n if error_code is None:\n error_code = ErrorCode.job_crashed.value\n self._finish_job_with_error(\n job_id=job_id,\n error_message=error_message,\n error_code=error_code,\n error=error,\n )\n elif job_output is None:\n if error_code is None:\n error_code = ErrorCode.job_missing_output.value\n msg = \"Missing job output required in order to successfully finish job. 
Something went wrong\"\n self._finish_job_with_error(\n job_id=job_id, error_message=msg, error_code=error_code\n )\n raise ValueError(msg)\n else:\n self._finish_job_with_success(job_id=job_id, job_output=job_output)", "def remove(self, job_id):\n self.background_scheduler.remove_job(job_id)", "async def kill(self):\n if self._state in (JobState.PENDING, JobState.RUNNING):\n await self._process.kill()\n else:\n raise JobInvalidStateError('job is not running')", "def stop(self: RemoteCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.clients.terminate()\n super().stop(wait=wait, timeout=timeout)", "def delete(self, jobs):\n assert isinstance(jobs, list), 'Jobs must be a list'\n assert len(jobs) > 0, 'One or more jobs required'\n\n req = list()\n if len(jobs) > 1:\n for r in self._batch_request(jobs):\n req.append(\n ''.join([self._scheduler_endpoint, '?', '&'.join(r)]))\n else:\n req = \"{}?job={}\".format(\n self._scheduler_endpoint, jobs[0])\n\n try:\n self._api_delete(req)\n except HTTPError as e:\n raise JobClientError(e.message)", "def remove(self, job_or_id):\n if isinstance(job_or_id, Job):\n job = job_or_id\n else:\n job = Job(connection=self.connection, id=job_or_id)\n\n try:\n job.refresh()\n job._delete()\n except NoSuchJobError:\n pass\n\n self._remove(job.id)", "def _remove(self, job_id):\n self.connection._lrem(self.key, 1, job_id)", "async def stop(self):\n self._job.cancel()\n await super().stop()", "def stop(self: AutoScalingCluster, wait: bool = False, timeout: int = None) -> None:\n self.server.stop(wait=wait, timeout=timeout)\n self.autoscaler.stop(wait=wait, timeout=timeout)\n super().stop(wait=wait, timeout=timeout)", "def delete_cluster(self, cluster, *args, **kwargs):\n raise NotImplementedError", "def kill(self):\n self._update()\n if self.running_mode == \"local\":\n for process in self.processes:\n try:\n process.kill()\n except psutil.NoSuchProcess:\n # The process has just terminated\n # In multiprocess run this is likely to happen when other processes stops.\n pass\n elif self.running_mode == \"grid\":\n subprocess.check_call(\"qdel %d\" % self.job[\"job_number\"], shell=True)\n pass\n else:\n logger.warning(\"Asked for termination of a Run not known to be running.\")", "def cancelJob(_id):\n job = mongo.db.jobs.find_one({'_id': _id})\n tasks = mongo.db.tasks.find({'job': _id})\n for each in tasks:\n _t = ca.AsyncResult(each.get('ctid'))\n _t.revoke()\n job['status'] = 'cancelled'\n \"\"\"Set status of job to cancelled\"\"\"\n mongo.db.jobs.update({'_id': _id}, job)\n \"\"\"Bulk update tasks\"\"\"\n bulk = mongo.db.tasks.initialize_unordered_bulk_op()\n bulk.find({'job': _id, 'status': {'$ne': 'completed'}}).update({\n '$set': {\n 'status': \"cancelled\",\n 'cancelled_on': now(),\n 'slave': None,\n }})\n bulk.execute()\n\n return {'info': 'success'}", "def _Delete(self):\n cmd = self.cmd_prefix + [\n 'redshift', 'delete-cluster-subnet-group',\n '--cluster-subnet-group-name', self.name\n ]\n vm_util.IssueCommand(cmd, raise_on_failure=False)", "def remove_job(data, job):\n for j in data.queue:\n if job.proc_id == j:\n del j\n return", "def kill(self, timeout : Optional[int] = 3) -> SuccessTuple:\n daemoniker, psutil = attempt_import('daemoniker', 'psutil')\n success, msg = self._send_signal(daemoniker.SIGTERM, timeout=timeout)\n if success:\n return success, msg\n process = self.process\n if process is None or not process.is_running():\n return True, \"Process has already stopped.\"\n try:\n 
p.terminate()\n p.kill()\n p.wait(timeout=10)\n except Exception as e:\n return False, f\"Failed to kill job {self} with exception: {e}\"\n return True, \"Success\"", "def killall(self):\n\n for job_id, job in self.jobs:\n backend.kill( job )", "async def test_cancel_alias(my_job):\n\n # Set up callback to get notifications when job state changes.\n job = None\n\n def on_job_update(_job):\n \"\"\"The callback to update `job`.\"\"\"\n nonlocal job\n job = _job\n # Asserts that job is either pending or canceled.\n assert job.state in ['PENDING', 'CANCELED'], (\n 'Job that canceled immediately after submission has wrong '\n f'state `{job.state}`!')\n\n my_job.set_on_update(on_job_update)\n\n # Submit a job.\n new_job = await my_job.job(mustfail=False)\n\n # Test `cancel` alias on the job function.\n my_job.job_orig.cancel(new_job.id)\n\n # Process ASGI messages but do not wait for jobs (no jobs started).\n await my_job.process_jobs()\n\n # Check a state of the job.\n assert job.state == 'CANCELED', ('Canceled job has wrong state '\n f'`{job.state}`!')" ]
[ "0.69394773", "0.6439822", "0.6429004", "0.63975894", "0.6377325", "0.6343466", "0.6278735", "0.62779915", "0.62628543", "0.61729574", "0.6161055", "0.61369205", "0.6120452", "0.61027503", "0.60934454", "0.60848886", "0.60653436", "0.6043757", "0.5999601", "0.5956319", "0.5952695", "0.5952206", "0.59408826", "0.5939996", "0.58549774", "0.5837356", "0.5833367", "0.5829566", "0.5801544", "0.57941866", "0.5745139", "0.57161176", "0.5715346", "0.56866854", "0.56840116", "0.56831264", "0.56825346", "0.56732327", "0.5661318", "0.565875", "0.56495976", "0.56451684", "0.5634541", "0.5625255", "0.5613167", "0.5601375", "0.55910283", "0.55697536", "0.5568836", "0.5530111", "0.5527181", "0.5516737", "0.5496493", "0.54771525", "0.5475098", "0.5464974", "0.5457817", "0.5457", "0.545622", "0.5449276", "0.5445781", "0.5443734", "0.54408187", "0.54340255", "0.5423789", "0.54152375", "0.54083806", "0.5403056", "0.54007465", "0.5390997", "0.53883797", "0.53777", "0.5348613", "0.53408927", "0.5334178", "0.53330576", "0.5310287", "0.5301039", "0.528787", "0.52815163", "0.5280017", "0.5279104", "0.5278045", "0.5273335", "0.5253131", "0.5249494", "0.52428395", "0.5232159", "0.52307117", "0.5229271", "0.522357", "0.52215457", "0.52156854", "0.52137876", "0.5208267", "0.51966035", "0.51935726", "0.5182009", "0.51786083", "0.517487" ]
0.7375492
0
Constructor to create instance of the Line object.
def __init__(self, p1=None, p2=None, tolerance=None, l=None): if l is None: p1_arr = np.array(p1, dtype=float) p2_arr = np.array(p2, dtype=float) delta = p2_arr - p1_arr norm2 = delta[0] ** 2 + delta[1] ** 2 + delta[2] ** 2 norm1 = np.math.sqrt(norm2) if norm2 == 0: raise Exception("Norm is zero!") # Line direction. self.direction = delta / norm1 # Line point closest to the origin. self.zero = p1_arr - np.dot(p1_arr, delta) / norm2 # Tolerance below which points are considered identical. self.tolerance = 1e-10 if tolerance is None else tolerance else: # Tolerance below which points are considered identical. self.tolerance = l.tolerance # Line direction. self.direction = l.direction # Line point closest to the origin. self.zero = l.zero
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, vertices, **kwargs):\n super(Line, self).__init__(vertices, **kwargs)\n self._geotype = \"Line\"\n return", "def __init__(self, start, end, oldLine = None):\n self.__start = start\n self.__end = end\n if(self.__start == self.__end):\n \"\"\"\n If a zero length line is created that most likely means there is a\n logic problem somewhere in the program. This does not throw and error\n so that the output can still be examined to help diagnose the problem.\n \"\"\"\n# raise Exception('Zero length line')\n logger.warning('A line was created with no length at: ' + \n str(self.start))\n \"\"\" The Point which is the upper left corner of the line's bounding box \"\"\"\n self.__upperLeft = None\n \"\"\" The Point of the lower right corner of the bounding box. \"\"\"\n self.__lowerRight = None\n self.__extrusionRate = 0\n self.freezeExRate = False\n if not(oldLine is None):\n self.__extrusionRate = oldLine.extrusionRate\n self.freezeExRate = oldLine.freezeExRate\n self.vector = np.array([self.end.x-self.start.x,\n self.end.y-self.start.y])", "def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)", "def createFromLine(cls, line, **kwargs):\n angle = line.angle\n x, y = cls.cartesian([1, angle])\n return cls(x, y, **kwargs)", "def __init__(self, line):\n self.start = LineAndColumn(line, 1)\n self.end = LineAndColumn(line, 0)", "def __init__(self, c, p1=Point(), p2 = Point()):\n Line.__init__(self, p1, p2)\n self.cnv = c", "def _init():\n line.set_data([], [])\n return line,", "def __init__(self, point1, point2):\n self.point1 = point1\n self.point2 = point2\n self.vertical = False\n self.fixed_x = None\n self.k = None\n self.b = None\n\n # cached angle props\n self.angle = None\n self.angle_cos = None\n self.angle_sin = None\n\n self.set_line_props(point1, point2)", "def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')", "def create_line(self, x1, y1, x2, y2, style=None, parent=None):\n attrs = {'d': 'M %5f %5f L %5f %5f' % (x1, y1, x2, y2)}\n return self.create_path(attrs, style, parent)", "def __init__(self, ping_time=None, data=None, color=[0.58, 0.0, 0.83],\n name='line', linestyle='solid', thickness=1.0, **_):\n super(line, self).__init__()\n\n # Set the ping time.\n self.ping_time = ping_time\n\n # Set the number of pings.\n if ping_time is not None:\n self.n_pings = ping_time.shape[0]\n\n # Assign data based on what we're given. 
Arrays must be the same shape\n # as ping_time, scalars are expanded to the same shape as ping_time\n # and None is None.\n if isinstance(data, np.ndarray):\n if data.ndim == 0:\n data = np.full(ping_time.shape[0], data, dtype='float32')\n else:\n if data.shape[0] != ping_time.shape[0]:\n raise ValueError(\"The data array must be None, a scalar \"\n \"or an array the same size as ping_time.\")\n elif data is not None:\n try:\n data = float(data)\n data = np.full(ping_time.shape[0], data, dtype='float32')\n except Exception:\n raise ValueError(\"The data array must be None, a scalar or an\"\n \" array the same size as ping_time.\")\n self.data = data\n\n # Set the initial attribute values.\n self.color = color\n self.name = name\n self.linestyle = linestyle\n self.thickness = thickness\n\n # Update out data_attributes list, adding the \"data\" attribute.\n self._data_attributes += ['data']", "def _createline(self):\n return self.cv.create_line(0, 0, 0, 0, fill=\"\", width=2,\n capstyle = TK.ROUND)", "def line(\n self, x: Hashable | None = None, y: Hashable | None = None, **kwargs\n ) -> PlotAccessor:\n return self(kind=\"line\", x=x, y=y, **kwargs)", "def __init__(self, polyhedron, data):\n super(Line, self).__init__(polyhedron, data)", "def createLineSegment(self):\n return _libsbml.Curve_createLineSegment(self)", "def __init__(self, scn_line_list):\n self.scn_line_list = scn_line_list", "def line(self, x1, y1, x2, y2, markers=(None, None), cls=None, style=None, attrs=None):\n if any(markers):\n attrs = dict(attrs or {})\n if markers[0]:\n attrs['marker-start'] = 'url(#%s)' % markers[0]\n if markers[1]:\n attrs['marker-end'] = 'url(#%s)' % markers[1]\n x1, y1, x2, y2 = self._meta.units(x1, y1, x2, y2)\n payload = self._meta.make_payload(cls, style, attrs)\n self.elements.append(\"\"\"\n <line x1=\"%s\" y1=\"%s\" x2=\"%s\" y2=\"%s\" %s/>\n \"\"\".strip() % (\n x1, y1, x2, y2, payload,\n ))\n return self", "def line(self, x1, y1, x2, y2, markers=(None, None), cls=None, style=None, attrs=None):\n if any(markers):\n attrs = dict(attrs or {})\n if markers[0]:\n attrs['marker-start'] = 'url(#%s)' % markers[0]\n if markers[1]:\n attrs['marker-end'] = 'url(#%s)' % markers[1]\n x1, y1, x2, y2 = self._meta.units(x1, y1, x2, y2)\n payload = self._meta.make_payload(cls, style, attrs)\n self.elements.append(\"\"\"\n <line x1=\"%s\" y1=\"%s\" x2=\"%s\" y2=\"%s\" %s/>\n \"\"\".strip() % (\n x1, y1, x2, y2, payload,\n ))\n return self", "def __init__(self, points):\n self.points = points\n self.lines = []\n\n orientation = 1\n for i, point in enumerate(self.points):\n try:\n if points[i+1].x > point.x:\n orientation = orientation\n else:\n orientation = - 1\n point.orientation = orientation\n self.points[i+1].orientation = orientation\n self.lines.append(Line(point, self.points[i+1]))\n except IndexError:\n point.orientation = orientation\n self.lines.append(Line(point, self.points[0]))", "def __init__(self, vertices=None, vector=None, color=None):\n\n if vector and isinstance(vector, Vector3):\n vertices = [Point3(0, 0, 0), Point3(vector)]\n\n if vertices and len(vertices) > 2:\n vertices = vertices[:2]\n LineString.__init__(self, vertices=vertices, color=color)", "def __init__(self, *args, **kwargs):\n # Store props that let us dynamically and incrementally modify\n # line locations and settings like with Cartesian axes\n self._boundinglat = None\n self._latmax = None\n self._latlines = None\n self._lonlines = None\n self._lonlines_values = None\n self._latlines_values = None\n self._lonlines_labels = 
None\n self._latlines_labels = None\n super().__init__(*args, **kwargs)", "def to_line(self):\n v = self.vertices + self.vertices[0]\n return Line(v, properties=self.properties, crs=self.crs)", "def __init__(self, *args):\n _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint2_swiginit(self,_itkLineSpatialObjectPointPython.new_itkLineSpatialObjectPoint2(*args))", "def __init__(self, coordinates): \n\t\tself.coordinates = coordinates\n\t\tself.start = coordinates[0]\n\t\tself.end = coordinates [1]\n\t\tself.line = LineString([tuple(self.start), tuple(self.end)])\n\t\tself.normal = -grad.grad_line(self.start, self.end)\n\t\tself.n_collisions = 0\n\t\tself.length = np.sqrt((self.start[0]-self.end[0])**2+(self.start[1]-self.end[1])**2)", "def __init__(self, lineLen):\n if lineLen < 10:\n raise ValueError('lineLen cannot be less than 10')\n self.len = lineLen\n self.currentSpace = lineLen\n self.beginningOfLine = True", "def __init__(self, lines, names):\n\n # from graphing import Graph\n\n self.lines = lines\n self.remaining_events = []\n\n leftmost = _MAX_RIGHT\n\n for i, (name, left, right) in enumerate(self.lines):\n self.lines[i] = (name, left-leftmost, right-leftmost)\n\n for i, (name, left, right) in enumerate(self.lines):\n self.remaining_events.append((left, i))\n self.remaining_events.append((right, i))\n\n self.remaining_events.sort()\n\n self.active_line_segments = []\n self.sweep_line = None\n\n self.is_done = False\n self.idx = 0\n self.a_line = None\n\n self.overlap_graph = nx.Graph(names)\n # self.interval_graph = nx.Graph(names)", "def __init__(self, line, lineno, syspaths):\n self._line = line\n self._lineno = lineno\n self._orig_line = None\n self._syspaths = syspaths", "def __init__(self, timestamp: int, line_number: int) -> None:\n Event.__init__(self, timestamp)\n self.line_number = line_number", "def __init__(self, timestamp: int, line_number: int) -> None:\n Event.__init__(self, timestamp)\n self.line_number = line_number", "def createLineSegment(self):\n return _libsbml.Layout_createLineSegment(self)", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def fromLines(cls, listOfLine: list):\n points = cls.edgeToVertex(listOfLine)\n return cls.fromPoints(points)", "def __init__(\n self,\n arg=None,\n autocolorscale=None,\n cauto=None,\n cmax=None,\n cmin=None,\n color=None,\n colorscale=None,\n colorsrc=None,\n dash=None,\n reversescale=None,\n showscale=None,\n width=None,\n **kwargs\n ):\n super(Line, self).__init__('line')\n\n # Validate arg\n # ------------\n if arg is None:\n arg = {}\n elif isinstance(arg, self.__class__):\n arg = arg.to_plotly_json()\n elif isinstance(arg, dict):\n arg = copy.copy(arg)\n else:\n raise ValueError(\n \"\"\"\\\nThe first argument to the plotly.graph_objs.scatter3d.Line \nconstructor must be a dict or \nan instance of plotly.graph_objs.scatter3d.Line\"\"\"\n )\n\n # Import validators\n # -----------------\n from plotly.validators.scatter3d import (line as v_line)\n\n # Initialize validators\n # ---------------------\n self._validators['autocolorscale'] = v_line.AutocolorscaleValidator()\n self._validators['cauto'] = v_line.CautoValidator()\n self._validators['cmax'] = v_line.CmaxValidator()\n self._validators['cmin'] = v_line.CminValidator()\n self._validators['color'] = v_line.ColorValidator()\n self._validators['colorscale'] = 
v_line.ColorscaleValidator()\n self._validators['colorsrc'] = v_line.ColorsrcValidator()\n self._validators['dash'] = v_line.DashValidator()\n self._validators['reversescale'] = v_line.ReversescaleValidator()\n self._validators['showscale'] = v_line.ShowscaleValidator()\n self._validators['width'] = v_line.WidthValidator()\n\n # Populate data dict with properties\n # ----------------------------------\n v = arg.pop('autocolorscale', None)\n self.autocolorscale = autocolorscale if autocolorscale is not None else v\n v = arg.pop('cauto', None)\n self.cauto = cauto if cauto is not None else v\n v = arg.pop('cmax', None)\n self.cmax = cmax if cmax is not None else v\n v = arg.pop('cmin', None)\n self.cmin = cmin if cmin is not None else v\n v = arg.pop('color', None)\n self.color = color if color is not None else v\n v = arg.pop('colorscale', None)\n self.colorscale = colorscale if colorscale is not None else v\n v = arg.pop('colorsrc', None)\n self.colorsrc = colorsrc if colorsrc is not None else v\n v = arg.pop('dash', None)\n self.dash = dash if dash is not None else v\n v = arg.pop('reversescale', None)\n self.reversescale = reversescale if reversescale is not None else v\n v = arg.pop('showscale', None)\n self.showscale = showscale if showscale is not None else v\n v = arg.pop('width', None)\n self.width = width if width is not None else v\n\n # Process unknown kwargs\n # ----------------------\n self._process_kwargs(**dict(arg, **kwargs))", "def from_line(cls, line):\n # Define slices\n RECORD = slice(0, 6)\n NATOM = slice(7, 12)\n ATOM = slice(13, 18)\n RES = slice(19, 22)\n CHAIN = slice(23, 24)\n NRES = slice(24, 29)\n X = slice(30, 40)\n Y = slice(40, 50)\n Z = slice(50, 60)\n TYPE = slice(61, 66)\n NBOND = slice(66, 69)\n NLP = slice(70, 71)\n CHARGE = slice(71, 80)\n FIXED = slice(81, 82)\n record = line[RECORD].strip()\n natom = int(line[NATOM])\n atom = line[ATOM].strip()\n res = line[RES].strip()\n chain = line[CHAIN].strip()\n nres = int(line[NRES])\n x = float(line[X])\n y = float(line[Y])\n z = float(line[Z])\n fftype = line[TYPE].strip()\n nbond = int(line[NBOND])\n nlonepair = int(line[NLP])\n charge = float(line[CHARGE])\n try:\n fixed = int(line[FIXED])\n except IndexError:\n fixed = 0\n return cls(record, natom, atom, res, chain, nres, x, y, z, fftype,\n nbond, nlonepair, charge, fixed)", "def __init__(self, value, line=None):\n\n\t\tLOGGER.debug(\"> Initializing '{0}()' class.\".format(self.__class__.__name__))\n\n\t\tAbstractParsingError.__init__(self, value)\n\n\t\t# --- Setting class attributes. 
---\n\t\tself.__line = None\n\t\tself.line = line", "def from_line(cls, fieldset, pclass, start, finish, size, depth=None, time=None, repeatdt=None):\n\n lonlat_type = cls.lonlatdepth_dtype_from_field_interp_method(fieldset.U)\n lon = np.linspace(start[0], finish[0], size, dtype=lonlat_type)\n lat = np.linspace(start[1], finish[1], size, dtype=lonlat_type)\n if type(depth) in [int, float]:\n depth = [depth] * size\n return cls(fieldset=fieldset, pclass=pclass, lon=lon, lat=lat, depth=depth, time=time, repeatdt=repeatdt)", "def createLineSegment(self):\n return _libsbml.ReferenceGlyph_createLineSegment(self)", "def add_line(self, point1: Point, point2: Point, counts_as_step=True, interesting=False) -> Line:\n line = Line(point1=point1, point2=point2)\n self.add_step_premade(line, counts_as_step=counts_as_step, interesting=interesting)\n return line", "def createLineSegment(self):\n return _libsbml.GeneralGlyph_createLineSegment(self)", "def __init__(self, *args):\n _itkLineSpatialObjectPointPython.itkLineSpatialObjectPoint3_swiginit(self,_itkLineSpatialObjectPointPython.new_itkLineSpatialObjectPoint3(*args))", "def __init__(self, *args):\n _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2_swiginit(self,_itkLineSpatialObjectPointPython.new_vectoritkLineSpatialObjectPoint2(*args))", "def from_line(self, line: str):\n raise NotImplementedError()", "def __init__(self, origin, end, alpha=DEFAULT_ALPHA, fade_rate=DEFAULT_FADE_RATE, thickness=DEFAULT_THICKNESS, color=DEFAULT_COLOR):\n super(ShotLine, self).__init__()\n self.origin = origin\n self.end = end\n self.alpha = alpha\n self.fade_rate = float(fade_rate) / 1000\n self.thickness = thickness\n self.color = color", "def line(x0: float, y0: float, x1: float, y1: float) -> LineCollection:\n return LineCollection([(complex(x0, y0), complex(x1, y1))])", "def createLineSegment(self):\n return _libsbml.ReactionGlyph_createLineSegment(self)", "def __init__(\n self,\n line=None,\n *,\n knl=None,\n filled=True,\n resolution=1000,\n line_length=None,\n **kwargs,\n ):\n\n if knl is None:\n if line is None:\n raise ValueError(\"Either line or knl parameter must not be None\")\n knl = range(max([e.order for e in line.elements if hasattr(e, \"order\")]) + 1)\n if isinstance(knl, int):\n knl = range(knl + 1)\n if not isinstance(knl, str):\n knl = [[[f\"k{n}l\" for n in knl]]]\n if line is None and line_length is None:\n raise ValueError(\"Either line or line_length parameter must not be None\")\n self.S = np.linspace(0, line_length or line.get_length(), resolution)\n self.filled = filled\n\n super().__init__(\n on_x=\"s\",\n on_y=knl,\n **kwargs,\n )\n\n # create plot elements\n def create_artists(i, j, k, a, p):\n kwargs = dict(\n color=f\"C{order(p)}\",\n alpha=0.5,\n label=self.label_for(p, unit=True),\n )\n if self.filled:\n return a.fill_between(self.S, np.zeros_like(self.S), zorder=3, lw=0, **kwargs)\n else:\n return a.plot([], [], **kwargs)[0]\n\n self._create_artists(create_artists)\n\n for a in self.axflat:\n a.plot(self.S, np.zeros_like(self.S), \"k-\", lw=1, zorder=4)\n self.legend(show=\"auto\", ncol=5)\n\n # set data\n if line is not None:\n self.update(line, autoscale=True)", "def create_new_line(self, coords, **options):\n\n if 'fill' not in options:\n options['fill'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.line_width\n\n shape_id = self.create_line(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.LINE, options)\n 
self.variables.shape_ids.append(shape_id)\n self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def __init__(self, eid, pid, nids, g0, x, geom, comment=''):\n LineElement.__init__(self)\n if comment:\n self.comment = comment\n self.eid = eid\n self.pid = pid\n self.ga = nids[0]\n self.gb = nids[1]\n\n if g0 is None:\n assert x is not None, 'g0=%s x=%s; one must not be None' % (g0, x)\n self.g0 = g0\n self.x = x\n self.geom = geom\n assert self.geom in [1, 2, 3, 4], 'geom is invalid geom=%r' % self.geom\n self.ga_ref = None\n self.gb_ref = None\n self.pid_ref = None\n if self.g0 is not None:\n assert isinstance(self.g0, integer_types), self.get_stats()", "def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a date entry\n if not line[108] == '(':\n raise ParsingException\n if not line[164:165] == ')':\n raise ParsingException\n\n # Parsing definitions\n self.start_time = datetime.strptime(line[109:125], '%d-%b-%Y %H%M')\n self.end_time = datetime.strptime(line[128:144], '%d-%b-%Y %H%M')\n self.time_stamp = datetime.strptime(line[148:164], '%m/%d/%Y %H:%M')", "def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self._lines: TokenLines = defaultdict(list)", "def build(self, coordinates = None):\n\n # start- and endpoints of lines are nodes, but they do not need to have a point object associated to them\n # in this case, point coordinates should be set\n if (self.geo):\n coordinates = rs.PointCoordinates(self.geo)\n\n self.x = round(+ coordinates[0], 5)\n self.y = round(+ coordinates[1], 5)\n self.z = round(+ coordinates[2], 5)", "def __init__(self):\n self.circle=visual.Circle(win,radius=.5, edges=32, fillColor='white') \n self.circle2=visual.Circle(win,radius=.1, edges=32, fillColor='white') \n self.linev = visual.Line(win, start=(0,.8), end=(0,-.8), lineWidth=6, lineColor='black') \n self.lineh = visual.Line(win, start=(.8,0), end=(-.8,0), lineWidth=6, lineColor='black') \n \n self.components = [self.circle, self.circle2, self.linev, self.lineh]", "def add_line_element(self, obj, typ_sofi, layer):\n\n bm = LineElement(obj, typ_sofi)\n bm.n1 = self.nodes.add(Node(None, rs.CurveStartPoint(obj)))\n bm.n2 = self.nodes.add(Node(None, rs.CurveEndPoint(obj)))\n\n bm.mark_start_point()\n bm.layer = layer\n\n self.line_elements.add(bm)", "def __init__(self, timestamp: int, line_number: int, c: Customer) -> None:\n Event.__init__(self, timestamp)\n self.line_number = line_number\n self.customer = c", "def __init__(self, ax, useblit=False, **lineprops):\n self.ax = ax\n self.canvas = ax.figure.canvas\n\n self.canvas.mpl_connect('motion_notify_event', self.onmove)\n self.canvas.mpl_connect('draw_event', self.clear)\n\n self.visible = True\n self.horizOn = True\n self.vertOn = True\n self.useblit = useblit\n\n self.lineh = ax.axhline(0, visible=False, **lineprops)\n self.linev = ax.axvline(0, visible=False, **lineprops)\n\n self.background = None\n self.needclear = False", "def __init__(self, x, y):\n x = np.asarray(x)\n y = np.asarray(y)\n x = np.ravel(x)\n y = np.ravel(y)\n if x.shape != y.shape:\n raise ValueError(\"x and y must have the same length\")\n self.x = x\n self.y = y\n \n self.n = int(len(self.x))\n\n self.qb, self.qc = self.qspline_params()\n self.cb, self.cc, self.cd = self.cspline_params()", "def constructTimeLineItem(self):\n\t\treturn", "def init(self) -> None:\n self.started = False\n self.lines = []\n self.text = ''\n self.graphics = 
''\n self.ids = {}\n self.first_line_added = False\n\n self.used_fonts = set()\n self.current_line_used_fonts = set()\n self.current_height = 0\n self.lines = []\n\n line_width = self.width - (self.indent if self.is_first_line else 0)\n self.current_line = PDFTextLine(\n self.fonts, line_width, self.text_align, self.line_height\n )\n\n self.last_indent = 0\n self.last_state = self.last_factor = self.last_fill = None\n self.last_color = self.last_stroke_width = None\n\n self.y_ = 0", "def __init__(self, lines):\n\t\tself.lines = lines\n\t\tself.points = set()\n\t\tfor l in lines:\n\t\t\tif not l.a in self.points:\n\t\t\t\tself.points.add(l.a)\n\t\t\tif not l.b in self.points:\n\t\t\t\tself.points.add(l.b)", "def __init__(self, line):\n (self.seqid, \n self.source, \n self.type, \n self.start, \n self.end, \n self.score, \n self.strand, \n self.phase, \n self.attributes_str) = line.strip().split('\\t')\n # preserve attribute order as a list of keys (attributes_order)\n attributes_list = self.attributes_str.split(';')\n self.attributes_order = [attr.split('=')[0] for attr in \n attributes_list]\n # store attribute keys and their values in a dictionary\n self.attributes = {attr.split('=')[0]:attr.split('=')[1] for attr in \n attributes_list}\n # rename the name attribute key to Name so it conforms to the\n # GFF3 specification, where Name is a reserved attribute key\n if 'name' in self.attributes:\n self.attributes['Name'] = self.attributes.pop('name')\n self.attributes_order[self.attributes_order.index('name')] = 'Name'", "def create_line(uniform = True, *args):\n axis = cmds.radioButtonGrp(widgets[\"lineAxisRBG\"], q=True, sl=True)\n length = cmds.floatFieldGrp(widgets[\"lineLenFFG\"], q=True, v1=True)\n density = cmds.floatFieldGrp(widgets[\"lineDenFFG\"], q=True, v1=True)\n\n numCvs = length * density\n if numCvs < 3.0: # curve needs 3 cvs (for 3 dg curve)\n numCvs = 3.0\n\n cvDist = length/numCvs\n\n # make a list of pt dist along some axis\n axisList = []\n for x in range(0,int(numCvs)+1):\n axisList.append(x)\n\n pts = []\n\n if axis == 1:\n for y in range(0, int(numCvs)+1):\n pt = [axisList[y]*cvDist, 0, 0]\n pts.append(pt)\n\n if axis == 2:\n for y in range(0, int(numCvs)+1):\n pt = [0, axisList[y]*cvDist, 0]\n pts.append(pt)\n\n if axis == 3:\n for y in range(0, int(numCvs)+1):\n pt = [0, 0, axisList[y]*cvDist]\n pts.append(pt)\t\t\t\n \n line = cmds.curve(name = \"line_01\", d=3, p=pts)\n shp = cmds.listRelatives(line, s=True)[0]\n cmds.rename(shp, \"{0}Shape\".format(line))\n if uniform:\n line = cmds.rebuildCurve(line, rebuildType = 0, spans = 0, keepRange = 0, replaceOriginal=True, end=1, keepControlPoints=0)[0]\n\n cmds.select(line, r=True)", "def create_lines(self) -> None:\n res = []\n for connection in self.connections:\n start_component = self.components[connection.start_entity]\n end_component = self.components[connection.end_entity]\n start_pin_location = (\n start_component.location\n + start_component.pin_locations[connection.start_pin]\n )\n end_pin_location = (\n end_component.location + end_component.pin_locations[connection.end_pin]\n )\n\n x_midpoint = (start_pin_location.x + end_pin_location.x) / 2\n bend_start = Point(x_midpoint, start_pin_location.y)\n bend_end = Point(x_midpoint, end_pin_location.y)\n bends = [bend_start, bend_end]\n res.append(Line(connection, start_pin_location, *bends, end_pin_location))\n\n self.lines = res", "def createFromLine(line):\n return HalfLine(line.point, line.angle)", "def __init__(self, *args):\n 
_itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3_swiginit(self,_itkLineSpatialObjectPointPython.new_vectoritkLineSpatialObjectPoint3(*args))", "def __init__(self):\n\t\tself.theta = 0.8\t\t\t# Theta value, the constant of the line which x+y is.(1.2 is best)\n\t\tself.numberOfInput = 0\t\t# The number of Input\n\t\tself.weight = []\t\t\t# The list of weight.", "def __init__(self, line_item_id=None, quantity=None): # noqa: E501 # noqa: E501\n self._line_item_id = None\n self._quantity = None\n self.discriminator = None\n if line_item_id is not None:\n self.line_item_id = line_item_id\n if quantity is not None:\n self.quantity = quantity", "def __init__(self, wavelength):\n # store experimental data\n self.x = wavelength\n\n # Central wavelengths of the lines are known constants:\n self.c1 = 422.\n self.c2 = 428.", "def __init__(self, slope):\n self.slope = slope", "def __init__(self, x0, y0, x1, y1):\n\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1", "def __init__(self, eid, pid, nids,\n x, g0, offt='GGG',\n pa=0, pb=0, wa=None, wb=None, comment=''):\n LineElement.__init__(self)\n if comment:\n self.comment = comment\n if wa is None:\n wa = np.zeros(3, dtype='float64')\n else:\n wa = np.asarray(wa)\n if wb is None:\n wb = np.zeros(3, dtype='float64')\n else:\n wb = np.asarray(wb)\n\n if x is not None:\n x = np.asarray(x)\n if isinstance(offt, str):\n offt = offt.replace('E', 'O')\n offt = int(offt) if offt.isdigit() else offt\n\n self.eid = eid\n self.pid = pid\n self.x = x\n self.g0 = g0\n self.ga = nids[0]\n self.gb = nids[1]\n self.offt = offt\n self.pa = pa\n self.pb = pb\n self.wa = wa\n self.wb = wb\n self.pid_ref = None\n self.ga_ref = None\n self.gb_ref = None\n self.g0_ref = None\n self.g0_vector = None", "def __init__(self, line, match):\n\n self.line = line\n self.match = match", "def __init__(self, x, y):\n self._x = x\n self._y = y", "def __init__(self, line_parser, *filename):\n \n self.line_parser = line_parser\n self.f = fileinput.input(filename)", "def __init__(self, linearExpression):\n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression", "def __init__(self, linearExpression):\n LinearExpression.__init__(self)\n\n self.linearExpression = linearExpression", "def createLineSegment(self):\n return _libsbml.SpeciesReferenceGlyph_createLineSegment(self)", "def Line(self, lat1, lon1, azi1,\n caps = GeodesicCapability.STANDARD |\n GeodesicCapability.DISTANCE_IN):\n\n from geographiclib.geodesicline import GeodesicLine\n return GeodesicLine(self, lat1, lon1, azi1, caps)", "def line(self, x, y):\n self.call('line', x, y)", "def __init__(self, points=None, dimensions=None):\n\n # check whether they are lines (ribbons) or not. 
A line consists of 2 points\n if points is not None:\n if len(points) == 4 and isinstance(points, Sequence) and isinstance(points[0], Sequence) and \\\n len(points[0]) == 2:\n # 4 lists of sequences (with each 2 points) represents lines\n points = tuple([x for x in points]) # unpack points inside a tuple\n\n # check if there are 8 points which consist of sequences\n if len(points) != 8 or not isinstance(points[0], Sequence):\n # print(\"debugging: \" + str(points))\n raise TypeError('8 points or 4 ribbons (lines) needed to create a cube, got: {}'.format(len(points)))\n elif dimensions is not None:\n # create a new rectangle from a 3-vector (length, height, depth)\n #\n if isinstance(dimensions, Sequence) and len(dimensions) == 3:\n points = []\n for p in self.UNIT:\n # very slow\n p = [s if s == 1 else 0 for s in p]\n points.append([np.prod(*v) for v in zip(dimensions, p)])\n else:\n raise TypeError(f\"dimensions vector must be of length 3 (h, l, d), actual: {dimensions}\")\n else:\n raise TypeError(\"No valid value passed (vector points or dimensions vector\")\n\n # make homogeneous\n points = [(*p, 1) for p in points]\n\n if type(points[0]) != np.ndarray:\n points = [np.array(p) for p in points]\n\n self.points = points", "def test_constructor_with_value(self):\n line = D1Line(self.test_line)\n self.assertEqual((line.gender,\n line.event_swimmer_id,\n line.last_name,\n line.first_name,\n line.nick_name,\n line.middle_initial,\n line.uss_num,\n line.team_swimmer_id,\n line.date_of_birth,\n line.age),\n (\"F\",\n 14081,\n \"Reed\",\n \"Laramie\",\n \"\",\n \"J\",\n \"021100LARJREED\",\n 1019,\n datetime.date(2000, 2, 11),\n 9))", "def __init__(self, numero, cliente: Cliente, lineas: list):\r\n self.__numero = numero\r\n self.__cliente = cliente\r\n self.__lineas = lineas", "def __init__(self, source):\n if isinstance(source, str):\n self.line_iter = iter(source.splitlines())\n elif isinstance(source, io.TextIOBase):\n self.line_iter = source\n else:\n raise TypeError('source must be either a string or a text file')\n self.line_iter = enumerate(self.line_iter)\n self.source = source", "def __init__(self, x=0, y=0):\n self._x = x\n self._y = y", "def __init__(self, lines):\n self.shop = None\n\n if lines:\n # Detect if user used semicolon and convert to comma\n if len(lines[0].split(';'))>1:\n lines = '\\n'.join(lines)\n lines = lines.replace(';', ',')\n lines = lines.split('\\n')\n # Ignore comments\n lines = [line for line in lines if not line.startswith('#')]\n\n self.lines = lines\n self.line_count = len(lines)\n self.warning = None", "def plot_line(self,x_0,y_0,x_1,y_1,col=\"black\",line_width=1,line_type=\"solid\"):\n self._fig.add_shape(\n go.layout.Shape(\n type=\"line\",\n x0=x_0,\n y0=y_0,\n x1=x_1,\n y1=y_1,\n line=dict(\n color=col,\n width=line_width,\n dash=line_type\n )\n )\n )", "def __init__(self, m=np.random.normal(M_INIT, .25, 1)[0], b=np.random.normal(B_INIT, .25, 1)[0], \\\n\t\t\t\t\tt=np.random.normal(T_INIT, .25, 1)[0], l=L_INIT*np.random.normal(1.0, .25, 1)[0]):\n\t\t\n\t\tself.shape_slope = m\n\t\tself.z_thick = b\n\t\tself.thick = t\n\t\tself.length = l", "def like(obj, **kwargs):\n\n # Create a new line object that is the same as the provided line.\n # Set the initialize leyword to False to skip initializing the data attribute.\n new_line = empty_like(obj, initialize=False, **kwargs)\n\n # Set the new line data attribute to a copy of the provided line's data\n new_line.data = obj.data.copy()\n\n return new_line", "def __init__(self, line):\n # Throw an exception if 
we are not in the outage section of the ticket\n if not line[:107].strip():\n raise ParsingException\n\n self._fwf = FwfSlicer(FIXED_FORMAT)\n self.line = line\n\n # Parsing definitions\n self.zone = self._get_col(2).strip()\n self.equipment_type = self._get_col(3)[1:5]\n self.station = self._get_col(3)[6:14].strip()\n self.facility_name = self._get_col(3)[21:].strip()\n self.start_time = datetime.strptime(self._get_col(4).strip(), '%d-%b-%Y %H%M')\n self.end_time = datetime.strptime(self._get_col(5).strip(), '%d-%b-%Y %H%M')\n self.open_closed = self._get_col(6).strip()", "def __init__(self, network, lines, preimages=True):\n self.network = network\n self.lines = lines\n self.preimages = preimages\n\n self.partially_computed = False\n self.transformed_lines = None\n\n self.computed = False\n self.classifications = None", "def __init__(self, x, y):\n\t\t\n\t\tself.x, self.y = x, y", "def empty_like(obj, name=None, color=None, linestyle=None, thickness=None,\n initialize=True):\n # Create a new line object to return.\n new_line = line(ping_time=obj.ping_time.copy())\n\n # Check if new properties were provided, otherwise copy from original.\n if color:\n new_line.color = color\n else:\n if isinstance(obj, line):\n new_line.color = obj.color\n if name:\n new_line.name = name\n else:\n if isinstance(obj, line):\n new_line.name = obj.name\n if linestyle:\n new_line.linestyle = linestyle\n else:\n if isinstance(obj, line):\n new_line.linestyle = obj.linestyle\n if thickness:\n new_line.thickness = thickness\n else:\n if isinstance(obj, line):\n new_line.thickness = obj.thickness\n\n # Set the data array to NaNs.\n new_line.data = np.full(new_line.ping_time.shape[0], np.nan)\n\n return new_line", "def __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y", "def test_constructor_with_value(self):\n line = C2Line(self.good_line)\n self.assertEqual((line.addr_name,\n line.addr,\n line.addr_city,\n line.addr_state,\n line.addr_zip,\n line.addr_country,\n line.team_reg),\n (\"Jim Beam\",\n \"123 Main Street\",\n \"Lexington\",\n \"KY\",\n \"40514\",\n \"USA\",\n \"USS\"))", "def getLine(self, correct=True):\n return Line(self.point, self.angle, correct=correct)", "def __init__(self, line):\n # Throw an exception if we don't see the parenthesis that mark a history entry\n if not line[108] == '(':\n raise ParsingException\n if not line[138:139] == ')':\n raise ParsingException\n\n self.status = line[109:122].strip()\n self.time_stamp = datetime.strptime(line[122:138], '%m/%d/%Y %H:%M')", "def __init__(self, line: int, col: int) -> None:\n self._contents=\"\"\n super().__init__(self._contents)\n self._line=line\n self._col=col", "def __init__(self, src_lines):\n self.study_id = None\n self.citation = None\n self.abstract = None\n self.authors = []\n self.study_matrices = {}\n self.history_date = None\n self.history_time = None\n self.history_person = None\n self.history_event = None\n self.analyses = []\n \n self.parse_src_lines(src_lines)", "def __init__(self):\n\n self.parser = self.define_parser()\n self.pen = Pen()", "def test_line_class_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n # line:\n #\n # brush type\n (\n struct.pack(lines.BrushType.fmt, lines.BrushType.REVERSE['pen']),\n lines.BrushType.REVERSE['pen'],\n ),\n # colour\n (\n struct.pack(lines.Colour.fmt, lines.Colour.REVERSE['black']),\n lines.Colour.REVERSE['black']\n ),\n # magical unknown line attribute 1\n (\n struct.pack(lines.LineAttribute1.fmt, 0),\n 0\n ),\n # base brush size\n (\n struct.pack(\n 
lines.BrushBaseSize.fmt, lines.BrushBaseSize.REVERSE['small']\n ),\n lines.BrushBaseSize.REVERSE['small']\n ),\n # one point:\n (struct.pack(lines.Points.fmt, 1), 1),\n # the single point's data:\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Line.load(position)\n assert result.brush_type.name == 'pen'\n assert result.colour.name == 'black'\n assert result.line_attribute1.value == 0\n assert result.brush_base_size.name == 'small'\n assert result.points.count == 1\n result = result.points.points[0]\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216" ]
[ "0.80231184", "0.7601697", "0.7555074", "0.74823064", "0.74259686", "0.73953325", "0.7313491", "0.6963556", "0.69590884", "0.6912488", "0.69103134", "0.68802094", "0.6859208", "0.6848757", "0.68076146", "0.6779403", "0.674837", "0.674837", "0.6747915", "0.6672266", "0.6634589", "0.6630196", "0.6562391", "0.65426254", "0.65329367", "0.6531408", "0.6526709", "0.6505575", "0.6505575", "0.6502597", "0.6417213", "0.6417213", "0.6417213", "0.64164", "0.6413515", "0.64107287", "0.6408251", "0.63593394", "0.63494605", "0.6305811", "0.6289716", "0.62864447", "0.6285673", "0.6283335", "0.6280301", "0.6260663", "0.6219677", "0.6217629", "0.6217417", "0.6207099", "0.6200831", "0.6174068", "0.6164585", "0.6162088", "0.612871", "0.611839", "0.6112043", "0.6095829", "0.6091664", "0.6074826", "0.60743314", "0.60685223", "0.60673374", "0.606588", "0.60640687", "0.6062733", "0.6061165", "0.6057584", "0.6056629", "0.60372347", "0.6036688", "0.60295945", "0.6028467", "0.6022924", "0.60201484", "0.60201484", "0.6017314", "0.60085285", "0.6005247", "0.59999853", "0.5998171", "0.59951997", "0.59927154", "0.5991066", "0.5990208", "0.5989422", "0.5988316", "0.59870887", "0.5966698", "0.59664387", "0.5964035", "0.5956207", "0.5948657", "0.5947856", "0.59464455", "0.594443", "0.5942068", "0.5940991", "0.59377015", "0.59354466" ]
0.65474164
23
Function to set the tolerance.
def set_tolerance(self, tol):
    self.tolerance = tol
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tolerance(self, tolerance: float) -> None:\n self._tolerance = tolerance", "def set_tolerance(self, value):\n\n self._tolerance = value", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def tol(self, value):\n self._tol = value", "def set_tol(self, tol : float):\n self.tol = tol", "def set_tolerance(self, *args, **kwargs):\n raise ParameterError(\"The %s StoppingCriterioin does not yet support resetting tolerances.\")", "def set_tolerance(rel_tolerance=1e-09, abs_tolerance=0.0):\n global REL_TOLERANCE, ABS_TOLERANCE\n REL_TOLERANCE = rel_tolerance\n ABS_TOLERANCE = abs_tolerance", "def setTolerance(self, eps):\n self._simulator_.update(eps=eps)\n return", "def _set_tolerances(self, atol=None, rtol=None, maxiter=None):\n atol = self.atol if atol is None else atol\n rtol = self.rtol if rtol is None else rtol\n maxiter = self.maxiter if maxiter is None else maxiter\n # BUG: PETSc misses rtol requirement by ~10-20X -> Report to petsc4py\n self.ksp.setTolerances(atol=None, rtol=rtol/50, max_it=maxiter)", "def tolerance(self) -> float:\n return self._tolerance", "def tolerance(self):\n return self.params['tolerance']", "def set_particle_tolerance(self, value):\n\n self._particle_tolerance = value", "def tolerance(self):\n return self._tolerance", "def set_abs_tolerance(self, value):\n\n self._abs_tolerance = value", "def set_size_tolerance(self, tolerance):\n self._size_tolerance = tolerance", "def get_tolerance(self):\n return self.tolerance", "def SetTol(self, tol):\n return _hypre.HypreLOBPCG_SetTol(self, tol)", "def tol(self, atol: Real):\n if not isinstance(atol, Real):\n raise TypeError(\"The attribute tol must be a real number.\")\n if 0 <= atol < 1:\n self._tol = atol\n else:\n raise ValueError(\"Need 0 <= tol < 1.\")", "def SetTol(self, tol):\n return _hypre.HyprePCG_SetTol(self, tol)", "def SetTol(self, tol):\n return _hypre.HypreBoomerAMG_SetTol(self, tol)", "def set_tol(iprec):\n \n tol = -1\n \n if iprec == -2:\n tol = 0.5\n \n elif iprec == -1:\n tol = 0.5 * 10**-1\n \n elif iprec == 0:\n tol = 0.5 * 10**-2\n \n elif iprec == 1:\n tol = 0.5 * 10**-3\n \n elif iprec == 2:\n tol = 0.5 * 10**-6\n \n elif iprec == 3:\n tol = 0.5 * 10**-9\n \n elif iprec == 4:\n tol = 0.5 * 10**-12\n \n elif iprec == 5:\n tol = 0.5 * 10**-15\n \n return tol", "def SetTol(self, tol):\n return _hypre.HypreGMRES_SetTol(self, tol)", "def SetMaxTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_SetMaxTolerance(self, *args)", "def update_vrad_tolerance(self):\n try:\n value = float(self.edit_vrad_tolerance.text())\n except:\n value = None\n self._get_selected_model().metadata[\"velocity_tolerance\"] = value\n return None", "def SetTol(self, tol):\n return _hypre.HypreAME_SetTol(self, tol)", "def set_tol_2d(value=1e-9):\r\n geometry.gmSetXyTol(value)", "def tol(self) -> Real:\n return self._tol", "def SetMinTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_SetMinTolerance(self, *args)", "def LimitTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_LimitTolerance(self, *args)", "def SetTol(self, tol):\n return _hypre.HypreFGMRES_SetTol(self, tol)", "def tol(self):\n return self._tol", "def tolerance_level(self, tolerance_level=None):\n if tolerance_level is not None:\n self._tolerance_level = tolerance_level\n return self._tolerance_level", "def set_epsilon(value):\n global _EPSILON\n _EPSILON = value", "def SetMaxTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetMaxTolerance(self, *args)", "def 
performance_tolerance():\n tolerance = os.environ.get(\"TEST_PERFORMANCE_TOLERANCE\")\n\n if tolerance:\n tolerance = float(tolerance)\n print(f\"Testing performance tolerance multiplier set to: {tolerance}x\")\n else:\n tolerance = 1\n print(\"Testing performance tolerance not set. Using the default value: 1.0x\")\n\n return tolerance", "def Tolerance(self):\n\t\treturn self._get_attribute('tolerance')", "def get_tolerance(self):\n\n if Test.global_tolerance is None:\n return self._tolerance\n return Test.global_tolerance", "def set_epsilon(self,epsilon):\r\n\t\tself.epsilon = epsilon", "def angle_tolerance(self, angle_tolerance):\n\n self._angle_tolerance = angle_tolerance", "def update_vrad_tolerance_2(self):\n try:\n value = float(self.edit_vrad_tolerance_2.text())\n except:\n value = None\n self._get_selected_model().metadata[\"velocity_tolerance\"] = value\n return None", "def setTolerances(self, rtol = 1.0e-9,\n atol = 1.0e-20, rtolsens= -1.0, atolsens = -1.0):\n _cantera.reactornet_setTolerances(self.__reactornet_id, rtol, atol)\n _cantera.reactornet_setSensitivityTolerances(self.__reactornet_id, rtolsens, atolsens)", "def SetMinTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivide_SetMinTolerance(self, *args)", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def update_wavelength_tolerance(self):\n try:\n value = float(self.edit_wavelength_tolerance.text())\n except:\n value = None\n self._get_selected_model().metadata[\"wavelength_tolerance\"] = value\n return None", "def get_tolerances(self) -> Tuple[float, float]:\n return self.rtol, self.atol", "def __init__(self, rt_tol):\n self.rt_tol = rt_tol", "def test_setMassFrac(self):\n target35 = 0.2\n self.fuel.setMassFrac(\"U235\", target35)\n self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), target35)", "def StepTolerance(self):\n\t\treturn self._get_attribute('stepTolerance')", "def gtol(self, value: Union[int, float, GradientRMS]):\n\n if float(value) <= 0:\n raise ValueError(\n \"Tolerance on the gradient (||∇E||) must be \"\n f\"positive. 
Had: gtol={value}\"\n )\n\n self._gtol = GradientRMS(value)", "def chord_tolerance(self, chord_tolerance):\n\n self._chord_tolerance = chord_tolerance", "def SetAbsTol(self, tol):\n return _hypre.HypreGMRES_SetAbsTol(self, tol)", "def calc_tolerance(wt):\n return 1 - wt", "def MinTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_MinTolerance(self, *args)", "def test_tolerance(self):\n # With idw no sites within tolerance should result in masked output\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\", tolerance=1.0\n )\n assert dset.spec.hs().values.all()\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\", tolerance=0.01\n )\n assert np.isnan(dset.spec.hs().values).any()\n # With nearest no sites within tolerance should raise an exception\n with pytest.raises(AssertionError):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact,\n lats=self.lats_inexact,\n method=\"nearest\",\n tolerance=0.01,\n )", "def _rtol(self, rtol):\n # NOTE: that this overrides the class value so applies to all\n # instances of the class.\n max_tol = self.__class__.MAX_TOL\n if rtol < 0:\n raise QiskitError(\"Invalid rtol: must be non-negative.\")\n if rtol > max_tol:\n raise QiskitError(\n \"Invalid rtol: must be less than {}.\".format(max_tol))\n self.__class__.RTOL = rtol", "def __init__(self, rt_tol, exclusion_t_0):\n super().__init__()\n self.rt_tol = rt_tol\n self.exclusion_t_0 = exclusion_t_0\n assert self.exclusion_t_0 <= self.rt_tol", "def SetAbsTol(self, atol):\n return _hypre.HyprePCG_SetAbsTol(self, atol)", "def xtol(self):\n return self._xtol", "def MaxTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_MaxTolerance(self, *args)", "def add_tol(self):\n return self._add_tol", "def set_value ( var , value , ok = lambda a , b : True ) :\n\n ## must be roofit variable! 
\n assert isinstance ( var , ROOT.RooAbsReal ) , 'Invalid type of ``var'' %s' % type ( var )\n \n if not hasattr ( var , 'setVal' ) :\n raise ValueError ( \"No value can be set for %s/%s\" % ( var , type ( var ) ) ) \n\n ## convert to float \n value = float ( value )\n\n ## check for the range, if defined \n minmax = var.minmax ()\n if minmax :\n mn , mx = minmax\n if not ( mn <= value <= mx or isequal ( mn , value ) or isequal ( mx , value ) ) :\n raise ValueError ( \"Value %s is outside of the [%s,%s] region\" % ( value , mn , mx ) ) \n \n ## check for external conditions, if specified \n if not ok ( var , value ) :\n raise ValueError ( \"Value %s is not OK\" % value ) \n\n ## finally set the value \n var.setVal ( value )\n\n return isequal ( value , var.getVal () )", "def get_abs_tolerance(self):\n\n if Test.global_abs_tolerance is None:\n return self._abs_tolerance\n return Test.global_abs_tolerance", "def set_accuracy_95(num: float) -> float:\n ...", "def setUp(self):\n self.upper = 230.0\n self.lower = 195.0", "def equals_exact(self, other, tolerance): # -> bool:\n ...", "def __init__(self, epsilon=1e-14):\n self.epsilon = epsilon", "def tolerance(*args, angular: Union[float, bool]=0.0, linear: Union[float, bool]=0.0, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def set_feastol(self, feastol : float):\n self.feastol = feastol", "def __init__(self, epsilon=1e-7):\n super().__init__()\n self.epsilon = epsilon", "def SetRelTol(self, rel_tol):\n return _hypre.HypreAME_SetRelTol(self, rel_tol)", "def test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def test_set_float_percentage(self):\n\n self.feature_test.set_percentage(50.5)\n self.assertEqual(self.feature_test.percentage, 50)", "def __init__(self, ppm_tolerance=20, min_score=0.6, min_charge=1, max_charge=7,\r\n min_abundance=0.25, min_improve=0.3, verbose=False):\r\n self.ppm_tolerance = ppm_tolerance\r\n self.min_score = min_score\r\n self.min_charge = min_charge\r\n self.max_charge = max_charge\r\n self.min_abundance = min_abundance\r\n self.min_improve = min_improve\r\n self.verbose = verbose", "def __init__(self, eps: float=1e-5):\n self.eps = eps", "def adjustTolerance(desiredNbrOfColors,newColorList):\r\n global comparisonTolerance\r\n L = getFinalColorList(newColorList)\r\n while len(L) > desiredNbrOfColors:\r\n comparisonTolerance += 10\r\n L = getFinalColorList(newColorList)\r\n while len(L) < desiredNbrOfColors:\r\n comparisonTolerance -= 1\r\n L = getFinalColorList(newColorList)", "def test_set_outside_bounds_default_value(self):\n with pytest.raises(ValueError):\n Real(\"yolo\", \"uniform\", -3, 2, default_value=5)", "def vsSetValue(self, value):\n self._vs_value = float(value)", "def epsilon():\n return _EPSILON", "def update_parameter(self):\n\n if self.testing: # 1. No random choice when testing\n self.epsilon = 0\n else: # 2. 
Update parameters when learning\n if self.epsilon > 0.:\n self.epsilon -= 0.01\n\n return self.epsilon", "def testSetWithNegativeFloat(self):\n def setSat():\n self.node.sat = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.node.sat\n )", "def setUp(self):\r\n # How many times a p-value should be tested to fall in a given range\r\n # before failing the test.\r\n self.p_val_tests = 10", "def test_fails_cval_set_wrong(self):\n msg = \"cval must be greater than 0.0\"\n with self.assertRaisesRegex(ValueError, msg):\n NonLinearWeights(-0.1)\n with self.assertRaisesRegex(ValueError, msg):\n NonLinearWeights(1.85)", "def _get_tol(self, math_fn, ty):\n\n if utils.PYVERSION > (2, 6):\n low_res = {\n (math.gamma, np.float64): 1e-14,\n (math.lgamma, np.float64): 1e-13,\n }\n else:\n low_res = {}\n\n default = 1e-15 if ty == np.float64 else 1e-6\n return low_res.get((math_fn, ty), default)", "def SetRelTol(self, rel_tol):\n return _hypre.HypreLOBPCG_SetRelTol(self, rel_tol)", "def get_particle_tolerance(self):\n\n if Test.global_particle_tolerance is None:\n return self._particle_tolerance\n return Test.global_particle_tolerance", "def test_fixed_point(testFunctions, tol, printFlag): \n pass", "def _add_tolerance_configs(CONFIG):\n CONFIG.declare(\n 'absolute_bound_tolerance',\n ConfigValue(\n default=1e-4,\n domain=PositiveFloat,\n description='Bound tolerance',\n doc='Absolute tolerance for bound feasibility checks.',\n ),\n )\n CONFIG.declare(\n 'relative_bound_tolerance',\n ConfigValue(\n default=1e-3,\n domain=PositiveFloat,\n description='Relative bound tolerance',\n doc='Relative tolerance for bound feasibility checks. '\n ':math:`|Primal Bound - Dual Bound| / (1e-10 + |Primal Bound|) <= relative tolerance`',\n ),\n )\n CONFIG.declare(\n 'small_dual_tolerance',\n ConfigValue(\n default=1e-8,\n description='When generating cuts, small duals multiplied '\n 'by expressions can cause problems. Exclude all duals '\n 'smaller in absolute value than the following.',\n ),\n )\n CONFIG.declare(\n 'integer_tolerance',\n ConfigValue(default=1e-5, description='Tolerance on integral values.'),\n )\n CONFIG.declare(\n 'constraint_tolerance',\n ConfigValue(default=1e-6, description='Tolerance on constraint satisfaction.'),\n )\n CONFIG.declare(\n 'variable_tolerance',\n ConfigValue(default=1e-8, description='Tolerance on variable bounds.'),\n )\n CONFIG.declare(\n 'zero_tolerance',\n ConfigValue(default=1e-8, description='Tolerance on variable equal to zero.'),\n )", "def set_fd_step(eps):\n assert isinstance(eps, (float, complex))\n global EPS\n EPS = eps", "def testSatSetNegative(self):\n def setSat():\n self.cc.sat = -376.23\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSat\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSat()\n\n self.assertEqual(\n Decimal('0.0'),\n self.cc.sat\n )", "def test_constants(self):\r\n\r\n # Of the form ('expr', python value, tolerance (or None for exact))\r\n default_variables = [\r\n ('i', 1j, None),\r\n ('j', 1j, None),\r\n ('e', 2.7183, 1e-4),\r\n ('pi', 3.1416, 1e-4),\r\n ('k', 1.3806488e-23, 1e-26), # Boltzmann constant (Joules/Kelvin)\r\n ('c', 2.998e8, 1e5), # Light Speed in (m/s)\r\n ('T', 298.15, 0.01), # Typical room temperature (Kelvin)\r\n ('q', 1.602176565e-19, 1e-22) # Fund. 
Charge (Coulombs)\r\n ]\r\n for (variable, value, tolerance) in default_variables:\r\n fail_msg = \"Failed on constant '{0}', not within bounds\".format(\r\n variable\r\n )\r\n result = calc.evaluator({}, {}, variable)\r\n if tolerance is None:\r\n self.assertEqual(value, result, msg=fail_msg)\r\n else:\r\n self.assertAlmostEqual(\r\n value, result,\r\n delta=tolerance, msg=fail_msg\r\n )", "def __init__(self):\n super().__init__()\n self.nan_penalty = nan_penalty\n self.nan_tol = nan_tol", "def _setUpdateExpected(self, value):\n self.__isUpdateExpected = value", "def _set_bet_limit(self) -> None:\n for i, ratio in enumerate(BET_LIMIT_RATIOS):\n self._bet_limits[i] = self._treasury_min.get() // ratio", "def setUp(self):\n self.maxDiff = None\n pass", "def set(self, param, value):\r\n # continuous testing of inputs\r\n if self.testing_unit.testing_level > 1 and not self.testing_unit.c_test_set_inp(param, value):\r\n raise ValueError(\"set won't run, input's aren't valid.\")\r\n\r\n # continuous testing of functional inputs\r\n if self.testing_unit.testing_level > 0:\r\n if param in [\"weighting_bias\"]:\r\n if not [self.testing_unit.c_test_weighting_bias][[\"weighting_bias\"].index(param)](value):\r\n raise ValueError(\"Bad \" + param + \" input. See log or raise testing verbosity.\")\r\n\r\n self.__locals[param] = value # Security Risk\r\n return 1 # Success\r", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def test_mod_float():\n with pytest.raises(ValueError) as __:\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n num_a.value %= 1.5", "def __init__(self, numeric):\n self.absoluteerrorrange = abs(numeric)", "def update_epsilon(self):\n\t\tif self.epsilon > self.epsilon_min:\n\t\t\tself.epsilon *= self.epsilon_decay", "def testSetWithFloat(self):\n self.node.sat = 100.1\n\n self.assertEqual(\n Decimal('100.1'),\n self.node.sat\n )" ]
[ "0.8269015", "0.81052655", "0.79584485", "0.7956575", "0.78157336", "0.7737034", "0.7723517", "0.76122135", "0.7429519", "0.7174162", "0.71566623", "0.7126722", "0.7088782", "0.68695533", "0.6833426", "0.676617", "0.66971606", "0.668103", "0.66773707", "0.665312", "0.6650963", "0.6629326", "0.6607332", "0.65898097", "0.65870655", "0.6579697", "0.6524468", "0.6506353", "0.6489616", "0.647758", "0.64388734", "0.6417718", "0.64006156", "0.638162", "0.6370938", "0.624854", "0.624481", "0.62313116", "0.6118928", "0.6082569", "0.6076792", "0.6064491", "0.603698", "0.59487015", "0.59151417", "0.5908588", "0.59076965", "0.58465195", "0.58180916", "0.5781246", "0.57782376", "0.576489", "0.5763495", "0.57629645", "0.5747691", "0.573876", "0.56485933", "0.5585248", "0.5550472", "0.55436194", "0.553477", "0.5459162", "0.5428697", "0.5425223", "0.5393988", "0.53724134", "0.53700805", "0.53547394", "0.53233415", "0.52978116", "0.529175", "0.52855825", "0.52826333", "0.5278895", "0.5268246", "0.5226246", "0.52229494", "0.5220105", "0.5220029", "0.5216673", "0.521597", "0.5210523", "0.51896715", "0.51819754", "0.517683", "0.51749927", "0.51410645", "0.5134201", "0.5119061", "0.51147324", "0.5110805", "0.50931805", "0.50815326", "0.507946", "0.5079175", "0.507916", "0.5057973", "0.5054744", "0.5038671", "0.5037908" ]
0.8542296
0
Function to get the tolerance. Returns
def get_tolerance(self):
    return self.tolerance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tolerance(self):\n return self.params['tolerance']", "def tolerance(self):\n return self._tolerance", "def tolerance(self) -> float:\n return self._tolerance", "def tol(self):\n return self._tol", "def get_tolerance(self):\n\n if Test.global_tolerance is None:\n return self._tolerance\n return Test.global_tolerance", "def tol(self) -> Real:\n return self._tol", "def Tolerance(self):\n\t\treturn self._get_attribute('tolerance')", "def get_tolerances(self) -> Tuple[float, float]:\n return self.rtol, self.atol", "def calc_tolerance(wt):\n return 1 - wt", "def get_abs_tolerance(self):\n\n if Test.global_abs_tolerance is None:\n return self._abs_tolerance\n return Test.global_abs_tolerance", "def StepTolerance(self):\n\t\treturn self._get_attribute('stepTolerance')", "def performance_tolerance():\n tolerance = os.environ.get(\"TEST_PERFORMANCE_TOLERANCE\")\n\n if tolerance:\n tolerance = float(tolerance)\n print(f\"Testing performance tolerance multiplier set to: {tolerance}x\")\n else:\n tolerance = 1\n print(\"Testing performance tolerance not set. Using the default value: 1.0x\")\n\n return tolerance", "def get_particle_tolerance(self):\n\n if Test.global_particle_tolerance is None:\n return self._particle_tolerance\n return Test.global_particle_tolerance", "def xtol(self):\n return self._xtol", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def get_tol_2d():\r\n return geometry.gmGetXyTol()", "def _get_tol(self, math_fn, ty):\n\n if utils.PYVERSION > (2, 6):\n low_res = {\n (math.gamma, np.float64): 1e-14,\n (math.lgamma, np.float64): 1e-13,\n }\n else:\n low_res = {}\n\n default = 1e-15 if ty == np.float64 else 1e-6\n return low_res.get((math_fn, ty), default)", "def test__compute_tolerance_distance():\n classifier = classifier_module.Classifier(None)\n L1 = [11.2, 41.43, 1.33]\n L2 = [10.9, 41.45, 1.34]\n L3 = [12.0, 41.4412, 1.001]\n L4 = [11.3, 41.15, 1.12]\n L5 = [11.223, 41.0, 1.31]\n AL = [L1, L2, L3, L4, L5]\n symbol = \"a\"\n classifier._compute_tolerance_distance(AL, symbol)\n tolerance_distance_path = \\\n classifier_module.Classifier._get_file_path( \\\n classifier.files[classifier_module.DISTANCE_TOLERANCE_FILE], symbol)\n file_with_tolerance_distance = \\\n open(tolerance_distance_path, 'r')\n tolerance_distance = float(file_with_tolerance_distance.readline())\n file_with_tolerance_distance.close()\n assert fabs(tolerance_distance - 0.5506099238118276) < epsilon", "def epsilon():\n return _EPSILON", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def tol(self, value):\n self._tol = value", "def tolerance_level(self, tolerance_level=None):\n if tolerance_level is not None:\n self._tolerance_level = tolerance_level\n return self._tolerance_level", "def epsilon(self):\n return self.__epsilon", "def _find_cutoff(self):\n cutoff = 1\n while ((self.linear_rstar_unnorm(cutoff) -\n self.turing_rstar_unnorm(cutoff))**2\n > self.approx_turing_variance(cutoff)):\n cutoff += 1\n return cutoff", "def epsilon(self):\n return self._epsilon", "def compare(a, b, *, tol=1e-6):\n if abs(a - b) < tol:\n return 0.0\n elif a > b:\n return 1.0\n else:\n return -1.0", "def set_tol(iprec):\n \n tol = -1\n \n if iprec == -2:\n tol = 0.5\n \n elif iprec == -1:\n tol = 0.5 * 10**-1\n \n elif iprec == 0:\n tol = 0.5 * 10**-2\n \n elif iprec == 1:\n tol = 0.5 * 10**-3\n \n elif iprec == 2:\n tol = 0.5 * 10**-6\n \n elif 
iprec == 3:\n tol = 0.5 * 10**-9\n \n elif iprec == 4:\n tol = 0.5 * 10**-12\n \n elif iprec == 5:\n tol = 0.5 * 10**-15\n \n return tol", "def set_tolerance(rel_tolerance=1e-09, abs_tolerance=0.0):\n global REL_TOLERANCE, ABS_TOLERANCE\n REL_TOLERANCE = rel_tolerance\n ABS_TOLERANCE = abs_tolerance", "def tolerance(self, tolerance: float) -> None:\n self._tolerance = tolerance", "def set_tolerance(self, tol):\n self.tolerance = tol", "def get_atol(tol, atol, bnrm2, get_residual, routine_name):\n\n if atol is None:\n atol = 'legacy'\n\n tol = float(tol)\n\n if atol == 'legacy':\n resid = get_residual()\n if resid <= tol:\n return 'exit'\n if bnrm2 == 0:\n return tol\n else:\n return tol * float(bnrm2)\n else:\n return max(float(atol), tol * float(bnrm2))", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def epsilon_delta(self):", "def get_epsilon(self):\n step_size = float(self._eps_begin - self._eps_end) / self._total_steps\n self._epsilon = max(self._eps_end, self._epsilon - step_size)\n return self._epsilon", "def compare_values((covered, total), tolerance):\n missing = total - covered\n return cmp(tolerance, missing)", "def MaxTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_MaxTolerance(self, *args)", "def add_tol(self):\n return self._add_tol", "def test_base_period_tolerance(delta, expected):\n result = wrap(180 - delta)\n print(result, np.isclose(result, -180))\n assert np.isclose(result, -180)[0] == expected", "def get_initial_epsilon(self):\n return self.epsilon_percentile, True, self.max_rounds == 0", "def get_possible_tw(self):\n ev = self.ev\n f = np.array([np.abs(a - b) for a in ev for b in ev if not np.isclose(a, b)])\n return f[~(np.triu(np.abs(f[:, None] - f) <= settings.EQ_COMPARE_TOL, 1)).any(0)]", "def standardError(self):\n return math.sqrt(self.standardError2())", "def MinTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_MinTolerance(self, *args)", "def worst(self) -> float:\n return float(self.tsdf.pct_change().min())", "def _check_within_tolerance(value, tolerance):\n return tf.norm(tensor=value, ord=np.inf) <= tolerance", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance", "def _fp_evaluate(sequence, iteration, tolerance):\n return np.abs(sequence[iteration] - sequence[iteration - 1]) < tolerance", "def get_epsilon(self, round, abc_history):\n if round > len(abc_history):\n t = np.percentile(abc_history[-1]['distances'], self.epsilon_percentile)\n else:\n t = np.percentile(abc_history[round - 1]['distances'], self.epsilon_percentile)\n return t, False, self.max_rounds and round + 1 == self.max_rounds", "def GetEpsilonBeer(Abs, conc, pathLength):\n return Abs / (conc * pathLength)", "def eccentricity(self):\n return sqrt(self.f * 2 - self.f ** 2)", "def max_abs_error(self) -> float:\n return np.max(np.abs([self.error]))", "def deviation_ok(norm, value, epsilon):\n deviation = abs(norm-value)/norm\n # print(abs(d-epsilon))\n return deviation <= epsilon", "def get_lift(self):\n return 0.0", "def 
calc_conf(deviation, tolerance, mape):\n return (1 - ((mape / 100) * (deviation/tolerance))) * 100", "def set_tolerance(self, tol):\n self.precision = tol\n return", "def LimitTolerance(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_Tool_LimitTolerance(self, *args)", "def relative_error(x, y):\n return np.max(np.abs(x - y) / (np.maximum(1e-8, np.abs(x) + np.abs(y))))", "def update_vrad_tolerance(self):\n try:\n value = float(self.edit_vrad_tolerance.text())\n except:\n value = None\n self._get_selected_model().metadata[\"velocity_tolerance\"] = value\n return None", "def almost_eq(e1,e2) :\n\treturn round(e1-e2,4) == 0.0", "def check_for_float(check):", "def test_tolerance(self):\n # With idw no sites within tolerance should result in masked output\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\", tolerance=1.0\n )\n assert dset.spec.hs().values.all()\n dset = self.dset.spec.sel(\n lons=self.lons_inexact, lats=self.lats_inexact, method=\"idw\", tolerance=0.01\n )\n assert np.isnan(dset.spec.hs().values).any()\n # With nearest no sites within tolerance should raise an exception\n with pytest.raises(AssertionError):\n dset = self.dset.spec.sel(\n lons=self.lons_inexact,\n lats=self.lats_inexact,\n method=\"nearest\",\n tolerance=0.01,\n )", "def calc_error_dist(self):\n pass", "def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))", "def check_result(self, res):\n global dtParameterDesc, dtResultDesc\n if res not in dtResultDesc or 'tolerances' not in dtResultDesc[res]:\n # no tolerance definition\n return (True, None, None)\n try:\n ok = False\n show = ''\n badpars = []\n reference = None # reference parameter value to compare result with\n if 'reference' in dtResultDesc[res]:\n refpar = dtResultDesc[res]['reference']\n reference = self.parameters[refpar]\n resvalue = self.results[res]\n\n for tolpar in dtResultDesc[res]['tolerances']:\n tolvalue = self.parameters[tolpar]\n toltype = tolpar.split(' ')[1]\n if toltype == 'abstol' and reference is not None:\n if abs(reference-resvalue) <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'reltol' and reference is not None and reference != 0:\n if abs(resvalue/reference-1) <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'uplim':\n if resvalue <= tolvalue:\n ok = True\n else:\n show = '\\u21D1'\n badpars.append(tolpar)\n if toltype == 'lowlim':\n if resvalue > tolvalue:\n ok = True\n else:\n show = '\\u21D3'\n badpars.append(tolpar)\n return (ok, show, badpars)\n except (KeyError, TypeError):\n # some error\n print_exc()\n return (False, None, None)", "def is_unit(self, tolerance=1e-14):\n return abs(1.0 - self._sum_of_squares()) < tolerance # if _sum_of_squares is 1, norm is 1. This saves a call to sqrt()", "def expected_result(self, other):\r\n return float(1) / (1 + math.pow(10, float(other.elo - self.elo) / DIVIDER))", "def diff_of_errors(self):\n self.e_of_e = self.azimuth_error - self.altitude_error\n return self.e_of_e", "def set_tolerance(self, value):\n\n self._tolerance = value", "def determine_threshold(yval,pval):\n\n F1 = 0\n epsilon = 0\n for _epsilon in np.linspace(min(pval),max(pval),1000):\n ## Compute stats\n _F1,stats = evaluate_epsilon(yval,pval,_epsilon)\n\n if _F1 > F1:\n F1 = _F1\n epsilon = _epsilon\n print(\"Better threshold found! 
{} ==> F1 {}\".format(epsilon,F1))\n \n return epsilon, F1", "def expected(A, B):\n return 1 / (1 + 10 ** ((B - A) / 150))", "def wkstest(x, y):\n\tw = np.power(y*(1.0 - y), 0.5)\n\txy = np.abs(x-y)/w\n\treturn xy.max()", "def distmeter_err(self):\n from astropy import units\n return self.distmpc_err * units.Mpc.in_units(\"m\")", "def fabs(x):\n return 0.0", "def current_epsilon(self):\n t = self.action_requests\n T = self.exploration_period\n if(t >= T):\n return self.epsilon_final\n\n epsilon0 = self.epsilon_initial\n epsilonT = self.epsilon_final\n\n return epsilon0 - (t * (epsilon0 - epsilonT)) / T", "def find_epsilon(self, ltarget):\n\n dnu = self.find_large_separation()\n one = n = nu = 0.0\n for i in range(len(self.modes)):\n if (self.modes['l'][i] != ltarget): continue\n one += 1.0\n n += self.modes['n'][i]\n nu += self.modes['freq'][i]\n if (one == 0.0):\n return 0.0\n else:\n return (nu/dnu-n)/one", "def checkAnswer(comment,value,expected,results,tol=1e-10):\n if abs(value - expected) > tol:\n print(\"checking answer\",comment,value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1", "def isclose(valuex, valuey, rel_tol=1e-08, abs_tol=0.0):\n return math.isclose(valuex, valuey, rel_tol=rel_tol, abs_tol=abs_tol)", "def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm", "def find_absolute_value(x):\n return math.fabs(x)", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def se(actual,expected):\n return np.power(np.subtract(actual,expected),2)", "def get_sqrt_2():\n return 1.41421356", "def test_constants(self):\r\n\r\n # Of the form ('expr', python value, tolerance (or None for exact))\r\n default_variables = [\r\n ('i', 1j, None),\r\n ('j', 1j, None),\r\n ('e', 2.7183, 1e-4),\r\n ('pi', 3.1416, 1e-4),\r\n ('k', 1.3806488e-23, 1e-26), # Boltzmann constant (Joules/Kelvin)\r\n ('c', 2.998e8, 1e5), # Light Speed in (m/s)\r\n ('T', 298.15, 0.01), # Typical room temperature (Kelvin)\r\n ('q', 1.602176565e-19, 1e-22) # Fund. 
Charge (Coulombs)\r\n ]\r\n for (variable, value, tolerance) in default_variables:\r\n fail_msg = \"Failed on constant '{0}', not within bounds\".format(\r\n variable\r\n )\r\n result = calc.evaluator({}, {}, variable)\r\n if tolerance is None:\r\n self.assertEqual(value, result, msg=fail_msg)\r\n else:\r\n self.assertAlmostEqual(\r\n value, result,\r\n delta=tolerance, msg=fail_msg\r\n )", "def test_error_at_98tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.98))", "def tol(self, atol: Real):\n if not isinstance(atol, Real):\n raise TypeError(\"The attribute tol must be a real number.\")\n if 0 <= atol < 1:\n self._tol = atol\n else:\n raise ValueError(\"Need 0 <= tol < 1.\")", "def test_suite():\n test(absolute_value(17) == 17)\n test(absolute_value(-17) == 17)\n test(absolute_value(0) == 0)\n test(absolute_value(3.14) == 3.14)\n test(absolute_value(-3.14) == 3.14)", "def threshold(self) -> float:\n return pulumi.get(self, \"threshold\")", "def test_ge(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f93: Fraction = Fraction(9, 3)\n f124: Fraction = Fraction(12, 4)\n self.assertTrue(f12 >= f34)\n self.assertTrue(f93 >= f124)\n self.assertFalse(f93 >= f12)", "def eps(self):\n return self._eps", "def epsrel(self) -> float:\n return self._epsrel", "def test_for_convergence(self, error_tol):\n list_of_best_indvs = []\n for island in self._islands:\n best_indv = island.best_individual()\n list_of_best_indvs.append(best_indv)\n list_of_best_indvs.sort(key=lambda x: x.fitness)\n\n best_indv = list_of_best_indvs[0]\n converged = best_indv.fitness <= error_tol\n\n self._best_indv = best_indv\n self._converged = converged\n return converged", "def _epsilon(self, step):\n if step < 0:\n return self._start\n elif step > self._steps:\n return self._stop\n else:\n return self._step_size * step + self._start", "def rmdspe(self) -> float:\n return float(np.sqrt(np.median(np.square(self._percentage_error()))) * 100.0)", "def rrse(self) -> float:\n return float(np.sqrt(self.rse()))", "def tolerance(*args, angular: Union[float, bool]=0.0, linear: Union[float, bool]=0.0, q=True,\n query=True, **kwargs)->Union[None, Any]:\n pass", "def get_sol_value(self):\n return float(self.data[2])", "def _get_lip_best(self) -> float:\n pass", "def isclose(value, reference, rtol=5e-6):\n if isinstance(reference, (Sequence, np.ndarray)):\n ref = np.asarray(reference, np.float64)\n val = np.asarray(reference, np.float64)\n min_value = np.min(np.abs(reference))\n atol = 1e-6 if min_value == 0 else min_value / 1e4\n return np.allclose(val, ref, rtol=rtol, atol=atol)\n else:\n atol = 1e-6 if reference == 0 else 0\n return math.isclose(value, reference, rel_tol=rtol, abs_tol=atol)", "def test_diferencia_porcentual_menor(self):\r\n valorNuevo = 10\r\n valorAnterior = 0\r\n self.assertEqual(diferenciaPorcentual(valorNuevo, valorAnterior), -999999)" ]
[ "0.8676137", "0.8571045", "0.84762627", "0.8150256", "0.8027628", "0.80256224", "0.7848701", "0.7472672", "0.73756516", "0.7373416", "0.71266186", "0.69214386", "0.6822433", "0.6734726", "0.6684235", "0.6613367", "0.6550089", "0.6541348", "0.6453469", "0.63813984", "0.63173634", "0.62930965", "0.62924457", "0.62782484", "0.62225074", "0.6067151", "0.6029485", "0.5962539", "0.593212", "0.5925741", "0.5900117", "0.58847004", "0.58745295", "0.5827776", "0.5810056", "0.58080024", "0.579896", "0.579062", "0.57566655", "0.5736266", "0.57077634", "0.57050675", "0.5704986", "0.56949884", "0.5694563", "0.5686435", "0.56790686", "0.5669792", "0.5650958", "0.56330854", "0.56305355", "0.5620135", "0.5600739", "0.5595276", "0.5594352", "0.5589121", "0.5574122", "0.557229", "0.5560743", "0.5536711", "0.5526417", "0.5519018", "0.5517965", "0.55022573", "0.54980433", "0.549443", "0.54928493", "0.5488221", "0.5485279", "0.54819274", "0.54771304", "0.5472298", "0.54695404", "0.54694146", "0.5466203", "0.54654974", "0.5456971", "0.5448191", "0.5445787", "0.54279107", "0.5425063", "0.54096645", "0.5408034", "0.5404794", "0.53956366", "0.5393709", "0.5385226", "0.5377016", "0.53746754", "0.5372343", "0.5371697", "0.5365472", "0.5364105", "0.53572166", "0.53554505", "0.5351472", "0.5351075", "0.5350935", "0.53435653", "0.53377426" ]
0.8477005
2
Function to revert the direction of the current line. Returns
def revert(self):
    reverted = Line(l=self)
    reverted.direction *= -1.0
    return reverted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fliped(self):\n return Line(self.end, self.start, self)", "def reversed(self):\n return LINE(*self.elems,**{'reverse':(not self.reverse)})", "def _inverse_lines(self):\n pass", "def revert(self, *args, **kwargs):", "def _backup_line(self):\n if self._orig_line is None:\n self._orig_line = self._line", "def flip(self, p):\n return -p", "def previous_line():\r\n set_point(point().previous_line())", "def opposite(self):\n if self.direction == 8: return Direction(8)\n n = self.direction + 4\n if n >= 8: n -= 8\n return Direction(n)", "def restore(self):\n print(\"Restoring Direction\")\n if self.turn_track > 0:\n self.encL(abs(self.turn_track))\n elif self.turn_track < 0:\n self.encR(abs(self.turn_track))", "def backward_character():\r\n set_point(point().offset(-1))", "def pre_revert(self):", "def revert(self):\n\n if len(self.stack) == 0 or not self.revertable:\n return\n\n for s in self.stack:\n s[\"model\"].setPos(s[\"model\"].getPos() + Vec3(0,0,THING_REVERT_DISTANCE))\n\n state = self.stack.pop()\n\n #not sure if this helps, but it can't hurt\n self.model.detachNode()\n\n for x in self.toRevert:\n self.toRevert[x](state[x])", "def flip(self, bev_direction: str = 'horizontal') -> None:\n pass", "def __invert__(self):\n return self.reverse()", "def revert(self, a):\n raise NotImplementedError", "def flip(self):", "def back(cargo):\n # Go backwards\n line_follower.turn()\n\n # return\n new_state = \"follow\"\n txt = \"follow line..\"\n\n return (new_state, txt)", "def turn_left(self):\n temp = self.direction[0]\n self.direction[0] = self.direction[1]\n self.direction[1] = -temp", "def Reverse(self):\n if (self.translated == False):\n self.alignment = self.alignment[:,::-1]\n self.Show(self.displayedColumn)\n self.BackupAlignment()\n else:\n self.AlertMessage(\"Can't reverse protein sequences.\", 'medium')", "def down(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n wraps_current = int(len_current / wrap)\n columns_current = len_current % wrap\n \n # If the position is not in the bottom wrap of the line move it down a\n # wrap. Take into account shorter wraps below.\n if len_current > wrap and self.pos < wraps_current * wrap:\n pos_wrap = int(self.pos / wrap)\n if pos_wrap + 1 == wraps_current and self.pos % wrap > columns_current:\n self.pos = (wraps_current * wrap) + columns_current\n else:\n self.pos = self.pos + wrap\n \n # If the position is in the bottom wrap move it to the first wrap of\n # the next line. 
Take into acount shorter lines below.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos % wrap > len_next:\n self.pos = len_next\n else:\n self.pos = self.pos % wrap\n \n # If no wrapping is being done move the line down one and adjust the\n # position if the next line is shorter.\n elif self.line < self.buffer.size() - 1:\n len_next = self.line_length(1)\n self.line += 1\n if self.pos > len_next:\n self.pos = len_next", "def delete_backward():\r\n point().delete_left_char()\r\n set_point(point().offset(-1))", "def flip(self):\n self._start, self._end = self._end, self._start", "def revert(self, ref=None):\n # TODO\n raise NotImplementedError", "def __invert__(self):\n return self.strip(axis = 1)", "def backToSource(self, point):\n if self.revertTransformation is not None:\n return self.revertTransformation(point)\n return point", "def backward(self, y):\n pass", "def undo():", "def back(self):\n self.position -= 1", "def revise():", "def getDirectionChange(pre, now, next):\r\n return RIGHT", "def revert(self, rec=0):\r\n if rec:\r\n result = self._svn('revert -R')\r\n else:\r\n result = self._svn('revert')\r\n return result", "def reverse(self):\n global motor_direction\n with self._lock:\n GPIO.output(7, False)\n GPIO.output(11, True)\n GPIO.output(13, False)\n GPIO.output(15, True)\n # time.sleep(sec)\n motor_direction = 'Reverse'\n return motor_direction", "def revert(self):\n original = getattr(self, \"_original\", None)\n if not original:\n return\n\n if hasattr(self, \"output\"):\n output = self.output\n keep_output = True\n else:\n keep_output = False\n\n del self._original\n\n self.__dict__ = original.__dict__\n\n if keep_output:\n self.output = output", "def backwards(self):\n pass", "def delete_forward():\r\n point().delete_right_char()", "def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)", "def up(self, wrap = None):\n len_current = self.line_length()\n \n # If there is line wrapping\n if wrap:\n \n # If the position is in the top wrap of the line move it into the\n # last wrap of the line above it. 
Take into account shorter lines\n if self.pos < wrap and self.line > 0:\n len_next = self.line_length(-1)\n wraps_next = int(len_next / wrap)\n columns_next = len_next % wrap\n self.line -= 1\n if self.pos > columns_next:\n self.pos = (wraps_next * wrap) + columns_next\n else:\n self.pos = (wraps_next * wrap) + self.pos\n \n # If the position is in the wraps of the current line\n elif self.pos >= wrap:\n self.pos = self.pos - wrap\n \n # If there is no line wrapping move to the same position or lower in\n # the next line up.\n elif self.line > 0:\n len_next = self.line_length(-1)\n self.line -= 1\n if self.pos > len_next:\n self.pos = len_next", "def turn_right(self):\n temp = self.direction[0]\n self.direction[0] = -self.direction[1]\n self.direction[1] = temp", "def UndoChanges(self):\n if (len(self.alignmentHistory) > 1):\n self.alignmentHistory.pop()\n self.alignment = self.alignmentHistory[-1][:,:]\n self.Show(self.displayedColumn)\n else:\n self.AlertMessage('Nothing to undo.', 'low')", "def pop_current_line(self):\n self.current_line.pop()", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def get_direction_backwards(self, direction):\r\n return direction_backwards[direction]", "def invert(self):\n self.vertices.reverse()", "def undo(self):\n self.setIndex(self._index-1)", "def down(self):\n self.move(0,-1)", "def _backwards(self, letter):\n\t\tl = letter\n\t\tfor i in range(self.n_rotors):\n\t\t\tl = self._rotor_left2right(self.rotors[i], l, self.offsets[i],\n\t\t\t\t\t\t\t\t\tself.rings[i])\n\t\treturn l", "def backward(self, amount):\n newX = self._x - round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y + round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def backward(self):\n raise NotImplemented", "def back(self, step):\r\n self.forward(-step)", "def turn_right(self):\n pass", "def reverse(self): # real signature unknown; restored from __doc__\n pass", "def reverse(self):\n x = self._x * -1\n y = self._y * -1\n return Point(x,y)", "def reverseCurves(self):\n self.data.reverse()\n return True", "def undo(self) :\n \n raise NotImplementedError()", "def undo(self):\n if self.history:\n xy0, xy1, data_size = self.history.pop()\n x0, y0 = xy0\n x1, y1 = xy1\n self._used[y1][x1] -= data_size\n self._used[y0][x0] = data_size\n if self.goal == xy1:\n self.goal = xy0", "def undo(self):\n if self._history_position > 0:\n self._history_position -= 1\n self._commands[\n self._history[self._history_position][1]\n ].execute(self._history[self._history_position][2])\n else:\n print(\"nothing to undo\")", "def wrap_cursor_back(event):\n b = event.cli.current_buffer\n b.cursor_up(count=1)\n relative_end_index = b.document.get_end_of_line_position()\n b.cursor_right(count=relative_end_index)", "def invert(self):\n self._c = ~self._c", "def Revert():\n raise Exception(0xF1F1F2F2F3F3F4F4)", "def undoChanges(self):\n Objects.undoChanges(self)\n self.draw()", "def backward(self):\n raise NotImplementedError", "def flip(self, row: int, col: int) -> None:\n self.state[row, col] = not self.state[row, col]", "def move_down(self) -> None:\n try:\n next_newline_index: int = self.buffer.index('\\n', start=self.index)\n except ValueError:\n return\n\n if next_newline_index == self.buffer.end:\n return\n\n down_index: int\n column: int = self.buffer.get_column(self.index)\n down_index = next_newline_index + 1 + column\n\n if down_index > 
self.buffer.end:\n down_index = self.buffer.end\n else:\n start: int = next_newline_index + 1\n end: int = down_index\n try:\n next_next_newline_index: int = self.buffer.index('\\n', start=start, end=end)\n down_index = next_next_newline_index\n except ValueError:\n pass\n\n self.index = down_index", "def reverse(self):\n return self[::-1]", "def undo_last_move(self):\n if self.last_move is None:\n return\n x, y, i, j = self.last_move\n self.boards[x][y].undo_last_move()\n if len(self.history) > 1:\n self.last_move = self.history[-2]\n else:\n self.last_move = None\n self.__on_turn = Square.X if self.__on_turn == Square.O else Square.O\n del self.history[-1]", "def reverse(self):\n self.left_motor.reverse()\n self.right_motor.reverse()", "def post_revert(self):", "def down(self):\n if self.head.heading() != UP and self.last_direction != UP:\n self.head.setheading(DOWN)", "def undo(self):\n for command in reversed(self.commands):\n command.undo()", "def move_up(self) -> None:\n try:\n line_start: int = self.buffer.reverse_index('\\n', end=self.index) + 1\n except ValueError:\n return\n\n previous_line_start: int\n try:\n previous_line_start = self.buffer.reverse_index('\\n', end=line_start - 1) + 1\n except ValueError:\n previous_line_start = 0\n\n previous_line_length = line_start - previous_line_start\n column: int = self.index - line_start\n if previous_line_length <= column:\n previous_line_end = line_start - 1\n self.index = previous_line_end\n else:\n self.index = previous_line_start + column", "def revert(self, a):\n if self.is_one(a):\n return a\n else:\n raise NotReversible('only unity is reversible in a ring')", "def opposite(direction):\n return (direction+2)%4", "def undo_act(self):\n\n return self.history[self.position]", "def flip(self, x, y, /, *args, **kwargs):\n return self._func(y, x, *args, **kwargs)", "def __reversed__(self):\n return reverse(self)", "def BackTab(self):\n sel = self.GetSelection()\n if sel[0] == sel[1]:\n # There is no selection\n cpos = self.GetCurrentPos()\n cline = self.GetCurrentLine()\n cipos = self.GetLineIndentPosition(cline)\n if cpos <= cipos:\n # In indentation so simply backtab\n super(EditraBaseStc, self).BackTab()\n else:\n # In middle of line somewhere\n text = self.GetLine(cline)\n column = max(0, self.GetColumn(cpos) - 1)\n if len(text) > column and text[column].isspace():\n\n # Find the end of the whitespace\n end = column\n while end < len(text) and \\\n text[end].isspace() and \\\n text[end] not in '\\r\\n':\n end += 1\n\n # Find the start of the whitespace\n end -= 1\n start = end\n while end > 0 and text[start].isspace():\n start -= 1\n\n diff = end - start\n if diff > 1:\n # There is space to compress\n isize = self.GetIndent()\n if isize < diff:\n # More space than indent to remove\n repeat = isize\n else:\n # Less than one indent width to remove\n repeat = end - (start + 1)\n\n # Update the control\n self.BeginUndoAction()\n self.SetCurrentPos(cpos + (end - column))\n for x in range(repeat):\n self.DeleteBack()\n self.EndUndoAction()\n\n else:\n # There is a selection\n super(EditraBaseStc, self).BackTab()", "def flip(self) -> int:\n self.flags = ~(self.flags)\n return self.flags", "def __editRevert(self):\n self.activeWindow().revertToUnmodified()", "def reset_directions(directions, dir='from'):\n if dir == 'from':\n # reverse the direction, so it plots \"to\"\n print(\"reversing direction\")\n directions = (directions + 90) * -1\n elif dir == 'to':\n # don't reverse, so it plots \"to\"\n directions = (directions - 90) * -1\n 
else:\n raise ValueError('dir has to be either \"from\" or \"to\"')\n return directions", "def reverse_move(self):\n # assign previous values to game variables and remove one entry from game history\n (self.turn_number, active_player, self.player1, self.player2) = self.history.pop()\n\n # assign the right current player\n if active_player == 'player 1':\n self.current_player = self.player1\n elif active_player == 'player 2':\n self.current_player = self.player2\n\n else: # there was an error with deciding the current player\n assert False, \" reversing moves player assignment broke \"", "def backward(self, top, propagate_down, bottom):\r\n pass", "def turn_left(self):\n\t\tself.direction = (self.direction - 1)%4", "def go_left(self):\n self.change_x = -6\n self.direction = \"L\"", "def go_left(self):\n self.change_x = -6", "def go_left(self):\n self.change_x = -6", "def go_right(self):\n self.change_x = 6\n self.direction = \"R\"", "def backward(self):\n #print('backward\\r')\n self.linearVector = Vector3(x=-1.0, y=0.0, z=0.0)\n self.angularVector = Vector3(x=0.0, y=0.0, z=0.0)", "def subtract(self,l):\r\n\t\t\r\n\t\t# convert to line\r\n\t\tl = Li(l)\r\n\t\t\r\n\t\t# scale by -1 and add\r\n\t\tl = l.scale(-1)\r\n\t\ts = self.add(l)\r\n\t\t\r\n\t\treturn s", "def unindent(self):\n self.x_pos -= 10", "def moveBackward(self):\n if self.onGround:\n self.vx = -4", "def untuck(self):\n self.move_to_neutral()", "def move_backward():\n pass", "def asgop_revert(*args):\n return _ida_hexrays.asgop_revert(*args)", "def move_down(self):\n\t\treturn self._move(up=False)", "def __invert__(self):\n return self.inverse()", "def move_down(self):\n self.y -= 1", "def inverse(self) -> Alignment:\n return self._create(self._modified, self._original)", "def unindent(self):\n\n self.beginEditBlock()\n\n for cursor in self.cursors:\n sel_start, sel_end, _ = self.get_sel_start_end_reverse(cursor)\n\n # retrieve start/end blocks to get the iteration range\n cursor.setPosition(sel_end, cursor.MoveAnchor)\n end_block = cursor.blockNumber()\n # also go to the firstiteration line\n cursor.setPosition(sel_start, cursor.MoveAnchor)\n start_block = cursor.blockNumber()\n\n # go to the start of line (as cursor.NextBlock does) to be sure that\n # cursor.deleteChar() operates on the starting characters of the line\n cursor.movePosition(cursor.StartOfLine, cursor.MoveAnchor)\n\n for _ in range(end_block -start_block +1):\n line = cursor.block().text()\n\n # go to the next line if line is empty\n if not line:\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n if line[0] == '\\t':\n cursor.deleteChar()\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n if len(line) < 3:\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n continue\n\n # perform line un-indent\n if line[:4] == ' ':\n for i in range(4):\n cursor.deleteChar()\n\n # go to the next line\n cursor.movePosition(cursor.NextBlock, cursor.MoveAnchor)\n\n self.endEditBlock()" ]
[ "0.68525314", "0.6473832", "0.64653385", "0.64345104", "0.64087176", "0.6327431", "0.6293631", "0.6287279", "0.62672716", "0.6193143", "0.6137352", "0.61128193", "0.6102303", "0.60842335", "0.60765207", "0.6057564", "0.60540533", "0.60380507", "0.6030797", "0.60029155", "0.5998949", "0.597672", "0.5972079", "0.59696555", "0.59154564", "0.59099084", "0.59048104", "0.59046954", "0.5894029", "0.5850022", "0.58327997", "0.5830343", "0.5815336", "0.57979786", "0.578912", "0.57731426", "0.57712007", "0.5770808", "0.576819", "0.57619673", "0.5758032", "0.57306373", "0.57206744", "0.57102", "0.5676627", "0.5670084", "0.56680626", "0.56648177", "0.56648177", "0.56648177", "0.5663269", "0.56342065", "0.56067413", "0.55986285", "0.55929124", "0.5588933", "0.5580384", "0.5563997", "0.55625296", "0.5561182", "0.5553425", "0.5553145", "0.5552739", "0.55495375", "0.5542509", "0.55420494", "0.5541326", "0.55409", "0.55339324", "0.55249107", "0.5507997", "0.5505396", "0.5504984", "0.54997796", "0.54835117", "0.54832435", "0.5482706", "0.5482026", "0.5475513", "0.54744583", "0.5473823", "0.547294", "0.54708296", "0.54668814", "0.546381", "0.5461678", "0.5461678", "0.54613674", "0.54609144", "0.5459163", "0.5455473", "0.54515356", "0.5438002", "0.5422012", "0.5421667", "0.5419995", "0.5410059", "0.5406289", "0.5405749", "0.5402703" ]
0.85526377
0
Function to get the direction of the line. Returns
def get_direction(self):
    return self.direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_direction(self):\n return self.actual_coordinates[2]", "def get_direction(self):\r\n return self.__direction", "def direction(self):\n return atan2d(self.y, self.x)", "def getDirection(self):\n return self.ray.direction", "def direction(self):\r\n return 180 - atan2(self.x, self.y)*180/pi", "def direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"direction\")", "def direction(self) -> str:\n return pulumi.get(self, \"direction\")", "def get_direction(x0, y0, x1, y1):\n\n # same point special case\n if x0 == x1 and y0 == y1:\n return on\n\n # vertical special cases\n if x0 == x1:\n if y1 > y0:\n return back\n else:\n return front\n\n slope = float(y1 - y0)/(x1 - x0)\n if x1 > x0:\n if slope < -2:\n return front\n elif slope < -.5:\n return front_right\n elif slope < .5:\n return right\n elif slope < 2:\n return back_right\n else:\n return back\n else:\n if slope < -2:\n return back\n elif slope < -.5:\n return back_left\n elif slope < .5:\n return left\n elif slope < 2:\n return front_left\n else:\n return front", "def direction(self) -> int:\n return self._direction", "def direction(self) -> np.ndarray:\n return self._direction", "def get_direction(self):\n\n return -1 if self.curr_player == self.PLAYER1 else 1", "def get_walking_line(self):\n\t\treturn self._bottom_rect.move(0,1)", "def get_origin_direction(self):\n return self.origin_coordinates[2]", "def direction(self):\n return self._direction.copy()", "def direction(self):\n return self.cfg.direction", "def direction(self):\n _direction = self._custom.get(\"direction\")\n if _direction is not None:\n return _direction\n\n _direction = self._infer_direction()\n self._custom[\"direction\"] = _direction\n\n return _direction", "def getDirection(self):\n return self.listener.direction", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"direction\")", "def direction(self):\n return None if not bool(self.relation) else (self.s_end <= self.o_start)", "def find_direction_vector(line):\n pt1, pt2 = line\n pt1 = np.array(pt1).reshape(2,)\n pt2 = np.array(pt2).reshape(2,)\n direct = pt2 - pt1\n direct_norm = normalize(direct)\n return direct_norm", "def current_direction(self):\n return self._attributes.get(\"current_direction\")", "def direction(self):\n return self._dir", "def get_direction(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\tif diff_y != 0 and diff_x != 0:\r\n\t\treturn 'diagonal'\r\n\r\n\treturn 'vertical' if diff_y != 0 else 'horizontal'", "def direction(self):\n if self._is_hit:\n return Direction.NOT_MOVING\n return self._dir", "def _getDirection(coord1, coord2):\n x1, y1 = coord1\n x2, y2 = coord2\n\n if x1 == x2 and y1 == y2:\n return None # two coordinates are the same.\n elif x1 == x2 and y1 > y2:\n return UP\n elif x1 == x2 and y1 < y2:\n return DOWN\n elif x1 > x2 and y1 == y2:\n return LEFT\n elif x1 < x2 and y1 == y2:\n return RIGHT\n\n slope = float(y2 - y1) / float(x2 - x1)\n\n # Figure out which quadrant the line is going in, and then\n # determine the closest direction by calculating the slope\n if x2 > x1 and y2 < y1: # up right quadrant\n if slope > -0.4142:\n return RIGHT # slope is between 0 and 22.5 degrees\n elif slope < -2.4142:\n return UP # slope is between 67.5 and 90 degrees\n else:\n return UPRIGHT # slope is between 22.5 and 67.5 degrees\n elif x2 > x1 and y2 > y1: # down right quadrant\n if slope > 2.4142:\n return DOWN\n elif slope < 0.4142:\n return RIGHT\n else:\n return DOWNRIGHT\n elif x2 < x1 and y2 < y1: # up left quadrant\n if 
slope < 0.4142:\n return LEFT\n elif slope > 2.4142:\n return UP\n else:\n return UPLEFT\n elif x2 < x1 and y2 > y1: # down left quadrant\n if slope < -2.4142:\n return DOWN\n elif slope > -0.4142:\n return LEFT\n else:\n return DOWNLEFT", "def read_direction(self):\n global motor_direction\n with self._lock:\n return motor_direction", "def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def get_direction(self) -> int: \r\n if time.time() > self.stop_timer:\r\n return Directions.stop\r\n else:\r\n return self.direction", "def _calc_relative_move_direction(self, char, direction):\n if char in (\"Left\", \"Right\"):\n di = -1 if self.video.hflip else 1\n else:\n di = -1 if self.video.vflip else 1\n return direction * di", "def direction(self):\n norm=math.sqrt(self.x**2 + self.y**2 + self.z**2)\n return Vector3(self.x/norm, self.y/norm, self.z/norm)", "def get_dir_from_path(self):\n try:\n next_step = self.return_path[0]\n if next_step[0] > self.tile[0]:\n return 'd' # move up next\n if next_step[0] < self.tile[0]:\n return 'u' # move down next\n if next_step[1] > self.tile[1]:\n return 'r' # move right next\n if next_step[1] < self.tile[1]:\n return 'l' # move left next\n except IndexError as ie:\n print('Error while trying to get new path direction', ie)\n return None", "def getOffsetLine(self, distance, side=c.INSIDE):\n StartA = np.array([self.start.x, self.start.y])\n EndA = np.array([self.end.x, self.end.y])\n r = StartA - EndA #The slope vector of self\n rn = np.array([-r[c.Y], r[c.X]]) #flip x and y and inverse y to get the normal vector of the slope\n rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multipy by distance to get the correct length\n \n if side == c.INSIDE:\n return self.translate(-rn[c.X], -rn[c.Y]) #the \"minus\" side line is the left side which is inside.\n \n return self.translate(rn[c.X], rn[c.Y]) #the \"Plus\" side of the line is the right side which is outside.", "def direction(self) -> Optional[str]:\n return self._direction", "def get_direction(curr_pos, next_pos):\n if curr_pos == next_pos:\n return 'CLEAN'\n\n v_dist = next_pos[0] - curr_pos[0]\n h_dist = next_pos[1] - curr_pos[1]\n\n if h_dist != 0:\n if h_dist < 0:\n return 'LEFT'\n else:\n return 'RIGHT'\n else:\n if v_dist < 0:\n return 'UP'\n else:\n return 'DOWN'", "def get_direction(event):\n return event['result']['parameters']['direction']", "def determine_direction(self):\n # TODO: Implement when the format from the sensob is ready\n content = self.sensobs[0].content\n size = len(content)\n redCount = [0,0]\n for i in range(size):\n if i<=size/2:\n if content[i] == \"Red\":\n redCount[0]+=1\n if i>size/2:\n if content[i] == \"Red\":\n redCount[1]+=1\n if redCount[0]> redCount[1]:\n return self.LEFT\n elif redCount[0] < redCount[1]:\n return self.RIGHT\n else:\n #Same amount of red on both sides\n self.match_degree = 0.1\n return self.LEFT", "def get_line(self):\n return helpfunc.hex_line_in_direction(self.start, self.direction, self.length)", "def direction(self,four_dir=False):\n a = self.angle()\n if a is None:\n return None\n\n if four_dir:\n if a >= -45 and a <= 45:\n return \"UP\"\n elif a >= 45 and a <= 135:\n return \"RIGHT\"\n elif a >= 135 or a <= -135:\n return \"DOWN\"\n elif a >= -135 and a <= -45:\n return \"LEFT\"\n else:\n raise RuntimeError(\"Couldn't figure out %f\" % a)\n else:\n if a >= -22.5 and a <= 22.5:\n return \"UP\"\n elif a >= 22.5 and a <= 67.5:\n return \"UP-RIGHT\"\n elif a >= 67.5 and a <= 
112.5:\n return \"RIGHT\"\n elif a >= 112.5 and a <= 157.5:\n return \"DOWN-RIGHT\"\n elif a >= 157.5 or a <= -157.5:\n return \"DOWN\"\n elif a >= -157.5 and a <= -112.5:\n return \"DOWN-LEFT\"\n elif a >= -112.5 and a <= -67.5:\n return \"LEFT\"\n elif a >= -67.5 and a <= -22.5:\n return \"UP-LEFT\"\n else:\n raise RuntimeError(\"Couldn't figure out %f\" % a)", "def get_normalized_direction(self, direction):\n return round(self.normal_joystick_slope * direction + self.normal_joystick_intercept, 2)", "def translate_direction(self):\n xpart = math.sin(self.direction)\n ypart = math.cos(self.direction)\n if ypart > 0:\n print(\"oben \", end='')\n else:\n print(\"unten \", end='')\n if xpart > 0:\n print(\"rechts\")\n else:\n print(\"links\")", "def direction(self):\n g = self._grad_f(self._x, *self._args)\n self._calls[1] += 1\n if self._prev_dx is None:\n dx = -g\n else:\n b = max(0, np.dot(g, g - self._prev_g) / np.sum(self._prev_g ** 2))\n dx = -g + b * self._prev_dx\n if np.dot(dx, g) > 0:\n dx = -g\n self._prev_g = g\n self._prev_dx = dx\n return np.nan_to_num(dx)", "def get_mount_direction(self):\r\n return self._studio.get_mount_direction()", "def getDirectionChange(pre, now, next):\r\n return RIGHT", "def get_direction(self, name):\n index = Domino.direction_names.find(name)\n return Domino.directions[index]", "def GetHandleDirection(self):\n ...", "def current_direction(self):\n return self.wink.current_fan_direction()", "def current_direction(self) -> str:\n if self._device.fan_dir == SENSEME_DIRECTION_FORWARD:\n return DIRECTION_FORWARD\n return DIRECTION_REVERSE", "def getDirection(self, direction: str):\n return direction", "def increasing(self):\n return self.direction()", "def getOrdinate(self):\n return self.point.y - self.slope * self.point.x", "def direction_angle(self):\n return math.atan2(self.velocity, self.velocity)", "async def direction(self, value) -> str:\n if value is None:\n return \"N\"\n\n direction_array = [\n \"N\",\n \"NNE\",\n \"NE\",\n \"ENE\",\n \"E\",\n \"ESE\",\n \"SE\",\n \"SSE\",\n \"S\",\n \"SSW\",\n \"SW\",\n \"WSW\",\n \"W\",\n \"WNW\",\n \"NW\",\n \"NNW\",\n \"N\",\n ]\n direction_str = direction_array[int((value + 11.25) / 22.5)]\n return self._translations[\"wind_dir\"][direction_str]", "def get_arm_direction(self):\n return 1", "def direction(self, segment_index, t):\n\n return self.segments[segment_index].direction(t)", "def compute_direction(self, current_observed_human_point):\n\n if self._history.size() < self._max_belief_history:\n start_idx_history_window = 1\n else:\n start_idx_history_window = self._history.size() - self._max_belief_history\n\n return current_observed_human_point - self._history.observations[start_idx_history_window - 1]", "def directions(self):\n return self.piece_behavior.directions", "def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction", "def get_ending_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[-1].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[-1].y\n return delta_y, delta_x", "def direction(self):\n return(copysign(1, self.volume))", "def get_side(self):\n start_y_max = 
max(self.start.y1, self.start.y2)\n start_y_min = min(self.start.y1, self.start.y2)\n to_y_max = max(self.to.y1, self.to.y2)\n to_y_min = min(self.to.y1, self.to.y2)\n if start_y_max < to_y_min:\n return \"Down\"\n elif start_y_min > to_y_max:\n return \"Up\"\n else:\n return \"Left\" if self.start.cx > self.to.cx else \"Right\"", "def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError", "def getRobotDirection(self):\n return self.direction\n #raise NotImplementedError", "def getRobotDirection(self):\n return self.direction", "def getRobotDirection(self):\n return self.direction", "def indexToWin(self, direction, line): \n size = self.size\n if len(line[1:]) != size - 1:\n return None\n\n ## Experiment in avoiding conditional if then statements\n i = 0\n if direction in ['Vertical' , 'Horizontal']:\n # A vertical line is defined by the x coordinate of its points\n # A horizontal line is defined byt the y coordinate of its points\n i = {'Vertical': line[1]%size, 'Horizontal' : line[1]//size }[direction]\n\n return {'D-neg' : [k for k in range(size -1, size**2, size-1)[:-1] if k not in line[1:]][0],\n \n 'D-pos' : [k for k in range(0, size**2, size+1) if k not in line[1:]][0],\n \n 'Vertical' : [k for k in range(i, i + size**2, size) if k not in line[1:]][0],\n \n 'Horizontal': [k for k in range(i*size, i*size +size) if k not in line[:1]][0] } [direction]\n\n #Explanation of return statement above:\n #For each line on the grid, the index of its points belong to an arithmetic progression.\n #For example, the first horizontal line's indices are; 0,1,2..size-1 \n #Ex 6x6:\n # 0 1 2 3 4 5\n # 6 7\n # 12 14\n # 18 21\n # 24 28\n # 30 35\n # So for horizontals step size is 1, shift by n to get all others\n # For verticals step size is n, shift by i to get all others\n # For positive diagonal step size is n+1\n # for negative diagonal step size is n-1 ", "def getDirection (self, time):\n return self._response.getDirection(time)", "def get_starting_direction_vector(self):\n\n total_length = len(self.pixel_list)\n\n if total_length < 2:\n return None\n elif total_length < 15:\n delta_x = self.pixel_list[-1].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-1].y - self.pixel_list[0].y\n return delta_y, delta_x\n else:\n delta_x = self.pixel_list[-15].x - self.pixel_list[0].x\n delta_y = self.pixel_list[-15].y - self.pixel_list[0].y\n return delta_y, delta_x", "def get_direction(self):\n is_direction_correct = False\n while not is_direction_correct:\n direction = random.randint(0, 2)\n if direction == 0:\n self.turtle.left(90)\n elif direction == 1:\n self.turtle.right(90)\n else:\n self.turtle.right(0)\n is_direction_correct = self.check_boundary()", "def direction(self):\n len = self.length()\n if len == 0.0:\n uvec = pos.Pos(np.transpose(np.array([0, 0, 0])))\n else:\n uvec = pos.Pos(np.transpose(np.array([(self.end.x - self.start.x) / len,\n (self.end.y - self.start.y) / len,\n (self.end.z - self.start.z) / len])))\n return uvec", "def compute_direction(self, feats):\n if feats.name == \"ARNC\":\n if feats[\"z-score\"] < -1.5:\n return Directions.long_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.short_dir\n elif feats.name == \"UNG\":\n if feats[\"z-score\"] < -1.5:\n return Directions.short_dir\n elif feats[\"z-score\"] > 1.5:\n return Directions.long_dir", "def Get_direction(n):\n if abs(n) == 0:\n return 0\n else:\n return n / abs(n)", "def traffic_direction(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"traffic_direction\")", "def 
get_direction_matrix(self) -> int:", "def nextPositionOffset(self):\n if self.dir == \"N\":\n return (0, -1)\n elif self.dir == \"S\":\n return (0, 1)\n elif self.dir == \"E\":\n return (1, 0)\n elif self.dir == \"W\":\n return (-1, 0)\n else:\n raise TypeError(\"invalid direction '%s'\" % self.dir)", "def wind_direction(self):\n return self.flow_field.wind_direction", "def get_direction(self):\n directions = dict(ACTIVITY_DIRECTION_CHOICES)\n return directions.get(self.direction, \"N/A\")", "def GetLayoutDirection(*args, **kwargs):\n return _gdi_.DC_GetLayoutDirection(*args, **kwargs)", "def get_direction(self, start_direction, **kwargs):\n return self.directions.get(start_direction)", "def get_direction(strategy_context):\n direction_param = strategy_context['strategy']['opt_params'][0]\n\n if 'direction' in strategy_context['strategy']:\n warnings.warn(\"'direction' parameter in strategy_context['strategy']['direction'] is obsolete, \"\n \"please remove it to suppress this warning\")\n\n if direction_param.name.lower() != 'direction':\n raise ValueError('First OptParam of strategy must be Direction')\n\n for dir_value in direction_param.array:\n if dir_value != -1 and dir_value != 1:\n raise ValueError(\"Direction OptParam value must be -1 or 1\")\n\n if len(direction_param.array) == 1:\n if direction_param.array[0] == 1:\n return 1, 'Long'\n elif direction_param.array[0] == -1:\n return -1, 'Short'\n\n elif len(direction_param.array) == 2:\n return 0, 'Bidir'\n else:\n raise ValueError(\"Direction OptParam must contain 1 or 2 elements\")", "def get_direction(self, c1, c2):\n \n if c2[0] == c1[0]+1: return NORTH\n elif c2[1] == c1[1]+1: return EAST\n elif c2[0] == c1[0]-1: return SOUTH\n elif c2[1] == c1[1]-1: return WEST\n\n raise ValueError", "def get_direction(self, start_direction):\n # get all visually connected links\n if not self.directions:\n directions = {}\n unhandled_links = list(self.get_linked_neighbors().keys())\n\n # get all straight lines (n-s, sw-ne etc) we can trace through\n # the dynamic link and remove them from the unhandled_links list\n unhandled_links_copy = unhandled_links.copy()\n for direction in unhandled_links_copy:\n if REVERSE_DIRECTIONS[direction] in unhandled_links_copy:\n directions[direction] = REVERSE_DIRECTIONS[\n unhandled_links.pop(unhandled_links.index(direction))\n ]\n\n # check if we have any non-cross-through paths left to handle\n n_unhandled = len(unhandled_links)\n if n_unhandled:\n # still remaining unhandled links. 
If there's not exactly\n # one 'incoming' and one 'outgoing' we can't figure out\n # where to go in a non-ambiguous way.\n if n_unhandled != 2:\n links = \", \".join(unhandled_links)\n raise MapParserError(\n f\"cannot determine how to connect in/out directions {links}.\", self\n )\n\n directions[unhandled_links[0]] = unhandled_links[1]\n directions[unhandled_links[1]] = unhandled_links[0]\n\n self.directions = directions\n\n return self.directions.get(start_direction)", "def calculate_head_direction_from_leds(positions, return_as_deg=False):\n X_led1, Y_led1, X_led2, Y_led2 = positions[:, 0], positions[:, 1], positions[:, 2], positions[:, 3]\n # Calculate head direction\n head_direction = np.arctan2(X_led1 - X_led2, Y_led1 - Y_led2)\n # Put in right perspective in relation to the environment\n offset = +np.pi/2\n head_direction = (head_direction + offset + np.pi) % (2*np.pi) - np.pi\n head_direction *= -1\n if return_as_deg:\n head_direction = head_direction * (180 / np.pi)\n\n return head_direction", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def lendiag(self):\n if self.y <= self.x:\n return self.y\n else:\n return self.x", "def _get_vrf_label_direction(self):\n return self.__vrf_label_direction", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def directions(self):\n return self._directions", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def bullet_direction(self) -> Direction:\n # Randomly get a direction\n if self.get_random_direction():\n direction = Direction.UP\n\n else:\n direction = Direction.DOWN\n\n return direction", "def get_chase_direction(self, options):\n pick_direction = None\n target_pos = (self.target.rect.centerx, self.target.rect.centery)\n test = (abs(target_pos[0]), abs(target_pos[1]))\n prefer = test.index(max(test[0], test[1]))\n if prefer == 0: # x direction\n if target_pos[prefer] < self.rect.centerx: # to the left\n pick_direction = 'l'\n elif target_pos[prefer] > self.rect.centerx: # to the right\n pick_direction = 'r'\n else: # y direction\n if target_pos[prefer] < self.rect.centery: # upward\n pick_direction = 'u'\n elif target_pos[prefer] > self.rect.centery: # downward\n pick_direction = 'd'\n if pick_direction not in options: # desired direction not available\n if 'u' in options: # pick a direction that is available\n return 'u'\n if 'l' in options:\n return 'l'\n if 'r' in options:\n return 'r'\n if 'd' in options:\n return 'd'\n else: # desired direction available, return it\n return pick_direction", "def _determine_direction(self, degrees_left: float) -> float:\n if degrees_left >= 0:\n return 1.0\n else:\n return -1.0", "def get_current_facing_direction(self, DIRECTIONS=DIRECTIONS):\n 
return self.map_obstacle.get_current_facing_direction(DIRECTIONS=DIRECTIONS)", "def directionLeft(self):\n return self.__directionLeft", "def get_packet_direction(self, packet):\n\n src = packet.getlayer(IP).src\n if src == self.LOCAL_IP:\n return 1\n return -1", "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()" ]
[ "0.7764746", "0.74921775", "0.74461246", "0.7236827", "0.7222495", "0.7183815", "0.7064334", "0.7050146", "0.7007524", "0.69288164", "0.69204354", "0.69024444", "0.6901476", "0.6863641", "0.6818004", "0.67975605", "0.67823666", "0.6723684", "0.6717148", "0.67071515", "0.66958827", "0.6673717", "0.6671995", "0.66612595", "0.6649368", "0.6637177", "0.65936524", "0.6552766", "0.6549125", "0.6544914", "0.6479716", "0.6474207", "0.64737874", "0.6463769", "0.6421973", "0.6412422", "0.6407665", "0.64036214", "0.63845", "0.6373271", "0.63679874", "0.6355369", "0.63318455", "0.63305986", "0.63217896", "0.6316818", "0.63035995", "0.62919724", "0.6285385", "0.6279923", "0.62756586", "0.6270599", "0.6258995", "0.6255758", "0.62200236", "0.6217071", "0.6205322", "0.6202902", "0.62014675", "0.619659", "0.6190911", "0.6190911", "0.6184785", "0.6184785", "0.61770093", "0.61677504", "0.6140651", "0.6135124", "0.61210763", "0.6102917", "0.6096563", "0.60909855", "0.6088299", "0.6062807", "0.605576", "0.6048334", "0.6033234", "0.60300773", "0.6002436", "0.5996876", "0.5977283", "0.597682", "0.5973754", "0.59708726", "0.5968066", "0.59670895", "0.59670895", "0.59670895", "0.59670895", "0.59670895", "0.59670895", "0.59670895", "0.5950502", "0.5944714", "0.59352916", "0.5926134", "0.59202534", "0.5915104", "0.59092104", "0.5905641" ]
0.7374277
3
Get the line point closest to the origin. Returns
def get_origin(self):
    return self.zero
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def closest_point(self, l):\n cos = np.dot(self.direction, l.direction)\n n = 1 - cos ** 2\n if n < sys.float_info.epsilon:\n # Lines are parallel.\n return self.zero\n\n d0 = l.zero - self.zero\n a = np.dot(d0, self.direction)\n b = np.dot(d0, l.direction)\n return self.zero + self.direction * ( a - b * cos) / n", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def getClosestPointToLine(self, A, B, P):\n AP = XYPoint(P.x - A.x, P.y - A.y)\n AB = XYPoint(B.x - A.x, B.y - A.y)\n ab2 = AB.x * AB.x + AB.y * AB.y\n ap_ab = AP.x * AB.x + AP.y * AB.y\n t = ap_ab / ab2\n\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n\n return XYPoint(A.x + AB.x * t, A.y + AB.y * t)", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return 
_vec_add(begin, _vec_scale(b2e, u))", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def line_locate_point(self, right: PointValue) -> ir.FloatingValue:\n return ops.GeoLineLocatePoint(self, right).to_expr()", "def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def _get_closest(self, x, y, clients):\n target = min(\n clients,\n key=lambda c: math.hypot(c.x - x, c.y - y),\n default=self.clients.current_client,\n )\n return target", "def getOrdinate(self):\n return self.point.y - self.slope * self.point.x", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def slope_from_origin(self):\n\n return (self.y / self.x)", "def closest_point(\n self, lx: float, ly: float, reference: Output | None = None\n ) -> tuple[float, float]:\n if reference:\n reference_ptr = reference._ptr\n else:\n reference_ptr = ffi.NULL\n\n dest_lx = ffi.new(\"double *\")\n dest_ly = ffi.new(\"double *\")\n lib.wlr_output_layout_closest_point(\n self._ptr, reference_ptr, lx, ly, dest_lx, dest_ly\n )\n return dest_lx[0], dest_ly[0]", "def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz", "def getPointClosestToXY(self, x, y, alter='', offsets=False):\n if isinstance(alter, str):\n alter = ['', alter]\n # select most suitable point based on x\n datax = self.x_offsets(alter=alter[0])\n absm = np.abs(datax - x)\n idx = np.where(absm == np.min(absm))\n if len(idx) == 0:\n idx = np.argmin(absm)\n elif len(idx) == 1:\n idx = idx[0]\n else:\n # len(idx) > 1: select most suitable point based on y\n datay = self.y_offsets(index=idx, alter=alter[1])\n absM = np.abs(datay - y)\n idX = np.where(absM == np.min(absM))\n if len(idX) == 0:\n idx = idx[0]\n elif len(idX) == 1:\n idx = idx[idX[0]]\n else: # equally close in x and y -> returns first datapoint found\n idx = idx[idX[0]]\n idxOut = idx if len(idx) <= 1 else idx[0]\n if offsets:\n # no alter, but offset for the return value\n return self.x_offsets(index=idx)[0], self.y_offsets(index=idx)[0], idxOut\n # no alter, no offsets for the return value\n return self.x(index=idx)[0], self.y(index=idx)[0], idxOut", "def FindClosestPoint(self, ):\n ...", "def slope_from_origin(self):\n\n return self.y / self.x", "def closestIntersectionPoint(origin, direction, outline, maxDistance):\n testLine = LineString([origin, origin + direction * maxDistance])\n 
inter = testLine.intersection(outline)\n if inter.is_empty:\n if TABFAIL_VISUAL:\n import matplotlib.pyplot as plt\n\n plt.axis('equal')\n x, y = outline.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n x, y = testLine.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n plt.show()\n raise NoIntersectionError(f\"No intersection found within given distance\", origin)\n origin = Point(origin[0], origin[1])\n geoms = list()\n for geom in listGeometries(inter):\n if isinstance(geom, Point):\n geoms.append(geom)\n elif isinstance(geom, LineString):\n # When a linestring is an intersection, we know that the starting or\n # ending points are the nearest one\n geoms.extend([Point(geom.coords[0]), Point(geom.coords[-1])])\n else:\n raise TypeError(f\"intersection() returned an unsupported datatype: {geom.__class__.__name__}\")\n return min([(g, origin.distance(g)) for g in geoms], key=lambda t: t[1])[0]", "def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)", "def slope_from_origin(self):\n return round(math.degrees(abs(math.atan(self.y/self.x))), 2)", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def get_closest_point(path, point):\n np_path = convert_path_type(path) # modify path to be a numpy array\n np_point = convert_point_type(point) # modify point to be a [x,y,z] numpy array\n\n # compute the distance from current location to every point in path and find index of the min distance\n distances = ((np_path[:,0] - np_point[0])**2 + (np_path[:,1] - np_point[1])**2)**0.5\n closest_idx = np.argmin(distances)\n\n if closest_idx != len(np_path) - 1: # check if this point is behind current location, if so use index+1\n closest_point = np_path[closest_idx]\n next_closest_point = np_path[closest_idx+1]\n\n # create vectors between the three points\n path_vector = next_closest_point - closest_point\n current_vector = np_point - closest_point\n\n # compute dot product to figure out whether location is behind or in front of closest_point\n dot_prod = np.dot(path_vector, current_vector)\n\n if dot_prod >= 0: # closest point is behind current location\n closest_idx += 1\n\n closest_point = path[closest_idx] # retrieve point from original `path` argument for type consistency\n\n return closest_point, closest_idx", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = 
point\n min_dist = dist\n\n return min_dist, min_point", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def point_to_line_abs(p: Vec2, p0: Vec2, p1: Vec2):\n return abs(point_to_line_signed(p, p0, p1))", "def nearest_on_boundary(self, point):\n _, minpt = self._nearest_to_point(point)\n return Point(minpt, crs=self.crs)", "def get_line_to(self,target):\n\n m = (target.y - self.y) / (target.x - self.x)\n\n b = self.y - m * self.x\n\n return (m,b)", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def distance_from_origin(self):\n return ((self.x ** 2) + (self.y ** 2)) **0.5", "def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]", "def getOffsetLine(self, distance, side=c.INSIDE):\n StartA = np.array([self.start.x, self.start.y])\n EndA = np.array([self.end.x, self.end.y])\n r = StartA - EndA #The slope vector of self\n rn = np.array([-r[c.Y], r[c.X]]) #flip x and y and inverse y to get the normal vector of the slope\n rn = rn/np.linalg.norm(rn)*distance #normalize by dividing by its magnitude and multipy by distance to get the correct length\n \n if side == c.INSIDE:\n return self.translate(-rn[c.X], -rn[c.Y]) #the \"minus\" side line is the left side which is inside.\n \n return self.translate(rn[c.X], rn[c.Y]) #the \"Plus\" side of the line is the right side which is outside.", "def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line", "def getNearestEdge(self, point):\n edge = mm.idx.nearest((point.getPoint().x, point.getPoint().y), 
objects=True)\n edges = [e.object for e in edge]\n if len(edges) == 1:\n result = edges[0]\n else:\n dist = 99999999999999999999999999999999999999999\n for edge in edges:\n distance = point.getPoint().distance(edge.getGeometry())\n if distance < dist:\n dist = distance\n result = edge\n return result", "def closest_point(self, point, maxdist=0.0):\n face, point = self.geometry.ClosestPoint(Rhino.Geometry.Point3d(*point), maxdist)\n return list(point)", "def calc_line(start, target, map):\n\t\"\"\" Returns the real world point at the farthest range \"\"\"\n\tdx = abs(target[0] - start[0])\n\tdy = abs(target[1] - start[1])\n\txi = start[0]\n\tyi = start[1]\n\tn = 1 + dx + dy\n\tx_dir = np.sign(target[0] - start[0])\n\ty_dir = np.sign(target[1] - start[1])\n\terror = dx - dy;\n\tdx *= 2\n\tdy *= 2\n\n\tfor i in xrange(n):\n\t\tif map.grid[xi,yi] is not map.empty and map.grid[xi,yi] > 0:\n\t\t\treturn xi, yi\n\n\t\tif error > 0:\n\t\t\txi += x_dir\n\t\t\terror -= dy\n\t\telse:\n\t\t\tyi += y_dir\n\t\t\terror += dx\n\treturn target", "def closest_point(p1: Vector3, p2: Vector3, p3: Vector3) -> Vector3:\n k = ((p2.y - p1.y) * (p3.x - p1.x) - (p2.x - p1.x) * (p3.y - p1.y)) / ((p2.y - p1.y) ** 2 + (p2.x - p1.x) ** 2)\n x4 = p3.x - k * (p2.y - p1.y)\n y4 = p3.y + k * (p2.x - p1.x)\n\n return Vector3(x4, y4, 0)", "def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]", "def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat", "def get_closest_waypoint(self, x, y):\n # TODO implement\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx", "def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])", "def getNearestCar(self, position, line=0):\n return self.getNearestObjectInArray(self._cars, position, line)", "def DistanceFromOrigin(self):\r\n return ((self.x ** 2) + (self.y ** 2)) ** 0.5", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def get_path_point(self):\n if len(self.current_path) == 0:\n return np.zeros(2), -1\n ego_x = self.pose.position.x\n ego_y = self.pose.position.y\n ego_pose = np.array([[ego_x, ego_y]])\n disps = (ego_pose - self.current_path)\n dists = np.hypot(disps[:, 0], disps[:, 1])\n path_point_idx = np.argmin(dists[self.path_point_idx:]) + self.path_point_idx\n path_point = self.current_path[path_point_idx]\n return path_point, path_point_idx", "def getNearestGraphicsPoint(self, x, y):\n return self._getNearestGraphic(x, y, Field.DOMAIN_TYPE_POINT)", "def closest_point_to(self, p):\n p = np.array(p)\n # align with z-axis so all triangle 
have same z-coord\n tri_rot, rot = self.align_with([0,0,1])\n tri_rot_z = tri_rot.a[-1]\n p_rot = np.dot(rot, p)\n\n p_2d = p_rot[:2]\n tri_2d = geometry2d.Triangle(tri_rot.a[:2], tri_rot.b[:2], tri_rot.c[:2])\n\n if tri_2d.is_inside(p_2d):\n # projects onto triangle, so return difference in z\n return np.dot(np.linalg.inv(rot), np.array(list(p_2d) + [tri_rot_z]))\n else:\n closest_pt_2d = tri_2d.closest_point_to(p_2d)[1]\n\n closest_pt_3d = np.array(list(closest_pt_2d) + [tri_rot_z])\n\n return np.dot(np.linalg.inv(rot), closest_pt_3d)", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0", "def _closest_point(self, x, z, start_param, Ns):\n pi = np.pi\n def f(t):\n px, pz = self(t)\n return np.sqrt((x-px)**2 + (z-pz)**2)\n if start_param is None:\n x0 = brute(lambda x: f(x[0]), [[0, pi]], Ns=Ns, finish=None)\n step = np.pi/(Ns-1)\n res = minimize_scalar(\n f, bounds=[max(0, x0-step), min(np.pi, x0+step)], method='bounded',\n options=dict(xatol=1e-12),\n )\n else:\n res = minimize_scalar(f, bracket=(start_param, pi/Ns),\n options=dict(xtol=1e-12))\n la = res.x\n return la", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def findPointOnLine(node1, node2, distance):\n m, b, _ = geometry.lineSpec(node1, node2)\n \n xy = []\n if m == True: # parallel to y axis\n xy.append(node1[0])\n if node1[1] <= node2[1]:\n xy.append(node1[1] + distance)\n else:\n xy.append(node1[1] - distance)\n \n elif m == False: # parallel to x axis\n if node1[0] <= node2[0]:\n xy.append(node1[0] + distance)\n else:\n xy.append(node1[0] - distance)\n xy.append(node1[1])\n \n else:\n x = sp.Symbol('x')\n z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2\n xSolution = sp.solve(z, x)\n \n for xSol in xSolution:\n if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):\n xy.append(xSol)\n xy.append(xSol*m + b)\n return xy", "def calculate_line(min_position):\n # School algebra to calculate the line coordinates given two points\n if min_position == \"right\": # Line from left end to min\n x1 = energy[0]\n x2 = energy[counts_min_index]\n y1 = counts[0]\n y2 = counts_min\n slope = (y1 - y2) / (x1 - x2)\n b = (x1 * y1 - x2 * y1) / (x1 - x2)\n line_end = slope * energy[-1] + b\n return np.linspace(counts[0], line_end, len(energy))\n elif min_position == \"left\":\n x1 = energy[counts_min_index]\n x2 = energy[-1]\n y1 = counts_min\n y2 = counts[-1]\n slope = (y1 - y2) / (x1 - x2)\n b = (x1 * y1 - x2 * y1) / (x1 - x2)\n line_end = slope * energy[0] + b\n return np.linspace(line_end, counts[-1], len(energy))", "def lidar_relative(self):\n return self.distance", "def closest_point(\n self, points: Union[List[\"PointMixin\"], \"PointMixin\"]\n ) -> pd.Series:\n from ..core.distance import closest_point as cp\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if not isinstance(points, list):\n points = [points]\n\n return min(\n (cp(self.data, point) for point in points),\n key=attrgetter(\"distance\"),\n )", "def get_lookahead_point(self):\n lookahead_target_dist = self.lookahead_dist #+ (1 + self.curr_v)\n\n if self.path_point_idx == len(self.current_path) - 1 or self.path_point_idx == -1:\n #End of path, no more lookahead\n 
return self.path_point\n\n prev_pt = self.current_path[self.path_point_idx]\n curr_pt = self.current_path[self.path_point_idx + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist = pt_dist\n c = self.path_point_idx\n while curr_dist < lookahead_target_dist and c < len(self.current_path) - 1:\n prev_pt = self.current_path[c]\n curr_pt = self.current_path[c + 1]\n pt_dist = np.hypot((prev_pt - curr_pt)[0], (prev_pt - curr_pt)[1])\n curr_dist += pt_dist\n c += 1\n\n if curr_dist < lookahead_target_dist:\n return self.current_path[-1]\n else:\n #Interpolate to get the actual lookahead point\n frac = (curr_dist - lookahead_target_dist) / pt_dist\n pt = frac * prev_pt + (1-frac) * curr_pt\n return pt", "def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def get_origin(self) -> Vec:\n size_min, size_max = self.get_bbox()\n origin = (size_min + size_max) / 2\n return origin", "def get_start_point(self):\n return self.first_point", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def test_straight_line(self):\n test_x = np.linspace(0, 9, 10)\n test_y = np.linspace(0, 18, 10)\n result_y = utils.straight_line(test_x, 2, 0)\n assert_almost_equal(result_y, test_y)", "def get_origin(self) -> Vec:\n if self.is_brush():\n bbox_min, bbox_max = self.get_bbox()\n return (bbox_min + bbox_max) / 2\n else:\n return Vec.from_str(self['origin'])", "def closest_point_in_cloud(point, cloud):\n data = sort_points(point, cloud)\n return data[0]", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis", "def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def origin_x(self):\n return self._origin[0]", "def distanceFromOrigin(self):\n return ((self.x)**2+(self.y)**2)**0.5", "def FindClosestPointWithinRadius(self, p_float, , 
p_float_4):\n ...", "def getMinimum(self):\n v1 = Vector(*self.p1)\n v2 = Vector(*self.p2)\n if v1.angle < v2.angle:\n return self.p1\n else:\n return self.p2", "def _get_closest_waypoint(self, pose):\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = self.waypoints_tree.query([x,y],1)[1]\n\n return closest_idx", "def getMidPoint(self):\n return p.Point((self.start.normalVector + self.end.normalVector)/2.0)", "def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p", "def nearest(self, pose):\n # type: (Pose) -> Pose\n assert (self.nodes), 'No nodes.'\n closest = min(self.nodes, key=lambda x: self.dist(x, pose))\n return closest", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def get_starting_point(self, Otrain, Ftrain, y):\n return self.get_curve_fmin(Otrain, Ftrain, [y])\n # xx = np.linspace(np.min(Otrain), np.max(Otrain), 50)\n # scores, xx = self.compute_scores(Otrain, Ftrain, y, xx)\n # bestScore = np.max(scores)\n # Ibest = np.where(scores == bestScore)[0]\n # x = xx[Ibest[0]]\n return x", "def get_point_line_distance(point, r0, n):\n dr = (point[0] - r0[0], point[1] - r0[1])\n return sc_mul(dr, n) / norm_2d(n)", "def get_origin(self):\n return self.coord_cls(x=0, y=0, system=self)", "def min(self):\n return self._min_coords", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b" ]
[ "0.752301", "0.741795", "0.73585206", "0.7312732", "0.7232914", "0.71674716", "0.71045464", "0.69818723", "0.6970222", "0.69567466", "0.6912792", "0.6912227", "0.68720096", "0.67963266", "0.67862755", "0.6759423", "0.67402595", "0.6738672", "0.67113775", "0.67095816", "0.66979676", "0.6675266", "0.6662269", "0.6620485", "0.661855", "0.658058", "0.65729797", "0.6563381", "0.6558426", "0.65561014", "0.6544292", "0.65154475", "0.6477685", "0.64316076", "0.64236057", "0.6384932", "0.63502836", "0.6349206", "0.63405126", "0.6312875", "0.6302635", "0.62821877", "0.62821877", "0.62821877", "0.62821877", "0.62821877", "0.62821877", "0.62821877", "0.6277931", "0.62448967", "0.6239906", "0.62248516", "0.618205", "0.6179189", "0.6174241", "0.6169987", "0.6163672", "0.6162215", "0.615592", "0.614391", "0.6130242", "0.6128514", "0.61092913", "0.6101438", "0.60987926", "0.6094089", "0.6089966", "0.60784984", "0.6040719", "0.6032649", "0.60277635", "0.60005385", "0.5973175", "0.59428614", "0.5935781", "0.5906012", "0.5904057", "0.59031564", "0.59022665", "0.5896214", "0.5890317", "0.5879296", "0.5867532", "0.58674824", "0.58545744", "0.5854123", "0.58540523", "0.5849644", "0.58342165", "0.5821378", "0.58161795", "0.58157533", "0.5806183", "0.5805129", "0.580442", "0.58009535", "0.5799484", "0.5798553", "0.5790602", "0.5784764", "0.5778638" ]
0.0
-1
Function to get the abscissa of a point with respect to a line. The abscissa is 0 if the projection of the point and the projection of the frame origin on the line are the same point.
def get_abscissa(self, p): return np.dot(p - self.zero, self.direction)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def point_to_line_abs(p: Vec2, p0: Vec2, p1: Vec2):\n return abs(point_to_line_signed(p, p0, p1))", "def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b", "def determine_angle_slope(line, ax):\n x, y = line.get_data()\n\n sp1 = ax.transData.transform_point((x[0],y[0]))\n sp2 = ax.transData.transform_point((x[-1],y[-1]))\n\n rise = (sp2[1] - sp1[1])\n run = (sp2[0] - sp1[0])\n\n return degrees(atan(rise/run))", "def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection", "def slope_from_origin(self):\n\n return self.y / self.x", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def slope_from_origin(self):\n\n return (self.y / self.x)", "def center_point(polyline):\n\tpts = unique(polyline.points)\n\treturn sum(pts) / len(pts)", "def distance_point_line_sqrd(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector_sqrd(cross_vectors(pa, pb))\n l_ab = length_vector_sqrd(ab)\n return l / l_ab", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def slope_from_origin(self):\n return round(math.degrees(abs(math.atan(self.y/self.x))), 2)", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def process_laneOffset(self):\n center_line = np.poly1d(np.mean([self.line_l.get_LinePoly().coeffs, self.line_r.get_LinePoly().coeffs], axis=0))\n # store the center line polynomial\n self.center_poly = center_line\n center_point = IMAGE_WIDTH/2 - center_line(709)\n 
offset_from_center =center_point* self.line_l.x_pxm\n self.lane_offset = offset_from_center\n return center_point", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def get_angle_sign_pt_to_line(point, line_point1, line_point2):\n projection = get_projection_of_pt_on_line(point, line_point1, line_point2)\n if line_point1.x <= line_point2.x:\n if point.y >= projection.y:\n sign = 1\n else:\n sign = -1\n else:\n if point.y >= projection.y:\n sign = -1\n else:\n sign = 1\n return sign", "def point_at(self, abscissa):\n return self.zero + abscissa * self.direction", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def slope(point_a, point_b, flip):\n\n x_a, y_a = point_a\n x_b, y_b = point_b\n\n dx = x_b - x_a\n dy = y_b - y_a\n\n return -dx / dy if flip else dy / dx", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])", "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def slope(start, end):\n\tx1 = start[0]\n\ty1 = start[1]\n\tx2 = end[0]\n\ty2 = end[1]\n\ttop = float(y2 - y1) \n\tbot = float(x2 - x1)\n\tif bot == 0:\n\t\treturn None\n\telse:\n\t\treturn top / bot", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def rl_get_point() -> int: # pragma: no cover\n if rl_type == RlType.GNU:\n return ctypes.c_int.in_dll(readline_lib, \"rl_point\").value\n\n elif rl_type == RlType.PYREADLINE:\n return int(readline.rl.mode.l_buffer.point)\n\n else:\n return 0", "def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]", "def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)", "def get_segment_slope(segment: Tuple[Point]):\n return (\n (segment[0].y - segment[1].y) / (segment[0].x - segment[1].x)\n if (segment[0].x - segment[1].x) != 0\n else float(\"inf\")\n )", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is 
{}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def abline(slope, intercept):\n global axes\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n return plt.plot(x_vals, y_vals)", "def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat", "def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])", "def get_x_y_for_line(bounds, y_intercept, slope): \n\n x = np.sort(bounds)\n\n y = y_intercept + (slope * x)\n\n return x, y", "def find_centers(line_complex):\n # There is a line where the flux is at a minimum, i.e., the second\n # derivative is positive.\n diff2 = numpy.diff(numpy.sign(numpy.diff(line_complex)))\n zero_crossings = numpy.where(diff2 > 0.)[0]\n return zero_crossings + 1", "def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def get_fit_x(self, y):\n if self.line_fit_m.size == 0:\n return np.empty(y.shape)\n fit = self.line_fit\n return np.array(fit[0] * y ** 2 + fit[1] * y + fit[2]).astype(\"int\")", "def xintercept(self):\n if self.slope() == 0:\n return None\n else:\n return self.c/self.a", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals)", "def test_straight_line(self):\n test_x = np.linspace(0, 9, 10)\n test_y = np.linspace(0, 18, 10)\n result_y = utils.straight_line(test_x, 2, 0)\n assert_almost_equal(result_y, test_y)", "def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)", "def getClosestPointToLine(self, A, B, P):\n AP = XYPoint(P.x - A.x, P.y - A.y)\n AB = XYPoint(B.x - A.x, B.y - A.y)\n ab2 = AB.x * AB.x + AB.y * AB.y\n ap_ab = AP.x * AB.x + AP.y * AB.y\n t = ap_ab / ab2\n\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n\n return XYPoint(A.x + AB.x * t, A.y + AB.y * t)", "def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])", "def lineShape(self,peakAngle,width,angle):\n x = abs(peakAngle - angle)/width\n if x 
== 0.0:\n return 1.0\n else:\n return (2*j1(x)/x)**2", "def dist_to_line2d(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# intersection point\n\tx = x1 + u*(x2-x1)\n\ty = y1 + u*(y2-y1)\n\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, '--')", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, '--')", "def abline(slope, intercept):\n axes = plt.gca()\n x_vals = np.array(axes.get_xlim())\n y_vals = intercept + slope * x_vals\n plt.plot(x_vals, y_vals, '--', color='r')", "def calculate_slope_between_two_points(point_a: Dict[str,float], point_b: Dict[str, float]) -> float: # _5 [✅] \n if len(point_a) == len(point_b) == 0: raise ValueError\n if set(point_a).symmetric_difference(set(point_b)) == set():\n return float('inf') if int(point_b['x'] - point_a['x']) == 0 else int((int(point_b['y'] - point_a['y']) / int(point_b['x'] - point_a['x'])))\n elif set(point_a).symmetric_difference(set(point_b)) != set(): raise ValueError\n elif point_a['x'] == point_b['x'] and point_b['y'] == point_a['y']: return float('inf')", "def GetLineAlpha(self):\n return self._attalpha[\"line\"]", "def get_point_line_distance(point, r0, n):\n dr = (point[0] - r0[0], point[1] - r0[1])\n return sc_mul(dr, n) / norm_2d(n)", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def compute_line_coefs(point_a, point_b):\n b_coef = -1\n if (point_b[0] - point_a[0]) == 0:\n a_coef = 0\n else:\n a_coef = (point_b[1] - point_a[1]) / (point_b[0] - point_a[0])\n c_coef = point_b[1] - a_coef*point_b[0]\n return np.array([a_coef, b_coef, c_coef])", "def _calculate_slope(klass, p1, p2):\n xdiff = p1.x - p2.x\n if xdiff:\n return (p1.y - p2.y) / xdiff\n else:\n return float(\"+inf\")", "def get_line_end_pts(line_segment, y1, y2):\n if line_segment is None:\n return None\n\n slope, intercept = line_segment\n\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return x1, y1, x2, y2", "def find_absolute_value(x):\n return math.fabs(x)", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)", "def get_lx(self):\r\n return int(self.dx * self.nx - self.ox)", "def abline(points, slope, intercept):\n x_values = get_column(points, 0)\n return [slope * i + intercept for i in x_values]", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def slope(self, x1, y1, x2, y2):\n if x1 == x2:\n slope = np.inf\n else:\n slope = (y2-y1)/(x2-x1)\n\n return np.math.atan(slope)", "def line_segment_intersection(line1,\n line2):\n a = 
float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def line_length_angle(line:tuple)->tuple:\n squared_dist = point_sqr_distance(line[0], line[1])\n if squared_dist == 0:\n return 0,1\n distance = math.sqrt(squared_dist)\n angle_cosine = (line[1][0] - line[0][0]) / distance\n return squared_dist, angle_cosine", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def vertex(self):\n if self.a != 0.0:\n # Find x where f'(x) = 2ax + b = 0\n x = -0.5 * self.b / self.a\n return (x, self.f(x))\n else:\n # Quadratic is actually a line, no minimum!\n return (None, None)", "def getOrdinate(self):\n return self.point.y - self.slope * self.point.x", "def slope(x1, y1, x2, y2):\n return (y2 - y1) / (x2 - x1)", "def xmin(self):\n return self.bbox[0][0]", "def project(self, point):\n return np.round(project(self.camera.P, point)).astype(int)", "def __abs__(self) -> PointType:\n return Point(abs(self.x), abs(self.y))", "def getMinAbundanceOfClrSample(self):\n #try: minimum = min(self.clr_sample['abundance'])-0.001\n try: minimum = min(self.clr_sample)-0.01\n except: minimum = 0\n return minimum", "def point_double(self, a):\n x1, y1 = modp(self.p, a.x, a.y)\n L = (3 * x1 ** 2 + self.a) / (2 * y1)\n x3 = L ** 2 - 2 * x1\n y3 = L * (x1 - x3) - y1\n return point.xy(int(x3), int(y3))", "def find_line_model(points):\n\n # [WARNING] vertical and horizontal lines should be treated differently\n # here we just add some noise to avoid division by zero\n\n # find a line model for these 
points\n m = (points[1, 1] - points[0, 1]) / (\n points[1, 0] - points[0, 0] + sys.float_info.epsilon) # slope (gradient) of the line\n c = points[1, 1] - m * points[1, 0] # y-intercept of the line\n\n return m, c", "def get_scale_from_linear_transform(A):\n _, _, S = decompose_rws(A)\n return abs(S.a), abs(S.e)", "def xymaxw_to_line(point, edge, walls):\r\n#\tif min(x1,x2) <= x <= max(x1,x2) and min(y1,y2) <= y <= max(y1,y2):\r\n#\t\treturn 0\r\n\t(x,y) = point\r\n\t((x1,y1),(x2,y2)) = edge\r\n\tif x1 == x2:\r\n\t\tds = [max(abs(x1-x), abs(y3-y)) \\\r\n\t\t\tfor y3 in range(min(y1,y2),max(y1,y2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x1,y3)), walls)]\r\n\telse:\r\n\t\tds = [max(abs(x3-x), abs(y1-y)) \\\r\n\t\t\tfor x3 in range(min(x1,x2),max(x1,x2)+1) \\\r\n\t\t\tif not racetrack.crash(((x,y),(x3,y1)), walls)]\r\n\tds.append(infinity)\r\n\treturn min(ds)", "def calc_slope(self, left, right):\n return (left[1] - right[1]) / (left[0] - right[0])", "def apoint(rpoint):\r\n tempy = gv[\"fixedLL\"][1] + gv[\"globalscale\"]*rpoint[1]*(gv[\"fixedUR\"][1]-gv[\"fixedLL\"][1])\r\n if gv[\"localxscale\"] != -1:\r\n tempx = gv[\"fixedLL\"][0] + gv[\"localxscale\"]*gv[\"globalscale\"]*rpoint[0]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0])\r\n else:\r\n tempx = gv[\"fixedLL\"][0] + gv[\"globalscale\"]*rpoint[0]*(gv[\"fixedUR\"][0]-gv[\"fixedLL\"][0])\r\n if tempx - gv[\"fixedUR\"][0] > 0 and tempx - gv[\"fixedUR\"][0] < 1e-7:\r\n tempx = gv[\"fixedUR\"][0]\r\n if tempx > gv[\"fixedUR\"][0]:\r\n print ( \"problem x value : \",tempx, \" max x allowed : \",gv[\"fixedUR\"][0])\r\n return [tempx,tempy]", "def LineMaxDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MaximumDistanceTo(test)", "def getMidPoint(self):\n return p.Point((self.start.normalVector + self.end.normalVector)/2.0)", "def comp_point_ref(self, is_set=False):\n\n point_list = list()\n for line in self.get_lines():\n point_list.append(line.get_middle())\n point_ref = sum(array(point_list)) / len(point_list)\n\n if is_set:\n self.point_ref = point_ref\n return point_ref", "def bytes2slope(bytes):\n return pi/2 - numpy.arcsin(bytes.astype(numpy.float32) / 0xFF)", "def root1(self):\r\n if self.discriminant() < 0.0:\r\n return None\r\n return(-self.__b + math.sqrt(self.discriminant()))/(2*self.__a)", "def y_x(self, x: datetime) -> float:\n return self.point_1_price + self.slope * ((x - self.point_1_moment).total_seconds())", "def test_point_positive_on_one_line(self):\n a = Point(1, 0)\n b = Point(34, 0)\n c = Point(42, 0)\n\n self.assertTrue(Point.on_one_line(a, b, c),\n \"Test of Point.on_one_line(a, b, c) failed, returned value != True.\")\n d = Point(1, 2)\n e = Point(34, 43)\n f = Point(42, 54)\n\n self.assertFalse(Point.on_one_line(d, e, f),\n \"Test of Point.on_one_line(d, e, f) failed, returned value != False.\")\n\n self.assertTrue(Point.on_one_line(a), \"Test of Point.on_one_line(a) failed, returned value != True.\")", "def find_slope(lat1,lon1,lat2,lon2):\n return (lon2-lon1)/(lat2-lat1)", "def slope(a, b):\n a1, a2 = PC_matrix[:, 0][a], PC_matrix[:, 1][a]\n b1, b2 = PC_matrix[:, 0][b], PC_matrix[:, 1][b]\n \n return b1-a1, b2-a2", "def offset_slope(self):\n foc_um_slope = self.focus_slope * self.pix_size\n offset_slope = 0.5 * foc_um_slope / np.tan(self.convergence_angle)\n return offset_slope", "def abline(self,slope, intercept,axis):\n #axis = plt.gca()\n x_vals = 
np.array(axis.get_xlim())\n y_vals = intercept + slope * x_vals\n axis.plot(x_vals, y_vals, 'k--')", "def _from_pixels_abs(self, point):\n point = self.resolution.from_pixels(point)\n self.max_x = max(self.max_x, point[0])\n self.max_y = max(self.max_y, point[1])\n self.min_x = min(self.min_x, point[0])\n self.min_y = min(self.min_y, point[1])\n return point", "def left(self) -> float:\n points = self.get_adjusted_points()\n x_points = [point[0] for point in points]\n return min(x_points)" ]
[ "0.659462", "0.6087625", "0.6012542", "0.59856695", "0.58890575", "0.58882195", "0.5846869", "0.5727574", "0.57215995", "0.57181567", "0.57152516", "0.56363255", "0.5614506", "0.5601204", "0.55724233", "0.55630857", "0.55362034", "0.5515001", "0.5511656", "0.55000144", "0.5489459", "0.5450959", "0.544936", "0.5449059", "0.5403886", "0.54027635", "0.53956074", "0.53666526", "0.5341457", "0.5309213", "0.53049195", "0.529398", "0.5283273", "0.5278663", "0.5273281", "0.5257249", "0.52415454", "0.52279085", "0.5217257", "0.5215024", "0.5202557", "0.51967126", "0.5187059", "0.51633203", "0.51568425", "0.5156483", "0.515408", "0.5133628", "0.5120596", "0.5107236", "0.51035136", "0.51004034", "0.51004034", "0.50975674", "0.5095035", "0.509091", "0.5073433", "0.5072381", "0.5071985", "0.505391", "0.5051713", "0.5050831", "0.5047472", "0.5021474", "0.50157654", "0.5009194", "0.5003401", "0.5002932", "0.4998485", "0.49944562", "0.4994286", "0.4991777", "0.49912652", "0.49735445", "0.49714187", "0.49620292", "0.49498364", "0.49477735", "0.49459788", "0.4938374", "0.49368778", "0.4933514", "0.493327", "0.4932543", "0.49295282", "0.49238527", "0.49097952", "0.49092293", "0.49057716", "0.49057442", "0.49041235", "0.48909524", "0.48850974", "0.4884163", "0.48825264", "0.48750544", "0.4874849", "0.48737496", "0.48720574", "0.4866356" ]
0.6117362
1
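For readers skimming this dump, a minimal runnable sketch of the kind of line object the get_abscissa record above assumes: only the names `zero` (a point on the line), `direction`, and the body `np.dot(p - self.zero, self.direction)` come from the document snippet; the class shell, constructor, normalization, and example values are assumptions added here for illustration, not part of the dataset.

import numpy as np

class Line:
    # Hypothetical shell around the document snippet above; only `zero`,
    # `direction` and the body of get_abscissa are taken from the record.
    def __init__(self, zero, direction):
        self.zero = np.asarray(zero, dtype=float)        # reference point on the line
        d = np.asarray(direction, dtype=float)
        self.direction = d / np.linalg.norm(d)           # unit direction vector

    def get_abscissa(self, p):
        # Signed offset along the line between the projection of p and the
        # projection of `zero`; it is 0 when the two projections coincide.
        return np.dot(np.asarray(p, dtype=float) - self.zero, self.direction)

line = Line(zero=[0.0, 0.0], direction=[2.0, 0.0])
print(line.get_abscissa([3.0, 5.0]))   # 3.0: the projection lands 3 units along the line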
Function to get the point on the line at a given abscissa.
def point_at(self, abscissa): return self.zero + abscissa * self.direction
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def get_initial_point(self):\r\n if isinstance(self.pieces[0], LineSegment):\r\n return self.pieces[0].start", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)", "def get_horizontal_line(self, point: Sequence[float], **kwargs) -> Line:\n\n return self.get_line_from_axis_to_point(1, point, **kwargs)", "def begining_of_line():\r\n set_point(point().begining_of_line())", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def line_locate_point(self, right: PointValue) -> ir.FloatingValue:\n return ops.GeoLineLocatePoint(self, right).to_expr()", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def test_point_positive_on_one_line(self):\n a = Point(1, 0)\n b = Point(34, 0)\n c = Point(42, 
0)\n\n self.assertTrue(Point.on_one_line(a, b, c),\n \"Test of Point.on_one_line(a, b, c) failed, returned value != True.\")\n d = Point(1, 2)\n e = Point(34, 43)\n f = Point(42, 54)\n\n self.assertFalse(Point.on_one_line(d, e, f),\n \"Test of Point.on_one_line(d, e, f) failed, returned value != False.\")\n\n self.assertTrue(Point.on_one_line(a), \"Test of Point.on_one_line(a) failed, returned value != True.\")", "def line(points):\n return LineString(points)", "def getP1(self):\n return self.points[0]", "def point(self):\n return shapely.geometry.Point(self._x[0], self._x[1])", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def get_line(self, path, line):\n\t\tlines = self.find_source(path)\n\t\tif lines == None:\n\t\t\treturn None\n\t\telse:\n\t\t\ttry:\n\t\t\t\treturn lines[line - 1]\n\t\t\texcept IndexError:\n\t\t\t\treturn None", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def make_line_points(y1, y2, line):\n if line is None:\n return None\n\n slope, intercept = line\n\n # make sure everything is integer as cv2.line requires it\n x1 = int((y1 - intercept) / slope)\n x2 = int((y2 - intercept) / slope)\n y1 = int(y1)\n y2 = int(y2)\n\n return ((x1, y1), (x2, y2))", "def project_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n\n return add_vectors(a, c)", "def _get_relevant_line(self):\n # () -> (Phi.Line)\n line_name = self._get_line_name()\n print(\"looking for \"+str(line_name))\n return Phi.findLine(line_name)", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def mid(self, line):\n return [(line.x1 + line.x2) // 2, (line.y1 + line.y2) // 2]", "def next_line():\r\n set_point(point().next_line())", "def points (p, line: str) -> list:\n direction = line [0]\n steps = list (range (1, 1 + int (F.tail (line))))\n return F.map (point (p, direction)) (steps)", "def getPoint(self):\n return self.point", "def getPoint(self):\n return self.point", "def get_point(self):\n return self.point", "def get_start_point(self):\n return self.first_point", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def getPoint(self):\n return Point(*self.position)", "def get_projection_of_pt_on_line(point, line_point1, line_point2):\n projection = Point(-1, -1)\n projection.x = point.x\n if (line_point2.x - line_point1.x) != 0:\n projection.y = (projection.x - line_point1.x) * (line_point2.y - line_point1.y) / \\\n (line_point2.x - line_point1.x) + line_point1.y\n else:\n projection.y = (projection.x - 
line_point1.x) * (line_point2.y - line_point1.y) / 1 + line_point1.y\n return projection", "def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])", "def get_path_point(self):\n if len(self.current_path) == 0:\n return np.zeros(2), -1\n ego_x = self.pose.position.x\n ego_y = self.pose.position.y\n ego_pose = np.array([[ego_x, ego_y]])\n disps = (ego_pose - self.current_path)\n dists = np.hypot(disps[:, 0], disps[:, 1])\n path_point_idx = np.argmin(dists[self.path_point_idx:]) + self.path_point_idx\n path_point = self.current_path[path_point_idx]\n return path_point, path_point_idx", "def getPoint(self):\n return self._point", "def DrawLinePoint(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLinePoint(*args, **kwargs)", "def make_points(self,image,line):\n print(\"This is line inside make_points: \",line)\n try:\n slope, intercept = line\n y1 = int(image.shape[0]) # bottom of the image\n y2 = int(y1*3/5) # slightly lower than the middle\n x1 = int((y1 - intercept)/slope)\n x2 = int((y2 - intercept)/slope)\n return [[x1, y1, x2, y2]]\n except:\n return None", "def parsePoint(line):\n parts = line.split(\",\")\n return LabeledPoint(parts[0], [parts[1], parts[2]])", "def line(self):\r\n return self._get_instantiation()[1]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def line(self):\n return self[\"line\"]", "def pick_point_not_on_line_on_side(line: Line, side: Point, same_side=True):\n point2 = line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()\n if Construction.check_if_points_on_same_side(line, side, point2) == same_side:\n return point2\n else:\n return line.point1 - line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def get_line_from_axis_to_point(\n self,\n index: int,\n point: Sequence[float],\n line_func: Line = DashedLine,\n line_config: dict | None = None,\n color: ParsableManimColor | None = None,\n stroke_width: float = 2,\n ) -> Line:\n\n line_config = line_config if line_config is not None else {}\n\n if color is None:\n color = VMobject().color\n\n line_config[\"color\"] = ManimColor.parse(color)\n line_config[\"stroke_width\"] = stroke_width\n\n axis = self.get_axis(index)\n line = line_func(axis.get_projection(point), point, **line_config)\n return line", "def lineTo(self, pt: Tuple[float, float]) -> None:\n raise NotImplementedError", "def GetPoint1(self):\n ...", "def GetPoint1(self):\n ...", "def _get_line(self, line: int) -> str:\n line_offsets_with_sentinel = self._line_offsets + [len(self._text)]\n return self._text[line_offsets_with_sentinel[line]:line_offsets_with_sentinel[line+1]]", "def get_point_from_linestring(geom_row, X=0, behaviour='last'):\n\n lat = None\n lng = None\n try:\n X = round(X)\n except Exception as e:\n raise TypeError(\"Please enter a number for the index of the point within the linestring (X)\")\n\n if behaviour in 
['last', 'ignore']:\n pass\n else:\n behaviour = 'last'\n\n if type(geom_row) == shapely.geometry.multilinestring.MultiLineString:\n total_linestrings = len(geom_row)\n lengths = {}\n total_len = 0\n for line in range(total_linestrings):\n len_line = len(geom_row[line].xy[0])\n lengths[line] = len_line\n total_len += len_line\n if X > total_len and behaviour == 'ignore':\n return lng, lat\n elif X > total_len and behaviour == 'last' or X == -1:\n lat = geom_row[-1].xy[1][-1]\n lng = geom_row[-1].xy[0][-1]\n else:\n total = 0\n for key, val in lengths.items():\n # find the location of X within the dictionary by looking if its in a given key\n total += val\n if total >= X:\n ind_key = key\n dict_ind = (val - (total - X)) - 1 # minus 1 as Python has a base-0 index\n break\n lat = geom_row[ind_key].xy[1][dict_ind]\n lng = geom_row[ind_key].xy[0][dict_ind]\n\n elif type(geom_row) == shapely.geometry.linestring.LineString:\n len_line = len(geom_row.xy)\n lng = geom_row.xy[0][X]\n lat = geom_row.xy[1][X]\n\n return lng, lat", "def _extract_drift_point(self, line):\r\n\r\n self.drift_points.append(float(line.split(sep=',')[1]))\r\n return", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def DrawLinePoint(*args, **kwargs):\n return _gdi_.DC_DrawLinePoint(*args, **kwargs)", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def get_point_at(self, t):\n segment = self.get_segment_for_time(t)\n return segment.point_at(t)", "def GetLine(line):\r\n pass", "def previous_line():\r\n set_point(point().previous_line())", "def get_vertical_line(self, point: Sequence[float], **kwargs) -> Line:\n return self.get_line_from_axis_to_point(0, point, **kwargs)", "def getClosestPointToLine(self, A, B, P):\n AP = XYPoint(P.x - A.x, P.y - A.y)\n AB = XYPoint(B.x - A.x, B.y - A.y)\n ab2 = AB.x * AB.x + AB.y * AB.y\n ap_ab = AP.x * AB.x + AP.y * AB.y\n t = ap_ab / ab2\n\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n\n return XYPoint(A.x + AB.x * t, A.y + AB.y * t)", "def get_line_nr(view, point):\n return view.rowcol(point)[0] + 1", "def project_points_line(points, line):\n return [project_point_line(point, line) for point in points]", "def point(self) -> Point:\n return self._point", "def find_segment(p, line, start_vertex=0):\n EPS = 1e-9\n for seg in range(start_vertex, len(line)-1):\n if is_near(p, line[seg]):\n return seg, 0\n if line[seg][0] == line[seg+1][0]:\n if not (p[0]-EPS <= line[seg][0] <= p[0]+EPS):\n continue\n px = None\n else:\n px = (p[0] - line[seg][0]) / (line[seg+1][0] - line[seg][0])\n if px is None or (0 <= px <= 1):\n if line[seg][1] == line[seg+1][1]:\n if not (p[1]-EPS <= line[seg][1] <= p[1]+EPS):\n continue\n py = None\n else:\n py = (p[1] - line[seg][1]) / (line[seg+1][1] - line[seg][1])\n if py is None or (0 <= py <= 1):\n if 
py is None or px is None or (px-EPS <= py <= px+EPS):\n return seg, px or py\n return None, None", "def get_full_line(self):\n return self._line", "def getPoint(self, x, y):\n return self._c[x*self.__height + y]", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def __call__( self, line ):\n return self.__getitem__( line )", "def __call__( self, line ):\n return self.__getitem__( line )", "def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat", "def getLine(self, **kwargs):\n return Line(self.p1, self.angle, **kwargs)", "def point(self):\n return Point(self._x, self._y)", "def line (self):\n return self._line", "def LinePlane(line):\n line = rhutil.coerceline(line, True)\n rc, plane = line.TryGetPlane()\n if not rc: return scriptcontext.errorhandler()\n return plane", "def line(self):\n return self._line", "def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. 
check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)", "def findPointOnLine(node1, node2, distance):\n m, b, _ = geometry.lineSpec(node1, node2)\n \n xy = []\n if m == True: # parallel to y axis\n xy.append(node1[0])\n if node1[1] <= node2[1]:\n xy.append(node1[1] + distance)\n else:\n xy.append(node1[1] - distance)\n \n elif m == False: # parallel to x axis\n if node1[0] <= node2[0]:\n xy.append(node1[0] + distance)\n else:\n xy.append(node1[0] - distance)\n xy.append(node1[1])\n \n else:\n x = sp.Symbol('x')\n z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2\n xSolution = sp.solve(z, x)\n \n for xSol in xSolution:\n if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):\n xy.append(xSol)\n xy.append(xSol*m + b)\n return xy", "def to_line_start(self):\n # type: () -> LineNo\n metadata = self.safely_parse_metadata()\n return metadata[-1][0]", "def lineInQuad(self, line):\n from Drawables.Line import Line\n for idx in range(-1, 3):\n if Line.orientation(line, self.vertices[idx]) == 0:\n if Line.orientation(line, self.vertices[idx+1]) == 0:\n return idx+1\n if Line.orientation(line, self.vertices[idx-1]) == 0:\n return idx\n return None\n return None", "def compute_dual_line(P):\n return Line(P.x, -P.y)", "def dist_to_line2d(line, point):\n\tx1,y1 = line[0]\n\tx2,y2 = line[1]\n\tx3,y3 = point\n\t\n\t# where on line the perpendicular is\n\tu = ( ((x3-x1)*(x2-x1) + (y3-y1)*(y2-y1))\n\t\t\t/ (math.pow(x1-x2,2) + math.pow(y1-y2,2)) )\n\t\n\t# intersection point\n\tx = x1 + u*(x2-x1)\n\ty = y1 + u*(y2-y1)\n\t\n\tdist = math.sqrt(math.pow(x-x3,2)+math.pow(y-y3,2))\n\t\n\treturn dist", "def first_point(self) -> np.ndarray:\n return self.evaluate_at(arc_lengths=0)", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled", "def getNearestGraphicsPoint(self, x, y):\n return self._getNearestGraphic(x, y, Field.DOMAIN_TYPE_POINT)", "def getPoint(self, xyz):\n return Point( (xyz[0] + 0.5) * self.resolution\n , (xyz[1] + 0.5) * self.resolution\n , (xyz[2] + 0.5) * self.resolution\n )", "def line(intercept, slope, x):\n return slope*x + intercept", "def street_line_1(self):\n return self._street_line_1", "def get_point(self):\n return self._x, self._y", "def get_normal_dist(line, point):\n \n # Rotate: \n x_rot = np.cos(line[1])*point[0] + np.sin(line[1])*point[1]\n \n # Normal distance: x_rot - rho:\n return x_rot - line[0]", "def GetLineAlpha(self):\n return self._attalpha[\"line\"]", "def find_line_through_point(center, theta, length):\n\n r = length\n cx, cy = center\n\n xo = int(r * math.sin(theta))\n yo = int(r * math.cos(theta))\n\n line_start = cx, cy\n line_end = cx + xo, cy + yo\n\n return line_start, line_end", "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the 
negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def getLine(self, correct=True):\n return Line(self.point, self.angle, correct=correct)" ]
[ "0.7592827", "0.7068464", "0.6940365", "0.6910205", "0.6897533", "0.68894476", "0.6816368", "0.6789263", "0.67315537", "0.6678987", "0.66766155", "0.66728306", "0.66517955", "0.66410375", "0.663569", "0.6616116", "0.6613046", "0.65880394", "0.6556479", "0.6535376", "0.6529306", "0.651936", "0.65028447", "0.6502161", "0.6495765", "0.64537513", "0.6446241", "0.64351445", "0.6432725", "0.6419201", "0.6419201", "0.6412135", "0.6411126", "0.64055043", "0.6392727", "0.6371071", "0.6365354", "0.6354874", "0.6348348", "0.63322854", "0.6324272", "0.63045204", "0.6296083", "0.6279695", "0.6277899", "0.6277899", "0.6277899", "0.6277899", "0.6276785", "0.62750745", "0.62666535", "0.62553716", "0.62553716", "0.6247761", "0.62414354", "0.6233112", "0.62310237", "0.62232745", "0.62125313", "0.61952144", "0.61920094", "0.6164206", "0.61532325", "0.61476415", "0.6105786", "0.6100719", "0.6098268", "0.6089079", "0.60721", "0.605741", "0.60427445", "0.60421264", "0.60420763", "0.6038369", "0.6038369", "0.60372525", "0.60363847", "0.60359156", "0.60328895", "0.6026277", "0.60187346", "0.6016816", "0.60122883", "0.59985906", "0.59792405", "0.5977137", "0.5975747", "0.597537", "0.5968175", "0.59640396", "0.59639037", "0.5963832", "0.59625304", "0.59619707", "0.5958335", "0.5952735", "0.5943585", "0.5935865", "0.59293157", "0.59206665", "0.5914142" ]
0.0
-1
Function to check if the instance contains a point.
def contains(self, p): return self.distance(p=p) < self.tolerance
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __contains__(self, point: Point[Scalar]) -> bool:\n return point in self._points_set", "def __contains__(self, point: Point2D) -> bool:\n raise NotImplementedError", "def contains_point(self, point) -> bool:\n return (self.pos.x <= point[0] <= self.pos.x + self.size.x and\n self.pos.y <= point[1] <= self.pos.y + self.size.y and\n self.visible)", "def contains(self, point):\n raise Exception(\"contains not implemented.\")", "def contains(self, point):\n if in_range(point[0], self.xrange) and in_range(point[0], self.yrange) and in_range(point[0], self.zrange):\n return True\n return False", "def has_points(self):\n return bool(self.points)", "def __contains__(self, point):\n for component, dim in zip(point, self.dimensions):\n if component not in dim:\n return False\n return True", "def contains(self, point):\n return super().contains((point[0] - self.x, point[1] - self.y))", "def HasPoint(self, vtkAMRBox, , , p_float_6, p_float_7, p_float_8):\n ...", "def check_point(point,points):\n if point in points:\n return True\n else:\n return False", "def contains_pt(self, pt):\n x, y = pt\n if not self.x - self.radius < x < self.x + self.radius:\n return False\n if not self.y - self.radius < y < self.y + self.radius:\n return False\n return True", "def containsPoint(self, p):\n return self.frameGeometry().contains(p)", "def contains_point(self, point):\n\t\tthreshold = 0.6\n\t\tx = point[0]\n\t\ty = point[1]\n\t\tif (x >= (self.xmin - threshold) and x <= (self.xmax + threshold) and\n\t\t\ty >= (self.ymin - threshold) and y <= (self.ymax + threshold)):\n\t\t return True\n\t\treturn False", "def ContainsPoint(*args, **kwargs):\n return _gdi_.Region_ContainsPoint(*args, **kwargs)", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def contains_point(self, point):\n if self.orientation(point) == 0:\n return point >= min(self.begin, self.end) and point <= max(self.begin, self.end)\n\n return False", "def point_exists(self, point):\n qs = LocationPoint.objects.raw(\"\"\"\n SELECT * FROM script_execution_manager_locationpoint\n WHERE st_dwithin(\n thegeometry,\n st_transform(\n st_setsrid(\n st_point({point.x}, {point.y}), {point.srid}),\n 4326\n ),\n -- This should be approximately one meter.\n -- See: http://stackoverflow.com/a/8477438/198050\n -- 0.00001\n -- Gerrit Hendriksen (gerrit.hendriksen@deltares.nl) says\n -- 8*10e-6 is approximately one meter.\n 8.181818181818181e-06\n )\n \"\"\".format(point=point)\n )\n\n res = sum(1 for result in qs)\n return qs[0] if res else False", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n 
inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside", "def contains(self, point):\n return point in self.console", "def __contains__(self, pos):\n if pos in self._coordinates:\n return True\n return False", "def test_contains_point() -> None:\n point_1 = Point(1, 2)\n point_2 = Point(-2, -4)\n point_3 = Point(3, 3)\n point_4 = Point(0, 0)\n\n line_segment = LineSegment(first=point_1, second=point_2)\n\n assert line_segment.does_contain(point_1)\n assert line_segment.does_contain(point_2)\n assert not line_segment.does_contain(point_3)\n assert line_segment.does_contain(point_4)", "def contains(self, Union, QPointF=None, QPoint=None): # real signature unknown; restored from __doc__\n return False", "def contains_point(self, p):\n return self.begin <= p < self.end", "def contains_point(self, point):\n assert isinstance(point, collections.Iterable), \\\n 'Invalid list of points'\n point = list(point)\n geo = self.get_geometry()\n # Only planar points are checked now\n pnt = Point(point[0], point[1])\n return geo.contains(pnt)", "def __contains__(self, point):\n raise NotImplementedError(f\"The `in` operator is not supported for {self.__class__.__name__}\")", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def _isPoint(self):\n return (self.width == 0 and self.height == 1) or (self.height == 0 and self.width == 1)", "def has_point(self, point: AbstractPoint) -> bool:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n factor_a, factor_b = plane.calculate_point_factors(point)\n if factor_a is not None and factor_b is not None:\n return (0.0 <= factor_a <= 1.0 and 0.0 <= factor_b <= 1.0 and\n (factor_a + factor_b) <= 1.0)\n\n return False", "def is_point_within(self, x, y):\n return abs(x - self._x_position) <= self._x_length / 2 and abs(y - self._y_position) <= self._y_length / 2", "def is_point_in(self, point):\n return (self.upperleft[0] <= point[0] <= self.upperright[0] and self.upperleft[1] <= point[1] <= self.bottomleft[1])", "def is_point_in(self, point):\n return (self.upperleft[0] <= point[0] <= self.upperright[0] and self.upperleft[1] <= point[1] <= self.bottomleft[1])", "def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n test = self.A.dot(point.flatten()) - self.b < ABS_TOL\n return np.all(test)", "def contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for H in self.Hrep_generator():\n if not H.contains(p):\n return False\n return True", "def __contains__(self, point, e=10e-10):\n v1 = self.vector\n v2 = Vector.createFromTwoPoints(self.point, point)\n return abs(v1.angle - v2.angle) < e", "def contains_point(self, x, y = None):\n x, y = y is not None and Point(x, y) or Point(x[0], x[1])\n\n cond1 = self.min_x() <= x <= self.max_x()\n cond2 = self.min_y() <= y <= self.max_y()\n return self.is_point_on_same_line(x, y) and cond1 and cond2", "def checkPointValidity(self, point: Tuple[float, float]) -> bool:\n neighbor = self.kdtree.search_nn(point)\n if not neighbor:\n self.kdtree.add(point)\n return True\n if neighbor[1] <= self.MIN_POINTS_DIST:\n return False\n else:\n self.kdtree.add(point)\n return True", "def inside( self, point ):\n for i in range( 0, len(point) ):\n if math.fabs( self.center[i] - point[i] ) > 
self.dimLens[i]/2.0:\n return False;\n return True;", "def __contains__(self, point, e=1e-10):\n if point == self.p1:\n return True\n v1 = Vector.createFromTwoPoints(self.p1, point)\n v2 = self.getVector()\n return (abs(v1.angle - v2.angle) % (2 * math.pi) < e) and (v1.norm <= v2.norm)", "def __contains__(self, point):\n if not isinstance(point, np.ndarray):\n point = np.array(point)\n return any(point in u for u in self.list_poly)", "def contains(self, point):\n scaledPoint = Point( point.x / self.scaleX\n , point.y / self.scaleY\n , point.z / self.scaleZ\n )\n\n return self.scaledObject.contains(scaledPoint)", "def contains(self, point):\n translatedPoint = Point( point.x - self.translateX\n , point.y - self.translateY\n , point.z - self.translateZ\n )\n\n return self.translatedObject.contains(translatedPoint)", "def __contains__(self, point): \n corners = self.corners\n\n if isinstance(point, tuple):\n from pyresample.spherical_geometry import Coordinate\n retval = planar_point_inside(Coordinate(*point), corners)\n else:\n retval = planar_point_inside(point, corners)\n\n #print ' retval from FALSE CORNERS contains '+str(retval)\n\n return retval", "def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False", "def contains(self, point : Point):\n return ( self.corner.x <= point.x <= (self.corner.x + self.width)\n and self.corner.y <= point.y <= (self.corner.y + self.height))", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def __contains__(self,pos):\n # Permet de donner une contenance a l objet\n # Il devient comme une liste de point\n # Ainsi on peut le parcourir comme on le ferai avec une liste\n xmin=self.pos[0]\n xmax=self.pos[0]+self.dim[0]\n ymin=self.pos[1]\n ymax=self.pos[1]+self.dim[1]\n xpt=pos[0]\n ypt=pos[1]\n return (xpt>=xmin and xpt<=xmax and ypt>=ymin and ypt<=ymax)", "def check_point_inside(self, points):\n return all([self._domain.check_point_inside(point) for point in points])", "def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False", "def IsInsertedPoint(self, ):\n ...", "def is_point_in_box(x, y, bbox):\n if x < 200 and y < 200:\n return True\n return False", "def contains(self, possible_point):\n# if possible_point == self.endpoints[0] or possible_point == self.endpoints[1]:\n# return False\n distance = sum(possible_point.distance_to(p) for p in self.endpoints)\n return abs(distance - self.length()) < 0.0000001", "def __contains__(self, point):\n #### Original \n from pyresample.spherical_geometry import point_inside, Coordinate\n corners = self.corners\n\n if isinstance(point, tuple):\n return point_inside(Coordinate(*point), corners)\n else:\n return point_inside(point, corners)\n #### End Original\n #from .spherical import SphPolygon\n #log.info('RUNNING SPHERICAL in __contains__')\n #sphpoly = SphPolygon(corners)\n #return sphpoly.intersection(SphPolygon(point), sphpoly)", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def isPoint(point, widgetType = 'widget'):\n if 
not(isinstance(point, list) or isinstance(point, tuple)):\n raise pgUIException(str(point) + ' is not a valid tuple/list for ' +\n widgetType,\n code = 31)\n if len(point) != 2:\n raise pgUIException(str(point) + ' has to have two elements',\n code = 32)\n if not(isinstance(point[0], int)) or not(isinstance(point[1], int)):\n raise pgUIException(str(point) + ' is not a valid point for ' +\n widgetType + ' position',\n code = 33)\n if point[0] < 0 or point[1] < 0:\n raise pgUIException(str(point) +\n ' both coordinates have to be 0 or positive',\n code = 34)\n return True", "def point_is_shot(self, point: Point):\n return point in self.shot_locations", "def in_display(self, point):\n x, y = point\n if x < 0 or x > self.width or \\\n y < 0 or y > self.height:\n return False\n return True", "def test_points_exists(self):\n self.assertEqual(Destination.objects.filter(name='testWithin')[0].point,\n self.test_point_inside)\n self.assertEqual(Destination.objects.filter(name='testWithout')[0].point,\n self.test_point_outside)", "def add_point(self, point: Point) -> bool:\n\n return self.add_traverse(point, self.root, 0)", "def isInternal(self, aPoint):\n if (aPoint.x >= self.pMin.x and aPoint.x <= self.pMax.x) \\\n and (aPoint.y >= self.pMin.y and aPoint.y <= self.pMax.y):\n return True\n else:\n return False", "def __isPointOnArea(self, point, area):\r\n\r\n pointX, pointY = point\r\n areaX,areaY,areaWidth,areaHeight = area\r\n\r\n if (pointX >= areaX and pointX <= areaX+areaWidth) and (pointY >= areaY and pointY <= areaY+areaHeight):\r\n return True\r\n else:\r\n return False", "def belongs(self, point, tolerance=TOLERANCE):\n point = vectorization.expand_dims(point, to_ndim=2)\n\n _, point_dim = point.shape\n if point_dim is not self.dimension + 1:\n if point_dim is self.dimension:\n logging.warning('Use the extrinsic coordinates to '\n 'represent points on the hypersphere.')\n return False\n sq_norm = self.embedding_metric.squared_norm(point)\n diff = np.abs(sq_norm - 1)\n\n return diff < tolerance", "def is_bound(self, point):\n return self.__begin == point or self.__end == point", "def isInternal(self, aPoint):\n if compute_distance(self.center, aPoint) <= self.radius:\n return True\n else:\n return False", "def contains(self, pt):\n x,y = pt.as_tuple()\n return (self.left <= x <= self.right and\n self.top <= y <= self.bottom)", "def valid_point(self, row, col):\n return self.topdown_view[row][col] == 1.0", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def is_point_exist(point, a_value, b_value, field):\n\n return (\n (point.y_crd ** 2 -\n (point.x_crd ** 3 + a_value *\n point.x_crd + b_value)) % field == 0 and\n 0 <= point.x_crd < field and 0 <= point.y_crd < field)", "def can_draw(self,point):\n if point <= 0:\n return False\n else:\n return True", "def is_inside(self, mX, mY, point):\n return (math.sqrt((point[0] - mX) * (point[0] - mX)\n + (point[1] - mY) * (point[1] - mY)) <= 2)", "def particleExists(*args, **kwargs)->bool:\n pass", "def in_bounds(self, point):\n # Sanity checks\n # Check that point has same number of dimensions as graph\n if not len(point) == len(self.dimensions):\n raise Exception(\"Point has \" + str(len(point)) + \" dimensions, Coordination Space has \" + \\\n str(len(self.dimensions)) + \" dimensions.\")\n\n for i, coordinate in enumerate(point):\n if coordinate > self.dimensions[i] or coordinate < 0:\n return False\n\n return True", "def 
point_in_poly(x_point: float, y_point: float) -> bool:\n\n # Semi-F47 extended states all devices should be able to ride out a sag of up to 1 cycle.\n if x_point <= 1:\n return False\n\n point = shapely.geometry.Point(x_point, y_point)\n return POLYGON.contains(point) or POLYGON.intersects(point)", "def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True", "def IsInsertedPoint(self, p_float, p_float_1, p_float_2):\n ...", "def _valid_point(self, point):\n # --------------------------------\n # Check Bounds\n # --------------------------------\n # Get grid point and confirm it is within range\n coord = self._get_grid_coord(point)\n if np.logical_or(np.any(point < 0), np.any(point >= self._extent)):\n return False\n\n # --------------------------------\n # Check Distance of Neighbors\n # --------------------------------\n for idx in self._get_neighbors(coord):\n # No points in grid cell\n if self._grid[idx] == -1:\n continue\n\n # Obtains point in grid cell and confirms its distance is less than the radius.\n near_point = self._samples[self._grid[idx]]\n if metric.euclidean(near_point, point) < self._radius:\n return False\n return True", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def isPointInside(self, p):\n x,y = p[0], p[1]\n A = self.left <= x <= self.right\n B = self.bottom <= y <= self.top\n return (A and B)", "def interior_contains(self, point):\n try:\n p = vector(point)\n except TypeError: # point not iterable or no common ring for elements\n if len(point)>0:\n return False\n else:\n p = vector(self.field(), [])\n\n if len(p)!=self.ambient_dim():\n return False\n \n for H in self.Hrep_generator():\n if not H.interior_contains(p):\n return False\n return True", "def point_in_key( self,point,key ):\n key_dict = self.parts_dict.get(key,None)\n if not key_dict is None:\n for lons, lats in zip( key_dict['lons'],key_dict['lats'] ):\n if in_polygon(point = point, poly = (lons,lats)):\n return True\n return False", "def point_valid(self, pt, samples):\n\n\t cell_coords = self.get_cell_coords(pt)\n\t for idx in self.get_neighbours(cell_coords):\n\t nearby_pt = samples[idx]\n\t # Squared distance between or candidate point, pt, and this nearby_pt.\n\t distance2 = (nearby_pt[0]-pt[0])**2 + (nearby_pt[1]-pt[1])**2\n\t if distance2 < (self.r)**2:\n\t # The points are too close, so pt is not a candidate.\n\t return False\n\t # All points tested: if we're here, pt is valid\n\t return True", "def has_geom(self):\n return bool(self.give_geom())", "def ok(self, point):\n return True", "def ok(self, point):\n return True", "def particle_is_inside(self, particle):\n return self.in_box_bounds(particle.position)", "def contains(self, position):\n return (position - self._position).dot(self.normal(position)) < 0", "def contains(self, point):\n x, y = point.get_vertex(crs=self.crs)[:2]\n if isinstance(self.crs, GeographicalCRS) and self.ispolar():\n return _ccontains.contains_proj(x, y, self.vertices, self.crs) \\\n and not any(p.contains(point) for p in self.subs)\n else:\n return _ccontains.contains(x, y, self.vertices) \\\n and not any(p.contains(point) for p in self.subs)", "def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey", "def 
is_point_of_interest(self, point, labels):\n r, c = point\n is_interesting = False\n if self.topdown_view[r][c] in labels:\n is_interesting = True\n\n return is_interesting, self.topdown_view[r][c]", "def __eq__(self, pt):\n return self.x == pt.x and self.y == pt.y", "def point_in_board(point: Point):\n return point in frozenset(\n [\n Point(x, y)\n for x in range(BOARD_SIZE)\n for y in range(BOARD_SIZE)\n ]\n )", "def is_in_box(self, mz, rt):\n hits = self.check_point(mz, rt)\n if len(hits) > 0:\n return True\n else:\n return False", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 <= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def isInside(point_x, point_y, area_left, area_top, area_width, area_height):\n return (area_left <= point_x < area_left + area_width) and (area_top <= point_y < area_top + area_height)", "def point_inside_polygon(self, location, points):\n # Simplification: if the point is above the mean altitude of all the \n # points, then do not consider it to be inside the polygon. We could \n # also perform interesting calculations here, but we won't have that \n # many objects of differing altitude anyway.\n avg_alt = float(sum([point.alt for point in points]))/len(points)\n if avg_alt < location.alt - self.altitude_margin:\n return False\n\n edges = get_point_edges(points)\n num = sum(ray_intersects_segment(location, e[0], e[1]) for e in edges)\n return num % 2 == 1", "def isInside(self, point):\n # we rotate back the point to the frame parallel to the axis of the ellipse\n rotatedPoint = self.rotatePoint(point)\n # we check if each point is inside the associated liquid drop\n return ((rotatedPoint[:, :, 0]/self.axisA[:, None])**2 + (rotatedPoint[:, :, 1]/self.axisB[:, None])**2 < 1)", "def __contains__(self, other):\n x, y = other\n return self.radius >= sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def point_in_map(self, x, y):\r\n return 0 <= x < self.width and 0 <= y < self.height and (x,y) not in self.walls", "def test_point(self):\n\n p = points.Point(4.3, -4.2, 3.1)\n\n self.assertEqual(p.x, 4.3)\n self.assertEqual(p.y, -4.2)\n self.assertEqual(p.z, 3.1)\n self.assertEqual(p.w, 1)", "def __contains__(self, x):\n indexes = self.get_indexes(x)\n return self.sketch[indexes] > 0" ]
[ "0.80940896", "0.79766035", "0.7874318", "0.7752589", "0.77207327", "0.76728505", "0.7655251", "0.7603095", "0.7503402", "0.7494675", "0.7432554", "0.740622", "0.74035716", "0.73980653", "0.73448753", "0.72807014", "0.7261856", "0.7195358", "0.7172193", "0.7149684", "0.7121691", "0.71145564", "0.71051943", "0.7093924", "0.70902073", "0.7054455", "0.70416594", "0.70416594", "0.70351356", "0.7034712", "0.697709", "0.697709", "0.69704676", "0.6944667", "0.6942779", "0.69357854", "0.69353026", "0.6921912", "0.69183415", "0.6917008", "0.6904693", "0.6864452", "0.68558866", "0.68274796", "0.68216205", "0.68099093", "0.6802087", "0.6802087", "0.6781508", "0.6764506", "0.6763023", "0.6758084", "0.6729337", "0.67073625", "0.6701443", "0.6684702", "0.66780514", "0.66629964", "0.66626185", "0.6637941", "0.6573697", "0.65650195", "0.6555638", "0.6533265", "0.65326273", "0.6527653", "0.6510511", "0.6489027", "0.6477645", "0.6473615", "0.64655954", "0.6451101", "0.645011", "0.6442821", "0.64363235", "0.64278704", "0.64115214", "0.63772655", "0.63772655", "0.6364049", "0.63615835", "0.6359834", "0.6355795", "0.6355791", "0.6355791", "0.63538045", "0.63371223", "0.6334593", "0.6328486", "0.63263947", "0.63236594", "0.63232076", "0.6321742", "0.6310141", "0.6296837", "0.62816906", "0.6277753", "0.6255886", "0.6246239", "0.6243623", "0.622048" ]
0.0
-1
Function to compute the distance between the instance and a point or the shortest distance between the instance and another line.
def distance(self, p=None, l=None): if l is None: d = p - self.zero n = np.zeros(3) # try: # n = d - np.dot(d, self.direction) * self.direction # except RuntimeWarning: # print(d, self.direction) # return norm(n) with warnings.catch_warnings(record=True) as w: # Cause all warnings to always be triggered. warnings.simplefilter("always") n = d - np.dot(d, self.direction) * self.direction # print(n, norm(n)) if len(w) > 0 and issubclass(w[-1].category, RuntimeWarning): # Todo: check w/ Ram if this is what he meant to do when catch a warning: n = np.zeros(3) # n = np.zeros(3) # print(d, self.direction) pass return norm(n) else: normal = np.cross(self.direction, l.direction) n = norm(normal) if n < sys.float_info.min: # Lines are parallel. return self.distance(p=l.zero) offset = np.dot(l.zero - self.zero, normal) / n return np.abs(offset)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def distance_on_line(p1, p2, line, start_vertex=0):\n line_copy = line\n seg1, pos1 = find_segment(p1, line, start_vertex)\n if seg1 is None:\n # logging.warn('p1 %s is not projected, st=%s', p1, start_vertex)\n return None\n seg2, pos2 = find_segment(p2, line, seg1)\n if seg2 is None:\n if line[0] == line[-1]:\n line = line + line[1:]\n seg2, pos2 = find_segment(p2, line, seg1)\n if seg2 is None:\n # logging.warn('p2 %s is not projected, st=%s', p2, start_vertex)\n return None\n if seg1 == seg2:\n return distance(line[seg1], line[seg1+1]) * abs(pos2-pos1), seg1\n if seg2 < seg1:\n # Should not happen\n raise Exception('Pos1 %s is after pos2 %s', seg1, seg2)\n d = 0\n if pos1 < 1:\n d += distance(line[seg1], line[seg1+1]) * (1-pos1)\n for i in range(seg1+1, seg2):\n d += distance(line[i], line[i+1])\n if pos2 > 0:\n d += distance(line[seg2], line[seg2+1]) * pos2\n return d, seg2 % len(line_copy)", "def distance(p1, p2):\n if isparallel(p1, p2):\n # lines are parallel\n l = np.cross(p1.w, p1.v - p2.v * np.dot(p1.w, p2.w) / dot(p2.w, p2.w)) / np.linalg.norm(p1.w)\n else:\n # lines are not parallel\n if abs(p1 * p2) < 10*_eps:\n # lines intersect at a point\n l = 0\n else:\n # lines don't intersect, find closest distance\n l = abs(p1 * p2) / np.linalg.norm(np.cross(p1.w, p2.w))**2\n return l", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def getDistanceBetweenTwoPoints(self, one, two):\n dx = one.x - two.x\n dy = one.y - two.y\n return math.sqrt(dx * dx + dy * dy)", "def query_distance(self, instance1=(), instance2=()):\n distance = sum([pow((a - b), 2) for a, b in zip(instance1, instance2)])\n return distance", "def get_distance(pt1,pt2):\r\n x1 = pt1[1]\r\n y1 = pt1[0]\r\n x2 = pt2[1]\r\n y2 = pt2[0]\r\n d = np.sqrt((x2-x1)**2 + (y2-y1)**2)\r\n return d", "def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)", "def distance_point_line_3d(point: Vector, start: Vector, end: Vector) -> float:\n if start.isclose(end):\n raise ZeroDivisionError('Not a line.')\n v1 = point - start\n # point projected onto line start to end:\n v2 = (end - start).project(v1)\n # Pythagoras:\n return math.sqrt(v1.magnitude_square - v2.magnitude_square)", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def 
getDistance(self, x1, x2, y1, y2):\n return ((x1 - x2)**2 + (y1 - y2)**2)**0.5", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def distance_point_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def get_distance(point1, point2):\n a = (point1['x'] - point2['x']) ** 2\n b = (point1['y'] - point2['y']) ** 2\n return (a + b) ** (1.0 / 2)", "def get_distance(x1, y1, x2, y2):\n return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2", "def distance(self, p0, p1=None):\n if p1 == None:\n p1 = anchor\n y_span = p0[1] - p1[1]\n x_span = p0[0] - p1[0]\n return y_span ** 2 + x_span ** 2", "def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))", "def get_distance_between(self, p1, p2):\n\t\treturn math.sqrt(math.pow((p1.x - p2.x), 2) + math.pow((p1.y - p2.y), 2))", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance", "def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance", "def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)", 
"def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)", "def obj_distance(obj1, obj2):\n distance = aversine(obj1.details['lon'], obj1.details['lat'], obj2.details['lon'], obj2.details['lat'])\n return distance", "def get_distance(start, end):\n\n\t\tloc_start, loc_end, dst_node = create_distance(start, end)\n\t\tdistance = cmds.getAttr(\"%s.distance\" % dst_node)\n\n\t\tcmds.delete([loc_start, loc_end, dst_node])\n\n\t\treturn distance", "def distance(p1, p2):\n return None", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def distancetoline(p, l1, l2):\n vx = l1.x-p.x \n vy = l1.y-p.y\n ux = l2.x-l1.x\n uy = l2.y-l1.y\n\n length = ux*ux+uy*uy;\n\n det = (-vx*ux)+(-vy*uy); \n # if this is < 0 or > length then its outside the line segment\n if det<0 or det>length:\n ux=l2.x-p.x\n uy=l2.y-p.y\n return sqrt(min(vx*vx+vy*vy, ux*ux+uy*uy))\n\n det = ux*vy-uy*vx\n if length == 0.0:\n return 0.0\n else:\n return sqrt((det*det)/length)", "def line_segment_distance(start1, end1, start2, end2):\n assert end1 >= start1\n assert end2 >= start2\n if start1 <= start2 <= end1:\n return 0\n elif start1 <= start2:\n return start2 - end1\n elif start2 <= start1 <= end2:\n return 0\n else:\n return start1 - end2", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def distance(self, other_pt, is_lla=True):\n return 0.0", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)", "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "def distance(self, pt1, pt2):\r\n # productive #frequent\r\n if frequent: profprint()\r\n d = ((float(pt1[0]) - float(pt2[0])) ** 2 + (float(pt1[1]) - float(pt2[1])) ** 2 + (float(pt1[2]) - float(pt2[2])) ** 2) ** 0.5\r\n return d", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def getDistance(p1, p2):\n\tdist = la.norm(p2 - p1)\n\treturn dist", "def calculate_distance(self, other_point):\n return math.sqrt(\n (self._x - other_point._x)**2 +\n (self._y - other_point._y)**2)", "def __distance(start_x, start_y, end_x, end_y):\n distance = math.sqrt((start_x - end_x) ** 2 + (start_y - end_y) ** 2)\n return distance", "def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist", "def vertex_distance(self, v1, v2):\n return utils.real_distance(self.node_locations[v1], self.node_locations[v2])", "def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5", "def distance(self, other):\n dx = self.x - other.x\n dy = self.y - other.y\n return math.sqrt(dx*dx + dy*dy)", "def point_line_dist2(p, l1, l2):\n p, l1, l2 = np.asarray(p), np.asarray(l1), np.asarray(l2)\n ap = l1 - p\n n = l2 - l1\n n /= np.sqrt(sum(n**2))\n dist = ap - np.outer(n, np.dot(ap, n)).T\n return np.sum(dist**2, 1)", "def calculate_line_length(x1, y1, x2, y2):\n distance = np.sqrt((x2 - x1)**2 + (y2 - y1)**2)\n return distance", "def distance(p1, p2):\n return math.hypot(p1.x-p2.x, p1.y-p2.y)", "def dist(a: Point, b: Point):\n return (a.x - b.x) ** 2 + (a.y - b.y) ** 2", 
"def getDistance(self, pt1, pt2):\n p = 2 #euclidean distance\n tot = 0\n for indexc, column in pt1.iteritems():\n if indexc in self.discrete: # need to reference VDM\n datapoint = self.VDMdict.get(indexc)\n dif = datapoint[pt1[indexc]][pt2[indexc]]\n elif indexc != \"class\": #gets distance beween 2 points\n dif = abs(float(pt1[indexc]) - float(pt2[indexc]))\n\n tot += dif ** p\n distance = tot ** (1 / p)\n return(distance)", "def dist(pnt1, pnt2):\n return ((pnt2[0] - pnt1[0])**2 + (pnt2[1] - pnt1[1])**2 + (pnt2[2] - pnt1[2])**2)**0.5", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def distance_to(self, other):\n dx = other.x - self.x\n dy = other.y - self.y\n return math.sqrt(dx ** 2 + dy ** 2)", "def pointPointDistance(p1,p2):\n llsq = 0.0 # line length squared\n # faster, only for 2D\n h = p2[0] - p1[0]\n llsq = llsq + (h*h)\n h = p2[1] - p1[1]\n llsq = llsq + (h*h)\n return math.sqrt(llsq)\n\n for i in range(len(p1)): # each dimension, general case\n h = p2[i] - p1[i]\n llsq = llsq + (h*h)\n return math.sqrt(llsq)", "def distance(point0, point1):\n if point0 is None or point1 is None:\n return None\n diff = np.subtract(point0, point1)\n return np.sqrt(diff[0] ** 2 + diff[1] ** 2)", "def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))", "def distance(self, x, y=None):\n if y is not None:\n pos = TwoDV(x, y)\n if isinstance(x, TwoDV):\n pos = x\n elif isinstance(x, tuple):\n pos = TwoDV(*x)\n elif isinstance(x, TNavigator):\n pos = x._position\n return abs(pos - self._position)", "def distance(self,x,y,**kwargs):\n pass", "def derivative_of_dist_to_obstacle(min_dist, x_jnt0, y_jnt0, x_jnt1, y_jnt1,\n dx_jnt0, dy_jnt0, dx_jnt1, dy_jnt1,\n link_slope, x_obs, y_obs):\n dist, point_type = min_dist\n if point_type == 0:\n dist_der = ((x_jnt0 - x_obs) * dx_jnt0 + (y_jnt0 - y_obs) * dy_jnt0)\n dist_der /= dist\n elif point_type == 1:\n dist_der = ((x_jnt1 - x_obs) * dx_jnt1 + (y_jnt1 - y_obs) * dy_jnt1)\n dist_der /= dist\n elif point_type == 2:\n if link_slope is None:\n dist_der = dx_jnt0 if x_jnt0 > x_obs else -dx_jnt0\n elif link_slope == 0:\n dist_der = dy_jnt0 if y_jnt0 > y_obs else -dy_jnt0\n else:\n x_intersect = (\n x_obs / link_slope + y_obs + link_slope * x_jnt0 - y_jnt0\n ) / (link_slope + 1 / link_slope)\n y_intersect = link_slope * (x_intersect - x_jnt0) + y_jnt0\n dlink_slope = (\n (1 / (x_jnt1 - x_jnt0))\n * (dy_jnt1 - dy_jnt0 + link_slope * (dx_jnt1 - dx_jnt0))\n )\n dx_intersect = (\n link_slope**4 * dx_jnt0\n + link_slope**2 * dlink_slope * (y_jnt0 - y_obs)\n - link_slope**3 * dy_jnt0\n + dlink_slope * (y_obs - y_jnt0)\n + 2 * link_slope * dlink_slope * (x_jnt0 - x_obs)\n + link_slope**2 * dx_jnt0\n - link_slope * dy_jnt0\n ) / (1 + link_slope**2) ** 2\n dy_intersect = (link_slope * (dx_intersect - dx_jnt0)\n + dlink_slope * (x_intersect - x_jnt0)\n + dy_jnt0)\n dist_der = (\n (x_intersect - x_obs) * dx_intersect\n + (y_intersect - y_obs) * dy_intersect\n ) / dist\n return dist_der", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def distance(x1, y1, x2, y2):\n dist = ((x1-x2)**2 + (y1-y2)**2)**0.5\n return dist", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def get_distance(self, other):\n return math.sqrt((self.x - other[0])**2 + 
(self.y - other[1])**2)", "def get_distance(point_1, point_2):\n result = ((point_1[0] - point_2[0]) ** 2 + (point_1[1] - point_2[1]) ** 2) ** 0.5\n return result", "def _get_dist(self, p1, p2): \r\n\r\n distance = np.sqrt(\r\n (p1[0] - p2[0]) ** 2 +\r\n (p1[1] - p2[1]) ** 2 +\r\n (p1[2] - p2[2]) ** 2)\r\n\r\n return distance", "def distance(self,pt1,pt2):\n #productive #frequent\n if frequent: profprint()\n d = ( ( float(pt1[0]) - float(pt2[0]) )**2 + ( float(pt1[1]) - float(pt2[1]) )**2 + ( float(pt1[2]) - float(pt2[2]) )**2 )**0.5\n return d", "def distance(self,other):\n return math.sqrt((self.x - other.x)**2 +(self.y - other.y)**2)", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))", "def distance(p1,p2):\r\n x1,y1 = p1\r\n x2,y2 = p2\r\n return hypot(x2 - x1, y2 - y1)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def distance(point1, point2):\n x1, y1 = point1[0], point1[1]\n x2, y2 = point2[0], point2[1]\n\n dx = x1 - x2\n dy = y1 - y2\n\n return math.sqrt(dx * dx + dy * dy)", "def line_point_shortest_dist(r: np.ndarray, v: np.ndarray, p: np.ndarray) -> Tuple[float, float]:\n\n t = np.dot(v, p - r) / np.dot(v, v)\n d = np.linalg.norm((r + v * t) - p)\n return d, t", "def distance(self, p1, p2):\n return math.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def _distance(point_a: tuple, point_b: tuple):\n # rgb values\n x1, y1, z1 = point_a\n x2, y2, z2 = point_b\n\n # distances\n dx = x1 - x2\n dy = y1 - y2\n dz = z1 - z2\n\n # final distance\n return sqrt(dx**2 + dy**2 + dz**2)", "def distance(self, other):\n xd, yd = self.x-other.x, self.y-other.y\n return math.sqrt(xd**2 + yd**2)", "def calculate_distance(asteroid_1: Asteroid, asteroid_2: Asteroid) -> float:\n dy = asteroid_2.y - asteroid_1.y\n dx = asteroid_2.x - asteroid_1.x\n return math.sqrt(dy * dy + dx * dx)", "def distance(self, point_a, point_b):\n distance = 0.0\n if len(self.dimensions) > 1:\n for a, b, dim in zip(point_a, point_b, self.dimensions):\n distance += dim.distance(a, b)\n\n if len(self.dimensions) == 1:\n distance += self.dimensions[0].distance(point_a[0], point_b[0])\n\n return distance", "def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])", "def distance(p1,p2):\n x1,y1 = p1\n x2,y2 = p2\n return hypot(x2 - x1, y2 - y1)", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def distance(self, c1, c2):\r\n x = (c2.x - c1.x) ** 2\r\n y = (c2.y - c1.y) ** 2\r\n d = int(round(math.sqrt(x + y)))\r\n return d", "def distance(a, b):\n return math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2)", "def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def distance(self, other):\n # only used in triangle.__str__\n return hypot(self.x - other.x, self.y - other.y)", "def distance_point_line_sqrd(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = 
subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector_sqrd(cross_vectors(pa, pb))\n l_ab = length_vector_sqrd(ab)\n return l / l_ab", "def dist(pt1, pt2):\n return np.sqrt((pt2[0]-pt1[0])**2 + (pt2[1]-pt1[1])**2)" ]
[ "0.7641407", "0.7141231", "0.70598394", "0.7000537", "0.6959461", "0.69434446", "0.6942349", "0.68482417", "0.6793337", "0.6758447", "0.67557317", "0.67258215", "0.6696894", "0.6693597", "0.6688431", "0.66852915", "0.6680149", "0.6679827", "0.6665373", "0.6638376", "0.66323674", "0.661835", "0.660545", "0.658448", "0.65759254", "0.6567914", "0.655416", "0.6547762", "0.65374327", "0.65265745", "0.65177524", "0.65067303", "0.6504805", "0.6493838", "0.6493763", "0.6492859", "0.6486615", "0.6485895", "0.6482527", "0.6480465", "0.64794457", "0.64787734", "0.6477067", "0.64620924", "0.6460944", "0.6451266", "0.64502144", "0.64392996", "0.64336234", "0.64325535", "0.6430339", "0.6429014", "0.6424584", "0.64221305", "0.64212155", "0.6413106", "0.63980645", "0.63812166", "0.63632107", "0.6362318", "0.6361046", "0.6359796", "0.6358034", "0.6356477", "0.6346979", "0.6346455", "0.6344347", "0.6329675", "0.6321455", "0.6320111", "0.63165116", "0.63158953", "0.631534", "0.6314987", "0.6298457", "0.6297266", "0.6296773", "0.6296741", "0.6293288", "0.62920105", "0.62861425", "0.6285595", "0.6285595", "0.627366", "0.6263662", "0.6259926", "0.62593037", "0.6254581", "0.62543297", "0.6247196", "0.6246851", "0.62407213", "0.62404126", "0.6240376", "0.6236607", "0.62329876", "0.6223874", "0.62133294", "0.6211682", "0.6210602" ]
0.63587475
62
Function to compute the distance squared between the instance and a point.
def distance_sq(self, p): d = p - self.zero n = d - np.dot(d, self.direction) * self.direction return n[0] ** 2 + n[1] ** 2 + n[2] ** 2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def squared_distance(self, point: List[int]) -> int:\n return point[0] ** 2 + point[1] ** 2", "def squaredDistanceTo(self,other):\n if not isinstance(other,Point):\n return \n return (self.longitude - other.getLongitude())**2 +(self.latitude - other.getLatitude())**2", "def euclidean_distance(self, point: List[int]) -> float:\n return sqrt(point[0] ** 2 + point[1] ** 2)", "def distance_squared(self, other: PointOrIterable = None) -> float:\n return sum((((other or Point()) - self) ** 2))", "def distanceFromPoint(self, point):\n return Vector.createFromTwoPoints(point, self.crossLine(self.getHeight(point))).norm", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1):\n point = np.array(self.image.size()) / 2\n return spsd.euclidean(point, [self.x, self.y])", "def pointwise_distance(x, y, square=True):\n with torch.no_grad():\n x = x.squeeze(-1)\n y = y.squeeze(-1)\n\n x = x.unsqueeze(-1)\n y = y.transpose(0,1).unsqueeze(0)\n diff = x - y\n dis = torch.sum(torch.square(diff), dim=1)\n if torch.min(dis) < 0:\n raise RuntimeError('dis small than 0')\n if square:\n return dis\n else:\n return torch.sqrt(dis)", "def point_sqr_distance(point_a:tuple, point_b:tuple)->float:\n return (point_b[1]-point_a[1]) ** 2 + (point_b[0] - point_a[0]) ** 2", "def distance(self, point_1=(0, 0), point_2=(0, 0)):\n\t\treturn math.sqrt((point_1[0]-point_2[0])**2+(point_1[1]-point_2[1])**2)", "def distance(self, point):\r\n assert a6checks.is_point(point)\r\n assert len(point)==len(self._centroid)\r\n\r\n sum=0\r\n for i in range (len(self._centroid)):\r\n sum+=(point[i]-self._centroid[i])*(point[i]-self._centroid[i])\r\n dist=math.sqrt(sum)\r\n return dist", "def get_distance(self, point, cpoint):\n distance = 0.0\n for m, s in zip(point, cpoint):\n distance += pow(m - s, 2)\n distance = math.sqrt(distance)\n return distance", "def euclidean_distance(self, other_point):\n return sqrt((self.x - other_point.x)**2 + (self.y - other_point.y)**2)", "def calculate_distance(self, other_point):\n return math.sqrt(\n (self._x - other_point._x)**2 +\n (self._y - other_point._y)**2)", "def get_dist(self, point_x, point_y):\n dist = sqrt((point_x - self.player_x) ** 2 + (point_y -\n self.player_y) ** 2)\n return dist", "def distanceTo(self, point):\n return np.linalg.norm([self.x - point.x, self.y - point.y, self.z - point.z])", "def distanceFrom(self, point = (-1, -1)):\n if (point[0] == -1 or point[1] == -1 and len(self)):\n point = self[0].image.size()\n\n return spsd.cdist(self.coordinates(), [point])[:,0]", "def distance_to(self, point1, point2):\n delta_x = self.x_points[point1] - self.x_points[point2]\n delta_y = self.y_points[point1] - self.y_points[point2]\n return math.sqrt(delta_x * delta_x + delta_y * delta_y)", "def distance_to(self, x, y):\n\t\tdx = x - self.x\n\t\tdy = y - self.y\n\t\treturn math.sqrt((dx**2)+(dy**2))", "def squaredDistance(vec1, vec2):\n return (distance.euclidean(vec1, vec2))**2", "def euclidean_distance(self, other_point):\n\n return math.sqrt(math.pow(other_point.x - self.x, 2) + math.pow(other_point.y - self.y, 2))", "def squared_distance(v: Vector, w: Vector) -> float:\n return sum_of_squares(subtract(v, w))", "def dist(self, p):\n return math.sqrt((p.x - self.x)**2 + (p.y - self.y)**2)", "def __get_distance(point1: np.ndarray, point2: np.ndarray) -> float:\n return np.sqrt(np.sum(np.square(point1 - point2)))", "def distance(self, x: int, y: int) -> float:\n return math.sqrt((x - self.x) ** 2 + (y - self.y) ** 2)", "def distance(self, pt):\n return 
math.sqrt((self.x - pt.x) ** 2 + (self.y - pt.y) ** 2)", "def distance(point_1=(0, 0), point_2=(0, 0)):\n return math.sqrt(\n (point_1[0] - point_2[0]) ** 2 +\n (point_1[1] - point_2[1]) ** 2)", "def dist(self,x, y):\n\n x1, y1 = x\n x2, y2 = y\n return np.sqrt(pow((x1 - x2), 2) + pow((y1 - y2), 2))", "def euclidean_sqr(self, instance, centroid):\n return np.linalg.norm(instance-centroid)**2", "def calc_point_squre_dist(point_a, point_b):\n distx = point_a[0] - point_b[0]\n disty = point_a[1] - point_b[1]\n return distx ** 2 + disty ** 2", "def squared_distance_calculator(position1, position2):\r\n difference_vector = position2 - position1\r\n return np.dot(difference_vector, difference_vector)", "def euclidean_distance(self, point):\n mean = self.mean()\n dist = euclidean(mean, point)\n radius = self.radius * self.distance_factor()\n if radius == 0.0:\n # corner case: the ball consists of a single point only\n # distance is defined as > 1 for flat dimensions unless point lies inside\n if point == mean:\n dist = 0.0\n else:\n dist += 1\n else:\n # normalization so that result 1.0 corresponds to dist == radius (i.e., point is on the border)\n dist /= radius\n return dist", "def distance(self, other: PointOrIterable = None) -> float:\n return (self.distance_squared(other or Point())) ** 0.5", "def distance(point1, point2):\n return math.sqrt(math.pow((point1[0] - point2[0]), 2) +\n math.pow(point1[1] - point2[1], 2))", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def distance_to(self, point: Union[\"Unit\", Point2, Point3]) -> Union[int, float]:\n return self.position.distance_to_point2(point.position)", "def distance(pt1, pt2):\n\tx1, y1 = pt1\n\tx2, y2 = pt2\n\tx = x2 - x1\n\ty = y2 - y1\n\ts = x**2 + y**2\n\treturn np.sqrt(s)", "def distance(p1,p2):\n import numpy as np\n x = np.sqrt(sum(np.power(p2-p1,2)))\n return(x)", "def getDistance(point1, point2x, point2y):\n distance = np.sqrt((point2x - point1[0])**2 + (point2y - point1[1])**2)\n return distance", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def GetPointToPointDistance(self, point1, point2):\n return math.sqrt(vtk.vtkMath.Distance2BetweenPoints(point1, point2))", "def squared_distance(v, w):\n return sum_of_squares(vector_subtraction(v, w))", "def euclidean_distance(point_one, point_two):\n return np.linalg.norm(point_one-point_two)", "def distance_point(self, point: array_like) -> np.float64:\n return abs(self.distance_point_signed(point))", "def calculate_point_distance(p1, p2):\n\n return math.sqrt(math.pow(p1[0]-p2[0],2) + math.pow(p1[1]-p2[1],2))", "def calc_point_distance(x1, y1, x2, y2):\n\n return math.hypot(x2 - x1, y2 - y1)", "def get_distance(point_a, point_b):\n \n return np.sqrt(np.sum((point_a - point_b) ** 2, 1))", "def distance_point_point_sqrd(a, b):\n ab = subtract_vectors(b, a)\n return length_vector_sqrd(ab)", "def distance(self, x2, y2):\r\n return math.sqrt((x2 - self.x) ** 2 + (y2 - self.y) ** 2)", "def distance(self, point1, point2):\n\n\t\tprint \"Inside Distance!-----\"\n\t\tdist = 
math.pow(point1[0] - point2[0], 2) + math.pow(point1[1] - point2[1], 2);\n\t\treturn dist", "def getSquareDistance(p1, p2):\n dx = p1[0] - p2[0]\n dy = p1[1] - p2[1]\n\n return dx * dx + dy * dy", "def euclidean_distance(self,):\n return sqrt(pow((self.pose1.x - self.pose2.x), 2) +\n pow((self.pose1.y - self.pose2.y), 2))", "def dist_squared(x_0, y_0, x_1, y_1):\n\n return (x_0 - x_1)**2 + (y_0 - y_1)**2", "def dist_squared(self, vec2):\n if type(vec2) != Vector:\n raise TypeError(\"Not a vector\")\n\n return (self - vec2) * (self - vec2)", "def squared_distance(v, w):\n return sum_of_squares(vector_subtract(v, w))", "def distance(self, other: \"Point\") -> float:\n if not isinstance(other, self.__class__):\n raise TypeError(\"Expected `other` to be an instance of `{}`\"\\\n .format(self.__class__))\n dx = self.x - other.x\n dy = self.y - other.y\n return sqrt((dx ** 2) + (dy ** 2))", "def SquareDist(x0, x1, y0, y1):\n return (x1 - x0) ** 2 + (y1 - y0) ** 2", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def squared_dist(x1: np.ndarray, x2: np.ndarray) -> np.ndarray:\n return (\n np.sum(x1 ** 2, 1).reshape(-1, 1) +\n np.sum(x2 ** 2, 1) -\n 2 * np.dot(x1, x2.T)\n )", "def get_distance(self, node):\n return np.sqrt(\n (self.x - node.x) ** 2 +\n (self.y - node.y) ** 2\n )", "def distance_to(self, p):\n closest_pt = self.closest_point_to(p)\n return np.linalg.norm(p - closest_pt)", "def squared_distance(v, w):\n\treturn sum_squares(vector_subtract(v, w))", "def distance (p1,p2):\n return np.sqrt(np.sum(np.power(p2-p1,2)))", "def distance(x,y):\n return np.sqrt( np.power(np.array(x) - np.array(y), 2).sum() )", "def euclidean_distance(point1, point2):\n return np.linalg.norm(np.array(point1) - np.array(point2))", "def distance(p1,p2):\n return ((p1.x - p2.x)**2 + (p1.y - p2.y)**2)**0.5", "def _distance(self, new_pt):\n\t\tnew_pt = np.resize(new_point, (self.n_row, new_pt.shape[0]))\n\t\tdist = euclidean_distance(self.data[:,0:-1], new_pt)\n\n\t\treturn dist", "def dist(self, point: np.array):\n return np.linalg.norm(\n np.cross(point - self.r_start, self.direction), axis=1) / \\\n np.linalg.norm(self.direction)", "def distance_to(self, x):\n return np.linalg.norm(np.array(x) - self.closest_point_to(x))", "def _postgis_distance(cls, point: Point) -> STDistance:\n return ga.functions.ST_Distance(cls.center_raw, point)", "def get_distance_from_point(self, pstart, p_end):\n a = numpy.array((pstart.x, pstart.y, pstart.z))\n b = numpy.array((p_end.x, p_end.y, p_end.z))\n\n distance = numpy.linalg.norm(a - b)\n\n return distance", "def distance(self, second_object):\n # r = (dx^2+dy^2)^1/2\n point_1, point_2 = self.xy, second_object.xy\n delta_x, delta_y = self.sub(point_1, point_2)[:]\n squared_sum = pow(delta_x, 2) + pow(delta_y, 2)\n return math.sqrt(squared_sum)", "def distance(point1, point2):\n return ((point1[0] - point2[0])**2 + (point1[1] - point2[1])**2) ** 0.5", "def point_to_point_distance(p1:Point, p2: Point) -> float:\n return round(geopy.distance.distance((p1.y, p1.x), (p2.y, p2.x)).km,2)", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def compute_distance(point_1, point_2):\n x1, y1, x2, y2 = point_1[0], point_1[1], point_2[0], point_2[1]\n distance = 
np.sqrt((x2-x1)**2 + (y2-y1)**2)\n\n return distance", "def DISTANCE(x,y,x2=0,y2=0):\n\treturn sqrt((x-x2)*(x-x2)+(y-y2)*(y-y2))", "def distance(p1,p2):\n return ((p2.x - p1.x)*2 + (p2.y - p1.y))**0.5", "def getDistance(point1,point2):\n dx = point2[0]-point1[0]\n dy = point2[1]-point1[1]\n return math.sqrt(dy*dy + dx*dx)", "def getDistance(self):\n return sqrt(self.state[0] * self.state[0] + self.state[2] * self.state[2])", "def minkowski_distance(point1, point2):\n md = 0\n for p1,p2 in zip(point1,point2):\n md += abs((p1-p2)**3)\n md = md**(1/3)\n return md\n raise NotImplementedError", "def distance_to_origin(self):\n return np.sqrt(self.x ** 2 + self.y ** 2)", "def euclidean_distance(x, y):\n x1, y1 = x\n x2, y2 = y\n return sqrt((x1 - x2)**2 + (y1 - y2)**2)", "def _dist(x, y):\n return np.sqrt(np.mean(np.square(x - y)))", "def distance(point, cluster):\n return np.sqrt((point[0] - cluster[0])**2 + (point[1] - cluster[1])**2)", "def minkowski_distance(point1, point2):\n p = 3\n d = [abs(x - y) ** p for x, y in zip(point1, point2)]\n a = sum(d)**(1/p)\n\n return a", "def euclidean_distance(point1, point2):\n\n return math.sqrt(sum([(x - y) ** 2 for x, y in zip(point1, point2)]))", "def norm(point):\n return np.sqrt(norm2(point))", "def get_dist_sqrd(self, other):\n return (self.x - other[0])**2 + (self.y - other[1])**2", "def __get_distance(self, game_object):\n obj_x, obj_y = game_object.get_coordinates()\n self_x, self_y = self._coordinates\n\n inner = (obj_x-self_x)**2 + (obj_y-self_y)**2\n return math.sqrt(inner)", "def distance_between_sq(x1: float, y1: float, x2: float, y2: float) -> float:\n dx = x2 - x1\n dy = y2 - y1\n return dx**2 + dy**2", "def distance(a: Point, b: Point, scale: float) -> float:\n x_north = a.y - b.y\n y_east = b.x - a.x\n return scale * math.sqrt(x_north**2 + y_east**2)", "def dist(self, point_a, point_b):\n args = {\"point_a\": point_a, \"point_b\": point_b}\n dists = gs.array(self._iterate_over_factors(\"dist\", args))\n return gs.linalg.norm(dists, ord=2, axis=0)", "def distance(p1, p2):\n\n return sqrt(((p2[0] - p1[0])**2) + ((p2[1] - p1[1])**2))", "def calculateDistance(point1, point2, dimension):\n distance=0\n # print 'p1: ' + str(point1) + 'p2: ' + str(point2) + str(dimension)\n for x in range(dimension - 1):\n distance += pow((point1[x] - point2[x]), 2)\n return math.sqrt(distance)", "def segment_point_distance_sq(x1, y1, x2, y2, px, py):\n pd2 = (x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2)\n if pd2 == 0:\n # Points are coincident.\n x = x1\n y = y2\n else:\n # parameter of closest point on the line\n u = ((px - x1) * (x2 - x1) + (py - y1) * (y2 - y1)) / pd2\n if u < 0: # off the end\n x = x1\n y = y1\n elif u > 1.0: # off the end\n x = x2\n y = y2\n else: # interpolate\n x = x1 + u * (x2 - x1)\n y = y1 + u * (y2 - y1)\n return (x - px) * (x - px) + (y - py) * (y - py)", "def distance(x1, y1, x2, y2):\n return ((x2 - x1) ** 2 + (y2 - y1) ** 2) ** 0.5", "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def euclidean_distance(pred, squared=False, eps=1e-12):\n pred_square = pred.pow(2).sum(dim=-1) # (N, )\n prod = torch.mm(pred, pred.t()) # (N, N)\n distance = (pred_square.unsqueeze(1) + pred_square.unsqueeze(0) -\n 2 * prod).clamp(min=eps) # (N, N)\n\n if not squared:\n distance = distance.sqrt()\n\n distance = distance.clone()\n distance[range(len(prod)), range(len(prod))] = 0\n return 
distance", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))", "def compute_dist(self, s1, s2):\n return sp_linalg.norm(self.wrap(s1, s2))" ]
[ "0.78861135", "0.7388568", "0.7332369", "0.7102426", "0.7052089", "0.70430666", "0.703139", "0.6993221", "0.69181925", "0.6889745", "0.6863204", "0.6794171", "0.6785468", "0.6758498", "0.67438406", "0.6737052", "0.67257434", "0.6707908", "0.6706913", "0.67060155", "0.67053574", "0.6692949", "0.66841197", "0.6678734", "0.6677181", "0.6652835", "0.6638666", "0.6635064", "0.65652215", "0.6554318", "0.6532777", "0.6525928", "0.6523748", "0.6518141", "0.651732", "0.65169513", "0.65158254", "0.64932734", "0.64738333", "0.6473639", "0.6471096", "0.64695984", "0.64623755", "0.6460026", "0.64508975", "0.6441555", "0.64332867", "0.6430974", "0.6428798", "0.64286715", "0.64170486", "0.6403979", "0.64006215", "0.6386466", "0.63845754", "0.63809973", "0.63699615", "0.634044", "0.63367856", "0.63366085", "0.6333588", "0.6325829", "0.6314395", "0.63130045", "0.6307221", "0.63040173", "0.62869585", "0.6285175", "0.6283423", "0.6283271", "0.62733877", "0.6271218", "0.6269011", "0.6251167", "0.62489855", "0.6241975", "0.6229666", "0.6224896", "0.621639", "0.6212918", "0.6210777", "0.62057924", "0.62023383", "0.62019026", "0.619684", "0.6196154", "0.6194491", "0.61850834", "0.61813724", "0.617963", "0.6174168", "0.6171742", "0.6168495", "0.6161426", "0.6161102", "0.6153957", "0.61490583", "0.6147797", "0.61475754", "0.61475754" ]
0.6738073
15
Function to compute the point of the instance closest to another line.
def closest_point(self, l):
    cos = np.dot(self.direction, l.direction)
    n = 1 - cos ** 2
    if n < sys.float_info.epsilon:
        # Lines are parallel.
        return self.zero
    d0 = l.zero - self.zero
    a = np.dot(d0, self.direction)
    b = np.dot(d0, l.direction)
    return self.zero + self.direction * (a - b * cos) / n
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def closest(self, x):\n # http://www.ahinson.com/algorithms_general/Sections/Geometry/PluckerLine.pdf\n # has different equation for moment, the negative\n\n x = arg.getvector(x, 3)\n\n lam = np.dot(x - self.pp, self.uw)\n p = self.point(lam) # is the closest point on the line\n d = np.linalg.norm( x - p)\n \n return namedtuple('closest', 'p d lam')(p, d, lam)", "def FindClosestPoint(self, ):\n ...", "def getClosestPointToLine(self, A, B, P):\n AP = XYPoint(P.x - A.x, P.y - A.y)\n AB = XYPoint(B.x - A.x, B.y - A.y)\n ab2 = AB.x * AB.x + AB.y * AB.y\n ap_ab = AP.x * AB.x + AP.y * AB.y\n t = ap_ab / ab2\n\n if t < 0.0:\n t = 0.0\n elif t > 1.0:\n t = 1.0\n\n return XYPoint(A.x + AB.x * t, A.y + AB.y * t)", "def distance_to_line(a, b, p):\n return distance(closest_point(a, b, p), p)", "def closest_point_to(self, x):\n x = np.array(x)\n v = self.p1 - self.p0\n b = self.p0 - x\n\n t = -np.dot(v, b) / np.dot(v, v)\n if (0 <= t <= 1):\n closest = t*(self.p1 - self.p0) + self.p0\n return closest\n else:\n if np.linalg.norm(x - self.p0) < np.linalg.norm(x - self.p1):\n return self.p0\n else:\n return self.p1", "def _nearest_point_on_line(begin, end, point):\n b2e = _vec_sub(end, begin)\n b2p = _vec_sub(point, begin)\n nom = _vec_dot(b2p, b2e)\n denom = _vec_dot(b2e, b2e)\n if denom == 0.0:\n return begin\n u = nom / denom\n if u <= 0.0:\n return begin\n elif u >= 1.0:\n return end\n else:\n return _vec_add(begin, _vec_scale(b2e, u))", "def LineClosestPoint(line, testpoint):\n line = rhutil.coerceline(line, True)\n testpoint = rhutil.coerce3dpoint(testpoint, True)\n return line.ClosestPoint(testpoint, False)", "def closest(self, x, y):\n pts = np.column_stack([self.x, self.y])\n # Transform data coordinates to pixel coordinates.\n pts = self.ax.transData.transform(pts)\n diff = pts - [x, y]\n dist = np.hypot(*diff.T)\n min_index = np.argmin(dist)\n return min_index, dist[min_index]", "def _get_closest(self, x, y, clients):\n target = min(\n clients,\n key=lambda c: math.hypot(c.x - x, c.y - y),\n default=self.clients.current_client,\n )\n return target", "def closest_line_point(point:tuple, edge:tuple)->tuple:\n d_y, d_x, b = line_equation((edge[0], edge[1]))\n if b == None:\n # The line is vertical, need different intercept formula.\n return (edge[0][0], point[1])\n if d_y == 0:\n # The line is horizontal, we can use a faster formula:\n return (point[0], edge[0][1])\n term_1 = d_x * d_y * (point[1] - edge[1][1])\n term_2 = (d_y ** 2) * edge[1][0]\n term_3 = (d_x ** 2) * point[0]\n denom = (d_y ** 2) + (d_x ** 2)\n x_int = (term_1 + term_2 + term_3) / denom\n y_int = (d_y / d_x) * x_int + b\n return (x_int, y_int)", "def closest(self, x, y):\n if self.direction == 'horizontal':\n p_pts = np.array([\n self.ax.transData.transform((p, 0))[0] for p in self.positions\n ])\n dist = abs(p_pts - x)\n else:\n p_pts = np.array([\n self.ax.transData.transform((0, p))[1] for p in self.positions\n ])\n dist = abs(p_pts - y)\n index = np.argmin(dist)\n return index, dist[index]", "def point(self, x, y):\n d1 = super().point(x, y)\n top = self._lifetime.top\n bottom = self._lifetime.bottom\n d2 = distance_line_point(top.pos, bottom.pos, (x, y))[0]\n return min(d1, d2)", "def closest_point(point, points):\n return points[cdist([point], points).argmin()]", "def closest_point(p1: Vector3, p2: Vector3, p3: Vector3) -> Vector3:\n k = ((p2.y 
- p1.y) * (p3.x - p1.x) - (p2.x - p1.x) * (p3.y - p1.y)) / ((p2.y - p1.y) ** 2 + (p2.x - p1.x) ** 2)\n x4 = p3.x - k * (p2.y - p1.y)\n y4 = p3.y + k * (p2.x - p1.x)\n\n return Vector3(x4, y4, 0)", "def getClosestPointFromLine(origin, ray, point):\n # calculate the difference vector\n delta = point-origin\n # norm the ray\n ray /= np.linalg.norm(ray, axis=-1)[..., None]\n # calculate the scale product\n factor = np.sum(ray*delta, axis=-1)\n try:\n return origin + factor[:, None] * ray\n except IndexError:\n return origin + factor * ray", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def _nearest_to_point(self, point):\n ptvertex = point.get_vertex(crs=self.crs)\n segments = zip(self.vertices.slice(0, -1), self.vertices.slice(1, 0))\n\n if isinstance(self.crs, CartesianCRS):\n func = _cvectorgeo.pt_nearest_planar\n def func(seg):\n return _cvectorgeo.pt_nearest_planar(ptvertex[0], ptvertex[1],\n seg[0][0], seg[0][1], seg[1][0], seg[1][1])\n else:\n fwd = self.crs.forward\n inv = self.crs.inverse\n def func(seg):\n return _cvectorgeo.pt_nearest_proj(fwd, inv, ptvertex,\n seg[0], seg[1], tol=0.01)\n\n point_dist = map(func, segments)\n min_point = None\n min_dist = -1.0\n for i, (point, dist) in enumerate(point_dist):\n if dist < min_dist or (i == 0):\n min_point = point\n min_dist = dist\n\n return min_dist, min_point", "def closest(point, points):\n pts = [(Point.distance(point, p), p) for p in points]\n pts.sort()\n return pts[0][1]", "def closest_point(\n self, lx: float, ly: float, reference: Output | None = None\n ) -> tuple[float, float]:\n if reference:\n reference_ptr = reference._ptr\n else:\n reference_ptr = ffi.NULL\n\n dest_lx = ffi.new(\"double *\")\n dest_ly = ffi.new(\"double *\")\n lib.wlr_output_layout_closest_point(\n self._ptr, reference_ptr, lx, ly, dest_lx, dest_ly\n )\n return dest_lx[0], dest_ly[0]", "def LineMinDistanceTo(line, point_or_line):\n line = rhutil.coerceline(line, True)\n test = rhutil.coerceline(point_or_line)\n if test is None: test = rhutil.coerce3dpoint(point_or_line, True)\n return line.MinimumDistanceTo(test)", "def findPointOnLine(node1, node2, distance):\n m, b, _ = geometry.lineSpec(node1, node2)\n \n xy = []\n if m == True: # parallel to y axis\n xy.append(node1[0])\n if node1[1] <= node2[1]:\n xy.append(node1[1] + distance)\n else:\n xy.append(node1[1] - distance)\n \n elif m == False: # parallel to x axis\n if node1[0] <= node2[0]:\n xy.append(node1[0] + distance)\n else:\n xy.append(node1[0] - distance)\n xy.append(node1[1])\n \n else:\n x = sp.Symbol('x')\n z = (x-node1[0])**2 + (m*x+b-node1[1])**2 - distance**2\n xSolution = sp.solve(z, x)\n \n for xSol in xSolution:\n if (xSol >= node1[0] and xSol <= node2[0]) or (xSol <= node1[0] and xSol >= node2[0]):\n xy.append(xSol)\n xy.append(xSol*m + b)\n return xy", "def closest_point(self, point, start_param=None, Ns=25):\n x, z = self.rotate_to_xz_plane(point)\n la = self._closest_point(x, z, start_param, Ns)\n return la", "def closest_point(a, b, p):\n ap = [p[0]-a[0], p[1]-a[1]]\n ab = [b[0]-a[0], b[1]-a[1]]\n mag = float(ab[0]**2 + ab[1]**2)\n proj = dot(ap, ab)\n if mag ==0 :\n dist = 0\n else:\n dist = proj / mag\n if dist < 0:\n return [a[0], a[1]]\n elif dist > 1:\n return [b[0], b[1]]\n else:\n return [a[0] + ab[0] * dist, a[1] + ab[1] * dist]", "def closest_point(self, point, maxdist=0.0, return_param=False):\n return self.xyz", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - 
point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def get_closest_distance_to_path(self, path):\n min_distance_to_line = float(\"inf\")\n for p in path:\n game_path = p[:]\n\n game_path.sort(key = lambda coord: calculate_distance(self, coord))\n point_A = game_path[0] # Closest point out of all the points on the path to to the tower\n\n try:\n point_after_A = p[p.index(point_A) + 1]\n point_before_A = p[p.index(point_A) - 1]\n closest_to_A = min(point_after_A, point_before_A, key = lambda point: calculate_distance(point_A, point))\n except:\n if p.index(point_A) == 0:\n closest_to_A = p[p.index(point_A) + 1]\n \n elif p.index(point_A) == len(p) - 1:\n closest_to_A = p[p.index(point_A) - 1]\n finally:\n if closest_to_A[0] != point_A[0]:\n m = (closest_to_A[1] - point_A[1]) / (closest_to_A[0] - point_A[0])\n else:\n m = 2\n\n b = point_A[1] - m * point_A[0]\n\n closest_distance = abs(-m * self.x + self.y - b) / math.sqrt((-m) ** 2 + 1)\n min_distance_to_line = min(closest_distance, min_distance_to_line)\n \n return min_distance_to_line", "def closest_point(p, a, b):\n vector_ab = [y - x for x, y in zip(a, b)]\n vector_ap = [y - x for x, y in zip(a, p)]\n dot_ap_ab = sum(x * y for x, y in zip(vector_ap, vector_ab))\n dot_ab_ab = sum(x * y for x, y in zip(vector_ab, vector_ab))\n t = max(0.0, min(dot_ap_ab / dot_ab_ab, 1.0))\n return a[0] + vector_ab[0] * t, a[1] + vector_ab[1] * t", "def findNearPointOnLine(node1, node2, point):\n p=point[0]\n q=point[1]\n a=node1[0]\n b=node1[1]\n c=node2[0]\n d=node2[1]\n \n x = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (d-b) + p\n y = ((a-p)*(d-b) + (q-b)*(c-a)) / ((d-b)**2+(c-a)**2) * (a-c) + q\n \n return x, y", "def FindClosestInsertedPoint(self, ):\n ...", "def pick_point_not_on_line(line: Line):\n return line.point1 + line.get_perpendicular_at_point(line.point1).get_direction_vector()", "def line_point_shortest_dist(r: np.ndarray, v: np.ndarray, p: np.ndarray) -> Tuple[float, float]:\n\n t = np.dot(v, p - r) / np.dot(v, v)\n d = np.linalg.norm((r + v * t) - p)\n return d, t", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = (provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def closest_point_on_segment(point, segment):\n a, b = segment\n p = closest_point_on_line(point, segment)\n d = distance_point_point_sqrd(a, b)\n d1 = distance_point_point_sqrd(a, p)\n d2 = distance_point_point_sqrd(b, p)\n if d1 > d or d2 > d:\n if d1 < d2:\n return a\n return b\n return p", "def _dist_point2line(self, point: ndarray,\n line: Tuple[ndarray, ndarray]) -> ndarray:\n\n assert isinstance(line, tuple)\n point1, point2 = line\n d = abs(np.cross(point2 - point1, point - point1)) / (\n norm(point2 - point1) + 1e-8)\n return d", "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def getPointClosestToXY(self, x, y, alter='', offsets=False):\n if isinstance(alter, str):\n alter = ['', alter]\n # select most suitable point based on x\n datax = self.x_offsets(alter=alter[0])\n absm = np.abs(datax - x)\n idx = np.where(absm == np.min(absm))\n if len(idx) == 0:\n idx = np.argmin(absm)\n elif len(idx) == 1:\n idx = idx[0]\n else:\n # len(idx) > 1: select most suitable point based on y\n datay = self.y_offsets(index=idx, alter=alter[1])\n absM = 
np.abs(datay - y)\n idX = np.where(absM == np.min(absM))\n if len(idX) == 0:\n idx = idx[0]\n elif len(idX) == 1:\n idx = idx[idX[0]]\n else: # equally close in x and y -> returns first datapoint found\n idx = idx[idX[0]]\n idxOut = idx if len(idx) <= 1 else idx[0]\n if offsets:\n # no alter, but offset for the return value\n return self.x_offsets(index=idx)[0], self.y_offsets(index=idx)[0], idxOut\n # no alter, no offsets for the return value\n return self.x(index=idx)[0], self.y(index=idx)[0], idxOut", "def FindClosestPointWithinRadius(self, p_float, , p_float_4):\n ...", "def closest_points(self, other):\n p0_other, p1_other = other.p0, other.p1\n\n # w = p1 - p0\n # v = p1_other - p0_other\n # s*w + p0 = t*v + p0_other\n\n w = self.p1 - self.p0\n v = p1_other - p0_other\n\n A = np.vstack((w,v)).T\n b = p0_other - self.p0\n\n #soln = np.linalg.solve(A, b)\n soln = np.linalg.pinv(A).dot(b)\n s, t = soln[0], -soln[1]\n\n return s*w + self.p0, t*v + p0_other", "def nearest_on_boundary(self, point):\n _, minpt = self._nearest_to_point(point)\n return Point(minpt, crs=self.crs)", "def get_distance(self, point):\n if not isinstance(point, Point):\n point = Point(*point)\n\n distances = [(point.distance_to_point(p), p) for p in self.points]\n sortpoints = sorted(distances, key=lambda x: x[0])\n closest = sortpoints[0][1]\n\n vc = Vector(*closest)\n d1 = vc.dot(vc)\n\n secondc = sortpoints[1][1]\n vs = Vector(*secondc)\n v1 = Vector(*point) - (vc+vs)/2\n v2 = vs-vc\n v2.unitize()\n d2 = v1.dot(v2)\n\n return abs(min(d1, d2)) - self.thickness/2", "def dist_to_line(self, line, pt):\n return abs(line[0]*pt.x + line[1]*pt.y + line[2])/math.sqrt(line[0]**2 + line[1]**2)", "def get_closest_point(path, point):\n np_path = convert_path_type(path) # modify path to be a numpy array\n np_point = convert_point_type(point) # modify point to be a [x,y,z] numpy array\n\n # compute the distance from current location to every point in path and find index of the min distance\n distances = ((np_path[:,0] - np_point[0])**2 + (np_path[:,1] - np_point[1])**2)**0.5\n closest_idx = np.argmin(distances)\n\n if closest_idx != len(np_path) - 1: # check if this point is behind current location, if so use index+1\n closest_point = np_path[closest_idx]\n next_closest_point = np_path[closest_idx+1]\n\n # create vectors between the three points\n path_vector = next_closest_point - closest_point\n current_vector = np_point - closest_point\n\n # compute dot product to figure out whether location is behind or in front of closest_point\n dot_prod = np.dot(path_vector, current_vector)\n\n if dot_prod >= 0: # closest point is behind current location\n closest_idx += 1\n\n closest_point = path[closest_idx] # retrieve point from original `path` argument for type consistency\n\n return closest_point, closest_idx", "def getNearestEdge(self, point):\n edge = mm.idx.nearest((point.getPoint().x, point.getPoint().y), objects=True)\n edges = [e.object for e in edge]\n if len(edges) == 1:\n result = edges[0]\n else:\n dist = 99999999999999999999999999999999999999999\n for edge in edges:\n distance = point.getPoint().distance(edge.getGeometry())\n if distance < dist:\n dist = distance\n result = edge\n return result", "def nearest_point(pt):\n nearest_point = None\n min_dist = float(\"inf\")\n for p in cur_points:\n dist = euclidean_dist(pt, p.to_tuple())\n if dist < min_dist:\n min_dist, nearest_point = dist, p\n\n return nearest_point.to_tuple()", "def find_closest_point(point, street, streetvolume):\r\n streetdf = 
streetvolume[streetvolume['streetname'] == street]\r\n if streetdf.shape[0] == 0:\r\n streetdf = streetvolume\r\n streetdf['pdistance'] = streetdf['geometry'].apply(lambda x: point.distance(x))\r\n streetdf.sort_values(by = 'pdistance', ascending = True, inplace = True)\r\n return streetdf['lineid'].iloc[0]", "def point_to_line_dist(P, A, B):\n\tif all(A == P) or all(B == P):\n\t\treturn0\n\tif arccos(dot((P - A) / norm(P - A), (B - A) / norm(B - A))) > pi / 2:\n\t\treturn norm(P - A)\n\tif arccos(dot((P - B) / norm(P - B), (A - B) / norm(A - B))) > pi / 2:\n\t\treturn norm(P - B)\n\treturn norm(cross(A-B, A-P))/norm(B-A)", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def closest_to(self, a, b):\n diff_a = abs(a.ts - self.ts)\n diff_b = abs(b.ts - self.ts)\n if diff_a < diff_b and diff_a < TIME_THRESHOLD:\n return a\n elif diff_b < TIME_THRESHOLD:\n return b\n return None", "def _closest_point(self, x, z, start_param, Ns):\n pi = np.pi\n def f(t):\n px, pz = self(t)\n return np.sqrt((x-px)**2 + (z-pz)**2)\n if start_param is None:\n x0 = brute(lambda x: f(x[0]), [[0, pi]], Ns=Ns, finish=None)\n step = np.pi/(Ns-1)\n res = minimize_scalar(\n f, bounds=[max(0, x0-step), min(np.pi, x0+step)], method='bounded',\n options=dict(xatol=1e-12),\n )\n else:\n res = minimize_scalar(f, bracket=(start_param, pi/Ns),\n options=dict(xtol=1e-12))\n la = res.x\n return la", "def shortest_line_to_point(point_a, point_b, point_c): # where a and b are on spin axis, c is the point spinning round\n axis_vect = np.subtract(point_a, point_b)\n axis_mag = magnitude(point_a, point_b)\n unit_axis = np.divide(axis_vect, axis_mag) # unit of pp\n # pp' constants - p\n\n # pp dot u\n t = np.sum(np.dot(unit_axis, unit_axis))\n c = np.sum(np.dot(np.subtract(point_b, point_c), unit_axis))\n p = -c / t\n project_point_on_axis_add = (np.multiply(unit_axis, p))\n project_point_on_axis = project_point_on_axis_add + point_b\n distance = magnitude(point_c, project_point_on_axis)\n return distance, project_point_on_axis", "def nearest(self, pose):\n # type: (Pose) -> Pose\n assert (self.nodes), 'No nodes.'\n closest = min(self.nodes, key=lambda x: self.dist(x, pose))\n return closest", "def closest_dist(x, y, x_list, y_list):\n points = np.array([x, y]).T\n points_list = np.array([x_list, y_list]).T\n\n dpt0 = points_list[:, 0] - points[:, 0, np.newaxis]\n dpt1 = points_list[:, 1] - points[:, 1, np.newaxis]\n\n return np.argmin((dpt0*dpt0 + dpt1*dpt1), axis=1)", "def closest_points_naive(self, x, y):\r\n # Running time: O(n ** 2)\r\n\r\n dist = []\r\n for i in range(len(x)):\r\n for j in range(i+1, len(x)):\r\n d = self.get_euclidean_distance(x[i], x[j], y[i], y[j])\r\n dist.append(d)\r\n \r\n return min(dist)", "def getPoint(self, a):\n lng = self.source.center.lng + (self.target.center.lng - self.source.center.lng) * min(a, 1)\n lat = self.source.center.lat + (self.target.center.lat - self.source.center.lat) * min(a, 1)\n return lng, lat", "def line_locate_point(self, right: PointValue) -> 
ir.FloatingValue:\n return ops.GeoLineLocatePoint(self, right).to_expr()", "def closest_point_to(self, p):\n p = np.array(p)\n # align with z-axis so all triangle have same z-coord\n tri_rot, rot = self.align_with([0,0,1])\n tri_rot_z = tri_rot.a[-1]\n p_rot = np.dot(rot, p)\n\n p_2d = p_rot[:2]\n tri_2d = geometry2d.Triangle(tri_rot.a[:2], tri_rot.b[:2], tri_rot.c[:2])\n\n if tri_2d.is_inside(p_2d):\n # projects onto triangle, so return difference in z\n return np.dot(np.linalg.inv(rot), np.array(list(p_2d) + [tri_rot_z]))\n else:\n closest_pt_2d = tri_2d.closest_point_to(p_2d)[1]\n\n closest_pt_3d = np.array(list(closest_pt_2d) + [tri_rot_z])\n\n return np.dot(np.linalg.inv(rot), closest_pt_3d)", "def __get_closest_waypoint_index(self, x, y):\n return self.__waypoint_tree.query([x, y], 1)[1]", "def closest_object(geometries, point): \n min_dist, min_index = min((point.distance(geom), k) \n for (k, geom) in enumerate(geometries))\n \n return geometries[min_index], min_dist, min_index", "def get_line_distance(self, p):\n\n y = 1000 * p.y\n R = 1000 * self.geometry.R\n x = copysign(sqrt(y ** 2 + (R - sqrt(R ** 2 - y ** 2))), y)\n x = 2 * R * asin(x / (2 * R))\n #x=y\n b = -x / sqrt(R ** 2 - x ** 2)\n theta = atan(b) # grating tangent angle\n print b, theta\n d = 0\n for n, a in enumerate(self.an):\n d += a * x ** n\n d *= cos(theta)\n return 1e-3 / d", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)", "def closestIntersectionPoint(origin, direction, outline, maxDistance):\n testLine = LineString([origin, origin + direction * maxDistance])\n inter = testLine.intersection(outline)\n if inter.is_empty:\n if TABFAIL_VISUAL:\n import matplotlib.pyplot as plt\n\n plt.axis('equal')\n x, y = outline.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n x, y = testLine.coords.xy\n plt.plot(list(map(toMm, x)), list(map(toMm, y)))\n plt.show()\n raise NoIntersectionError(f\"No intersection found within given distance\", origin)\n origin = Point(origin[0], origin[1])\n geoms = list()\n for geom in listGeometries(inter):\n if isinstance(geom, Point):\n geoms.append(geom)\n elif isinstance(geom, LineString):\n # When a linestring is an intersection, we know that the starting or\n # ending points are the nearest one\n geoms.extend([Point(geom.coords[0]), Point(geom.coords[-1])])\n else:\n raise TypeError(f\"intersection() returned an unsupported datatype: {geom.__class__.__name__}\")\n return min([(g, origin.distance(g)) for g in geoms], key=lambda t: t[1])[0]", "def dist_to_point(self, point):\n\t\treturn dist_to_line2d_seg((self.a.to_tuple(),self.b.to_tuple()), point.to_tuple())", "def distance_point_to_line(x1, y1, a, b, c):\n d = abs((a * x1 + b * y1 + c)) / (math.sqrt(a * a + b * b))\n #print(\"Distance from ({}, {}) to line {}x+{}y+{}=0 is {}\".format(\n # x1, y1, a, b, c, d))\n return(d)", "def get_line_to(self,target):\n\n m = (target.y - self.y) / (target.x - self.x)\n\n b = self.y - m * self.x\n\n return (m,b)", "def __two_nearest_line__(b1, b2):\n distances = []\n for p in b1:\n for q in b2:\n distances.append([__distance__(p, q), (p, q)])\n distances = sorted(distances, key=lambda d: d[0])\n a1, b1 = distances[0][1][0], distances[0][1][1]\n a2, b2 = distances[1][1][0], distances[1][1][1]\n a1 = (a1[0] + (a2[0] - a1[0]) * 1 / 14, a1[1] + (a2[1] - a1[1]) * 1 / 14)\n b1 = (b1[0] + (b2[0] - b1[0]) * 1 / 14, b1[1] + (b2[1] - b1[1]) * 1 / 14)\n a2 = 
(a2[0] + (a1[0] - a2[0]) * 1 / 14, a2[1] + (a1[1] - a2[1]) * 1 / 14)\n b2 = (b2[0] + (b1[0] - b2[0]) * 1 / 14, b2[1] + (b1[1] - b2[1]) * 1 / 14)\n return (a1, b1), (a2, b2)", "def get_closest_waypoint(self, x, y):\n # TODO implement\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n return closest_idx", "def nearest_line(city_points):\n closest = 10000\n nodes = None\n for item in linear_list:\n line = item[2]\n nearest = abs(line[1] * city_points[0] + line[0] * city_points[1] + line[2]) \\\n / math.sqrt(line[1] ** 2 + line[0] ** 2)\n x = get_x(line, city_points)\n y = get_y(line, city_points)\n x1 = get_node_points(item[0])[0]\n x2 = get_node_points(item[1])[0]\n y1 = get_node_points(item[0])[1]\n y2 = get_node_points(item[1])[1]\n\n if ((x <= x1) & (x >= x2)) | ((x >= x1) & (x <= x2)):\n if ((y <= y1) & (y >= y2)) | ((y >= y1) & (y <= y2)):\n if nearest < closest:\n closest = nearest\n nodes = (item[0], item[1], item[2])\n return nodes", "def project_point_to_line(point, line_start, line_end):\n line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])", "def getNearestCar(self, position, line=0):\n return self.getNearestObjectInArray(self._cars, position, line)", "def _distance2_point_to_h_line(point, h_line):\n a,b,c = h_line\n x0,y0 = point\n # solve for equality\n # r^2 = (x-x0)^2 + (y-y0)^2\n # ax + by + c = 0\n # --> 2nd order polynomial\n # --> find place of exactly one solution, i.e.\n # radicant of p-q formula is identical zero\n # if radicant is zero, then\n ys = ((a*x0-c)*b + a**2*y0)/(a**2+b**2)\n # or\n xs = ((b*y0-c)*a + b**2*x0)/(a**2+b**2)\n # for a != 0\n if abs(a)>=abs(b):\n R2 = (x0-c/a)**2+y0**2 - (1.+(b/a)**2)*ys**2\n else:\n R2 = (y0-c/b)**2+x0**2 - (1.+(a/b)**2)*xs**2\n R2 = R2 if abs(R2)>1e-13 else 0.\n return R2, (xs, ys)", "def closest_points(self, entity: _entity_types) -> Tuple[Point3D, Point3D]:\n self_body = _union_entities(self.bodies)\n other_body = _union_entities(entity)\n\n occ1 = _create_component(root(), self_body, name=\"temp\")\n occ2 = _create_component(root(), other_body, name=\"temp\")\n\n try:\n result = app().measureManager.measureMinimumDistance(occ1.bRepBodies[0], occ2.bRepBodies[0])\n return result.positionOne, result.positionTwo\n finally:\n occ1.deleteMe()\n occ2.deleteMe()", "def closest_point(self, point, maxdist=0.0):\n face, point = self.geometry.ClosestPoint(Rhino.Geometry.Point3d(*point), maxdist)\n return list(point)", "def nearest_direct(row, gdf1, gdf2, src_column=None):\r\n #create a unary union\r\n unary_union = gdf2.unary_union \r\n # Find the geometry that is closest\r\n nearest = gdf2['centroid'] == nearest_points(row['centroid'], unary_union)[1]\r\n # Get the corresponding value from df2 (matching is based on the geometry)\r\n value = gdf2[nearest][src_column].get_values()[0]\r\n return value", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def distance_point_line(point, line):\n a, b = line\n 
ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector(cross_vectors(pa, pb))\n l_ab = length_vector(ab)\n return l / l_ab", "def get_closest_waypoint(self, x, y):\n closest_idx = self.waypoint_tree.query([x, y])[1] # ckd tree (1st closest, idx)\n\n # Check if closest waypoint is ahead or behind vehicle\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Equation for hyperplane through closest_coors\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n # Car is ahead of the closest waypoint\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def closest_node(self, where, cartesian=False, threshold=None, vincenty=False, haversine=False):\n\n if not vincenty or not haversine:\n if cartesian:\n x, y = self.grid.x, self.grid.y\n else:\n x, y = self.grid.lon, self.grid.lat\n dist = np.sqrt((x - where[0])**2 + (y - where[1])**2)\n elif vincenty:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([vincenty_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n elif haversine:\n grid_pts = np.asarray([self.grid.lon, self.grid.lat]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lon),1))\n dist = np.asarray([haversine_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n index = np.argmin(dist)\n if threshold:\n if dist.min() < threshold:\n index = np.argmin(dist)\n else:\n index = None\n\n return index", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.x,\n pose.y))\n else:\n return 0", "def closest_point(\n self, points: Union[List[\"PointMixin\"], \"PointMixin\"]\n ) -> pd.Series:\n from ..core.distance import closest_point as cp\n\n # The following cast secures the typing\n self = cast(\"Flight\", self)\n\n if not isinstance(points, list):\n points = [points]\n\n return min(\n (cp(self.data, point) for point in points),\n key=attrgetter(\"distance\"),\n )", "def get_closest_waypoint(self, pose):\n if self.kdtree:\n return kdtree_closest_point(self.kdtree,\n (pose.position.x,\n pose.position.y))\n else:\n return 0", "def get_closest_waypoint(self, pose):\n #TODO implement - Done\n # Iterate the base_waypoints' x value with current position's x value and find the closest\n # match, and pick that waypoint location index. 
\n min_idx = 0\n min_dist = None\n cur_x = pose.position.x\n cur_y = pose.position.y\n if self.waypoints is not None:\n for i, wp in enumerate(self.waypoints):\n wp_x = wp.pose.pose.position.x\n wp_y = wp.pose.pose.position.y\n dist = np.sqrt((cur_x - wp_x)**2 + (cur_y - wp_y)**2)\n if min_dist is None or min_dist >= dist:\n min_dist = dist\n min_idx = i\n \n # check whether the identified index is behind the current position, if so, move it by 1 index\n # https://gamedev.stackexchange.com/questions/75072/how-can-i-compare-two-quaternions-for-logical-equality\n # rospy.logwarn('min_idx before = %d', min_idx)\n eps = 1e-12\n if self.waypoints is not None:\n q1 = self.waypoints[min_idx].pose.pose.orientation\n q2 = pose.orientation\n q1_a = np.array([q1.x, q1.y, q1.z, q1.w])\n q2_a = np.array([q2.x, q2.y, q2.z, q2.w])\n direction = abs(np.dot(q1_a, q2_a))\n #rospy.logwarn('calculated direction %f', direction)\n wp_x = self.waypoints[min_idx].pose.pose.position.x\n if direction > 1-eps:\n if wp_x < cur_x:\n min_idx += 1\n else:\n min_idx -= 1\n else:\n if wp_x < cur_x:\n min_idx -= 1\n else:\n min_idx += 1\n\n # rospy.logwarn('min_idx after = %d', min_idx)\n return min_idx", "def _distance2_line_endpoints(line1, line2):\n (A,B),(C,D) = line1, line2\n R2=lambda u,v: (u[0]-v[0])**2+(u[1]-v[1])**2\n pairs = zip((A,A,B,B),(C,D,C,D))\n r2 = [R2(pair[0],pair[1]) for pair in pairs]\n mini=sorted(zip(r2,pairs),key=lambda a,b: a)[0]\n #R2_min = min((R2(A,C), R2(A,D), R2(B,C), R2(B,D)))\n return mini[0], mini[1][0], mini[1][1]", "def linePointXYDist(l,p,inside=True):\n return linePointXY(l,p,inside,distance=True)", "def distance(p1, p2):\n if isparallel(p1, p2):\n # lines are parallel\n l = np.cross(p1.w, p1.v - p2.v * np.dot(p1.w, p2.w) / dot(p2.w, p2.w)) / np.linalg.norm(p1.w)\n else:\n # lines are not parallel\n if abs(p1 * p2) < 10*_eps:\n # lines intersect at a point\n l = 0\n else:\n # lines don't intersect, find closest distance\n l = abs(p1 * p2) / np.linalg.norm(np.cross(p1.w, p2.w))**2\n return l", "def distance_point_line_sqrd(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n pa = subtract_vectors(a, point)\n pb = subtract_vectors(b, point)\n l = length_vector_sqrd(cross_vectors(pa, pb))\n l_ab = length_vector_sqrd(ab)\n return l / l_ab", "def _get_closest_waypoint(self, pose):\n pos = pose.position\n x = pos.x\n y = pos.y\n closest_idx = self.waypoints_tree.query([x,y],1)[1]\n\n return closest_idx", "def get_closest_waypoint_idx(self):\n\n # Position\n x = self.car_pose.pose.position.x\n y = self.car_pose.pose.position.y\n closest_idx = self.waypoint_tree.query([x, y], 1)[1]\n\n # Coordinates\n closest_coord = self.waypoints_2d[closest_idx]\n prev_coord = self.waypoints_2d[closest_idx - 1]\n\n # Hyper Plane\n cl_vect = np.array(closest_coord)\n prev_vect = np.array(prev_coord)\n pos_vect = np.array([x, y])\n\n val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)\n\n if val > 0:\n closest_idx = (closest_idx + 1) % len(self.waypoints_2d)\n\n return closest_idx", "def closest_point(self, shape, inf_dist=1.0, homogenous=True):\n\n self._check_pyb()\n\n if not _pyb: # pragma nocover\n raise ImportError(\n \"The package PyBullet is required for collision \"\n \"functionality. 
Install using pip install pybullet\"\n )\n\n if not self.pinit:\n self._init_pob()\n self._update_pyb()\n\n self._update_pyb()\n\n if not shape.pinit:\n shape._init_pob()\n shape._update_pyb()\n\n ret = p.getClosestPoints(self.co, shape.co, inf_dist)\n\n if homogenous:\n try:\n return ret[0][8], np.append(np.array(ret[0][5]), 1.0), np.append(np.array(ret[0][6]), 1.0)\n except ValueError:\n return None, None, None\n except IndexError:\n # Obstacle is further away than inf_dist\n return None, None, None\n else:\n try:\n return ret[0][8], np.array(ret[0][5]), np.array(ret[0][6])\n except ValueError:\n return None, None, None\n except IndexError:\n # Obstacle is further away than inf_dist\n return None, None, None", "def closest_point(g, p):\n\n nodes = [n for n in g.nodes]\n\n tree = KDTree(nodes)\n idx = tree.query([p], k=1, return_distance=False)[0][0]\n return nodes[idx]", "def point_line_dist2(p, l1, l2):\n p, l1, l2 = np.asarray(p), np.asarray(l1), np.asarray(l2)\n ap = l1 - p\n n = l2 - l1\n n /= np.sqrt(sum(n**2))\n dist = ap - np.outer(n, np.dot(ap, n)).T\n return np.sum(dist**2, 1)", "def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])", "def crossLine(self, other):\n if self.parallel(other): return None\n line = self.getLine()\n point = other.crossLine(line)\n if point is not None:\n if point in self and point in other:\n return point", "def closest_element(self, where, cartesian=False, threshold=None, vincenty=False):\n if not vincenty:\n if cartesian:\n x, y = self.grid.xc, self.grid.yc\n else:\n x, y = self.grid.lonc, self.grid.latc\n dist = np.sqrt((x - where[0])**2 + (y - where[1])**2)\n else:\n grid_pts = np.asarray([self.grid.lonc, self.grid.latc]).T\n where_pt_rep = np.tile(np.asarray(where), (len(self.grid.lonc),1))\n dist = np.asarray([vincenty_distance(pt_1, pt_2) for pt_1, pt_2 in zip(grid_pts, where_pt_rep)])*1000\n\n index = np.argmin(dist)\n if threshold:\n if dist.min() < threshold:\n index = np.argmin(dist)\n else:\n index = None\n\n return index", "def nearest(pntGraph, lineGraph, criterion='', threshold=0):\n _, spatialJoinDict = spatialjoin._spatialjoin(pntGraph, lineGraph, criterion, threshold)\n for point in pntGraph.nodes(data=True):\n point[1]['nearEdge'] = spatialJoinDict[point[1]['Ind']]\n print('The Ind of the nearest polyline is added to the POINT type graph.')", "def _nearest(arrlist_1, arrlist_2):\n tree = KDTree(arrlist_1);\n pts = tree.query(arrlist_2)\n\n return tree.data[pts[1][pts[0].argmin()]]", "def closest_on_screen_point_optim(trajectory, viewpoint, yaw, gaze_on_screen):\n \n traj_angles = dp.world_to_angles_through_screen(trajectory, viewpoint, yaw) \n \n #pprint(traj_angles)\n\n dist, idx = closest_node_tree(traj_angles, gaze_on_screen)\n ml_screen_ref = traj_angles[idx] \n\n return(idx, ml_screen_ref)", "def calc_line(start, target, map):\n\t\"\"\" Returns the real world point at the farthest range \"\"\"\n\tdx = abs(target[0] - start[0])\n\tdy = abs(target[1] - start[1])\n\txi = start[0]\n\tyi = start[1]\n\tn = 1 + dx + dy\n\tx_dir = np.sign(target[0] - start[0])\n\ty_dir = np.sign(target[1] - start[1])\n\terror = dx - dy;\n\tdx *= 2\n\tdy *= 2\n\n\tfor i in xrange(n):\n\t\tif map.grid[xi,yi] is not map.empty and map.grid[xi,yi] > 0:\n\t\t\treturn xi, yi\n\n\t\tif error > 0:\n\t\t\txi += x_dir\n\t\t\terror -= dy\n\t\telse:\n\t\t\tyi += y_dir\n\t\t\terror += dx\n\treturn target", "def _line_intersection(self, line, point):\n den = 
euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)" ]
[ "0.79727983", "0.7753009", "0.7605306", "0.7497314", "0.7443135", "0.7417121", "0.7388166", "0.7367654", "0.71200454", "0.71104366", "0.7108977", "0.7068614", "0.7057041", "0.7001261", "0.6992348", "0.6936836", "0.6908155", "0.6904385", "0.6875017", "0.6862292", "0.6801013", "0.6712156", "0.6703388", "0.669912", "0.66773117", "0.66499364", "0.6633628", "0.6612126", "0.66101766", "0.6600685", "0.65889937", "0.6587908", "0.6586934", "0.657232", "0.65603787", "0.6554643", "0.65250385", "0.6480818", "0.647456", "0.6468686", "0.6456968", "0.6427839", "0.6424807", "0.64242005", "0.6421678", "0.6416956", "0.6410626", "0.6398263", "0.63824224", "0.6379602", "0.63769937", "0.6366861", "0.6359059", "0.6332355", "0.6331331", "0.629351", "0.6272398", "0.6267369", "0.6266735", "0.62661195", "0.62641335", "0.6261757", "0.626111", "0.62563616", "0.62556255", "0.6244712", "0.62370765", "0.6220657", "0.6214974", "0.6200843", "0.6190626", "0.61746633", "0.61710864", "0.61680424", "0.61354935", "0.61278516", "0.6127077", "0.6116175", "0.61117816", "0.61117387", "0.6107668", "0.60988986", "0.60898477", "0.60791636", "0.6065959", "0.6065851", "0.6062035", "0.605263", "0.6052102", "0.60507274", "0.6047497", "0.6035476", "0.6019904", "0.60060155", "0.6001247", "0.59980977", "0.5995468", "0.59917635", "0.59802914", "0.5970844" ]
0.7502959
3
Function to compute the intersection point of the instance and another line.
def intersection(self, l):
    closest = self.closest_point(l)
    return closest if l.contains(closest) else None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _lines_intersection(self, other):\n\n the_slope, the_y_intercept = False, False\n\n # parallel?\n if self.slope == other.slope:\n return (\n self.y_intercept == other.y_intercept and\n self.x_value == other.x_value\n )\n\n if self.is_vertical():\n x = self.x_value\n the_slope = other.slope\n the_y_intercept = other.y_intercept\n elif other.is_vertical():\n x = other.x_value\n else:\n x = (other.y_intercept - self.y_intercept) / (self.slope - other.slope)\n\n if the_slope is None or the_slope is False:\n the_slope = self.slope\n the_y_intercept = self.y_intercept\n\n y = the_slope * x + the_y_intercept\n\n return Point(x, y)", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def intersection(self, line):\n\t\tdenom = (line.b[1]-line.a[1])*(self.b[0]-self.a[0]) - (line.b[0]-line.a[0])*(self.b[1]-self.a[1])\n\t\t# denominator is 0 if lines are parallel\n\t\tif denom == 0:\n\t\t\treturn None\n\t\t\n\t\tnum_a = (line.b[0]-line.a[0])*(self.a[1]-line.a[1]) - (line.b[1]-line.a[1])*(self.a[0]-line.a[0])\n\t\tnum_b = (self.b[0]-self.a[0])*(self.a[1]-line.a[1]) - (self.b[1]-self.a[1])*(self.a[0]-line.a[0])\n\t\t# if both numerators are 0 then lines are coincident\n\t\tif num_a==0 and num_b==0:\n\t\t\treturn None\n\t\t\t\n\t\tu_a = num_a/denom\n\t\tu_b = num_b/denom\n\t\t\t\n\t\tif 0 <= u_a <= 1 and 0 <= u_b <= 1:\n\t\t\treturn self.a + uA*(self.b-self.a)\n\t\telse:\n\t\t\treturn None", "def _line_intersection(self, line, point):\n den = euclidean_distance((line[0],line[1]), (line[2],line[3]))\n x1, y1, x2, y2 = line[0], line[1], line[2], line[3]\n x3, y3 = point[0], point[1]\n\n u = ( ((x3-x1) * (x2-x1)) + ((y3-y1) * (y2-y1)) ) / den\n\n x, y = (x1 + u * (x2-x1)), (y1 + u * (y2-y1))\n dist = euclidean_distance((x,y), point)\n\n # pygame.draw.circle(self.screen, SIM_COLORS['aqua'], \n # (int(x*SCALE), int(y*SCALE)), \n # int(40), \n # 0)\n # print dist*SCALE, (x*SCALE,y*SCALE)\n\n return dist, (x, y)", "def LineLineIntersection(lineA, lineB):\n lineA = rhutil.coerceline(lineA, True)\n lineB = rhutil.coerceline(lineB, True)\n rc, a, b = Rhino.Geometry.Intersect.Intersection.LineLine(lineA, lineB)\n if not rc: return None\n return lineA.PointAt(a), lineB.PointAt(b)", "def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P", "def line_intersection(line1, line2):\n xdiff = (line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def lineintersect(line1,line2):\n a1, a2, b1, b2=line1[0],line1[1],line2[0],line2[1]\n\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, 
np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return (float('inf'), float('inf'))\n return (x/z, y/z)", "def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]", "def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]", "def intersect_line(self, line: Line, **kwargs) -> Point:\n if self.normal.is_perpendicular(line.direction, **kwargs):\n raise ValueError(\"The line and plane must not be parallel.\")\n\n vector_plane_line = Vector.from_points(self.point, line.point)\n\n num = -self.normal.dot(vector_plane_line)\n denom = self.normal.dot(line.direction)\n\n # Vector along the line to the intersection point.\n vector_line_scaled = num / denom * line.direction\n\n return line.point + vector_line_scaled", "def intersection(self, line: AbstractLine) -> Optional[AbstractPoint]:\n plane = Plane(self.__point_a,\n self.__point_b - self.__point_a,\n self.__point_c - self.__point_a)\n\n point = plane.intersection(line)\n if point is not None:\n if self.has_point(point):\n return point\n return None", "def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [x0, y0]", "def line_intersection_with(self, other):\n # solve following system :\n # intersection = start of self + alpha * direction of self\n # intersection = start of other + beta * direction of other\n directions = [s.endpoints[1] - s.endpoints[0] for s in (self, other)]\n denominator = directions[0].cross_product(directions[1])\n if abs(denominator) < 0.000001:\n # almost parallel lines\n return\n start_diff = other.endpoints[0] - self.endpoints[0]\n alpha = start_diff.cross_product(directions[1]) / denominator\n return self.endpoints[0] + directions[0] * alpha", "def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]", "def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None", "def intersection(line1, line2):\n xdiff = 
(line1[0][0] - line1[1][0], line2[0][0] - line2[1][0])\n ydiff = (line1[0][1] - line1[1][1], line2[0][1] - line2[1][1])\n\n def det(a, b):\n return a[0] * b[1] - a[1] * b[0]\n\n div = det(xdiff, ydiff)\n if div == 0:\n raise Exception('lines do not intersect')\n\n d = (det(*line1), det(*line2))\n x = det(d, xdiff) / div\n y = det(d, ydiff) / div\n return x, y", "def intersect_line(self, line: Line) -> Tuple[Point, Point]:\n vector_to_line = Vector.from_points(self.point, line.point)\n vector_unit = line.direction.unit()\n\n dot = vector_unit.dot(vector_to_line)\n\n discriminant = dot**2 - (vector_to_line.norm() ** 2 - self.radius**2)\n\n if discriminant < 0:\n raise ValueError(\"The line does not intersect the sphere.\")\n\n pm = np.array([-1, 1]) # Array to compute minus/plus.\n distances = -dot + pm * math.sqrt(discriminant)\n\n point_a, point_b = line.point + distances.reshape(-1, 1) * vector_unit\n\n return point_a, point_b", "def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]", "def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}", "def line_intercept(p1,p2,p3,p4):\n # Note if vertical line m = None and b holds x-val\n (m1,b1) = line_param(p1,p2)\n (m2,b2) = line_param(p3,p4)\n if (m1 != None) and (m2 != None):\n if (m1-m2) != 0.:\n x = (b2-b1)/(m1-m2)\n y = m1*x + b1\n else:\n return (None,0)\n elif (m1 == None) and (m2 != None):\n x = b1 \n y = m2*x + b2\n elif (m1 != None) and (m2 == None):\n x = b2\n y = m1*x + b1\n else:\n return (None,0) \n \n # min and max of points. 
\n max_x1 = max(p1[0], p2[0])\n min_x1 = min(p1[0], p2[0])\n max_y1 = max(p1[1], p2[1])\n min_y1 = min(p1[1], p2[1])\n max_x2 = max(p3[0], p4[0])\n min_x2 = min(p3[0], p4[0])\n max_y2 = max(p3[1], p4[1])\n min_y2 = min(p3[1], p4[1])\n #check if the intersection is in bounds\n flag = 1\n if x > max_x1 or x < min_x1:\n flag = 0\n elif x > max_x2 or x < min_x2:\n flag = 0\n elif y > max_y1 or y < min_y1: \n flag = 0\n elif y > max_y2 or y < min_y2: \n flag = 0\n #check if the intersection point corresponds to an end point\n intercept = num.array([x,y])\n def _same(p1,p2,prec=0.0001):\n \"\"\" are two points the same \"\"\"\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True\n if flag == 1:\n if _same(intercept,p1):\n flag = 2\n elif _same(intercept,p2):\n flag = 2\n elif _same(intercept,p3):\n flag = 2\n elif _same(intercept,p4):\n flag = 2\n return (intercept,flag)", "def line_line_intersection(a1: Vector3, a2: Vector3, b1: Vector3, b2: Vector3) -> Vector3:\n # From https://stackoverflow.com/a/20677983/7245441\n\n def det(a: Vector3, b: Vector3) -> float:\n return a.x * b.y - a.y * b.x\n\n y_diff = Vector3(a1.y - a2.y, b1.y - b2.y, 0)\n x_diff = Vector3(a1.x - a2.x, b1.x - b2.x, 0)\n\n div = det(x_diff, y_diff)\n if div == 0:\n raise Exception(\"Lines do not intersect\")\n\n d = Vector3(det(a1, a2), det(b1, b2), 0)\n x = det(d, x_diff) / div\n y = det(d, y_diff) / div\n\n return Vector3(x, y, 0)", "def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y", "def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def intersect(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0\n t = c.dot(line.p - self.p) / d\n return True, self.lerp(t), t", "def test_intersect_line_in_one_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 1", "def line_segment_intersection(line1,\n line2):\n a = float(line1[0][0]*line1[1][1] - line1[0][1]*line1[1][0])\n b = float(line1[0][1] - line1[1][1])\n c = float(line1[1][0] - line1[0][0])\n\n d = float(line2[0][0]*line2[1][1] - line2[0][1]*line2[1][0])\n e = float(line2[0][1] - line2[1][1])\n f = float(line2[1][0] - line2[0][0])\n\n prod = b*f - c*e\n if abs(prod) < 1e-10:\n return (np.inf, np.inf)\n\n xc = (d*c - a*f) / prod\n yc = (a*e - b*d) / prod\n\n sign_x1 = (xc - line1[0][0])*(xc - line1[1][0])\n sign_y1 = (yc - line1[0][1])*(yc - line1[1][1])\n\n if sign_x1 > 
1e-10:\n return (np.inf, np.inf)\n if sign_x1 < 1e-10:\n if sign_y1 > 1e-10:\n return (np.inf, np.inf)\n\n sign_x2 = (xc - line2[0][0])*(xc - line2[1][0])\n sign_y2 = (yc - line2[0][1])*(yc - line2[1][1])\n\n if sign_x2 > 1e-10:\n return (np.inf, np.inf)\n if sign_x2 == 1e-10:\n if sign_y2 > 1e-10:\n return (np.inf, np.inf)\n return (int(xc), int(yc))", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def intersection(self, other):\n log.info('self: '+str(self)+' other: '+str(other))\n if self == other:\n # Used to be return True, that is definitely not right (expects Coordinate)\n # Do we want start or end ? Does it matter? Lines are the same, everything is\n # an intersection.\n return self.start\n # If any of the start/end points match, return that point.\n if self.end==other.start or self.end == other.end:\n return self.end \n if self.start==other.start or self.start == other.end: \n return self.start\n\n # Line equation: y = mx + b\n # m = (y2-y1)/(x2-x1)\n # B_self = y - M_self*x\n # Pick any x/y on the line - try end point\n # B_self = self.end.lat - M_self*self.end.lon\n # B_other = other.end.lat - M_self*self.end.lon\n from pyresample.spherical_geometry import Coordinate\n\n selfendlon = self.end.lon\n selfstartlon = self.start.lon\n otherendlon = other.end.lon\n otherstartlon = other.start.lon\n # Not sure if this is necessary, or good...\n# if self.end.lon < 0:\n# selfendlon = self.end.lon + 2*math.pi\n# if self.start.lon < 0:\n# selfstartlon = self.start.lon + 2*math.pi\n# if other.end.lon < 0:\n# otherendlon = other.end.lon + 2*math.pi\n# if other.start.lon < 0:\n# otherstartlon = other.start.lon + 2*math.pi\n\n log.info(' self lons: '+str(math.degrees(selfstartlon))+' '+str(math.degrees(selfendlon))+' other lons: '+str(math.degrees(otherstartlon))+' '+str(math.degrees(otherendlon)))\n\n # If both vertical, will be no intersection\n if abs(selfendlon - selfstartlon) < EPSILON and abs(otherendlon - otherstartlon) < EPSILON:\n log.info(' Both vertical, no intersection')\n return None\n # If self is vertical, but not parallel, intersection will be selfstartlon and lat = Mother*lon+B_other\n if abs(selfendlon - selfstartlon) < EPSILON:\n lon = selfstartlon\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n B_other = other.end.lat - M_other*otherendlon\n lat = M_other*lon+B_other\n log.info(' self is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and\n lon < max([otherendlon,otherstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n # same for other\n if abs(otherendlon - otherstartlon) < EPSILON:\n lon = otherstartlon\n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n B_self = self.end.lat - M_self*selfendlon\n lat = M_self*lon+B_self\n log.info(' other is vertical')\n #Make sure it falls within the segment and not outside.\n # Previously was only checking lat, need to \n # also check lon or opposite side of world would match\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and \n lon > min([selfendlon,selfstartlon]) and\n lon < max([selfendlon,selfstartlon])):\n log.info(' and intersects')\n # Apparently Coordinate takes degrees ??? And must be -180 to 180 ?!\n # MLS Use wrap_longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n return None\n\n \n\n # Get slopes of the lines \n M_self = (self.end.lat - self.start.lat)/(selfendlon-selfstartlon)\n M_other = (other.end.lat - other.start.lat)/(otherendlon-otherstartlon)\n \n # If they are parallel, no intersection\n if (M_self-M_other) < EPSILON:\n log.info(' self and other are parallel, no intersection')\n return None\n\n # Get the y-intercepts of the lines \n B_self = self.end.lat - M_self*selfendlon\n B_other = other.end.lat - M_other*otherendlon\n\n # Solve the equation\n # y=m1x+b1 and y=m2x+b2, equate y's so m1x+b1=m2x+b2, x = (b1-b2)/(m2-m1)\n # equate x's so x=(y-b1)/m1=(y-b2)/m2, y = (b1m2-b2m1)/(m2-m1)\n lon = (B_self - B_other)/(M_other - M_self)\n lat = (B_self*M_other - B_other*M_self)/(M_other-M_self)\n\n # Make sure lat/lon intersects within the line segment, and not outside.\n if (lat > min([other.end.lat,other.start.lat]) and \n lat < max([other.end.lat,other.start.lat]) and\n lon > min([otherendlon,otherstartlon]) and \n lon < max([otherendlon,otherstartlon]) and\n lat > min([self.end.lat,self.start.lat]) and \n lat < max([self.end.lat,self.start.lat]) and\n lon > min([selfendlon,selfstartlon]) and \n lon < max([selfendlon,selfstartlon])):\n log.info(' self and other intersect within segment')\n # Apparently Coordinate takes degrees ??? 
And must be -180 to 180 ?!\n # MLS use wrap longitudes?\n if lon > math.pi:\n lon -= 2*math.pi\n return Coordinate(math.degrees(lon),math.degrees(lat))\n else:\n log.info(' self and other intersect, but not within segment')\n return None", "def _intersection_homogenous(homog_line_0, homog_line_1):\n # NB: renamed from '_intersection'\n eps = 1e-13\n a,b,c=homog_line_0\n u,v,w=homog_line_1\n D=float(b*u-v*a)\n if abs(D)<eps:\n # parallel lines\n return None, None\n xp=-(w*b-c*v)/D\n yp= (w*a-c*u)/D\n\n return xp, yp", "def intersection_line_plane(line, plane, epsilon=1e-6):\n pt1 = line[0]\n pt2 = line[1]\n p_cent = plane[0]\n p_norm = plane[1]\n\n v1 = subtract_vectors(pt2, pt1)\n dot = dot_vectors(p_norm, v1)\n\n if abs(dot) > epsilon:\n v2 = subtract_vectors(pt1, p_cent)\n fac = -dot_vectors(p_norm, v2) / dot\n vec = scale_vector(v1, fac)\n return add_vectors(pt1, vec)\n else:\n return None", "def intersection_with(self, other):\n i = self.line_intersection_with(other)\n if i is None:\n return None# parallel lines\n\n if self.contains(i) and other.contains(i) and not (i in self.endpoints and i in other.endpoints):\n return i\n return None", "def intersect(l: Line, p: Plane) -> Point:\n if math.isclose((l.d * p.normal()), 0):\n # If the line direction is perpendicular to the plane normal,\n # the line and plane must be parallel.\n return None\n else:\n # There exists a parameter t, which makes\n # p.isInPlane(l.point(t)) == 0\n # Let's find it.\n # Initial guess\n t1 = 1\n p1 = l.point(t1)\n d1 = distancePointPlane(p1, p)\n t2 = 2\n p2 = l.point(t2)\n d2 = distancePointPlane(p2, p)\n\n # Calculate line through the two points (t,d)\n a = (d2 - d1) / (t2 - t1)\n b = d1 - a * t1\n\n # Find the t-value where d is zero\n # 0 = at+b <=> t = -b/a\n t = -b / a\n print(\"parameter: {}\".format(t))\n return l.point(t)", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def intersect_shape_by_line(topods_shape, line, low_parameter=0.0, hi_parameter=float(\"+inf\")):\n from OCC.Core.IntCurvesFace import IntCurvesFace_ShapeIntersector\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(topods_shape, TOLERANCE)\n shape_inter.PerformNearest(line, low_parameter, hi_parameter)\n\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n return (shape_inter.Pnt(1),\n shape_inter.Face(1),\n shape_inter.UParameter(1),\n shape_inter.VParameter(1),\n shape_inter.WParameter(1))", "def intersect_point(self,m1,c1,m2,c2):\n\n x = (c2 - c1)/(m1 - m2)\n y = m1*x + c1\n return x, y", "def intersection_with(self, other):\n\n if self.gradient == other.gradient:\n # Lines of the same gradient never intersect.\n return None\n\n # Calculate the X and Y values of this intersection using linear algebra.\n x = (other.y_intercept - self.y_intercept) / (self.gradient - other.gradient)\n y = self.gradient * x + self.y_intercept\n\n # If this or the other line belong to a shape, add it to a new set of shapes\n # involved in this intersection.\n shapes = filter((lambda o: o is not None), (self.shape, other.shape))\n return Intersection(x, y, shapes)", "def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)", "def perpendicularIntersection(point, linePoint1, linePoint2):\n\t\tx1 = linePoint1[0]\n\t\ty1 = linePoint1[1]\n\t\tx2 = linePoint2[0]\n\t\ty2 = 
linePoint2[1]\n\t\tx3 = point[0]\n\t\ty3 = point[1]\n\t\tk = ((y2-y1) * (x3-x1) - (x2-x1) * (y3-y1)) / ((y2-y1)**2 + (x2-x1)**2)\n\t\tx4 = x3 - k * (y2-y1)\n\t\ty4 = y3 + k * (x2-x1)\n\t\treturn (x4, y4)", "def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]", "def getIntersectPoint(p1, p2, p3, p4):\n points = p1, p2, p3, p4\n gradients = (\n CollisionUtility.calculate_gradient(p1, p2), CollisionUtility.calculate_gradient(p3, p4)\n )\n\n # See if the the lines are parallel\n if gradients[0] != gradients[1]:\n return CollisionUtility.calculate_not_parallel_intersection(points, gradients)\n else:\n return CollisionUtility.calculate_parallel_intersection(points, gradients)", "def IntersectWithLine(self, , , p_float_6, p_float_7, p_float=..., p_float=..., p_float=..., *args, **kwargs):\n ...", "def intersect_ext(self, line):\n c = line.cross_z\n d = self.v.dot(c)\n if d == 0:\n return False, 0, 0, 0\n dp = line.p - self.p\n c2 = self.cross_z\n u = c.dot(dp) / d\n v = c2.dot(dp) / d\n return u > 0 and v > 0 and u < 1 and v < 1, self.lerp(u), u, v", "def LinePlaneIntersection(line, plane):\n plane = rhutil.coerceplane(plane, True)\n line_points = rhutil.coerce3dpointlist(line, True)\n line = Rhino.Geometry.Line(line_points[0], line_points[1])\n rc, t = Rhino.Geometry.Intersect.Intersection.LinePlane(line, plane) \n if not rc: return scriptcontext.errorhandler()\n return line.PointAt(t)", "def inside_of_line_2d(pt1, pt2, reference_point, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmInsideOfLineWithTol(pt1, pt2, reference_point, pt, tol)", "def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)", "def _intersection_forward_line_segment(semiinf_line1, line2, semiinf_h_line1=None, h_line2=None):\n\n semiinf_h_line1 = _homogenous_line(*semiinf_line1) if semiinf_h_line1 is None else semiinf_h_line1\n h_line2 = _homogenous_line(*line2) if h_line2 is None else h_line2\n\n P = _intersection_homogenous(semiinf_h_line1, h_line2)\n if not _point_within_bounds(line2,P):\n # semi-infinite line does not intersect the particular SEGMENT of line2\n return None, P\n\n A,B = semiinf_line1\n if abs(B[1]-A[1])>=abs(B[0]-A[0]):\n t = (P[1]-A[1])/(B[1]-A[1])\n else:\n t = (P[0]-A[0])/(B[0]-A[0])\n\n if t>0: # intersection lies behind A, i.e. 
toward or beyond B\n return None, P\n\n return (P[0]-A[0])**2+(P[1]-A[1])**2, P", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def segmentsIntersect(self, other, allowProjInt = False):\n \n \"\"\"\n If we are not allowing projected intersection and the bounding boxes\n do not intersect then return -3, None.\n \"\"\"\n if(not(allowProjInt) and not(self.doBoundingBoxesIntersect(other))): return -3, None #return if bounding boxes do not intersect\n \"\"\" A special case for colinear lines. \"\"\" \n if(self.areColinear(other)):\n \"\"\"\n First place all four endpoint into a set. This will elliminate shared\n end points. Next, convert the set back into a list so it can\n finally be sorted.\n \"\"\"\n pointList = sorted(list(set([self.start, self.end, other.start, other.end])), key=self.calcT) \n if len(pointList) == 3:\n \"\"\"\n if there are only three points in the list then return 2, the\n middle point in the list since it is the shared point of the\n two lines.\n \"\"\"\n return 2, pointList[1] #if they are colinear and two ends have the same point return that point\n elif len(pointList) == 2:\n \"\"\" If the two lines have the same endpoints. \"\"\"\n return 2.5, self.getMidPoint()\n else:\n \"\"\"\n If the length was not three then we know it is length 4 in which case\n we turn the two middle points into a line and return 3, the line's\n midpoint.\n \"\"\"\n tempLine = Line(pointList[1], pointList[2])\n return 3, tempLine.getMidPoint() #If they are colinear return half way inbetween middle two points\n \"\"\"\n To calculate the intersection of two points we put the lines into the\n form P+tr and Q+us where P and Q are the starting points of the lines\n r and s are vectors form the starting point to the end point, and\n t and u are scalars. Set the two equations equal to each other and \n then solve for t and u. 
If t and u are in the range [0-1] then the\n intersection point lines on the lines, else it is a projected point.\n \"\"\"\n r = np.subtract(self.end.get2DPoint(), self.start.get2DPoint())\n s = np.subtract(other.end.get2DPoint(), other.start.get2DPoint())\n Q_Less_P = np.subtract(other.start.get2DPoint(), self.start.get2DPoint())\n denom = np.cross(r, s)*1.0\n t = np.cross(Q_Less_P, s)/denom\n u = np.cross(Q_Less_P, r)/denom \n point = p.Point(self.start.x + r[c.X]*t, self.start.y+r[c.Y]*t) \n #If t or u are not in the range 0-1 then the intersection is projected\n if(t > 1 or u > 1 or t < 0 or u < 0):\n \"\"\"\n Due to floating point problems sometimes if t or u is outside the 0-1\n range we end up inside this if statement but are actually at the end\n of one of the lines. I can't figure out how to properly add in a tolerance\n so we are taking the four end points putting them into a list,\n then comparing them to the calculated point. The Point module is\n properly handling tolerances so if the point == any of the end\n points then we should not return a projected point.\n \"\"\"\n if not any(point == lineEnd for lineEnd in (self.start, self.end,\n other.start, other.end)):\n return -1, point #return for projected intersection of non-colinear lines\n return 1, point #lines intersect at given point", "def get_shape_line_intersections(cls, shape, line):\n shape_inter = IntCurvesFace_ShapeIntersector()\n shape_inter.Load(shape, 1e-3)\n shape_inter.PerformNearest(line, float(\"-inf\"), float(\"+inf\"))\n with assert_isdone(shape_inter, \"failed to computer shape / line intersection\"):\n intersections = [(shape_inter.Pnt(i), shape_inter.Face(i), line) for i in\n range(1, shape_inter.NbPnt() + 1)] # Indices start at 1 :(\n return intersections", "def get_intersection(self, particle): \n\t\tline_string_coord = particle.line_coordinates()\n\t\ttrajectory = LineString(line_string_coord)\n\t\tintersection = self.line.intersection(trajectory)\n\t\treturn intersection", "def intersect_ext(self, line):\n res, p, v = self.intersect(line)\n v0 = self.p0 - self.c\n v1 = p - self.c\n u = self.signed_angle(v0, v1) / self.da\n return res and u > 0 and v > 0 and u < 1 and v < 1, p, u, v", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack((a1, a2, b1, b2)) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return None\n return np.array([x / z, y / z])", "def get_intersect(a1, a2, b1, b2):\r\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\r\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\r\n l1 = np.cross(h[0], h[1]) # get first line\r\n l2 = np.cross(h[2], h[3]) # get second line\r\n x, y, z = np.cross(l1, l2) # point of intersection\r\n if z == 0: # lines are parallel\r\n return (float('inf'), float('inf'))\r\n return (x/z, y/z)", "def get_intersection_point(l1, l2):\n m, b = l1\n n, c = l2\n # Find when mx + b = nx + c\n # mx - nx = c - b\n # And...\n x = (c-b) / (m-n)\n # Then plug back in\n y = m*x + b\n return (x, y)", "def intersection(self, segment):\n p0, p1 = segment.p0, segment.p1\n\n # x = t*(p1 - p0) + p0\n # n'*(x - origin) = 0\n # combine to get\n # n'*(t*(p1-p0) + p0 - origin) = 0\n # solve for t\n\n v = p1 - p0\n w = p0 - self.origin\n t = -np.dot(self.normal, w)/np.dot(self.normal, v)\n\n if 0-epsilon <= t <= 1+epsilon:\n return t*(p1-p0) + p0\n else:\n return None", "def 
line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)", "def get_intersect(a1, a2, b1, b2):\n s = np.vstack([a1, a2, b1, b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return float('inf'), float('inf')\n return x / z, y / z", "def compute_x_intersection(y, x1, x2, y1, y2):\n delta_y = y2 - y1\n if delta_y == 0:\n return x1\n return ((y - y1) * (x2 - x1) / delta_y) + x1", "def is_intersection_line_line(ab, cd, epsilon=1e-6):\n a, b = ab\n c, d = cd\n\n line_vector_1 = normalize_vector(vector_from_points(a, b))\n line_vector_2 = normalize_vector(vector_from_points(c, d))\n # check for parallel lines\n print(abs(dot_vectors(line_vector_1, line_vector_2)))\n if abs(dot_vectors(line_vector_1, line_vector_2)) > 1.0 - epsilon:\n return False\n # check for intersection\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n if dot_vectors(d_vector, subtract_vectors(c, a)) == 0:\n return True\n return False", "def test_intersect_line_in_no_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 0", "def intersect(self, other):\n if isinstance(other, Arc):\n return other.intersect(self)\n elif not isinstance(other, LineSegment):\n raise TypeError(other)\n S = (self.p2 - self.p1).scale(1.)\n T = (other.p2 - other.p1).scale(1.)\n denom = S.y * T.x - S.x * T.y\n if nearly_zero(denom):\n if nearly_zero(S.cross(other.p1 - self.p1)):\n q1 = (other.p1 - self.p1) * S / (S * S)\n q2 = (other.p2 - self.p1) * S / (S * S)\n if q2 < q1:\n q1, q2 = q2, q1\n left, right = max(0, q1), min(1, q2)\n if left < right:\n return LineSegment(self.p1 + left * S, self.p1 + right * S)\n return None\n a = (T.x * (other.p1.y - self.p1.y) - T.y * (other.p1.x - self.p1.x)) / denom\n b = (S.x * (other.p1.y - self.p1.y) - S.y * (other.p1.x - self.p1.x)) / denom\n if 0 <= a <= 1 and 0 <= b <= 1:\n return self.p1 + a * S\n # else return None because we don't intersect", "def get_intersection(self, l, max_y=None):\n\n # Get the points\n i, j = self.breakpoint\n\n # Initialize the resulting point\n result = Coordinate()\n p: Coordinate = i\n\n # First we replace some stuff to make it easier\n a = i.xd\n b = i.yd\n c = j.xd\n d = 
j.yd\n u = 2 * (b - l)\n v = 2 * (d - l)\n\n # Handle the case where the two points have the same y-coordinate (breakpoint is in the middle)\n if i.yd == j.yd:\n result.xd = (i.xd + j.xd) / 2\n\n if j.xd < i.xd:\n result.yd = max_y or float('inf')\n return result\n\n # Handle cases where one point's y-coordinate is the same as the sweep line\n elif i.yd == l:\n result.xd = i.xd\n p = j\n elif j.yd == l:\n result.xd = j.xd\n else:\n # We now need to solve for x\n # 1/u * (x**2 - 2*a*x + a**2 + b**2 - l**2) = 1/v * (x**2 - 2*c*x + c**2 + d**2 - l**2)\n # Then we let Wolfram alpha do the heavy work for us, and we put it here in the code :D\n x = -(Decimal.sqrt(\n v * (a ** 2 * u - 2 * a * c * u + b ** 2 * (u - v) + c ** 2 * u) + d ** 2 * u * (v - u) + l ** 2 * (\n u - v) ** 2) + a * v - c * u) / (u - v)\n result.xd = x\n\n # We have to re-evaluate this, since the point might have been changed\n a = p.xd\n b = p.yd\n x = result.xd\n u = 2 * (b - l)\n\n # Handle degenerate case where parabolas don't intersect\n if u == 0:\n result.yd = float(\"inf\")\n return result\n\n # And we put everything back in y\n result.yd = 1 / u * (x ** 2 - 2 * a * x + a ** 2 + b ** 2 - l ** 2)\n return result", "def crossLine(self, other):\n if self.parallel(other): return None\n line = self.getLine()\n point = other.crossLine(line)\n if point is not None:\n if point in self and point in other:\n return point", "def crossSegment(self, other, e=1e-14, **kwargs):\n # Determine the point of intersection between the line of the given segment ang the line\n line = other.getLine()\n point = self.crossLine(line)\n if point is None:\n return None\n x, y = point\n # Determine if the point of intersection belongs to both the segment and the line\n if other.xmin - e <= point.x <= other.xmax + e and other.ymin - e <= y <= other.ymax + e:\n return Point(x, y, **kwargs)\n # By default if nothing is returned the function returns None", "def line_equation(x1, y1, x2, y2):\n \n a = y2 - y1\n b = x1 - x2\n c = x2*y1 - x1*y2\n return a, b, c", "def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = ((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? 
if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]", "def intersection(x, y, f, p):", "def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])", "def crossLine(self, other):\n a, b = self.point\n c, d = other.point\n m, n = self.vector\n o, p = other.vector\n if n * o == m * p: # The lines are parallels\n return None\n elif self.angle == -math.pi / 2:\n return Point(a, d)\n elif other.angle == -math.pi / 2:\n return Point(b, c)\n else:\n x = (a * n * o - b * m * o - c * m * p + d * m * o) / (n * o - m * p)\n y = (x - a) * n / m + b\n return Point(x, y)", "def intersect_segment(self, p1, p2):\n p1 = base.getvector(p1)\n if len(p1) == 2:\n p1 = np.r_[p1, 1]\n p2 = base.getvector(p2)\n if len(p2) == 2:\n p2 = np.r_[p2, 1]\n \n\n z1 = self.line * p1\n z2 = self.line * p2\n\n if np.sign(z1) != np.sign(z2):\n return True\n if self.contains(p1) or self.contains(p2):\n return True\n return False", "def intersection(self, pn1, pn2, h):\n #print \"intersectionection:\", pn1, pn2, h\n #print \"z: \", (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0]\n return (pn2[0]-pn1[0])/(pn2[1]-pn1[1])*(h-pn1[1])+pn1[0], h", "def vlinecomp(self):\n m_h, c_h = self.fitline(0,2) # Computes the equation for a line joining the points on the outside of the gear on opposites sides of the edm cut\n\n m_v_avg = self.average_grad() # Computes the average gradient of the constructed vertical line\n\n m_v_avg, c_v = self.line_through_point(m_v_avg,4) # Equation of line with average gradient though crack start point\n\n x_intersect,y_intersect = self.intersect_point(m_h, c_h, m_v_avg, c_v)\n\n coord_top = [x_intersect,y_intersect]\n coord_bot = [self.points[4, 0], self.points[4, 1]]\n\n distance = self.distance(coord_bot,coord_top)\n\n return coord_top, coord_bot, distance", "def linesegment_plane_intersection(self, p0,p1,point,normal): # only returns lines...intersections through the segment end points are ignored\n\t\tp0dot=numpy.dot(p0-point,normal)\n\t\tp1dot=numpy.dot(p1-point,normal)\n\t\tif (p0dot>0 and p1dot<0) or (p0dot<0 and p1dot>0): \n\t\t\t# if the dot products have opposing signs, then the line intersects the plane\n\t\t\treturn True,p0+(p1-p0)*abs(p0dot)/(abs(p0dot)+abs(p1dot))\n\t\telse:\n\t\t\treturn False", "def _lines_intersect(self, line1, line2):\n return self._lines_overlap_on_x_axis(line1, line2) and self._lines_overlap_on_y_axis(line1, line2)", "def intersects(self, other_line):\n intpt= self.intersection(other_line)\n return bool(intpt)", "def intersection_line_triangle(line, triangle, epsilon=1e-6):\n a, b, c = triangle\n v1 = subtract_vectors(line[1], line[0])\n p1 = line[0]\n # Find vectors for two edges sharing V1\n e1 = subtract_vectors(b, a)\n e2 = subtract_vectors(c, a)\n # Begin calculating determinant - also used to calculate u parameter\n p = cross_vectors(v1, e2)\n # if determinant is near zero, ray lies in plane of triangle\n det = dot_vectors(e1, p)\n # NOT CULLING\n if(det > - epsilon and det < epsilon):\n return None\n inv_det = 1.0 / det\n # calculate distance from V1 to ray origin\n t = subtract_vectors(p1, a)\n # Calculate u parameter and make_blocks bound\n u = dot_vectors(t, p) * inv_det\n # The intersection lies outside of the triangle\n if(u < 0.0 or u > 1.0):\n return None\n # Prepare to make_blocks v parameter\n q = cross_vectors(t, e1)\n # 
Calculate V parameter and make_blocks bound\n v = dot_vectors(v1, q) * inv_det\n # The intersection lies outside of the triangle\n if(v < 0.0 or u + v > 1.0):\n return None\n t = dot_vectors(e2, q) * inv_det\n if t > epsilon:\n return add_vectors(p1, scale_vector(v1, t))\n # No hit\n return None", "def intersection(self, segment):\n intersection = self.hyperplane.intersection(segment)\n if intersection is not None and np.linalg.norm(intersection - self.closest_point_to(intersection)) < epsilon:\n return intersection\n\n return None", "def lines_intersect_2d(line1_pt1, line1_pt2, line2_pt1, line2_pt2):\r\n return geometry.gmLinesIntersect(line1_pt1, line1_pt2, line2_pt1, line2_pt2)", "def inside_or_on_line_2d(p1, p2, reference_point, pt, tol=None):\r\n if tol is None:\r\n tol = get_tol_2d()\r\n return geometry.gmInsideOrOnLineWithTol(p1, p2, reference_point, pt, tol)", "def get_line_intersects_line(self) -> List[List[Line]]:\n intersections = []\n\n for line_bin in self.line_bins.values():\n for connection_pair in itertools.combinations(line_bin, 2):\n line_segments = (\n connection_pair[0].line_segments + connection_pair[1].line_segments\n )\n\n for segment_pair in itertools.combinations(line_segments, 2):\n if check_cross(segment_pair[0], segment_pair[1]):\n intersections.append(connection_pair)\n # for line_bin in self.line_bins.values():\n # segments = []\n # line_idx_map = []\n # for line_1, line_2 in itertools.combinations(line_bin, 2):\n # for segment in line_1.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_1)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n # for segment in line_2.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_2)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n #\n # for collision_point in segments_intersections(segments).values():\n # for intersection in collision_point:\n # intersections.append([line_idx_map[i] for i in intersection])\n return intersections", "def intersects(connection, blocker):\n # this function solves two bounded lines for the point of intersection.\n # if (x,y) is in the domain of both of the lines this function return true.\n cslope = float(connection[0][1] - connection[1][1]) / (connection[0][0] - connection[1][0])\n bslope = float(blocker[0][1] - blocker[1][1]) / (blocker[0][0] - blocker[1][0])\n if cslope != bslope: # check for parallelism.\n dm = float(cslope - bslope)\n cintercept = float(connection[0][1] - cslope * connection[0][0])\n bintercept = float(blocker[0][1] - bslope * blocker[0][0])\n db = float(cintercept - bintercept)\n ix = -db/dm # solving for x\n iy = cslope*ix + cintercept # solving for y.\n # now we have the point of interception but is it on the domain\n # of **both** lines?\n cdomain = sorted([connection[0][0], connection[1][0]])\n bdomain = sorted([blocker[0][0], blocker[1][0]])\n if cdomain[0] < ix and cdomain[1] > ix and bdomain[0] < ix and bdomain[1] > ix:\n # the point of intersection is on the domain of both lines.\n return True\n # slopes are equal, or the point of intersection is not on the domain\n # of both lines.\n return False", "def _distance_to_line(begin, end, point):\n return _vec_distance(point, _nearest_point_on_line(begin, end, point))", "def intersection(self, other): # -> BaseGeometry:\n ...", "def intersection(self, other):\n return self._geomgen(capi.geom_intersection, other)", "def get_line_to(self, provided_point):\n\n \"\"\"Calculate slope\"\"\"\n a = 
(provided_point.y - self.y) / (provided_point.x - self.x)\n\n \"\"\"Calculate b\"\"\"\n b = self.y - a * self.x\n\n return (a,b)", "def _distance(point, line_point1, line_point2):\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance", "def intersectionOnBaseline( thisLayer ):\n\tgoodMeasure = 1\n\n\toriginX = thisLayer.bounds.origin.x - goodMeasure\n\toriginPoint = NSPoint( originX, 0.0 )\n\ttargetX = originX + thisLayer.bounds.size.width + goodMeasure\n\ttargetPoint = NSPoint( targetX, 0.0 )\n\t\n\tlistOfIntersections = sliceIntersections( thisLayer, originPoint, targetPoint )\n\t\n\tprint(\"intersectionOnBaseline:\", listOfIntersections, originPoint, targetPoint)\n\tif listOfIntersections:\n\t\trightmostIntersection = listOfIntersections[-2].pointValue()\n\t\treturn rightmostIntersection\n\telse:\n\t\treturn None", "def crossLine(self, other):\n ml = self.getLine(correct=False)\n point = ml.crossLine(other)\n if point:\n if (point in self) and (point in other):\n return point", "def line_equation_ap(angle, (x1, y1)):\n \n # get second point on the line\n x2 = float(x1) + cos(angle)\n y2 = float(y1) + sin(angle)\n \n # return A, B and C coefficients\n return (y1 - y2, x2 - x1, x1*y2 - x2*y1)", "def intersect_plane(self, other: Plane, **kwargs) -> Line:\n if self.normal.is_parallel(other.normal, **kwargs):\n raise ValueError(\"The planes must not be parallel.\")\n\n array_normals_stacked = np.vstack((self.normal, other.normal))\n\n # Construct a matrix for a linear system.\n array_00 = 2 * np.eye(3)\n array_01 = array_normals_stacked.T\n array_10 = array_normals_stacked\n array_11 = np.zeros((2, 2))\n matrix = np.block([[array_00, array_01], [array_10, array_11]])\n\n dot_a = np.dot(self.point, self.normal)\n dot_b = np.dot(other.point, other.normal)\n array_y = np.array([0, 0, 0, dot_a, dot_b])\n\n # Solve the linear system.\n solution = np.linalg.solve(matrix, array_y)\n\n point_line = Point(solution[:3])\n direction_line = self.normal.cross(other.normal)\n\n return Line(point_line, direction_line)", "def lineBoxIntersection(w1, w2, b, xmin, ymin, xmax, ymax):\n \n point1 = None\n point2 = None\n if w2 == 0:\n x1a = -(w2*ymin + b)*1.0/w1\n x1b = -(w2*ymax + b)*1.0/w1\n \n point1 = (x1a, ymin)\n point2 = (x1b, ymax)\n else:\n x2a = -(w1*xmin + b)*1.0/w2\n x2b = -(w1*xmax + b)*1.0/w2\n \n if w1 == 0:\n point1 = (xmin, x2a)\n point2 = (xmax, x2b)\n else:\n\n x1a = -(w2*ymin + b)*1.0/w1\n x1b = -(w2*ymax + b)*1.0/w1\n # Point 1\n if x2a < ymin:\n if xmin <= x1a and x1a <= xmax:\n # Point 1 on bottom edge\n point1 = (x1a, ymin)\n elif x2a > ymax:\n if xmin <= x1b and x1b <= xmax:\n # Point 1 on top edge\n point1 = (x1b, ymax)\n else:\n # Point 1 on left edge\n point1 = (xmin, x2a)\n \n # Point 2\n if point1 is not None:\n if x2b < ymin:\n # Point 2 on bottom edge\n point2 = (x1a, ymin)\n elif x2b > ymax:\n # Point 2 on top edge\n point2 = (x1b, ymax)\n else:\n # Point 2 on right edge\n point2 = (xmax, x2b) \n return (point1, point2)", "def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)", "def intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n else:\n return False", "def project_point_to_line(point, line_start, line_end):\n 
line_magnitude = line_start.distance(line_end)\n \n u = ((point.x - line_start.x) * (line_end.x - line_start.x) +\n (point.y - line_start.y) * (line_end.y - line_start.y)) \\\n / (line_magnitude ** 2)\n\n # closest point does not fall within the line segment, \n # take the shorter distance to an endpoint\n if u < 0.00001 or u > 1:\n ix = point.distance(line_start)\n iy = point.distance(line_end)\n if ix > iy:\n return line_end\n else:\n return line_start\n else:\n ix = line_start.x + u * (line_end.x - line_start.x)\n iy = line_start.y + u * (line_end.y - line_start.y)\n return Point([ix, iy])", "def find_intersection(A, B, C, D):\n \n a1, b1, c1 = line_equation(A.x, A.y, B.x, B.y)\n a2, b2, c2 = line_equation(C.x, C.y, D.x, D.y)\n \n Y = - np.array([[c1],\n [c2]])\n M = np.array([[a1, b1],\n [a2, b2]])\n\n X = np.linalg.solve(M, Y)\n intersection = Coordinates(X[0], X[1])\n \n return intersection", "def get_line_to(self, point):\n\n b = ((self.x - point.x)*point.y - (self.y - point.y)*point.x)/(self.x - point.x)\n\n a = (self.y - point.y)/(self.x - point.x)\n\n return a, b", "def DistPoint2Line(point,line_point1, line_point2=np.array([0,0,0])):\n return np.linalg.norm(np.cross((point-line_point2),(point-line_point1)))/np.linalg.norm(line_point1 - line_point2)" ]
[ "0.7900334", "0.78217113", "0.7766068", "0.7715372", "0.76640767", "0.7580761", "0.7566446", "0.7503989", "0.7500533", "0.7499418", "0.74659604", "0.7449178", "0.74213684", "0.7360603", "0.734246", "0.7337199", "0.7291764", "0.7291017", "0.71756595", "0.7140106", "0.7113183", "0.71001303", "0.7089124", "0.70817584", "0.70784986", "0.70772004", "0.70533574", "0.70417684", "0.6999572", "0.69585997", "0.69283545", "0.69271576", "0.6896659", "0.6853066", "0.6821582", "0.6760024", "0.67476296", "0.67341727", "0.6733868", "0.6729351", "0.6723029", "0.6696059", "0.668778", "0.6685048", "0.6642115", "0.6640774", "0.6624141", "0.6609242", "0.6583588", "0.655167", "0.65505314", "0.6548308", "0.65347254", "0.6522243", "0.65208197", "0.65023327", "0.64958245", "0.6480969", "0.6454524", "0.6442412", "0.6426536", "0.641671", "0.6413847", "0.6413538", "0.6403689", "0.64030725", "0.63974655", "0.6394598", "0.63896924", "0.6381531", "0.6377065", "0.6370031", "0.6353973", "0.6326713", "0.632278", "0.6312347", "0.62958425", "0.6288065", "0.62840384", "0.6281395", "0.62675804", "0.6265877", "0.62632805", "0.6258351", "0.62478524", "0.6245829", "0.62292576", "0.6220621", "0.6220434", "0.6214616", "0.61978626", "0.6181066", "0.6177515", "0.61712795", "0.61578655", "0.61570096", "0.6153031", "0.6134175", "0.6103625", "0.6103544", "0.60927254" ]
0.0
-1
Call the Translation API hosted on the micropayments server.
def cli(text): #click.echo("Welcome to the Instant Translation Command Line Tool.\n") sel_url = server_url+'translate?text={0}&payout_address={1}' response = requests.get(url=sel_url.format(text, wallet.get_payout_address())) #click.echo("The following is the translation of the text you input.\n") click.echo(response.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def machine_translation(request):\n log.debug(\"Get translation from machine translation service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n check = request.GET['check']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n if hasattr(settings, 'MICROSOFT_TRANSLATOR_API_KEY'):\n api_key = settings.MICROSOFT_TRANSLATOR_API_KEY\n else:\n log.error(\"MICROSOFT_TRANSLATOR_API_KEY not set\")\n return HttpResponse(\"apikey\")\n\n obj = {}\n\n # On first run, check if target language supported\n if check == \"true\":\n supported = False\n languages = settings.MICROSOFT_TRANSLATOR_LOCALES\n\n if locale in languages:\n supported = True\n\n else:\n for lang in languages:\n if lang.startswith(locale.split(\"-\")[0]): # Neutral locales\n supported = True\n locale = lang\n break\n\n if not supported:\n log.debug(\"Locale not supported.\")\n return HttpResponse(\"not-supported\")\n\n obj['locale'] = locale\n\n url = \"http://api.microsofttranslator.com/V2/Http.svc/Translate\"\n payload = {\n \"appId\": api_key,\n \"text\": text,\n \"from\": \"en\",\n \"to\": locale,\n \"contentType\": \"text/html\",\n }\n\n try:\n r = requests.get(url, params=payload)\n log.debug(r.content)\n\n # Parse XML response\n root = ET.fromstring(r.content)\n translation = root.text\n obj['translation'] = translation\n\n return HttpResponse(json.dumps(obj), content_type='application/json')\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")", "def translate(sentence,target,api_key):\n #translate without using googletrans wrapper library\n URL = \"https://translation.googleapis.com/language/translate/v2?target=\"+target+\"&key=\"+api_key+\"&q=\"+sentence\n # sending get request and saving the response as response object \n r = requests.get(url = URL)\n\n if r.status_code == 200:\n # extracting data in json format \n data = r.json()\n return data['data']['translations'][0]['translatedText']", "def post(self):\n if not request.json:\n return self._build_bad_json_response()\n\n service = TranslationService()\n success, response = service.execute(request.json)\n\n if not success:\n return self._build_bad_request_response(response)\n\n return response, 200", "def translations(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def translate():\n text = request.args.get('text')\n\n # Send a request to Google's Translate REST API using your API credentials defined above\n ans = service.translations().list(source='en', target='zh-CN', q=text).execute()\n\n # Return translated text back to user\n return ans['translations'][0]['translatedText']", "def translate(self, source: str, target: str, text: str) -> str:\n\n request_url: str = \"{0}?lang={1}-{2}&key={3}\".format(\n self.yandex_api_path, source, target, self.yandex_api_key\n )\n\n response: Response = requests.post(url=request_url, data={\"text\": text})\n\n translated_text: str = \"\"\n if response.status_code == 200:\n response_data: Dict = response.json()\n translated_text: str = response_data.get(\"text\")[0]\n return translated_text", "def microsoft_terminology(request):\n log.debug(\"Get translations from Microsoft Terminology Service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n check = request.GET['check']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n obj = {}\n locale = locale.lower()\n url = 
'http://api.terminology.microsoft.com/Terminology.svc?singleWsdl'\n client = Client(url)\n\n # On first run, check if target language supported\n if check == \"true\":\n supported = False\n languages = settings.MICROSOFT_TERMINOLOGY_LOCALES\n\n if locale in languages:\n supported = True\n\n elif \"-\" not in locale:\n temp = locale + \"-\" + locale # Try e.g. \"de-de\"\n if temp in languages:\n supported = True\n locale = temp\n\n else:\n for lang in languages:\n if lang.startswith(locale + \"-\"): # Try e.g. \"de-XY\"\n supported = True\n locale = lang\n break\n\n if not supported:\n log.debug(\"Locale not supported.\")\n return HttpResponse(\"not-supported\")\n\n obj['locale'] = locale\n\n sources = client.factory.create('ns0:TranslationSources')\n sources[\"TranslationSource\"] = ['Terms', 'UiStrings']\n\n payload = {\n 'text': text,\n 'from': 'en-US',\n 'to': locale,\n 'sources': sources,\n 'maxTranslations': 5\n }\n\n try:\n r = client.service.GetTranslations(**payload)\n translations = []\n\n if len(r) != 0:\n for translation in r.Match:\n translations.append({\n 'source': translation.OriginalText,\n 'target': translation.Translations[0][0].TranslatedText,\n 'quality': translation.ConfidenceLevel,\n })\n\n obj['translations'] = translations\n\n return HttpResponse(json.dumps(obj), content_type='application/json')\n\n except WebFault as e:\n log.error(e)\n return HttpResponse(\"error\")", "def test_translate(self):\n body = Question()\n response = self.client.open('/api/rtx/v1/translate',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response, \"Response body is : \" + response.data.decode('utf-8'))", "def translate(q, target, source=None):\n # Build the RESTful call.\n p = {\n 'key': config.TRANSLATE_API_KEY,\n 'target': target,\n 'q': urllib.quote_plus(q,\"'\")\n }\n url = TRANSLATE_BASE_URI + '?' + TRANSLATE_API_PARAMETERS % p\n\n if source:\n url = url + '&source=%s' % source\n\n try:\n response = urlfetch.fetch(url=url, method=urlfetch.GET)\n if response.status_code == 200:\n # Succesful API call.\n result = json.loads(response.content)\n # TODO: take into account any errors.\n if 'data' in result:\n return result['data']['translations'][0]['translatedText'] \n else:\n logging.error('query = %s , http_status_code = %s' % (url, response.status_code))\n return None\n except urlfetch.DownloadError:\n # Request timed out or failed.\n logging.error('There was an error translating the text')\n return None", "def get_translation ( self ):\n self.verify_post_data ( )\n\n text = request.json[ 'text' ]\n src_lang = request.json[ 'source_lang' ]\n target_lang = request.json[ 'target_lang' ]\n\n # if translation is available in cache, just fetch it from there. 
Otherwise use translation service.\n translated_text = self.get_set_translation_from_cache ( text, src_lang, target_lang )\n\n return jsonify ( {\"Translation\": translated_text} )", "def translate():\n\n # Logging the input payload\n json_payload = request.json\n my_word = json_payload['word']\n LOG.info(f\"Word to be translated: \\n{my_word}\")\n\n sql = f\"select * from translation.translator where origin='{my_word}';\"\n result = db.engine.execute(sql)\n result = result.fetchall()\n if len(result) > 0:\n LOG.info(f\"Results: \\n{result}\")\n json_result = [{column: value for column, value in rowproxy.items()}\n for rowproxy in result]\n json_result[0][\"translated_from\"] = \"translator_db\"\n else:\n json_result = dict()\n json_result[\"translated_from\"] = \"google_api\"\n translator = Translator()\n result = translator.translate(my_word)\n json_result[\"origin\"] = my_word\n json_result[\"origin_language\"] = result.src\n json_result[\"translation\"] = result.text\n json_result[\"translation_language\"] = result.dest\n sql_statement = f\"insert into translation.translator(origin, origin_language, translation, translation_language) values('{my_word}', '{json_result['origin_language']}','{json_result['translation']}', '{json_result['translation_language']}')\"\n result = db.engine.execute(sql_statement)\n\n db.session.commit()\n\n return jsonify({'result': json_result})", "async def translate(self, ctx: commands.Context, *, text: str):\n # Check for cooldown\n await self.check_cooldown(ctx)\n\n # Create new translation context and contact API\n context = contexts.create_translation_context(self.bot.config.data_path, text=text)\n async with ctx.typing():\n result = await utils.create_completion_result_from_context(self.bot.loop, context)\n await ctx.send(\"```\"+result[:1993]+\"```\")", "def transvision(request, repo, title):\n log.debug(\"Get Mozilla translations from Transvision service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n src = \"en-US\"\n url = \"https://transvision.mozfr.org/api/v1/tm/%s/%s/\" \\\n \"%s/%s/?max_results=%s&min_quality=70\" % (repo, src, locale, text, 5)\n\n try:\n r = requests.get(url)\n\n if r.text != '[]':\n translations = r.json()\n\n return HttpResponse(json.dumps({\n 'translations': translations,\n 'title': title,\n }), content_type='application/json')\n\n else:\n return HttpResponse(\"no\")\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")", "def translate_wrapper(atext):\n print(\"translating:\",atext)\n res=\"\"\n res=translate(atext,\"pl\",\"fr\")\n time.sleep(0.5)\n print(\"translation:\",res)\n return res", "def run_translate(self,\n provider: str,\n azure_subscription_key,\n azure_endpoint_url,\n azure_subscription_region) -> int:\n if provider == self.TRANSLATION_PROVIDER_OFFLINE:\n print(\"Using OfflineTranslator provider\")\n translator = OfflineTranslator()\n elif provider == self.TRANSLATION_PROVIDER_AZURE:\n print(\"Using MicrosoftAzureTranslator provider\")\n translator = MicrosoftAzureTranslator(subscription_key=azure_subscription_key,\n endpoint_url=azure_endpoint_url,\n subscription_region=azure_subscription_region)\n else:\n raise NotImplementedError\n\n print(\"Reading JSON corpus from\", self.GATHERED_JSON_FILENAME)\n corpus = ParallelCorpus.from_json(self.GATHERED_JSON_FILENAME)\n original_strings = list(corpus.orig_to_translation.keys())\n\n print(\"Translating...\")\n 
translated_strings = translator.translate_all(original_strings)\n\n new_corpus = ParallelCorpus()\n new_corpus.orig_to_translation = dict(zip(original_strings, translated_strings))\n new_corpus.to_json(self.GATHERED_JSON_FILENAME)\n print(\"Wrote\", self.GATHERED_JSON_FILENAME)\n return 0", "def test_translate(self):\n result = self.app.get('/translate?text=Something')\n self.assertEqual(result.status_code, 200)\n\n result = self.app.get('/translate')\n self.assertEqual(result.status_code, 500)", "def transcript(self, request, dispatch):\r\n if dispatch.startswith('translation'):\r\n\r\n language = dispatch.replace('translation', '').strip('/')\r\n\r\n if not language:\r\n log.info(\"Invalid /translation request: no language.\")\r\n return Response(status=400)\r\n\r\n if language not in ['en'] + self.transcripts.keys():\r\n log.info(\"Video: transcript facilities are not available for given language.\")\r\n return Response(status=404)\r\n\r\n if language != self.transcript_language:\r\n self.transcript_language = language\r\n\r\n try:\r\n transcript = self.translation(request.GET.get('videoId', None))\r\n except NotFoundError, ex:\r\n log.info(ex.message)\r\n # Try to return static URL redirection as last resort\r\n # if no translation is required\r\n return self.get_static_transcript(request)\r\n except (\r\n TranscriptException,\r\n UnicodeDecodeError,\r\n TranscriptsGenerationException\r\n ) as ex:\r\n log.info(ex.message)\r\n response = Response(status=404)\r\n else:\r\n response = Response(transcript, headerlist=[('Content-Language', language)])\r\n response.content_type = Transcript.mime_types['sjson']\r\n\r\n elif dispatch == 'download':\r\n try:\r\n transcript_content, transcript_filename, transcript_mime_type = self.get_transcript(self.transcript_download_format)\r\n except (NotFoundError, ValueError, KeyError, UnicodeDecodeError):\r\n log.debug(\"Video@download exception\")\r\n return Response(status=404)\r\n else:\r\n response = Response(\r\n transcript_content,\r\n headerlist=[\r\n ('Content-Disposition', 'attachment; filename=\"{}\"'.format(transcript_filename.encode('utf8'))),\r\n ('Content-Language', self.transcript_language),\r\n ]\r\n )\r\n response.content_type = transcript_mime_type\r\n\r\n elif dispatch == 'available_translations':\r\n available_translations = []\r\n if self.sub: # check if sjson exists for 'en'.\r\n try:\r\n Transcript.asset(self.location, self.sub, 'en')\r\n except NotFoundError:\r\n pass\r\n else:\r\n available_translations = ['en']\r\n for lang in self.transcripts:\r\n try:\r\n Transcript.asset(self.location, None, None, self.transcripts[lang])\r\n except NotFoundError:\r\n continue\r\n available_translations.append(lang)\r\n if available_translations:\r\n response = Response(json.dumps(available_translations))\r\n response.content_type = 'application/json'\r\n else:\r\n response = Response(status=404)\r\n else: # unknown dispatch\r\n log.debug(\"Dispatch is not allowed\")\r\n response = Response(status=404)\r\n\r\n return response", "def translate(self, language=None):", "def translate_many(text, lang, format='plain'):\n if format not in FORMATS:\n raise TypeError(\"The format should be one of %s, not '%s'\" % (FORMATS, format))\n\n params = {'lang': lang, 'text': text, 'format': format}\n r = requests.get('http://translate.yandex.net/api/v1/tr.json/translate', params=params)\n if not r.ok:\n raise ServiceFailure(r.status_code)\n code = r.json['code']\n if code == 200:\n return r.json['text']\n elif code == 413:\n raise TextTooLong\n elif code == 
501:\n raise LanguageNotSupported(lang)\n else:\n raise TranslationError(code)", "def translate_text(query, source_lang_code, target_lang_code):\n\n try:\n translations = TRANSLATION_SERVICE.translations().list(\n source=source_lang_code,\n target=target_lang_code,\n q=query\n ).execute()\n translation = translations['translations'][0]\n if 'detectedSourceLanguage' in translation.keys():\n source_lang_code = translation['detectedSourceLanguage']\n resp = random.choice(_TRANSLATE_RESULT).format(\n text=translation['translatedText'],\n fromLang=language_code_dict[source_lang_code],\n toLang=language_code_dict[target_lang_code])\n except (HTTPError, URLError, HTTPException):\n resp = random.choice(_TRANSLATE_NETWORK_ERROR)\n except Exception:\n resp = random.choice(_TRANSLATE_ERROR)\n return resp", "def translate():\n pass", "def studio_transcript(self, request, dispatch):\r\n _ = self.runtime.service(self, \"i18n\").ugettext\r\n\r\n if dispatch.startswith('translation'):\r\n language = dispatch.replace('translation', '').strip('/')\r\n\r\n if not language:\r\n log.info(\"Invalid /translation request: no language.\")\r\n return Response(status=400)\r\n\r\n if request.method == 'POST':\r\n subtitles = request.POST['file']\r\n save_to_store(subtitles.file.read(), unicode(subtitles.filename), 'application/x-subrip', self.location)\r\n generate_sjson_for_all_speeds(self, unicode(subtitles.filename), {}, language)\r\n response = {'filename': unicode(subtitles.filename), 'status': 'Success'}\r\n return Response(json.dumps(response), status=201)\r\n\r\n elif request.method == 'GET':\r\n\r\n filename = request.GET.get('filename')\r\n if not filename:\r\n log.info(\"Invalid /translation request: no filename in request.GET\")\r\n return Response(status=400)\r\n\r\n content = Transcript.get_asset(self.location, filename).data\r\n response = Response(content, headerlist=[\r\n ('Content-Disposition', 'attachment; filename=\"{}\"'.format(filename.encode('utf8'))),\r\n ('Content-Language', language),\r\n ])\r\n response.content_type = Transcript.mime_types['srt']\r\n\r\n else: # unknown dispatch\r\n log.debug(\"Dispatch is not allowed\")\r\n response = Response(status=404)\r\n\r\n return response", "async def translate(self,ctx,lang=\"ja\",txt=None):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,txt)", "async def translate_from(self, ctx, language: to_language, *, text: str): \n if len(text) >= 200:\n raise TextTooLongError(message=f\"This text is too long to be processed\")\n \n resp = await self.translator.translate(text, src=language or 'auto')\n await self._display(ctx, resp, text)", "async def translate_to(self, ctx, language: to_language, *, text: str):\n if len(text) >= 200:\n raise TextTooLongError(message=f\"This text is too long to be processed\")\n \n if language is None:\n raise LanguageNotFoundError(message=f\"Couldn't find the language : {language}\")\n \n resp = await self.translator.translate(text, dest=language)\n \n await self._display(ctx, resp, text)", "def amagama(request):\n log.debug(\"Get open source translations from amaGama service.\")\n\n try:\n text = request.GET['text']\n locale = request.GET['locale']\n except MultiValueDictKeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n try:\n text = urllib.quote(text.encode('utf-8'))\n except KeyError as e:\n log.error(str(e))\n return HttpResponse(\"error\")\n\n url = \"http://amagama.locamotion.org/tmserver\" \\\n \"/en/%s/unit/%s?max_candidates=%s\" \\\n % 
(locale, text, 5)\n\n try:\n r = requests.get(url)\n\n if r.text != '[]':\n translations = r.json()\n\n return HttpResponse(json.dumps({\n 'translations': translations\n }), content_type='application/json')\n\n else:\n return HttpResponse(\"no\")\n\n except Exception as e:\n log.error(e)\n return HttpResponse(\"error\")", "async def translate_modern(context, arguments):\n return await translate(context, arguments, \"modern\")", "def translate(self, to_lang: str = TARGET_LANG):\n if not self.language:\n self.detect_language()\n if not all([self.clean, self.language != to_lang]):\n return\n self.payload += '&source={}&target={}'.format(self.language, to_lang)\n resp = requests.request('POST', self.url_translation, data=self.payload.encode('utf-8'),\n headers=self.translate_headers)\n try:\n self.translation = json.loads(resp.text)['data']['translations'][0]['translatedText']\n except KeyError:\n return", "def translate_text_google_cloud(target, text):\n if isinstance(text, six.binary_type):\n text = text.decode(\"utf-8\")\n\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n result = translate_client.translate(text, target_language=target)\n result = html.unescape(result[\"translatedText\"])\n # print(u\"Text: {}\".format(result[\"input\"]))\n print(\"Translation: {}\".format(result))\n # print(u\"Detected source language: {}\".format(result[\"detectedSourceLanguage\"]))\n return result", "def test_translation_get_translate_document(self):\n file_name = \"test_en.html\"\n src_lang = \"en\"\n res_lang = \"de\"\n try:\n # Upload file to storage\n res = TestHelper.upload_file(file_name)\n self.assertEqual(res.Code, 200, \"Error upload file to server\")\n\n # Translate document\n res = self.api.translation_get_translate_document(file_name, src_lang, res_lang,\n folder=TestHelper.folder)\n self.assertTrue(isinstance(res, str), \"Error translate html document\")\n\n # Move to test folder\n TestHelper.move_file(str(res), TestHelper.test_dst)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "def translate(atext,fromlang,tolang):\n rtext=\"\"\n \n base_link = \"http://translate.google.com/m?hl=%s&sl=%s&q=%s\"\n url = base_link % (tolang, fromlang, atext)\n print(url)\n headers={'Accept-Charset': 'utf-8'}\n r = requests.get(url,headers=headers)\n content = r.content.decode('utf-8')\n\n if r.status_code != 200:\n print(\"ERROR\")\n print(r.status_code)\n print(r.headers)\n print(\"content\",content)\n time.sleep(1)\n else:\n soup = bs4.BeautifulSoup(content,'html.parser')\n # print(soup) # div class=\"t0\"\n res=soup.find(\"div\",attrs={\"class\":\"t0\"})\n # print(\"res:\",res)\n print(\"res.text:\",res.text)\n rtext=res.text\n return rtext", "def ms_transliterate_word(logger, word, lang_code=None, script_code=None):\n if lang_code is None:\n lang_code = 'hi'\n if script_code is None:\n script_code = 'Deva'\n params = {\n 'api-version' : '3.0',\n 'language' : lang_code,\n 'fromScript' : script_code,\n 'toScript' : 'Latn'\n }\n body = [{\n 'text': word\n }]\n\n request = requests.post(MS_TRANS_URL, headers=MS_REQUEST_HEADERS, params=params, json=body)\n if request.status_code == 200:\n response = request.json()\n trans = response[0]['text']\n else:\n logger.info(f\"Transliterate error with {request.status_code} for {word}\")\n trans = None\n return trans", "def get_translation(self):", "def translate(self, text: str, src_lang: str, target_lang: str) -> str:\n result = 
self.__translate(text, src_lang, target_lang)\n obj_result = json.loads(result)\n\n list_sentence = [x[0] for x in obj_result[0][:-1]]\n\n return ''.join(list_sentence)", "async def translate_musical(context, arguments):\n return await translate(context, arguments, \"musical\")", "def send_translations(translation_dict):\n\tif \"__messages\" not in frappe.local.response:\n\t\tfrappe.local.response[\"__messages\"] = {}\n\n\tfrappe.local.response[\"__messages\"].update(translation_dict)", "def send_translations(translation_dict):\n\tif \"__messages\" not in frappe.local.response:\n\t\tfrappe.local.response[\"__messages\"] = {}\n\n\tfrappe.local.response[\"__messages\"].update(translation_dict)", "def translate_text(target: str, text: str) -> str:\n if isinstance(text, six.binary_type):\n text = text.decode(\"utf-8\")\n # Text can also be a sequence of strings, in which case this method\n # will return a sequence of results for each text.\n return translate_client.translate(text,\n target_language=target)[\"translatedText\"]", "def translate_speech(service_instance, audio_file=None, to_lang=\"fr-FR\", from_lang=\"en-US\"):\r\n # Specify to and from languages to use\r\n service_instance.speech_recognition_language = from_lang\r\n service_instance.add_target_language(to_lang)\r\n\r\n # Configure audio input\r\n # Use microphone as default input if no file was provided; else use a file\r\n if audio_file is None:\r\n audio_config = AudioConfig()\r\n else:\r\n audio_config = AudioConfig(filename=audio_file)\r\n\r\n # Create a translation recognizer and use it to translate speech input\r\n recognizer = TranslationRecognizer(service_instance, audio_config)\r\n result = recognizer.recognize_once()\r\n\r\n # Save the translated text and transcribed speech\r\n translation = \"\"\r\n speech_text = \"\"\r\n # Both were returned\r\n if result.reason == ResultReason.TranslatedSpeech:\r\n speech_text = result.text\r\n translation = result.translations[to_lang]\r\n # Only speech was returned\r\n elif result.reason == ResultReason.RecognizedSpeech:\r\n speech_text = result.text\r\n translation = \"Unable to translate speech\"\r\n # None were returned\r\n else:\r\n translation = \"Unknown\"\r\n speech_text = \"Unknown\"\r\n\r\n # Return the transcribed speech and translation\r\n return speech_text, translation", "async def public_get_languages_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = PublicGetLanguages.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def __translate(self, text, originalLanguage, translationLanguage):\n if self.__translatorRequest is None:\n from .TranslatorRequest import TranslatorRequest\n self.__translatorRequest = TranslatorRequest(self)\n \n self.__ensureTranslationEngineReady()\n if self.__translationEngine is None:\n return \"\", False\n else:\n result, ok = self.__translationEngine.getTranslation(\n self.__translatorRequest, text, originalLanguage,\n translationLanguage)\n \n return result, ok", "def test_simple_translation_using_get(self):\n pass", "def test_get_translation_resources(self):\n pass", "def GetTranslation(*args):\n return _gdi_.GetTranslation(*args)", "def translate(self, args):\n parse_args = args.partition(\" \")\n fromlang = parse_args[0].split(\"|\")[0]\n tolang = parse_args[0].split(\"|\")[1]\n 
text = args[1]\n\n open = urllib2.build_opener()\n open.addheaders = [(\"User-agent\", \"Mozilla/5.0 (X11; U; FreeBSD i686; en-US; rv:1.8.1.9) Gecko/20071025 Firefox/2.0.0.9\")]\n\n translate = open.open(\"http://translate.google.com/translate_t?\" + \n urlencode({\"sl\": fromlang, \"tl\": tolang}),\n data = urlencode({\"hl\": \"en\", \"ie\": \"UTF8\", \"text\": text.encode(\"utf-8\"),\n \"sl\": fromlang, \"tl\": tolang})\n )\n\n soup = BeautifulSoup(translate)\n return soup(\"span\", id=\"result_box\")", "def send_api_request(self, query):\n\n params = {\"v\": \"20170712\",\n \"query\": query,\n \"lang\": \"de\",\n \"sessionId\": self.session_id,\n \"timezone\": \"Europe/Berlin\"\n }\n headers = {\"Authorization\": self.bearer_token}\n\n response = requests.get(\n \"https://api.dialogflow.com/v1/query\",\n params=params, headers=headers)\n\n #for debugging\n #print(response.url)\n print(\"Dialogflow response: %s\" % response.content)\n\n return response.content", "def __pronounce(self, text, language):\n if not text or not language:\n return\n \n if self.__translatorRequest is None:\n from .TranslatorRequest import TranslatorRequest\n self.__translatorRequest = TranslatorRequest(self)\n \n if self.__mediaPlayer is None:\n self.__mediaPlayer = QMediaPlayer(self)\n self.__mediaPlayer.stateChanged.connect(\n self.__mediaPlayerStateChanged)\n \n if self.__mediaPlayer.state() == QMediaPlayer.PlayingState:\n return\n \n self.__ensureTranslationEngineReady()\n if self.__translationEngine is not None:\n if not self.__translationEngine.hasTTS():\n E5MessageBox.critical(\n self,\n self.tr(\"Translation Error\"),\n self.tr(\"The selected translation service does not\"\n \" support the Text-to-Speech function.\"))\n return\n \n data, ok = self.__translationEngine.getTextToSpeechData(\n self.__translatorRequest, text, language)\n if ok:\n self.__mediaFile = QTemporaryFile(self)\n self.__mediaFile.open()\n self.__mediaFile.setAutoRemove(False)\n self.__mediaFile.write(data)\n \n self.__mediaPlayer.setMedia(QMediaContent(), self.__mediaFile)\n self.__mediaPlayer.play()\n else:\n E5MessageBox.critical(\n self,\n self.tr(\"Translation Error\"),\n data)", "def one_translation_path(self, api_path):\n try:\n from jupyterlab_server.translation_utils import (\n get_language_pack,\n get_language_packs,\n )\n\n all_packs, _ = get_language_packs()\n packs = {\n locale: {\"data\": get_language_pack(locale)[0], \"message\": \"\"}\n for locale in all_packs.keys()\n }\n metadata = {\"data\": all_packs, \"message\": \"\"}\n except ImportError as err: # pragma: no cover\n self.log.warning(\n f\"[lite] [translation] `jupyterlab_server` was not importable, \"\n f\"cannot create translation data {err}\"\n )\n\n metadata = {\n \"data\": {\n \"en\": {\"displayName\": \"English\", \"nativeName\": \"English\"},\n },\n \"message\": \"\",\n }\n packs = {\"en\": {\"data\": {}, \"message\": \"Language pack 'en' not installed!\"}}\n\n # save the metadata about available packs\n api_path.parent.mkdir(parents=True, exist_ok=True)\n api_path.write_text(\n json.dumps(metadata, indent=2, sort_keys=True),\n encoding=\"utf-8\",\n )\n\n for locale, data in packs.items():\n language_pack_file = self.api_dir / f\"{locale}.json\"\n language_pack_file.write_text(\n json.dumps(data, indent=2, sort_keys=True),\n encoding=\"utf-8\",\n )\n self.maybe_timestamp(language_pack_file)", "def post_unbabel_translation(item_id):\n item = db.stories.find_one({\"id\": item_id})\n if item:\n response, objects = [], []\n for lang in [l[0] for l in 
UNBABEL_API_LANGUAGES if l[0] != 'en']:\n objects.append({\"text\": item.get('title'), \"target_language\": lang, \"text_format\": \"text\"})\n data = {\"objects\": objects}\n resp = do_request('PATCH', \"http://sandbox.unbabel.com/tapi/v2/translation/\", data=data,\n headers=UNBABEL_HEADERS)\n if resp.status_code == 202:\n data = resp.json()\n for object in data.get('objects', []):\n db.stories.update_one({\"_id\": item.get('_id')},\n {\"$set\": {\"unbabel_uid_{}\".format(lang): object.get('uid')}})\n response.append([resp.content, resp.json()])\n return response\n return \"Item not found {}\".format(item_id)", "def get_notifications_translated(self: object, *args, parameters: dict = None, **kwargs) -> dict:\n # [GET] https://assets.falcon.crowdstrike.com/support/api/swagger.html#/recon/GetNotificationsTranslatedV1\n return process_service_request(\n calling_object=self,\n endpoints=Endpoints,\n operation_id=\"GetNotificationsTranslatedV1\",\n keywords=kwargs,\n params=handle_single_argument(args, parameters, \"ids\")\n )", "def main():\n # Parse args\n parser = argparse.ArgumentParser(description=\"Open-source offline translation.\\n\")\n parser.add_argument(\n \"text\",\n nargs=\"?\",\n metavar=\"TEXT\",\n help=\"The text to translate. Read from standard input if missing.\",\n )\n parser.add_argument(\n \"--from-lang\",\n \"-f\",\n help=\"The code for the language to translate from (ISO 639-1)\",\n )\n parser.add_argument(\n \"--to-lang\", \"-t\", help=\"The code for the language to translate to (ISO 639-1)\"\n )\n args = parser.parse_args()\n\n from_and_to_lang_provided = args.from_lang is not None and args.to_lang is not None\n\n # Get text to translate\n if args.text:\n # argos-translate-cli --from-lang en --to-lang es \"Text to translate\"\n text_to_translate = args.text\n elif from_and_to_lang_provided:\n # echo \"Text to translate\" | argos-translate-cli --from-lang en --to-lang es\n text_to_translate = \"\".join(sys.stdin)\n else:\n # argos-translate\n parser.print_help()\n return\n\n # Perform translation\n if from_and_to_lang_provided:\n installed_languages = {\n lang.code: lang for lang in translate.load_installed_languages()\n }\n if args.from_lang not in installed_languages:\n parser.error(\"{!r} is not an installed language.\".format(args.from_lang))\n if args.to_lang not in installed_languages:\n parser.error(\"{!r} is not an installed language.\".format(args.to_lang))\n from_lang = installed_languages[args.from_lang]\n to_lang = installed_languages[args.to_lang]\n translation = from_lang.get_translation(to_lang)\n if translation is None:\n parser.error(\n f\"No translation installed from {args.from_name} to {args.to_name}\"\n )\n else:\n translation = translate.IdentityTranslation(\"\")\n\n # Print translation\n print(translation.translate(text_to_translate))", "def translate(lang):\n\n\tlangfilename = os.path.join(\"data\", \"translations\", lang + \".json\")\n\tif os.path.exists(langfilename):\n\t\twith open(langfilename, 'r') as langfile:\n\t\t\ttranslations = json.loads(langfile.read())\n\telse:\n\t\ttranslations = {}\n\n\twith open(os.path.join(\"data\", \"translations\", \"message_list.json\"), \"r\") as message_list_file:\n\t\tmessages = json.loads(message_list_file.read())\n\n\tcnt = 0\n\tfor m in messages:\n\t\tcnt += 1\n\t\t#if cnt > 15: break\n\t\tif not translations.get(m):\n\t\t\tprint 'translating: ' + m\n\t\t\tresponse = requests.get(\"\"\"https://www.googleapis.com/language/translate/v2\"\"\",\n\t\t\t\tparams = {\n\t\t\t\t\t\"key\": 
conf.google_api_key,\n\t\t\t\t\t\"source\": \"en\",\n\t\t\t\t\t\"target\": lang,\n\t\t\t\t\t\"q\": m\n\t\t\t\t}, verify=False)\n\n\t\t\tt = response.json[\"data\"][\"translations\"][0][\"translatedText\"] or m\n\t\t\ttranslations[m] = t.encode('utf-8')\n\n\t\t\twith open(langfilename, 'w') as langfile:\n\t\t\t\tlangfile.write(json.dumps(translations, indent=1, sort_keys=True))", "def get_language(language_id):\n\n api = (api_name, 'language')\n args_params = (str(language_id), )\n \n response = make_request(*args_params, api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n if status_code >= 300:\n\n click.echo(\"response error message: %s \" % msg)\n raise click.Abort()\n \n\n logger.debug(\"response from spanglish get_language: {}\".format(response))\n logger.debug(\"response msg from spanglish get_language: {}\".format(msg))\n\n click.echo(\"response message: %s \" % msg)", "def fetchTranslation(self, language):\n pass", "def translate(gcf_request=None):\n\n # Flask Request object passed in for Cloud Functions\n # (use gcf_request for GCF but flask.request otherwise)\n local_request = gcf_request if gcf_request else request\n\n # reset all variables (GET)\n text = translated = None\n\n # form submission and if there is data to process (POST)\n if local_request.method == 'POST':\n text = local_request.form['text'].strip()\n if text:\n data = {\n 'contents': [text],\n 'parent': PARENT,\n 'target_language_code': TARGET[0],\n }\n # handle older call for backwards-compatibility\n try:\n rsp = TRANSLATE.translate_text(request=data)\n except TypeError:\n rsp = TRANSLATE.translate_text(**data)\n translated = rsp.translations[0].translated_text\n\n # create context & render template\n context = {\n 'orig': {'text': text, 'lc': SOURCE},\n 'trans': {'text': translated, 'lc': TARGET},\n }\n return render_template('index.html', **context)", "def translate(self, word, context=None, pos_tag=None):\n #Get contextual translation from google translate\n par = {\"text\": word, \"raw\": \"raw\"}\n r = requests.post(self.translation_url, data=par)\n results = r.text\n translated_word = get_from_html_text(results, 'TRANSLATED_TEXT')\n \n #Perform lookup in the text file from the C# translator\n #if there is no match, take the best match from the bing file\n# print \"Translated: \", word, \" ->\", translated_word\n return translated_word", "def api_call():\n\tresponse = requests.get(URL_API)\n\treturn response", "def translate(self):\n pass", "def translate_text(Text=None, TerminologyNames=None, SourceLanguageCode=None, TargetLanguageCode=None):\n pass", "def get_languages():\n\n api = (api_name, 'languages')\n\n response = make_request(api=api, action='get', **{})\n status_code = response.status_code\n content = response.text\n\n msg = str(status_code) + ' : ' + content\n \n logger.debug(\"response from spanglish languages: {}\".format(response))\n logger.debug(\"response statuscode from spanglish languages: {}\".format(status_code))\n\n click.echo(\"response message: %s \" % msg)", "def upload_messages_to_transifex(\n self, legalcode, pofile: polib.POFile = None\n ):\n language_code = legalcode.language_code\n resource_slug = legalcode.license.resource_slug\n resource_name = legalcode.license.resource_name\n pofilename = legalcode.translation_filename()\n\n resources = self.get_transifex_resources()\n resource_slugs = [item[\"slug\"] for item in resources]\n\n if pofile is None:\n pofile = legalcode.get_pofile()\n\n 
pofile_content = get_pofile_content(pofile)\n\n if resource_slug not in resource_slugs:\n if language_code != DEFAULT_LANGUAGE_CODE:\n raise ValueError(\n f\"The resource {resource_slug} does not yet exist in\"\n \" Transifex. Must upload English first to create it.\"\n )\n self.create_resource(\n resource_slug, resource_name, pofilename, pofile_content\n )\n elif language_code == DEFAULT_LANGUAGE_CODE:\n # We're doing English, which is the source language.\n self.update_source_messages(\n resource_slug, pofilename, pofile_content\n )\n else:\n self.update_translations(\n resource_slug, language_code, pofilename, pofile_content\n )", "def test_translation_get_translate_document_by_url(self):\n source_url = \"https://www.le.ac.uk/oerresources/bdra/html/page_02.htm\"\n src_lang = \"en\"\n res_lang = \"fr\"\n try:\n # Translate url\n res = self.api.translation_get_translate_document_by_url(source_url, src_lang, res_lang)\n self.assertTrue(isinstance(res, str), \"Error translate document from url\")\n\n # Move to test folder\n TestHelper.move_file(res, TestHelper.test_dst)\n except ApiException as ex:\n print(\"Exception\")\n print(\"Info: \" + str(ex))\n raise ex", "def cmd_lingvo(ensoapi, word_from_lang_to_lang = \"\"):\n \n translate_word(ensoapi, word_from_lang_to_lang)", "def get(self):\n \n global tw_margarita\n success = tw_margarita.ProcessRequest(self.request)\n self.response.out.write('Success: %s' % success)", "def hello_world(request):\n content_type = request.headers['content-type']\n print('content_type: {}'.format(content_type))\n\n request_json = request.get_json(silent=True)\n print('request_json: {}'.format(request_json))\n \n #if request.args and 'message' in request.args:\n # return request.args.get('message')\n #elif request_json and 'message' in request_json:\n # return request_json['message']\n #else:\n # return f'Hello World!'\n\n # Instantiates a client\n translate_client = translate.Client()\n # The text to translate\n text = u'Hello, world!'\n # The target language\n target = 'es'\n # Translates some text into Russian\n translation = translate_client.translate(\n text,\n target_language=target)\n print(u'Text: {}'.format(text))\n return u'Translation: {}'.format(translation['translatedText'])", "def generate(self, text):\n self.__params['text']=text\n self._data = requests.get(self.TTS_URL, params=self.__params,\n stream=False).iter_content()", "def test_parametrized_add_translation(self,\n translation,\n term_id,\n tags,\n status_code,\n message):\n translation_data = json.dumps(dict(\n translation=translation,\n term_id=term_id,\n tags=tags,\n date_created='2018-01-11T15:43:00',\n modified_date='2018-01-11T15:43:00'\n ))\n\n res = self.client.post(url_for('main_api.translations'),\n data=translation_data,\n headers={'Authorization': self.jwt_header()},\n content_type='application/json')\n\n assert res.status_code == status_code\n assert message.encode(encoding='UTF-8') in res.data\n if res.status_code == 201:\n # Test creation and modification dates are read-only\n date_created = res.json['translations'][1]['date_created']\n modified_date = res.json['translations'][1]['modified_date']\n assert date_created != '2018-01-11T15:43:00+00:00'\n assert modified_date != '2018-01-11T15:43:00+00:00'\n # Test translation author\n assert res.json['translations'][1]['author'] == 1", "def translate(data:object, **kwargs) -> object:\n\n return translator.TranslateVisitor(**kwargs).translate(data)", "def get(self, from_=None, to=None, limit=None, offset=None, checkids=None,\r\n 
contactids=None, status=None, via=None):\r\n\r\n params = base.get_params(None, locals(),\r\n translate_param=resource.translate_param)\r\n\r\n return http.Request('GET', self.get_url(), params), parsers.parse_json", "def get_message():\n ## Get the body of the text\n body = request.values.get('Body', None)\n print('Full message: ',body)\n ## Get the number of the sms\n senderNumber = request.values.get('From',None)\n ## call the translate function with the body of the text and get the translated text\n message, number = extractMessage(body)\n print('message stripped: ',message)\n print('number is: ',number)\n translated = translate(message)\n print('translated: ',translated)\n sendText(number, translated + ' from ' + senderNumber)\n ## respond with the translated text\n ##resp = twilio.twiml.Response()\n ##resp.message('Your message has been sent')\n ##return str(resp)\n return('Hello')", "def post(self):\n data = json.dumps(request.get_json())\n houseNumber = json.loads(data)['HouseNumber']\n street = json.loads(data)['Street']\n city = json.loads(data)['city']\n #address = '&housenumber='+houseNumber+'&street='+street+'&city='+city\n response = hereService.getLatLang(houseNumber, street, city)\n return response", "def _call_api(endpoint, query, data):\n headers = {\"Content-Type\": \"application/json\", }\n response = InnerTube._execute(f\"{endpoint}?{parse.urlencode(query)}\", \"POST\", headers=headers, data=data)\n\n try:\n resp = json.loads(response.read())\n except JSONDecodeError as e:\n log(f\"{__class__.__name__}: Parsing response error: {e}\")\n else:\n return resp", "def videos(self, **kwargs):\n\n path = self._get_movie_id_path('translations')\n resp = self._get_method(path, kwargs)\n return resp", "def Translate(ops: List[op.Operation]) -> List[str]:\n print(\"Translating\")\n\n # Multi threading\n pool = ProcessPoolExecutor()\n futures = []\n\n # Start Thread to translate\n for op in ops:\n futures.append(pool.submit(op.translate))\n\n # Put results in list\n wait(futures)\n translated: List[str] = []\n for future in futures:\n result = future.result()\n translated.extend(result)\n\n return translated", "def hit(self, endpoint, **params):\n params['api_key'] = self.api_key\n params['language'] = 'es-ES'\n url = self.base_url + endpoint\n response = requests.get(url, params=params)\n data = response.json()\n return data", "def __init__(self, text: tuple, content_type: str = CONTENT_TYPE, accept_encoding: str = ACCEPT_ENCODING,\n x_rapidapi_key: str = X_RAPID_API_KEY, x_rapidapi_host: str = X_RAPID_API_HOST,\n url_language: str = URL_LANGUAGE, url_translation: str = URL_TRANSLATION,\n sentiment_key: str = SENTIMENT_SUBSCRIPTION_KEY, url_sentiment: str = URL_SENTIMENT):\n self.text = text\n self.translate_headers = {'content-type': content_type, 'accept-encoding': accept_encoding,\n 'x-rapidapi-key': x_rapidapi_key, 'x-rapidapi-host': x_rapidapi_host}\n self.sentiment_headers = {'Ocp-Apim-Subscription-Key': sentiment_key}\n self.url_language = url_language\n self.url_translation = url_translation\n self.url_sentiment = url_sentiment\n self.clean = False\n self.language = None\n self.payload = None\n self.translation = None\n self.sentiment = None", "def polly_request_speech(intext: str, intlanguage: str):\n session = Session(profile_name=\"default\")\n polly = session.client(\"polly\")\n try:\n response = polly.synthesize_speech(Text=intext,LanguageCode = intlanguage,OutputFormat=\"mp3\",VoiceId=\"Joanna\")\n print(response)\n except (BotoCoreError, ClientError) as error:\n 
print(error)\n sys.exit(1)\n return response", "async def get_languages_async(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetLanguages.create(\n namespace=namespace,\n )\n return await run_request_async(\n request, additional_headers=x_additional_headers, **kwargs\n )", "def translate_wrapper(sentence,target):\n translator = Translator()\n \n try:\n # translate the 'text' column\n response = translator.translate(sentence, dest=target)\n\n except Exception as e: # mean Google restrict IP address\n response = \"Probably Google has banned your client IP addres\"+str(e)\n return response\n \n return normalize_text(response.text)", "def translation(self, youtube_id):\r\n if youtube_id:\r\n # Youtube case:\r\n if self.transcript_language == 'en':\r\n return Transcript.asset(self.location, youtube_id).data\r\n\r\n youtube_ids = youtube_speed_dict(self)\r\n assert youtube_id in youtube_ids\r\n\r\n try:\r\n sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data\r\n except (NotFoundError):\r\n log.info(\"Can't find content in storage for %s transcript: generating.\", youtube_id)\r\n generate_sjson_for_all_speeds(\r\n self,\r\n self.transcripts[self.transcript_language],\r\n {speed: youtube_id for youtube_id, speed in youtube_ids.iteritems()},\r\n self.transcript_language\r\n )\r\n sjson_transcript = Transcript.asset(self.location, youtube_id, self.transcript_language).data\r\n\r\n return sjson_transcript\r\n else:\r\n # HTML5 case\r\n if self.transcript_language == 'en':\r\n return Transcript.asset(self.location, self.sub).data\r\n else:\r\n return get_or_create_sjson(self)", "def main(speech_files, output_directory, lexical = False, enable_proxy = False, *argv):\n speech_config = speechsdk.SpeechConfig(subscription = pa.stt_key, region = pa.stt_region)\n # If necessary, you can enable a proxy here: \n # set_proxy(hostname: str, port: str, username: str, password: str)\n if enable_proxy: \n speech_config.set_proxy(argv[0], argv[1], argv[2], argv[3])\n # Set speech service properties, requesting the detailed response format to make it compatible with lexical format, if wanted\n speech_config.set_service_property(name='format', value='detailed', channel=speechsdk.ServicePropertyChannel.UriQueryParameter)\n if pa.stt_endpoint != \"\": \n speech_config.endpoint_id = pa.stt_endpoint\n logging.info(f'[INFO] - Starting to transcribe {len(next(os.walk(speech_files))[2])} audio files')\n results = []\n filenames = []\n for index, audio in enumerate(glob.iglob(f'{speech_files}*av')):\n result, filename = request_endpoint(audio, speech_config, output_directory, lexical)\n results.append(result)\n filenames.append(filename)\n # Check the result\n return zip(filenames, results)", "async def translate(self, post_or_comment_id, is_post=False, is_comment=False, p_obj=None, community_id=None):\n post_check = False\n comment_check = False\n method_url = None\n if is_post:\n method_url = \"posts/\"\n if not p_obj:\n p_obj = self.get_post_by_id(post_or_comment_id)\n post_check = True\n elif is_comment:\n method_url = \"comments/\"\n if not p_obj:\n p_obj = self.get_comment_by_id(post_or_comment_id)\n comment_check = True\n if not community_id:\n if p_obj:\n if comment_check:\n if p_obj.post:\n community_id = p_obj.post.artist.community_id\n if post_check:\n if p_obj.artist:\n community_id = 
p_obj.artist.community_id\n else:\n return None\n url = self._api_communities_url + str(community_id) + \"/\" + method_url + str(\n post_or_comment_id) + \"/translate?languageCode=en\"\n async with self.web_session.get(url, headers=self._headers) as resp:\n if self.check_status(resp.status, url):\n data = await resp.json()\n return data.get('translation')", "def list_text_translation_jobs(Filter=None, NextToken=None, MaxResults=None):\n pass", "def tapi(self,method,argc,**kwargs):\n url = self.btce_trade_url + argc + '/'\n kwargs['nonce'] = str(int(time.time()))\n kwargs['method'] = argc\n body = urllib.urlencode(kwargs)\n sign = self.hash_tapi( body )\n headers = dict( Sign = sign, Key = self.trade_key )\n if method == 'POST':\n response = requests.post( url,\n data = body,\n headers = headers,\n )\n elif method == 'GET':\n response = requests.get( url,\n headers = headers,\n )\n return response.text", "def test_translation_smoke():\n english_to_morse = get_translator(\"english\", \"morse\")\n morse_to_english = get_translator(\"morse\", \"english\")\n morse = english_to_morse.translate(\"hello world\")\n english = morse_to_english.translate(morse)\n assert english == \"HELLO WORLD\"", "def translate(self, source_file):\n \n # Name of output file. The extension of output file is .translated\n if self.pos_tagging:\n output_file = os.path.splitext(source_file)[0] + '_pos.translated'\n else:\n output_file = os.path.splitext(source_file)[0] + '.translated'\n \n try:\n # Open output file for writing\n output_file = open(output_file, 'w')\n except:\n print('Cannot open file' + output_file + ' for writing', file=sys.stderr)\n sys.exit(1)\n \n source_lines = self.read_text_file(source_file)\n # Loop on source file line by line\n for source_line in source_lines:\n # Generate word tokens\n source_words = list(word_tokenize(source_line.strip()))\n # Preform POS tagging\n if self.pos_tagging:\n source_words = pos_tag(source_words)\n \n translated_words = []\n # Generate translated words\n for word in source_words:\n if self.model[word]:\n translated_word = max(self.model[word].items(), key=itemgetter(1))[0]\n translated_words.append(translated_word)\n \n # Remove POS tags\n if self.pos_tagging:\n translated_words = [word[0] for word in translated_words]\n \n # Convert words to sentences\n translated_sentence = self.words_to_sentence(translated_words)\n \n # Write translated sentence to the output file\n output_file.write(translated_sentence + '\\n')", "def on_pronounceTransButton_clicked(self):\n self.__pronounce(\n self.transEdit.toPlainText(), self.__translationLanguage())", "def auto_translate(phrases=phrases_to_translate, languages=LANGUAGES):\n driver = webdriver.Chrome()\n translator = GoogleTranslate(driver)\n translate_phrases_into_languages(translator, phrases, languages)\n\n driver.quit()", "def translate_phrases(translator, phrases, language):\n for phrase in phrases:\n translator.type_phrase_to_translate(phrase)\n sleep(0.5)\n translated_phrase = translator.read_translated_phrase()\n add_translation_to_file(language, translated_phrase)", "def get_transcription(url):\n\n # Checks the format of the URL\n if \"https://www.youtube.com/watch?v=\" in url:\n input_url_id = url.replace(\"https://www.youtube.com/watch?v=\", \"\")\n elif \"https://youtu.be/\" in url:\n input_url_id = url.replace(\"https://youtu.be/\", \"\")\n\n # Creates a blank list to iterate over\n text_parts = []\n\n # Gets a list of all available transcripts\n try:\n\n list_of_transcripts = 
YouTubeTranscriptApi.list_transcripts(input_url_id)\n print(\"Checking for Transcriptions...\")\n\n # Checks to see if a manual transcript is created if not, checks to see if a generated one is created\n if 'en-US' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en-US'])\n elif 'en' in list_of_transcripts._manually_created_transcripts:\n print(\"Manual Transcription Found.\")\n transcript = list_of_transcripts.find_manually_created_transcript(['en'])\n elif 'en' in list_of_transcripts._generated_transcripts:\n print(\"Auto-Generated Transcription Found.\")\n transcript = list_of_transcripts.find_generated_transcript(['en'])\n\n # Saves the transcript into a variable to iterate over\n raw_transcription = transcript.fetch()\n\n # Indexing of raw transcripts\n iteration_of_raw = 0\n\n # Iterates over each dictionary and extracts 'text' key then appends the blank text_parts list\n for i in raw_transcription:\n indexed_dictionary = raw_transcription[iteration_of_raw]\n text_from_dictionary = indexed_dictionary['text']\n text_parts.append(text_from_dictionary)\n iteration_of_raw += 1\n # Defines how we want each text element to be separated with\n separator_for_each_text = \" \"\n\n # Joins the separator with the text_parts\n clean_transcription = separator_for_each_text.join(text_parts)\n\n # Returns the cleaned transcripts\n return clean_transcription\n\n except:\n print(\"No Transcriptions Found\")\n clean_transcription = \"No Transcriptions Found\"\n return clean_transcription", "def make_rest_api_call(method, url,\r\n http_headers=None, timeout=None, proxy=None):\r\n LOGGER.info('%s %s', method, url)\r\n try:\r\n resp = requests.request(method, url,\r\n headers=http_headers,\r\n timeout=timeout,\r\n proxies=_proxies_dict(proxy))\r\n resp.raise_for_status()\r\n LOGGER.debug(resp.content)\r\n if url.endswith('.json'):\r\n return json.loads(resp.content)\r\n else:\r\n return resp.text\r\n except requests.HTTPError as ex:\r\n if url.endswith('.json'):\r\n content = json.loads(ex.response.content)\r\n raise SoftLayerAPIError(ex.response.status_code, content['error'])\r\n else:\r\n raise SoftLayerAPIError(ex.response.status_code, ex.response.text)\r\n except requests.RequestException as ex:\r\n raise TransportError(0, str(ex))", "def __init__(self):\n self.client = language.LanguageServiceClient()", "def translate(\n self,\n results: Optional[List[Dict[str, Any]]] = None,\n query: Optional[str] = None,\n documents: Optional[Union[List[Document], List[Answer], List[str], List[Dict[str, Any]]]] = None,\n dict_key: Optional[str] = None,\n ) -> Union[str, List[Document], List[Answer], List[str], List[Dict[str, Any]]]:\n queries_for_translator = None\n answers_for_translator = None\n if results is not None:\n queries_for_translator = [result[\"query\"] for result in results]\n answers_for_translator = [result[\"answers\"][0].answer for result in results]\n if not query and not documents and results is None:\n raise AttributeError(\"Translator needs a query or documents to perform translation.\")\n\n if query and documents:\n raise AttributeError(\"Translator needs either a query or documents but not both.\")\n\n if documents and len(documents) == 0:\n logger.warning(\"Empty documents list is passed\")\n return documents\n\n dict_key = dict_key or \"content\"\n\n if queries_for_translator is not None and answers_for_translator is not None:\n text_for_translator = queries_for_translator + 
answers_for_translator\n\n elif isinstance(documents, list):\n if isinstance(documents[0], Document):\n text_for_translator = [doc.content for doc in documents] # type: ignore\n elif isinstance(documents[0], Answer):\n text_for_translator = [answer.answer for answer in documents] # type: ignore\n elif isinstance(documents[0], str):\n text_for_translator = documents # type: ignore\n else:\n if not isinstance(documents[0].get(dict_key, None), str): # type: ignore\n raise AttributeError(f\"Dictionary should have {dict_key} key and it's value should be `str` type\")\n text_for_translator = [doc[dict_key] for doc in documents] # type: ignore\n else:\n text_for_translator: List[str] = [query] # type: ignore\n\n batch = self.tokenizer(\n text=text_for_translator,\n return_tensors=\"pt\",\n max_length=self.max_seq_len,\n padding=\"longest\",\n truncation=True,\n ).to(self.devices[0])\n\n generated_output = self.model.generate(**batch)\n translated_texts = self.tokenizer.batch_decode(\n generated_output, skip_special_tokens=True, clean_up_tokenization_spaces=self.clean_up_tokenization_spaces\n )\n\n if queries_for_translator is not None and answers_for_translator is not None:\n return translated_texts\n elif query:\n return translated_texts[0]\n elif documents:\n if isinstance(documents, list) and isinstance(documents[0], str):\n return [translated_text for translated_text in translated_texts]\n\n translated_documents: Union[\n List[Document], List[Answer], List[str], List[Dict[str, Any]]\n ] = [] # type: ignore\n for translated_text, doc in zip(translated_texts, documents):\n translated_document = deepcopy(doc)\n if isinstance(translated_document, Document):\n translated_document.content = translated_text\n elif isinstance(translated_document, Answer):\n translated_document.answer = translated_text\n else:\n translated_document[dict_key] = translated_text # type: ignore\n translated_documents.append(translated_document) # type: ignore\n\n return translated_documents\n\n raise AttributeError(\"Translator needs a query or documents to perform translation\")", "async def funslate(self,ctx,lang=\"ja\"):\r\n if await bMsg(ctx,ctx.message.author.name,client):\r\n return\r\n await self.translater(ctx,lang,wordsDict.generate())", "def decode_speech(request, *args, **kwargs):\n\n _ERROR_POST = {\"oper\": 0, \"args\": []}\n _CT_JSON = \"application/json\"\n logger = logging.getLogger('django.request')\n errorlog = logging.getLogger('django')\n try:\n logger.debug(\"POST: \" + json.dumps(request.POST))\n if request.method == 'POST':\n value = settings.SSA.processSentence(unquote(request.POST.get(\"text\")))\n oper = value[0]\n args = value[1:]\n if len(args) > 0:\n args[0] = quote(args[0])\n logger.debug(\"oper: \" + str(oper))\n response_data = {\"oper\": oper, \"args\": args}\n return HttpResponse(json.dumps(response_data), _CT_JSON)\n else:\n return HttpResponse(json.dumps(_ERROR_POST), _CT_JSON)\n except:\n errorlog.error(traceback.format_exc())\n return HttpResponse(json.dumps(_ERROR_POST), _CT_JSON)", "async def cebolate(self, mask, target, args):\n method = 'POST'\n url = 'http://cebolatol.julianofernandes.com.br/api/tlanslate'\n payload = {'message': ' '.join(args['<message>'])}\n headers = {'content-type': 'application/json'}\n\n async with aiohttp.ClientSession() as session:\n async with session.post(url, data=json.dumps(payload),\n headers=headers) as response:\n response = await reponse.json()\n\n if type(response) is dict:\n if 'phlase' in response:\n return response['phlase']\n elif 'ellol' in 
response:\n return response['ellol']\n\n return 'Sorry, something went wlong'", "def get_transcripts_from_youtube(youtube_id, settings, i18n):\r\n _ = i18n.ugettext\r\n\r\n utf8_parser = etree.XMLParser(encoding='utf-8')\r\n\r\n youtube_text_api = copy.deepcopy(settings.YOUTUBE['TEXT_API'])\r\n youtube_text_api['params']['v'] = youtube_id\r\n data = requests.get('http://' + youtube_text_api['url'], params=youtube_text_api['params'])\r\n\r\n if data.status_code != 200 or not data.text:\r\n msg = _(\"Can't receive transcripts from Youtube for {youtube_id}. Status code: {status_code}.\").format(\r\n youtube_id=youtube_id,\r\n status_code=data.status_code\r\n )\r\n raise GetTranscriptsFromYouTubeException(msg)\r\n\r\n sub_starts, sub_ends, sub_texts = [], [], []\r\n xmltree = etree.fromstring(data.content, parser=utf8_parser)\r\n for element in xmltree:\r\n if element.tag == \"text\":\r\n start = float(element.get(\"start\"))\r\n duration = float(element.get(\"dur\", 0)) # dur is not mandatory\r\n text = element.text\r\n end = start + duration\r\n\r\n if text:\r\n # Start and end should be ints representing the millisecond timestamp.\r\n sub_starts.append(int(start * 1000))\r\n sub_ends.append(int((end + 0.0001) * 1000))\r\n sub_texts.append(text.replace('\\n', ' '))\r\n\r\n return {'start': sub_starts, 'end': sub_ends, 'text': sub_texts}", "def mocked_translate(lur):\n lur = {\n \"success\": {\"total\": 1},\n \"contents\": {\n \"translated\": \"Hi,Zaafira, I am\",\n \"text\": \"Hi, I am Zaafira\",\n \"translation\": \"yoda\",\n },\n }\n\n json_response_mock = mock.Mock()\n json_response_mock.json.return_value = lur\n return json_response_mock", "def translate(x, y, z):\n global _cmds\n _cmds = f\"translate([{x},{y},{z}])\\n\" + _cmds", "def tr(text, sourcelang, targetlang):\n request = urllib2.Request(url.format(text, sourcelang, targetlang),\n headers={ 'User-Agent': 'Mozilla/5.0', 'Accept-Charset': 'utf-8' })\n response = urllib2.urlopen(request).read()\n fixedJSON = re.sub(r',{2,}', ',', response).replace(',]', ']')\n data = json.loads(fixedJSON)\n result = {}\n result[\"definition\"] = data[0][0]\n for row in data[1]:\n try:\n result[row[0]] = row[1]\n except:\n pass\n return result" ]
[ "0.704143", "0.69701964", "0.6816958", "0.66153824", "0.642939", "0.63775456", "0.62047935", "0.61574394", "0.6141896", "0.61338365", "0.60861075", "0.6081854", "0.6030524", "0.5947659", "0.5792178", "0.5742774", "0.57173043", "0.56933045", "0.56787753", "0.56550264", "0.56533915", "0.5644059", "0.56039256", "0.5603346", "0.55718255", "0.5551083", "0.5519261", "0.5438226", "0.54205626", "0.54192793", "0.5415898", "0.5389208", "0.53826225", "0.5358368", "0.53567284", "0.53268266", "0.53268266", "0.53245413", "0.5315514", "0.53088135", "0.53059727", "0.5288405", "0.52663356", "0.525964", "0.52561814", "0.5249821", "0.52038455", "0.51707906", "0.5154303", "0.5152159", "0.5148491", "0.513857", "0.51377517", "0.51164615", "0.5111959", "0.51097983", "0.51036847", "0.51013577", "0.5081594", "0.5044502", "0.5043573", "0.5041137", "0.50340515", "0.5020289", "0.4999275", "0.49970832", "0.4980939", "0.49732602", "0.49717793", "0.49622756", "0.49587408", "0.49557358", "0.49553046", "0.49510685", "0.4948565", "0.49462932", "0.49456346", "0.49438834", "0.49364096", "0.49330392", "0.4932707", "0.4887777", "0.48791224", "0.48620242", "0.48593467", "0.48560435", "0.48555934", "0.4849584", "0.48484144", "0.48271486", "0.48237255", "0.48185807", "0.48153162", "0.48130587", "0.48029804", "0.47798738", "0.47777623", "0.47702625", "0.47592777", "0.4755254" ]
0.6338963
6
Makes a new account and adds it into the database.
def create_account():
    if not request.json or not 'name' in request.json:
        abort(400)
    account = {
        'id': accounts[-1]['id'] + 1, #last id + 1
        'name': request.json['name'],
        'surname': request.json['surname'],
        'product': request.json.get('product', ""),
        'balance': request.json.get('balance', 0.00)
    }
    accounts.append(account)
    return json.dumps({'New Account': account}, ensure_ascii=False), 201, {'Content-Type': 'text/css; charset=utf-8'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_user(self) -> None:\n # update when the account was created\n self.account_created = datetime.now().date()\n self.insert_to_db()\n log(f\"An account for User:{self.id} has been created.\")", "def create(self, account):\n model = models.load('Account', account)\n\n return self.client.create_account(model=model)", "def newaccount(accountname, account, owner, active, memo, posting, create_claimed_account):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not account:\n account = mph.config[\"default_account\"]\n if not unlock_wallet(stm):\n return\n acc = Account(account, morphene_instance=stm)\n if owner is None or active is None or memo is None or posting is None:\n password = click.prompt(\"Keys were not given - Passphrase is used to create keys\\n New Account Passphrase\", confirmation_prompt=True, hide_input=True)\n if not password:\n print(\"You cannot chose an empty password\")\n return\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, password=password)\n else:\n tx = mph.create_account(accountname, creator=acc, password=password)\n else:\n if create_claimed_account:\n tx = mph.create_claimed_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting)\n else:\n tx = mph.create_account(accountname, creator=acc, owner_key=owner, active_key=active, memo_key=memo, posting_key=posting) \n tx = json.dumps(tx, indent=4)\n print(tx)", "def new_account(firstname, lastname, pin):\n pass", "def create_test_account(self):\n if not hasattr(self, \"headers\"):\n self.headers = {\"Content-Type\": \"application/json\"}\n self.account = {\n \"account_number\": \"11223344\",\n \"pin\": \"1234\",\n \"first_name\": \"John\",\n \"last_name\": \"Doe\",\n \"opening_balance\": 100000\n }\n with self.app.app_context():\n db.session.query(Account.account_number).filter_by(\n account_number=self.account[\"account_number\"]).delete()\n db.session.commit()\n self.client.post(\"/accounts/create\",\n data=json.dumps(self.account),\n headers=self.headers)", "def create_account():\n form = CreateAccountForm(request.form)\n form.set_site_choices()\n\n if not form.validate():\n return create_account_form(form)\n\n screen_name = form.screen_name.data.strip()\n first_names = form.first_names.data.strip()\n last_name = form.last_name.data.strip()\n email_address = form.email_address.data.lower()\n password = form.password.data\n site_id = form.site_id.data\n\n if site_id:\n site = site_service.get_site(site_id)\n else:\n site = None\n\n if user_service.is_screen_name_already_assigned(screen_name):\n flash_error(gettext('This username cannot be used.'))\n return create_account_form(form)\n\n if user_service.is_email_address_already_assigned(email_address):\n flash_error(gettext('This email address cannot be used.'))\n return create_account_form(form)\n\n initiator_id = g.user.id\n\n try:\n user, event = user_creation_service.create_basic_user(\n screen_name,\n email_address,\n password,\n first_names=first_names,\n last_name=last_name,\n creator_id=initiator_id,\n )\n except user_creation_service.UserCreationFailed:\n flash_error(\n gettext(\n 'User \"%(screen_name)s\" could not be created.',\n screen_name=screen_name,\n )\n )\n return create_account_form(form)\n\n flash_success(\n gettext(\n 'User \"%(screen_name)s\" has been created.',\n screen_name=user.screen_name,\n )\n )\n\n if site:\n user_creation_service.request_email_address_confirmation(\n user, email_address, site_id\n )\n 
flash_success(\n gettext('An email has been sent to the corresponding address.'),\n icon='email',\n )\n\n user_signals.account_created.send(None, event=event)\n\n return redirect_to('.view', user_id=user.id)", "def _do_create_account(post_vars):\r\n user = User(username=post_vars['username'],\r\n email=post_vars['email'],\r\n is_active=False)\r\n user.set_password(post_vars['password'])\r\n registration = Registration()\r\n\r\n # TODO: Rearrange so that if part of the process fails, the whole process fails.\r\n # Right now, we can have e.g. no registration e-mail sent out and a zombie account\r\n try:\r\n user.save()\r\n except IntegrityError:\r\n # Figure out the cause of the integrity error\r\n if len(User.objects.filter(username=post_vars['username'])) > 0:\r\n raise AccountValidationError(\r\n _(\"An account with the Public Username '{username}' already exists.\").format(username=post_vars['username']),\r\n field=\"username\"\r\n )\r\n elif len(User.objects.filter(email=post_vars['email'])) > 0:\r\n raise AccountValidationError(\r\n _(\"An account with the Email '{email}' already exists.\").format(email=post_vars['email']),\r\n field=\"email\"\r\n )\r\n else:\r\n raise\r\n\r\n # add this account creation to password history\r\n # NOTE, this will be a NOP unless the feature has been turned on in configuration\r\n password_history_entry = PasswordHistory()\r\n password_history_entry.create(user)\r\n\r\n registration.register(user)\r\n\r\n profile = UserProfile(user=user)\r\n profile.name = post_vars['name']\r\n profile.level_of_education = post_vars.get('level_of_education')\r\n profile.gender = post_vars.get('gender')\r\n profile.mailing_address = post_vars.get('mailing_address')\r\n profile.city = post_vars.get('city')\r\n profile.country = post_vars.get('country')\r\n profile.goals = post_vars.get('goals')\r\n\r\n try:\r\n profile.year_of_birth = int(post_vars['year_of_birth'])\r\n except (ValueError, KeyError):\r\n # If they give us garbage, just ignore it instead\r\n # of asking them to put an integer.\r\n profile.year_of_birth = None\r\n try:\r\n profile.save()\r\n except Exception:\r\n log.exception(\"UserProfile creation failed for user {id}.\".format(id=user.id))\r\n raise\r\n\r\n UserPreference.set_preference(user, LANGUAGE_KEY, get_language())\r\n\r\n return (user, profile, registration)", "def _create_account(self, new_account):\n if User.check_existing_user(new_account['account_holder']):\n user = User(new_account['account_holder'])\n if new_account['account_type'] in user.accounts.keys():\n self.session.output({\n 'error':\n 'user already has an account of this type. 
Returning to main menu.\\n'},\n '[ INVALID ACCOUNT TYPE ERROR ]')\n self._navigate_mainmenu(1)\n return False\n else:\n new_account_created = Account(userid=user.user_id, account_type=new_account['account_type'],\n balance=new_account['initial_balance'])\n self.session.output(new_account_created.get_info(),\n '\\n[ New account created for user {} ]'.format(new_account['account_holder']))\n return True\n else:\n self.session.output({'invalid_account_holder': 'please enter valid account holder id\\n'},\n '\\n[ USER ID ERROR ]')\n return False", "def new_account (self, name = 'Account', default = False, welcome = False):\n\t\tif self.currentAccount:\n\t\t\tself.save_account(self.currentAccount)\n\t\tself.currentAccount = Account(self, name)\n\t\tif default:\n\t\t\tself.set_as_default(self.currentAccount)", "def create(self, data):\n url = self.base_url + '/v2/account/create/'\n return self._call_vendasta(url, data)", "def create_account(self):\n account_identifier = \"\".join([str(num) for num in random.sample(range(10), 9)])\n first_fifteen_digit = self.BIN + account_identifier\n checksum = self.create_checksum(first_fifteen_digit)\n card_number = first_fifteen_digit + str(checksum)\n pin = \"\".join([str(num) for num in random.sample(range(10), 4)])\n balance = 0\n print(\"\\nYour card has been created\")\n print(f\"Your card number:\\n{card_number}\\nYour card PIN:\\n{pin}\")\n # fetching max id from database\n database_cursor.execute(\"SELECT id FROM card;\")\n ids = [x[0] for x in database_cursor.fetchall()]\n if ids:\n max_id = max(ids) + 1\n else:\n max_id = 1\n # insert new account into database\n database_cursor.execute(f\"INSERT INTO card VALUES ({max_id}, {card_number}, {pin}, {balance});\")\n database_connection.commit()", "def create_account():\n account = w3.eth.account.create()\n return account", "def create_user(user, first_name, last_name, major, bio):\n return userAccount.objects.create(user=user, first_name=first_name, last_name=last_name, major=major, bio=bio)", "def create(self, username, password, email):\n pass", "def _create_account(self, username, email, password):\r\n resp = self.client.post('/create_account', {\r\n 'username': username,\r\n 'email': email,\r\n 'password': password,\r\n 'location': 'home',\r\n 'language': 'Franglish',\r\n 'name': 'Fred Weasley',\r\n 'terms_of_service': 'true',\r\n 'honor_code': 'true',\r\n })\r\n return resp", "def create_new_record(account,userName,password):\n new_record = Records(account,userName,password)\n return new_record", "def CreateAccount(self):\n \n username = self.username.get().lstrip().rstrip()\n if not username:\n messagebox.showerror('Error', 'No username entered!')\n return False\n for user in self.user_db:\n if user['User'] == username:\n messagebox.showerror('Error', f'{username} already exists!')\n return False\n \n if not self.PasswordMatch():\n messagebox.showerror('Error', 'Passwords must match!')\n return False\n password = self.password.get().lstrip().rstrip()\n \n user_data = {\n 'User': username,\n 'Password': password,\n 'CreationDate': date.today().strftime('%B %d, %Y'),\n 'LastLogIn': ''\n }\n self.user_db.append(user_data)\n return True", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def post(self):\n ctx = _request_ctx_stack.top\n current_user = ctx.user\n 
request_body = request.get_json()\n name = request_body.get('name')\n account_type = request_body.get('type')\n initial_balance = request_body.get('ini_bal')\n if name:\n try:\n acc_factory = AccountFactory()\n if account_type == 'credit':\n limit = request_body.get('limit')\n if limit is None:\n return response('failed', 'Please specify a credit limit for a credit account', 400)\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance,\n limit=limit\n )\n else:\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance\n )\n new_account.save()\n except IntegrityError:\n return response('failed', 'Duplicate account name', 400)\n else:\n return response_created_account(new_account, 200)\n return response('failed', 'Missing account name attribute', 400)", "def CreateAccount():\n \n if not self.CreateAccount():\n return\n \n # Offer to log the new user account in\n ask = messagebox.askyesno('Success!',\n f'Account created. Log in as {username}?')\n if ask:\n # Save data to the file and load the main program\n self.SaveData()\n self.main_frame.destroy()\n MainWindow.MainWindow(self, username, login_date=None)\n else:\n # Clear variable fields and return to initial 'Log In' window\n self.username.set('')\n self.password.set('')\n self.confirm_pass.set('')\n Return()", "def signup(self, request):\n # TODO: Add user authentication. Currently, we will create an acct \n new_user = Account.add_new_user(request)\n if new_user is None:\n return AccountResponse(errmsg=\"Username already exists!\")\n return AccountResponse(id=new_user.key.id())", "def register():\n\n if request.method == 'POST':\n new_account = Account(fullname = request.form['fullname'],\n email = request.form['email'],\n username = request.form['username'],\n password = request.form['password'])\n \n new_account.save()\n return \"Welcome\"\n else:\n return render_template('register.html')", "def create_account():\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n\n user = create_user(username, password)\n\n if not user:\n return redirect(url_for('login'))\n\n session['username'] = user.username\n session['user_id'] = user.id\n session['logged_in'] = True\n session['is_admin'] = user.is_admin\n\n return redirect(url_for('index'))\n\n return render_template('createaccount.html')", "def open_account():\n # TODO: refactor this endpoint be secure\n # HINT: this route should hash the password before it is saved\n holder = request.json.get(\"holder\")\n account = Account.query.filter_by(holder=holder).first()\n if account:\n return jsonify({\"error\": \"Account already exists\"})\n account = Account(holder=holder)\n db.session.add(account)\n db.session.commit()\n return (\n jsonify(\n {\"message\": f\"An account for {account.holder} has been created\"}\n ),\n 201,\n )", "def post(self, data):\n conn = pecan.request.db_conn\n try:\n account = db_models.Account(**data.as_dict())\n return conn.create_account(request.context, account)\n except Exception:\n LOG.exception('Fail to create account: %s' % data.as_dict())\n raise exception.AccountCreateFailed(user_id=data.user_id,\n domain_id=data.domain_id)", "def create_account():\n user_id = get_jwt_identity()\n user = User.filter(id=user_id)[0]\n data = json.loads(request.data)\n\n if 'title' not in data:\n return jsonify_response({\"errors\": \"`title` field is 
required.\"}, 400)\n\n held_accounts = user.get_held_accounts(user.id)\n if held_accounts:\n user_accounts = \",\".join(f\"'{i}'\" for i in held_accounts)\n user_account_names_q = \\\n f\"g.V().hasLabel('{Account.LABEL}')\" + \\\n f\".has('id', within({user_accounts}))\" + \\\n f\".values('title')\"\n user_account_names = client.submit(user_account_names_q).all().result()\n\n if data[\"title\"] in user_account_names:\n return jsonify_response(\n {\"errors\": \"Users with the title already exist\"}, 400)\n\n account = Account.create(title=data[\"title\"])\n edge = UserHoldsAccount.create(user=user.id, account=account.id,\n relationType=\"secondary\")\n\n response = {\n \"title\": account.title\n }\n return jsonify_response(response, 201)", "def create_account(request):\n if request.method == 'POST':\n\n post = request.POST\n form = forms.RegisterForm(post)\n\n if form.is_valid():\n # create a new user\n user = models.HAWCUser.objects.create_user(post['email'],\n post['password1'])\n user.first_name = post['first_name']\n user.last_name = post['last_name']\n user.full_clean()\n user.save()\n\n # create a new user profile\n profile = models.UserProfile(user=user)\n profile.save()\n\n # after save, log user in\n user = authenticate(username=post['email'],\n password=post['password1'])\n login(request, user)\n return redirect('portal')\n else:\n form = forms.RegisterForm()\n\n return render(request, 'registration/create_account.html', {'form': form})", "def create(self, validated_data):\n account = Account.objects.create(**validated_data)\n account.save()\n return account", "def create_new_user(self):\n username = 'pseudo'\n email = 'carole@tests.com'\n password = '00000000'\n user_created = self.user.objects.create_user(id=1, username=username,\n email=email, password=password)\n HistoryUser.objects.create(user=user_created)\n StatusUser.objects.create(user=user_created)\n\n return user_created", "def create_account_request(request):\n if request.method == \"POST\":\n form = NewAccountForm(request.POST)\n if form.is_valid():\n form.save()\n messages.success(request, \"Creation successful.\")\n return redirect(\"home\")\n messages.error(request, \"Unsuccessful creation. 
Invalid information.\")\n form = NewAccountForm\n customer_list = Customer.objects.all()\n context = {'customer_list': customer_list, 'account_form': form}\n return render(request, \"accounts/account_creation.html\", context)", "def make_new_user():\n\n new_user = User(\n first_name=request.form['first_name'],\n last_name=request.form['last_name'],\n image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def add_user(self):\n query = \"INSERT INTO users (first_name, last_name, email, password) VALUES (%s, %s, %s, %s)\"\n self.cursor.execute(query,(\n self.first_name, \n self.last_name, \n self.email, \n self.password))", "def create_new_user(first_name, last_name, email, password):\n \n new_user = User(first_name, last_name, email, password)\n db.session.add(new_user)\n db.session.commit()\n \n # link a root storage folder to the user\n root_folder = Folder()\n db.session.add(root_folder)\n db.session.commit()\n new_user.storage_root_id = root_folder.id\n new_user.storage_root = root_folder\n db.session.commit()\n\n # link usage tracking to the user\n usage = Usage()\n usage.user_id = new_user.id\n new_user.usage = usage\n db.session.add(usage)\n db.session.commit()\n\n # link a billing address to the user\n billing_address = BillingAddress()\n billing_address.user_id = new_user.id\n new_user.billing_address = billing_address\n db.session.add(billing_address)\n db.session.commit()\n\n # link settings to the User\n settings = Settings()\n settings.user_id = new_user.id\n new_user.settings = settings\n db.session.add(settings)\n db.session.commit()", "async def create(self):\n assert self.backend.username_field in self\n assert \"password\" in self\n self.setdefault(\"date_joined\", now_utc())\n self.setdefault(\"is_superuser\", False)\n self.setdefault(\"is_staff\", False)\n self.setdefault(\"is_active\", True)\n self[\"id\"] = await self.backend.insert(**self)", "def createAccount(self, loginName, password, data):\n return self.talk('create',\n data=self.__makeLoginDict(loginName, password, data))", "def new_user():\n success = True\n try:\n usr = User(request.json['username'], request.json['email'])\n db.session.add(usr)\n db.session.commit()\n except:\n success = False\n return jsonify(success=success)", "def POST(self, data={}):\n\t\torigname = data['username'];\n\t\twith transaction() as t:\n\t\t\tif UserModel().load_by_username(origname.lower()):\n\t\t\t\tself.logger.debug('User tried to create a new account with a chosen username [%s]', origname)\n\t\t\t\tt.rollback()\n\t\t\t\treturn 'error.user.new.user_exists'\n\t\t\tself.logger.debug('User created new account with username [%s]', origname)\n\t\t\tUserModel().new(is_active=True, username=origname.lower(), name=origname)\n\t\treturn 'ok'", "def create_account():\n\n return render_template('account.html')", "def add_account(self, account):\n self.accounts[account.account_number] = account.json()\n # We should save in database the new account using self.di, but not now in order to get our tests passed", "def do_user_create():\n target = User(\n request.form['gender'],\n request.form['first_name'],\n request.form['name'],\n request.form['mail'],\n request.form['meter_id'],\n request.form['group_id'],\n secrets.token_hex(33))\n target.set_role(request.form['role'])\n target.nick = request.form['nick']\n db.session.add(target)\n db.session.commit()\n return user_list(\"Created user \" + target.name)", "def insert_user(self, *args):\n name = args[0]\n 
email_address = args[1]\n password = args[2]\n account_type = args[3]\n created_on = args[4]\n last_modified = args[5]\n insert_user = \"INSERT INTO users(name, email_address, password, account_type, created_on, last_modified) \" \\\n \"VALUES('{}', '{}', '{}', '{}', '{}', '{}');\"\\\n .format(name, email_address, password, account_type, created_on, last_modified)\n self.cursor.execute(insert_user, (name, email_address, password, account_type, created_on, last_modified))\n self.connection.commit()", "def save_accounts(account):\n account.save_account()", "def save_accounts(account):\n account.save_account()", "def create_user(self):\n User.objects.create_user('test', 'testing@test.com', 'testing')", "def register_new_user(first_name,email,password):\n\n new_user = User(first_name=first_name, email=email, password=password)\n\n db.session.add(new_user)\n db.session.commit()\n\n return new_user", "def create(self, username, password):\n pass", "def users_create():", "def insert_account(self, email, password, username=None):\n email_id = self.get_email_id(email)\n \n #Check if email or username already exists\n errors = []\n if self.sql('SELECT count(*) FROM accounts WHERE email_id = %s', email_id):\n errors.append('Email address is already in use.')\n if username:\n if self.sql('SELECT count(*) FROM accounts WHERE username = %s', username):\n errors.append('Username is already in use.')\n else:\n username = 'NULL'\n \n #Insert into database\n if not errors:\n sql = 'INSERT INTO accounts (email_id, username, password, permission)'\n sql += ' VALUES (%s, %s, %s, %s)'\n \n if not PRODUCTION_SERVER:\n print 'Account created for \"{}\"'.format(email)\n \n status = self.sql(sql, email_id, username, password_hash(password), PERMISSION_REGISTERED)\n else:\n status = 0\n return dict(status=status, errors=errors)", "def add_account(insert_dict):\n return ar.add_account(insert_dict)", "def register():\n print('****************Registration from for Zuri Bank****************')\n email = input('what is your email address? \\n')\n firstName = input('what is your first name? \\n')\n lastName = input('What is your last name? \\n')\n password = getpass('create your password?\\n')\n \n\n accountNo = genAccountNo()\n \n is_user_created = database.create(accountNo,firstName, lastName, email,password)\n if is_user_created:\n print('Your account has been created.\\nHere is your account Number %d please keep it safe.' 
% accountNo)\n login()\n else:\n print('Something went wrong.')\n register()", "def submit(self):\n name = self.nameEntry.get()\n server = self.servEntry.get()\n key = self.keyEntry.get()\n secret = self.secEntry.get()\n try:\n accounts.new(name, key, secret, server)\n # Test if account valid\n try:\n core.account_available_margin(name)\n self.quit()\n except Exception as e:\n tkinter.messagebox.showerror(\"Error\", \"Wasn't able to validate \"\n + \"the new account:\\n\" + str(e))\n accounts.delete(name)\n except BitmexAccountsException as e:\n tkinter.messagebox.showerror(\"Error\", str(e))", "def create_user(username, name):\n db.session.add(Users(username=username, name=name))\n db.session.commit()", "def create_user(session, phone_number, name, pass_hash, funds=0.0):\n # Perform the db job\n user = User(phone_number=phone_number, name=name, pass_hash=pass_hash, funds=funds)\n session.add(user)\n session.commit()\n return USER_GET_URI.format(user_id=phone_number)", "def create_user_profile(sender, instance, created, **kwargs):\n if created:\n # create new Stellar account\n stellar.api.create_account(user=instance)", "def create_user():\n new_user = User(id=login_session['gplus_id'],\n name=login_session['username'],\n email=login_session['email'],\n picture=login_session['picture'])\n session.add(new_user)\n session.flush()\n session.commit()\n user = session.query(User).filter_by(email=login_session['email']).one()\n return user.id", "def _create_account(user_id: int):\r\n now = datetime.now()\r\n _created_at = now.strftime(\"%m/%d/%Y at %H:%M:%S\")\r\n Wealth.collection.insert_one({\r\n \"_id\": user_id,\r\n \"coins\": 100,\r\n \"cookie\": 0,\r\n \"choc\": 0,\r\n \"poop\": 0,\r\n \"beans\": 0,\r\n \"pizza\": 0,\r\n \"waffles\": 0,\r\n \"Fish\": 0,\r\n \"apple\": 0,\r\n \"afk\": \"No status set, run w/status to set a status\",\r\n \"Reputation\": 0,\r\n \"LastUsed\": \"Isnotset\",\r\n \"TargetMember\": 0,\r\n \"BadgeSlot1\": \"Doesn't Have Noob\",\r\n \"BadgeSlot2\": \"Doesn't Have Beginner\",\r\n \"BadgeSlot3\": \"Doesn't Have Leader\",\r\n \"AccountCreated\": _created_at,\r\n \"Premium\": \"No\",\r\n \"Developer\": \"No\",\r\n \"Bank\": 0,\r\n \"Tickets\": 0,\r\n \"LastWithdraw\": \"No date\",\r\n \"LastTransfer\": \"No date\",\r\n \"MarriedTo\": \"Nobody\",\r\n \"MarriedDate\": \"No date\",\r\n })", "def create_user():\n first_name = request.form['first_name'].capitalize()\n last_name = request.form['last_name'].capitalize()\n image_url = request.form['image_url']\n\n new_user = User(first_name=first_name, last_name=last_name, image_url=image_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def new_user(request):\r\n rdict = request.params\r\n\r\n u = User()\r\n\r\n u.username = unicode(rdict.get('username'))\r\n if u.username:\r\n u.username = u.username.lower()\r\n u.email = unicode(rdict.get('email')).lower()\r\n passwd = get_random_word(8)\r\n u.password = passwd\r\n u.activated = True\r\n u.is_admin = False\r\n u.api_key = User.gen_api_key()\r\n\r\n try:\r\n DBSession.add(u)\r\n DBSession.flush()\r\n # We need to return the password since the admin added the user\r\n # manually. 
This is only time we should have/give the original\r\n # password.\r\n ret = dict(u)\r\n ret['random_pass'] = passwd\r\n return _api_response(request, ret)\r\n\r\n except IntegrityError, exc:\r\n # We might try to add a user that already exists.\r\n LOG.error(exc)\r\n request.response.status_int = 400\r\n return _api_response(request, {\r\n 'error': 'Bad Request: User exists.',\r\n })", "def add_user(first_name,last_name,email,password,typeOfUser):\n user=User.objects.create(first_name=first_name,last_name=last_name,email=email,password=password,role=typeOfUser)\n return user", "def create_account(self, user):\n tx = self.iroha.transaction(\n [\n self.iroha.command(\n \"CreateAccount\",\n account_name=user.gov_id,\n domain_id=\"afyamkononi\",\n public_key=user.public_key,\n )\n ]\n )\n IrohaCrypto.sign_transaction(tx, self.creator_account_details.private_key)\n return self.send_transaction_and_return_status(tx)", "def new_user():\n new_user = User(first_name=request.form['first_name'], last_name=request.form['last_name'], image_url=request.form['image_url'] or None)\n\n db.session.add(new_user)\n db.session.commit()\n\n return redirect(\"/users\")", "def account_post(request):\n fields = [\"fname\", \"lname\", \"email\", \"token\"]\n body = None\n\n try:\n body = request.get_json()\n except:\n return http400(\"Missing body\")\n\n body_validation = validate_body(body, fields)\n # check that body validation succeeded\n if body_validation[1] != 200:\n return body_validation\n\n auth = azure_refresh_token(body[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n\n try:\n db_entry = {\n \"fname\": body[\"fname\"],\n \"lname\": body[\"lname\"],\n \"email\": body[\"email\"],\n }\n\n account_db.add(db_entry, id=body[\"email\"])\n except:\n return http400(\"Email already taken\")\n\n response = {\n \"fname\": body[\"fname\"],\n \"lname\": body[\"lname\"],\n \"email\": body[\"email\"],\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n }\n\n return jsonHttp200(\"Account Created\", response)", "def create_user(email, password):\n email_used = AuthUser.query.filter_by(email=email).first()\n if email_used:\n return False, \"Email address has already been used\"\n account = Account(email)\n account.plan_key = 'BASIC'\n account.is_active = True\n account.created = datetime.datetime.now()\n db.session.add(account)\n user = AuthUser(email, password, account)\n user.created = datetime.datetime.now()\n db.session.add(user)\n db.session.commit()\n return user.id, None", "def do_user_create(cs, args):\n cs.users.create(args.username, args.password, args.email, args.realname,\n args.comment)\n print(\"Create user '%s' successfully.\" % args.username)", "def create(cls, body: CloudAccount):\n\t\tpass", "def add_user(self, username, password): #WORKS\n password_hash = generate_password_hash(password) # Generates a SHA256 hash.\n try:\n self.cur.execute(\"INSERT INTO users VALUES(\\\"{}\\\", \\\"{}\\\")\".format(username, password_hash))\n self.db.commit()\n except:\n self.db.rollback()", "def addUsertoDatabase(self):\r\n self.c.execute(\"\"\"INSERT INTO student_information VALUES (?,?,?)\"\"\",(self.name,self.password,self.budget,))\r\n self.con.commit()\r\n print(\"Added to Database Student..\")", "def CreateAccount():\n login_frame.forget()\n self.LoadCreateAccountWindow()", "def create_account(self, username, email, password):\r\n resp = check_for_post_code(self, 200, reverse('create_account'), {\r\n 'username': username,\r\n 'email': 
email,\r\n 'password': password,\r\n 'name': 'username',\r\n 'terms_of_service': 'true',\r\n 'honor_code': 'true',\r\n })\r\n data = json.loads(resp.content)\r\n self.assertEqual(data['success'], True)\r\n # Check both that the user is created, and inactive\r\n self.assertFalse(User.objects.get(email=email).is_active)", "def create_user(self, name, email, password):\n new_user = User(name=name, email=email, password=password)\n db.session.add(new_user)\n db.session.commit()", "def add_user(self, firstname, lastname, email, username, password, role):\n\n new_user = {\n \"id\": len(self.db) + 1,\n \"firstname\": firstname,\n \"lastname\": lastname,\n \"email\": email,\n \"username\": username,\n \"password\": password,\n \"role\": role\n }\n\n ALL_USERS.append(new_user)", "def create_user(self, username, password, email, name):\n\n duplicate_check = User.query.filter_by(username=username).first()\n if duplicate_check is not None:\n return\n user = User(username=username, password=password, email=email, name=name)\n db.session.add(user)\n db.session.commit()", "def new_user():\n pass", "def create(self, validated_data):\n\n user_data = {\n \"username\" : validated_data.get(\"username\"),\n \"email\" : validated_data.get(\"email\"),\n \"password\" : validated_data.get(\"password\")\n }\n user = User.objects.create_user(**user_data)\n user.save()\n\n account_data = {\n \"phone\" : validated_data.get(\"phone\"),\n \"type\" : validated_data.get(\"type\"),\n \"lat\" : validated_data.get(\"lat\"),\n \"lang\" : validated_data.get(\"lang\"),\n \"center_point\" : validated_data.get(\"center_point\")\n }\n account = Account(user = user, **account_data)\n account.save()\n\n return user", "def add_new_user_to_db():\n first_name = request.form['first_name']\n last_name = request.form['last_name']\n img_url = request.form['img_url']\n\n new_user = User(first_name=first_name,last_name=last_name, img_url=img_url)\n db.session.add(new_user)\n db.session.commit()\n\n return redirect('/users')", "def register(self, form):\n new_user = form.save(commit=False)\n username_field = getattr(new_user, 'USERNAME_FIELD', 'username')\n # Save lowercased email as username.\n setattr(new_user, username_field, form.cleaned_data['email'].lower())\n new_user.first_name = form.cleaned_data['first_name']\n new_user.last_name = form.cleaned_data['last_name']\n new_user.save()\n new_user = authenticate(username=getattr(new_user, username_field), password=form.cleaned_data['password1'])\n login(self.request, new_user)\n user_registered.send(sender=self.__class__, user=new_user, request=self.request)\n profile, _ = Profile.objects.get_or_create(user=new_user)\n self.request.session['signed_up'] = True\n profile.payment_plan = int(form.cleaned_data['payment_plan'])\n profile.company_name = form.cleaned_data['company']\n profile.phone = form.cleaned_data['phone']\n profile.save(update_fields=['payment_plan', 'company_name', 'phone'])\n if profile.payment_plan != Profile.PAYMENT_PLAN_FREE:\n messages.add_message(self.request, messages.INFO,\n 'Congratulations! 
We won\\'t charge you for this plan for now.')\n return new_user", "def create_account(self):\r\n logger.info('*' * 20 + ' Starting creating user account ' + '*' * 20)\r\n logger.info(f'\\nfor user {self}')\r\n self.automation.wait.until(EC.presence_of_element_located((By.ID, 'email_create')))\r\n self.automation.driver.find_element_by_css_selector(\"#email_create\").send_keys(self.email) # send email\r\n self.automation.driver.find_element_by_css_selector(\"#SubmitCreate\").click() # 'create an account' btn\r\n\r\n # ##############################################\r\n # 1- mr. or mrs. ?\r\n logger.info(f'Choose title {self.title}')\r\n self.automation.wait.until(EC.presence_of_element_located((By.CSS_SELECTOR, '#account-creation_form div.account_creation div.clearfix')))\r\n if self.title == 'mr.':\r\n gender_selector = \"input#id_gender1\"\r\n\r\n else:\r\n gender_selector = \"input#id_gender2\"\r\n\r\n self.automation.driver.find_element_by_css_selector(gender_selector).click()\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 2000)\") # scroll down\r\n\r\n # ##############################################\r\n logger.info(f'adding fname {self.fname}')\r\n # 2- first name\r\n self.automation.driver.find_element_by_css_selector(\"#customer_firstname\").send_keys(self.fname)\r\n\r\n # ##############################################\r\n logger.info(f'adding lname {self.lname}')\r\n # 3- last name\r\n self.automation.driver.find_element_by_css_selector(\"#customer_lastname\").send_keys(self.lname)\r\n\r\n # ##############################################\r\n logger.info(f'adding email {self.email}')\r\n # 4- email\r\n email_elem = self.automation.driver.find_element_by_css_selector(\"#email\")\r\n email = email_elem.get_attribute('value')\r\n if not email: # check email is passed or not ?\r\n logger.info('email was not added , add it again ')\r\n email.send_keys(self.email)\r\n\r\n # ##############################################\r\n logger.info(f'adding password')\r\n # 5- password\r\n password = f'document.getElementById(\"passwd\").value=\"{self.password}\";' # js code to change password elm value\r\n self.automation.driver.execute_script(password)\r\n\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 1000)\") # scroll down\r\n\r\n # ##############################################\r\n # 6- date of birth year-month-day\r\n logger.info(f'adding dob {self.dob}')\r\n self.select_dob()\r\n\r\n # ##############################################\r\n logger.info(f'adding fname#2 {self.fname}')\r\n # 7- fname\r\n get_fname = 'return document.querySelectorAll(\"div.account_creation #firstname\")[0].value;'\r\n fname = self.automation.driver.execute_script(get_fname)\r\n if not fname: # check fname is passed or not ?\r\n fname = f'document.querySelectorAll(\"div.account_creation #firstname\")[0].value=\"{self.fname}\";'\r\n self.automation.driver.execute_script(fname)\r\n\r\n # ##############################################\r\n logger.info(f'adding lname#2 {self.lname}')\r\n # 8- last name\r\n get_lname = 'return document.querySelectorAll(\"div.account_creation #lastname\")[0].value;'\r\n lname = self.automation.driver.execute_script(get_lname)\r\n if not lname: # check lname is passed or not ?\r\n lname = f'document.querySelectorAll(\"div.account_creation #lastname\")[0].value=\"{self.lname}\";'\r\n self.automation.driver.execute_script(lname)\r\n\r\n # ##############################################\r\n # 9- complete 
profile ( company, city, address, mobile, postalcode, alias address)\r\n logger.info('complete profile with ( company, city, address, mobile, postalcode, alias address)')\r\n logger.info(f'company({self.company}) , city({self.city}) , address({self.address}), mobile({self.phone}) , postalcode({self.postalcode}) , alias address({self.address[0] + self.address[-1]})')\r\n self.complete_profile()\r\n\r\n # ##############################################\r\n # 10- state (randomly choice)\r\n logger.info('choose state randomly')\r\n states = [state.text for state in self.automation.driver.find_elements_by_css_selector('#id_state option')]\r\n Select(self.automation.driver.find_element_by_css_selector('#id_state')).select_by_visible_text(choice(states))\r\n # ###############################################\r\n self.automation.driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight - 700)\") # scroll down\r\n self.automation.driver.find_element_by_css_selector('#submitAccount').click() # register btn\r\n # ################ wait to login ###############################\r\n account_lst = self.automation.driver.find_elements_by_css_selector('.myaccount-link-list')\r\n timer = 1\r\n is_login = True\r\n while not account_lst:\r\n if timer == 60:\r\n is_login = False\r\n break\r\n time.sleep(.3)\r\n account_lst = self.automation.driver.find_elements_by_css_selector('.myaccount-link-list')\r\n timer += 1\r\n return is_login", "def cli_create(dbfile, username, email, password, group):\n with atomic(dbfile) as cursor:\n create_user(cursor, username=username, password=password, \n email=email, groups=group)\n click.echo(f\"Created user {username!r} with password {password!r}\")", "def create_user(self, username, password, email, height, weight):\n\n try:\n self.c.execute(\"INSERT INTO profiles VALUES (?,?,?,?,?)\", (username, password, email, weight, height))\n self.user_id = self.c.lastrowid\n self.conn.commit()\n return self.user_id\n\n except sqlite3.IntegrityError:\n print('Somebody already has that name, try again.')", "def register():\n insert_user(json_body())\n try:\n db.session.commit()\n except IntegrityError:\n raise exc.CouldNotCreateEntry()\n\n return jsonify({'message': 'Created user.'}), 200", "def create_account(request, role):\n context = {}\n if request.method == \"POST\":\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm(request.POST)\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm(request.POST)\n\n if(form.is_valid()):\n createNewUser(form)\n username = form.cleaned_data.get('username')\n messages.success(request, f\"Account has been created for {username}!\")\n return redirect('login')\n else:\n if(role.lower() == \"academic\"):\n form = AcademicRegisterForm()\n elif(role.lower() == \"average\"):\n form = AvgRegisterForm()\n else:\n context['error'] = \"URL does not exist. 
Please return to home and try again\"\n return render(request, 'classroom_main/create_account.html', context)\n\n context[\"type\"] = role\n context['title'] = \"Sign up to the Online Coding Classroom\"\n context['form'] = form\n\n return render(request, 'classroom_main/create_account.html', context)", "def create_user(username):\n\n password = getpass.getpass('Password for {0}: '.format(username))\n confirm = getpass.getpass('Again: ')\n\n if password != confirm:\n print >> sys.stderr, \"Passwords don't match\"\n\n sys.exit(1)\n\n with transaction.manager:\n Users(username, password).save()", "def make_user(self, name, snowflake):\n to_exec = \"INSERT INTO users (snowflake_pk, username, balance) VALUES(%s, %s, %s)\"\n self.__cursor.execute(to_exec, (str(snowflake), name, '0'))\n self.__connection.commit()", "def save_account(self):\n Credential.account_list.append(self)", "def addUser(self, accountId, username, accesstype, **kwargs):\n #put your code here to implement this method\n raise NotImplementedError (\"not implemented method addUser\")", "def test_create_account(self):\n url = reverse('account:accounts')\n data = {'name': 'Test Account 1'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'Test Account 1')", "def signup():", "def create_user(email, password, fname, lname):\n\n user = User(email=email, password=password, fname=fname, lname=lname)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def add_user(self, username, email, password):\n\n new_user = User(username, email, password)\n new_user_details = new_user.get_details()\n for user in self.users:\n if new_user_details['email'] == user['email']:\n return 'User already exists'\n else:\n new_user_details['id'] = len(self.users)\n self.users.append(new_user_details)\n return 'Account created. 
You can now log in'", "def _insert(self):\n self.account_number = randint(1111111,9999999)\n with sqlite3.connect(self.dbpath) as connection: \n cursor = connection.cursor()\n INSERTSQL = \"\"\"INSERT INTO accounts(first_name, last_name, \n username, email_address, \n password_hash, balance, \n account_number, admin,\n api_key) \n VALUES (:first_name, :last_name, \n :username, :email_address, \n :password_hash, :balance, \n :account_number, :admin,\n :api_key); \"\"\"\n values = {\n \"first_name\": self.first_name,\n \"last_name\": self.last_name,\n \"username\": self.username,\n \"email_address\": self.email_address,\n \"password_hash\": self.password_hash, \n \"balance\": self.balance, \n \"account_number\": self.account_number,\n \"admin\": self.admin,\n \"api_key\": randint(111111111, 999999999)\n }\n try: \n cursor.execute(INSERTSQL, values)\n self.id = cursor.lastrowid\n except sqlite3.IntegrityError:\n raise ValueError(\"ticker not set or a position for this ticker already exists\")", "async def create_new_user(*, user: User):\n with Session(engine) as session:\n user.password = simple_hash(user.name, user.password) #Hashing password for security\n session.add(user)\n session.commit()\n return {\"message\": \"User {user_id} created\".format(user_id = user.id)}", "def create_personal_account_for_user(\n sender, instance: User, created: bool, *args, **kwargs\n):\n if sender is User and created:\n Account.objects.create(\n name=instance.username,\n display_name=(\n (instance.first_name or \"\") + \" \" + (instance.last_name or \"\")\n ).strip(),\n creator=instance,\n user=instance,\n )", "def register(self,name,email,password):\n\t\t#code for actual registration in the database", "def add_new_user(self, user):\n # print(\"Saving new user\")\n self.execute(TABELLE['id_users']['insert']['complete_user'],\n (user['id'], False, False, True, False, False))\n\n self.execute(TABELLE['users']['insert'],\n (user['id'], user['username']))", "def new_user():\n username = request.json.get('username')\n password = request.json.get('password')\n picture = request.json.get('picture')\n email = request.json.get('email')\n if username is None or password is None:\n print(\"missing arguments\")\n abort(400)\n\n if getUserByUsername(username) is not None:\n print(\"existing user\")\n return jsonify({'message': 'user already exists'}), 200\n\n user = addUser(username, picture, email, password)\n return jsonify(user=user.serialize), 201", "def do_createuser(self, *args):\n self.connection_obj.initialize_table()\n print(\"UserTable Created Successful\")", "def create_user(self, request):\n if User.query(User.name == request.user_name).get():\n raise endpoints.ConflictException(\n 'A User with that name already exists!')\n user = User(name=request.user_name, email=request.email)\n user.put()\n return StringMessage(message='User {} created!'.format(\n request.user_name))", "def insert_to_db(self) -> None:\n query = \"\"\"INSERT INTO Users(Username, Password, Firstname, Surname, Currency_id,\n Has_First_Sign_In, Account_Created, Last_Sign_In)\n VALUES(?,?,?,?,?,?,?,?);\"\"\"\n self.db.commit(query, values=self.to_tuple())", "def create_user(username, password, user_fname, user_lname, email, profile_picture=\"/static/img/profile_pictures/default.png\"):\n\n user = User(username=username, password=password, user_fname=user_fname, user_lname=user_lname, profile_picture=profile_picture, email=email)\n\n db.session.add(user)\n db.session.commit()\n\n return user", "def create_user(fname, lname, email, password, 
phone_number):\n user = User(fname = fname, lname = lname , email = email ,password = password, phone_number = phone_number)\n #setting password hash\n user.set_password(password)\n db.session.add(user)\n db.session.commit()\n\n return user" ]
[ "0.8021572", "0.750744", "0.74803704", "0.7311251", "0.72799855", "0.724296", "0.72236335", "0.71949494", "0.7147477", "0.7138259", "0.7131161", "0.7060654", "0.704185", "0.6993125", "0.6962196", "0.6949189", "0.6949064", "0.6937117", "0.6913708", "0.68980956", "0.6873912", "0.6861509", "0.6831878", "0.6828941", "0.68157613", "0.68154454", "0.6802118", "0.6772887", "0.67684376", "0.6718999", "0.67056006", "0.67000365", "0.66998905", "0.66987294", "0.66868734", "0.6660122", "0.6659858", "0.66597736", "0.66578245", "0.66534543", "0.6634948", "0.66322076", "0.66322076", "0.66264427", "0.66193086", "0.66161156", "0.6606426", "0.6600897", "0.6593656", "0.6572708", "0.65691155", "0.6565211", "0.6557789", "0.6541455", "0.65413177", "0.653451", "0.6514323", "0.65048707", "0.65047103", "0.6503823", "0.6502327", "0.6484078", "0.64815813", "0.6481302", "0.64583504", "0.64548516", "0.6454357", "0.645169", "0.6450289", "0.64486504", "0.6443174", "0.6440162", "0.64300317", "0.64224946", "0.64223975", "0.6412929", "0.64075136", "0.64047116", "0.6382864", "0.6352902", "0.6334694", "0.63330096", "0.6329671", "0.6326091", "0.6318523", "0.63051903", "0.6300459", "0.6294922", "0.6291608", "0.62911326", "0.6288776", "0.62844926", "0.6275051", "0.6274929", "0.6273431", "0.6254883", "0.6253868", "0.6250859", "0.6246149", "0.6241008" ]
0.7375597
3
Makes deposit/withdrawal from an account and updates balance.
def make_transaction():
    account_id = request.json['account_id']
    aux_account = [account for account in accounts if account['id'] == account_id]
    if len(aux_account) == 0:
        abort(404)
    account_balance = Decimal(aux_account[0].get('balance')).quantize(Decimal('0.00'))
    transaction = request.json['transaction']
    transaction_amount = Decimal(abs(request.json['amount'])).quantize(Decimal('0.00'))
    if not request.json:
        abort(400)
    if transaction not in ['withdrawal', 'deposit']:
        abort(400, f'Invalid transaction name: {transaction}')
    if transaction == 'withdrawal':
        transaction_amount = transaction_amount*-1
    # the user can't withdraw more than the account has
    validation_sum = (account_balance + transaction_amount).quantize(Decimal('.01'), rounding=ROUND_DOWN)
    if validation_sum >= 0:
        for real_account in accounts:
            if real_account.get('id') == account_id:
                real_account['balance'] = round(float(validation_sum), 2)
    else:
        abort(400, {'error': 'Not enough funds for this transaction'})
    return json.dumps({f'{transaction.capitalize()} Done. New balance': str(validation_sum)}, ensure_ascii=False), 200
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deposit(account, amount):\n pass", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, amount):\n self.balance += amount", "def deposit(self, deposit_money):\r\n self.balance += deposit_money", "def deposit(self, amount):\n self.balance += amount\n self.transactions.append((\"Deposit\", amount))\n print \"Your new balance is $%d.\" % self.balance", "def deposit(self, amount):\n self.balance = self.balance + amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\n self.balance += amount\n return self.balance", "def deposit(self, amount):\r\n new_balance = self['get']('balance') + amount\r\n self['set']('balance', new_balance)\r\n return self['get']('balance')", "def deposit(self, amount):\n self.__balance += amount\n return self.__balance", "def make_deposit(conn, userid, acctype, amount):\n print('\\n\\nUpdating account user:{}, type:{}, amount:{}'.format(userid, acctype, amount))\n with conn.cursor() as curs:\n res = curs.execute(\"\"\"UPDATE accounts\n SET balance=%s\n WHERE owner_id=%s AND type=%s\"\"\", (amount, userid, acctype))\n if res is not None:\n print(res)", "def deposit_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.deposit_money(credentials)\n start_again() if result else BankOperationsUi.deposit_money()", "def deposit(self, account_number: int, deposit: float): \n self._accounts[account_number][1] += deposit", "def deposit(self, amount):\n connection = sqlite3.connect('/home/BorneAgain/Desktop/flasktest/accounts.db')\n\n cursor = connection.cursor()\n\n if self.getBalance() + amount > 0:\n cursor.execute(\"\"\"update accounts set amount=? where name =?;\"\"\", (amount+self.getBalance(), self.name))\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n else:\n \n cursor.execute(\"\"\"update accounts set amount=? where name =?;\"\"\", (0, self.name))\n\n cursor.execute(\"\"\"insert into history (username,dt,amount) values (?,?,?);\"\"\", (self.name, datetime.utcnow(), amount))\n connection.commit()", "def withdrawMoney(self, withdraw_amount):\r\n self.balance_amt = self.balance_amt - withdraw_amount", "def withdraw(account, amount):\n pass", "def withdraw(self, amount):\n self.balance -= amount\n if self.balance < 10:\n self.balance -= 5\n self.fees += 5", "def deposit(self, amount):\n self.dep = amount\n self.balance += self.dep", "def do_withdraw(self, args):\n \n amount = float(input(\"How much? \"))\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n if amount > balance:\n print(\"Insufficient funds! Withdrawl canceled.\")\n print(\"Use the `balance` command to check your account balance\")\n return\n \n balance -= amount\n now = time()\n self.cur.execute(\"INSERT INTO withdrawls VALUES (?,?)\", (now, amount))\n self.cur.execute(\"INSERT INTO balance VALUES (?,?,?)\", (now, 0.0, balance))\n self.db.commit()\n print(\"Withdrawl complete. 
Your new balance is $%.2f\" % balance)", "def deposit(self, amount):\n self.transactions += [('deposit', amount)]\n self.balance = self.balance + amount\n return self.balance", "def deposit(self, amount) -> None:\n self._balance += amount\n return None", "def withdraw(self, amount):\n self.balance -= amount", "def withdraw_money():\n print(\"\\n\")\n print(messages.account_credentials)\n u_id = pyip.inputInt(\"Your Id: \", greaterThan=0)\n password = pyip.inputPassword(\"Your Password: \")\n\n credentials = {\"id\":u_id, \"password\":password}\n result = BankOperationsBackend.withdraw_money(credentials)\n start_again() if result else BankOperationsUi.withdraw_money()", "def deposit(self, amount):\r\n self.balance = self.balance + amount\r\n amount = abs(amount)\r\n self.transactions.append(+amount)\r\n return amount", "def deposit(amt) :\r\n\tglobal bal\r\n\tbal_in = bal\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r\n\t# (bal >= 0)\r\n\t# (bal == bal_in)\r\n\tbal = bal + amt\r\n\t#PREMISES FOR ATTACHED PROOF, IF ANY: \r\n\t# (bal == (bal_old + amt))\r\n\t# (amt >= 0)\r\n\t# (bal_old >= 0)\r\n\t# (bal_old == bal_in)\r\n\t#PREMISES FOR NEXT LINE: \r\n\t# (amt >= 0)\r", "def deposit(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(amount)\n else:\n print('deposit error')", "def update_balance(self):\n if self.calculated_balance < 0:\n raise AccountBalanceError('calculated_balance on account {} is below 0'.format(self))\n self.balance = self.calculated_balance\n self.save()", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def balance(self, balance):\n\n self._balance = balance", "def cash_deposit(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to Deposit:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n old_balance = MY_MEMBER[i].balance\n MY_MEMBER[i].balance += amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Depositing Cash******\"\n print\"your Old Bank balance: %r\" % old_balance\n print\"Amount Deposited: %r\" % amount\n print\"your New Bank balance: %r\" % new_balance\n print\"*************************\"\n what_to_do(name, bank_id, password)", "async def update_account_balance():\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"base_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.base_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # BTC\n\n try:\n balance = App.client.get_asset_balance(asset=App.config[\"quote_asset\"])\n except Exception as e:\n log.error(f\"Binance exception in 'get_asset_balance' {e}\")\n return\n\n App.quote_quantity = Decimal(balance.get(\"free\", \"0.00000000\")) # USD\n\n pass", "def deposit(self, amount):\n\n print(\"\\nDeposit - {self.name}\".format(self=self))\n\n # checks for negative amount value\n if amount < 0:\n print(\"Cannot deposit £{0:.2f}\".format(amount))\n print(\"Deposit amount cannot be a negative value.\")\n \n # adds amount to account balance\n else:\n self.balance += amount\n print(\"{0} has deposited £{1:.2f}. 
New balance is £{2:.2f}\".format(self.name, amount, self.balance))", "def addMoney(self, deposit_amount):\r\n self.balance_amt = self.balance_amt + deposit_amount", "def deposit(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] += float(amount)\r\n self.balance += float(amount)", "def withdraw_money(transaction):\n conn = create_connection(database)\n\n sql = ''' UPDATE card\n SET balance = balance - ?\n WHERE number = ?'''\n\n with conn:\n cur = conn.cursor()\n cur.execute(sql, transaction)\n\n conn.commit()", "def updateAccountBalance(self):\n account = self.tdameritrade.getAccount()\n\n liquidation_value = float(\n account[\"securitiesAccount\"][\"currentBalances\"][\"liquidationValue\"])\n\n available_for_trading = float(\n account[\"securitiesAccount\"][\"currentBalances\"][\"cashAvailableForTrading\"])\n\n self.users.update_one({\"Name\": self.user[\"Name\"]}, {\"$set\": {\n f\"Accounts.{self.account_id}.Account_Balance\": liquidation_value, f\"Accounts.{self.account_id}.Available_For_Trading\": available_for_trading}})", "def withdraw(self, amount):\n if self.overdrawn:\n print('You have overdrawn, please add more money!')\n return self.balance\n self.balance = self.balance - amount\n return self.balance", "def test_withdraw_amount_view(self):\n self.account.current_balance = 100000\n self.account.save()\n\n amount = random.randint(10, 100000)\n client.force_authenticate(user=self.account.user, token=self.token)\n url = reverse('customer_withdraw')\n request = client.post(url, {'amount': amount}, format='json')\n self.account.refresh_from_db()\n self.assertEqual(100000-amount, self.account.current_balance)", "def deposit(holder):\n account = Account.query.filter_by(holder=holder).first()\n if not account:\n return jsonify({\"error\": \"Account does not exist\"})\n amount = request.json.get(\"amount\")\n account.balance += amount\n db.session.commit()\n return jsonify(\n {\n \"holder\": account.holder,\n \"balance\": account.balance,\n \"message\": \"The deposit has been processed\",\n }\n )", "def deposit(self, amount=None):\n if amount is None:\n amount = random() * 1000\n acct_info = {\"account_num\": choice(TRANSACTION_ACCT_LIST),\n \"routing_num\":\"111111111\"}\n transaction = {\"account\": json.dumps(acct_info),\n \"amount\": amount,\n \"uuid\": generate_username()}\n with self.client.post(\"/deposit\",\n data=transaction,\n catch_response=True) as response:\n if response.url is None or \"failed\" in response.url:\n response.failure(\"deposit failed\")", "def withdraw(self, amount):\n self.withdrw = amount\n \n if (self.balance-self.withdrw) < 0:\n self.balance = self.balance - 5 - self.withdrw\n self.fee += 5\n else:\n self.balance -= self.withdrw", "def withdraw(self, amount):\r\n self.balance = self.balance - amount\r\n self.transactions.append(-amount)\r\n return amount", "def set_balance(self, balance=0.0):\n self.balance = balance", "def set_balance(self, balance=0.0):\n self.balance = balance", "def withdrawMoney(self, withdraw_amount):\r\n if (self.balance_amt - withdraw_amount) > 0:\r\n self.balance_amt = self.balance_amt - withdraw_amount\r\n else:\r\n raise WithdrawError #Exception('Overdraft withdrawal Error. 
Cannot withdraw more than amount in account balance: {}'.format(self.balance_amt))\r", "def withdrawal(self, amount):\n if self.balance - amount < self.minimum_balance:\n print \"This would take you below your minimum balance.\"\n return\n else:\n self.balance -= amount\n print \"Please take your cash.\"\n print \"Your balance is now $%d.\" % self.balance\n self.transactions.append((\"Withdrawal\", amount))", "def withdraw(self, amount):\n self.deposit(-amount)", "async def balance(self, ctx: commands.Context, user: discord.Member = None):\r\n if user is None:\r\n user = ctx.author\r\n\r\n bal = await bank.get_balance(user)\r\n currency = await bank.get_currency_name(ctx.guild)\r\n max_bal = await bank.get_max_balance(ctx.guild)\r\n if bal > max_bal:\r\n bal = max_bal\r\n await bank.set_balance(user, bal)\r\n await ctx.send(\r\n _(\"{user}'s balance is {num} {currency}\").format(\r\n user=user.display_name, num=humanize_number(bal), currency=currency\r\n )\r\n )", "async def balance(self, ctx):\n try:\n cash = await ctx.bot.pool.fetchrow(f'select cash from wallet where id={ctx.author.id}')\n\n if cash is None:\n await ctx.bot.pool.execute(f'insert into wallet values ({ctx.author.id}, 0);')\n return await ctx.send('You do not have a wallet yet.')\n\n if cash[0] is None:\n return await ctx.send('You do not have a wallet yet.')\n\n await ctx.send(f'You have {cash[0]} robux.')\n except Exception as e:\n await ctx.send(e)", "def cash_withdrawal(amt):\r\n global withdraw_money\r\n global balance_money\r\n withdraw_money = amt\r\n print(\"Amout enetered : \", withdraw_money)\r\n balance_money = balance_money - withdraw_money\r\n print(\"Withdraw success\")", "def make_payment(self,amount):\n self._balance-=amount", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\n if amount > self.balance:\n raise RuntimeError('Amount greater than available balance.')\n self.balance -= amount\n return self.balance", "def deposit(self, account_id: int, amount: float) -> Dict[bool, Accounts]:\n raise Exception(\"Method should be implemented\")", "def save(self, *args, **kwargs): #pylint: disable=W0221\n created = not self.uuid\n if created:\n existing_balance = self.account.calculated_balance\n else:\n all_other_active_transactions = [x for x in self.account.transactions.all() if x != self and x.active]\n existing_balance = sum(x.amount for x in all_other_active_transactions)\n\n if not self.active:\n pass\n elif (existing_balance + self.amount) < 0:\n raise AccountBalanceError(\n 'Balance of account {} would be brought below 0'.format(self.account)\n )\n\n instance = super().save(*args, **kwargs)\n self.account.update_balance()\n return instance", "def withdraw(self, currency, amount, address):\n return self.__call__('balance', 'withdrawcurrency',\n {\"currencyname\": currency, \n \"quantity\": amount, \n \"address\": address})", "def updateAccountBalance(self):\n account = self.tdameritrade.getAccount()\n\n liquidation_value = float(\n account[\"securitiesAccount\"][\"currentBalances\"][\"liquidationValue\"])\n\n self.users.update_one({\"Name\": self.user[\"Name\"]}, {\"$set\": {\n f\"Accounts.{self.account_id}.Account_Balance\": liquidation_value}})", "def withdraw(self, amount):\n\n print(\"\\nWithdrawal - {self.name}\".format(self=self))\n\n # retrieves the available balance in the account\n availableBalance = self.getAvailableBalance()\n \n # checks for negative 
amount value \n if amount < 0:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Deposit amount cannot be a negative value.\")\n\n # checks whether amount requested is greater than the available balance\n elif amount > availableBalance:\n print(\"Cannot withdraw £{0:.2f}\".format(amount))\n print(\"Insufficient funds.\")\n\n # subtracts amount from account balance\n else:\n self.balance -= amount\n print(\"{0} has withdrew £{1:.2f}. New balance is £{2:.2f}\".format(self.name, amount, self.balance))", "def deposit(account):\r\n print(\"Your account balance is $\", format(account, \"0.2f\"), sep='')\r\n while True:\r\n try:\r\n deposit_amount = int(input(\"Enter deposit amount. $\"))\r\n break\r\n except ValueError:\r\n print(\"Error. Must be a whole number.\")\r\n account += deposit_amount\r\n print(\"Your new account balance is $\", format(account, \"0.2f\"), sep='')", "def withdraw(self, amount):\n if amount > self.balance:\n raise ValueError('insufficient funds to withdraw $%.2f' % amount)\n self.balance -= amount\n return self.balance", "def withdraw(self, amount):\n self.transactions += [('withdraw', amount)]\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def withdrawal(cls, amount):\n if amount >= 0 and cls.is_logged_in():\n cls.__current_acct.__transaction(-amount)\n else:\n print('withdrawal error')", "def save(self, *args, **kwargs):\n wallet = self.wallet.withdraw(self.value)\n super(Payment, self).save(*args, **kwargs)", "def withdraw(self, amount):\n if amount > self.balance:\n return 'Insufficient funds'\n self.balance = self.balance - amount\n return self.balance", "def deposit(self, amount):\n message = self.account.deposit(float(amount))\n if message:\n return message\n else:\n self.myView.displayAccount()\n return \"success\"", "def account_balance(self, account_balance):\n\n self._account_balance = account_balance", "def account_balance(self, account_balance):\n\n self._account_balance = account_balance", "def Credit(self):\n self.Deposit()\n self.balance += self.amount\n print \"balance credited\"\n print \" Total balance =\",self.balance\n return self.balance", "def do_balance(self, args):\n if not self._check_args(args):\n return\n else:\n self.wallet.update_balances()\n balance = self.wallet.addresses.get(args, -1)['balance']\n if balance == -1:\n print(\"Address not found.\")\n else:\n print(balance)", "def withdraw(self, amount):\r\n balance = self['get']('balance')\r\n if amount > balance:\r\n return 'Insufficient funds'\r\n self['set']('balance', balance - amount)\r\n return self['get']('balance')", "def handle_balance_update(self, form):\n\n # Update balances of old and new accounts\n account_object: Account = form.cleaned_data.get('account', None)\n if account_object:\n if account_object == self.data_previous_account:\n \"\"\"\n Case 1: New account is same as previous account\n \"\"\"\n # Find difference between new and old balances, and deduct the difference from account\n balance_diff = form.cleaned_data.get('amount', None) - self.data_previous_amount\n account_object.balance -= balance_diff\n account_object.save()\n else:\n \"\"\"\n Case 2: New account is not the same as previous account\n \"\"\"\n # Add old amount to the previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()\n\n # Remove new amount from new account\n account_object.balance -= self.object.amount\n account_object.save()\n elif 
self.data_previous_account:\n \"\"\"\n Case 3:\n Previous account exists but was removed from expense; \n no account listed on submitted form\n \"\"\"\n # Add old amount to previous account\n self.data_previous_account.balance += self.data_previous_amount\n self.data_previous_account.save()", "def _balance_update(self):\n return_rate = self.df.loc[self.currentStep, \"return_Close\"]\n self.buy_amount += return_rate * self.buy_amount\n self.sell_amount -= return_rate * self.sell_amount", "def deposit(self, amount, another_user=None):\n if another_user:\n another_user.deposit(amount)\n self.register_operation(self.ACTIONS['RECEIVING'], amount)\n self.register_operation(self.ACTIONS['TRANSFERING'], amount, another_user)\n else:\n self.__balance = float(Decimal(str(self.__balance + amount)))\n self.register_operation(self.ACTIONS['RECEIVING'], amount)\n\n return True # False is never reached", "def deposit(self, cr, uid, ids, amount, context=None):\n record = self.browse(cr, uid, ids, context=context)[0]\n current_amount = record.current_amount\n deposit_amount = record.deposit_amount\n record.write({'current_amount':current_amount + amount,\n 'deposit_amount':deposit_amount + amount })\n return True", "def withdraw(self, currency, amount, address):\n pass", "def draw_money(name, bank_id, password):\n amount = int(raw_input(\"Enter Amount to withdraw:\"))\n for i in range(0, len(MY_MEMBER)):\n if MY_MEMBER[i].Name == name and \\\n MY_MEMBER[i].Password == password and \\\n MY_MEMBER[i].BankID == bank_id:\n if MY_MEMBER[i].balance >= amount:\n MY_MEMBER[i].balance -= amount\n new_balance = MY_MEMBER[i].balance\n print\"*************************\"\n print\"****Withdrawing Cash*****\"\n print\"your New Bank balance: %r\" % new_balance\n print\"Amount Withdraw: %r\" % amount\n print\"*************************\"\n\n else:\n print\"your Account Balance is low!! 
\"\n print\"Transaction Failed...\"\n what_to_do(name, bank_id, password)\n return\n what_to_do(name, bank_id, password)", "def deposit(self, amount, category=None, trans=None):\n\n # validates the amount is positive\n self.validate_amount(amount)\n\n #\n # creates the transaction\n if(category == None):\n category = TransactionType.objects.get(pk=TransactionTypeConstants.BonusCashDeposit.value)\n self.create(category,amount, trans)\n Logger.log(ErrorCodes.INFO, \"Bonus Cash Deposit\", self.user.username+\" deposited \"+str(amount)+\" \"+self.accountName+\" into their account.\")", "def withdraw(self, amount, budget):\r\n if budget != \"Total Balance\":\r\n assert budget in self.budgets, \"Specified budget doesn't exist\"\r\n self.budgets[budget] -= float(amount)\r\n self.balance -= float(amount)", "def bankerInvest(account, ongAmount):\n # RequireWitness(account)\n if CheckWitness(account) == False:\n # \"Check witness failed!\",\n Notify([\"BankerInvestErr\", 101])\n return False\n\n currentRound = getCurrentRound()\n\n # Require(getRoundGameStatus(currentRound) == STATUS_ON)\n if getRoundGameStatus(currentRound) != STATUS_ON:\n # Please wait for the admin to set initial investment!\n Notify([\"BankerInvestErr\", 102])\n return False\n\n # Require(_transferONG(account, ContractAddress, ongAmount))\n res = _transferONG(account, ContractAddress, ongAmount)\n if res == False:\n # Transfer ONG failed!\n Notify([\"BankerInvestErr\", 103])\n return False\n # try to update banker list\n bankersListKey = concatKey(concatKey(ROUND_PREFIX, currentRound), BANKERS_LIST_KEY)\n bankersListInfo = Get(GetContext(), bankersListKey)\n bankersList = []\n if bankersListInfo:\n bankersList = Deserialize(bankersListInfo)\n if checkInBankerList(account, bankersList):\n bankersList.append(account)\n bankersListInfo = Serialize(bankersList)\n Put(GetContext(), bankersListKey, bankersListInfo)\n else:\n bankersList.append(account)\n bankersListInfo = Serialize(bankersList)\n Put(GetContext(), bankersListKey, bankersListInfo)\n\n dividendForBankersPercentage = getDividendForBankersPercentage()\n runningVaultPercentage = getRunningVaultPercentage()\n\n # add dividend to all the bankers, 48%\n dividend = Div(Mul(ongAmount, dividendForBankersPercentage), 100)\n\n # update profit per investment for bankers\n bankersInvestment = getBankersInvestment(currentRound)\n if bankersInvestment != 0:\n profitPerInvestmentForBankersToBeAdd = Div(Mul(dividend, MagnitudeForProfitPerSth), bankersInvestment)\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), PROFIT_PER_INVESTMENT_FOR_BANKERS_KEY), Add(profitPerInvestmentForBankersToBeAdd, getProfitPerInvestmentForBankers(currentRound)))\n else:\n # there will be no dividend\n dividend = 0\n # add running vault, 50%\n runningVaultToBeAdd = Div(Mul(ongAmount, runningVaultPercentage), 100)\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), RUNNING_VAULT_KEY), Add(getRunningVault(currentRound), runningVaultToBeAdd))\n\n # add running vault balance\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), concatKey(BANKER_RUNING_VAULT_BALANCE_PREFIX, account)), Add(getBankerBalanceInRunVault(currentRound, account), runningVaultToBeAdd))\n # update real time running vault\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), REAL_TIME_RUNNING_VAULT), Add(getRealTimeRunningVault(currentRound), runningVaultToBeAdd))\n\n # treat the rest as the commission fee to admin, 2%\n restOngAmount = Sub(Sub(ongAmount, dividend), 
runningVaultToBeAdd)\n # update the commission fee\n Put(GetContext(), COMMISSION_KEY, Add(getCommission(), restOngAmount))\n\n # update the account (or the banker) 's dividend\n updateBankerDividend(account)\n # update account's investment\n bankerKey = concatKey(concatKey(ROUND_PREFIX, currentRound), concatKey(BANKER_INVEST_BALANCE_PREFIX, account))\n Put(GetContext(), bankerKey, Add(getBankerInvestment(currentRound, account), ongAmount))\n\n # update total bankers' investment\n Put(GetContext(), concatKey(concatKey(ROUND_PREFIX, currentRound), BANKERS_INVESTMENT_KEY), Add(bankersInvestment, ongAmount))\n\n # update total ong amount\n Put(GetContext(), TOTAL_ONG_KEY, Add(getTotalONG(), ongAmount))\n\n Notify([\"bankerInvest\", currentRound, account, ongAmount])\n\n return True", "def withdraw(self,withdrawal_money):\r\n if self.balance < withdrawal_money:\r\n print(\"Funds are insufficient\")\r\n \r\n else:\r\n self.balance -= withdrawal_money\r\n print(\"Withdrawal Accepted\")", "def update_account(row, account):\n if row['LAST_UPDATED_FROM_PAYGOV']:\n updated_at = datetime_from(row['LAST_UPDATED_FROM_PAYGOV'])\n account.donations.filter(time__lte=updated_at).delete()\n if account.category == Account.PROJECT:\n set_balances(row, account)\n account.save()", "def deposit_to_account(list_of_all_accounts_known, ID_account_to_deposit_to, money_amount_to_deposit):\n for account in list_of_all_accounts_known:\n if ID_account_to_deposit_to == account.account_id:\n account.balance += money_amount_to_deposit", "def do_balance(self,args):\n \"\"\"Can show total, available(available for trading), or reserved(reserved in open orders)\"\"\"\n \"\"\"usage: balance [available/reserved](optional)\"\"\"\n args = stripoffensive(args)\n if 'available' in args:\n btc,usd = available() \n elif 'reserved' in args:\n btc,usd = reserved()\n else:\n btc,usd = bal()\n word = args if args else \"total\"\n print 'Your %s balance is %.8f BTC and $%.2f USD ' % (word,btc,usd)\n if word == \"total\":\n last = D(bitstamp.ticker()['last'])\n print 'Account Value: $%.2f @ Last BTC Price of $%.2f' % (btc*last+usd,last)", "def transfer(self, transferee, transfer_amount):\n self.withdraw(transfer_amount)\n transferee.deposit(transfer_amount)\n return self.balance", "def do_balance(self, args):\n \n balance = self.cur.execute(\"SELECT * FROM balance ORDER BY date DESC\").fetchone()[2]\n print(\"Your current balance is $%.2f\" % balance)", "def withdraw(self, amount, trigger_transaction, trans=None):\n\n #\n # validates the amount is positive\n self.validate_amount(amount)\n\n #\n # Validate the user has the amount for the withdraw\n if not self.check_sufficient_funds(amount):\n raise OverdraftException(self.user.username)\n\n #\n # creates the transaction\n category = TransactionType.objects.get(pk=TransactionTypeConstants.BonusCashWithdraw.value)\n\n #\n # makes the amount negative because it is a withdrawal\n self.create(category, -amount, trans)\n self.transaction_detail.trigger_transaction = trigger_transaction\n self.transaction_detail.save()\n\n Logger.log(ErrorCodes.INFO,\"Bonus Cash Withdraw\", self.user.username+\" withdrew \"+str(amount)+\" \"+self.accountName+\" from their account.\")", "async def deposit(ctx, money:int):\n author = ctx.message.author\n if str(author) in settings.BOT_ADMIN:\n database.add_pokedollars(author, money)\n await ctx.send(\"funds deposited\")\n else:\n await ctx.send(\"You are not the bot admin. 
Go awai.\")", "def balance(self) -> float:\n\t\tbalance = 0\n\t\tfor transaction in self.transactions:\n\t\t\tsign = 1 if transaction.receiving_account == self.__number else -1\n\t\t\tbalance += sign*transaction.usd*transaction.completed\n\t\t# The bank has infinite money\n\t\tif self.name == Account.BANK:\n\t\t\tbalance = Decimal('Infinity')\n\t\treturn balance", "def set_balance(self, user, to):\n to_exec = \"UPDATE users SET balance = %s WHERE snowflake_pk = %s\"\n self.__cursor.execute(to_exec, (to, user.id,))\n self.__connection.commit()", "def OperateAccount(self, user_id, amount_money):\n user_data = self.db_manager.GetData(user_id)\n user_data = self._parsetouserDTO(user_data)\n old_balance = user_data.GetAmountMoney()\n new_balance = int(old_balance) + int(amount_money)\n if new_balance >= 0:\n user_data.SetAmountMoney(new_balance)\n self.db_manager.UpdateData(user_id, user_data.GetAmountMoney())\n return JsonSerializer.SerializeObject(user_data)\n else:\n return \"{\\\"ERROR\\\":\\\"Operation denied insufficient money\\\"}\"", "def update_db_account_balances(self, quote:str, **kwargs):\n\n\t\t# real_balance \t\t\t\t= kwargs.get('real_balance', None)\n\t\t# real_locked \t\t\t\t= kwargs.get('real_locked', None)\n\t\t# internal_balance \t\t\t= kwargs.get('internal_balance', None)\n\t\t# internal_locked \t\t\t= kwargs.get('internal_locked', None)\n\t\t# internal_profit \t\t\t= kwargs.get('internal_profit', None)\n\t\t# internal_quote_fees \t\t= kwargs.get('internal_quote_fees', None)\n\t\t# internal_BNB_fees \t\t\t= kwargs.get('internal_BNB_fees', None)\n\t\t# internal_profit_minus_fees \t= kwargs.get('internal_profit_minus_fees', None)\n\t\t# quoteAssetPrecision \t\t= kwargs.get('quoteAssetPrecision', None)\n\t\t# BNB_Precision \t\t\t\t= kwargs.get('BNB_Precision', None)\n\n\t\tconn \t\t\t = sqlite3.connect(self.name, detect_types=sqlite3.PARSE_DECLTYPES)\n\t\tconn.row_factory = sqlite3.Row\n\t\tc \t\t\t\t = conn.cursor()\n\n\t\t# # Get the current balance in the db\n\t\t# c.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t# accountBalance \t \t = c.fetchone()\n\t\t# int_balance \t\t = dict(accountBalance)['internal_balance']\n\t\t# int_locked \t\t \t = dict(accountBalance)['internal_locked']\n\t\t# int_profit \t\t = dict(accountBalance)['internal_profit']\n\t\t# int_quote_fees \t \t = dict(accountBalance)['internal_quote_fees']\n\t\t# int_BNB_fees \t\t = dict(accountBalance)['internal_BNB_fees']\n\t\t# int_profit_minus_fees = dict(accountBalance)['internal_profit_minus_fees']\n\t\t#\n\t\t# values = (real_balance,\n\t\t# \t\t real_locked,\n\t\t# \t\t format(round(Decimal(int_balance) + Decimal(internal_balance), quoteAssetPrecision), 'f'),\n\t\t# \t\t format(round(Decimal(int_locked) + Decimal(internal_locked), quoteAssetPrecision), 'f'),\n\t\t# \t\t format(round(Decimal(int_profit) + Decimal(internal_profit), quoteAssetPrecision), 'f'),\n\t\t# \t\t format(round(Decimal(int_quote_fees) + Decimal(internal_quote_fees), quoteAssetPrecision), 'f'),\n\t\t# \t\t format(round(Decimal(int_BNB_fees) + Decimal(internal_BNB_fees), BNB_Precision), 'f'),\n\t\t# \t\t format(round(Decimal(int_profit_minus_fees) + Decimal(internal_profit_minus_fees), quoteAssetPrecision), 'f'),\n\t\t# \t\t quote)\n\t\t#\n\t\t# c.execute('UPDATE account_balances SET real_balance = ?,'\n\t\t# \t\t \t\t\t\t\t\t\t 'real_locked = ?,'\n\t\t# \t\t\t\t\t\t\t\t 'internal_balance = ?,'\n\t\t# \t\t\t\t\t\t\t\t 'internal_locked = ?,'\n\t\t# \t\t\t\t\t\t\t\t 'internal_profit = ?,'\n\t\t# 
\t\t\t\t\t\t\t\t \t 'internal_quote_fees = ?,'\n\t\t# \t\t\t\t\t\t\t\t \t 'internal_BNB_fees = ?,'\n\t\t# \t\t\t\t\t\t\t\t \t 'internal_profit_minus_fees = ?'\n\t\t# \t\t 'WHERE quote = ?', values)\n\n\t\tif 'real_balance' in kwargs:\n\t\t\tc.execute('UPDATE account_balances SET real_balance = ? WHERE quote = ?', (kwargs['real_balance'], quote))\n\n\t\tif 'real_locked' in kwargs:\n\t\t\tc.execute('UPDATE account_balances SET real_locked = ? WHERE quote = ?', (kwargs['real_locked'], quote))\n\n\t\tif 'internal_balance' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_balance']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_balance'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_balance = ? WHERE quote = ?', values)\n\n\t\tif 'internal_locked' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_locked']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_locked'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_locked = ? WHERE quote = ?', values)\n\n\t\tif 'internal_profit' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_profit']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_profit'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_profit = ? WHERE quote = ?', values)\n\n\t\tif 'internal_quote_fees' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_quote_fees']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_quote_fees'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_quote_fees = ? WHERE quote = ?', values)\n\n\t\tif 'internal_BNB_fees' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_BNB_fees']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_BNB_fees'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_BNB_fees = ? WHERE quote = ?', values)\n\n\t\tif 'internal_profit_minus_fees' in kwargs:\n\t\t\tc.execute('SELECT * FROM account_balances WHERE quote = ?', (quote, ))\n\t\t\tov = dict(c.fetchone())['internal_profit_minus_fees']\n\t\t\tvalues = (str(Decimal(ov) + Decimal(kwargs['internal_profit_minus_fees'])), quote)\n\t\t\tc.execute('UPDATE account_balances SET internal_profit_minus_fees = ? 
WHERE quote = ?', values)\n\n\t\tconn.commit()", "def update_balance(self, multiplier: int) -> int:\n self.user.balance += DEFAULT_BET * multiplier\n return self.user.balance", "def main():\n account1 = Money(87, 15)\n account2 = Money(5, 5)\n account3 = Money(99, 99)\n\n # Display each account balance\n account1.display()\n account2.display()\n account3.display()\n\n # Now add 20 cents to each\n account1.add_cents(20)\n account2.add_cents(20)\n account3.add_cents(20)\n\n # Display each account balance again\n print()\n account1.display()\n account2.display()\n account3.display()", "def withdraw(holder):\n account = Account.query.filter_by(holder=holder).first()\n amount = request.json.get(\"amount\")\n if not account:\n return jsonify({\"error\": \"Account does not exist\"})\n if account.balance >= amount:\n account.balance -= amount\n db.session.commit()\n return jsonify(\n {\n \"holder\": account.holder,\n \"balance\": account.balance,\n \"message\": \"The withdraw has been processed\",\n }\n )\n return jsonify({\"error\": \"The account balance is insufficient\"})", "def test_client_bank_account_update(self):\n pass", "def transfer_amount(self, conn, data_subtract, data_add):\n sql_subtract = 'UPDATE card SET balance = balance - ? WHERE number = ?;'\n sql_add = 'UPDATE card SET balance = balance + ? WHERE number = ?;'\n\n c = conn.cursor()\n c.execute(sql_subtract, data_subtract)\n conn.commit()\n\n c = conn.cursor()\n c.execute(sql_add, data_add)\n conn.commit()\n\n # print(f\"amount {data_add[0]} was added to account {data_add[1]}\")\n print(\"Success!\")\n self.menus()", "def make_withdraw(balance):\n\tdef withdraw(amount):\n\t\t# declare the name 'balance' nonlocal at the top of body of the function in which it is re-assigned\n\t\tnonlocal balance # nonlocal change of the value of balance will happen in the frame of make_withdraw !!!\n\t\tif amount > balance:\n\t\t\treturn 'insufficient funds'\n\t\tbalance = balance - amount # rebind balance in the first non-local frame in which it was bound previously\n\t\treturn balance \n\treturn withdraw", "def withdraw(self, user_id, money, **kwargs):\n user = User.objects(user_id=user_id).first()\n\n if money > 0:\n if user.balance >= money:\n print('Cantidad retirada: ', money)\n user.balance = float(user.balance) - float(money)\n user.save()\n else:\n print('No hay fondos suficientes para realizar el retiro.')\n else:\n print('No es posible retirar valores negativos.')" ]
[ "0.7868934", "0.7839314", "0.7839314", "0.7767801", "0.7667896", "0.75666636", "0.75089145", "0.75089145", "0.75089145", "0.74225825", "0.7420341", "0.741208", "0.7388114", "0.7388107", "0.73456854", "0.72894204", "0.7286367", "0.72752404", "0.7254404", "0.72427124", "0.7223686", "0.72235966", "0.7199819", "0.7177579", "0.71494186", "0.7086808", "0.70343417", "0.70139945", "0.7003982", "0.7003982", "0.7003982", "0.6987659", "0.6963732", "0.6945454", "0.69367754", "0.6904975", "0.6900548", "0.6891768", "0.688023", "0.68723243", "0.6859561", "0.68509495", "0.68477094", "0.6837088", "0.6802833", "0.6802833", "0.68018085", "0.6797857", "0.6779588", "0.67627114", "0.6745157", "0.67275524", "0.67225456", "0.6715101", "0.6715101", "0.66941553", "0.6689739", "0.6682109", "0.66739887", "0.66640073", "0.66571337", "0.6652804", "0.6648904", "0.6626594", "0.6607258", "0.6604577", "0.6597235", "0.65711313", "0.65711313", "0.65679806", "0.6561136", "0.654437", "0.65287167", "0.6523993", "0.65212893", "0.6493814", "0.6488537", "0.6472861", "0.6455944", "0.6451586", "0.6444104", "0.6423241", "0.64213365", "0.6419754", "0.64183545", "0.6415008", "0.64102817", "0.64100957", "0.64046335", "0.63593036", "0.6355632", "0.63308924", "0.63298374", "0.6315097", "0.6295863", "0.62931377", "0.6288206", "0.6281442", "0.6278121", "0.62761885" ]
0.6433212
81
Gets the details from an account.
def get_account():
    account_id = request.json['id']
    account = [account for account in accounts if account['id'] == account_id]
    if len(account) == 0:
        abort(404, 'Account not found')
    return json.dumps(account[0], ensure_ascii=False), 200, {'Content-Type': 'text/css; charset=utf-8'}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_account_details(self):\n pass", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def get_account_details(account_id, writer, key):\n query = iroha.query(\n \"GetAccountDetail\", account_id=account_id, writer=writer, key=key\n )\n ic.sign_query(query, user_private_key)\n response = net.send_query(query)\n data = json.loads(response.account_detail_response.detail)\n pprint(data)", "def account_info(self):\n url, params, headers = self.request(\"/account/info\", method='GET')\n\n return self.rest_client.GET(url, headers)", "def get_account(self, account):\n \n pass", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def get(self, account_id):\n self.client.get_account(account_id)", "def get_account_information(self):\n self.account_information = retry(lambda: self.client\n .futures_account_v2())\n return self.account_information", "def account_info(account):\n return {\n 'status': account.status,\n 'availability': account.availability,\n 'blurb': account.message,\n 'email': account.email,\n 'name': account.name,\n 'success': True\n }", "async def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n restype: Literal[\"account\"] = kwargs.pop(\"restype\", _params.pop(\"restype\", \"account\"))\n comp: Literal[\"properties\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"properties\"))\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n request = build_get_account_info_request(\n url=self._config.url,\n restype=restype,\n comp=comp,\n version=self._config.version,\n template_url=self.get_account_info.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers[\"x-ms-client-request-id\"] = self._deserialize(\n \"str\", response.headers.get(\"x-ms-client-request-id\")\n )\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n response_headers[\"Date\"] = self._deserialize(\"rfc-1123\", response.headers.get(\"Date\"))\n response_headers[\"x-ms-sku-name\"] = 
self._deserialize(\"str\", response.headers.get(\"x-ms-sku-name\"))\n response_headers[\"x-ms-account-kind\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-account-kind\"))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def display_accounts_details():\n return Credentials.display_credentials()", "def get_account():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}\".format(wallet)\n\n print(url)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_account(self, accountid):\n payload = {'appkey': self._lr_object._get_api_key(), 'appsecret': self._lr_object._get_api_secret(),\n 'accountid': accountid}\n url = SECURE_API_URL + \"raas/v1/account\"\n return self._lr_object._get_json(url, payload)", "async def get_account_info(\n self,\n **kwargs\n ) -> None:\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n restype = \"account\"\n comp = \"properties\"\n accept = \"application/xml\"\n\n # Construct URL\n url = self.get_account_info.metadata['url'] # type: ignore\n path_format_arguments = {\n 'url': self._serialize.url(\"self._config.url\", self._config.url, 'str', skip_quote=True),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['restype'] = self._serialize.query(\"restype\", restype, 'str')\n query_parameters['comp'] = self._serialize.query(\"comp\", comp, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['x-ms-version'] = self._serialize.header(\"self._config.version\", self._config.version, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.StorageError, response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))\n response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))\n response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))\n response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def accounts_info(self):\r\n param = {}\r\n param['appid'] = self.apiKey\r\n param['nonce'] = int(time.time() * 1000)\r\n param['timestamp'] = int(time.time())\r\n return self.__signed_GET('/api/v1/account/all', param, self.timeout)", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "async def get_user_account(self):\n uri = \"/fapi/v1/account\"\n ts = tools.get_cur_timestamp_ms()\n 
params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", uri, params, auth=True)\n return success, error", "def display_accounts_details():\n return Records.display_records()", "async def get_user_account(self):\n ts = tools.get_cur_timestamp_ms()\n params = {\n \"timestamp\": str(ts)\n }\n success, error = await self.request(\"GET\", \"/api/v3/account\", params, auth=True)\n return success, error", "def get_account(self, account_id, **kwargs):\r\n\r\n if 'mask' not in kwargs:\r\n kwargs['mask'] = 'status'\r\n\r\n return self.account.getObject(id=account_id, **kwargs)", "def get_account(self, *args):\n\n account_data = api.get_account(\n *args,\n api_key=self.__creds.api_key_v2)\n\n return en.Account(creds=self.__creds, **account_data)", "def account(self, account_id: str):\n return get_from_list(self.accounts, \"id\", account_id)", "def get_account():\n\n # get user\n user = g.user\n\n # response\n return jsonify({'user_account': UserAccountAdminSchema().dump(user)}), 200", "def get_account(self):\n return self._account", "def get_account(self):\n return self._account", "def account_credential_details(self) -> Sequence['outputs.AccountCredentialDetailsResponse']:\n return pulumi.get(self, \"account_credential_details\")", "def account_credential_details(self) -> Sequence['outputs.AccountCredentialDetailsResponse']:\n return pulumi.get(self, \"account_credential_details\")", "def get_adaccount(self, account_id, fields=None, batch=False):\n path = 'act_%s' % account_id\n args = {'fields': fields} if fields else {}\n return self.make_request(path, 'GET', args, batch=batch)", "def account_get(request):\n fields = [\"email\", \"token\"]\n\n # serializes the quert string to a dict (neeto)\n args = request.args\n\n query_validation = validate_query_params(args, fields)\n # check that body validation succeeded\n if query_validation[1] != 200:\n return query_validation\n\n auth = azure_refresh_token(args[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n\n try:\n response = {\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n \"data\": account_db.get(args[\"email\"]).to_dict(),\n }\n return jsonHttp200(\"Account returned\", response)\n except:\n return http400(\"Account not found\")", "def _account_info(remote, resp):\n g.oauth_logged_in_with_remote = remote\n resource = get_resource(remote, resp)\n\n valid_roles = current_app.config.get(\n \"OAUTHCLIENT_CERN_OPENID_ALLOWED_ROLES\",\n OAUTHCLIENT_CERN_OPENID_ALLOWED_ROLES,\n )\n cern_roles = resource.get(\"cern_roles\")\n if cern_roles is None or not set(cern_roles).issubset(valid_roles):\n raise OAuthCERNRejectedAccountError(\n \"User roles {0} are not one of {1}\".format(cern_roles, valid_roles),\n remote,\n resp,\n )\n\n email = resource[\"email\"]\n external_id = resource[\"cern_upn\"]\n nice = resource[\"preferred_username\"]\n name = resource[\"name\"]\n\n return dict(\n user=dict(email=email.lower(), profile=dict(username=nice, full_name=name)),\n external_id=external_id,\n external_method=\"cern_openid\",\n active=True,\n )", "def account(request: Request) -> Dict:\n # Get account\n account_id: int = request.matchdict.get(\"account_id\")\n account_obj: Optional[Account] = get_account_by_id(\n session=request.dbsession,\n account_id=account_id,\n )\n # TODO: Check access\n\n\n return {\n \"account\": account_obj,\n }", "def get_account(self, name):\n return self._accounts[name]", "def query_account(self, account: str, fields: str = None):\n 
if fields and (type(fields) != str):\n raise TypeError('fields: %s' % repr(fields))\n args = {'account': account}\n if fields:\n args['fields'] = fields\n ret = self._call_txtrader_api('query_account', args)\n return ret", "def account(self):\n return self.request('/account')", "def get_account_details(self, gov_id, data_key=None):\n\n if data_key == None:\n query = self.iroha.query(\n \"GetAccountDetail\", account_id=f\"{gov_id}@afyamkononi\"\n )\n else:\n query = self.iroha.query(\n \"GetAccountDetail\", account_id=f\"{gov_id}@afyamkononi\", key=data_key\n )\n IrohaCrypto.sign_query(query, self.creator_account_details.private_key)\n\n response = self.net.send_query(query)\n return response.account_detail_response", "def GetAccount(host):\n return FetchUrlJson(host, 'accounts/self')", "def fetch_accounts(self):\n return self.fetch('/accounts')", "def get_info(self):\n\n (app_key,app_secret,access_type) = self.get_dropbox_app_keys()\n sess = session.DropboxSession(app_key, app_secret, access_type)\n sess.set_token(self.access_token_key,self.access_token_secret)\n\n db_client = client.DropboxClient(sess)\n\n #can throw ErrorResponse\n info = db_client.account_info()\n\n message = info\n\n return message", "def test_get_account(self):\n account = Account(self.client, \"support@linode.com\", {})\n\n self.assertEqual(account.email, \"support@linode.com\")\n self.assertEqual(account.state, \"PA\")\n self.assertEqual(account.city, \"Philadelphia\")\n self.assertEqual(account.phone, \"123-456-7890\")\n self.assertEqual(account.tax_id, \"\")\n self.assertEqual(account.balance, 0)\n self.assertEqual(account.company, \"Linode\")\n self.assertEqual(account.address_1, \"3rd & Arch St\")\n self.assertEqual(account.address_2, \"\")\n self.assertEqual(account.zip, \"19106\")\n self.assertEqual(account.first_name, \"Test\")\n self.assertEqual(account.last_name, \"Guy\")\n self.assertEqual(account.country, \"US\")\n self.assertIsNotNone(account.capabilities)\n self.assertIsNotNone(account.active_promotions)\n self.assertEqual(account.balance_uninvoiced, 145)\n self.assertEqual(account.billing_source, \"akamai\")\n self.assertEqual(account.euuid, \"E1AF5EEC-526F-487D-B317EBEB34C87D71\")", "def account_information(self) -> MetatraderAccountInformation:\n return self._accountInformation", "def view_bank_account_details(self) -> None:\n Menu.prompt_view_bank_account_details()\n print(\"Bank Account Details:\")\n print(self.user.account)\n\n for tx_num, tx_details in \\\n self.user.tx_manager.transaction_records.items():\n print(f\"\\nTransaction #{tx_num}:\\n\"\n f\"{tx_details}\")\n\n print(f\"\\nSpending Summary:\")\n print(f\" Starting Bank Balance: \"\n f\"{'{:.2f}'.format(self.user.account.starting_balance)}\")\n print(f\" Total Transactions Amount: \"\n f\"{'{:.2f}'.format(self.user.tx_manager.calc_total_spent())}\")\n print(f\" Closing Bank Account Balance: \"\n f\"{'{:.2f}'.format(self.user.account.current_balance)}\")", "def find_by_account(cls,account):\n for credentials in cls.credential_list:\n if credentials.account == account:\n return credentials", "def get_account(self, account_number):\n\n if not isinstance(account_number, str):\n raise ValueError('Invalid type <{}> for account number'.format(\n type(account_number)))\n\n try:\n if self.di is not None:\n result = self.di.get(account_number)\n else:\n result = self.accounts.get(account_number, None)\n\n except DBConnectionError:\n result = \"Connection error occurred. 
Try Again.\"\n return result", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def account(self, acct):\n aMgr = self.acctManager\n if len(aMgr.accounts) <= acct:\n raise Exception(\"requested unknown account number %i\" % acct)\n return aMgr.account(acct)", "async def test_retrieve_account_information(self):\n account_information = {\n 'broker': 'True ECN Trading Ltd',\n 'currency': 'USD',\n 'server': 'ICMarketsSC-Demo',\n 'balance': 7319.9,\n 'equity': 7306.649913200001,\n 'margin': 184.1,\n 'freeMargin': 7120.22,\n 'leverage': 100,\n 'marginLevel': 3967.58283542\n }\n client.get_account_information = AsyncMock(return_value=account_information)\n actual = await api.get_account_information()\n assert actual == account_information\n client.get_account_information.assert_called_with('accountId')", "def get_user_info(uid):\r\n session = tables.get_session()\r\n account_name = ''\r\n description = ''\r\n if session is None:\r\n return account_name, description\r\n try:\r\n user_account = UserAccount()\r\n account_name = user_account.get_field_by_key(UserAccount.account_name, UserAccount.user_id, uid,\r\n session)\r\n description = user_account.get_field_by_key(UserAccount.description, UserAccount.user_id, uid,\r\n session)\r\n except SQLAlchemyError as err:\r\n LOGGER.error('User login failed: %s', err)\r\n return account_name, description\r\n finally:\r\n session.close()\r\n return account_name, description", "async def get_user_account(self):\n uri = \"/v3/spot/assets\"\n success, error = await self.request(\"GET\", uri, auth=True)\n return success, error", "def accounts():", "def profile(self, name=\"johndoe\"):\r\n url = \"/account/%s\" % name\r\n return self.app.get(url, follow_redirects=True)", "def account(self, account_id):\r\n return resources.Account(self, account_id)", "def get(self, account=None, user=None, account_id=None):\n self.get_object = lambda: account\n return super().get()", "def get_account_info(accountType,baseDN,user,password):\n accounts = []\n searchScope = ldap.SCOPE_SUBTREE\n try:\n l = ldap.initialize('ldap://ldap1.kps')\n l.protocol_version = ldap.VERSION3\n l.simple_bind_s(user, password)\n r = l.search_s(baseDN, searchScope, searchFilter, retrieveAttributes)\n res = ldaphelper.get_search_results(r)\n for record in res:\n cn = record.get_attr_values('cn')[0]\n cn = cn.lower()\n mail = record.get_attr_values('mail')[0]\n mail = mail.lower()\n id = record.get_attr_values('studentID')[0]\n accounts.append(id,cn,email)\n except ldap.LDAPError, e:\n print e\n return accounts", "def getaccount(self, vergeaddress):\n return self.proxy.getaccount(vergeaddress)", "def account(self, account_code):\r\n return acc.Account(self, account_code)", "def get_account():\n\n bus = session_bus()\n\n goa_manager = bus.get_object(GOA_NAME, GOA_PATH)\n\n goa_objects = goa_manager.GetManagedObjects(dbus_interface=OBJECT_MANAGER)\n\n accounts = [\n obj for obj in goa_objects\n if obj != GOA_MANAGER_PATH\n ]\n\n if len(accounts) > 1:\n sys.exit(\"More than one account found.\")\n\n (account_path,) = accounts\n\n return bus.get_object(GOA_NAME, account_path)", "def get(self, id, timeout=None):\n req = AccountGetRequest()\n\n req.id = (id)\n tries = 0\n plumbing_response = None\n while True:\n try:\n plumbing_response = self.stub.Get(\n 
req,\n metadata=self.parent.get_metadata('Accounts.Get', req),\n timeout=timeout)\n except Exception as e:\n if self.parent.shouldRetry(tries, e):\n tries += 1\n self.parent.jitterSleep(tries)\n continue\n raise plumbing.convert_error_to_porcelain(e) from e\n break\n\n resp = models.AccountGetResponse()\n resp.meta = plumbing.convert_get_response_metadata_to_porcelain(\n plumbing_response.meta)\n resp.account = plumbing.convert_account_to_porcelain(\n plumbing_response.account)\n resp.rate_limit = plumbing.convert_rate_limit_metadata_to_porcelain(\n plumbing_response.rate_limit)\n return resp", "def test_duo_account_get(self):\n pass", "def __fetch_user_account(self, guid):\n\n try:\n user_account = UserAccount.objects.get(guid=guid)\n except Exception as e:\n logger.exception(e)\n else:\n return user_account", "def get_account_settings():\n pass", "def get_profile_details(self):\n cursor = self.__connection.cursor()\n cursor.execute(\n \"select first_name, last_name, purchased_products from neutron_buyer where buyer_id=%s\",\n (self.__buyer_id,)\n )\n result = cursor.fetchone()\n if result:\n return result\n raise IDNotFoundException", "def get_account_summary(self):\r\n return self.get_object('GetAccountSummary', {}, SummaryMap)", "def find_credentials(cls, account):\n for credential in cls.credentials_list:\n if credential.account == account:\n return credential", "def getaccountaddress(self, account):\n return self.proxy.getaccountaddress(account)", "def get_personal_info(self):\n self.get(\"INFO\",\"GetPersonalInfo\")\n response = self.send()\n return response", "def describe_account_attributes():\n pass", "def details(self, identifier):\n return self.client.request_with_method(Methods.GET % (self.name, identifier,))", "def get_user_data(self, account, signing_account=None):\n account = Account(account, hive_instance=self.hive)\n user_data = self._conveyor_method(account, signing_account,\n \"conveyor.get_user_data\",\n [account['name']])\n if \"result\" in user_data:\n return user_data[\"result\"]\n else:\n return user_data", "def get_details(self):", "def test_access_account_info_with_token(self):\n\n print(\" --------------------------- Test 6 - Access Account Information ----------------------------\")\n\n user_id = uuid.uuid4()\n password = \"my-precious\"\n currency = \"EUR\"\n\n register_user(user_id, password, currency)\n response = login_user(user_id, password)\n\n self.assertTrue(response.json()['message']['auth_token'])\n\n auth_token = response.json()['message']['auth_token']\n headers = {'Content-Type': \"application/json\", 'Authorization': auth_token}\n\n data = \"{\\\"amount\\\" : 20.0}\"\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n requests.post('http://192.168.85-208/account/amount', headers=headers, data=data)\n\n # Get the buyer account information to check if the money comes in\n response = requests.get('http://0.0.0.0:5000/account', headers=headers)\n print(json.dumps(response.json()['message'], indent=4))", "def get(self, request, pk, format=None):\n account = Account.objects.get(pk=pk)\n users = account.get_users()\n s = UserSerializer(users)\n return Response(s.data)", "def test_account_information(self):\r\n res = self.testapp.get(u'/api/v1/admin/account?api_key=' + API_KEY,\r\n status=200)\r\n\r\n # make sure we can decode the body\r\n user = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n user['username'], 'admin',\r\n \"Should 
have a username of admin {0}\".format(user))\r\n\r\n self.assertTrue(\r\n 'password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n '_password' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self.assertTrue(\r\n 'api_key' not in user,\r\n 'Should not have a field password {0}'.format(user))\r\n self._check_cors_headers(res)", "def query_accounts(self):\n return self._call_txtrader_api('query_accounts', {})", "def _get_accounts_data(self, accounts, display_account,tables,where_clause,where_params):\n\n account_result = {}\n # Prepare sql query base on selected parameters from wizard\n tables, where_clause, where_params = tables,where_clause,where_params\n\n # print tables, where_clause, where_params\n # print \"tables data\",tables\n # print \"Table Type\",type(tables)\n # print \"where clause data\",where_clause\n # print \"where clause\",type(where_clause)\n # print \"where params data\",where_params\n # print \"where params\",type(where_params)\n\n tables = tables.replace('\"','')\n if not tables:\n tables = 'account_move_line'\n wheres = [\"\"]\n if where_clause.strip():\n wheres.append(where_clause.strip())\n filters = \" AND \".join(wheres)\n # compute the balance, debit and credit for the provided accounts\n request = (\"SELECT account_id AS id, SUM(debit) AS debit, SUM(credit) AS credit, (SUM(debit) - SUM(credit)) AS balance\" +\\\n \" FROM \" + tables + \" WHERE account_id IN %s \" + filters + \" GROUP BY account_id\")\n params = (tuple(accounts.ids),) + tuple(where_params)\n self.env.cr.execute(request, params)\n for row in self.env.cr.dictfetchall():\n account_result[row.pop('id')] = row\n account_res = []\n for account in accounts:\n res = dict((fn, 0.0) for fn in ['credit', 'debit', 'balance'])\n currency = account.currency_id and account.currency_id or account.company_id.currency_id\n res['code'] = account.code\n res['name'] = account.name\n if account.id in account_result.keys():\n res['debit'] = account_result[account.id].get('debit')\n res['credit'] = account_result[account.id].get('credit')\n res['balance'] = account_result[account.id].get('balance')\n if display_account == 'all':\n account_res.append(res)\n if display_account == 'not_zero' and not currency.is_zero(res['balance']):\n account_res.append(res)\n if display_account == 'movement' and (not currency.is_zero(res['debit']) or not currency.is_zero(res['credit'])):\n account_res.append(res)\n print \"data from core report model\",account_res\n return account_res", "async def metadata(self) -> AccountInformationMetaData:\n\n e = await self.request.request(url=f'https://accountinformation.roblox.com/v1/metadata', method='get')\n return AccountInformationMetaData(item=e)", "def retrieve(cls, account):\n requested_acct = None\n try:\n requested_acct = BankAccount.__acct_store[account]\n except KeyError:\n return False\n finally:\n return requested_acct", "def get_account_by_name(self, account_name):\n accounts = self.service_old.management().accounts().list().execute()\n\n account = None\n if accounts.get('items'):\n account = next(acnt for acnt in accounts.get('items') if acnt[\"name\"] == account_name)\n\n if account is None:\n log_msg = \"The account named \" + account_name + \" does not exist!\"\n print(log_msg)\n\n return account", "def account():\n\n bank_test = Bank.objects.create(name='R-Bank')\n company_test = Company.objects.create(name='Tre Belarus', country='Belarus')\n account = Account.objects.create(iban_number='TEEdddddddfs', 
swift_code='tertrefdsf',\n bank=bank_test, company=company_test)\n return account", "def getUserDetails(self,name):\n raise BorkedGetUserDetails", "def account_order(self, orderid):\n return self.get(f'orders/{orderid}', auth=True)", "def getreceivedbyaccount(self, account, minconf=1):\n return self.proxy.getreceivedbyaccount(account, minconf)", "def account(self) -> str:\n return self._account", "def account(self) -> str:\n return self._account", "def get_accounts(self):\n\n data = {\n 'customerId': self.personal_identity_number,\n 'responseControl': {\n 'filter': {\n 'includes': ['ALL']\n }\n }\n }\n\n headers = {'Content-type': 'application/json',\n 'Accept': 'application/json',\n 'CSRFToken': self.json_token}\n path = '/im/json/overview/getaccounts'\n req = self.session.post(\n self.BASE_URL + path,\n data=json.dumps(data),\n headers=headers)\n\n for account in req.json()['response']['accounts']:\n self.accounts[account['number']] = account\n del(self.accounts[account['number']]['number'])\n\n return self.accounts", "async def fetch_account_status(account_id):\n res_object = requests.get(_ACCOUNTS_URL.format(account_id=account_id))\n return res_object.json() if res_object.status_code == 200 else {}", "def find_credential(cls, account):\n for credential in cls.credential_list:\n if credential.account == account:\n return credential", "def pp_get_account(username=None, domain=None, server=None):\n # build Requests session\n pp = requests.Session()\n pp.auth = (udata.pp2['user'], udata.pp2['pass'])\n pp.cookies.update(get_cookies('secure1.inmotionhosting.com'))\n\n # build search query\n if username:\n stype = \"username\"\n sterm = username\n elif domain:\n stype = \"domain\"\n sterm = domain\n elif server:\n stype = \"vpsid\"\n sterm = server\n else:\n print(\"!! No valid search term specified.\")\n return None\n\n # perform search\n srez = pp.post('https://secure1.inmotionhosting.com/admin/account',\n data={'search_type': stype, 'search': sterm})\n\n # validate login\n check_pp_login(srez)\n\n # parse with BeautifulSoup/lxml\n bs = BeautifulSoup(srez.text, \"lxml\")\n\n alraw = bs.find_all('table')[2].find_all('tr')\n alraw.pop(0)\n\n uresult = []\n for tacct in alraw:\n trx = tacct.find_all('td')\n urez = {\n 'id': trx[8].a['href'].split('/')[-1].strip(),\n 'acct_id': trx[0].text.strip(),\n 'username': trx[1].text.strip(),\n 'domain': trx[2].text.strip(),\n 'status': trx[3].text.strip().lower(),\n 'icon': trx[4].img['src'],\n 'verify': trx[5].a['href'],\n 'technical': trx[6].a['href'],\n 'billing': trx[7].a['href'],\n 'notes': trx[8].a['href'],\n 'url': \"https://secure1.inmotionhosting.com\" + trx[8].a['href']\n }\n uresult.append(urez)\n\n return uresult", "def get_account(self, account_id=None, account_name=None, search=False):\n if not (account_id or account_name):\n aliases = self.get_account_aliases()\n if aliases:\n account_name = aliases[0]\n else:\n raise ValueError('get_account(). Account id, name, or alias not found')\n accounts = self.get_all_accounts(account_id=account_id, account_name=account_name,\n search=search)\n if accounts:\n if len(accounts) > 1:\n raise ValueError('get_account matched more than a single account with the '\n 'provided criteria: account_id=\"{0}\", account_name=\"{1}\". 
'\n 'Matched:{2}'\n .format(account_id, account_name,\n \", \".join(str(x) for x in accounts)))\n else:\n return accounts[0]\n return None", "def get_accounts(self):\n return self.accounts.all()", "def get_account(url, token, marker=None, limit=None, prefix=None,\n end_marker=None, http_conn=None, full_listing=False,\n service_token=None, headers=None, delimiter=None):\n req_headers = {'X-Auth-Token': token, 'Accept-Encoding': 'gzip'}\n if service_token:\n req_headers['X-Service-Token'] = service_token\n if headers:\n req_headers.update(headers)\n\n close_conn = False\n if not http_conn:\n http_conn = http_connection(url)\n close_conn = True\n if full_listing:\n rv = get_account(url, token, marker, limit, prefix, end_marker,\n http_conn, headers=req_headers, delimiter=delimiter)\n listing = rv[1]\n while listing:\n marker = listing[-1]['name']\n listing = get_account(url, token, marker, limit, prefix,\n end_marker, http_conn, headers=req_headers,\n delimiter=delimiter)[1]\n if listing:\n rv[1].extend(listing)\n return rv\n parsed, conn = http_conn\n qs = 'format=json'\n if marker:\n qs += '&marker=%s' % quote(marker)\n if limit:\n qs += '&limit=%d' % limit\n if prefix:\n qs += '&prefix=%s' % quote(prefix)\n if delimiter:\n qs += '&delimiter=%s' % quote(delimiter)\n if end_marker:\n qs += '&end_marker=%s' % quote(end_marker)\n full_path = '%s?%s' % (parsed.path, qs)\n method = 'GET'\n conn.request(method, full_path, '', req_headers)\n resp = conn.getresponse()\n body = resp.read()\n if close_conn:\n conn.close()\n http_log((\"%s?%s\" % (url, qs), method,), {'headers': req_headers},\n resp, body)\n\n resp_headers = resp_header_dict(resp)\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(resp, 'Account GET failed', body)\n if resp.status == 204:\n return resp_headers, []\n return resp_headers, parse_api_response(resp_headers, body)", "def retrieve_user_details(self, email):\n if self.database is None:\n raise Exception(\"No database.\")\n if email is None or len(email) == 0:\n raise Exception(\"Bad parameter.\")\n return self.database.retrieve_user_details(email)", "def simple_account_data(self,accountinfo):\n\n data = {\n 'username' : accountinfo['username'],\n 'password' : accountinfo['password'],\n 'passwordconfirm' : accountinfo['password'],\n 'firstname' : accountinfo['firstname'],\n 'middlename' : accountinfo['middlename'],\n 'lastname' : accountinfo['lastname'],\n 'email' : accountinfo['email'],\n 'emailconfirm' : accountinfo['email'],\n 'usageagreement' : True,\n }\n return data", "def getAccount(self, acctNo):\n for account in self._accountList:\n if account.getAccountNumber() == acctNo:\n return account\n return None", "def loadInfo(self):\n \n response, = self.client.query( 'get_user_account_by_login', (self.name, ), version = 14 )\n result = utils.responseToDict(self.info_query_description, response)\n self.__dict__.update(result)\n\n if self.sponsor_type != 'NONE':\n self.sponsor = ListMember.create(self.client, self.sponsor_type, self.sponsor_name)\n else:\n self.sponsor = None", "def account_look_up_info(self):\n return self._account_look_up_info", "def find_credential(account):\n return Credentials.find_by_username(account)", "def account_info(args: object):\n info(\"Batch Account Name: {}\".format(args.BatchAccountName))\n info(\"Batch Account URL: {}\".format(args.BatchAccountUrl))\n info(\"Storage account: {}\".format(args.StorageAccountName))\n info(\"Reading in the list of test in the : {} file\".format(args.TestConfig))", "def 
account(self, sid):\r\n return accounts.Account(self, sid)", "def get_user_details(self, response):\n token = response.get('access_token')\n headers = {\"Authorization\": \"Bearer %s\" % token}\n endpoint = self.USER_INFO_URL\n response = requests.get(endpoint, headers=headers)\n return {'email': response.json()['email'] or '',\n # We'll need sub, the unique ID, for get_user_id.\n 'sub': response.json()['sub']}", "def parseAccountInfo(self, response):\n parser = make_parser()\n prid = ParseAccountInfo(self.ns_customer_ent)\n parser.setContentHandler(prid)\n parser.setFeature(handler.feature_namespaces, 1)\n parser.parse(StringIO.StringIO(response))\n rval = prid.getAccountInfo()\n self.logger.debug(\"AccountInfo: %s\", rval)\n return rval" ]
[ "0.8391992", "0.7705361", "0.76916474", "0.76766896", "0.7665401", "0.7580927", "0.7388152", "0.7283165", "0.6980997", "0.6968043", "0.6938249", "0.6916687", "0.6901691", "0.6892927", "0.6841231", "0.6769148", "0.6728201", "0.67043024", "0.6670262", "0.6635612", "0.6632791", "0.6597222", "0.6566836", "0.6525024", "0.6525024", "0.6503175", "0.6503175", "0.6498239", "0.6490761", "0.6436837", "0.63872266", "0.6351314", "0.6345126", "0.6335858", "0.631203", "0.63103867", "0.629721", "0.62845016", "0.6225429", "0.62225974", "0.61663276", "0.6161206", "0.6091572", "0.60851717", "0.6075902", "0.60701406", "0.6059054", "0.6057481", "0.60469884", "0.6036704", "0.60344124", "0.60265714", "0.6021438", "0.6012925", "0.59974796", "0.59935725", "0.59917516", "0.5969059", "0.59640574", "0.5955045", "0.5946267", "0.5938816", "0.59365577", "0.5927731", "0.58547384", "0.5817763", "0.5808682", "0.5783023", "0.57737577", "0.57565737", "0.5753192", "0.5743407", "0.57098776", "0.57075757", "0.5704679", "0.569387", "0.5692914", "0.56880516", "0.56784296", "0.5673981", "0.56593543", "0.56546664", "0.56546664", "0.56536114", "0.5652603", "0.56510246", "0.564653", "0.5644336", "0.56414306", "0.56235236", "0.5623226", "0.56162584", "0.56151545", "0.56060004", "0.55976933", "0.55858165", "0.55771434", "0.557484", "0.5568786", "0.5563604" ]
0.6153258
42
Checks the availability of the service.
def health_check():
    now = datetime.datetime.now()
    return make_response(jsonify({'Alive': f'{now.strftime("%Y-%m-%d %H:%M")}'}), 200)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_availability(self):\n pass", "def print_service_available():\n if WithingsDataManager.service_available is not True:\n _LOGGER.info(\"Looks like the service is available again\")\n WithingsDataManager.service_available = True\n return True", "def is_available():", "def print_service_unavailable():\n if WithingsDataManager.service_available is not False:\n _LOGGER.error(\"Looks like the service is not available at the moment\")\n WithingsDataManager.service_available = False\n return True", "async def healthcheck(self):\n for service in self.services:\n await service.healthcheck()", "def check_service(self, url: str, check_wfs_member: bool = False, check_image: bool = False):\n service_status = self.check_status(url, check_wfs_member=check_wfs_member, check_image=check_image)\n if service_status.success is True:\n self.handle_service_success(service_status)\n else:\n self.handle_service_error(service_status)", "def check_service_availability(self, nodes, cmd, expected,\n succeed_nodes=1):\n def check_services():\n succeed_count = 0\n for node in nodes:\n remote = ssh.Client(node, self.usr, self.pwd,\n key_filename=self.key,\n timeout=self.timeout)\n try:\n output = remote.exec_command(cmd)\n LOG.debug(output)\n if expected in output:\n succeed_count += 1\n except Exception:\n pass\n if succeed_count == succeed_nodes:\n return True\n else:\n return False\n\n if not fuel_health.test.call_until_true(check_services, 30,\n self.timeout):\n self.fail('Failed to discover service {0} '\n 'within specified timeout'.format(expected))\n return True", "def __some_alive(self):\n for service in self.__services.values():\n if service.is_alive():\n return True\n return False", "def check_services_ready(self, services):\n for ser in services:\n services[ser] = False\n response = self.bus.wait_for_response(Message(\n 'mycroft.{}.is_ready'.format(ser)))\n if response and response.data['status']:\n services[ser] = True\n return all([services[ser] for ser in services])", "def healthy_service(self):\n return not self.service_currently_down and not self.service_recently_down", "def check_health(self):\n return defer.succeed(True)", "def check_services(self):\n for service in self.services:\n try:\n self.cloud.search_services(service)[0]\n except Exception: # pylint: disable=broad-except\n self.is_skipped = True\n break", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}/v1/kv/health'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def service_check(self, env):\n import params\n\n self.active_master_host = params.hawqmaster_host\n self.active_master_port = params.hawq_master_address_port\n self.checks_failed = 0\n self.total_checks = 2\n\n # Checks HAWQ cluster state\n self.check_state()\n\n # Runs check for writing and reading tables on HAWQ\n self.check_hawq()\n\n # Runs check for writing and reading external tables on HDFS using PXF, if PXF is installed\n if params.is_pxf_installed:\n self.total_checks += 1\n self.check_hawq_pxf_hdfs()\n else:\n Logger.info(\"PXF not installed. 
Skipping HAWQ-PXF checks...\")\n\n if self.checks_failed != 0:\n Logger.error(\"** FAILURE **: Service check failed {0} of {1} checks\".format(self.checks_failed, self.total_checks))\n sys.exit(1)\n\n Logger.info(\"Service check completed successfully\")", "def is_available(cls):\n\n try:\n proc = subprocess.Popen(\n ['systemctl', 'status', 'NetworkManager'],\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n )\n proc.communicate()\n return proc.returncode == 0\n except OSError:\n return False", "def __check(self):\n status = '200 OK'\n try:\n response = get(self.__url)\n status = '{} {}'.format(\n response.status_code,\n http.client.responses[response.status_code]\n )\n except Exception as e:\n status = e.__class__.__name__\n \n if status[:3] == '200':\n self.__notify_up()\n else:\n if not self.downtime_info:\n self.downtime_info = DowntimeInfo(status)\n self.__notify_down()", "def _IsReady(self):\n if self.ip_address is None:\n self._GetIpAddress()\n if self.ip_address is not None:\n url = 'http://%s' % (self.ip_address)\n r = requests.get(url)\n if r.status_code == 200:\n return True\n return False", "def check_availability(self):\n\t\tif not self.connection_is_usable:\n\t\t\treturn False\n\t\twith self.client_lock:\n\t\t\tif self.stream is None:\n\t\t\t\treturn False\n\t\t\tif self.last_ping is None or self.last_ping.age() >= self.ping_max_age:\n\t\t\t\tself.last_ping = SendPing(self, self.ping_timeout)\n\t\t\tlast_ping = self.last_ping\n\t\treturn last_ping.answered(self.ping_timeout)", "def is_available(self) -> bool:\n raise NotImplementedError", "def celery_available():\n try:\n res = check_celery.apply_async()\n return \"OK\" == res.get(timeout=2)\n except:\n return False", "def check(self, name):\n return self.find(name).available", "def wait_for_container():\n for i in xrange(30):\n print(\"Waiting for service to come up\")\n try:\n requests.get(URL).raise_for_status()\n return True\n except Exception as e:\n print e\n sleep(1)\n\n return False", "def is_available(**kwargs: Any) -> bool:\n try:\n _check_available()\n except Unavailable as e:\n logger.info('Database not available: %s', e)\n return False\n return True", "def is_available_while_running(cls) -> bool:\n\n return True", "def check_available():\n\n rm = current_app.config['rm_object']\n\n return rm.check_availability()", "def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def health_check(self):\n headers = {\"NDS-Proxy-Ping\": \"NPP\"}\n url = \"http://{host}:{port}/upm\".format(host=self.upm_host, port=self.upm_port)\n is_available, http_code = http_utilities.get(url, headers=headers)\n\n if http_code == 200:\n self.log.info(\"The UPM is available\")\n return True\n else:\n self.log.error(\"The UPM is not available\")\n return False", "def check_microservice(params) -> None:\n cmd = \"docker container inspect -f '{{.State.Running}}' bg_changer >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\"Microservice is running\")\n else:\n print(\"Microservice is NOT running\")", "def ready(self):\n\n if not self.running:\n return False\n\n try:\n response = requests.get(\n 'http://{}:{}'.format(\n self.running_host,\n self.running_port\n )\n )\n except requests.ConnectionError:\n return False\n\n if response.status_code == 404:\n return True\n elif response.status_code == 500:\n return False\n else:\n return False", "def _wait_for_management(self, ip, timeout, port=80):\n validation_url = 'http://{0}:{1}/blueprints'.format(ip, port)\n\n end = time.time() + timeout\n\n while end - 
time.time() >= 0:\n try:\n status = urllib.urlopen(validation_url).getcode()\n if status == 200:\n return True\n except IOError:\n time.sleep(5)\n\n return False", "def check_status(self):\n try:\n self.server.ping()\n return True\n except Exception as e:\n return False", "def is_available(self):\n raise NotImplementedError", "def check_stellar_service(self):\n _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES = {\n \"TEST\": \"https://testnet.threefold.io/threefoldfoundation/transactionfunding_service/fund_transaction\",\n \"STD\": \"https://tokenservices.threefold.io/threefoldfoundation/transactionfunding_service/fund_transaction\",\n }\n _HORIZON_NETWORKS = {\"TEST\": \"https://horizon-testnet.stellar.org\", \"STD\": \"https://horizon.stellar.org\"}\n\n services_status = True\n\n # urls of services according to identity explorer\n if \"testnet\" in j.core.identity.me.explorer_url:\n stellar_url = _HORIZON_NETWORKS[\"TEST\"]\n tokenservices_url = _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES[\"TEST\"]\n else:\n stellar_url = _HORIZON_NETWORKS[\"STD\"]\n tokenservices_url = _THREEFOLDFOUNDATION_TFTSTELLAR_SERVICES[\"STD\"]\n\n # check stellar service\n try:\n j.tools.http.get(stellar_url)\n except:\n services_status = False\n\n # check token services\n try:\n j.tools.http.get(tokenservices_url)\n except:\n services_status = False\n\n return services_status", "def check_one_request(self):\n resp = self.client.create_service(service_name=self.service_name,\n domain_list=self.domain_list,\n origin_list=self.origin_list,\n caching_list=self.caching_list,\n flavor_id=self.flavor_id)\n if 'location' in resp.headers:\n self.service_url = resp.headers['location']\n else:\n self.service_url = ''\n\n # delete the service\n self.assertTrue(resp.status_code < 503)\n\n if self.service_url != '':\n self.client.delete_service(location=self.service_url)", "def _verify_service(node, present=True):\n # Get iscsi service status\n out = CephAdm(node).ceph.orch.ls(service_type=\"iscsi\", format=\"json-pretty\")\n\n # If ISCSI was deployed\n if present:\n service_details = json.loads(out)[0]\n return (\n False\n if \"service was created\" not in (service_details[\"events\"][0])\n else True\n )\n else:\n # If ISCSI was removed, wait for a timeout to check whether its removed\n timeout, interval = 20, 2\n for w in WaitUntil(timeout=timeout, interval=interval):\n out = CephAdm(node).ceph.orch.ls(service_type=\"iscsi\", format=\"json-pretty\")\n if \"No services reported\" in out:\n return True\n if w.expired:\n log.info(\"Service iscsi is not removed after rm operation.\")\n return False", "def available(self) -> bool:\n return self._api.available", "def check_status(self):", "def checkstatus(self):\n # define cross-platform /dev/null\n devnull = open(os.devnull, 'w')\n\n # if the OS is windows\n if os.name == 'nt':\n ping = ['ping', '-n', '1', self.device]\n\n # if the OS is posix\n else:\n ping = ['ping', '-c', '1', self.device]\n\n print(self.device + ' Checking for device availability', end='', flush=True)\n time.sleep(5)\n count = 0\n while count < 2:\n print('.', end='', flush=True)\n ping_call = subprocess.Popen(ping, stdout=devnull)\n returncode = ping_call.wait()\n if returncode == 0:\n break\n time.sleep(1)\n count = count + 1\n\n print('')\n if count == 2:\n print(self.device + ' Device is not up')\n print(self.device + ' Exiting...')\n return 'FAIL'\n else:\n print(self.device + ' Device is Online')\n print(self.device + ' Please wait for script initialization')\n time.sleep(5)", "def 
_check_all_systems_ready(self):\n raise NotImplementedError()", "def the_service_should_be_enabled_with_no_errors(driver):\n assert wait_on_element_disappear(driver, 30, xpaths.progress.spinner)\n assert wait_for_attribute_value(driver, 20, xpaths.services.ssh_Service_Toggle, 'class', 'mat-checked')", "def sox_check_is_available(self):\n result = self._process_command('sox -h', PIPE, supress_dry_run=True)\n return result[0] == 0", "def health_check():\n printed_something = False\n\n job_checks = {}\n job_names = []\n for job in config.enabled_jobs:\n spec = nomad.parse(get_job(job.template))\n printed_something |= bool(nomad.check_events_and_logs(job.name))\n for service, checks in nomad.get_health_checks_from_spec(spec):\n if not checks:\n log.warn(f'service {service} has no health checks')\n continue\n job_checks[service] = checks\n job_names.append(job.name)\n printed_something |= nomad.wait_for_service_health_checks(consul, job_names, job_checks, nowait=True)\n\n if printed_something:\n log.error('Problems detected; see logs above.')\n sys.exit(1)\n else:\n log.info('No problems detected.')", "def available(self) -> bool:\n return True", "def available(self) -> bool:\n return True", "def is_available(self, **kwargs: Any) -> bool:\n config = get_application_config()\n status_endpoint = config.get('FILEMANAGER_STATUS_ENDPOINT', 'status')\n timeout: float = kwargs.get('timeout', 0.2)\n try:\n response = self.request('get', status_endpoint, timeout=timeout)\n return bool(response.status_code == 200)\n except Exception as e:\n logger.error('Error when calling filemanager: %s', e)\n return False\n return True", "def verify_server_availability(url, timeout=60):\n for i in range(timeout):\n try:\n assert all_services_running(), (\"Webservice(s) failed to launch:\\n\"\n + '\\n'.join(supervisor_status()))\n response = requests.get(url)\n assert response.status_code == 200, (\"Expected status 200, got\"\n f\" {response.status_code}\"\n f\" for URL {url}.\")\n response = requests.get(url + '/static/build/bundle.js')\n assert response.status_code == 200, (\"Javascript bundle not found,\"\n \" did Webpack fail?\")\n return # all checks passed\n except Exception as e:\n if i == max(range(timeout)): # last iteration\n raise ConnectionError(str(e)) from None\n time.sleep(1)", "def is_service_installed(klass, service):\n return True", "def health_ok(self):\n for client in self.clients():\n if client.run_cmd('ls'):\n log.info('Vmware cluster is up.')\n return True\n else:\n return False", "def http_checker(service):\n verify_ssl = getattr(settings, 'VERIFY_SSL', True)\n try:\n resp = requests.get(service.connection_string, verify=verify_ssl)\n if resp.status_code >= 500:\n service.update_status('Down', resp.status_code)\n else:\n service.update_status('Up', resp.status_code)\n except requests.exceptions.RequestException:\n # for an unknown reason, curl may work here, and requests fail\n # so let's try it out\n skip_ssl_flag = '-k ' if not verify_ssl else ''\n p = subprocess.Popen(\n ('curl %s %s-m 3 -I' %\n (service.connection_string, skip_ssl_flag)).split(),\n stdout=subprocess.PIPE)\n\n res = p.communicate()[0]\n if any([status in res for status in\n ('500', '501', '502', '503', '504')]):\n service.update_status('Down', res)\n else:\n service.update_status('Up', res)", "def is_ready(self, addr: int, /) -> bool:", "def DataAvailable(self) -> bool:", "def available(self) -> bool:\n return pulumi.get(self, \"available\")", "def checkService(self, serviceName, options):\n url = self._getURL(serviceName, 
options)\n self.log.info(\"Pinging service\", url)\n pingRes = Client().ping(url=url)\n if not pingRes['OK']:\n self.log.info('Failure pinging service: %s: %s' % (url, pingRes['Message']))\n res = self.restartInstance(int(options['PID']), serviceName, self.restartServices)\n if not res[\"OK\"]:\n return res\n elif res['OK'] and res['Value'] != NO_RESTART:\n self.accounting[serviceName][\"Treatment\"] = \"Successfully Restarted\"\n self.log.info(\"Agent %s has been successfully restarted\" % serviceName)\n self.log.info(\"Service responded OK\")\n return S_OK()", "def status_check(self):\n try:\n client = self.connect()\n client.sys.is_initialized() # make an actual network connection\n return True\n except:\n return False", "def check_services_status(system, **kwargs):\n logger = kwargs[\"logger\"]\n hosts = list(set([host.host_name for host in system.api.hosts.list()]))\n hosts_agents = dict()\n hosts_status = dict()\n services = kwargs['services']\n for host in hosts:\n # if a hostname contains localhost, we want to avoid trying to connect\n if 'localhost' in host:\n continue\n try:\n service_for_host = services[host]\n with ssh_client(host, username=\"root\", password=system.password) as ssh:\n service_status_dict = get_services_status_list(ssh)\n except KeyError:\n logger.info(\"Skipping host {} as it is not in yml.\".format(host))\n continue\n for service_name, expected_status in service_for_host.items():\n # if service_status_dict has service `service_name` get its status\n # compare it with expected_status\n try:\n logger.debug(\"service:{} status: {} expected_status: {}\"\n .format(service_name, service_status_dict[service_name], expected_status))\n service_status = (expected_status in service_status_dict[service_name])\n except KeyError:\n # This is because not all hosts may have all services installed\n logger.debug(\"Service {} not found on host {}\".format(service_name, host))\n continue\n try:\n hosts_agents[host].update({service_name: service_status})\n except KeyError:\n hosts_agents[host] = {service_name: service_status}\n hosts_status[host] = all(hosts_agents[host].values())\n overall_status = all(hosts_status.values())\n\n if overall_status: # all true, everything is running\n msg = (\"Ok: all services {} are in the desired state on all hosts\".format(services.keys()))\n logger.info(msg)\n print(msg)\n sys.exit(0)\n else:\n trouble_hosts = [host for host, status in hosts_status.iteritems() if not status]\n msg = (\"Critical: These hosts don't have all agents in the desired state: {}.\"\n \"Overall status is {} (where False means not in desired state)\"\n .format(trouble_hosts, hosts_agents))\n logger.error(msg)\n print(msg)\n sys.exit(2)", "def health_check(task_service_id):\n logger.info(f\"Checking task service status for {task_service_id}\")\n task_service = TaskService.objects.get(kf_id=task_service_id)\n task_service.refresh_from_db()\n task_service.health_check()", "def test_store_is_available(self, mock_current_session):\n mock_store = mock.MagicMock()\n mock_store.is_available.return_value = True\n mock_current_session.return_value = mock_store\n _, code, _ = controllers.service_status()\n self.assertEqual(code, status.OK)", "def is_available(self, product_url):\n\t\tpass", "def is_host_available(self, context, hostname):\n try:\n host = self._get_plugin().get_host_by_name(context, hostname)\n return host['availability'] == constants.HOST_UP\n except ext_host.HostNotFoundByName:\n # Does not exist yet\n return False", "def _is_ready(self):\n current_wait_time = 
0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"), timeout=1)\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))", "def is_auto_update_service_installed(self, install_check_cmd):\n self.composite_logger.log_debug(\"Checking if auto update service is installed...\")\n code, out = self.env_layer.run_command_output(install_check_cmd, False, False)\n self.composite_logger.log_debug(\" - Code: \" + str(code) + \", Output: \\n|\\t\" + \"\\n|\\t\".join(out.splitlines()))\n if len(out.strip()) > 0 and code == 0:\n self.composite_logger.log_debug(\"Auto OS update service is installed on the machine\")\n return True\n else:\n self.composite_logger.log_debug(\"Auto OS update service is NOT installed on the machine\")\n return False", "def _is_ready(self):\n current_wait_time = 0\n start_time = time.time()\n while current_wait_time < self.max_wait_time_ready:\n try:\n response = requests.get(os.path.join(self.url, \"ready\"))\n if response.status_code == 200:\n break\n except KeyboardInterrupt:\n raise KeyboardInterrupt\n except:\n time.sleep(1)\n current_wait_time = time.time() - start_time\n if current_wait_time >= self.max_wait_time_ready:\n raise TimeoutError(\"Interrupting execution\\n'/ready' endpoint is not ready \" +\n \"for maximum allowed {:d} seconds!\".format(self.max_wait_time_ready))", "def available(self) -> bool:\n return self._device.is_online", "def check_available(rs):\n info = rs.get_cluster_info()\n if info['ClusterStatus'] == 'available':\n return True\n elif info['ClusterStatus'] == 'deleting':\n raise AttributeError(f'Cluster is currently deleting. Please wait and try again.\\n{info}')\n elif info['ClusterStatus'] == 'creating': \n return False\n \n raise NameError(f'Could not get cluster status information. 
Current information about the cluster: \\n{info}')", "def available(self):\n return True", "def available(self):\n return True", "def check_url_availability(url):\n\n response = website_alive.get_response_object(url)\n return response.status_code == requests.codes['ok']", "def detect_os_service_scan(self):\n self._scanned = True\n return self._scanner.scan(self._ips, self._ports, arguments='-A')", "def get_available(self) -> bool:\n return self._available", "def check_for_service(self, remote_node, status):\n with remote_node.client() as c:\n r = c.get(\"/node/network\")\n current_status = r.body.json()[\"service_status\"]\n current_cert = r.body.json()[\"service_certificate\"]\n\n expected_cert = open(\n os.path.join(self.common_dir, \"networkcert.pem\"), \"rb\"\n ).read()\n\n assert (\n current_cert == expected_cert[:-1].decode()\n ), \"Current service certificate did not match with networkcert.pem\"\n assert (\n current_status == status.value\n ), f\"Service status {current_status} (expected {status.value})\"", "def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def available(self):\n\t\t\treturn False", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check_all_systems_ready(self):\n self._check_all_sensors_ready()\n return True", "def _check(self):\n\t\tif not self._raven:\n\t\t\traise NoDeviceFoundException", "def has_available(self):\n now = time()\n # We have cached False response\n if self.available_timestamp is not None and now < self.available_timestamp:\n return False\n\n # Get oldestTask from queue stats\n exc = None\n for _repeat in range(6):\n try:\n count = self.handle.count()\n break\n except IOError as e:\n sleep(_repeat * 2 + 1)\n exc = e\n else:\n if exc is not None:\n raise exc\n return False\n # There is at least one availabe task\n if int(count) > 0:\n return True\n # No available task, cache this response for 5 minutes\n self.available_timestamp = now + 300 # 5 minutes\n return False", "def check_active(self):\n try:\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS])).status_code\n return r.status_code == 200\n except Exception as e:\n logger.warn(e)\n return False", "def available(self) -> bool:\n raise NotImplementedError", "def check(self):\n # Determine which services to test\n # TODO: use a smarter algorithm to detect which services to check\n max_lag = max(service.lag for service in self.services)\n now = datetime.utcnow()\n services = [ service\n for service in self.services\n if service.next_update_in(now) <= max_lag\n ]\n if not services:\n return 0, []\n\n period = max(service.period for service in services)\n\n # Test them\n service_states = self._check_services(services)\n\n # Report\n return int(period), service_states", "def check_api(self):\n catalog = self.service_catalog\n for service in catalog:\n if service['name'] not in self.RESOURCE_MAP:\n self.logger.notice(\"Don't know how to check service '%s'\" %\n service['name'])\n status = self.UNKNOWN\n else:\n r = self.get(service['name'],\n self.RESOURCE_MAP[service['name']])\n if not r or r.status_code < 200 or r.status_code > 299:\n status = self.FAIL\n else:\n status = self.OK\n\n yield {\n 'service': service['name'],\n 'status': status,\n 'region': service['region']\n }", "def available(self):\n\t\t\treturn True", "def available(self):\n\t\t\treturn 
True", "def available(self):\n\t\t\treturn True", "def reachable(self):\n service = build('gmail', 'v1', http=Http(timeout=1.0))\n url = urlparse.urlparse(service._baseUrl)\n host = url.hostname\n port = url.port\n try:\n socket.getaddrinfo(host, port, proto=socket.IPPROTO_TCP)\n except (socket.herror, socket.gaierror, URLError, OSError):\n return False\n return True", "def available(self) -> bool:\n return self._tm_client.api.available", "def available(self) -> bool:\n return self._product and self._product.online", "def available(self) -> bool:\n if self._coordinator and not self._coordinator.last_update_success:\n return False\n return self.rest.data is not None", "def is_healthy(self) -> bool:\n try:\n self.health()\n except MeiliSearchError:\n return False\n return True", "def _check_services(self, services):\n now = datetime.utcnow()\n\n # Worker\n service_states = []\n def task(service):\n # Get state, measure lag\n start = datetime.utcnow()\n state = service.get_state()\n finish = datetime.utcnow()\n\n # Update lag\n service.lag = (finish - start).total_seconds()\n\n # Add state\n service_states.append(state)\n logger.debug(u'Checked service {} (lag={}, real_period={}): last checked {} ago, state={}: {}'.format(\n service.name,\n service.lag,\n service.real_period,\n now - service.last_tested if service.last_tested else '(never)',\n state['state'], state['info']\n ))\n\n # Update timestamp\n service.last_tested = now\n\n # Run\n threads = [threading.Thread(target=task, args=(service,)) for service in services]\n for t in threads: t.start()\n for t in threads: t.join()\n # TODO: declare max waiting time. If any process doesnt manage to finish in time -- report it as a separate request\n\n return service_states", "def internet_availability(cls, **kwargs):\n if internet_connectivity_check():\n cls.response(\"The internet connection is ok\")\n return True\n else:\n cls.response(\"The internet is down for now\")\n return False", "def gotConnectionWithServices(timeout):\n\n try:\n rospy.wait_for_service('fluid/take_off', timeout=timeout)\n rospy.wait_for_service('fluid/explore', timeout=timeout)\n rospy.wait_for_service('fluid/travel', timeout=timeout)\n rospy.wait_for_service('fluid/land', timeout=timeout)\n rospy.wait_for_service('fluid/interact', timeout=timeout)\n return True\n except rospy.ROSException:\n return False", "def available(self) -> bool:\n return self._is_available", "def data_checker(xml):\n if not xml or 'response code=\"102\"' in xml:\n LOGGER.debug(\"The service 'oclc' is temporarily down!\")\n return False\n return True", "def testCheckAvailable(self):\n img = self.img\n img.inspect()\n with converter.RootMounted(img.converter._h,\n '/dev/VolGroup00/LogVol00'):\n c = img.converter\n installer = redhat.LocalInstaller(\n c._h, '/dev/VolGroup00/LogVol00',\n db.DB(['{}/conf/guestconv.db'.format(env.topdir)]),\n log.get_logger_object(test_helper.logger)\n )\n\n kernel = redhat.Package('kernel',\n version='2.6.9', release='89.EL',\n arch='i686')\n self.assertTrue(installer.check_available([kernel]))", "def iap_is_ready(url, wait_min=15):\n google_open_id_connect_token = None\n\n service_account_credentials = get_service_account_credentials(\"CLIENT_ID\")\n google_open_id_connect_token = get_google_open_id_connect_token(\n service_account_credentials)\n # Wait up to 30 minutes for IAP access test.\n num_req = 0\n end_time = datetime.datetime.now() + datetime.timedelta(\n minutes=wait_min)\n while datetime.datetime.now() < end_time:\n num_req += 1\n logging.info(\"Trying 
url: %s\", url)\n try:\n resp = None\n resp = requests.request(\n \"GET\",\n url,\n headers={\n \"Authorization\":\n \"Bearer {}\".format(google_open_id_connect_token)\n },\n verify=False)\n logging.info(resp.text)\n if resp.status_code == 200:\n logging.info(\"Endpoint is ready for %s!\", url)\n return True\n else:\n logging.info(\n \"%s: Endpoint not ready, request number: %s\" % (url, num_req))\n except Exception as e:\n logging.info(\"%s: Endpoint not ready, exception caught %s, request number: %s\" %\n (url, str(e), num_req))\n sleep(10)\n return False", "def enabled(name, **kwargs):\n if not available(name):\n log.error(\"Service %s not found\", name)\n return False\n\n run_file = os.path.join(SERVICE_DIR, name, \"run\")\n down_file = os.path.join(SERVICE_DIR, name, \"down\")\n\n return (\n os.path.isfile(run_file)\n and os.access(run_file, os.X_OK)\n and not os.path.isfile(down_file)\n )", "def available_on_system(cls):\n return (cls.reason_to_be_disabled() is None)", "def available(self) -> bool:\n return self._product.online", "def available(self) -> bool:\n return self._product.online" ]
[ "0.7628735", "0.74451256", "0.7004689", "0.6775977", "0.67273784", "0.66786855", "0.6657399", "0.65845925", "0.6574978", "0.65488654", "0.6541251", "0.6489893", "0.6482576", "0.64753973", "0.63969254", "0.63804036", "0.6367508", "0.6347607", "0.6273594", "0.62443465", "0.6236623", "0.62312496", "0.6224895", "0.619554", "0.6181187", "0.61492246", "0.61468035", "0.61234915", "0.6115713", "0.6110163", "0.6100044", "0.6099751", "0.60783076", "0.60677236", "0.6062633", "0.6062077", "0.60297376", "0.6025859", "0.60144", "0.60098636", "0.6003167", "0.5988978", "0.5984853", "0.5984853", "0.5981703", "0.5972444", "0.5971249", "0.594159", "0.5921096", "0.5915811", "0.59133023", "0.59018016", "0.58889306", "0.58882713", "0.58823824", "0.5861095", "0.5850491", "0.5848677", "0.58436924", "0.5842746", "0.584023", "0.5827075", "0.5821435", "0.58195364", "0.5816424", "0.5816424", "0.5805218", "0.5805093", "0.58031535", "0.5799329", "0.57904476", "0.5784115", "0.5784115", "0.5784115", "0.5782733", "0.5782733", "0.5768758", "0.5755192", "0.57533115", "0.57526475", "0.57515097", "0.5749788", "0.5738166", "0.5738166", "0.5738166", "0.5737889", "0.57353497", "0.5734457", "0.57335", "0.5732043", "0.5731553", "0.57265437", "0.57243484", "0.57150376", "0.57111865", "0.5710582", "0.57092494", "0.5706162", "0.56986976", "0.569589", "0.569589" ]
0.0
-1
Handles errors from the requests received from clients
def not_found(error):
    return make_response(jsonify({'error': 'Resource not found'}), 404)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_error(self, request_handler, client_address):\n logger.debug('handle_error(%s:%s)' % client_address)", "def handle_error(self, request, client_address):\n\t\tprint '-'*40\n\t\tprint 'Exception happened during processing of request from',\n\t\tprint client_address\n\t\timport traceback\n\t\ttraceback.print_exc() # XXX But this goes to stderr!\n\t\tprint '-'*40", "def _handle_error(self, path, reqs, headers, get=True):\n call = requests.get if get else requests.post\n resp = None\n dump = json.dumps(reqs)\n wait = self.config.start_reconnect_wait\n while resp is None:\n if wait > self.config.max_reconnect_wait:\n raise Exception(\"To many reconnect attempts\")\n time.sleep(wait)\n try:\n resp = call(path, dump, headers=headers)\n except requests.exceptions.ConnectionError:\n resp = None\n wait *= 2\n return resp", "async def handle_client_error(self, e):\n\t\terrorData = {}\n\t\terrorData['error'] = e.code\n\t\tif e.message:\n\t\t\terrorData['message'] = e.message\n\t\t\tawait self.send_json(errorData)\n\t\treturn", "def _raise_http_error(self, *args, **kwargs):", "def _raise_performing_request_error(self, *args, **kwargs):", "def manage_client(request, conn):\n try:\n response_msg = assemble_response(request)\n except RequestError as ex:\n response_msg = response_error(*ex.args)\n except IOError:\n response_msg = response_error(404, \"File Not Found\")\n except OSError:\n response_msg = response_error(404, \"File Not Found\")\n finally:\n try:\n conn.sendall(response_msg)\n except NameError:\n pass", "def on_request_error(locust_instance, exception, tb, **kwargs):", "def http_error_handler(ex, req, resp, params):\n resp.body = encode.encode({\n 'status': 1,\n 'msg': 'HTTP error: ' + ex.status\n })", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "async def handle_client_error(self, exception: ClientError):\n error_data = {\"error\": exception.code}\n if exception.message:\n error_data[\"message\"] = exception.message\n await self.send_json(error_data)", "def handle_err(self):\n pass", "def _on_server_error(server, *_):\n exception = sys.exc_info()[1]\n if isinstance(exception, ConnectionError):\n # These are expected errors when the browser closes the connection.\n return\n # Other errors would be unexpected, so print them.\n traceback.print_exc()", "def on_request_error(self, status_code):\n log.error(\"Stream encountered HTTP error: %d\", status_code)", "def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)", "def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = 
error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise DemistoException(resp.raise_for_status())", "def _handle_requests(self):\n for request in self._requests[:]:\n self.logger.debug(\"Handling request: %r\", request)\n\n # an orphan request, client is not alive.\n if not request.server_request and not request.worker.is_alive:\n self.logger.warning(\"Client %r disconnected, request dropped\",\n request.worker.name)\n self._requests.remove(request)\n continue\n\n try:\n request_handler = self._get_request_handler(request)\n reply = request_handler(request)\n\n except _WaitingForResourceException as ex:\n self.logger.exception(str(ex))\n continue\n\n except Exception as ex:\n if isinstance(ex, ServerError):\n code = ex.ERROR_CODE\n content = ex.get_error_content()\n\n else:\n code = ServerError.ERROR_CODE\n content = str(ex)\n\n self.logger.exception(str(ex))\n reply = ErrorReply(code=code, content=content)\n\n reply.request_id = request.message.msg_id\n self._reactor.callFromThread(request.respond, reply)\n\n self._requests.remove(request)", "def error_handler(source, prod, HEADERS):\n\n try:\n req = requests.get(source, params=prod, headers=HEADERS)\n except Timeout as e:\n print(\"\\nThe website took too long to respond. Please try after sometime.\\n\")\n sys.exit(1)\n except ConnectionError as e:\n print(\"\\nYou do not have a descent internet connection. Please check your Internet Connection and try again later.\\n\")\n sys.exit(1)\n except TooManyRedirects as e:\n print(\"\\nYour request exceeded the configured number of maximum redirections. Please try after sometime.\\n\")\n sys.exit(1)\n except Exception as e:\n print(\"\\nRequest souldn't be completed. Please try after sometime.\\n\")\n sys.exit(1)\n\n return req", "def raise_on_error(request: requests.Response) -> None:\n if request.status_code >= 400:\n json_res = request.json()\n raise requests.HTTPError(json_res)\n\n return None", "def handle_error(self, request, error):\n self.log.error(\"An error occurred at request \" + repr(request) + \": \" + repr(error))", "def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()", "def handle_error(e: ODPAPIError):\n\n if e.status_code == 401:\n flash('Your session has expired. Please log in again to continue.', category='error')\n return redirect(url_for('hydra.logout'))\n\n if e.status_code == 403:\n flash('You do not have permission to access that page.', category='warning')\n return redirect(request.referrer or url_for('home.index'))\n\n if e.status_code == 503:\n flash('Service unavailable. 
Please try again in a few minutes.', category='error')\n return\n\n try:\n detail = e.error_detail['detail']\n if e.status_code == 422 and isinstance(detail, list):\n # duplicate validation errors are returned when multiple\n # server-side dependencies validate the same input; we\n # eliminate duplicates by packing them into a dict\n errors = {\n error['loc'][1]: error['msg']\n for error in detail\n }\n for field, msg in errors.items():\n flash(f'{field}: {msg}', category='error')\n else:\n flash(detail, category='error')\n\n except (TypeError, KeyError, IndexError):\n flash(e.error_detail, category='error')", "def send_rpc_error(req, rpcreq, e):", "def handle_connection(conn):\n\n\ttry:\n\t\treq = http_parse_req(http_read(conn))\n\t\thandlers[req.method](conn, req)\n\texcept:\n\t\ttry:\n\t\t# Ignore nested exceptions, as we dont care if the 400\n\t\t# reaches the client or not\n\t\t\thttp_400(conn, b\"Invalid request\\n\")\n\t\texcept:\n\t\t\tpass", "def api_exception_handler(e):\n ep = request.url\n log.error(\"An error occured talking to k8s while working on %s: %s\", ep, e)\n\n if e.status == 404:\n msg = \"The requested resource could not be found in the API Server\"\n else:\n msg = utils.parse_error_message(e)\n\n return api.failed_response(msg, e.status)", "def _handle_error(url, response):\n handlers = {\n http.client.NOT_FOUND: NotFoundError('Resource not found: %s' % url),\n http.client.FOUND: AlreadyExistsError(\n 'Resource already exists: %s' % url\n ),\n http.client.FAILED_DEPENDENCY: ValidationError(response),\n http.client.UNAUTHORIZED: NotAuthorizedError(response),\n http.client.BAD_REQUEST: BadRequestError(response),\n }\n\n if response.status_code in handlers:\n raise handlers[response.status_code]", "def sm_error_handler(self, errors):\n try:\n yield\n except Exception as e:\n if issubclass(e.__class__, ManagerError) or \\\n issubclass(e.__class__, ManagerFatalError) or \\\n isinstance(e, ConnectionError) or \\\n xmlrpclib.ProtocolError or \\\n xmlrpclib.Fault:\n\n errors.append(repr(e))\n elif isinstance(e, socket.error):\n errors.append(repr(e))\n errors.append(\"Please make sure the server port is open.\")\n else:\n raise e", "async def handle_api_error(ctx: Context, e: ResponseCodeError) -> None:\n if e.status == 404:\n log.debug(f\"API responded with 404 for command {ctx.command}\")\n await ctx.send(\"There does not seem to be anything matching your query.\")\n ctx.bot.stats.incr(\"errors.api_error_404\")\n elif e.status == 400:\n log.error(\n \"API responded with 400 for command %s: %r.\",\n ctx.command,\n e.response_json or e.response_text,\n )\n await ctx.send(\"According to the API, your request is malformed.\")\n ctx.bot.stats.incr(\"errors.api_error_400\")\n elif 500 <= e.status < 600:\n log.warning(f\"API responded with {e.status} for command {ctx.command}\")\n await ctx.send(\"Sorry, there seems to be an internal issue with the API.\")\n ctx.bot.stats.incr(\"errors.api_internal_server_error\")\n else:\n log.warning(f\"Unexpected API response for command {ctx.command}: {e.status}\")\n await ctx.send(f\"Got an unexpected status code from the API (`{e.status}`).\")\n ctx.bot.stats.incr(f\"errors.api_error_{e.status}\")", "def handle_api_error(self, response):\n code = response.status_code\n self.__log(f'Handling API error with status code {code}.', 'error')\n if code == 401:\n self.__log(f'Invalid credentials. Please make sure your token is correct.', 'error')\n raise InvalidCredentialsError\n if code == 404:\n self.__log(f'File not found on query. 
Make sure query URL is correct and retry.', 'error')\n raise FileNotFoundError\n if code == 422:\n content = json.loads(response.content)\n for error in content['errors']:\n self.__log(f'API could not process the request. Message: {error[\"message\"]}.', 'error')\n raise UnprocessableRequestError(f'Issue with field {error[\"field\"]}: {error[\"message\"]}')\n if code == 429:\n self.__log(f'Monthly request limits exceeded. Upgrade billing or change token.', 'error')\n raise MonthlyRequestLimitExceededError\n self.__log(f'Response for code: \"{code}\" was unhandled by wrapper. Sorry to not be more helpful.', 'error')\n raise UnknownApiError(\"An unhandled API exception occurred\")", "def handle_error(error):\n if isinstance(error, ClientError):\n message = {\"message\": \"Error - Unexpected \" + error.response.get(\"Error\").get(\"Code\")}\n return generate_http_response(message), 500\n if isinstance(error, MissingParameterException):\n return generate_http_response(error.response), 400\n message = {\"message\": \"Error: Unexpected error\"}\n return generate_http_response(message), 500", "async def handle_request(self, api_endpoint, api_version):\n code = 400\n data = self.get_post_data()\n request_method = self.request.method.lower()\n if data:\n try:\n # will call process_get or process_post methods for the given API\n res = await getattr(api_endpoint, 'process_' + request_method)(api_version, data)\n code = 200\n except ValidationError as validerr:\n if validerr.absolute_path:\n res = '%s : %s' % (\n validerr.absolute_path.pop(), validerr.message)\n else:\n res = '%s' % validerr.message\n LOGGER.error('ValidationError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except ValueError as valuerr:\n res = str(valuerr)\n LOGGER.error('ValueError: %s', res)\n raise tornado.web.HTTPError(reason=res)\n except DatabaseError as dberr:\n err_id = dberr.__hash__()\n res = str(dberr.reason)\n LOGGER.error(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise dberr\n except Exception as err: # pylint: disable=broad-except\n err_id = err.__hash__()\n res = 'Internal server error <%s>:' \\\n 'please include this error id in bug report.' 
% err_id\n code = 500\n LOGGER.exception(res)\n LOGGER.info(\"Input data for <%s>: %s\", err_id, data)\n raise tornado.web.HTTPError(reason=res)\n else:\n res = 'Error: malformed input JSON.'\n LOGGER.error(res)\n raise tornado.web.HTTPError(reason=res)\n\n # raise tornado.web.HTTPError(status_code=444, reason='error happened')\n self.set_status(code)\n self.write(res)", "def on_error(self, status_code, data):\n\t\tprint(\"error_code: \",status_code)", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def httperror( status_code=500, message=b'' ):", "def exception_handler(res):\n try:\n res_data = res.json()\n error_code = res_data['status']\n error_msg = build_error_msg(res_data['errors'])\n exception = DemistoException(ERROR_TITLES.get(error_code, '') + error_msg)\n\n except Exception:\n exception = DemistoException(f'Error in API call [{res.status_code}] - {res.reason}')\n\n raise exception", "def error(self, flow: mitmproxy.http.HTTPFlow):", "def handle_request_parsing_error(err):\n\n code, msg = getattr(err, 'status_code', 400), getattr(err, 'messages', 'Invalid Request')\n abort(code, msg)", "def catch_backend_errors(handler, registry):\n def catch_backend_errors_tween(request):\n try:\n return handler(request)\n except BackendError as err:\n logger = get_logger(request)\n err_info = str(err)\n err_trace = traceback.format_exc()\n try:\n extra_info = \"user: %s\" % (request.user,)\n except Exception:\n extra_info = \"user: -\"\n error_log = \"%s\\n%s\\n%s\" % (err_info, err_trace, extra_info)\n hash = create_hash(error_log)\n logger.error(hash)\n logger.error(error_log)\n msg = json.dumps(\"application error: crash id %s\" % hash)\n if err.retry_after is not None:\n if err.retry_after == 0:\n retry_after = None\n else:\n retry_after = err.retry_after\n else:\n settings = request.registry.settings\n retry_after = settings.get(\"mozsvc.retry_after\", 1800)\n\n return HTTPServiceUnavailable(body=msg, retry_after=retry_after,\n content_type=\"application/json\")\n\n return catch_backend_errors_tween", "def loadErrors(app):\n\n @app.error(400)\n def error400(ex):\n \"\"\"\n Bad Request\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))\n\n @app.error(401)\n def error401(ex):\n \"\"\"\n Unauthorized\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))\n\n @app.error(404)\n def error404(ex):\n \"\"\"\n Not Found\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))\n\n @app.error(405)\n def error405(ex):\n \"\"\"\n Method Not Allowed\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))\n\n @app.error(409)\n def error409(ex):\n \"\"\"\n 
Conflict\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))\n\n @app.error(503)\n def error503(ex):\n \"\"\"\n Service Unavailable\n \"\"\"\n bottle.response.set_header('content-type', 'application/json')\n return json.dumps(dict(error=ex.body))", "def dispatch_request(self, *args, **kwargs):\n try:\n return super().dispatch_request(*args, **kwargs)\n except HTTPException as e:\n logger.error(\"HTTP Error on APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": e.code,\n \"message\": e.description\n }, e.code)\n except BaseException as e:\n logger.error(\"Error occurred in APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": 500,\n \"message\": str(e)\n }, 500)", "def init_error_responses(app):\n\tfrom werkzeug.exceptions import default_exceptions\n\tfrom werkzeug.exceptions import HTTPException\n\tmake_json_error = lambda ex: json_response(dict(description=str(ex)), ex.code)\n\tfor code in default_exceptions.iterkeys():\n\t\tif code != 500: app.errorhandler(code)(make_json_error)\n\t# Use HTTP Basic auth (json object in password field)\n\tapp.errorhandler(401)(lambda ex: json_response(\n\t\tdict(description='Authenticate with HTTP Basic json:{auth object}'), 401,\n\t\t#headers={'WWW-Authenticate': 'Basic realm=\"JSON auth required\"'}\n\t))", "def handle_request(fun):\n\n def wrapper(self, *args, **kwargs):\n \"\"\"\n We raise an exception when\n the code on the client side fails\n Server side errors are taken care of\n through response codes\n \"\"\"\n try:\n return fun(self, *args, **kwargs)\n except Exception as req_exception:\n self.logger.exception(\"internal error\")\n raise ClientSideError(str(req_exception))\n\n return wrapper", "def _request(request_handler, *args):\n try:\n method(request_handler, *args)\n except Error, error:\n response_body = {\n 'error': {\n 'status': error.code,\n 'message': error.message\n }\n }\n request_handler.response.clear()\n request_handler.response.set_status(error.code)\n util.write_response(request_handler, response_body)", "def _processGETErr(self, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httplib.BAD_REQUEST\r\n elif e.check(UnauthorizedLogin):\r\n msg = e.getErrorMessage()\r\n code = httplib.UNAUTHORIZED\r\n elif e.check(InternalError):\r\n e.printTraceback()\r\n msg = 'Internal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httplib.INTERNAL_SERVER_ERROR\r\n\r\n self._render_GET(request, code, 'text/plain; charset=utf-8', msg)", "def bad_request():\n return HttpError(400)", "def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def error_handler(msg):\n print(\"Server Error: %s\" % msg)", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def do(self, *args, **kwargs):\n try:\n return super().do(*args, **kwargs)\n except ResponseError as e:\n self.handle_error(e)", "def _handle_error(cls, e, request):\r\n if e.check(InvalidRequest):\r\n msg = e.getErrorMessage()\r\n code = httpstatus.HTTP_STATUS_CODE_BAD_REQUEST[0]\r\n else:\r\n e.printTraceback()\r\n msg = 'Fatal Error'\r\n code = httpstatus.HTTP_STATUS_CODE_INTERNAL_SERVER_ERROR[0]\r\n\r\n cls._render(request, code, 
'text/plain; charset=utf-8', msg)", "def raise_on_error(self):\n if not self._status.success:\n cls = UrlApi.InfraHTTPError if self._infra_step else UrlApi.HTTPError\n raise cls('HTTP status (%d)' % (self.status_code,), self)", "def test_call_httperror(self):\n\n with Client('username', 'password') as client:\n self.setSessionResponse(500)\n with self.assertRaises(APIError):\n data = client.call(**self.build_parameters)", "def root_simple_error_handler(exc, *args, app_name=''):\n\n #print('args',args)\n check_exception = 0\n for each_args in args:\n #print('each_args',each_args['view'].__module__)\n if each_args['view'].__module__ == 'hrms.views' or each_args['view'].__module__ == 'pms.views':\n #print('ok')\n check_exception = 1\n if isinstance(exc,ValidationError):\n print('ValidationError',exc)\n print('ValidationError',exc.get_codes())\n #n = dict(exc.detail)\n headers = {}\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, exceptions.APIException):\n print('APIException',exc.get_full_details())\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n print('exc.detail',exc.detail)\n if check_exception == 1:\n return Response({'error': exc.detail},status=exc.status_code,headers=headers)\n else:\n return Response(exc.detail,status=exc.status_code,headers=headers)\n\n elif isinstance(exc, Http404):\n print('Http404')\n if check_exception == 1:\n return Response({'error': 'Not found'},status=status.HTTP_404_NOT_FOUND)\n else:\n return Response('Not found',status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n print('PermissionDenied')\n if check_exception == 1:\n return Response({'error': 'Permission denied'},\n status=status.HTTP_403_FORBIDDEN)\n else:\n return Response('Permission denied',status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "async def _handle_exception(self, e, sock):\n if isinstance(e, (RemoteProtocolError, AssertionError)):\n await sock.aclose()\n raise BadHttpResponse(\"Invalid HTTP response from server.\") from e\n\n if isinstance(e, Exception):\n await sock.aclose()\n raise e", "def test_server_error(self):\n self._error_test(fitbit_exceptions.HTTPServerError)", "def error(self, handler):\n pass", "def handle_error(self):\n # perhaps do some nifty stuff here to\n # mark bad workers, try to restart, etc.\n msg = ''\n Nworkers = len(self.workers)\n Nsend_errors = len(self.had_send_error)\n Nsend_error_types = len(self.send_exc.keys())\n Nrecv_errors = len(self.had_recv_error)\n Nrecv_error_types = len(self.recv_exc.keys())\n if (Nsend_errors == Nworkers and\n Nsend_error_types == 1):\n sock_err_type,err_msg = self.send_exc.keys()[0]\n if sock_err_type == 111:\n # An attempt at helpful info for a common problem.\n msg = '\\n\\nConnection refused on all workers.\\n'\n msg = msg + ' Perhaps restarting the cluster would help.\\n'\n msg = msg + ' Use Your_Clusters_Name_Here.restart()'\n else:\n msg = 'A Socket error occured sending to all workers.\\n\\t'\n msg = msg + str(sock_err_type) + ': ' + str(err_msg)\n elif Nsend_errors:\n msg = '\\n\\nThe following errors occured when sending data:\\n\\t'\n for err,guilty_workers in self.send_exc.items():\n msg = msg + str(err) + '\\n\\t'\n msg = msg + 'Guilty 
workers: ' + str(guilty_workers) + '\\n'\n\n if (Nrecv_errors == Nworkers and\n Nrecv_error_types == 1):\n err,dummy = self.recv_exc.items()[0]\n err_type, err_msg, err_traceback = err\n msg = '\\n\\nThe same error occured on all workers:\\n\\t'\n msg = msg + str(err_type) + ': ' + str(err_msg)\n msg = msg + err_traceback\n elif Nrecv_errors:\n msg = '\\n\\nThe following errors occured on workers:\\n\\t'\n for err,guilty_workers in self.recv_exc.items():\n err_type, err_msg, err_traceback = err\n msg = msg + str(err_type) + ': ' + str(err_msg) + '\\n'\n msg = msg + 'Guilty workers: ' + str(guilty_workers) + '\\n'\n msg = msg + err_traceback\n\n\n raise ClusterError, msg", "def request_failed(self, ignored):\n self._errors += 1", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def error_handler(msg):\n print \"Server Error: %s\" % msg", "def handle_connection_lost(self, exc: Optional[Exception]) -> None:", "def on_error(self, status_code, data):\n if status_code == 420:\n self.disconnect()", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n raise FedexError(notification.Code,\r\n notification.Message)", "def handle_expt(self):\r\n self._perform_on_error_handling()", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n return JsonResponse(response, status=response[\"http_code\"])", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def handling_unknown_err(e):\n app.logger.exception(e)\n return resp_json(BaseResp.err(e.name))", "def request_handler(self, client_connection):\n request = client_connection.recv(1024)\n\n #Make sure we recieved some data before proceeding\n if not request:\n response = 'Empty request'\n http_code = 400\n else:\n response, http_code = self.parse_request(request)\n\n #print response\n self.send_response(client_connection, response, http_code)", "def test_api_request_can_handle_errors(self):\n # wrong repo name to test error handling.\n payload = request_github_issues('razat249', 'wrong_repo')\n self.assertEqual(payload['error'], True)\n self.assertGreaterEqual(payload['status_code'], 400)", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def handle_error(self, error):\n html = error.response.content\n raise SystemExit(\"API Error:\\n %s\" %\n \"\\n \".join(html.itertext()))", "def handle_bui_server_exception(error):\n bui.logger.error(error)\n return {\"message\": error.description}, error.code", "def _raise_if_error(response):\n if response.status_code != 200:\n raise SimpleHTTPException(response)", "def finish_failed_request(self):\n # build new response to be safe\n request = get_request()\n original_response = request.response\n request.response = HTTPResponse()\n #self.log(\"caught an error (%s), reporting it.\" %\n # sys.exc_info()[1])\n\n (exc_type, exc_value, tb) = sys.exc_info()\n error_summary = traceback.format_exception_only(exc_type, exc_value)\n error_summary = error_summary[0][0:-1] # de-listify and strip newline\n\n plain_error_msg = self._generate_plaintext_error(request,\n original_response,\n exc_type, exc_value,\n tb)\n\n if not self.config.display_exceptions:\n # DISPLAY_EXCEPTIONS is false, so return the most\n # secure (and cryptic) page.\n 
request.response.set_header(\"Content-Type\", \"text/html\")\n user_error_msg = self._generate_internal_error(request)\n elif self.config.display_exceptions == 'html':\n # Generate a spiffy HTML display using cgitb\n request.response.set_header(\"Content-Type\", \"text/html\")\n user_error_msg = self._generate_cgitb_error(request,\n original_response,\n exc_type, exc_value,\n tb)\n else:\n # Generate a plaintext page containing the traceback\n request.response.set_header(\"Content-Type\", \"text/plain\")\n user_error_msg = plain_error_msg\n\n self.logger.log_internal_error(error_summary, plain_error_msg)\n request.response.set_status(500)\n self.session_manager.finish_failed_request()\n return user_error_msg", "def _listen_to_requests(self):\n while True:\n try:\n request = self._client.recv(1024)\n except socket.error as err:\n if DEBUG_LEVEL >= 1:\n print \"Got socket error: {}\".format(err.message)\n self._client.close()\n return True\n\n if not request:\n if DEBUG_LEVEL >= 0:\n print \"Closing connection\"\n self._client.close()\n return True\n\n if DEBUG_LEVEL >= 2:\n print request\n\n if not HTTPValidation.validate_request(request):\n if DEBUG_LEVEL >= 0:\n print \"Invalid request, closing...\"\n self._client.send(public_response_functions.get_error_response())\n self._client.close()\n return True\n\n if not self._send_response(request):\n if DEBUG_LEVEL >= 0:\n print \"Closing connection...\"\n self._client.close()\n return", "def handle_api_exception(error):\n\n logger.debug(error)\n logger.debug(request.path)\n logger.debug(urllib.quote_plus(request.path))\n flash(error.message + \" (\" + str(error.status_code) + \")\", \"danger\")\n if error.status_code == 401:\n session.clear()\n return redirect(url_for('login') + \"?next=\" + urllib.quote_plus(request.path))\n return redirect(url_for('landing'))", "def handle_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def on_request_validation_error(err):\n print(err)\n return jsonify(message='Bad request'), 400", "def handle_discovery_errors(fn):\n @functools.wraps(fn)\n def wrapped(*args, **kwargs):\n try:\n return fn(*args, **kwargs)\n except (ValueError, requests.RequestException) as e:\n logger.warning('', exc_info=True)\n return redirect('/?' 
+ urllib.parse.urlencode({'failure': str(e)}))\n\n return wrapped", "def api_error_handler(ex):\n try:\n status_code = ex.code\n except AttributeError:\n status_code = 500\n if flask.request.path.startswith('/api/'):\n app.logger.error(str(ex))\n if app.config.get('DEBUG', False):\n resp = flask.jsonify(message=str(ex))\n else:\n resp = flask.jsonify(message='Internal Server Error')\n resp.status_code = status_code\n return resp\n return flask.make_response(\n flask.render_template(\n 'error.html', exc=ex,\n title=error_titles.get(status_code, 'Error')),\n status_code)", "def error_received(self, exc):\n print('Error received:', exc)", "def _check_response_for_request_errors(self):\r\n if self.response.HighestSeverity == \"ERROR\":\r\n for notification in self.response.Notifications:\r\n if notification.Severity == \"ERROR\":\r\n if \"Invalid tracking number\" in notification.Message:\r\n raise FedexInvalidTrackingNumber(notification.Code,\r\n notification.Message)\r\n else:\r\n raise FedexError(notification.Code,\r\n notification.Message)", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))", "def test_request_failed(self, kasserver, kasapi):\n kasapi.side_effect = zeep.exceptions.Fault(\"failed\")\n with pytest.raises(zeep.exceptions.Fault):\n kasserver._request(self.REQUEST_TYPE, self.REQUEST_PARAMS)", "def error(logger_id, data):\n if data.status_code == 200:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url\n + _(\" obtain problem: \") + ast.literal_eval(data.text).get(DATA))\n else:\n log.error(logger_id, _(\"CC1 - Problem with request: \") + data.url)", "def on_failed(self, status_code: int, request: Request):\n self.update_rate_limit(request)\n\n data = request.response.json()\n error = data[\"error\"]\n msg = f\"请求失败,状态码:{status_code},类型:{error['name']}, 信息:{error['message']}\"\n self.gateway.write_log(msg)", "def handle_exception(e):\n print(e)\n return error()", "def server_error(e):\n return 'Error while serving request', 500", "def recvError(self, errorMessage):\n LOG_ERROR(\"Client.recvError: \" + errorMessage, \"EDEN\")\n # default implementation: disconnect from server\n self.disconnectFromServer()", "def handle_request_parsing_error(err, req, schema):\n abort(422, errors=err.messages)", "def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n pass", "def on_errors(self, errors):\n log.error(\"Received errors: %s\", errors)", "def handle_error(self, p_ctx, others, error, start_response):\n\n if p_ctx.transport.resp_code is None:\n p_ctx.transport.resp_code = \\\n p_ctx.out_protocol.fault_to_http_response_code(error)\n\n self.get_out_string(p_ctx)\n p_ctx.out_string = [b''.join(p_ctx.out_string)]\n\n p_ctx.transport.resp_headers['Content-Length'] = \\\n str(len(p_ctx.out_string[0]))\n self.event_manager.fire_event('wsgi_exception', p_ctx)\n\n start_response(p_ctx.transport.resp_code,\n _gen_http_headers(p_ctx.transport.resp_headers))\n\n try:\n process_contexts(self, others, p_ctx, error=error)\n except Exception as e:\n # Report but ignore any exceptions from auxiliary methods.\n logger.exception(e)\n\n return itertools.chain(p_ctx.out_string, self.__finalize(p_ctx))", "def check_response_errors(self, resp):\n return True", "def bad_request(message):\n return error_response(400, message)", "def error_handler(result_code, resp):\n if result_code == 1:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n 
)\n elif result_code == 2:\n return render_template(\n \"rate_exceed.html\", seconds=resp[\"retry_after\"]\n )\n elif result_code == 3:\n return render_template(\n \"not_found.html\"\n )\n elif result_code == 4:\n return render_template(\n \"service_unavailable.html\", seconds=resp[\"retry_after\"]\n )\n else:\n return render_template(\n \"error.html\", error=resp[\"error\"]\n )", "def handle_error(self):\n self.cmd_channel.debug(\"DTPHandler.handle_error()\")\n try:\n raise\n # if error is connection related we provide a detailed\n # information about it\n except socket.error, err:\n if err[0] in errno.errorcode:\n error = err[1]\n else:\n error = \"Unknown connection error\"\n # an error could occur in case we fail reading / writing\n # from / to file (e.g. file system gets full)\n except EnvironmentError, err:\n error = _strerror(err)\n except:\n # some other exception occurred; we don't want to provide\n # confidential error messages to user so we return a\n # generic \"unknown error\" response.\n logerror(traceback.format_exc()) \n error = \"Unknown error\"\n self.cmd_channel.respond(\"426 %s; transfer aborted.\" %error)\n self.close()", "def on_failure(self, exc: BaseException) -> None:", "def handle_internal_error(exception):\n logging.error(exception)\n db.session.rollback()\n return jsonify({\n 'message': 'An unexpected internal error has occurred'\n }), 500", "def exception_handler(result, name=\"\"):\n try:\n response_content = result.json()\n # pylint: disable=broad-except\n except Exception:\n response_content = result.text\n\n exc_map = {\n 300: SFDC_MoreThanOneRecord,\n 400: SFDC_MalformedRequest,\n 401: SFDC_ExpiredSession,\n 403: SFDC_RefusedRequest,\n 404: SFDC_ResourceNotFound,\n }\n exc_cls = exc_map.get(result.status_code, SFDC_GeneralError)\n\n raise exc_cls(result.url, result.status_code, name, response_content)" ]
[ "0.71784735", "0.71308863", "0.71102184", "0.70657563", "0.7006739", "0.6947597", "0.68516433", "0.6658565", "0.6613221", "0.65786", "0.6538273", "0.6400244", "0.63673776", "0.6355248", "0.63404506", "0.63398343", "0.63361794", "0.6322117", "0.6317077", "0.63164055", "0.6314089", "0.63062376", "0.6300083", "0.6293652", "0.626999", "0.62655836", "0.62649393", "0.62064344", "0.61982256", "0.61935097", "0.6191082", "0.6184943", "0.6182625", "0.61549264", "0.6144541", "0.6129078", "0.6119199", "0.61146724", "0.6102855", "0.6100612", "0.6091169", "0.6080231", "0.60503924", "0.60487485", "0.60468584", "0.6014821", "0.60017544", "0.5977885", "0.5975548", "0.597296", "0.59571", "0.5949615", "0.5946082", "0.5939588", "0.5921178", "0.5898591", "0.58937234", "0.5884523", "0.5877507", "0.5877423", "0.5877423", "0.5871477", "0.5867385", "0.58580583", "0.5856701", "0.58477163", "0.58402956", "0.5836776", "0.58273435", "0.58224124", "0.5821785", "0.58089286", "0.58080506", "0.580794", "0.5795462", "0.5783298", "0.57826763", "0.5782057", "0.5775782", "0.5772119", "0.57670957", "0.5764814", "0.57590777", "0.5757924", "0.5755268", "0.5750215", "0.5749138", "0.5747428", "0.57443637", "0.5737399", "0.57349694", "0.5731255", "0.57282245", "0.57268476", "0.5708971", "0.5708167", "0.5705653", "0.5693624", "0.56858397", "0.56848794", "0.5681261" ]
0.0
-1
Upload File on Streamlit Code
def run(self):
    st.title('Acne Classifier')
    st.markdown(STYLE, unsafe_allow_html=True)
    file = st.file_uploader("Upload file", type=self.fileTypes)
    show_file = st.empty()
    if not file:
        show_file.info("Please upload a file of type: " + ", ".join(["png", "jpg"]))
        return
    file_bytes = np.asarray(bytearray(file.read()), dtype=np.uint8)
    opencv_image = cv2.imdecode(file_bytes, 1)
    cv2.imwrite('out.jpg', opencv_image)
    df = get_score()
    df2 = df.set_index('Issue')
    st.dataframe(df2)
    st.bar_chart(df2)
    if isinstance(file, BytesIO):
        show_file.image(file)
    else:
        data = pd.read_csv(file)
        st.dataframe(data.head(10))
    file.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upload():\n\treturn render_template(\"upload.html\", title=\"Upload a file\")", "def upload_file(self, file_path, file_name, output_path):", "def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)", "def upload(self, filename, file_path):\n return", "def upload(self, asset, file):\n uploader = FrameioUploader(asset, file)\n uploader.upload()", "def upload_file(self, f):\n return self._telegraph.upload_file(f)", "def upload_file(self):\n self.master.switch_frame(UploadFileView)", "def upload_file( processor, user, local_path ):\n operations.publish_work_item(\n operations.create_asset_from_file(\n file_name = local_path,\n owner = user,\n producer = processor,\n child_number = 0,\n asset_class = models.AssetClass.UPLOAD ))", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def file_upload():\n\n click.secho('*** Uploading image...', fg='green')\n uploaded = _uploaded_file('cover.jpg')\n click.secho(json.dumps(uploaded, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Creating a Picture document for it...', fg='green')\n picture = _make_document('picture', title='cover image', sys_filename=uploaded['path'])\n click.secho(json.dumps(picture, indent=2, sort_keys=True), fg='yellow')\n\n click.secho('*** Attaching it to a Blueray as cover...', fg='green')\n slp = _make_document('movie', title='Silver Linings Playbook')\n blueray = _make_document('blueray', movie_id=slp['_id'], cover_id=picture['_id'])\n click.secho(json.dumps(blueray, indent=2, sort_keys=True), fg='yellow')", "def upload_file():\n global gui\n print(request.data)\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n print(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n gui.controller.main('openFile %s' % os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect('/data')\n return render_template('upload_file.html')", "def upload(state, name, wid, file):\n client = state.api_client\n\n # Get the workspace details\n w_details = helpers.workspace.details(client, wid, name)\n if w_details is None:\n # Can only happen when the name is used and there are no results. Not\n # with the wid option because it would raise a 404 QuetzalAPIException\n raise click.ClickException(f'Workspace named \"{name}\" does not exist.')\n\n file_details = helpers.workspace.upload(client, w_details.id, file)\n click.secho(f'File {file.name} uploaded successfully. 
Its id is {file_details.id}.',\n fg='green')", "def uploadimg():\n print(str(pathlib.Path(__file__).resolve().parents[1])+\"im hereeeeeeeeeeeeeeeeeeeeeeeee\")\n path = str(pathlib.Path(__file__).resolve().parents[1])\n target = os.path.join(path,'Facial recognition/dataset')\n email = session['username']\n target = target+'/'+email\n # app_root, 'C:/Users\\meetp\\OneDrive\\Desktop\\IotAssigment2\\src\\Facial recognition\\dataset/')\n # print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n print(destination)\n file.save(destination)\n\n # encode the image\n # en = encode()\n # en.run(target)\n\n return render_template(\"imguploaded.html\")", "def upload(request):\n # return render(request, 'upload.html')\n # print(request.FILES)\n if request.FILES == {}:\n return render(request, 'simple_upload.html')\n else:\n request.method == \"POST\" and request.FILES['myfile']\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n messages.success(request, 'Your Book was added successfully!')\n return render(request, 'simple_upload.html', {'uploaded_file_url': uploaded_file_url})", "def _upload_file_to_file_system(upload_details):\n upload_url = \"%s%s\" % (main_url, upload_details['upload_path'])\n fsysparams = {\n 'qqfile': upload_filepath,\n 'import_record': upload_dataset_id,\n 'source_type': upload_datatype\n }\n return requests.post(upload_url,\n params=fsysparams,\n files={'file': open(upload_filepath, 'rb')},\n headers=upload_header)", "def upload():\n uploaded_file = request.files.get('file')\n\n if not uploaded_file:\n return 'No file uploaded.', 400\n\n # Create a Cloud Storage client.\n gcs = storage.Client()\n\n # Get the bucket that the file will be uploaded to.\n bucket = gcs.get_bucket('foodie_helper_bucket_1')\n #app.config['CLOUD_STORAGE_BUCKET']\n # Create a new blob and upload the file's content.\n blob = bucket.blob(uploaded_file.filename)\n\n blob.upload_from_string(\n uploaded_file.read(),\n content_type=uploaded_file.content_type\n )\n\n # The public URL can be used to directly access the uploaded file via HTTP.\n result = runImage(blob.public_url)\n machineResult = getConcept(result)\n return render_template('results.html', url=blob.public_url, machineResult=machineResult)\n #return render_template('results.html', url=\"https://www.foodiesfeed.com/wp-content/uploads/2019/02/pizza-ready-for-baking.jpg\", machineResult=\"Pizza\")", "def upload(self):\n # TODO: Should CD to the working directory set by the robscript.\n src = self.state_frame[0]\n dest = self.state_frame[1]\n self.send_upload(src, dest, True, None)\n self.state = STATE_READ_LINE", "def upload_local_file(self, path_to_file, name):\n file1 = self._drive.CreateFile()\n file1.SetContentFile(path_to_file)\n file1['title'] = name\n file1.Upload()\n print('File successfully uploaded!')", "def upload(url, file_data=io.BytesIO(b'ShareX text upload test'), file_name='Test.txt', form_name='sharex', secret=None, field_name='secret'):\n files = {form_name: (file_name, file_data, 'text/plain')}\n\n data = {field_name: secret} if secret is not None else {}\n\n return requests.post(url, headers={'User-Agent': 'ShareX/13.2.1'}, files=files, data=data)", "def upload_preset(self, filename, title, description, version, author, REQUEST=None):\r\n\r\n # TODO presets.py - upload_preset - specify how 
to authenticate\r\n\r\n raise NotImplementedError", "def upload():\n return handle_upload(app, request)", "def upload_file(self, source, dest):\n print(f\"Uploading {source} to {dest}\")\n with open(source, \"rb\") as data:\n self.client.upload_blob(name=dest, data=data)", "def sendRequest(event, context):\n file = event\n print(f\"Processing file: {file['name']}.\")\n\n filename = file['name']\n\n url = 'http://34.123.136.112:5000'\n myobj = {'filename': filename}\n\n x = requests.post(url, data = myobj)\n\n print(x.text)", "def upload_doc():\n file = request.files[\"file\"]\n meta_data = {\"name\": request.form[\"name\"].lower()}\n file_id = save_file(meta_data, file)\n print('file-id: ' + file_id)\n index_after_uploading(file_id)\n return jsonify({\"file_id\": file_id})", "def _upload(self):\r\n loc = os.path.dirname(__file__)\r\n del_file = open(os.path.join(loc, 'delicious.html'))\r\n res = self.app.post(\r\n '/admin/import',\r\n params={'api_key': self.api_key},\r\n upload_files=[('import_file',\r\n 'delicious.html',\r\n del_file.read())],\r\n )\r\n return res", "def api_upload():\n return make_response(file_manager.save_uploaded_file(), 200)", "def upload_file(name):\n subprocess.check_output(cmd_preamble + [\"cp\", name, f\"jot://{name}\"])", "def upload_training_file(self):\n\n file_path = os.getcwd() + \"/\" + self.console_label.training_file_name\n\n with open(file_path, 'r') as f:\n r = requests.post(self.upload_url, files={'file': f})\n\n if r.status_code != requests.codes.ok:\n messagebox.showerror(\"Error\", \"The training file could not be uploaded!\")", "def upload_file(self):\n request = copy.deepcopy(self.request_template)\n data = json.dumps(request)\n curr_file = {\n 'request': data,\n 'file': open(self.file_path, 'rb')\n }\n print(\"Sending Upload request of av for file {}\".format(self.file_name))\n try:\n response = requests.post(url=self.url + \"upload\", files=curr_file, verify=False)\n except Exception as E:\n print(\"Upload file failed. 
file: {} , failure: {}\".format(self.file_name, E))\n raise\n response_j = response.json()\n print(\"av Upload response status for file {} : {}\".format(self.file_name,\n response_j[\"response\"][0][\"status\"][\"label\"]))\n return response_j", "def upload_samples(args):\n clarity_epp.upload.samples.from_helix(lims, config.email, args.input_file)", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def submitFiles(self):\n formData =__new__(FormData)();\n \"\"\"\n Iteate over any file sent over appending the files\n to the form data.\n \"\"\"\n i=0\n console.log(self.vue.files)\n while i < self.vue.files.length:\n file = self.vue.files[i];\n formData.append('files[' + i + ']', file);\n i+=1\n \"\"\"\n Make the request to the POST /file-drag-drop URL\n \"\"\"\n formData.append(\"type\",\"upload\")\n __pragma__ ('jsiter') \n fetch('/json/plugins/',\n {\n \"method\":\"POST\",\n \"body\":formData,\n })\\\n .then(lambda res:res.json())\\\n .then(self.uploaded)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def upload(self, upload_request):\n raise NotImplementedError", "def fileupload():\r\n response.view = 'generic.json'\r\n\r\n if not session.add_files:\r\n session.add_files = []\r\n\r\n def GET(tulip_url, file=None, deletefile=None, uploads=None, commit=None):\r\n try:\r\n tulip_url = request.args[0]\r\n tulip = Tulip(url=tulip_url)\r\n except:\r\n return json.dumps({\"success\": \"false\"})\r\n if not tulip.is_wb():\r\n return json.dumps({\"success\": \"false\"})\r\n\r\n if deletefile:\r\n session.add_files = [f for f in session.add_files \\\r\n if f.filename != deletefile]\r\n return json.dumps(FileUpload.delete(uploads=True))\r\n elif file:\r\n upload = json.loads(FileUpload.get())\r\n\r\n filedir = FileUpload.get_file_dir(leak_id=tulip.leak.id)\r\n\r\n src_file = os.path.join(request.folder, 'uploads',\r\n session.upload_dir, upload[0]['name'])\r\n dst_folder = os.path.join(request.folder, 'material', filedir)\r\n\r\n return json.dumps(upload)\r\n elif commit:\r\n # print \"Session value: %s\" % session.add_files\r\n if not session.add_files:\r\n return json.dumps({\"success\": \"false\"})\r\n filedir = FileUpload.get_file_dir(leak_id=tulip.leak.id)\r\n # finding right progressive number\r\n prog = 1\r\n dst_folder = os.path.join(request.folder, 'material',\r\n filedir, str(prog))\r\n while os.path.exists(dst_folder):\r\n prog += 1\r\n dst_folder = os.path.join(request.folder, 'material',\r\n filedir, str(prog))\r\n os.makedirs(dst_folder)\r\n\r\n for filedata in session.add_files:\r\n if os.path.exists(os.path.join(request.folder,\r\n 'uploads', session.upload_dir,\r\n filedata.filename)):\r\n src_file = os.path.join(request.folder, 'uploads',\r\n session.upload_dir, filedata.filename)\r\n try:\r\n shutil.move(src_file,\r\n os.path.join(dst_folder.decode(\"utf-8\"),\r\n filedata.filename))\r\n except OSError:\r\n pass\r\n else:\r\n session.add_files.remove(filedata)\r\n\r\n 
tulip.leak.add_material(tulip.leak.id, prog, None,\r\n file=json.dumps(session.add_files))\r\n add_files = [(f.ext, f.filename, f.size)\r\n for f in session.add_files]\r\n session.add_files = None\r\n # Leak needs to be spooled again\r\n db(db.leak.id == tulip.leak.id).update(spooled=False)\r\n\r\n for t_id in gl.get_targets(None):\r\n target = gl.get_target(t_id)\r\n try:\r\n t_url = db((db.tulip.leak_id==tulip.leak.id) & (db.tulip.target_id==t_id.id)).select().first().url\r\n db.notification.insert(target=target.name,\r\n address=target.contact,\r\n tulip=t_url,\r\n leak_id=tulip.leak.id,\r\n type=\"material\")\r\n except:\r\n print \"problem in adding to notification DB\"\r\n\r\n db.commit()\r\n\r\n return json.dumps({\"success\": \"true\", \"data\": add_files})\r\n elif uploads:\r\n return \"not implemented\"\r\n else:\r\n return json.dumps({\"success\": \"false\"})\r\n\r\n def POST(tulip_url, **vars):\r\n try:\r\n tulip = Tulip(url=tulip_url)\r\n except:\r\n return json.dumps({\"success\": \"false\"})\r\n if not tulip.is_wb():\r\n return json.dumps({\"success\": \"false\"})\r\n upload = FileUpload.post(tulip.leak.id)\r\n\r\n upload = json.loads(upload)\r\n\r\n filedata = Storage()\r\n\r\n # Store the number of bytes of the uploaded file\r\n filedata.bytes = upload[0]['size']\r\n\r\n # Store the file size in human readable format\r\n filedata.size = mutils.human_size(filedata.bytes)\r\n\r\n filedata.fileid = upload[0]['id']\r\n\r\n # Store filename and extension\r\n filedata.filename = upload[0]['name']\r\n\r\n filedata.ext = mutils.file_type(upload[0]['name'].split(\".\")[-1])\r\n\r\n session.add_files.append(filedata)\r\n\r\n return json.dumps(upload)\r\n\r\n return locals()", "def main():\n st.info(\n \"This webpage lets you upload wav audio file and transribe it to Amharic, CHECK THAT OUT !!\")\n st.markdown(STYLE, unsafe_allow_html=True)\n st.header(\"Upload audio file\")\n file = st.file_uploader(\"Audio file\", type=FILE_TYPES)\n show_file = st.empty()\n if not file:\n show_file.info(\"Please upload a file of type: \" +\n \", \".join(FILE_TYPES))\n return\n\n file_type = get_file_type(file)\n if file_type == FileType.PYTHON:\n st.code(file.getvalue())\n\n elif file_type == FileType.SOUND:\n # st.code(file.getvalue())\n audio_bytes = file.read()\n st.audio(audio_bytes, format=\"audio/ogg\")\n\n else:\n data = pd.read_csv(file)\n st.dataframe(data.head(10))\n\n with open(os.path.join(\"./tempfile\", file.name), \"wb\") as f:\n f.write(file.getbuffer())\n st.success(\"Processing File..\")\n\n st.header(\"Transcribe audio\")\n if st.button('Transcribe'):\n st.write(\"\")\n with st.spinner('wait for it ...'):\n time.sleep(60)\n st.success('Done!')\n else:\n st.write('')\n\n # if file:\n # token, t_id = upload_file(file)\n # result = {}\n # #polling\n # sleep_duration = 1\n # percent_complete = 0\n # progress_bar = st.progress(percent_complete)\n # st.text(\"Currently in queue\")\n # while result.get(\"status\") != \"processing\":\n # percent_complete += sleep_duration\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent_complete/10)\n # result = get_text(token,t_id)\n\n # sleep_duration = 0.01\n\n # for percent in range(percent_complete,101):\n # time.sleep(sleep_duration)\n # progress_bar.progress(percent)\n\n # with st.spinner(\"Processing.....\"):\n # while result.get(\"status\") != 'completed':\n # result = get_text(token,t_id)\n\n # st.balloons()\n # st.header(\"Transcribed Text\")\n # st.subheader(result['text'])\n\n file.close()", "def put( filename, file_type = 
'auto', history_id = None ):\n conf = _get_conf()\n gi = get_galaxy_connection()\n tc = ToolClient( gi )\n history_id = history_id or _get_history_id()\n tc.upload_file(filename, history_id, file_type = file_type)", "def uploadFile(self, filename, name=\"Dummy name\", type=\"DummyType\"):\n\n with open(filename, 'rb') as f:\n data = f.read()\n\n if (name == \"Dummy name\"):\n name = filename\n\n data = {'name': name,\n 'type': type,\n 'bits': xmlrpclib.Binary(data),\n 'overwrite': True}\n\n try:\n r = self.server.wp.uploadFile(\n self.blogid, self.username, self.password, data)\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"upload file \" + filename, fault)\n\n #FIXME: do we really need to split the url ?\n try:\n r['url'] = r['url'].split('?')[1]\n except IndexError:\n from urlparse import urlparse\n r['url'] = urlparse(r['url']).path\n\n print \"uploaded file file =\", r['file']\n print \"uploaded file url =\", r['url']\n print \"uploaded file type =\", r['type']", "def post(cls, flow_name: str):\n data = file_schema.load(request.files) # {\"file\": FileStorage}\n try:\n file_path = uploads.save_file(data[\"file\"], folder=flow_name)\n basename = uploads.get_basename(file_path)\n return {\"message\": gettext(\"file_uploaded\").format(basename)}, 200\n \n except UploadNotAllowed:\n extension = uploads.get_extension(data[\"file\"])\n return {\"message\": gettext(\"file_illegal_extension\").format(extension)}, 400", "def file_upload(self, req, folder_path):\n\t\tresult, filename = self.handle_upload(req, folder_path)\n\t\tfile_url = self.selected_root['url_callback'](req, folder_path, filename)\n\t\t\n\t\tself.content_type = 'text/html'\n\t\tself.content = [str(tags.script(type=\"text/javascript\")[\n\t\t\t\t\t\t\"window.parent.frames['frmUpload'].OnUploadCompleted(%s, '%s');\\n\" % (result, filename)\n\t\t\t\t\t\t])]", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def upload():\n file = None\n if 'file' in request.files:\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return json_response(\n message=\"Upload successful\",\n result=\"/v/{}\".format(filename)\n )\n return json_response(\n message=\"Invalid filename or extension (jpg, png, gif)\",\n status_code=500\n )", "def upload_file(self, input_file_path, input_file_desc=\"\", input_content_type=DEFAULT_CTNT_TYPE_JSON, input_public_flag=True):\n # init varirable\n input_file_name = os.path.basename(input_file_path)\n\n # get gist list and generate gist file table\n list_gists_obj = self.list_gists()\n if list_gists_obj:\n gist_file_table_dict = self.generate_gist_file_table(list_gists_obj)\n else:\n logger.error(\n \"Cannot get gist list of user [%s], skip upload file to avoid file name duplicate!\" % self.user_name)\n return None\n\n # get file content\n if os.path.exists(input_file_path):\n with open(input_file_path, 'rb') as fh:\n if input_content_type == self.DEFAULT_CTNT_TYPE_JSON:\n file_data = json.dumps(json.load(fh)).replace('\\\\', '\\\\\\\\').replace('\"', '\\\\\"')\n else:\n file_data = fh.read()\n else:\n logger.error(\"The file[%s] you specify for uploading is not exist!\" % input_file_path)\n return None\n\n 
# create new gist if file not exist, update gist if file exists\n if input_file_name in gist_file_table_dict:\n response_gist_obj = self.update_existing_gist(gist_file_table_dict[input_file_name][\"id\"], input_file_name, file_data, input_file_desc, input_content_type)\n else:\n response_gist_obj = self.create_new_gist(input_file_name, file_data, input_file_desc, input_content_type, input_public_flag)\n\n # get raw url of upload file\n if response_gist_obj:\n tmp_file_list = response_gist_obj.json().get(\"files\", {}).values()\n if len(tmp_file_list) == 1:\n file_download_url = tmp_file_list[0].get(\"raw_url\", None)\n else:\n logger.error(\"Gist upload failed, return obj format incorrect [%s]\" % response_gist_obj.json())\n return None\n return file_download_url\n else:\n return None", "def anon_upload(infile: str):\n if exists(infile):\n URL = upload(infile)\n return URL\n return 5", "def get(self):\n\n upload_url = blobstore.create_upload_url('/upload')\n\n self.response.headers['content-type'] = 'text/plain'\n self.response.out.write(upload_url)", "def upload():\n return render_template(\n 'upload.html',\n title='upload',\n year=datetime.now().year,\n message='Your contact page.'\n )", "def upload_file(self, file_name, content):\n return self.files.upload(file_name, content)", "def ocr():\n return render_template('upload.html')", "def Uploads1():\n if request.method==\"POST\":\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file=request.files[\"file\"]\n file.save(os.path.join(\"Wind\", file.filename))\n return render_template(\"index.html\", message = \"File Uploaded Successfuly\")\n return render_template(\"index.html\", message = \"Upload Wind Maintenance File\")\n return \"File Uploaded!\"", "def chunk_upload_to(instance, filename):\n return os.path.join(FLOWJS_PATH, instance.filename)", "def upload_file(self):\n \n try:\n with open(self.full_path_of_file, 'rb') as f:\n r = requests.post(self.upload_url, files={'file': f})\n\n self.selected_file_name.configure(text=\"<Selected file name>\")\n\n if r.status_code == requests.codes.ok:\n self.__set_full_path_of_file(None)\n messagebox.showinfo(\"Information\", \"File uploaded successfully!\")\n else:\n messagebox.showerror(\"Error\", \"Could not upload file\")\n except AttributeError:\n # this exceptions is raised when the 'Upload file' button was pressed but\n # no file was previously selected\n pass\n except TypeError:\n # this exceptions is raised when the 'Upload file' button was pressed \n # after the user already uploaded a file. 
Now a new file shoud be selected\n # and uploaded or just go Back to the main menu\n pass", "def fileUpload(fieldName):\n## we don't deal with OS specific \"\\n\"\n## because R does not have a problem (at least with Windows files)\n## no problem in R either with empty carriage returns at end of file\n \n if fs.has_key(fieldName):\n fileClient = fs[fieldName].file\n if not fileClient:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> The \", fieldName, \"file you entered is not a file </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n else:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> \", fieldName, \"file required </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n \n # transferring files to final destination;\n\n fileInServer = tmpDir + \"/\" + fieldName\n srvfile = open(fileInServer, mode = 'w')\n fileString = fs[fieldName].value\n srvfile.write(fileString)\n srvfile.close()\n\n os.chmod(fileInServer, 0666)\n \n if os.path.getsize(fileInServer) == 0:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\"\n print \"<p>\", fieldName, \" file has size 0 </p>\"\n print \"<p> Please enter a file with something in it.</p>\"\n print \"<p> (Did you enter only a single file, but did not check 'One file'?\\\n If you are using only one file, the 'Two files' button should not be checked.)</p>\"\n print \"</body></html>\"\n sys.exit()", "def put_upload_import_file() -> str:\n upload = request.files.get(\"file\", None)\n\n if not upload:\n raise FavaAPIError(\"No file uploaded.\")\n if not upload.filename:\n raise FavaAPIError(\"Uploaded file is missing filename.\")\n filepath = filepath_in_primary_imports_folder(upload.filename, g.ledger)\n\n if filepath.exists():\n raise TargetPathAlreadyExistsError(filepath)\n\n if not filepath.parent.exists():\n filepath.parent.mkdir(parents=True)\n\n upload.save(filepath)\n\n return f\"Uploaded to {filepath}\"", "def action(self):\n return blobstore.create_upload_url(self.upload_url)", "def studio_submit(self, data, suffix=''):\n self.oppiaid = data.get('oppiaid')\n self.src = data.get('src')\n self.width = data.get('width')\n self.height = data.get('height')\n\n return {'result': 'success'}", "def uploadFile(self,path):\n\n response = requests.post('https://api.imagga.com/v1/content',\n auth=(self.apikey, self.secret),\n files={'image': open(path, 'r')})\n json_data = json.loads(response.text)\n uploadedData=json_data[u'uploaded'][0]\n resourceId=uploadedData[u'id']\n filename = uploadedData[u'filename']\n self.fileToIdMap[filename] = resourceId\n self.getTagsUsingId(resourceId)", "def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")", "async def upload(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n payload = await request.post()\n\n filename = payload['file'].filename\n upload_stream = payload['file'].file\n\n ext = os.path.splitext(filename)[1]\n\n if not re_filename_ext.match(ext):\n # paranoid check in case a 
script doesn't protect from code injection\n raise web.HTTPBadRequest(text='file extension not supported: %s' % filename)\n\n camera_id = uuid.uuid1().hex\n\n log = request['slog']\n log.debug('request: camera upload', filename=filename)\n\n config = request.app.config\n\n tmppath = dump_stream(config['media']['tempdir'], upload_stream)\n\n log.debug('file dump', camera_id=camera_id, tmppath=tmppath)\n\n await Camera.insert(request,\n camera_id=camera_id,\n filename=filename,\n project_id=project.project_id)\n\n await request.app.task_broker.publish('camera_upload', {\n 'userid': userid,\n 'project_id': project.project_id,\n 'camera_id': camera_id,\n 'tmppath': tmppath,\n 'filename': filename\n }, log=log)\n\n response_js = {\n 'camera_file_id': camera_id\n }\n\n return web.json_response(response_js, status=HTTPStatus.CREATED)", "def PostInputsFile(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def upload_file(cls, uri, fobj):\n msg = \"Backend doesn't implement upload_file()\"\n raise NotImplementedError(msg)", "def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass", "def test_upload_file(self):\n pass", "def click_upload_button(self):\n self.click_element(self.upload_button_locator)", "def upload_from_filename(self, file_name, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_filename(file_name, **keyword_args)\n print(f\"Upload file {file_name} and name as {name_on_storage}\")", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def use_uploadpy(request):\n if request.method == 'POST':\n return HttpResponseRedirect(reverse(customized_upload_py))\n return respond(request, 'use_uploadpy.html')", "def fusion_api_upload_firmware_bundle(self, localfile, api=None, headers=None):\n return self.bundle.upload(localfile, api, headers)", "def upload_sample(self, file):\n return self._upload_sample(file)", "def upload_from_file(self, file_obj, name_on_storage, **keyword_args):\n blob = self.bucket.blob(name_on_storage)\n blob.upload_from_file(file_obj, **keyword_args)\n print(f\"Upload object {name_on_storage}\")", "def action_upload(self):\n options=self.env['plm.config.settings'].GetOptions()\n status = 'uploaded'\n action = 'upload'\n default = {\n 'state': status,\n 'engineering_writable': False,\n }\n doc_default = {\n 'state': status,\n 'writable': False,\n }\n operationParams = {\n 'status': status,\n 'statusName': _('Uploaded'),\n 'action': action,\n 'docaction': 'uploaddoc',\n 'excludeStatuses': ['uploaded', 'confirmed', 'transmitted','released', 'undermodify', 
'obsoleted'],\n 'includeStatuses': ['draft'],\n 'default': default,\n 'doc_default': doc_default,\n }\n if options.get('opt_showWFanalysis', False):\n return self.action_check_workflow(operationParams)\n else:\n ids=self._ids\n self.logging_workflow(ids, action, status)\n return self._action_to_perform(ids, operationParams, default)", "def upload_sales_data() -> str:\r\n # Contains CSV file uploaded by the user.\r\n response = request.files\r\n # True if response contains a file.\r\n if bool(response):\r\n csv_file = response.get(\"csv_file\")\r\n csv_filename = csv_file.filename\r\n # Saves CSV file in the folder where this module is located.\r\n csv_file.save(csv_filename)\r\n log_message = \"CSV file was successfully uploaded.\"\r\n app.config[\"logger\"].info(log_message)\r\n return \"Upload was successful!\"\r\n return \"\"\"<style>\r\n h1, h2, h3 {\r\n font-family: arial, sans-serif;\r\n }\r\n </style>\r\n <h2>Upload sales data in a CSV file</h2>\r\n <form enctype=\"multipart/form-data\" method=\"POST\">\r\n <input type=\"file\" name=\"csv_file\" accept=\".csv\">\r\n <br><br>\r\n <input type=\"submit\" value=\"Upload CSV\">\r\n </form>\r\n <form action=\"/\" method=\"POST\">\r\n <input type=\"hidden\">\r\n <br>\r\n <input type=\"submit\" value=\"Go back to tracking screen\">\r\n </form>\"\"\"", "def test_mutation_file_upload(client):\n query = '''mutation M {\n updatePerson(id: 1,avatar: \"cat1.jpg\") {\n person {\n id name age avatar\n }\n }\n }\n '''\n data = {\n 'query': query,\n 'file': (open('files/cat1.jpg'), 'cat1.jpg'),\n }\n response = client.post(\n '/graphql', data=data,\n content_type='multipart/form-data',\n )\n\n expected_response = '{\"data\":{\"updatePerson\":{\"person\":{\"id\":1,\"name\":null,\"age\":34.0,\"avatar\":\"/files/cat1.jpg\"}}}}'\n assert response.data == expected_response", "def upload_file(self, file: Union[str, bytes, StringIO, TextIOWrapper, BytesIO], filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "async def hastebin_upload(self, text: str) -> Union[str, None]:\n req = await self.session.post(\"https://hastebin.com/documents\", data=text)\n reqjson = None\n try:\n reqjson = await req.json()\n key = reqjson[\"key\"]\n except (TypeError, KeyError, aiohttp.ContentTypeError):\n print_error(f\"[red]Could not upload error,[/] Raw Data: {reqjson or 'Could not get raw data'}\")\n url = None\n else:\n url = f\"https://hastebin.com/{key}.txt\"\n return url", "def upload_file():\r\n if not LOGGEDIN:\r\n return render_template(\"login_temp.html\", msg=\"\")\r\n\r\n if request.method == 'POST':\r\n firstname = flask.request.form[\"firstname\"]\r\n lastname = flask.request.form[\"lastname\"]\r\n city = flask.request.form[\"city\"]\r\n state = flask.request.form[\"state\"]\r\n status = flask.request.form[\"status\"]\r\n date = flask.request.form[\"date\"]\r\n photo = flask.request.form[\"photo\"]\r\n\r\n f_d = open(\"users/\" + lastname + firstname + \".txt\", \"a\")\r\n f_d.write(firstname + \"\\n\")\r\n f_d.write(lastname + \"\\n\")\r\n f_d.write(city + \"\\n\")\r\n f_d.write(state + \"\\n\")\r\n f_d.write(status + \"\\n\")\r\n f_d.write(date + \"\\n\")\r\n f_d.write(photo + \"\\n\")\r\n f_d.close()\r\n return render_template(\"home.html\")\r\n else:\r\n return render_template('check_in.html')", "def form_File(request):\n schema = schemaish.Structure()\n schema.add('myFile', schemaish.File())\n form = formish.Form(schema, 'form')\n return form", "def uploadResourceFile(url, user, pWd, resourceName, fileName, fullPath):\n 
print(\"uploading file for resource \" + url + \" resource=\" + \n resourceName + ' user=' + user)\n apiURL = url + '/access/1/catalog/resources/' + resourceName + \"/files\"\n print(\"\\turl=\" + apiURL)\n # header = {\"accept\": \"*/*\", \"Content-Type\" : \"multipart/form-data\"} \n header = {\"accept\": \"*/*\"} \n print(\"\\t\" + str(header))\n params = {\"scannerid\": \"LineageScanner\", \"filename\": fileName, \n \"optionid\": \"File\"}\n print(\"\\t\" + str(params))\n# files = {'file': fullPath}\n mimeType = 'text/csv'\n readMode = 'rt'\n if fileName.endswith(\".zip\"):\n mimeType = 'application/zip'\n readMode = 'rb'\n \n file = { 'file' : (fileName, open(fullPath, readMode), mimeType)}\n print('\\t' + str(file))\n uploadResp = requests.post(apiURL, data=params, files=file, headers=header, \n auth=HTTPBasicAuth(user,pWd))\n print(\"\\tresponse=\" + str(uploadResp.status_code))\n if uploadResp.status_code == 200:\n # valid - return the jsom\n return uploadResp.status_code\n else:\n # not valid\n print(\"\\tupload file failed\")\n print(\"\\t\" + str(uploadResp))\n print(\"\\t\" + str(uploadResp.text))\n return uploadResp.status_code", "def upload2notion(page, name, tags):\n row = page.collection.add_row()\n row.name = str(name.stem)\n row.Tags = tags\n for image_path in image_dir.iterdir():\n image = row.children.add_new(ImageBlock)\n image.upload_file(str(image_path))\n pdf = row.children.add_new(FileBlock)\n pdf.upload_file(str(name))", "def uploaded_file(filename):\n return send_from_directory('/static/images/uploads/', filename)", "def upload():\n\n # TODO: decorator to check token\n token = request.headers.get(\"Authorization\")\n\n has_text = bool(request.get_json())\n has_file = request.files and request.files[\"file\"]\n if not has_text and not has_file:\n error = \"No text input and no file provided\"\n return jsonify({\"success\": False, \"message\": error})\n\n filename, error = save_text(request)\n if error:\n return jsonify({\"success\": False, \"message\": error})\n\n job_id = schedule(filename, token)\n add_user_job(job_id, token)\n\n return jsonify({\"success\": True, \"data\": {\"jobId\": job_id}})", "def upload_file(self, session, output, serverdir):\n name = output.metadata['filename']\n self.log.debug(\"uploading %r to %r as %r\",\n output.file.name, serverdir, name)\n\n kwargs = {}\n if self.blocksize is not None:\n kwargs['blocksize'] = self.blocksize\n self.log.debug(\"using blocksize %d\", self.blocksize)\n\n upload_logger = KojiUploadLogger(self.log)\n session.uploadWrapper(output.file.name, serverdir, name=name,\n callback=upload_logger.callback, **kwargs)\n path = os.path.join(serverdir, name)\n self.log.debug(\"uploaded %r\", path)\n return path", "def Uploads():\n if request.method==\"POST\":\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file=request.files[\"file\"]\n file.save(os.path.join(\"Solar\", file.filename))\n return render_template(\"index.html\", message = \"File Uploaded Successfuly\")\n return render_template(\"index.html\", message = \"Upload Solar Maintenance File\")\n return \"File Uploaded!\"", "def gcloud_upload_file(file):\n if not file:\n return None\n\n public_url = storage.upload_file(\n file.read(),\n file.filename,\n file.content_type\n )\n\n current_app.logger.info(\n \"Uploaded file %s as %s.\", file.filename, public_url)\n\n return public_url", "def upload_sna_viz_data_csv(request):\n if request.method == \"POST\":\n \n f = request.FILES['dataset']\n handle_uploaded_file(f, 
'temp/sna_viz/sna_viz_dataset.csv')\n\n return HttpResponse(\n json.dumps({\"status\": \"success\"}),\n content_type=\"application/json\"\n )\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )", "def UploadFile(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def upload_file():\n if request.method == 'POST':\n user_details = request.form\n print(user_details)\n file = request.files['myfile']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n image = cv2.imread(filename)\n emb = singleimg_embedding(image)\n\n add_person = MissingPerson(id=user_details['id'], first_name=user_details[\n 'first_name'], last_name=user_details['last_name'],\n last_seen=user_details['last_seen'],\n embedding=emb)\n print(add_person)\n db.session.add(add_person)\n db.session.commit()\n return 'Success'\n return render_template('index.html')", "def main():\r\n activities = [\"EDA\",\"Plots\"]\t\r\n choice = st.sidebar.selectbox(\"Select Activities\",activities)\r\n\r\n if choice == 'EDA':\r\n result = st.file_uploader(\"Upload\", type=\"txt\")\r\n\r\n # filename =st.text_input('Enter a file path:')\r\n try:\r\n if result:\r\n # Process you file here\r\n data = result.getvalue()\r\n # file1 = open(filename,\"r\") \r\n # data=file1.read()\r\n data=data.lower().replace('\\n','')\r\n # file1.close() \r\n st.write(data[:200])\r\n obj=Lyrics()\r\n add_split = st.sidebar.slider(\r\n 'Select a split of values',\r\n 2, 25\r\n )\r\n st.write(\"Select Split from Left Slider .\")\r\n if add_split>3:\r\n # split=st.text_input(\"Enter String split for Prediction :\")\r\n gen=obj.generator(data=data,split=int(add_split))\r\n if gen:\r\n startString=st.text_input(\"Enter Starting String for Prediction :\")\r\n if len(startString)>0:\r\n val=st.sidebar.slider(\r\n \"How many char's want's to Prediction :\",\r\n 100, 1000\r\n )\r\n st.write(\"Select no of char's want's to Prediction from Left Slider .\")\r\n if val>100:\r\n final_op=obj.future_data(startString,val,add_split)\r\n st.write(final_op)\r\n except FileNotFoundError:\r\n st.error('File not found.')\r\n except IndexError:\r\n st.error('Select only one Author. ')\r\n except KeyError:\r\n st.error(\"Enter correct Integer. 
\")", "def GET_upload_sr_img(self, *a, **kw):\r\n return \"nothing to see here.\"", "def upload_submission_file(self, config_file, data_version, session_id, tag='stable'):\n\n submission_file_name = self \\\n .generate_submission_filename(config_file, data_version, session_id, tag)\n submission_file_name = 'submissions/{}'.format(submission_file_name)\n source_file_name = os.path.join(os.environ['PROJ_HOME'], submission_file_name)\n\n GoogleStorage().upload_blob(self.bucket_name, source_file_name, submission_file_name)\n print('Uploaded submission file {}'.format(source_file_name))", "def click_upload_statement_button(self):\n self.click_element(self.upload_statement_button_locator)", "def transfer(file_obj):", "def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()", "def upload(self,toolname,data,username,userpass):\n\n msg = 'initial upload of source code'\n self.upload_code(toolname,data,username,userpass,msg)\n\n # navigate to the tool status page\n po = self.catalog.load_pageobject('ToolsStatusCreatedPage',toolname)\n po.goto_page()\n\n # on web page, mark project as uploaded\n po.flip_status_to_uploaded()\n\n self.__wait_for_tool_state(po,'Uploaded')", "def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def put(self, request):\n ProcessingService.save_file(uploaded_file=request.data['file'],\n artist=request.data['artist'], title=request.data['title'])\n tasks.process_audio.delay(uploaded_file_path=settings.FILE_UPLOAD_DIR + request.data['file'].name,\n artist=request.data['artist'], title=request.data['title'])\n return Response(status=status.HTTP_200_OK)", "def upload_experiment():\n response = \"\"\n response_code = 400\n if 'file' not in request.files:\n response = \"Error with request: No file field in body of request.\"\n else:\n file = request.files['file']\n if file.filename == '':\n response = \"Error with request: File field in body of response with no file present.\"\n elif file and allowed_file(file.filename, ALLOWED_EXPERIMENT_EXTENSIONS):\n filename = file.filename\n save_path = os.path.dirname(os.path.abspath(__file__)) + EXPERIMENT_UPLOAD_FOLDER\n file.save(os.path.join(save_path, filename))\n response = \"Success: Experiment saved.\"\n response_code = 201\n else:\n response = \"Error with request: File extension not allowed.\"\n return make_response(jsonify({'message': response}), response_code)", "def file(ctx, data_dir, data_file):\n ctx.obj['DATA_DIR'] = data_dir\n ctx.obj['DATA_FILE'] = data_file\n ctx.obj['TYPE'] = 'file'", "def multipart(self):\n self.add_file_string('Multipart file')\n self.should_copy = False", "def post(self):\n filename = str(time.time())\n filepath = 
os.path.join(\n os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n with open(filepath, 'bw') as uploadfile:\n chunk_size = 1024\n while True:\n chunk = request.stream.read(chunk_size)\n if len(chunk) == 0:\n break\n uploadfile.write(chunk)\n current_app.logger.info('file %s upload successfully', filename)\n return {'timestamp': filename}, http.HTTPStatus.CREATED", "def upload_file_to_shock(self,\n console, # DEBUG\n shock_service_url = None,\n filePath = None,\n ssl_verify = True,\n token = None):\n self.log(console,\"UPLOADING FILE \"+filePath+\" TO SHOCK\")\n\n if token is None:\n raise Exception(\"Authentication token required!\")\n\n #build the header\n header = dict()\n header[\"Authorization\"] = \"Oauth {0}\".format(token)\n if filePath is None:\n raise Exception(\"No file given for upload to SHOCK!\")\n\n dataFile = open(os.path.abspath(filePath), 'rb')\n m = MultipartEncoder(fields={'upload': (os.path.split(filePath)[-1], dataFile)})\n header['Content-Type'] = m.content_type\n\n #logger.info(\"Sending {0} to {1}\".format(filePath,shock_service_url))\n try:\n response = requests.post(shock_service_url + \"/node\", headers=header, data=m, allow_redirects=True, verify=ssl_verify)\n dataFile.close()\n except:\n dataFile.close()\n raise\n if not response.ok:\n response.raise_for_status()\n result = response.json()\n if result['error']:\n raise Exception(result['error'][0])\n else:\n return result[\"data\"]", "def upload_file_to_shock(self,\n console, # DEBUG\n shock_service_url = None,\n filePath = None,\n ssl_verify = True,\n token = None):\n self.log(console,\"UPLOADING FILE \"+filePath+\" TO SHOCK\")\n\n if token is None:\n raise Exception(\"Authentication token required!\")\n\n #build the header\n header = dict()\n header[\"Authorization\"] = \"Oauth {0}\".format(token)\n if filePath is None:\n raise Exception(\"No file given for upload to SHOCK!\")\n\n dataFile = open(os.path.abspath(filePath), 'rb')\n m = MultipartEncoder(fields={'upload': (os.path.split(filePath)[-1], dataFile)})\n header['Content-Type'] = m.content_type\n\n #logger.info(\"Sending {0} to {1}\".format(filePath,shock_service_url))\n try:\n response = requests.post(shock_service_url + \"/node\", headers=header, data=m, allow_redirects=True, verify=ssl_verify)\n dataFile.close()\n except:\n dataFile.close()\n raise\n if not response.ok:\n response.raise_for_status()\n result = response.json()\n if result['error']:\n raise Exception(result['error'][0])\n else:\n return result[\"data\"]", "def click_upload_button(self):\n return self" ]
[ "0.6423938", "0.63981605", "0.63896406", "0.6386179", "0.6256276", "0.62473285", "0.62320954", "0.62008077", "0.6166157", "0.61652756", "0.6073021", "0.5995549", "0.59897304", "0.5989222", "0.5944204", "0.59314686", "0.5905269", "0.5904682", "0.5895401", "0.5894111", "0.58836085", "0.5880417", "0.5846711", "0.5838051", "0.58372647", "0.58326906", "0.58283484", "0.5807569", "0.5797341", "0.5788617", "0.57822347", "0.57661504", "0.5752284", "0.5733989", "0.57236946", "0.5703362", "0.5693407", "0.5665396", "0.56618774", "0.56510884", "0.5650929", "0.5650323", "0.5640546", "0.5639462", "0.5638867", "0.56310976", "0.5627932", "0.5627856", "0.5619957", "0.5619779", "0.5616007", "0.56130606", "0.56105715", "0.5610177", "0.5594488", "0.5575948", "0.5565179", "0.5564811", "0.556454", "0.5557561", "0.55574334", "0.55454767", "0.55362225", "0.553313", "0.5516002", "0.5515674", "0.55146104", "0.5513383", "0.55029804", "0.55020857", "0.55015767", "0.54841757", "0.5473479", "0.5469765", "0.54675305", "0.5465878", "0.5465019", "0.54592377", "0.5455327", "0.5453998", "0.5450056", "0.5440987", "0.5440656", "0.54278135", "0.54174703", "0.5414964", "0.5407446", "0.5405475", "0.5401449", "0.53988683", "0.5394656", "0.5393353", "0.5386474", "0.53827834", "0.5380038", "0.53790766", "0.53787756", "0.53710926", "0.53589845", "0.53589845", "0.53546643" ]
0.0
-1
Denormalizes image tensors using mean and std
def denormalize(tensors):
    if len(tensors.shape) < 4:
        for c in range(3):
            tensors[c, ...].mul_(std[c]).add_(mean[c])
    else:
        for c in range(3):
            tensors[:, c].mul_(std[c]).add_(mean[c])
    return torch.clamp(tensors, 0, 255)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def un_normalize(tensor, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n mean = torch.FloatTensor(mean).view(1,3,1,1)\n std = torch.FloatTensor(std).view(1,3,1,1)\n \n image = tensor.cpu().detach()\n image = image*std+mean\n image = image.numpy()\n \n image = np.transpose(image, (0,2,3,1))\n \n #print(np.max(image))\n #print(np.min(image))\n return image", "def normalize(tensor, mean, std):\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n # TODO: make efficient\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor", "def normalize(tensor, mean, std):\n if not _is_tensor_image(tensor):\n raise TypeError('tensor is not a torch image.')\n # TODO: make efficient\n for t, m, s in zip(tensor, mean, std):\n t.sub_(m).div_(s)\n return tensor", "def denormalize(tensors):\r\n for c in range(3):\r\n tensors[:, c].mul_(std[c]).add_(mean[c])\r\n return torch.clamp(tensors, 0, 255)", "def unnormalize(images, mean, std):\n \n unnorm_images = images * std + mean\n \n \n return unnorm_images", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def _normalize(images):\n images -= images.mean(axis=0, keepdims=True)\n images /= np.maximum(images.std(axis=0, keepdims=True), 3e-1)", "def unnormalize(tensor, mean, std):\n for t, m, s in zip(tensor, mean, std):\n t.mul_(s).add_(m)\n return tensor", "def denormalize(img, means, stds, resize_to_original=False):\n\n img = np.moveaxis(img, 0, 2)\n img = img*stds + means\n img = np.clip(img, 0, 255).astype('uint8')\n\n if resize_to_original:\n # revert def preprocess_image()\n img = img[:,(img_w//4): (img_w - img_w//4),:]\n img = cv2.copyMakeBorder( img, img.shape[0], 0,0,0, cv2.BORDER_CONSTANT) #, borderType)\n img = cv2.resize(img, (img_orig_w, img_orig_h))\n \n return img", "def normalize(img, mean, std, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n mean = paddle.to_tensor(mean, place=img.place)\n std = paddle.to_tensor(std, place=img.place)\n\n if _is_channel_first(data_format):\n mean = mean.reshape([-1, 1, 1])\n std = std.reshape([-1, 1, 1])\n\n return (img - mean) / std", "def normalize(tensor: np.ndarray):\n if len(tensor.shape) < 4:\n tensor = np.expand_dims(tensor, axis=2)\n mean = np.array([tensor[..., chn, :].mean() for chn in range(tensor.shape[2])])\n std = np.array([tensor[..., chn, :].std() for chn in range(tensor.shape[2])])\n return (tensor - mean[:, np.newaxis]) / std[:, np.newaxis]", "def denormalize(img, dataset=\"imagenet\"):\r\n if dataset == \"cifar10\":\r\n c_std = [0.247, 0.243, 0.261]\r\n c_mean = [0.4914, 0.4822, 0.4466]\r\n elif dataset == \"imagenet\":\r\n c_std = [0.229, 0.224, 0.225]\r\n c_mean = [0.485, 0.456, 0.406]\r\n for i in [0, 1, 2]:\r\n img[i] = img[i] * c_std[i] + c_mean[i]\r\n return img", "def imnormalize_tensor(self, img, mean, std, to_rgb=True):\n mean = np.float32(mean.reshape(1, -1))\n stdinv = 1 / np.float32(std.reshape(1, -1))\n if to_rgb:\n img = img[:, :, [2, 1, 0]]\n img = torch.sub(img, torch.tensor(mean).cuda())\n img = torch.mul(img, torch.tensor(stdinv).cuda())\n return img", "def normalize(image):\n image = image.astype(np.float32)\n mean = np.mean(image)\n std = np.std(image)\n if std > 0:\n ret = (image - mean) / std\n else:\n ret = image * 0.\n return ret", "def standardize(image, mean=[0.48462227599918, 0.45624044862054, 0.40588363755159], std=[0.22889466674951, 0.22446679341259, 0.22495548344775]):\n image = 
image.astype(np.float32) / 255.0\n image = np.divide(np.subtract(image, mean), std)\n return image", "def norm(imagestack, mean, std):\n \n new_im = (imagestack - mean)/std \n \n return new_im", "def normalize_std(img, eps=1e-10):\n with tf.name_scope('normalize'):\n std = tf.sqrt(tf.reduce_mean(tf.square(img)))\n return img/tf.maximum(std, eps)", "def normalize_img(img):\n channel_mean = img.mean(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n channel_std = img.std(axis=0, keepdims=True).mean(axis=1, keepdims=True)\n return (img - channel_mean) / channel_std", "def normalize_fn(tensor, mean, std):\n # here we assume the color channel is in at dim=1\n mean = mean[None, :, None, None]\n std = std[None, :, None, None]\n return tensor.sub(mean).div(std)", "def denormalize(x, std, mean):\n out = x * std + mean\n return out.clamp(0, 1)", "def img_normalize(image, label):\n mean, std = ds_stats\n image -= tf.constant(mean, shape=[1, 1, num_channels], dtype=image.dtype)\n image /= tf.constant(std, shape=[1, 1, num_channels], dtype=image.dtype)\n return image, label", "def unnormalize(tensor, mean, std, inplace: bool = False) :\n if not isinstance(tensor, torch.Tensor):\n raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor)))\n\n if tensor.ndim < 3:\n raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = '\n '{}.'.format(tensor.size()))\n\n if not inplace:\n tensor = tensor.clone()\n\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if (std == 0).any():\n raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))\n if mean.ndim == 1:\n mean = mean.view(-1, 1, 1)\n if std.ndim == 1:\n std = std.view(-1, 1, 1)\n tensor.mul_(std).add_(mean)\n return tensor", "def normalize_batch(batch, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):\n # normalize using imagenet mean and std\n batch = batch.clone()\n mean = torch.tensor(mean).view(-1, 1, 1)\n std = torch.tensor(std).view(-1, 1, 1)\n # if your image data is scaled to scale 0-255, uncomment the line below\n # batch.div_(255.0)\n return (batch - mean) / std", "def normalize(tensor, mean, std, inplace=False):\n if not torch.is_tensor(tensor):\n raise TypeError('tensor should be a torch tensor. Got {}.'.format(type(tensor)))\n\n if tensor.ndimension() != 3:\n raise ValueError('Expected tensor to be a tensor image of size (C, H, W). 
Got tensor.size() = '\n '{}.'.format(tensor.size()))\n\n if not inplace:\n tensor = tensor.clone()\n\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n if (std == 0).any():\n raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype))\n if mean.ndim == 1:\n mean = mean[:, None, None]\n if std.ndim == 1:\n std = std[:, None, None]\n tensor.sub_(mean).div_(std)\n return tensor", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(image):\n mean = image.mean()\n stddev = image.std()\n adjusted_stddev = max(stddev, 1.0/math.sqrt(image.size))\n standardized_image = (image - mean) / adjusted_stddev\n \n return standardized_image", "def normalize(self, tensor: torch.Tensor, mean: List[float], std: List[float]):\n\n # standard operation defined in ToTensor\n tensor = tensor.div(255)\n dtype = tensor.dtype\n mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device)\n std = torch.as_tensor(std, dtype=dtype, device=tensor.device)\n\n if (std == 0).any():\n raise ValueError(\n f\"std evaluated to zero after conversion to {dtype}​​​​​, leading to division by zero.\"\n )\n if mean.ndim == 1:\n mean = mean.view(-1, 1, 1)\n if std.ndim == 1:\n std = std.view(-1, 1, 1)\n\n tensor = tensor.sub(mean)\n tensor = tensor.div(std)\n\n return tensor", "def tensor2img(tensImg,mean,std):\n\t# undo normalize\n\tfor t, m, s in zip(tensImg, mean, std):\n\t\tt.mul_(s).add_(m)\n\t# undo transpose\n\ttensImg = (tensImg.numpy().transpose(1,2,0)*float(255)).astype(np.uint8)\n\treturn tensImg", "def image_normalize(im, axis=(0, 1), c=1e-8):\n return (im - im.mean(axis)) / (im.std(axis) + c)", "def normalize_image(self):\n # The image normalization is identical to Cloud TPU ResNet.\n self._image = tf.image.convert_image_dtype(self._image, dtype=tf.float32)\n offset = tf.constant(DATASET_MEAN)\n offset = tf.expand_dims(offset, axis=0)\n offset = tf.expand_dims(offset, axis=0)\n self._image -= offset\n\n scale = tf.constant(DATASET_VAR)\n scale = tf.expand_dims(scale, axis=0)\n scale = tf.expand_dims(scale, axis=0)\n self._image /= scale", "def normalize_image(image, mean=(0.485, 0.456, 0.406), var=(0.229, 0.224, 0.225)):\n with tf.name_scope('NormalizeImage', values=[image]):\n image = tf.to_float(image)\n image /= 255.0\n\n image -= mean\n image /= var\n\n return image", "def _normalize_images(self, images: th.Tensor) -> th.Tensor:\n output = ((images+2)/4 - self._norm_mean)/self._norm_std\n return output", "def normalize(image, xbar, sigma):\n image = image.transpose(2, 0, 1) # Switch to channel-first\n mean, std = np.array(xbar), np.array(sigma)\n image = (image - mean[:, None, None]) / std[:, None, None]\n return image.transpose(1, 2, 0)", "def normalize(self, image, transpose=False, data_type=None):\n return normalize(image, self.mean, self.std, transpose)", "def truncated_normal_(tensor):\n mean = 0\n std = 1/float(np.sqrt(tensor.shape[0]))\n size = tensor.shape\n tmp = tensor.new_empty(size + (4,)).normal_()\n valid = (tmp < 2) & (tmp > -2)\n ind = valid.max(-1, keepdim=True)[1]\n tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))\n tensor.data.mul_(std).add_(mean)\n return tensor", "def normalise(image):", "def standardize(X):\n mu = X.mean(axis=0, keepdims=True)\n s = X.std(axis=0, 
keepdims=True)\n return (X-mu)/s", "def _normalize(image):\n return tf.multiply(tf.subtract(image, 0.5), 2.0)", "def normalize_image(image_tensor, mean_rgb, stddev_rgb,\n data_format='channels_last'):\n # TODO: support GPU by using shape=[3, 1, 1]\n if data_format == 'channels_first':\n shape = [3, 1, 1]\n elif data_format == 'channels_last':\n shape = [1, 1, 3]\n else:\n raise Exception('Unexpected data_format: {}'.format(data_format))\n image_tensor -= tf.constant(mean_rgb, shape=shape,\n dtype=image_tensor.dtype)\n image_tensor /= tf.constant(stddev_rgb, shape=shape,\n dtype=image_tensor.dtype)\n return image_tensor", "def normalize_data(img):\n nor = np.linalg.norm(img, axis = 1)\n nor = np.reshape(nor, (len(img), 1))\n img = np.divide(img, nor)\n return img", "def unnormalize(self, image, transpose=False):\n return unnormalize(image, self.mean, self.std, transpose)", "def normalize(x):\n MEAN_VALUES = np.array([104, 117, 123])\n means = theano.shared(MEAN_VALUES.astype(\"float32\"))\n return x[:, ::-1, :, :] - means[np.newaxis, :, np.newaxis, np.newaxis]", "def normalization_func(img):\n vmin, vmax = img.min(), img.max()\n if vmin != vmax:\n im = (img - vmin) / (vmax - vmin)\n else:\n im = np.ones(img.shape)\n return im", "def normalize(values):\n return (values - np.mean(values)) / np.std(values)", "def normalize(dataset):\n return normalize_standard_deviation(normalize_mean(dataset))", "def standardize(x, mean=None, std=None): \n \n mean = mean if mean is not None else x.mean(axis=0)\n std = std if std is not None else x.std(axis=0) \n \n return (x - mean) / std, mean, std", "def normal_init(m, mean, std):\n if isinstance(m, nn.ConvTranspose2d) or isinstance(m, nn.Conv2d):\n m.weight.data.normal_(mean, std)\n m.bias.data.zero_()", "def reshape_normalise(img):\n\t# The image shape is expected to match the input of VGG19\n\timg = np.resize(img, (1, CONFIG.IMAGE_HEIGHT, CONFIG.IMAGE_WIDTH, CONFIG.COLOR_CHANNELS)).astype('float32')\n\timg -= CONFIG.MEAN_PIXEL\n\treturn img", "def imgNormalize(img): \n constant = np.sum(sitk.GetArrayFromImage(img))*np.prod(img.GetSpacing())\n return img/constant", "def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))", "def standardize(image):\n \n ### START CODE HERE (REPLACE INSTANCES OF 'None' with your code) ###\n \n # initialize to array of zeros, with same shape as the image\n standardized_image = np.zeros(image.shape)\n\n # iterate over channels\n for c in range(image.shape[0]):\n # iterate over the `z` dimension\n for z in range(image.shape[3]):\n # get a slice of the image \n # at channel c and z-th dimension `z`\n image_slice = image[c,:,:,z]\n\n # subtract the mean from image_slice\n centered = image_slice - np.mean(image_slice)\n \n # divide by the standard deviation (only if it is different from zero)\n centered_scaled = centered / np.std(centered)\n\n # update the slice of standardized image\n # with the scaled centered and scaled image\n standardized_image[c, :, :, z] = centered_scaled\n\n ### END CODE HERE ###\n\n return standardized_image", "def processImage(imgs):\r\n imgs = imgs.astype(np.float32)\r\n for i, img in enumerate(imgs):\r\n m = img.mean()\r\n s = img.std()\r\n imgs[i] = (img - m) / s\r\n return imgs", "def denormalize(D, means, stds=None): \n \n (D,initial_shape) = ensure_column(D) \n \n n_rows = D.shape[0] \n \n if stds is not None:\n result = np.multiply( D, stds ) + np.tile( means, (n_rows,1) )\n else:\n result = D + np.tile( means, (n_rows,1) )\n \n result = 
rev_ensure_column(result,initial_shape)\n D = rev_ensure_column(D,initial_shape) \n \n return result", "def normalize(image, label):\n image -= settings.DATASET_MEAN\n image /= settings.DATASET_STD\n\n return image, label", "def normalize(im: np.ndarray) -> np.ndarray:\n im = im.astype(np.float32)\n return (im - im.min()) / (im.max() - im.min())", "def per_img_demean(img):\n assert (len(img.size()) == 3 and img.size(0) == 3) # 1 RGB image, tensor\n mean = img.sum(dim=1, keepdim=True).sum(dim=2, keepdim=True) / (img.size(1) * img.size(2))\n return img - mean # expands", "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def normalization(imgs):\n\n imgs = np.asarray(imgs).astype(np.float32)\n imgs = np.expand_dims(imgs / 255, axis=-1)\n return imgs", "def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def normalize(batch_img: np.ndarray) -> np.ndarray:\n batch_img = batch_img.astype('float32')\n return batch_img / 127.5 - 1", "def normalise_tensor(tensor, upper, lower):\n\n normalised_tensor = (tensor-lower)/(upper-lower)\n return normalised_tensor", "def unstandardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n x_scaled = x * stats['std'] + stats['mean']\n return x_scaled", "def preprocess_img(img):\n # Make a copy of img as array\n img = np.array(img)\n\n # Convert into tensor\n img = torch.Tensor(img).permute(2, 0, 1) / 255.0\n\n # Normalize\n for t, m, s in zip(img, TORCH_IMG_MEAN, TORCH_IMG_STD):\n t.sub_(m).div_(s)\n\n return img", "def NgNormalization2(Pin,g=10.0):\n Pmean = np.mean(Pin,axis=1,keepdims=True) \n Pstd = np.sqrt(np.var(Pin,axis=1,keepdims=True)+g ) # g = 10 for images of brightness 0...255 \n O = (Pin - Pmean) / Pstd\n return O", "def normalize_features(array):\n \n array_normalized = (array-array.mean())/array.std()\n mu = array.mean()\n sigma = array.std()\n\n return array_normalized, mu, sigma", "def normalize(input_tensor, output_tensor):\n image_dims = utils.get_img_shape(input_tensor)[1:]\n return output_tensor / np.prod(image_dims)", "def _denormalize_joints(x, mean, std):\n assert x.ndim == 3\n assert x.shape == mean.shape == std.shape\n return x * std + mean", "def standardize(\n x: torch.Tensor,\n stats: Dict[str, torch.Tensor]) -> torch.Tensor:\n\n x_scaled = (x - stats['mean']) / stats['std']\n return x_scaled", "def standardize(x, axis=-1):\n stds_avg = np.std(x, axis=axis, keepdims=True)\n x -= np.mean(x, axis=axis, keepdims=True)\n x /= (stds_avg + 1e-8)\n return x", "def normalise(dataset):\n # Scale images to the [0, 1] range\n dataset = dataset.astype(\"float32\") / 255\n # Make sure images have shape (28, 28, 1)\n return np.expand_dims(dataset, -1)", "def normalize(image):\n min = np.min(image)\n max = np.max(image)\n normalImg = 255*(image - min) / (max - min)\n return normalImg", "def BatchNormalize(S):\n mu = np.mean(S, axis=0)\n v = np.mean((S-mu)**2, axis=0)\n S = (S - mu) / np.sqrt(v + epsilon)\n return S", "def unstandardize(da: xr.DataArray, mean: xr.DataArray, std: xr.DataArray):\n return (std * da) + mean", "def make_flat_avg(images, out):\n image = Image(avg_images(images, out))\n image.normalise()\n return out", "def featureNormalization(X):\n 
mean=np.hstack(np.mean(X[:,0]),np.mean(X[:,1]),np.mean(X[:,2]))\n std=np.hstack(np.std(X[:,0]),np.std(X[:,1]),np.std(X[:,2]))\n \n X_norm = (X - mean)/std\n \n return X_norm", "def normalizeData(meanAndStd, dataset):\n\n for i in range(len(dataset)):\n for j in range(len(dataset[i])-1):\n mean = meanAndStd[j][\"mean\"]\n std = meanAndStd[j][\"std\"]\n dataset[i][j] = (dataset[i][j] - mean)/std", "def denormalize(y, close):\n mean = close[:,-1].reshape(y.shape[0],1).repeat(1,y.shape[1])\n std = torch.std(close, dim=1).reshape(y.shape[0],1).repeat(1,y.shape[1])\n return std*y + mean", "def norm_and_stack(images):\n imagestack = np.dstack(tuple([cv2.imread(image, cv2.IMREAD_UNCHANGED) for image in images]))\n mean = np.mean(imagestack)\n std = np.std(imagestack)\n new_im = (imagestack - mean)/std \n \n return new_im, mean, std", "def normalize(X):\n\tX = X - np.mean(X,axis=1)[:,np.newaxis]\n\tX = X/np.std(X,axis=0)[np.newaxis,:];\n\tX = X - np.mean(X,axis=0)[np.newaxis,:]\n\treturn X", "def preprocess_image(img):\n return (img.astype(np.float32)/255.0 - FACENET_MEAN) / FACENET_STD", "def normalize_features(X):\n std = X.std(axis=0)\n std = np.where(std == 0, 1, std) # to avoid division by zero\n x_normed = (X - X.mean(axis=0)) / std\n return x_normed", "def demean_normalize(one_d_array):\n\n temp_arr = one_d_array - np.nanmean(one_d_array)\n\n return temp_arr/np.nanstd(temp_arr)", "def transform_test(imgs, short=600, max_size=1000, mean=(0.485, 0.456, 0.406),\n std=(0.229, 0.224, 0.225)):\n if isinstance(imgs, np.ndarray):\n imgs = [imgs]\n for im in imgs:\n assert isinstance(im, np.ndarray), \"Expect NDArray, got {}\".format(type(im))\n\n tensors = []\n origs = []\n for img in imgs:\n img = timage.resize_short_within(img, short, max_size)\n orig_img = img.astype('uint8')\n img = vf.to_tensor(img)\n img = vf.normalize(img, mean=mean, std=std)\n tensors.append(img.unsqueeze(0))\n origs.append(orig_img)\n if len(tensors) == 1:\n return tensors[0], origs[0]\n return tensors, origs", "def _normalize_tensor(input_tensor):\n\n rms_tensor = K.sqrt(K.mean(K.square(input_tensor)))\n return input_tensor / (rms_tensor + K.epsilon())", "def normalize(data):\n data = numpy.asmatrix(data)\n std_devs = numpy.std(data, axis=1)\n std_devs[std_devs == 0] = 1 # prevent div by 0\n return (data - numpy.mean(data, axis=1)) / std_devs", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def normalize_data(x):\n mvec = x.mean(0)\n stdvec = x.std(axis=0)\n \n return (x - mvec)/stdvec", "def denormalize(batch_img: np.ndarray) -> np.ndarray:\n return np.uint8((batch_img + 1) * 127.5)", "def normalize_transform():\n\n # Default for PyTorch's pre-trained models\n return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])", "def normalise(self):\n return self / self.mean(axis=1).reshape(self.shape[0], 1)", "def inverse_transform(self, data):\n transed = self.restoreDim(data)\n mean = torch.zeros(transed.size())\n std = torch.ones(transed.size())\n if args.cuda:\n mean = mean.cuda()\n std = std.cuda()\n mean[...,0] = self.mean0\n mean[...,1] = self.mean1\n std[...,0] = self.std0\n std[...,1] = self.std1\n transformed = torch.add(torch.mul(transed, std), mean)\n del mean, std\n return transformed.permute(1,0,3,2)", "def color_normalize(x, mean, std):\n if x.dim() in {3, 4}:\n if x.size(0) == 1:\n x = x.repeat(3, 1, 1)\n assert x.size(0) == 3, \"For single video format, expected RGB along first dim\"\n for t, m, s in zip(x, mean, std):\n t.sub_(m)\n 
t.div_(s)\n elif x.dim() == 5:\n assert (\n x.shape[1] == 3\n ), \"For batched video format, expected RGB along second dim\"\n x[:, 0].sub_(mean[0]).div_(std[0])\n x[:, 1].sub_(mean[1]).div_(std[1])\n x[:, 2].sub_(mean[2]).div_(std[2])\n return x", "def standardize(data):\r\n mean = data.mean(axis=0)\r\n std = data.std(axis=0)\r\n return (data - mean)/std", "def standardize(x, mean_x=None, std_x=None):\n if mean_x is None:\n mean_x = np.mean(x, axis=0)\n x = x - mean_x\n if std_x is None:\n std_x = np.std(x, axis=0)\n x[:, std_x > 0] = x[:, std_x > 0] / std_x[std_x > 0]\n\n tx = np.hstack((np.ones((x.shape[0], 1)), x))\n return tx, mean_x, std_x", "def vis_normalize(a, s=0.1):\n return s * (a - a.mean()) / (max(a.std(), 1e-4)) + 0.5", "def normalization(image):\n return (image - np.min(image)) / (np.max(image) - np.min(image))", "def normalize(D, ntype=0, means=None, stds=None):\n \n if (not isinstance(D,np.ndarray)) or (len(D.shape) > 2):\n raise AssertionError(\"Input D must be derivative of numpy.ndarray and have less than 3 dimensions.\")\n \n (D,initial_shape) = ensure_column(D)\n \n n_rows = D.shape[0] \n \n if means is None:\n means = bn.nanmean(D, axis= 0) \n \n tmp = D - np.tile( means, (n_rows,1) ) # temporary result. Data with \n # substracted mean \n \n if stds is None:\n if (ntype == 0): \n stds = bn.nanstd(tmp,axis=0, ddof=1 ) # one degree of freadom as matlab default\n \n elif (ntype == 1):\n stds = bn.nanmax(np.abs(tmp), axis=0)\n \n elif (ntype == 2): \n stds = np.sqrt( bn.nansum( np.power(tmp,2) , axis = 0) ) \n \n elif (ntype == 3): \n stds = np.ones( (D.shape[1],) )\n \n else:\n raise ValueError(\"Normalization type %s is unknown\" % ntype)\n \n # result = np.dot( tmp , np.diagflat( 1./stds ) )\n result = np.divide( tmp, stds ) \n \n result = rev_ensure_column(result,initial_shape)\n D = rev_ensure_column(D,initial_shape) \n \n return (result,means,stds)", "def smdm_normalize(images, window, padding, name=\"unnamed_smdm_normalize\"):\n\tMEDIAN_JITTER = tf.constant(1e-8)\n\t\n\tif window % 2 == 0:\n\t\traise ValueError(\"attempted to smdm_normalize() with even-sized window\")\n\n\timages = tf.cast(images, tf.float32)\n\tbatch_size, height, width, channels = tf.shape(images)[0], tf.shape(images)[1], tf.shape(images)[2], tf.shape(images)[3]\n\n\tspatial_last = tf.transpose(images, (0, 3, 1, 2))\n\tspatial_last_and_flat = tf.reshape(spatial_last, (batch_size, channels, -1))\n\tn = tf.multiply(height, width)\n\tk = tf.to_int32(tf.divide(n, 2)) + 1\n\ttop_k = tf.nn.top_k(spatial_last_and_flat, k, name=name + \"_top_half_of_images\")[0]\n\tmedians_spatial_last_and_flat = tf.cond(\n\t\ttf.equal(tf.mod(n, 2), 0),\n\t\tlambda: tf.reduce_mean(top_k[:, :, k - 2: k], -1, keep_dims=True),\n\t\tlambda: top_k[:, :, k - 1]\n\t)\n\tmedians_spatial_last_and_flat = tf.add(\n\t\tmedians_spatial_last_and_flat,\n\t\ttf.fill(tf.shape(medians_spatial_last_and_flat), MEDIAN_JITTER)\n\t)\n\tmedians_spatial_last = tf.expand_dims(medians_spatial_last_and_flat, 3)\n\tmedians = tf.transpose(medians_spatial_last, (0, 2, 3, 1))\n\timages = tf.divide(images, medians, name=name + \"_divide_images_by_medians\")\n\n\tpadding_amount = int((window - 1) / 2)\n\tpadding_amounts = ((0, 0), (padding_amount, padding_amount), (padding_amount, padding_amount), (0, 0))\n\timages_padded = tf.pad(images, padding_amounts, padding)\n\tlocal_means = tf.nn.pool(images_padded, (window, window), \"AVG\", \"VALID\", name=name + \"_local_means_of_images\")\n\timages = tf.subtract(images, local_means, name=name + 
\"_subtract_local_means_from_images\")\n\n\treturn images", "def standardize(X):\n X_std = X\n mean = X.mean(axis=0)\n std = X.std(axis=0)\n for col in range(np.shape(X)[1]):\n if std[col]:\n X_std[:, col] = (X_std[:, col] - mean[col]) / std[col]\n # X_std = (X - X.mean(axis=0)) / X.std(axis=0)\n return X_std" ]
[ "0.78944635", "0.7882652", "0.7882652", "0.78073585", "0.7707587", "0.75766003", "0.75766003", "0.75701815", "0.7559962", "0.75194675", "0.75118655", "0.74785763", "0.73548406", "0.7345986", "0.73086804", "0.72494674", "0.72197634", "0.71848446", "0.7110974", "0.7100263", "0.7030862", "0.69995165", "0.69598615", "0.68827486", "0.6844572", "0.6844572", "0.68365127", "0.68249583", "0.6799495", "0.67780995", "0.6768002", "0.6762072", "0.6751974", "0.67418694", "0.6712207", "0.66610897", "0.6599532", "0.65566707", "0.65173584", "0.651684", "0.65123636", "0.6498215", "0.6489733", "0.64466983", "0.6431992", "0.6431372", "0.6428953", "0.6427255", "0.64245135", "0.6417567", "0.6404624", "0.6402098", "0.63959163", "0.6387844", "0.6387475", "0.63793063", "0.637754", "0.637754", "0.63737077", "0.63611954", "0.63510114", "0.6349697", "0.63443255", "0.6342964", "0.6341816", "0.63341624", "0.63191533", "0.63074577", "0.6306118", "0.6296154", "0.62901443", "0.6289841", "0.6283155", "0.62812334", "0.62756354", "0.6272716", "0.62691486", "0.6268604", "0.62685347", "0.62672055", "0.6264777", "0.62451154", "0.6241531", "0.6217307", "0.62171847", "0.6216371", "0.62075233", "0.62075233", "0.6199602", "0.6178903", "0.61676884", "0.6160752", "0.61516446", "0.61468995", "0.61454403", "0.6135486", "0.6135266", "0.6126005", "0.6122945", "0.6117333" ]
0.765841
5
Get the status code as per ttype and its status_val
def get_status_code(self, ttype, status_val) -> str:
    # get the status code from __status_code or __default_code
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def status_code(self):\n return int(self.status.split()[1])", "def get_status_code(status):\n return dict(const.STATUS_CODES).get(status)", "def status_code(self):\r\n return int(self._status[:3])", "def status_code(self):\n return int(self._status[:3])", "def _parse_status(self, status):\n if status in (STATUS_FINISHED, 'FINISHED'):\n return STATUS_FINISHED\n elif status in (STATUS_ERROR, 'ERROR'):\n return STATUS_ERROR\n elif status in (STATUS_CANCELED, 'CANCELED'):\n return STATUS_CANCELED\n return STATUS_STARTED", "def _get_status(self):\n return self.__status", "def status_code(self) -> int:\n return pulumi.get(self, \"status_code\")", "def _get_status_code(response: Response) -> int:\n status_code = response.status_code\n if isinstance(status_code, HTTPStatus):\n return status_code.value\n else:\n return status_code", "def status(self, value):\r\n if isinstance(value, (int, long)):\r\n if 100 <= value <= 999:\r\n st = _RESPONSE_STATUSES.get(value, '')\r\n if st:\r\n self._status = '%d %s' % (value, st)\r\n else:\r\n self._status = str(value)\r\n else:\r\n raise ValueError('Bad response code: %d' % value)\r\n elif isinstance(value, basestring):\r\n if isinstance(value, unicode):\r\n value = value.encode('utf-8')\r\n if _RE_RESPONSE_STATUS.match(value):\r\n self._status = value\r\n else:\r\n raise ValueError('Bad response code: %s' % value)\r\n else:\r\n raise TypeError('Bad type of response code.')", "def status(self):\n return STATUS[self.fields['status']]", "def GetStatus(self):\r\n return self.status", "def gather_http_status_code(self):\n\n if self.status.ipv6_syntax_validation:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, \"ipv6\"\n ).get()\n else:\n self.status.http_status_code = PyFunceble.lookup.HTTPCode(\n self.subject, self.subject_type\n ).get()", "def status(_):\n return {\"status\": \"ok\"}", "def get_status_code(self, response):\n if hasattr(response, 'status_int'):\n return response.status_int\n return response.status", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def get_status_code(self, response):\r\n if hasattr(response, 'status_int'):\r\n return response.status_int\r\n else:\r\n return response.status_code", "def _get_status(trial: dict) -> int:\n if trial['overall_status'] in {'Not yet recruiting', 'Active, not recruiting'}:\n return 0\n elif trial['overall_status'] in {'Enrolling by invitation', 'Recruiting', 'Available'}:\n return 1\n elif trial['overall_status'] in {'Approved for marketing'}:\n return 2\n else:\n return 3", "def status_to_event_code(status: str):\n return {\n \"sent\": \"txSent\",\n \"pending\": \"txPool\",\n \"pending-simulation\": \"txPoolSimulation\",\n \"stuck\": \"txStuck\",\n \"confirmed\": \"txConfirmed\",\n \"failed\": \"txFailed\",\n \"speedup\": \"txSpeedUp\",\n \"cancel\": \"txCancel\",\n \"dropped\": \"txDropped\",\n }[status]", "def status(self, value):\n if isinstance(value, (long, int)):\n if 100 <= value <= 900:\n status = _RESPONSE_STATUSES.get(value, '')\n if status:\n self._status = '%d %s' % (value, status)\n else:\n self._status = str(value)\n else:\n raise ValueError('Bad response code: %d' % value)\n elif isinstance(value, basestring):\n if isinstance(value, unicode):\n value = value.encode('utf-8')\n if _RE_RESPONSE_STATUS.match(value):\n self._status = value\n else:\n raise ValueError('Bad response code: %d' % value)\n else:\n raise TypeError('Bad type of response code.')", "def 
_GetStatusFromOp(op):\n for prop in op.response.additionalProperties:\n if prop.key == 'status':\n return prop.value.string_value\n return 'UNKNOWN'", "def code(self):\n\t\treturn self.status_code", "def __dec_status(self, status_code):\n ret = self.status_codes.get(status_code)\n if ret == None:\n return \"Unknown\"\n else:\n return ret", "def status(self, code, content_length=None):", "def status(self):\n return self.get(self._names[\"status\"])", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status():\n return \"OK\" # defaults to a 200 HTML status return code", "def status_code(self):\n return self._status_code", "def rtt_get_status(self):\n status = structs.JLinkRTTerminalStatus()\n res = self.rtt_control(enums.JLinkRTTCommand.GETSTAT, status)\n return status", "def get_status(self):\n return self.read_register(259, 0, 3)", "def status_str(status):\n\n status_map = {\n 0: 'MATCH',\n 10: 'OK',\n 15: 'SKIP',\n 20: 'FAIL',\n 30: 'CRASH'\n }\n return status_map.get(status, 'UNKNOWN')", "def axapi_status(self, result):\n try:\n status = result.json()['response']['status']\n if status == 'fail':\n error = '\\n ERROR: ' + result.json()['response']['err']['msg']\n return error, status\n else:\n return status\n except:\n good_status_codes = ['<Response [200]>', '<Response [204]>']\n status_code = str(result)\n if status_code in good_status_codes:\n return 'OK'\n else:\n return status_code", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def translate_status(s):\n exc = StatusNotOk(s.message())\n exc.code = s.code()\n return exc", "def work_status(value):\n for status in models.WORK_STATUS:\n if status[0]==value:\n return status[1]\n\n return \"WORK STATUS NOT FOUND\"", "def status(self):\n return self._data['status']", "async def get_status():", "def getstatus(self):\n return self.__status", "def get_status(self):\n return self._status", "def get_status(id):\n task = run_ctx_request.AsyncResult(id)\n if task.state == states.PENDING:\n abort(404)\n if task.state == states.RECEIVED or task.state == states.STARTED:\n return '', 202, {'Location': url_for('api.get_status', id=id)}\n return task.info", "def _decode_sensor_status(self, status: str) -> str:\n k = int(status)\n return self.SENSOR_STATUSES[k]", "def get_status_code(self, status_line):\n try:\n return int(status_line.split(' ')[1])\n except ValueError:\n return 400\n except IndexError:\n return 404", "def get_status(self):\n return self.msg", "def status_enum(self):\n return self.valid_statuses()", "def _get_status(self):\n return u'%s' % (self.get_status_display())", "def get_status(self):\n statuses = dict(ACTIVITY_STATUS_CHOICES)\n return statuses.get(self.status, \"N/A\")", "def get_status(self, state):\n raise NotImplementedError", "def status_check():\n return {\"status\": \"OK\"}", "def get_status(self):\n # TODO retrieve from db if not set\n return self.status", "def status():\n return 'OK'", "def get_status(self):\n status = self._status.get_message()\n \n if status == \"N\":\n return \"offline\"\n \n elif status == \"Y\":\n return \"online\"\n \n elif status == \"A\":\n return \"away\"\n \n elif status == \"B\":\n return \"busy\"", "def status(self) -> str:\n trial_status = {0: \"SURVIVED\", 1: \"DETECTED\", 2: \"ERROR\", 3: \"TIMEOUT\"}\n return trial_status.get(self.return_code, \"UNKNOWN\")", "def test_get_status(self):\n pass", "def test_get_status(self):\n pass", "def get_status():\n 
return ('off', 'off')", "def get_validation_status(processed_code):\n if is_code_has_unknown_digit(processed_code):\n if is_code_valid_checksum(processed_code):\n return VALID_CODE_STATUS\n else:\n return CHECKSUM_ERROR_STATUS\n else:\n return DIGIT_ERROR_STATUS", "def getStatus():", "def get_response_status(response_code):\n if is_success(response_code):\n return 'success'\n return 'error'", "def get_status_code(self):\n return self.__response.status_code", "def status_class(status):\n status = status.split('-')[-1] # e.g. \"overall-passed\" -> \"passed\"\n classes = {\n 'passed': 'success',\n 'failed': 'danger',\n 'skipped': 'warning',\n 'match': 'success',\n 'diff': 'danger',\n 'missing': 'warning',\n 'generated': 'warning',\n }\n return classes[status]", "def status_reason(self):\n return self.status.split()[2]", "async def get_status(self) -> dict[str, Any]:\n\n def check_int(s):\n if s[0] in (\"-\", \"+\"):\n return s[1:].isdigit()\n return s.isdigit()\n\n cmd = await self.send_command(\"STATUS\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n status = {\n key.lower(): int(value) if check_int(value) else float(value)\n for (key, value) in map(lambda k: k.split(\"=\"), keywords)\n }\n\n return status", "def test_status_code(self):\n formatted_status_code = get_status_code('python')\n self.assertEqual(formatted_status_code, 200) #compares the test result with the result expected", "def translate_from_rpc(rpc_enum_value):\n return {\n 0: StatusText.StatusType.INFO,\n 1: StatusText.StatusType.WARNING,\n 2: StatusText.StatusType.CRITICAL,\n }.get(rpc_enum_value, None)", "def set_status( code ):", "def comando_status(self):\r\n\tif args.tipo == 'web':\r\n return self.status_web()\r\n\r\n\tif args.tipo == 'nfce':\r\n return self.consulta_status_nfce()\r\n\r\n\tif args.tipo == 'dual':\r\n return self.status_impressora_dual()", "def _mapErrorCodeToStatus(code):\n if code == 103:\n return http.NOT_FOUND\n return http.INTERNAL_SERVER_ERROR", "def status_code(self) -> int:\n raise NotImplementedError # pragma: no cover", "def status(self) -> Optional[int]:\n return pulumi.get(self, \"status\")", "def get_current_status(cls):\n from sauna.plugins.base import Plugin\n from sauna import check_results_lock, check_results\n\n def reduce_status(accumulated, update_value):\n if update_value.status > Plugin.STATUS_CRIT:\n return accumulated\n return accumulated if accumulated > update_value.status else \\\n update_value.status\n\n with check_results_lock:\n code = reduce(reduce_status, check_results.values(), 0)\n\n return Plugin.status_code_to_str(code), code", "def status(cls, stat, request=Retrieve):\n res = cls.STATUS_MAP.get(stat)\n if res is None:\n res = status.Status('%d.00' % (stat // 100))\n if res.success:\n res = request.success\n return res", "def getStatusCode(statusWord):\n try:\n return ['ordered', 'wished', 'owned'].index(statusWord)\n except ValueError:\n return -1", "def status(self) -> dict[str, str] | None:\n return self._status", "def status2word(self,status):\n if status == 'T':\n return \"Toggle\"\n elif status == 'S':\n return \"Signature\"\n elif status == 'R':\n return \"Random\"\n elif status == 'N':\n return \"Normal\"\n elif status == 'E':\n return \"FE Error\"\n elif status == 'X':\n return \"NO TIN\"\n else:\n print \"Unknown status !\"\n return \"BUG\"", "def get_test_status(self) -> str:\n return self.__test_result[Result.__RESULT]", "def getStatus(self, 
key, time):\n return self.get(\"status\", key, time)", "def getStatus(self):\n return self._status", "def getStatus(self):\n return self.__status", "def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status", "def status(self):\n\t\treturn self._status", "def convert_to_rp_status(behave_status):\n if behave_status == \"passed\":\n return \"PASSED\"\n elif behave_status == \"failed\":\n return \"FAILED\"\n elif behave_status == \"skipped\":\n return \"SKIPPED\"\n else:\n # todo define what to do\n return \"PASSED\"", "def get_text_status(json):\n if json is None:\n return None\n elif 'statusdetail' in json:\n return json['statusdetail']\n elif 'status' in json:\n return json['status']\n else:\n return None", "def status(self):\n st = ct.c_int()\n self.lib.GetStatus(ct.pointer(st))\n if st.value == 20073:\n return 'Camera is idle, waiting for instructions.'\n elif st.value == 20074:\n return 'Camera is executing the temperature cycle.'\n elif st.value == 20072:\n return 'Acquisition in progress.'\n elif st.value == 20023:\n return 'Unable to meet accumulate cycle time.'\n elif st.value == 20022:\n return 'Unable to meet kinetic cycle time.'\n elif st.value == 20013:\n return 'Unable to communicate with card.'\n elif st.value == 20018:\n return ('Computer unable to read the data via the ISA slot at the '\n 'required rate.')\n elif st.value == 20026:\n return 'Overflow of the spool buffer.'", "def read_status(ctl):\n\tr = ctl.bus_read_struct_coherent(tm.status_addr, 'BBBBI')\n\treturn r", "def get_status_string(self, instance):\n return instance.get_status_string()", "def status() -> Dict[str, Any]:", "def transaction_status_enum(self) -> TransactionStatus:\n return _TRANSACTION_STATUS_MAPPING.get(self.transaction_status)", "def Status(self):\r\n\t\treturn self._get_attribute('status')", "def t_status_process(self, *args, **kwargs):\n\n self.dp.qprint(\"In status process...\")\n\n d_state = self.job_state(*args, **kwargs)\n\n d_ret = d_state['d_ret']\n b_status = d_state['status']\n\n l_keys = d_ret.items()\n l_status = []\n for i in range(0, int(len(l_keys)/2)):\n b_startEvent = d_ret['%s.start' % str(i)]['startTrigger'][0]\n try:\n endcode = d_ret['%s.end' % str(i)]['returncode'][0]\n except:\n endcode = None\n\n if endcode == None and b_startEvent:\n l_status.append('started')\n if not endcode and b_startEvent and type(endcode) is int:\n l_status.append('finishedSuccessfully')\n if endcode and b_startEvent:\n l_status.append('finishedWithError')\n\n self.dp.qprint('b_startEvent = %d' % b_startEvent)\n self.dp.qprint(endcode)\n self.dp.qprint('l_status = %s' % l_status)\n\n d_ret['l_status'] = l_status\n return {\"d_ret\": d_ret,\n \"status\": b_status}", "def _grep_status(self, status_type):\n args = \"qstat -f {}\".format(self.id).split()\n res, _ = call(args)\n exit_status = [line for line in res.split('\\n')\n if 'exit_status' in line]\n try:\n _, __, code = exit_status[0].split()\n except IndexError:\n code = None\n\n if status_type == 'complete' and code == '0':\n return True\n elif status_type == 'error' and code != '0':\n return True\n else:\n return False", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def getstatus(self):\n with self.lock:\n return (self.status, self.time_start)", "def get_object_status(obj):\n return get_object_parameter(obj, 'status')", "def status():\n (code, message) = rest_api.status(request)\n if (code == 200):\n return 'Running'\n else:\n 
abort(code)", "def get_previous_ti_statuses(self, context: dict) -> enum.Enum:\n dagrun = context['ti'].get_dagrun()\n failed_ti = dagrun.get_task_instances(state='failed')\n success_ti = dagrun.get_task_instances(state='success')\n if not failed_ti and not success_ti: # There is no prev task so it can't have been failed\n logger.info(\"There are no tasks before this one. So it has status RUNNING\")\n return self.prev_ti_state.NONE\n if failed_ti:\n logger.info(\"There are failed tasks before this one. So it has status FAILED\")\n return self.prev_ti_state.FAILED\n logger.info(\"There are successed tasks before this one. So it has status SUCCESSED\")\n return self.prev_ti_state.SUCCESS", "def status(self) -> Optional[pulumi.Input[Union[str, 'Status']]]:\n return pulumi.get(self, \"status\")", "def status(self) -> Optional[pulumi.Input[Union[str, 'Status']]]:\n return pulumi.get(self, \"status\")", "def __status(self, *args):\n return \"status\"" ]
[ "0.7202656", "0.71696675", "0.7097821", "0.6938727", "0.68742913", "0.68445003", "0.68133837", "0.6747887", "0.67144114", "0.6705535", "0.662726", "0.6622442", "0.6621217", "0.66081154", "0.6606708", "0.6606708", "0.6598715", "0.65756345", "0.65702355", "0.65538526", "0.6550194", "0.6547384", "0.64904195", "0.64783126", "0.6455558", "0.6455558", "0.6455558", "0.643943", "0.64168113", "0.6397609", "0.63947904", "0.63888264", "0.6382888", "0.63799244", "0.6377142", "0.63657093", "0.63269365", "0.6324667", "0.6298099", "0.6297632", "0.6285447", "0.6263669", "0.6252722", "0.6244736", "0.6244179", "0.6239435", "0.6230815", "0.6223377", "0.6213037", "0.62099165", "0.62080806", "0.6195519", "0.61884314", "0.6163547", "0.6163547", "0.6160813", "0.6149706", "0.61455107", "0.61392915", "0.6138388", "0.613179", "0.6120888", "0.61012715", "0.60995287", "0.6085704", "0.6084001", "0.60737973", "0.6071897", "0.6067587", "0.60506237", "0.604468", "0.6041915", "0.6039498", "0.60249054", "0.6020175", "0.60067266", "0.60044324", "0.5992973", "0.59859765", "0.5979511", "0.5969355", "0.5950201", "0.59501183", "0.59476775", "0.59425557", "0.59350675", "0.59329283", "0.5930634", "0.5920934", "0.5916951", "0.59139", "0.5908678", "0.5908678", "0.589083", "0.58842814", "0.58840126", "0.58743453", "0.5873399", "0.5873399", "0.58676195" ]
0.86103326
0
To check if payload to be processed with this lambda
def apply_filter(self, payload: dict, ainfos) -> (dict, dict):
    # check if needs to process by this lambda
    pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_payload():\n return True", "def process(self, payload, status_code=0):", "def has_payload(self):\n\n if self._payload:\n return True\n return False", "def __should_payload_execute(self, queue_item):\n\n soup = queue_item.get_soup_response()\n\n ng_app_soup = soup.select(\"[ng-app]\")\n if not ng_app_soup:\n return False\n\n for non_bindable in ng_app_soup[0].select(\"[ng-non-bindable]\"):\n non_bindable.decompose()\n\n in_scope_html = str(ng_app_soup[0])\n\n if queue_item.payload[\"value\"] in in_scope_html:\n return True\n\n return False", "async def exists(self, payload: TPayload) -> bool:", "def payload_undefined(self):\n return self._attr is None", "def payload_valid(self, payload):\n return (\n isinstance(payload, DPTArray)\n and len(payload.value) == self.dpt_class.payload_length\n )", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n return", "def lambda_handler(event, context):\n\n event_body = json.loads(event['body'])\n print(\"EVENT:\")\n print(event_body)\n\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n recs = flow(event_body, textract, cache = True)\n rval = {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\" : \"hello world\",\n \"textract\" : recs\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n\n return rval", "def payload(self):", "def lambda_handler(event, context):\n return dispatch(event)", "def lambda_handler(event, context):\n\n operations = {\n 'POST': main,\n }\n\n if event.get('httpMethod', False):\n operation = event['httpMethod']\n else:\n operation = \"not available\"\n\n payload = base64.b64decode(event['body'])\n try:\n payload = json.loads(payload)\n except TypeError:\n pass\n\n if operation in operations:\n return respond(None, operations[operation](payload))\n else:\n return respond(ValueError(f'Unsupported method {operation}'))", "def process_webhook(self):\n if self.token:\n self.verify = VerificationMethod.TOKEN\n if self.secret:\n self.verify = VerificationMethod.HMAC\n return True", "def lambda_handler(event, context):\n\n print(\"EVENT:\")\n print(event)\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n\n recs = flow(event, s3)\n print(recs)\n\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"message\": \"hello world\",\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }", "def payload_is_handleable(self, payload):\n\t\tif payload.get_filename():\n\t\t\treturn True\n\t\treturn False", "def consume(self, payload):\n raise NotImplementedError()", "def handler(self, *args, **kwargs):\n return True", "def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )", "async def triggered_on(self, ctx: FilterContext) -> bool:", "def check_message_payload(dequeued_item):\n key_array = [\"dateTime\",\n \"payload\",\n \"messageType\"]\n\n # Note that the \"ttl\" key (and others) may be present 
but its not checked here!\n\n for key in key_array:\n if key not in dequeued_item.keys():\n return False\n\n key_array = [\"zoomR\",\n \"spatial\",\n \"circuitID\",\n \"reputationEnabled\",\n \"assetID\",\n \"temporal\",\n \"outageTime\",\n \"company\",\n \"votes\",\n \"zoomT\",\n \"longitude\",\n \"latitude\"]\n for key in key_array:\n if key not in dequeued_item[\"payload\"].keys():\n return False\n return True", "def execute_request(self, request: Request):\r\n print(\"Handler is validating data\")\r\n if request.data_input is not None or request.input_file is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Data is not validated\")\r\n return False", "def payload_handle(self, payload, mail):\n\t\tif self.payload_is_handleable(payload):\n\t\t\tif self.export_payload:\n\t\t\t\tself.payload_pipe(payload, mail)\n\t\t\tif self.reduce_payload:\n\t\t\t\t# Mark email as deleted:\n\t\t\t\tself.delete_marked.append(self.payload_index(payload, mail))", "def lambda_handler(event, context):\n print(event)\n print(context)\n storage_gateway_status()", "def validate_payload(payload):\n\n if not isinstance(payload, dict):\n raise Exception(\"payload is a %s, not a dictionary\" % type(payload))\n\n if \"nmo\" not in payload:\n raise Exception(\"No nmo in payload\")\n\n if \"job\" not in payload[\"nmo\"]:\n raise Exception(\"No job in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n if \"task\" not in payload[\"nmo\"]:\n raise Exception(\"No task in nmo \\nnmo is %s\" % payload[\"nmo\"])\n\n try:\n isGroup = payload['nmo']['source']['misc']['isGroup']\n except:\n isGroup = False\n\n if \"jsonld\" not in payload and not isGroup:\n raise Exception(\"No jsonld in payload \\nPayload is:- %s\" % payload)", "def check_Data(self):\r\n \r\n if self._target_data is None:\r\n self.processData()", "def body(self, _):\r\n return False", "def add_event(payload: PayloadDict) -> bool:\n ...", "def handler(event, context):\n pub_sub_message = base64.b64decode(event['data']).decode('utf-8')\n\n if pub_sub_message == 'executor':\n LOGGER.debug('POST: %s', EVENTS_EXECUTION_ENDPOINT)\n response = requests.post(EVENTS_EXECUTION_ENDPOINT, json={'type': 'POLICY'},\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n elif pub_sub_message == 'validator':\n LOGGER.debug('POST: %s', EVENTS_VALIDATION_ENDPOINT)\n response = requests.post(EVENTS_VALIDATION_ENDPOINT,\n headers=utils.get_auth_header())\n LOGGER.debug('Response: %s', response.text)\n\n else:\n LOGGER.warn('Unexpected message from PubSub: %s', pub_sub_message)\n return", "def preprocess_body(self) -> None:\n self._verify_archive_url_and_zip_path()\n self._verify_upload_url_and_zip_path()\n self._verify_upload_url_and_no_zip_path()\n if self.upload_function is None:\n self.upload_function = False", "def process_request(self, req):\n return None", "def _check_parameter(self, data):\n return self._pre_process_record(data) is not None", "def test_filter_function_any(self):\n self.es.register_filter(lambda x: True, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))\n self.es.register_filter(lambda x: False, ftype='any')\n self.assertTrue(self.es.streamfilter(self.data))", "def accepts_body(self):\n return self._request_body_parameter is not None", "def has_payload(self, name: str) -> bool:\n return name in self.payload or name in self.payload_persistent", "def lambda_handler(event, context):\n\n # try:\n # ip = 
requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n print(event)\n method=event['httpMethod']\n print(f\"method={method}\")\n print(f\"table_name={table_name}\")\n myTriggerType='instrument_price'\n\n \n if method == \"DELETE\":\n #path=event['path']\n trigger_id=event['pathParameters']['trigger_id']\n print(f\"triggerId={trigger_id}\")\n\n try:\n #see https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html#DynamoDB.Table.delete_item\n response = table.delete_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ConditionExpression=And(Attr('PK').eq(Attr('SK')),Attr('triggerType').eq(myTriggerType)),\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(f\"response={response}\")\n return {\n \"statusCode\": 200,\n \"body\":\"\",\n }\n \n elif method == \"POST\":\n body=json.loads(event['body'])\n trigger_id=body['trigger_identity']\n print(f\"triggerId={trigger_id}\")\n\n response = table.get_item(\n Key={'PK':f\"TR#{myTriggerType}#{trigger_id}\", \"SK\":f\"TR#{myTriggerType}#{trigger_id}\"},\n ProjectionExpression=\"triggerEvents, triggerType\",\n )\n print(f\"response={response}\")\n\n if \"Item\" not in response:\n #brand new \n print(f\"inserting {trigger_id}\")\n if 'triggerFields' not in body:\n return iftttError(400, \"triggerFields missing from request\")\n triggerFields=body['triggerFields']\n #todo validate trigger fields\n try:\n response = table.put_item(\n Item={\n 'PK':f\"TR#{myTriggerType}#{trigger_id}\", \n \"SK\":f\"TR#{myTriggerType}#{trigger_id}\",\n 'triggerId': trigger_id,\n #hacky string way to avoid having multiple columns\n 'triggerFields': json.dumps(triggerFields),\n 'triggerType': myTriggerType,\n },\n ConditionExpression=Or(Attr('triggerType').eq(myTriggerType),Attr('triggerType').not_exists())\n )\n except ClientError as e:\n print(f\"clientError={e}\")\n #somehow got created with someone elses triggerType\n if e.response['Error']['Code']=='ConditionalCheckFailedException':\n return iftttError(404,\"item not found\")\n raise\n print(\"response \",response)\n triggered=[]\n elif response['Item'].get(\"triggerType\",myTriggerType) != myTriggerType:\n #it exists but it is someone elses\n return iftttError(404,\"item not found\")\n else:\n item=response['Item']\n print(f\"found {item} \")\n #hacky string way to avoid having multiple columns\n #TODO: change this to use a Map? 
(will allow to add without overwrite)\n events = json.loads(item.get(\"triggerEvents\",\"[]\"))\n triggered= []\n for event in events:\n #TODO: implement limit (not needed now becasue I expect only up to one events)\n triggered.append(event['data'])\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n \"data\": triggered,\n # \"location\": ip.text.replace(\"\\n\", \"\")\n }),\n }\n else :\n return iftttError(400, f\"unexpected httpMethod {method}\")", "def on_push(self, payload):\n pass", "def on_watch(self, payload):\n pass", "def sniffer_callback(self, pkt):\n #if \"Ether\" in pkt and \"IP\" in pkt and \"TCP\" in pkt:\n if \"TCP\" in pkt:\n\n # Debug check for packet details\n # print(pkt.summary())\n\n if pkt[TCP].payload:\n # print(\"[PAYLOAD]:\\n%s\" % pkt[TCP].payload)\n self.callback_object.process_packet(pkt)\n\n # Ignore packets without payload\n # else:\n # print(\"[NO-LOAD]Packet does not have payload!\")", "def lambda_handler(event, context):\n\n params = ['comp_name', 'action', 'level', 'msg']\n missing = []\n for i in range(len(params)):\n if params[i] not in event:\n missing.append(params[i])\n \n if missing:\n err = \"Missing these parameters: \" + str(missing)\n print(err)\n \n return {\n \"statusCode\": 500,\n \"body\": json.dumps(err)\n }\n notify_snitch(event['comp_name'], event['action'], event['level'], event['msg'])\n \n return {\n \"statusCode\": 200,\n \"body\": json.dumps('Notified snitch')\n }", "def test4():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000147203645/tic000147203645_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def onMessage(self, payload, isBinary):", "def check(self, **kwargs):\n runopts = self.runoptions.copy()\n if isinstance(self, ExploitModule):\n payload = kwargs.get('payload')\n runopts['TARGET'] = self.target\n if 'DisablePayloadHandler' in runopts and runopts['DisablePayloadHandler']:\n pass\n elif payload is None:\n runopts['DisablePayloadHandler'] = True\n else:\n if isinstance(payload, PayloadModule):\n if payload.modulename not in self.payloads:\n raise ValueError(\n 'Invalid payload (%s) for given target (%d).' % (payload.modulename, self.target)\n )\n runopts['PAYLOAD'] = payload.modulename\n for k, v in payload.runoptions.items():\n if v is None or (isinstance(v, str) and not v):\n continue\n if k not in runopts or runopts[k] is None or \\\n (isinstance(runopts[k], str) and not runopts[k]):\n runopts[k] = v\n # runopts.update(payload.runoptions)\n elif isinstance(payload, str):\n if payload not in self.payloads:\n raise ValueError('Invalid payload (%s) for given target (%d).' 
% (payload, self.target))\n runopts['PAYLOAD'] = payload\n else:\n raise TypeError(\"Expected type str or PayloadModule not '%s'\" % type(kwargs['payload']).__name__)\n\n return self.rpc.call(MsfRpcMethod.ModuleCheck, [self.moduletype, self.modulename, runopts])", "def lambda_handler(event, _):\n logger.info(event)\n response = {\"isAuthorized\": False, \"context\": {\"AuthInfo\": \"QueryStringTokenCheck\"}}\n\n if event[\"queryStringParameters\"][\"access_token\"] == ACCESS_TOKEN:\n response[\"isAuthorized\"] = True\n\n return response", "def lambda_handler(event, context):\n\n # try:\n # ip = requests.get(\"http://checkip.amazonaws.com/\")\n # except requests.RequestException as e:\n # # Send some context about this error to Lambda Logs\n # print(e)\n\n # raise e\n\n try:\n response = s3.get_object(Bucket=BUCKET, Key=KEY)\n print('CONTENT TYPE:', response['ContentType'])\n print('response:')\n pprint.pprint(response)\n print('event')\n pprint.pprint(event)\n print('payload')\n pprint.pprint(event.get('payload'))\n # return json.loads(json.dumps(response, default=str))\n # defined by https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format\n return {\n 'statusCode': 200,\n 'isBase64Encoded': False,\n 'body': json.dumps(response, default=str)\n }\n # return response['ContentType']\n except Exception as e:\n print(e)\n print('Error getting object {} from bucket {}. Make sure they exist and your bucket is in the same region as this function.'.format(KEY, BUCKET))\n raise e", "def verify_as_target(self, message_handler):", "def handle(self, rawdata):\r\n\r\n return self.__filter(self.__handler(rawdata))", "def __is_item_vulnerable(self, queue_item):\n\n try:\n HTTPHandler(None, queue_item)\n except Exception:\n return False\n\n if not \"html\" in queue_item.response.headers.get(\"content-type\"):\n return False\n\n if not queue_item.get_soup_response():\n return False\n\n if not self.__should_payload_execute(queue_item):\n return False\n\n if self.__verify_payload:\n if not self.__verify_queue_item(queue_item.verify_item):\n return False\n\n return True", "def execute_request(self, request: Request) -> bool:\r\n print(\"Handler is validating key\")\r\n if request.key is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Key is not valid\")\r\n return False", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def handle_execution(self, data, *args, **kwargs):\n return {}", "def lambda_handler(event, context=None):\n response = {}\n try:\n response = middleware.IdentityAuthMiddleWare.process_request(event, response)\n except Exception as e:\n response[\"message\"] = e.message\n response[\"errors\"] = e.errors\n # removing request_dump data\n if \"request_dump\" in response[\"errors\"]:\n del response[\"errors\"][\"request_dump\"]\n for _k, _v in response[\"errors\"].items():\n response[\"errors\"][_k] = str(_v)\n return response", "def is_binary_payload(cls) -> bool:\n return True", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in 
decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def lambda_handler(event, context):\n\n # S3 resource invocation\n s3_resource = boto3.resource('s3')\n # S3 bucket selection\n data_bucket_name = \"put_here_data_bucket_name\"\n # The SageMaker runtime is what allows us to invoke the endpoint that we've created.\n runtime = boto3.Session().client('sagemaker-runtime')\n\n request_body_dict = json.loads(event['body'])\n\n # Now we use the SageMaker runtime to invoke our endpoint, sending both ticker and start date if given\n if request_body_dict['start_date'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_future_request(request_body=request_body_dict,\n s3_resource=s3_resource,\n s3_bucket=data_bucket_name, prefix='valid'))\n # or only ticker name if no start date has been provided\n elif request_body_dict['ticker_name'] != \"\":\n response = runtime.invoke_endpoint(EndpointName='DeepAR-ml-spp', # The name of the endpoint we created\n ContentType='application/json', # The data format that is expected\n Body=encode_request(ticker_name=request_body_dict['ticker_name'],\n s3_resource=s3_resource, s3_bucket=data_bucket_name,\n prefix='train'))\n\n # The response is an HTTP response whose body contains the result of our inference\n result = response['Body'].read().decode('utf-8')\n\n # print data for debug purposes\n print(result)\n\n return {\n 'statusCode': 200,\n 'headers': {'Content-Type': 'application/json', 'Access-Control-Allow-Origin': '*'},\n 'body': str(result)\n }", "def process_request(self):\r\n # Middleware should pass request through\r\n self.assertEquals(self.middleware.process_request(self.request), None)", "def map_over_json(self, stream, func):\n value = func(self.block, stream)\n if self.block.required and value is None:\n raise ValidationError('This block requires a value')\n return value", "def test_lambda_support_no_parameters_no_body(self):\n self.assert_contains_lambda_expression_in_m(\n parse.parse(setup_java_class(\"() -> {};\")))", "def checkError(invoke_response, message):\n\n if 'FunctionError' in invoke_response:\n err_message = invoke_response['Payload'].read()\n print(message)\n print(err_message)\n return {\n 'statusCode': 500,\n 'body': json.dumps(str(err_message))\n }\n return None", "def on_pull_request(self, payload):\n pass", "def lambda_handler(event, context):\n print(\"event.session.application.applicationId=\" +\n event['session']['application']['applicationId'])\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n #if (event['session']['application']['applicationId'] != \"<APPLICATION_ID>\"):\n # raise ValueError(\"Invalid Application ID\")\n\n\n if event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])", "def check_body(self) -> Optional[pulumi.Input[str]]:\n 
return pulumi.get(self, \"check_body\")", "def check_body(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"check_body\")", "def payload(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:\n return pulumi.get(self, \"payload\")", "def lambda_handler(event, context):\r\n print(\"Incoming request...\")\r\n\r\n \"\"\"\r\n Uncomment this if statement and populate with your skill's application ID to\r\n prevent someone else from configuring a skill that sends requests to this\r\n function.\r\n \"\"\"\r\n # if (event['session']['application']['applicationId'] !=\r\n # \"amzn1.echo-sdk-ams.app.[unique-value-here]\"):\r\n # raise ValueError(\"Invalid Application ID\")\r\n\r\n if event['session']['new']:\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def validate_observation(self, observation_payload):\n pass", "def lambda_handler(event, context):\n\n \"\"\"\n This statement prevents someone else from configuring a skill that sends \n requests to this function.\n \"\"\"\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def handler(event, context): # pylint: disable=unused-argument\n\n if \"queue\" in event:\n # Lambda is being invoked to read messages directly from queue URL\n # In that mode SNS events are always sent to the internal\n # reconcile topic\n process_queue(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n queue=event[\"queue\"],\n message_batch_size=int(os.environ[\"MESSAGE_BATCH_SIZE\"]),\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n delete_processed_messages=int(os.environ[\"DELETE_MESSAGES\"]) == 1,\n )\n else:\n # Lambda is being invoked as trigger to SQS\n process_trigger(\n stac_bucket=os.environ[\"STAC_BUCKET\"],\n cog_pds_meta_pds=json.loads(os.environ[\"COG_PDS_META_PDS\"]),\n event=event,\n sns_target_arn=os.environ[\"SNS_TARGET_ARN\"],\n sns_reconcile_target_arn=os.environ[\"SNS_RECONCILE_TARGET_ARN\"],\n catalog_update_queue=os.environ.get(\"CATALOG_UPDATE_QUEUE\"),\n catalog_update_table=os.environ[\"CATALOG_UPDATE_TABLE\"],\n corrupted_xml_queue=os.environ[\"corrupted_xml_queue_url\"],\n )", "def process_message(self, tag, value):\n return False", "def lambda_handler(event, context):\n print(\"Incoming request...\")\n\n \"\"\"\n Uncomment this if statement and populate with your skill's application ID to\n prevent someone else from configuring a skill that sends requests to this\n function.\n \"\"\"\n if (event['session']['application']['applicationId'] !=\n 
\"amzn1.ask.skill.2994421a-75ef-4502-9d4a-bf83f20a7ade\"):\n raise ValueError(\"Invalid Application ID\")\n\n if event['session']['new']:\n on_session_started({'requestId': event['request']['requestId']},\n event['session'])\n\n if event['request']['type'] == \"LaunchRequest\":\n return on_launch(event['request'], event['session'])\n elif event['request']['type'] == \"IntentRequest\":\n return on_intent(event['request'], event['session'])\n elif event['request']['type'] == \"SessionEndedRequest\":\n return on_session_ended(event['request'], event['session'])", "def lambda_handler(event, context):\n set_logging(level=logging.DEBUG)\n\n try:\n payload = json.loads(event[\"Records\"][0][\"Sns\"][\"Message\"])\n account_id = payload['account_id']\n account_name = payload['account_name']\n # get the last region from the list to process\n region = payload['regions'].pop()\n # region = payload['region']\n # if request_id is present in payload, it means this lambda was called from the API\n request_id = payload.get('request_id', None)\n except Exception:\n logging.exception(f\"Failed to parse event\\n{event}\")\n return\n\n try:\n config = Config()\n\n main_account = Account(region=config.aws.region)\n ddb_table = main_account.resource(\"dynamodb\").Table(config.sqspolicy.ddb_table_name)\n\n account = Account(id=account_id,\n name=account_name,\n region=region,\n role_name=config.aws.role_name_identification)\n if account.session is None:\n return\n\n logging.debug(f\"Checking for public SQS policies in {account}\")\n\n # existing open issues for account to check if resolved\n open_issues = IssueOperations.get_account_open_issues(ddb_table, account_id, SQSPolicyIssue)\n # make dictionary for fast search by id\n # and filter by current region\n open_issues = {issue.issue_id: issue for issue in open_issues if issue.issue_details.region == region}\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n\n checker = SQSPolicyChecker(account=account)\n if checker.check():\n for queue in checker.queues:\n logging.debug(f\"Checking {queue.name}\")\n if queue.public:\n issue = SQSPolicyIssue(account_id, queue.url)\n issue.issue_details.tags = queue.tags\n issue.issue_details.name = queue.name\n issue.issue_details.region = queue.account.region\n issue.issue_details.policy = queue.policy\n if config.sqspolicy.in_whitelist(account_id, queue.url):\n issue.status = IssueStatus.Whitelisted\n else:\n issue.status = IssueStatus.Open\n logging.debug(f\"Setting {queue.name} status {issue.status}\")\n IssueOperations.update(ddb_table, issue)\n # remove issue id from issues_list_from_db (if exists)\n # as we already checked it\n open_issues.pop(queue.url, None)\n\n logging.debug(f\"SQS in DDB:\\n{open_issues.keys()}\")\n # all other unresolved issues in DDB are for removed/remediated queues\n for issue in open_issues.values():\n IssueOperations.set_status_resolved(ddb_table, issue)\n if request_id:\n api_table = main_account.resource(\"dynamodb\").Table(config.api.ddb_table_name)\n DDB.track_progress(api_table, request_id)\n except Exception:\n logging.exception(f\"Failed to check SQS policies for '{account_id} ({account_name})'\")\n return\n\n # push SNS messages until the list with regions to check is empty\n if len(payload['regions']) > 0:\n try:\n Sns.publish(payload[\"sns_arn\"], payload)\n except Exception:\n logging.exception(\"Failed to chain insecure services checking\")\n\n logging.debug(f\"Checked SQS policies for '{account_id} ({account_name})'\")", "def lambda_handler(event, context):\n 
logging.info(\"Received event: \" + json.dumps(event, indent=2))\n request_type = event['RequestType']\n if request_type == 'Create':\n attach_policy(event, context)\n elif request_type == 'Delete':\n detach_policy(event, context)\n elif request_type == 'Update':\n update_policy(event, context)", "async def check_payload(self, payload: discord.RawReactionActionEvent):\n guild = self.bot.get_guild(payload.guild_id)\n emoji = payload.emoji\n\n \"\"\"\n Cache implementation:\n {\n guild_id: {\n message_id: {\n emoji: roleid\n }\n }\n }\n } \n \"\"\"\n\n if guild.id in self._cache:\n if payload.message_id in self._cache[guild.id]:\n if str(emoji) in self._cache[guild.id][payload.message_id]:\n return guild.get_role(self._cache[guild.id][payload.message_id][str(emoji)])", "def payload(self):\n if self._extf:\n raise UnsupportedCall(f\"'{self.__class__.__name__}' object has no attribute 'payload'\")\n return self._next", "def is_function(self):\n return self.args is not None", "def test_webhook_empty_event(self):\n event = {\n 'body': json.dumps({})\n }\n context = {}\n resp = webhook(event, context)\n self.assertEqual(resp[\"statusCode\"], 500)\n self.assertEqual(resp[\"body\"], json.dumps({}))", "def lambda_handler(event, context):\n \n try:\n print('QueryStringParameter: {}'.format(event['queryStringParameters']))\n if event['queryStringParameters'] == None or not 'text' in event['queryStringParameters']:\n return {\n 'statusCode': 200,\n 'body': json.dumps({'message': 'Please provide a text parameter as a querystring.'}),\n }\n text = event['queryStringParameters']['text']\n if len(text) <= 5:\n return {\n 'statusCode': 200,\n 'body': json.dumps({'message': 'Text length must be superior to 5 caracters.'}),\n }\n response = client.classify_document(\n Text=text,\n EndpointArn=endpoint_arn\n )\n return {\n 'statusCode': 200,\n 'body': json.dumps(response['Classes']),\n }\n except Exception as e:\n print(e)\n return {\n 'statusCode': 200,\n 'body': json.dumps({'message': 'An error occured, please try again later.'}),\n }", "def _passed_to_processor(self):\n return self._processor.processed", "def _valid(self):\n return all(map(lambda v: v is not None,\n (self.connection, self.sender, self.receiver, self.subject, self.body)))", "def check_action_status(payload):\n response = requests.post(url, data=payload)\n return response.json()", "def test_generic(key,bucket):\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": bucket\n },\n \"object\": {\n \"key\": key\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n print(out)\n assert out[\"statusCode\"] == 200", "def isJWS_unserialized_single(x):\n if isinstance(x, dict) \\\n and \"payload\" in x and \"signature\" in x \\\n and (\"protected\" in x or \"unprotected\" in x):\n try: \n if \"protected\" in x: \n json.loads(x[\"protected\"])\n return True\n except:\n return False\n else: \n return False", "def lambda_handler(event, content):\n imap = email_startup()\n status, messages = imap.select('Inbox')\n days_old = input('Enter many days ago do you want to use as the cutoff?: ')\n new_date = get_days_old(days_old)\n messages = apply_filter(imap, new_date)\n initial_unread = get_unread_count(imap)\n print(f'Initial unread emails: {initial_unread}')\n print(f'Emails to be filter: {len(messages)}')\n a_pause = input('Continue by pressing enter.')\n\n print(f'Processing {len(messages)} unread emails from before 
{new_date}')\n print(\"=\"*100)\n process_messages(imap, messages)\n print(\"=\"*100)\n\n # Determine results from script\n post_unread = get_unread_count(imap)\n print(f'Processed Emails: {initial_unread - post_unread}')\n print(f'After processing, there are {post_unread} unread emails.')\n\n # close the connection and logout\n imap.close()\n imap.logout()", "def lambda_handler(event, context):\r\n print(\"event.session.application.applicationId=\" +\r\n event['session']['application']['applicationId'])\r\n\r\n\r\n if event['session']['new']:\r\n #print (\"**** Reached\")\r\n on_session_started({'requestId': event['request']['requestId']},\r\n event['session'])\r\n\r\n #print(\"**** Intent coming is : \" + event['request']['type'])\r\n if event['request']['type'] == \"LaunchRequest\":\r\n return on_launch(event['request'], event['session'])\r\n elif event['request']['type'] == \"IntentRequest\":\r\n return on_intent(event['request'], event['session'])\r\n elif event['request']['type'] == \"SessionEndedRequest\":\r\n return on_session_ended(event['request'], event['session'])", "def process_request(self, request):\n return None", "def _wrap_handler(self, handler, body):\n try:\n decoded_body = json.loads(body)\n result = yield handler(decoded_body)\n return result\n except Exception as e:\n return {\"error\": str(e)}", "def hosted_function(payload):\n logger.info('hosted_function called')\n return 'OK', 201", "def execute_request(self, request: Request):\r\n print(\"Handler is validating output\")\r\n if request.output is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Output is not validated\")\r\n return False", "def __call__(self, event, payload):\n logging.debug(\"Event: %s %s\" % (event, payload))\n\n # register new task\n if event == \"CRAB_Cmd_Mgr:NewTask\":\n self.newTaskRegistration(payload)\n elif event == \"KillTask\":\n taskUniqName, cmdRng = payload.split(':')\n self.killingRequestes[taskUniqName] = cmdRng\n # usual stuff\n elif event == \"TaskRegisterComponent:StartDebug\":\n logging.getLogger().setLevel(logging.DEBUG)\n elif event == \"TaskRegisterComponent:EndDebug\":\n logging.getLogger().setLevel(logging.INFO)\n elif event == \"TaskRegisterComponent:HeartBeat\":\n logging.info(\"HeartBeat: I'm alive \")\n self.ms.publish(\"TaskRegisterComponent:HeartBeat\",\"\",self.HeartBeatDelay)\n self.ms.commit()\n else:\n logging.info('Unknown message received %s + %s'%(event,payload))\n return True", "def can_handle(self, handler_input):\n return is_request_type(\"LaunchRequest\")(handler_input)", "def lambda_handler(event, context):\n\n logger.debug('event.bot.name={}'.format(event['bot']['name']))\n\n return dispatch(event)", "def get_payload(self):\n if self.payload == '':\n return {}\n\n return json.loads(self.payload)", "def testNonJSONObjectPayload(self):\n body = dumps('Not a dict')\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual(JSONRPC_PARSE_ERROR, response['error']['code'])\n message = 'Payload was not a JSON object.'\n self.assertEqual(message, response['error']['message'])\n self.assertIn(message, self.log.getvalue())\n self.assertIn('Request payload: \"Not a dict\".', self.log.getvalue())", "def lambda_handler(event, context):\n try:\n if not 
_is_setup:\n _setup()\n\n previous_attempts = event.get(\"Artifact\", dict(ReadAttempts=0))[\"ReadAttempts\"]\n\n manifest = _load_manifest(event[\"ResourceKey\"])\n artifact_location = dict(S3Bucket=_bucket_name, S3Key=manifest[\"ArtifactS3Key\"])\n artifact_exists = _artifact_exists(manifest[\"ArtifactS3Key\"])\n\n return dict(\n Found=artifact_exists,\n ReadAttempts=previous_attempts + 1,\n Location=artifact_location,\n ProjectName=manifest[\"ProjectName\"],\n Runtimes=manifest[\"Runtimes\"],\n )\n except Exception:\n # TODO: Turn these into known-cause state machine failures.\n raise", "def _get_conclusion_data(self, payload):\n pass", "def lambda_handler(event, context):\n\n # Log the values received in the event argument\n logger.info(f'Request event: {event}')\n\n # Define default hard-coded return values\n response = {\n 'uid': 'Example function ID',\n 'return_val01': 'Return value #1',\n 'return_val02': 'Return Value #2',\n }\n\n # Retrieve type of invocation (GET, PUT, etc.)\n if 'http_verb' in event:\n operation = event['http_verb'].upper()\n if operation == 'PUT':\n # Return the values passed to the function\n response = {\n 'uid': event['functionID'],\n 'return_val01': event['parameters']['parm01'],\n 'return_val02': event['parameters']['parm02'],\n }\n\n logger.info(f'Response={response}')\n return response", "def test3():\n event = {\n \"Records\": [\n {\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"configurationId\": \"b0efd5b1-cc92-47b4-8501-1c34f5eba235\",\n \"bucket\": {\n \"name\": \"/tmp/\"\n },\n \"object\": {\n \"key\": \"tic000129646247_s0001-1-1_stlc.fits\"\n }\n }\n }\n ]\n}\n context = {}\n \n out = lambda_handler(event, context)\n \n assert out[\"statusCode\"] == 200", "def lambda_handler(event, context):\n return {\n 'statusCode': 200,\n 'body': say_hello()\n }", "def consume(self, handler) -> None:\n pass # pragma: no cover", "def lambda_handler(event, context):\n if event.get('content') and event.get('type') == 'text':\n # user_key = event.get('user_key')\n # state_set(user_key, randint(0,9))\n return resp_dict(event.get('content'))\n return resp_type_2(phrase_dict('처음으로'), buttons_dict('처음으로'))" ]
[ "0.6974256", "0.6329951", "0.6317524", "0.5889682", "0.58806306", "0.5717963", "0.57109547", "0.56970483", "0.56970483", "0.56906414", "0.566029", "0.55937403", "0.5567957", "0.5565435", "0.5472518", "0.5453765", "0.5410358", "0.54089427", "0.53794", "0.5371008", "0.53691435", "0.53511924", "0.5346419", "0.53367436", "0.5328826", "0.5311681", "0.52784127", "0.5276847", "0.52756053", "0.5258173", "0.525243", "0.52469736", "0.5244724", "0.5230113", "0.5224427", "0.5222794", "0.5218363", "0.52126265", "0.5205093", "0.51571536", "0.5155555", "0.5141196", "0.5140796", "0.51369846", "0.5130888", "0.51277345", "0.51274467", "0.51195264", "0.51173407", "0.5116651", "0.5116651", "0.5116398", "0.5112025", "0.51049435", "0.51037484", "0.51031643", "0.510082", "0.5088313", "0.5079516", "0.50777817", "0.5058625", "0.50573874", "0.50573874", "0.50500214", "0.5048678", "0.50474966", "0.50382954", "0.5029941", "0.5024424", "0.50233877", "0.50216293", "0.500152", "0.500096", "0.49974185", "0.49961528", "0.4972642", "0.49673203", "0.4966885", "0.49643585", "0.495983", "0.49580157", "0.49579385", "0.49542123", "0.49517244", "0.49503228", "0.49498442", "0.494745", "0.4940734", "0.49382573", "0.49351963", "0.49333543", "0.4919606", "0.4917288", "0.49103162", "0.490884", "0.4898812", "0.48982206", "0.48973995", "0.4892893", "0.48926023" ]
0.66218215
1
Process Kinesis Record and save to Redis Cache
def enrich_payload(self, payload) -> None:
    filtered_payloads = self.apply_filter(payload)
    enriched = self.enrich(filtered_payloads)
    return enriched
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handler(kinesis_records, context):\n data = kinesis_records[0].parse()\n detail = data.get('detail')\n return publish({\n \"eventSource\": data['source'],\n \"awsRegion\": data['region'],\n \"eventTime\": data['time'],\n \"eventName\": detail['eventName'],\n \"userIdentity\": {\n \"principalId\": detail['userIdentity']['principalId']\n },\n \"requestParameters\": {\n \"sourceIPAddress\": detail['sourceIPAddress']\n },\n \"s3\": {\n \"s3SchemaVersion\": \"1.0\",\n \"bucket\": {\n \"name\": detail['requestParameters']['bucketName'],\n \"arn\": detail['resources'][1]['ARN']\n },\n \"object\": {\n \"key\": detail['requestParameters']['key'],\n \"size\": detail['additionalEventData']['bytesTransferredIn']\n }\n }\n })", "def process_record(self, record):\n raise NotImplementedError('Process record needs to be customized')", "def lambda_handler(event, context):\n print('Received request')\n item = None\n\n mysql_host = '54.212.197.235'\n mysql_username = 'rts'\n mysql_password = 'SamWangRamsay520-S'\n mysql_dbname = 'rts_kinesis'\n mysql_tablename = 'benchmark_kinesis'\n\n print('Start connection')\n conn = mysql.connector.connect(host=mysql_host,\n user=mysql_username,\n passwd=mysql_password,\n db=mysql_dbname )\n print('End connection')\n '''Write the message to the mysql database'''\n cur = conn.cursor()\n\n #dynamo_db = boto3.resource('dynamodb')\n #table = dynamo_db.Table('benchmark_kinesis')\n _mysql_buffer = [] #ad-hoc message buffering for mysql, equivalent to dynamodb batch-write behavior\n _mysql_buffer_limit = 25\n records = [record for record in event['Records']]\n new_records = deaggregate_records(records)\n #decoded_record_data = [record['kinesis']['data'] for record in new_records]\n #deserialized_data = [decoded_record for decoded_record in records]\n #for data in decoded_record_data:\n for record in new_records:\n\t#d_record = \"%.15g\" % record['kinesis']['partitionKey']\n\t#con_time = \"%.15g\" % time.time()\n\tcreation_time = Decimal(record['kinesis']['partitionKey'])\n\tconsumer_time = Decimal(time.time())\n\tvalue = record['kinesis']['data']\n\t#cur.execute('INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)', (creation_time, consumer_time, value))\n sql = 'INSERT INTO '+mysql_tablename+'(creation_time, consumer_time, value) VALUES (%s, %s, %s)'\n _mysql_buffer.append((creation_time, consumer_time, value))\n if len(_mysql_buffer) > _mysql_buffer_limit:\n cur.executemany(sql, _mysql_buffer)\n _mysql_buffer = []\n\t# Add a processed time so we have a rough idea how far behind we are\n #item['processed'] = datetime.datetime.utcnow().isoformat()\n\n conn.commit()\n conn.close()\n cur.close()\n # Print the last item to make it easy to see how we're doing\n #print(json.dumps(item))\n print('Number of records: {}'.format(str(len(new_records))))", "def processRecord(record):\n event_name = record['eventName']\n bucket = record['s3']['bucket']['name']\n key = record['s3']['object']['key']\n blob_id = key.replace('{}/'.format(os.environ['S3_KEY_BASE']), '')\n\n if 'ObjectCreated:Put' == event_name:\n try:\n blob = BlobModel.get(hash_key=blob_id)\n blob.mark_uploaded()\n labels = getImageLabels(bucket, key)\n blob.update_state_to_processed_and_add_labels(labels)\n except UpdateError:\n logger.exception('Unable to update blob')\n\n except botocore.exceptions.ClientError as e:\n logger.exception('Client provided a bad image')\n blob.set_rekognition_error_and_mark_processed(str(e))\n\n except DoesNotExist:\n logger.exception('Blob does not 
exist')", "def kinesis_process(self, payload, classifier):\n data = StreamPreParsers.pre_parse_kinesis(payload.raw_record)\n self.process_alerts(classifier, payload, data)", "def _process(self, data, cache):\n stop = False\n try:\n super(PickleCache, self).process(data)\n except StopIteration:\n stop = True\n\n data_to_save = data\n\n cache = dict() if cache is None else cache\n cache[self.chain_info['chain_hash']] = {\"data\": data_to_save,\n \"stopped\": stop,\n 'chain_repr': self.chain_info[\n 'chain_repr'],\n 'chain_mtime': self.chain_info[\n 'chain_mtime']}\n return cache, stop", "def handle(event, context):\r\n for record in event.get('Records', []):\r\n payload = json.loads(base64.b64decode(record[\"kinesis\"][\"data\"]))\r\n LOGGER.info('Payload: %s', payload)\r\n key_name = get_key_name(payload)\r\n app_slug, uuid_folder, file_name = get_key_details(key_name)\r\n if not app_slug or not file_name:\r\n return event\r\n\r\n response = DYNAMO.get_item(\r\n TableName='FilesToPipelines',\r\n Key={\r\n 'App': {'S': app_slug},\r\n 'File': {'S': file_name}\r\n },\r\n ProjectionExpression='Pipeline')\r\n if 'Item' not in response:\r\n LOGGER.info('Could not file pipeline for %s', key_name)\r\n return\r\n if not response['Item'].get('Pipeline'):\r\n LOGGER.info('Could not file pipeline for %s: Got %s',\r\n key_name, response)\r\n return\r\n\r\n pipeline = response['Item']['Pipeline'][\"S\"]\r\n response = LAMBDA.invoke_async(\r\n FunctionName=pipeline,\r\n InvokeArgs=json.dumps({\r\n 'trigger': 'evented',\r\n 'uuid': uuid_folder\r\n })\r\n )\r\n LOGGER.debug(response)\r\n LOGGER.info('Executed %s on %s', pipeline, uuid_folder)", "def memcacheSetRecord(self, key, record):\n\n self.memcacheSet(key, self.pickleRecord(record))", "def write(self, record):\n if not record:\n return\n\n # Convert to a dict - inefficient, I know...\n if type(record) is DASRecord:\n record = json.loads(record.as_json())\n if type(record) is dict:\n # If our local queue is full, throw away the oldest entries\n while self.send_queue.full():\n try:\n logging.debug('CachedDataWriter queue full - dropping oldest...')\n self.send_queue.get_nowait()\n except asyncio.QueueEmpty:\n logging.warning('CachedDataWriter queue is both full and empty?!?')\n\n # Enqueue our latest record for send\n self.send_queue.put_nowait(record)\n else:\n logging.warning('CachedDataWriter got non-dict/DASRecord object of '\n 'type %s: %s', type(record), str(record))", "def run(self, event, context):\n logger.debug('Number of Records: %d', len(event.get('Records', [])))\n\n config = load_config()\n env = load_env(context)\n\n for record in event.get('Records', []):\n payload = StreamPayload(raw_record=record)\n classifier = StreamClassifier(config=config)\n classifier.map_source(payload)\n\n # If the kinesis stream or s3 bucket is not in our config,\n # go onto the next record\n if not payload.valid_source:\n continue\n\n if payload.service == 's3':\n self.s3_process(payload, classifier)\n elif payload.service == 'kinesis':\n self.kinesis_process(payload, classifier)\n elif payload.service == 'sns':\n self.sns_process(payload, classifier)\n else:\n logger.info('Unsupported service: %s', payload.service)\n\n # returns the list of generated alerts\n if self.return_alerts:\n return self.alerts\n # send alerts to SNS\n self.send_alerts(env, payload)", "def lambda_handler(event, context):\n\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # Decode the bytes to base64\n decoded_record_data = []\n for record in event['Records']:\n 
try:\n decoded_record_data.append(base64.b64decode(record['kinesis']['data']))\n except Exception as e:\n logger.error('%s - %s', \"Error decoding record\", e)\n\n # Deserialize the data\n deserialized_data = []\n for decoded_record in decoded_record_data:\n try:\n deserialized_data.append(json.loads(decoded_record))\n except Exception as e:\n logger.error('%s - %s', \"Error deserializing data\", e)\n\n # Try opening a connection to DynamoDB\n try:\n # Get a handle to the table\n dynamo_db = boto3.resource('dynamodb')\n curr_pos_table = dynamo_db.Table('current_position')\n except Exception as e:\n logger.error('%s - %s', \"Error connecting to DynamoDB\", e)\n return\n\n # Try sending the data\n transmit_data(curr_pos_table, deserialized_data, 0)", "def lambda_handler(event, context):\n raw_kinesis_records = event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': 
revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def buffer(self, entry):\n # TODO\n print(\"Storing {} in Redis.\".format(entry))\n\n # Redis list to store all ids of entities\n self._pipeline.rpush(\n self._list_name,\n '{}'.format(entry.id)\n )\n\n # Redis hash to store all attributes of entities\n hash_name = '{}:{}'.format(self._list_name, entry.id)\n hash_dict = {}\n field_names = list(entry.__all__)\n field_names.remove('id')\n for field_name in field_names:\n hash_dict[field_name] = getattr(entry, field_name)\n\n self._pipeline.hmset(hash_name, hash_dict)", "def emit(self, record):\r\n try:\r\n self.enqueue(self.prepare(record))\r\n except Exception:\r\n self.handleError(record)", "def handler(event, context):\n debug = False\n rewind = False\n dry_run = False\n\n table = _ensure_dynamo_table()\n consumer_id = 'test-consumer'\n\n if debug:\n state = table.scan()\n print \"Active leases in Dynamo:\", state[\"Count\"]\n for item in state[\"Items\"]:\n print json.dumps(item, indent=4, sort_keys=True)\n\n lease = None\n shard = None\n\n try:\n visitors = set()\n last_timestamp = None\n for i, record in enumerate(event.get('Records', [])):\n event_id, data = (record['eventID'], record['kinesis']['data'])\n shard, checkpoint = event_id.split(u':')\n if rewind:\n print \"Rewinding to checkpoint 0\"\n _clear_consumer_lease(table, consumer_id, shard)\n rewind = False\n if lease is None:\n lease = _get_consumer_lease(table, consumer_id, shard) \\\n or {\"checkpoint\": \"0\"}\n\n if checkpoint <= lease[\"checkpoint\"]:\n # replayed event, we should skip it\n print \"Replayed event; skipping\"\n continue\n # => decode from b64\n raw_event = base64.b64decode(data)\n # => parse from JSON\n json_event = json.loads(raw_event)\n # => extract out visitor id and timestamp if present\n visitor = json_event.get(\"visitor_site_id\", \"N/A\")\n visitors.add(visitor)\n last_timestamp = json_event.get(\"ts_action\", \"N/A\")\n # => do something with the data\n result = process(json_event)\n if result:\n pass\n # => checkpoint the shard\n lease[\"checkpoint\"] = checkpoint\n logger.info(\"Saw {} unique visitors in batch ending with {}\".format(\n len(visitors), last_timestamp))\n if not dry_run:\n _put_consumer_lease(table, consumer_id, shard, lease)\n except Exception as ex:\n # do not save consumer checkpoints because error happened\n # instead, we should probably log something about the error\n # in the consumer lease, to allow the Lambda to retry a fixed\n # number of times, before finally \"giving up\" and skipping\n # the records\n raise\n \"^ some form of error handling required\"\n if ex:\n pass", "def _set_record_to_backend(self, key: str, record: CacheRecord):\n raise NotImplementedError", "def handler(event, context):\n if event and \"Records\" in event:\n for record in event[\"Records\"]:\n time_str = time.ctime()\n if \"body\" in record:\n try:\n hasura_request(record[\"body\"])\n except Exception as e:\n print(f\"Start Time: {time_str}\", str(e))\n time_str = time.ctime()\n print(\"Done executing: \", time_str)\n raise_critical_error(\n message=f\"Could not process record: {str(e)}\",\n data=record,\n exception_type=Exception\n )", "def handle_record(self, record):\n raise 
NotImplementedError", "def lambda_handler(event, context):\n # EOL char append function\n encode_data = lambda x: \"{data}{eol}\".format(data=json.dumps(x), eol=chr(10)).encode(\"UTF-8\")\n \n # Punk API call\n try:\n logger.debug(\"Requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n request = r.get(os.environ[\"API_URL\"])\n except Exception as e:\n logger.error(\"An error occured while requesting api: {api}\".format(api=os.environ[\"API_URL\"]))\n raise e\n \n # Send records to kinesis stream\n logger.debug(\"Sending data to stream: {stream}\".format(stream=os.environ[\"STREAM_NAME\"]))\n for data in request.json():\n client.put_record(\n StreamName=os.environ[\"STREAM_NAME\"],\n Data=encode_data(data),\n PartitionKey=\"key\"\n )\n\n return {\n 'statusCode': request.status_code,\n 'body': data\n }", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def processor(self, data):\n streaming_data = self.decoder.decodeData(data)\n # Add Your code here to process data and handle transport/storage", "def save(self, pipeline: Optional['Pipeline'] = None, include_meta: bool = True, include_result: bool = True):\n key = self.key\n connection = pipeline if pipeline is not None else self.connection\n\n mapping = self.to_dict(include_meta=include_meta, include_result=include_result)\n\n if self.get_redis_server_version() >= (4, 0, 0):\n connection.hset(key, mapping=mapping)\n else:\n connection.hmset(key, mapping)", "def import_to_reids(record_count=8):\n\ttry:\n\t\tconn = redis.Redis(host=HOST,port=PORT,password=PASSWD)\n\texcept:\n\t\tprint 'connection error'\n\t\tsys.exit(0)\n\n\t# add to a set,transaction with pipeline\n\ttrans = conn.pipeline(transaction=True) \n\tset_name = 'activation_code'\n\ttry:\n\t\tfor i in xrange(record_count):\n\t\t\tcode = activation_code_generaor()\n\t\t\ttrans.sadd(set_name,code)\n\t\ttrans.execute() #commit all commands at a time\n\t\t# show the code\n\t\tprint'success,number of keys in a set:',conn.scard(set_name)\n\texcept:\n\t\tprint 'error,rollback'\n\t\tsys.exit(0)", "def lambda_handler(event, context):\n\n for record in event['Records']:\n\n bucket = record['s3']['bucket']['name']\n key = unquote_plus(record['s3']['object']['key'])\n\n str_value = s3_utils.download_file_as_string(bucket, key)\n data = json.loads(str_value)\n\n normalized_data = {\n 'meta': {\n 'table': 'parcels',\n 'column_names': [\n 'dataset',\n 'as_of',\n 'apn',\n 'objectid',\n 'city',\n 'x_coordinate',\n 'y_coordinate',\n 'area',\n 'length'\n ]\n }\n }\n\n rows = []\n\n dataset = data['meta']['dataset']\n as_of = data['meta']['datetime']\n\n for r in data['results']:\n\n attr = r['attributes']\n\n temp_dict = {\n 'dataset': dataset,\n 'as_of': as_of,\n 'apn': attr.get('APN_SPACE'),\n 'objectid': attr.get('OBJECTID'),\n 'city': attr.get('CITY'),\n 'x_coordinate': attr.get('X'),\n 'y_coordinate': 
attr.get('Y'),\n 'area': attr.get('Shape.STArea()'),\n 'length': attr.get('Shape.STLength()')\n }\n\n rows.append(temp_dict)\n\n normalized_data['rows'] = rows\n \n bucket = 'gis-data-normalized'\n file_name = 'normalized_' + key\n s3_utils.upload_json_as_file(normalized_data, bucket, file_name)", "def handler(message):\n records = message.collect()\n list_collect = []\n for record in records:\n # Parse record\n read = json.loads(record[1].decode('utf-8'))\n list_collect.append((read['text'],read['tags']))\n data = (clean(read['text']),read['tags'])\n job = read['index']\n\n data = spark.createDataFrame([data],['cleaned_body','tags'])\n data = model.transform(data)\n d = data.select('features','tags').collect()\n\n keys = retrieve_keys(d[0]['tags'])\n # look to optimize slice length based on keys and throughput\n slice_length = max(len(keys)//10000,min(len(keys)//49,200))\n print(slice_length)\n keys_sliced = [','.join(keys[i:i+slice_length]) for i in range(0,len(keys),slice_length)]\n keys = spark.createDataFrame(keys_sliced, StringType())\n score_udf = udf(lambda r: get_features(r,d[0]['features']), FloatType())\n keys = keys.withColumn('features', score_udf(keys['value'])).collect()\n # need to get top result from zadd\n report_to_redis(job)\n return", "def emit(self, record):\n self.buffer.append(record.__dict__)", "def emit(self, record):\n self.buffer.append(record.__dict__)", "def publish(event: dict):\n return kinesis.put_record(\n StreamName=DATA_STREAM,\n Data=json.dumps(event).encode('utf-8'),\n PartitionKey=randomize_arn(INVENTORY_ARN)\n )", "def emit(self, record):\n log_entry = self.format(record)\n try: \n requests.post(self.host+self.url, log_entry,headers={\"Content-type\": \"application/json\"}).content\n except Exception as e:\n if self.debug:\n print(e)", "def emit(self, record: LogRecord):\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)", "def _cache_data(self):\n while self._run:\n try:\n values = self._data_streamer.get_data_current_state()\n for parameter, mapping_method in self._mapping.items():\n value = values[parameter]\n mapped_notes = self._data_streamer.get_mapper_for_param(parameter, mapping_method[0]).map(value)\n self._value_queues[parameter].put((value,mapped_notes))\n except Exception, e:\n print e.message", "def generate(\n stream_name, field, hotspot_size, hotspot_weight, batch_size, kinesis_client):\n points_generated = 0\n hotspot = None\n while True:\n if points_generated % 1000 == 0:\n hotspot = get_hotspot(field, hotspot_size)\n records = [\n get_record(field, hotspot, hotspot_weight) for _ in range(batch_size)]\n points_generated += len(records)\n pprint(records)\n kinesis_client.put_records(StreamName=stream_name, Records=records)\n\n time.sleep(0.1)", "def _records_to_redis_naive(self, records: List[Any]) -> bool:\n redis_client: Redis = self.redis_client\n\n queue_type: str = self._config[\"graph_queue_type\"]\n queue_key: str = self._config[\"graph_queue_key\"]\n\n try:\n redis_action = getattr(\n redis_client, self._redis_methods_map[queue_type].lower()\n )\n\n for r in records:\n gevent.sleep()\n redis_action(queue_key, json_dumps(r))\n\n except RedisError as e:\n self._logger.exception(\"Redis Exception: %s\", str(e)) # noqa: G200\n result = False\n\n else:\n result = True\n\n return result", "def enqueue(self, record):\r\n self.queue.put_nowait(record)", "def post(self, record_type, record_id, record, metadata):\n \n if not self.cache.get(record_type, None):\n self.cache[record_type] = {}\n\n if 
not self.cache[record_type].get(record_id, None):\n self.cache[record_type][record_id] = {}\n\n \n self.cache[record_type][record_id]['record'] = record\n self.cache[record_type][record_id]['metadata'] = metadata\n\n \n d = Date()\n self.cache[record_type][record_id]['last_updated'] = d.now()\n\n # Check space, remove old items if not enough space", "def Transform(self, record):\n pass", "def test_kinesis_too_large_record(sdc_builder, sdc_executor, aws, keep_data):\n record_1_content = 'Hello 1'\n record_2_content = 'Hello ' + '2' * 1024 * 1024\n record_3_content = 'Hello 3'\n file_content = f'{record_1_content}\\n{record_2_content}\\n{record_3_content}'\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data=file_content,\n stop_after_first_batch=True,\n max_line_length=len(record_2_content)\n )\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n kinesis_producer = pipeline_builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> [kinesis_producer, wiretap.destination]\n pipeline = pipeline_builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n assert len(received_data) == 2\n assert received_data[0] == record_1_content\n assert received_data[1] == record_3_content\n\n error_records = wiretap.error_records\n assert len(error_records) == 1\n assert error_records[0].header['errorCode'] == 'KINESIS_08'\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def emit(self, record):\n pass", "async def ingest_data(redis_con, article_key, data):\n if not redis_con.exists(article_key):\n redis_con.hmset(article_key, mapping=data)\n redis_con.zincrby(\n name=ARTICLE_SUMMARY_KEY.format(source=data['source']),\n amount=1,\n value=data['story_date'],\n )\n redis_con.zadd(\n name=ARTICLE_LIST,\n mapping={\n article_key: datetime.now().strftime('%s')\n }\n )", "def _records_to_redis_pipe(self, records: List[Any]) -> bool:\n redis_client: Redis = self.redis_client\n\n queue_type: str = self._config[\"graph_queue_type\"]\n queue_key: str = self._config[\"graph_queue_key\"]\n\n try:\n with redis_client.pipeline() as pipe:\n\n pipe.multi()\n\n redis_action = getattr(\n pipe, 
self._redis_methods_map[queue_type].lower()\n )\n\n for r in records:\n gevent.sleep()\n redis_action(queue_key, json_dumps(r))\n\n pipe.execute()\n\n except RedisError as e:\n self._logger.exception(\"Redis Exception: %s\", str(e)) # noqa: G200\n result = False\n\n else:\n result = True\n\n return result", "def metadata_processor(self):\n counts = {key: int(value) for key, value in\n self.redis.hgetall(self.metadata_cache_key).iteritems()}\n\n counts['cached'] = len(self.tweet_cache)\n\n metadata = {'counts': counts}\n log.debug(metadata)\n\n if self.is_queuing:\n rqworker.enqueue(self.metadata_processor_fct, metadata)\n else:\n self.metadata_processor_fct(metadata)", "def emit(self, record):\n try:\n payload = self.to_loggly(record)\n headers = generate_header()\n SESSION.post(self.url, data=payload, background_callback=bg_cb, headers=headers)\n except (KeyboardInterrupt, SystemExit):\n raise\n except Exception:\n self.handleError(record)", "def process_data(self):\n num_records = len(self.records_data)\n for i in range(len(self.keys)):\n student_key = self.keys[i]\n if (i < num_records):\n self._load_student_record(student_key,\n self.records_data[i])", "def cache(self,redis_wrapper,key='default'):\n \n \n if key == 'default':\n key = self.showId()\n \n logger.info('Serializing GriddedTaxonomy. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n map(lambda grid : grid.removeQuerySets(),self)\n \n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. The intented caching object could be very big!\")\n return self_pickle", "def handler(event, context):\n s3conn = s3.connect_to_region(region, profile_name=profile_name)\n bucket = s3conn.get_bucket(bucket_name)\n\n # Use a map to track keys that are no longer in the feed, used for deletion\n remaining_keys = { key.name : True for key in bucket.list(prefix=key_prefix)}\n\n logger.debug(\"Existing keys in bucket\\n%s\", '\\n'.join(remaining_keys));\n\n for id, json_data in fn_inner():\n key_name = key_prefix + str(uuid.uuid5(uuid.NAMESPACE_URL, id.encode('utf-8')))\n\n # Key found, delete it from cleanup map\n if key_name in remaining_keys:\n del remaining_keys[key_name]\n\n string_data = json.dumps(json_data)\n s3_object = bucket.get_key(key_name)\n if s3_object == None:\n key = bucket.new_key(key_name);\n key.set_contents_from_string(string_data)\n logger.info('Creating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n else:\n if s3_object.etag[1:len(s3_object.etag)-1] != s3etag.from_string(string_data):\n logger.info('Updating:\\ts3://%s/%s', bucket_name, key_name)\n logger.debug(string_data)\n s3_object.set_contents_from_string(string_data)\n else:\n logger.info('Same:\\ts3://%s/%s', bucket_name, key_name);\n logger.debug(string_data)\n\n # Remvoe remaining keys from the bucket to allow for cleanup\n for key in remaining_keys:\n logger.info('Removing:\\ts3://%s/%s', bucket_name, key);\n bucket.delete_key(key);\n\n logger.info('Done');", "def put_record(self, tag, json_str):\n a = 0\n while a < 2000:\n if a % 100 == 0 and a != 0:\n logger.info(\"A batch of 100 simple json records have been sent\")\n self.firehose_client.put_record(DeliveryStreamName=self.get_stream_name(tag),\n Record={\n 'Data': json_str\n }\n )\n a = a + 1\n logger.info(\"Records were placed successfully!!\")", "def 
record(self, data):\n\n # Init the session when start the recording\n if self.session_id not in self._cache:\n self._cache[self.session_id] = {}\n\n self._cache[self.session_id] = dict_deep_update(self._cache[self.session_id], data)\n\n return self", "def _add_crawl_info(crawl, crawl_id, engine_redis):\n crawl_info = json.dumps(crawl)\n engine_redis.set(crawl_id, crawl_info)\n hour = 60 * 60 \n engine_redis.expire(crawl_id, hour)", "def emit(self, record):\n if self.list is not None:\n try:\n self.r.lpush(self.list, json.dumps(self.format(record)))\n except Exception:\n self.handleError(record)", "def query(self):\r\n records = self.input()\r\n if self.to_investigate:\r\n records = self.investigate(records)\r\n post.log.info(\"Caching {} records for {}\".format(len(records), self.name))\r\n self.cache_records(records)", "def s3_process(self, payload, classifier):\n s3_file_lines = StreamPreParsers.pre_parse_s3(payload.raw_record)\n for line in s3_file_lines:\n data = line.rstrip()\n payload.refresh_record(data)\n self.process_alerts(classifier, payload, data)", "def run_redis_example():\n\n try:\n print('\\nStep 1: Connect to Redis')\n r = login_redis_cloud()\n print('\\nStep 2: Cache some data in Redis and read it back')\n r.set('andy', 'andy@somewhere.com')\n email = r.get('andy')\n print(f\"r.get('andy'): {email}\")\n\n print('\\nStep 3: Cache more data in Redis')\n r.set('pam', 'pam@anywhere.com')\n r.set('fred', 'fred@fearless.com')\n\n print(\"\\nStep 4: Delete 'andy' from cache\")\n r.delete('andy')\n\n print('\\nStep 5: Make a unique ID and use it to count.')\n r.set('user_count', 21)\n r.incr('user_count')\n r.incr('user_count')\n r.decr('user_count')\n result = r.get('user_count')\n print(f'user_count=21+1+1-1={result}')\n\n print('\\nStep 6: Make richer data for a SKU')\n r.rpush('186675', 'chair')\n r.rpush('186675', 'red')\n r.rpush('186675', 'leather')\n r.rpush('186675', '5.99')\n\n print('\\nStep 7: Pull some data from the SKU structure')\n cover_type = r.lindex('186675', 2)\n print(f'Type of cover = {cover_type}')\n\n print('\\nStep 8: Add customer data for 6 customers')\n PHONE_IDX = 0\n ZIP_IDX = 1\n customer_data = {\n 'apple': {\n 'phone': '012-345-6789',\n 'zip': '01234'\n },\n 'lucky': {\n 'phone': '503-832-2833',\n 'zip': '53098'\n },\n 'zeke': {\n 'phone': '555-555-5555',\n 'zip': '98000'\n },\n 'blake': {\n 'phone': '838-608-0199',\n 'zip': '12011'\n },\n 'naomi': {\n 'phone': '721-608-8223',\n 'zip': '24587'\n },\n 'kale': {\n 'phone': '444-385-9115',\n 'zip': '62214'\n },\n }\n for customer, data in customer_data.items():\n print(f\"Inserting {customer}: [phone: {data['phone']}\"\n f\", zip: {data['zip']}]\")\n r.rpush(customer, data['phone'])\n r.rpush(customer, data['zip'])\n\n print('\\nStep 9. 
Retrieve zip and phone for blake')\n blake_phone = r.lindex('blake', PHONE_IDX)\n blake_zip = r.lindex('blake', ZIP_IDX)\n print(f\"Blake's info: [phone: {blake_phone}, zip: {blake_zip}]\")\n\n print('\\nFinally: Delete all data so we can start over.')\n r.flushdb()\n\n except Exception as e:\n print(f'Redis error: {e}')", "def ingest(self):\n datetime_retrieved = datetime.now()\n prefix = self.prefix_template.format(**self.feed, year=datetime_retrieved.strftime('%Y'), month=datetime_retrieved.strftime('%m'))\n fp = self.generate_fp(\n template='{feedname}_{datetime_retrieved}',\n feedname=self.feed['feedname'],\n datetime_retrieved=datetime_retrieved\n )\n\n url_to_request = self.url_dict[(self.feed['state'],self.feed['feedname'])]\n try:\n r = requests.get(url_to_request)\n if r.status_code == 200:\n data_to_write = r.content\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('Raw data ingested from {} to {} at {} UTC'.format(url_to_request, prefix+fp, datetime_retrieved))\n else:\n self.print_func('Received status code {} from {} feed.'.format(r.status_code,self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))\n return\n except BaseException as e:\n data_to_write = f'The feed at {datetime_retrieved.isoformat()}.'.encode('utf-8')\n fp += '__FEED_NOT_RETRIEVED'\n self.s3helper.write_bytes(data_to_write, self.bucket, key=prefix+fp)\n self.print_func('We could not ingest data from {} at {} UTC'.format(url_to_request, datetime_retrieved))\n raise e\n\n # trigger semi-parse ingest\n if self.feed['pipedtosandbox'] == True:\n self.print_func('Trigger {} for {}'.format(self.lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to sandbox.'.format(self.feed['feedname']))\n\n # trigger ingest to socrata\n if self.feed['pipedtosocrata'] == True:\n self.print_func('Trigger {} for {}'.format(self.socrata_lambda_to_trigger, self.feed['feedname']))\n lambda_client = self.s3helper.session.client('lambda')\n data_to_send = {'feed': self.feed, 'bucket': self.bucket, 'key': prefix+fp}\n response = lambda_client.invoke(\n FunctionName=self.socrata_lambda_to_trigger,\n InvocationType='Event',\n LogType='Tail',\n ClientContext='',\n Payload=json.dumps(data_to_send).encode('utf-8')\n )\n self.print_func(response)\n else:\n self.print_func('Skip triggering ingestion of {} to Socrata.'.format(self.feed['feedname']))", "def write(self, record):\n # Make Splunk ready payload data and append it to self._buffers list.\n self._buffer.append({\n 'index': self._index,\n 'sourcetype': 'json',\n 'event': record\n })\n\n # If the records count in self._buffer is more than allowed by\n # self._buffer_size, send those records to Splunk.\n if len(self._buffer) >= self._buffer_size:\n self._flush()", "def handle(req):\n start = time()\n event = json.loads(req)\n\n user_id = event[\"user_id\"]\n post_id = event[\"post_id\"]\n timestamp = event[\"timestamp\"]\n\n myclient = pymongo.MongoClient(user_timeline_mongodb)\n mydb = 
myclient['user-timeline']\n mycol = mydb[\"user-timeline\"]\n\n myquery = { \"user_id\": user_id }\n mydoc = mycol.find(myquery)\n\n if mydoc.count() == 0:\n posts_j = {}\n posts_j[str(post_id)] = timestamp\n mydict = {\"user_id\": user_id, \"posts\": json.dumps(posts_j)}\n mycol.insert_one(mydict)\n else:\n posts_j = json.loads(mydoc.next()[\"posts\"])\n posts_j[str(post_id)] = timestamp\n posts_update = {\"$set\": {\"posts\": json.dumps(posts_j)}}\n mycol.update_one(myquery, posts_update)\n\n r = redis.Redis(host=user_timeline_redis, port=6379, decode_responses=True)\n r.hset(user_id, post_id, timestamp)\n\n #r.hset(\"end_time\", event[\"req_id\"], str(time()))\n\n return str(time() - start)", "def emit(self, record):\n try:\n # Format: [ [queueMsgID, PID], record ]\n self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])\n except:\n # Something went wrong...\n self.handleError(record)", "def raid_table_dynamodb_stream_event(event, context):\n try:\n # Log AWS Lambda event\n logger.info('Event: {}'.format(json.dumps(event, indent=4)))\n for record in event['Records']:\n # Convert low-level DynamoDB format to Python dictionary\n deserializer = TypeDeserializer()\n table_keys = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['Keys'].items()}\n table_attributes = {k: deserializer.deserialize(v) for k, v in record['dynamodb']['NewImage'].items()}\n\n if record['eventSourceARN'] == os.environ['DEMO_RAID_STREAM_ARN']:\n ands_url_path = \"{}modifyValueByIndex?handle={}&value={}&index={}\".format(\n os.environ[\"DEMO_ANDS_SERVICE\"],\n table_keys['handle'],\n table_attributes['contentPath'],\n table_attributes['contentIndex']\n )\n\n ands_secret = os.environ[\"ANDS_DEMO_SECRET\"]\n\n elif record['eventSourceARN'] == os.environ['RAID_STREAM_ARN']:\n ands_url_path = \"{}modifyValueByIndex?handle={}&value={}&index={}\".format(\n os.environ[\"ANDS_SERVICE\"],\n table_keys['handle'],\n table_attributes['contentPath'],\n table_attributes['contentIndex']\n )\n\n ands_secret = os.environ[\"ANDS_SECRET\"]\n\n else:\n logger.info('Unknown DynamoDB Stream')\n continue\n\n # Process new records\n if record['eventName'] == 'INSERT':\n # Skip if default Raid\n if table_attributes['contentPath'] == settings.RAID_SITE_URL:\n logger.info('Not updating content path \"{}\" on new RAiD as it is the default: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n continue\n\n logger.info('Updating content path \"{}\" on new RAiD: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n\n ands_mint = ands_helpers.ands_handle_request(\n ands_url_path,\n os.environ[\"ANDS_APP_ID\"],\n \"raid\",\n \"raid.org.au\",\n ands_secret,\n )\n\n logger.info(json.dumps(ands_mint))\n\n elif record['eventName'] == 'MODIFY':\n old_table_attributes = {\n k: deserializer.deserialize(v) for k, v in record['dynamodb']['OldImage'].items()\n }\n\n # Update handle content Path if it is different\n if old_table_attributes['contentPath'] != table_attributes['contentPath']:\n logger.info('Updating content path \"{}\" on existing RAiD: {}'.format(\n table_attributes['contentPath'], table_keys['handle'])\n )\n\n ands_mint = ands_helpers.ands_handle_request(\n ands_url_path,\n os.environ[\"ANDS_APP_ID\"],\n \"raid\",\n \"raid.org.au\",\n ands_secret,\n )\n\n logger.info(json.dumps(ands_mint))\n\n else:\n logger.info('Existing RAiD has no changes to content path.')\n\n except Exception as e:\n logger.error('Unknown error occurred.')\n logger.error(str(e))\n\n 
logger.info('DynamoDB Stream Processed...')", "def emit(self, record):\n try:\n s = self._prepare_increment(self._metric(record))\n self.send(s)\n except (KeyboardInterrupt, SystemExit):\n raise\n except:\n self.handleError(record)", "def cache_handler(event, context):\n events.cache()", "def process(msg, context, region):\n\n job_id = int(msg['ingest_job'])\n chunk_key = msg['chunk_key']\n tile_key = msg['tile_key']\n print(\"Tile key: {}\".format(tile_key))\n\n proj_info = BossIngestProj.fromTileKey(tile_key)\n\n # Set the job id\n proj_info.job_id = msg['ingest_job']\n\n print(\"Data: {}\".format(msg))\n\n # update value in the dynamo table\n tile_index_db = BossTileIndexDB(proj_info.project_name)\n chunk = tile_index_db.getCuboid(chunk_key, job_id)\n if chunk:\n if tile_index_db.cuboidReady(chunk_key, chunk[\"tile_uploaded_map\"]):\n print(\"Chunk already has all its tiles: {}\".format(chunk_key))\n # Go ahead and setup to fire another ingest lambda so this tile\n # entry will be deleted on successful execution of the ingest lambda.\n chunk_ready = True\n else:\n print(\"Updating tile index for chunk_key: {}\".format(chunk_key))\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n else:\n # First tile in the chunk\n print(\"Creating first entry for chunk_key: {}\".format(chunk_key))\n try:\n tile_index_db.createCuboidEntry(chunk_key, job_id)\n except ClientError as err:\n # Under _exceptional_ circumstances, it's possible for another lambda\n # to beat the current instance to creating the initial cuboid entry\n # in the index.\n error_code = err.response['Error'].get('Code', 'Unknown')\n if error_code == 'ConditionalCheckFailedException':\n print('Chunk key entry already created - proceeding.')\n else:\n raise\n chunk_ready = tile_index_db.markTileAsUploaded(chunk_key, tile_key, job_id)\n\n # ingest the chunk if we have all the tiles\n if chunk_ready:\n print(\"CHUNK READY SENDING MESSAGE: {}\".format(chunk_key))\n # insert a new job in the insert queue if we have all the tiles\n ingest_queue = IngestQueue(proj_info)\n ingest_queue.sendMessage(json.dumps(msg))\n\n # Invoke Ingest lambda function\n names = AWSNames.from_lambda(context.function_name)\n lambda_client = boto3.client('lambda', region_name=region)\n lambda_client.invoke(\n FunctionName=names.tile_ingest.lambda_,\n InvocationType='Event',\n Payload=json.dumps(msg).encode())\n else:\n print(\"Chunk not ready for ingest yet: {}\".format(chunk_key))\n\n print(\"DONE!\")", "def test_kinesis_preserve_record_order(sdc_builder, sdc_executor, aws, keep_data):\n expected_data = [f'Hello {i}' for i in range(100)]\n stream_name = '{}_{}'.format(aws.kinesis_stream_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(\n data_format='TEXT',\n raw_data='\\n'.join(expected_data),\n stop_after_first_batch=True\n )\n\n kinesis_producer = builder.add_stage('Kinesis Producer')\n kinesis_producer.set_attributes(\n data_format='TEXT',\n stream_name=stream_name,\n record_separator='',\n preserve_record_order=True,\n kinesis_producer_configuration=[{'key': 'AggregationEnabled', 'value': 'false'}]\n )\n\n dev_raw_data_source >> kinesis_producer\n pipeline = builder.build().configure_for_environment(aws)\n\n client = aws.kinesis\n try:\n logger.info(f'Creating a Kinesis Stream {stream_name} on AWS ...')\n client.create_stream(\n StreamName=stream_name,\n ShardCount=1\n )\n 
aws.wait_for_stream_status(\n stream_name=stream_name,\n status='ACTIVE'\n )\n desc_response = client.describe_stream(\n StreamName=stream_name\n )\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n logger.info(f'Reading the data from the Kinesis Stream ...')\n shard_iterator = client.get_shard_iterator(\n StreamName=stream_name,\n ShardId=desc_response['StreamDescription']['Shards'][0]['ShardId'],\n ShardIteratorType='TRIM_HORIZON'\n )\n response = client.get_records(\n ShardIterator=shard_iterator['ShardIterator']\n )\n received_data = [rec['Data'].decode().strip() for rec in response['Records']]\n\n logger.debug(f'Number of messages received from Kinesis = {len(received_data)}')\n assert received_data == expected_data\n\n finally:\n _ensure_pipeline_is_stopped(sdc_executor, pipeline)\n if not keep_data:\n logger.info('Deleting %s Kinesis stream on AWS ...', stream_name)\n client.delete_stream(StreamName=stream_name)", "def main(delivery_stream, region, print_record_id):\n client = _get_firehose_client(region_name=region)\n for line in click.get_binary_stream('stdin'):\n response = client.put_record(\n DeliveryStreamName=delivery_stream,\n Record={'Data': line},\n )\n if print_record_id:\n click.echo(message=response['RecordId'])", "def parse_records(raw_records: list) -> Generator[str, None, None]:\n for record in iter_deaggregate_records(raw_records):\n logger.debug(f\"Raw Kinesis record: {record}\")\n\n # Kinesis data is base64 encoded\n raw_data = base64.b64decode(record[\"kinesis\"][\"data\"])\n\n # decompress data if raw data is gzip (log data from CloudWatch Logs subscription filters comes gzipped)\n # gzip magic number: 0x1f 0x8b\n if raw_data[0] == 0x1F and raw_data[1] == 0x8B:\n raw_data = gzip.decompress(raw_data)\n\n data = raw_data.decode()\n payloads = normalize_cloudwatch_messages(data)\n logger.debug(f\"Normalized payloads: {payloads}\")\n\n for payload in payloads:\n yield payload", "def __call__(self, record, source, **kwds):\n # build a cache\n cache = {}\n # go through the fields in {record}\n for field in record.pyre_fields:\n # ask each one to dispatch to the appropriate handler to build the node\n node = field.identify(authority=self, cache=cache, source=source)\n # update the cache\n cache[field] = node\n # and make the node available\n yield node\n # all done\n return", "async def transform_record(db_pool, record):\n\n # Before creating the dict, we want to get the stable_id frm the DB\n async with db_pool.acquire(timeout=180) as connection:\n try: \n query = f\"\"\"SELECT stable_id, access_type\n FROM beacon_dataset\n WHERE id={dict(record).pop(\"dataset_id\")};\n \"\"\"\n statement = await connection.prepare(query)\n extra_record = await statement.fetchrow()\n except Exception as e:\n raise BeaconServerError(f'Query metadata (stableID) DB error: {e}') \n\n response = dict(record)\n\n response.pop(\"id\")\n response[\"datasetId\"] = dict(extra_record).pop(\"stable_id\") \n response[\"internalId\"] = response.pop(\"dataset_id\")\n response[\"exists\"] = True\n response[\"variantCount\"] = response.pop(\"variant_cnt\") \n response[\"callCount\"] = response.pop(\"call_cnt\") \n response[\"sampleCount\"] = response.pop(\"sample_cnt\") \n response[\"frequency\"] = 0 if response.get(\"frequency\") is None else float(response.pop(\"frequency\"))\n response[\"numVariants\"] = 0 if response.get(\"num_variants\") is None else response.pop(\"num_variants\")\n response[\"info\"] = {\"access_type\": 
dict(extra_record).pop(\"access_type\")} \n \n return response", "def __call__(self, record):\n fs = record.fs\n signal = record.p_signal\n filtered_signal = self.filter_signal(signal, fs)\n record.p_signal = filtered_signal\n\n return record", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def cache(self,redis_wrapper,key='default'):\n \n \n if key == 'default':\n key = self.showId()\n \n logger.info('Serializing NestedGriddedTaxonomy. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n map(lambda grid : grid.removeQuerySets(),self.levels.values())\n \n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. The intented caching object could be very big!\")\n return self_pickle", "def lambda_handler(event, context):\n \n filename = None\n fobj = None\n\n try:\n \n filename = 'dlq' + '-' + datetime.datetime.now().strftime(\"%s\")\n fobj = open('/tmp/'+filename, 'w')\n logger.debug('S3 client set up.')\n\n for record in event['Records']:\n fobj.write(json.dumps(record['body']))\n fobj.write(\"\\n\")\n \n except Exception as ex:\n logger.error('Exception in executing ingestion to S3: {}'.format(ex))\n send_sns_alert(str(ex))\n raise\n\n else:\n \n #Saves file to S3\n fobj.close()\n load_data_s3(filename)\n\n return {\n 'statusCode': 200,\n 'body': json.dumps('Success!')\n }\n\n finally:\n\n # S3 - close temp object\n fobj.close()", "def record(self):\n # TODO: record the data", "def put_record(self, obj):\r\n for output in self.outputs:\r\n output.put_record(obj)", "def connection():\n\n\tclient = boto3.client('firehose', region_name=\"us-east-1\")\n\n\tuber_json = {}\n\tlyft_json = {}\n\n\tfor start,end in rides.items():\n\t\tuber_json[start] = {}\n\t\tlyft_json[start] = {}\n\n\t\tuber_json[start][end] = get_uber(start,end)\n\t\tlyft_json[start][end] = get_lyft(start,end)\n\t\tuber_json[start][\"time\"] = int(round(time.time()/60))\n\t\tlyft_json[start][\"time\"] = int(round(time.time()/60))\n\n\tresponse = client.put_record(\n DeliveryStreamName='uber_stream',\n Record={'Data': json.dumps(uber_json) + \"\\n\"})\n\n\tresponse = client.put_record(\n DeliveryStreamName='lyft_stream',\n Record={'Data': json.dumps(lyft_json) + \"\\n\"})", "def handle(cls, record):\n print(datetime.datetime.now(), record, flush=True)", "def emit(self, record):\n # encode data\n # Panda logger is going to be migrated. 
Until this is completed we need to support the old and new logger\n # The new logger needs to be json encoded and use POST method\n try:\n if self.encoding == JSON:\n arr=[{\n \"headers\":{\"timestamp\" : int(time.time())*1000, \"host\" : \"%s:%s\"%(self.url, self.port)},\n \"body\": \"{0}\".format(json.dumps(self.mapLogRecord(record)))\n }]\n data = json.dumps(arr)\n else:\n data = urlencode(self.mapLogRecord(record))\n\n # try to lock Semaphore\n if self.mySemaphore.acquire(False):\n # start Emitter\n _Emitter(self.host, self.port, self.urlprefix, self.method, data, self.mySemaphore).start()\n except UnicodeDecodeError:\n #We lose the message\n pass", "def smap(self, records, task):\n for key, json in records:\n record = happy.json.decode(json)\n if record.has_key(self.joinkey1):\n record['__joinorder__'] = 1\n task.collect(record[self.joinkey1], 1, happy.json.encode(record))\n if record.has_key(self.joinkey2):\n record['__joinorder__'] = 2\n task.collect(record[self.joinkey2], 2, happy.json.encode(record))", "def redis_save(key: object, value: object) -> object:\n if key is not None and value is not None:\n red.redis.set(json.dumps(key), json.dumps(value))", "def process_route_keys(session, redis, key, ip, date):\n try:\n route_metrics = []\n routes = redis.hgetall(key)\n for key_bstr in routes:\n route = key_bstr.decode('utf-8').strip('/')\n val = int(routes[key_bstr].decode('utf-8'))\n\n version = \"0\" # default value if version is not present\n path = route\n query_string = None\n\n route_subpaths = route.split('/')\n\n # Extract the version out of the path\n if route_subpaths[0].startswith('v') and len(route_subpaths[0]) > 1:\n version = route_subpaths[0][1:]\n path = '/'.join(route_subpaths[1:])\n\n # Extract the query string out of the path\n route_query = path.split('?')\n if len(route_query) > 1:\n path = route_query[0]\n query_string = route_query[1]\n route_metrics.append(\n RouteMetrics(\n version=version,\n route_path=path,\n query_string=query_string,\n count=val,\n ip=ip,\n timestamp=date\n )\n )\n\n if route_metrics:\n session.bulk_save_objects(route_metrics)\n redis.delete(key)\n except Exception as e:\n raise Exception(\"Error processing route key %s with error %s\" % (key, e))", "def _redis_record_id_key(self):\n return 'tesseract:table:%s:rowid' % self.table_name", "def _store_cache(self):\n assert self._already_generated, \"Must generate before storing to cache\"\n\n if self.variant_unit is not None:\n logger.warning(\"Cannot cache once variant_unit has been set\")\n return\n\n try:\n os.mkdir(os.path.dirname(self._cache_key))\n except FileExistsError:\n # Easier than checking and risking race conditions\n pass\n\n with open(self._cache_key, 'w') as f:\n json.dump(self.rows, f)\n\n logger.debug(\"Stored cache to {}\".format(self._cache_key))", "def getrecord_fetcher(record_uuid):\n record = current_oaiserver.record_cls.get_record(record_uuid)\n record_dict = record.dumps()\n record_dict[\"updated\"] = record.updated\n return record_dict", "def create_records(data: List[str]) -> List[dict]:\n records = []\n for d in data:\n records.append(create_record(d))\n\n logger.debug(f\"Formed Kinesis Records batch for PutRecords API: {records}\")\n return records", "def process_records(self, shard_iterator: str, shard_id: str):\n response = self.client.get_records(\n ShardIterator=shard_iterator\n )\n\n logger.debug('Getting data from shard: {shard_id}', extra=response)\n records = response['Records']\n\n if len(records) == 0:\n logger.info(f'Nothing to process for shard: 
\"{shard_id}\"')\n self._empty_shards.append(shard_id)\n else:\n for item in records:\n if self.item_callback:\n data = json.loads(item.get('Data'))\n order = data.get('order')\n contents = data.get('contents')\n self.item_callback(order, contents)\n self._sequences[shard_id] = item['SequenceNumber']\n\n next_shard_iterator = response['NextShardIterator']\n date = response.get('ResponseMetadata').get('HTTPHeaders').get('date')\n self._iterators[shard_id] = next_shard_iterator, date", "def update_record(self, context, payload):\n access_token = util.get_access_token(context[\"headers\"])\n record = ZohorecruitRecord(**payload)\n endpoint = f\"{record.module}/{record.record_id}\"\n record_data = self.retrieve_record_body(record)\n response = util.rest(\"PUT\",endpoint,access_token,record_data)\n return json.loads(response.text)", "def run(self):\n data = query_orm(self.orm)\n # To avoid confusion downstream: don't write out data if there isn't any\n if not data:\n return\n for table_key, version_key in self.make_s3_keys():\n save_to_s3(table_key, version_key, data)", "def run(self):\n metrics = Metrics()\n\n count = 0\n while not self.queue.empty():\n count += 1\n try:\n key = self.queue.get(timeout=1)\n except queue.Empty:\n continue\n\n try:\n self.copy_key(key)\n metrics.count()\n except Exception as err:\n self.log.error(f\"Error for key '{key}'\")\n self.log.debug(err, exc_info=True)\n metrics.error()\n\n self.log.info(f\"Thread completed. {count} keys processed.\")", "def store(self,key,start,end,data):\n\n pass", "def new_archive_record(self, event):\n end_ts = event.record['dateTime']\n start_ts = end_ts - event.record['interval'] * 60\n\n for topic in self.subscriber.subscribed_topics: # topics might not be cached.. therefore use subscribed?\n self.logger.debug(\"Service record prior to update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))\n target_data = self.subscriber.get_accumulated_data(topic, start_ts, end_ts, event.record['usUnits'])\n event.record.update(target_data)\n self.logger.debug(\"Service record after update is: %s %s\"\n % (weeutil.weeutil.timestamp_to_string(event.record['dateTime']),\n to_sorted_string(event.record)))", "def cache(self,redis_wrapper,key='default',refresh=False):\n \n \n if key == 'default':\n key = self.showId()\n \n if not redis_wrapper.exists(key) or refresh: \n logger.info('Serializing Object. \\n Depending on the amount of data it can take some time')\n \n #Cleaning GeoQueryValuesSets fields\n #map(lambda grid : grid.removeQuerySets(),self)\n logger.info('Removing GeoQueryValuesSets')\n self.removeQuerySets()\n import pickle\n logger.info('Serializing with pickle') \n self_pickle = pickle.dumps(self)\n logger.info(\"Storing in Taxonomy in Cache\")\n try:\n \n redis_wrapper.set(key,self_pickle)\n return True\n except:\n logger.error(\"Problem in serializing. The intented caching object could be very big!\")\n return self_pickle \n else:\n logger.info('Object exists on Cache System. 
For update activate flag: refresh to True')\n return True", "def filter(self, record):\n if super(FrequencyFilter, self).filter(record) == 0:\n return 0\n\n # distinguish this error log\n params = [\n record.module,\n record.filename,\n record.funcName,\n str(record.lineno),\n record.levelname\n ]\n if self._prefix:\n params.append(self._prefix)\n key = ','.join(params)\n\n # get redis value of this key\n cache = FrequencyCache.p_col.find_one_and_update(\n {'_id': key},\n {\n '$inc': {FrequencyCache.Field.data: 1},\n '$setOnInsert': {\n '_id': key,\n FrequencyCache.Field.time: datetime.utcnow()\n }\n },\n return_document=ReturnDocument.AFTER,\n upsert=True\n )\n v = cache[FrequencyCache.Field.data]\n\n if v <= self._repeat_count + 1:\n # will be handled\n return 1\n else:\n return 0", "def process(self, data):\n file = self.get_cache_file(data)\n loaded = False\n if self.check_cache_exists(data):\n if self.force:\n log.info(\"Item found in cache but force=True\")\n else:\n try:\n log.info(\"Found in cache, skipping chain\")\n with open(file, 'rb') as f:\n # https://stackoverflow.com/questions/2766685/how-can-i-speed-up-unpickling-large-objects-if-i-have-plenty-of-ram/36699998#36699998\n # disable garbage collector for speedup unpickling\n gc.disable()\n cache = pickle.load(f)\n\n # enable garbage collector again\n gc.enable()\n\n retrieved_data = cache['data']\n stop = cache[\"stopped\"]\n if stop:\n raise StopIteration()\n self._check_time_consistency(cache['chain_mtime'],\n self.chain_info['chain_mtime'])\n for key, value in retrieved_data.items():\n data[key] = value\n loaded = True\n except EOFError:\n log.warning(\n \"Failed to load cache item {} (corrupted file will be deleted)\".format(file))\n os.unlink(file)\n if not loaded:\n log.debug(\"Not found in cache, processing chain\")\n cache, stop = self._process(data, {})\n cache = cache[self.chain_info['chain_hash']]\n if self.save_cache:\n with open(file, 'wb') as f:\n try:\n pickle.dump(cache, f, protocol=HIGHEST_PROTOCOL)\n except:\n pickle.dump(cache, f)\n\n # Try to set some more flexible access rights\n try:\n os.chmod(file, RWRWRW)\n except OSError:\n pass", "def on_incoming_records(self, connection: ConnectionInterface) -> None:\n self.generate_metadata()\n\n df = connection.record_containers[0].build_dataframe()\n df[\"optional_value\"] = self.workflow_config[\"Value\"]\n\n self.output_anchor.push_records(\n generate_records_from_df(df, self.output_anchor.record_info)\n )\n\n connection.clear_records()", "def run(self):\n init()\n list_name = comet_config.REDIS_NAMESPACE + \"incoming/\" + self.service_name\n list_name_processing = list_name + \"/processing\"\n self.redis = r\n while True:\n try:\n item = self.redis.brpoplpush(list_name, list_name_processing)\n self.process_incoming(item)\n self.redis.lrem(list_name_processing, item)\n\n except redis.ConnectionError:\n pass", "def event(event, context):\n# Sample event:\n #\n # _event = { \"Records\":[\n # {\n # \"eventVersion\":\"2.1\",\n # \"eventSource\":\"aws:s3\",\n # \"awsRegion\":\"us-east-1\",\n # \"eventTime\":\"2021-10-14T07:40:55.113Z\",\n # \"eventName\":\"ObjectCreated:Put\",\n # \"userIdentity\":{\n # \"principalId\":\"AWS:AROA6L2YJX2JCJYHEJ4UI:serverless-image-processing-test-create\"\n # },\n # \"requestParameters\":{\n # \"sourceIPAddress\":\"94.140.8.209\"\n # },\n # \"responseElements\":{\n # \"x-amz-request-id\":\"7CJHSGZ9MZF9995F\",\n # \"x-amz-id-2\":\"X5OtpRb+P9CuYKDHvjT8z9prnqqsH1yatZchN2uw8/158mcRUVhQNSW/z5ffXLqkLhu+4Kc163vZiRgVk3XaGd8H1NhZCu8N\"\n # 
},\n # \"s3\":{\n # \"s3SchemaVersion\":\"1.0\",\n # \"configurationId\":\"9b8f4135-35d4-4e07-b8a5-7d68cc95870b\",\n # \"bucket\":{\n # \"name\":\"serverless-image-processing-test-serverless-image-processing\",\n # \"ownerIdentity\":{\n # \"principalId\":\"A5IHQSLNTJKZN\"\n # },\n # \"arn\":\"arn:aws:s3:::serverless-image-processing-test-serverless-image-processing\"\n # },\n # \"object\":{\n # \"key\":\"test/6e7ef3f0-dcb6-4db6-9518-3bc6ec0ba492\",\n # \"size\":116716,\n # \"eTag\":\"f04e70e100f653a0e67f32f6098dea1c\",\n # \"sequencer\":\"006167DF06C888A626\"\n # }\n # }\n # }\n # ]\n # }\n\n logger.debug('event: {}'.format(event))\n for record in event['Records']:\n processRecord(record)\n\n return {'statusCode': httplib.ACCEPTED}", "def emit(self, record):\n try:\n msg = self.format(record)\n stream = self.stream\n stream.Write_shared((msg + self.terminator).encode(self.encoding))\n # self.flush()\n except Exception:\n self.handleError(record)", "def setup_method(self):\n super().setup_method()\n self.stager.redis.from_dict(self.redis_staging_data)\n\n self.redis_client = self.tcex.redis_client", "def save_data(self, record):\n self.dbm.addRecord(record)", "def continuous_migration():\n from redis import StrictRedis\n redis_url = current_app.config.get('CACHE_REDIS_URL')\n r = StrictRedis.from_url(redis_url)\n\n try:\n while r.llen('legacy_records'):\n raw_record = r.lpop('legacy_records')\n if raw_record:\n # The record might be None, in case a parallel\n # continuous_migration task has already consumed the queue.\n raw_record = zlib.decompress(raw_record)\n record = marc_create_record(raw_record, keep_singletons=False)\n recid = int(record['001'][0])\n prod_record = InspireProdRecords(recid=recid)\n prod_record.marcxml = raw_record\n try:\n with db.session.begin_nested():\n errors, dummy = create_record(\n record, force=True, validation=True\n )\n logger.info(\"Successfully migrated record {}\".format(recid))\n prod_record.successful = True\n prod_record.valid = not errors\n prod_record.errors = errors\n db.session.merge(prod_record)\n except Exception as err:\n logger.error(\"Error when migrating record {}\".format(recid))\n logger.exception(err)\n prod_record.successful = False\n db.session.merge(prod_record)\n finally:\n db.session.commit()\n db.session.close()" ]
[ "0.60701543", "0.60549337", "0.58346707", "0.58061445", "0.57601625", "0.56928015", "0.5668168", "0.56287974", "0.5627504", "0.55000585", "0.54971075", "0.545186", "0.5447746", "0.5404357", "0.5392328", "0.5323944", "0.5304658", "0.52952385", "0.52630335", "0.52459717", "0.52459717", "0.52459717", "0.52459717", "0.52459717", "0.52459717", "0.5233552", "0.5197884", "0.5195375", "0.5190534", "0.51838905", "0.51838905", "0.51814705", "0.5175899", "0.5156159", "0.51524305", "0.51496935", "0.5145357", "0.5137729", "0.51350015", "0.5085388", "0.5077182", "0.50658286", "0.5063475", "0.5047912", "0.5043904", "0.5020429", "0.501102", "0.5001854", "0.5000067", "0.49941662", "0.49900776", "0.49762586", "0.4967858", "0.49662217", "0.49631613", "0.49576557", "0.4948637", "0.49481624", "0.49236795", "0.48917502", "0.48862007", "0.4884452", "0.48725563", "0.48575264", "0.4850052", "0.48499915", "0.48421946", "0.48241293", "0.48112902", "0.48097163", "0.4808192", "0.47950283", "0.47880217", "0.47840965", "0.47828412", "0.4781394", "0.47700885", "0.4768325", "0.47612727", "0.4759984", "0.47578177", "0.4748779", "0.47474208", "0.47473153", "0.4746414", "0.4739432", "0.4733756", "0.47187474", "0.47133803", "0.46991834", "0.4691663", "0.46884614", "0.46884248", "0.4682982", "0.46683592", "0.4664471", "0.46530414", "0.46505108", "0.4646865", "0.46438766", "0.46358562" ]
0.0
-1
Use ANSI codes on 'string' if the output is a terminal on a non-Windows platform
def isSpecial(ansiCode, string):
	if IS_TERMINAL and not IS_WIN32:
		return ansiCode+string+ANSI_END
	else:
		return string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def b(string):\n return \"\\033[94m{0}\\033[0m\".format(string)", "def ansi(color, text):\r\n code = COLOR_CODES[color]\r\n return '\\033[1;{0}m{1}{2}'.format(code, text, RESET_TERM)", "def ansi_escape(text: object) -> str:\n return str(text).replace(\"\\x1b\", \"?\").replace(\"\\b\", \"?\")", "def _handle_ansi_color_codes(self, s):\r\n def ansi_code_to_css(code):\r\n return ' '.join(['ansi-%s' % c for c in code.split(';')])\r\n return '<span>' +\\\r\n HtmlReporter._ANSI_COLOR_CODE_RE.sub(\r\n lambda m: '</span><span class=\"%s\">' % ansi_code_to_css(m.group(1)), s) +\\\r\n '</span>'", "def colour(string: str) -> str:\n string = f\"\\033[32m{string}\\033[0m\"\n return string", "def strc(text, color='black', style='normal'):\n\n ansii = ANSIIcode(color, style)\n back_to_normal = ANSIIcode('normal', 'normal') # '\\033[0m'\n\n return ansii + text + back_to_normal", "def ansi_code(text: str, color: List[ANSICode] or ANSICode or None):\n if color is None:\n return text\n elif type(color) is list:\n return \"\".join(color) + f\"{text}{colors.Reset}\"\n else:\n return f\"{color}{text}{colors.Reset}\"", "def ansi(color=\"none\"):\n if color == \"\" or color is None:\n return \"\\33[0m\"\n if isinstance(color, tuple):\n return \"\\33[38;2;{:d};{:d};{:d}m\".format(int(255*color[0]),\n int(255*color[1]),\n int(255*color[2]))\n tupl = clr_tuple(color)\n if tupl is not None:\n return ansi(tupl)\n if color == \"bold\":\n return \"\\33[1m\"\n if color == \"/bold\":\n return \"\\33[22m\"\n return \"\\33[0m\"", "def ansi(*args):\n code = Term.ESCAPE_START\n code += ';'.join(args)\n code += Term.ESCAPE_END\n return code", "def process(self, string: str) -> str:\r\n self._check_all_repeaters(string)\r\n no_repeaters_str = self._process_repeaters(string)\r\n ansi_compliant_str = self._process_colors(no_repeaters_str)\r\n\r\n return ansi_compliant_str", "def _ansi_equivalent(self, s: str) -> str:\r\n color_id = self._color_id_regexp.search(s).groups()[0]\r\n\r\n # TODO: Replace this with a class the handles dynamic color configuration!\r\n return {\r\n '0': '\\u001b[37m',\r\n '1': '\\u001b[32m',\r\n '2': '\\u001b[31m',\r\n '3': '\\u001b[33m',\r\n '4': '\\u001b[34m',\r\n '5': '\\u001b[36m',\r\n '6': '\\u001b[37m',\r\n '7': '\\u001b[35m',\r\n '8': '\\u001b[30m',\r\n '.': '\\u001b[0m',\r\n }[color_id]", "def redtext(mesg):\n if sys.platform == 'win32':\n import win32console\n handle = win32console.GetStdHandle(win32console.STD_OUTPUT_HANDLE)\n reset = handle.GetConsoleScreenBufferInfo()['Attributes']\n handle.SetConsoleTextAttribute(12)\n sys.stdout.writelines(mesg+'\\n')\n handle.SetConsoleTextAttribute(reset)\n else:\n sys.stdout.write('\\033[91m'+mesg+'\\033[0m\\n')", "def __repr__(self):\n return \"ANSIString(%s, decoded=True)\" % repr(self._raw_string)", "def _ansi_wrap(self, text, fg, bg):\n codes = []\n\n if fg is not None:\n codes.append(30 + self._to_code(fg))\n\n if bg is not None:\n codes.append(40 + self._to_code(bg))\n\n if fg is not None and 'i' in fg:\n codes.append(1) # Bold\n\n if bg is not None and 'i' in bg:\n codes.append(4) # Underscore\n\n return \"\\033[\" + \";\".join([str(code) for code in codes]) + \"m\" + text + \"\\033[0m\"", "def _color_string(string, color):\n if color is None:\n return string\n else:\n return color + string + '\\033[0m'", "def test_asciitable_m_pretty_ansi(self):\n input = '''\n┏━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━┳━━━━━━━━━━━━━━━━┓ \n┃\\x1b[1m \\x1b[0m\\x1b[1mReleased \\x1b[0m\\x1b[1m \\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1mTitle \\x1b[0m\\x1b[1m 
\\x1b[0m┃\\x1b[1m \\x1b[0m\\x1b[1m Box Office\\x1b[0m\\x1b[1m \\x1b[0m┃ \n┡━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━╇━━━━━━━━━━━━━━━━┩ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 20, 2019\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars: The Rise of Skywalker \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m $952,110,690\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mMay 25, 2018\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mSolo: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m $393,151,347\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 15, 2017\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mStar Wars Ep. V111: The Last Jedi\\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,539,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n│\\x1b[36m \\x1b[0m\\x1b[36mDec 16, 2016\\x1b[0m\\x1b[36m \\x1b[0m│\\x1b[35m \\x1b[0m\\x1b[35mRogue One: A Star Wars Story \\x1b[0m\\x1b[35m \\x1b[0m│\\x1b[32m \\x1b[0m\\x1b[32m$1,332,439,889\\x1b[0m\\x1b[32m \\x1b[0m│ \n└──────────────┴───────────────────────────────────┴────────────────┘ \n'''\n expected = [\n {\n \"released\": \"Dec 20, 2019\\nMay 25, 2018\\nDec 15, 2017\\nDec 16, 2016\",\n \"title\": \"Star Wars: The Rise of Skywalker\\nSolo: A Star Wars Story\\nStar Wars Ep. V111: The Last Jedi\\nRogue One: A Star Wars Story\",\n \"box_office\": \"$952,110,690\\n$393,151,347\\n$1,332,539,889\\n$1,332,439,889\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def in_green(s: str) -> str:\n return f\"\\033[92m{str(s)}\\033[0m\"", "def color(code):\n return lambda t: \"\\033[{0}{1}\\033[0;m\".format(code, t)", "def _strip_ansi(s):\n if isinstance(s, str):\n return _ansi_codes.sub(r\"\\4\", s)\n else: # a bytestring\n return _ansi_codes_bytes.sub(r\"\\4\", s)", "def style_output(msg='{}'):\n green_code = '\\033[0;32m'\n return text_color(msg, green_code)", "def ANSIIcode(color='black', style='normal'):\n\n colorCode = colorCodes[color]\n styleCode = styleCodes[style]\n\n return '\\033[' + styleCode + colorCode + 'm'", "def color_str(text, color):\n if not is_cli() or no_color():\n # Disable color output if not in CLI mode or if color is disabled\n return text\n return f'\\033[{_COLORS[color]}m{text}\\033[30m'", "def strip_ansi(text):\n return ANSI_ESCAPE_RE.sub('', text)", "def text_color(string: str, color: str) -> str:\n return f\"\\x1b{_code(color)}{string}\\x1b[0m\"", "def test_plain_ansi(self):\n irc_ansi = irc.parse_ansi_to_irc(string.printable)\n ansi_irc = irc.parse_irc_to_ansi(string.printable)\n self.assertEqual(irc_ansi, string.printable)\n self.assertEqual(ansi_irc, string.printable)", "def colorize_string(string: str, r: int, g: int, b: int, *, reset: bool = True) -> str:\n # Todo: optimize sequential characters with same colors.\n output = f\"\\u001b[38;2;{r};{g};{b}m{string}\"\n if reset:\n output += \"\\033[0m\"\n return output", "def __termcode(num):\r\n return \"\\033[%sm\" % num", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def strip_ansi(text: str):\n return _ANSI_SEQUENCE_REGEX.sub('', text)", "def printColorizedInWindows(text, color):\n std_out_handle = ctypes.windll.kernel32.GetStdHandle(-11)\n for i in range(0, len(color)):\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, color[i])\n sys.stdout.write(text)\n # cor padrão é 7, white\n ctypes.windll.kernel32.SetConsoleTextAttribute(std_out_handle, 7)", "def status(s):\n 
print(\"\\033 {}\".format(s))#print(\"\\033[1m{0}\\033[0m\".format(s))", "def writec(text, color='black', style='normal'):\n\n sys.stdout.write(strc(text, color, style))", "def ColorizeAA(self, text):\n if (text in ['A', 'F', 'H', 'I', 'K', 'L', 'M', 'P', 'R', 'V', 'W']):\n escape = '\\033[91m' # Red\n elif (text in ['C', 'G', 'N', 'Q', 'S', 'T', 'Y', 'B', 'Z']):\n escape = '\\033[96m' # Blue\n elif (text in ['D', 'E']):\n escape = '\\033[92m' # Green\n elif (text in ['X', '*']):\n escape = '\\033[93m' # Yellow\n else:\n return text\n return escape + text + '\\033[0m'", "def highlight(string: str) -> str:\n return text_color(string, \"cyan\")", "def test_html_conversion(self):\n ansi_encoded_text = 'I like %s - www.eelstheband.com' % ansi_wrap('birds', bold=True, color='blue')\n assert ansi_encoded_text == 'I like \\x1b[1;34mbirds\\x1b[0m - www.eelstheband.com'\n html_encoded_text = convert(ansi_encoded_text)\n assert html_encoded_text == (\n '<code>I like <span style=\"font-weight:bold;color:blue\">birds</span> - '\n '<a href=\"http://www.eelstheband.com\" style=\"color:inherit\">www.eelstheband.com</a></code>'\n )", "def style_string(message, no_color, fg='yellow'):\n if no_color:\n return message\n else:\n return click.style(message, fg=fg)", "def bold(msg):\n return '\\033[1m%s\\033[0m' % msg", "def strip_raw_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.strip_raw_codes(string)", "def _color_wrap(termcode):\n return lambda x: \"\\033[{}m{}\\033[0m\".format(termcode, x)", "def test_tabulate_ansi_escape_in_default_value():\n\n data = [[\"1\", None], [\"2\", \"Sam\"], [\"3\", \"Joe\"]]\n headers = [\"id\", \"name\"]\n\n styled = format_output(\n iter(data),\n headers,\n format_name=\"psql\",\n missing_value=\"\\x1b[38;5;10mNULL\\x1b[39m\",\n )\n unstyled = format_output(\n iter(data), headers, format_name=\"psql\", missing_value=\"NULL\"\n )\n\n stripped_styled = [strip_ansi(s) for s in styled]\n\n assert list(unstyled) == stripped_styled", "def color(color):\n if sys.platform == \"win32\":\n if color == \"green\":\n set_text_attr(FOREGROUND_GREEN | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"yellow\":\n set_text_attr(FOREGROUND_YELLOW | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"red\":\n set_text_attr(FOREGROUND_RED | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"blue\":\n set_text_attr(FOREGROUND_BLUE | get_text_attr() & 0x0070 | FOREGROUND_INTENSITY)\n elif color == \"reset\":\n set_text_attr(FOREGROUND_GREY | get_text_attr() & 0x0070)\n else :\n if color == \"green\":\n sys.stdout.write('\\033[92m')\n elif color == \"red\":\n sys.stdout.write('\\033[91m')\n elif color == \"blue\":\n sys.stdout.write('\\033[94m')\n elif color == \"reset\":\n sys.stdout.write('\\033[0m')", "def add_color_emit_ansi(fn):\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not 
use log here.\n return fn(*new_args)\n return new", "def remove_ansi_escape_sequences(input_string):\n ansi_escape = re.compile(r'(\\x9B|\\x1B\\[)[0-?]*[ -/]*[@-~]')\n result = ansi_escape.sub('',input_string)\n return result", "def ColorizeDNA(self, text):\n if (text == 'A'):\n escape = '\\033[92m' # Green\n elif (text == 'G'):\n escape = '\\033[93m' # Yellow\n elif (text == 'T'):\n escape = '\\033[91m' # Red\n elif (text == 'C'):\n escape = '\\033[96m' # Blue\n else:\n return text\n return escape + text + '\\033[0m'", "def sub_ansi(self, ansimatch):\n return self.ansi_map_dict.get(ansimatch.group(), \"\")", "def strip_ansi(content):\n return ANSI_ESCAPES_REGEX.sub('', content)", "def parse_ansi(string, strip_ansi=False, parser=ANSI_PARSER, xterm256=False, mxp=False):\n string = string or \"\"\n return parser.parse_ansi(string, strip_ansi=strip_ansi, xterm256=xterm256, mxp=mxp)", "def color_string(self, data, type):\n\n # http://stackoverflow.com/questions/287871/print-in-terminal-with-colors-using-python\n\n if self.options['no_color']:\n return data\n\n CEND = '\\x1b[0m'\n\n if type == ColorType.ok:\n return '\\x1b[1;32;40m{0}{1}'.format(data, CEND)\n if type == ColorType.error:\n return '\\x1b[1;31;40m{0}{1}'.format(data, CEND)\n if type == ColorType.warning:\n return '\\x1b[1;36;40m{0}{1}'.format(data, CEND)\n if type == ColorType.info:\n return '\\x1b[1;34;40m{0}{1}'.format(data, CEND)\n\n return str", "def ascii_print(string):\n string = ANSI_ESCAPE.sub(\"\", string)\n print(\"\".join(ch for ch in string if ch == \"\\n\" or unicodedata.category(ch)[0] != \"C\"))", "def strip_raw_codes(self, string):\n return self.ansi_regex.sub(\"\", string)", "def status(s: str):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def ascii_convert_str(the_str: str):\n return ANSI_ESCAPE.sub(rb\"\", the_str)", "def strip_ansi(string, parser=ANSI_PARSER):\n string = string or \"\"\n return parser.parse_ansi(string, strip_ansi=True)", "def remove_ansi_escape_sequence(self, text):\n\n # By default no string returned\n output = \"\"\n\n # By default no escape sequence found\n esc_found = 0\n\n # Read char by char a string\n for i in text:\n\n # Display char\n # log.info(f\"{str(i).encode('ascii')}\")\n\n # No escape previously found?\n if esc_found == 0:\n\n # No escape sequence currently found\n\n # Escape?\n if i == \"\\x1b\":\n\n # Yes\n log.info(\"Esc!\")\n\n # Escape found\n esc_found = 1\n\n else:\n\n # No\n\n # Then the current char can be saved\n output += i\n\n # Escape previously found?\n elif esc_found == 1:\n\n # Yes\n\n # Then check if this is a CSI sequence\n if i == \"[\":\n\n # Beginning of CSI sequence\n log.info(\"CSI sequence\")\n\n # CSI sequence\n esc_found = 2\n\n else:\n\n # Another Escape sequence\n\n # Keep the escape sequence in the string\n output += \"\\x1b\" + i\n\n # No escape sequence next\n esc_found = 0\n\n else:\n\n # Char between 'a' and 'z' or 'A' and 'Z'?\n if (i >= \"a\" and i <= \"z\") or (i >= \"A\" and i <= \"Z\"):\n\n # Yes\n\n # Then it is the end of CSI escape sequence\n log.info(\"End of escape sequence\")\n\n # No escape sequence next\n esc_found = 0\n\n # Return a string without ANSI escape sequence\n return output", "def parse_ansi(self, string, strip_ansi=False, xterm256=False, mxp=False):\n if hasattr(string, \"_raw_string\"):\n if strip_ansi:\n return string.clean()\n else:\n return string.raw()\n\n if not string:\n return \"\"\n\n # check cached parsings\n global _PARSE_CACHE\n cachekey = \"%s-%s-%s-%s\" % (string, strip_ansi, xterm256, mxp)\n if cachekey 
in _PARSE_CACHE:\n return _PARSE_CACHE[cachekey]\n\n # pre-convert bright colors to xterm256 color tags\n string = self.brightbg_sub.sub(self.sub_brightbg, string)\n\n def do_xterm256_fg(part):\n return self.sub_xterm256(part, xterm256, \"fg\")\n\n def do_xterm256_bg(part):\n return self.sub_xterm256(part, xterm256, \"bg\")\n\n def do_xterm256_gfg(part):\n return self.sub_xterm256(part, xterm256, \"gfg\")\n\n def do_xterm256_gbg(part):\n return self.sub_xterm256(part, xterm256, \"gbg\")\n\n in_string = utils.to_str(string)\n\n # do string replacement\n parsed_string = []\n parts = self.ansi_escapes.split(in_string) + [\" \"]\n for part, sep in zip(parts[::2], parts[1::2]):\n pstring = self.xterm256_fg_sub.sub(do_xterm256_fg, part)\n pstring = self.xterm256_bg_sub.sub(do_xterm256_bg, pstring)\n pstring = self.xterm256_gfg_sub.sub(do_xterm256_gfg, pstring)\n pstring = self.xterm256_gbg_sub.sub(do_xterm256_gbg, pstring)\n pstring = self.ansi_sub.sub(self.sub_ansi, pstring)\n parsed_string.append(\"%s%s\" % (pstring, sep[0].strip()))\n parsed_string = \"\".join(parsed_string)\n\n if not mxp:\n parsed_string = self.strip_mxp(parsed_string)\n\n if strip_ansi:\n # remove all ansi codes (including those manually\n # inserted in string)\n return self.strip_raw_codes(parsed_string)\n\n # cache and crop old cache\n _PARSE_CACHE[cachekey] = parsed_string\n if len(_PARSE_CACHE) > _PARSE_CACHE_SIZE:\n _PARSE_CACHE.popitem(last=False)\n\n return parsed_string", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def status(s):\n print(\"\\033[1m{0}\\033[0m\".format(s))", "def fg(value: int) -> str:\n return f\"\\033[38;5;{value}m\"", "def scrub_output(output):\n ansi_escape = re.compile(r'\\x1b[^m]*m')\n return ansi_escape.sub('', output)", "def _get_output_letter(rgb):\n\n\t\tif rgb == Rgb.pastel_purple():\n\t\t\treturn \"p\"\n\t\telif rgb == Rgb.pastel_yellow():\n\t\t\treturn \"y\"\n\t\telif rgb == Rgb.pastel_green():\n\t\t\treturn \"g\"\n\t\telif rgb == Rgb.pastel_blue():\n\t\t\treturn \"b\"\n\t\telif rgb == Rgb.strong_red():\n\t\t\treturn \" \"\n\n\t\treturn \"?\"", "def textColor(colorNumber):\n return '\\033[%dm' % (30 + colorNumber)", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def status(s):\n print('\\033[1m{0}\\033[0m'.format(s))", "def colorText(s, c):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n HEAD = \"\\033[\"\n TAIL = \"m\"\n\n color = \"39;49\"\n lastDifference = 800\n\n for i in COLORS:\n diff = abs(i[0] - c[0]) + abs(i[1] - c[1]) + abs(i[2] - c[2]) #calculates difference to stock color\n if diff < lastDifference:\n lastDifference = diff #chooses closest match\n color = i[3]\n\n return HEAD+color+TAIL+s+COLOR_RESET #color code + string + reset code", "def strip_ansi_escape(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n return re.sub(r\"\\x1b[^m]*m\", \"\", data)", "def flag(code):\n\tOFFSET = ord('🇦') - ord('A')\n\tif not code:\n\t\treturn u''\n\tpoints = list(map(lambda x: ord(x) + OFFSET, code.upper()))\n\ttry:\n\t\treturn chr(points[0]) + chr(points[1])\n\texcept ValueError:\n\t\treturn ('\\\\U%08x\\\\U%08x' % tuple(points)).decode('unicode-escape')", "def _surround_ansi_escapes(prompt, start=\"\\x01\", end=\"\\x02\"):\n # Windows terminals don't use ANSI escape codes and Windows readline isn't 
based on GNU Readline\n if sys.platform == \"win32\":\n return prompt\n\n escaped = False\n result = \"\"\n\n for c in prompt:\n if c == \"\\x1b\" and not escaped:\n result += start + c\n escaped = True\n elif c.isalpha() and escaped:\n result += c + end\n escaped = False\n else:\n result += c\n\n return result", "def strip_ansi_color(data):\n if isinstance(data, bytes):\n data = data.decode(\"utf-8\")\n\n # Taken from tabulate\n invisible_codes = re.compile(r\"\\x1b\\[\\d*m\")\n\n return re.sub(invisible_codes, \"\", data)", "def text(string, font = default_FONT, align = 'left'):\n map(lambda c: data(font[c] + [0x00]), string)", "def clowder_command(cmd):\n\n return colored(cmd, attrs=['bold'])", "def terminal(string):\n if os.name is 'posix':\n if string[0] == \"'\" and string[-1] == \"'\": \n #GNU/Linux (Ubuntu at least) treat the files that you can include with drag and drop, adding single quotes\n return string.replace(\"'\", '')\n #OSX scape spaces with double backslash\n return string.replace('\\\\', '')\n \n if os.name in ('nt', 'dos', 'ce'):\n #Windows treat the files that you can include with drag and drop, adding double quotes\n return string.replace('\"', '')\n \n return string", "def _code(color: str) -> str:\n # Escape codes for the 8 main colors go from 30 to 37.\n num_str = str(30 + COLORS.index(color))\n return f\"[{num_str}m\"", "def _color_text(text, color):\n color_mapping = {\n 'black': '0;30',\n 'red': '0;31',\n 'green': '0;32',\n 'brown': '0;33',\n 'blue': '0;34',\n 'magenta': '0;35',\n 'cyan': '0;36',\n 'lightgrey': '0;37',\n 'default': '0;39',\n 'darkgrey': '1;30',\n 'lightred': '1;31',\n 'lightgreen': '1;32',\n 'yellow': '1;33',\n 'lightblue': '1;34',\n 'lightmagenta': '1;35',\n 'lightcyan': '1;36',\n 'white': '1;37'}\n\n if sys.platform == 'win32':\n # On Windows do not colorize text unless in IPython\n return text\n\n color_code = color_mapping.get(color, '0;39')\n return '\\033[{0}m{1}\\033[0m'.format(color_code, text)", "def __color(self, color):\n return '\\033[' + \\\n \";\".join(\n [\n unicode(COLORS[c_lower])\n for c_lower in [col.lower() for col in color]\n if c_lower in COLORS\n ]\n ) + \\\n 'm'", "def error(text):\n print(red(\"✘ {0}\".format(text)))\n sys.stdout.flush()", "def test_assembleForegroundColor(self):\n self.assertEqual(\n irc.assembleFormattedText(A.fg.blue[\"hello\"]), \"\\x0f\\x0302hello\"\n )", "def init_writing():\n\n # This module is a quick workaround for Unicode \n # varying byte length in windows. 
\n win_unicode_console.enable()\n colorama.init(convert=True)", "def from_val(value: int) -> str:\n return f\"\\033[{value}m\"", "def color_style():\n if (sys.platform == 'win32' or sys.platform == 'Pocket PC'\n or sys.platform.startswith('java') or not sys.stdout.isatty()):\n return no_style()\n class dummy: pass\n style = dummy()\n style.ERROR = termcolors.make_style(fg='red', opts=('bold',))\n style.ERROR_OUTPUT = termcolors.make_style(fg='red', opts=('bold',))\n style.NOTICE = termcolors.make_style(fg='red')\n style.SQL_FIELD = termcolors.make_style(fg='green', opts=('bold',))\n style.SQL_COLTYPE = termcolors.make_style(fg='green')\n style.SQL_KEYWORD = termcolors.make_style(fg='yellow')\n style.SQL_TABLE = termcolors.make_style(opts=('bold',))\n return style", "def colorize_output(output: str, color: str) -> str:\n colors = {\n \"red\": f\"\\033[91m{output}\\033[0m\", # Red text\n \"green\": f\"\\033[92m{output}\\033[0m\", # Green text\n \"yellow\": f\"\\033[93m{output}\\033[0m\", # Yellow text\n \"blue\": f\"\\033[94m{output}\\033[0m\", # Blue text\n }\n\n return colors.get(color, output)", "def apply(self, text):\n\n codes = list();\n\n if (None is not self.__foreground) :\n codes.append(self.__foreground);\n\n if (None is not self.__background) :\n codes.append(self.__background);\n\n if self.__options :\n codes.extend(self.__options);\n\n\n if not codes:\n return text;\n\n\n return \"\\033[{0}m{1}\\033[0m\".format(';'.join(codes), text);", "def pretty_hebrew(val):\n return 'font-size:20px; font-family: Times New Roman; text-align: right; max-width: 500px'", "def hash_coloured(text):\n ansi_code = int(sha256(text.encode(\"utf-8\")).hexdigest(), 16) % 230\n return colored(text, ansi_code=ansi_code)", "def pad_ansi(text, width, char, left=False):\n current_width = len(ANSI_PATTERN.sub('', text))\n parts = [text, (width - current_width) * char]\n if left:\n parts = reversed(parts)\n return ''.join(parts)", "def get_coloured_text_string(text, colour):\n if colour==\"red\":\n return (\"\\033[31m\" + text + \"\\033[0m\")\n if colour==\"green\":\n return (\"\\033[32m\" + text + \"\\033[0m\")\n if colour==\"yellow\":\n return (\"\\033[33m\" + text + \"\\033[0m\")\n if colour==\"blue\":\n return (\"\\033[34m\" + text + \"\\033[0m\")\n if colour==\"purple\":\n return (\"\\033[35m\" + text + \"\\033[0m\")\n if colour==\"cyan\":\n return (\"\\033[36m\" + text + \"\\033[0m\")\n if colour==\"white\":\n return (\"\\033[37m\" + text + \"\\033[0m\")\n return text", "def get_char_echo(self) -> str:\n ...", "def _change_text_color(text, color_code) -> StyledStr:\n uncolored_fg = _remove_text_colors(text)\n return _apply_ansi_code(color_code, uncolored_fg)", "def bg(value: int) -> str:\n return f\"\\033[48;5;{value}m\"", "def style_error(msg='{}'):\n red_code = '\\033[0;31m'\n return text_color(msg, red_code)", "def bold(string):\n return BOLD + string + RESETFORMAT", "def _process_colors(self, s: str) -> str:\r\n return self._color_regexp.sub(lambda m: self._ansi_equivalent(m.group()), s)", "def _colorstr(self, args):", "def error(text):\n return color_str(text, 'RED')", "def color(level, string):\n colors = {\n 'HEADER': '\\033[95m',\n 'OKBLUE': '\\033[94m',\n 'DARKCYAN': '\\033[36m',\n 'INFO': '\\033[94m',\n 'WARNING': '\\033[93m',\n 'FAIL': '\\033[91m',\n 'ENDC': '\\033[0m',\n 'GREEN': '\\033[92m',\n 'BOLD': '\\033[1m',\n 'UNDERLINE': '\\033[4m'\n }\n return colors[level] + string + colors['ENDC']", "def colored (string_, color, attrs):\n return string_", "def fork_string(name):\n\n return 
colored(name, 'cyan')" ]
[ "0.698737", "0.6955122", "0.68967116", "0.6775408", "0.67163545", "0.6666538", "0.6612062", "0.6594532", "0.6576787", "0.6540611", "0.6525283", "0.6465225", "0.6419213", "0.6390745", "0.6369478", "0.63463056", "0.6337062", "0.6330112", "0.6313778", "0.6313092", "0.6297528", "0.6288874", "0.6213546", "0.6130968", "0.61224574", "0.61007416", "0.60889256", "0.606803", "0.60561806", "0.60425407", "0.603665", "0.6012752", "0.60114825", "0.5969875", "0.5948879", "0.59435946", "0.5905553", "0.58892095", "0.5884584", "0.5860625", "0.58597696", "0.5859214", "0.58372384", "0.58319503", "0.5826008", "0.5825761", "0.58066195", "0.5792427", "0.57857186", "0.57807034", "0.57675", "0.5763598", "0.5742393", "0.5708926", "0.56836504", "0.56560683", "0.56560683", "0.56560683", "0.56560683", "0.56456083", "0.5643857", "0.563963", "0.56383204", "0.56292593", "0.56292593", "0.56292593", "0.56292593", "0.56132734", "0.5612045", "0.5610244", "0.56020385", "0.55869955", "0.55829555", "0.55711764", "0.55500126", "0.55497307", "0.55463845", "0.5534905", "0.5529148", "0.5521941", "0.5505092", "0.5499085", "0.5491177", "0.54647434", "0.54500383", "0.54473907", "0.54214174", "0.54169357", "0.5415808", "0.5405161", "0.54010695", "0.53999966", "0.53897995", "0.53782797", "0.5369644", "0.53556836", "0.53528404", "0.5350707", "0.53336114", "0.53330576" ]
0.771174
0
Sort a list of TKey objects by their names, ignoring case
def keyListSort(keyList): keyList.sort(key=lambda y: y.GetName().lower())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def natsort_case_insensitive(seq):\r\n return natsort(seq, case_sensitive=False)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def titleSort(dictList):\n\tres = sorted(dictList, key=lambda k: getSortTitle(k))\n\treturn res", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def tupleListSort(tupleList):\n tupleList.sort(key=lambda y: y[0].lower())", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def _sort_by_name(bam_fn):", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sorted(cls, tags: list, reverse: bool = False) -> list:\n return sorted(tags, key=lambda x: x.name.lower(), reverse=reverse)", "def sortTermsAlphabetically(terms):\n # Tutorial for sorting credit:\n # https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-lambda-function/\n sorted_list = sorted(terms, key=lambda i: (i[\"term_header\"], i[\"rating\"]))\n return sorted_list", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)", "def sort(self, key_func):\n pass", "def alphabetical_sorted(iterable, cmp=None, key=lambda x: x.lower(),\n reverse=False):\n return sorted(iterable, cmp, key, reverse)", "def natsorted(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key)", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def _sort_natural(names_list, reverse=False):\n def sort_key(val):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', val)]\n\n return sorted(names_list, key=sort_key, reverse=reverse)", "def sort_key(self):\n ...", "def sorted_nicely(ls, key, rev=False):\n def convert(text):\n return int(text) if text.isdigit() else text\n\n def alphanum_key(item):\n return [convert(c) for c in re.split('([0-9]+)', key(item))]\n\n return sorted(ls, key=alphanum_key, reverse=rev)", "def sortedKeys(self):\n sortedItems = list(self.items())\n\n def compare(x, y): return sign(y[1] - x[1])\n sortedItems.sort(cmp=compare)\n return [x[0] for x in sortedItems]", "def string_sort(a_list):\n for index in range(1, len(a_list)): # indexing through the list\n value = a_list[index]\n pos = index - 1\n while pos >= 0 and a_list[pos].lower() > value.lower(): #case insensitive, compare words\n a_list[pos + 1] = a_list[pos]\n pos -= 1\n a_list[pos + 1] = value", "def sort_words_case_insensitively(words):\r\n numbers = list()\r\n strings = list()\r\n for word in words:\r\n if 
word[0].isdigit():\r\n numbers.append(word)\r\n else:\r\n strings.append(word)\r\n\r\n numbers = sorted(numbers)\r\n strings = sorted(strings, key=str.casefold)\r\n return strings+numbers", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def natsorted_icase(lst):\n return sorted(lst, key=natsort_key_icase)", "def sortByName(requestContext, seriesList):\n def compare(x,y):\n return cmp(x.name, y.name)\n\n seriesList.sort(compare)\n return seriesList", "def sortLoadFiles(self):\n self.loadFiles.sort()\n self.loadFiles.sort(lambda a,b: cmp(a[-3:].lower(), b[-3:].lower()))", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def sort_by_unicode(self):\n utils.sort_unicode_word_list(self.words_new)", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )", "def sorted_nicely(l, key):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda item: [ convert(c) for c in re.split('([0-9]+)', key(item)) ]\n return sorted(l, key = alphanum_key)", "def sort_name_key(name):\n if name.isupper():\n name_type = NAME_TYPE_CLASS if len(name) == 1 else NAME_TYPE_CONSTANT\n elif name.islower():\n name_type = NAME_TYPE_FUNCTION\n else:\n name_type = NAME_TYPE_CLASS if name[0].isupper() else NAME_TYPE_FUNCTION\n return (name_type, name)", "def natsort(lst):\n lst.sort(key=natsort_key)", "def natsort(seq, case_sensitive=True):\r\n if case_sensitive:\r\n natsort_key = _natsort_key\r\n else:\r\n natsort_key = _natsort_key_case_insensitive\r\n\r\n alist = list(seq)\r\n alist.sort(key=natsort_key)\r\n\r\n return alist", "def alphabetical(lst):\n\treturn list(reversed(sorted(lst, key=lambda x: x[0])))", "def sorted_gnames():\n return sorted(group_names.keys())", "def sort_slide_names(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def sort_list(list, key):\r\n list.sort(lambda x,y: cmp(key(x), key(y))) # Python < 2.4 hack\r\n return list", "def 
sortedKeys(self):\n sortedItems = self.items()\n compare = lambda x, y: sign(y[1] - x[1])\n sortedItems.sort(cmp=compare)\n return [x[0] for x in sortedItems]", "def fixture_sorted_param_names(allparams):\n return sorted(list(allparams.keys()))", "def sort_records_by_name(records):\n return sorted(records, key=lambda x: (x.last_name, x.first_name), reverse=True)", "def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def human_sort( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l", "def karyotypicSortKey(s):\n if s == \"chrM\": return []\n if s == \"MT\": return [\"~\"]\n return naturalSortKey(s)", "def _alphanumeric_sort(iterable):\n convert = lambda text: int(text) if text.isdigit() else text\n sort_key = lambda k: [convert(c) for c in re.split('([0-9]+)', k)]\n return sorted(iterable, key=sort_key)", "def _sort_key(k):\n ret = []\n for s in k.common_path:\n s = (s if isinstance(s, (int, text_type)) else s.decode())\n\n if isinstance(s, text_type) and s.isnumeric() or isinstance(s, int):\n ret.append(('', -int(s)))\n else:\n ret.append((s,))\n return ret", "def natsorted(lst):\n return sorted(lst, key=natsort_key)", "def sort_nicely(alist, dict_key=None):\n convert = lambda text: int(text) if text.isdigit() else text\n if dict_key is None:\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n else:\n alphanum_key = operator.itemgetter(dict_key)\n alist.sort(key=alphanum_key)", "def sort_key(alpha):\n if not isinstance(alpha, dict):\n # alpha *should* be a dict, but if passed a list or a string, treat it\n # as an ordering\n try:\n alpha = {k: v for v, k in enumerate(alpha)}\n except TypeError:\n # alpha isn't iterable, and is therefore useless as a key\n alpha = {}\n a = sorted(alpha.keys(), key=lambda x: -len(x))\n\n def key(word):\n out = []\n for m in regex.finditer('(' + '|'.join(a) + ')|.', word):\n if m.group(1):\n if alpha[m[0]] is not None:\n out.append(alpha[m[0]])\n else:\n out.append(-1)\n return out\n\n return key", "def sort_keys( self, results ):\n if self.sorted_keys != None:\n return self.sorted_keys\n reverse_dict = {}\n for key, item in results.items():\n size = self.data_size( item )\n if size not in reverse_dict:\n reverse_dict[size] = [key]\n else:\n reverse_dict[size].append( key )\n sorted_dict_keys = reverse_dict.keys(); sorted_dict_keys.sort()\n sorted_dict_keys.reverse()\n sorted_keys = []\n for key in sorted_dict_keys:\n sorted_keys.extend( reverse_dict[key] )\n return sorted_keys", "def sort_by(dict_list, key):\n return sorted(dict_list, key=lambda k: k[key])", "def sort_diff_data_in_alphabetical_order(diff):\n sorted_diff = OrderedDict(sorted(diff.items(), key=lambda x:x[0]))\n for interface_name, interface in sorted_diff.iteritems():\n sorted_interface = sort_members_in_alphabetical_order(interface)\n if DIFF_TAG in interface:\n sorted_interface[DIFF_TAG] = interface[DIFF_TAG]\n sorted_diff[interface_name] = sorted_interface\n return sorted_diff", "def sort_keys( self, results ):\n if self.sorted_keys != None:\n return self.sorted_keys\n reverse_dict = {}\n for key, item in results.items():\n size = self.data_size( item )\n if size not in reverse_dict:\n reverse_dict[size] = [key]\n 
else:\n reverse_dict[size].append( key )\n\n sorted_dict_keys = reverse_dict.keys(); sorted_dict_keys.sort()\n sorted_dict_keys.reverse()\n sorted_keys = []\n for key in sorted_dict_keys:\n sorted_keys.extend( reverse_dict[key] )\n return sorted_keys", "def sortedFields(cls):\n return [\n i[0] for i in sorted(cls._nameToValue.items(), key=lambda item: item[1])\n ]", "def sort_list(self, key_):\n options = {\n 'index': 0,\n 'name' : 1,\n 'surname': 2,\n 'email': 3,\n 'phone': 4,\n }\n if key_ in options.keys():\n key_ = options.get(key_)\n\n return(sorted(self.contacts, key = lambda x: x[key_]))", "def sortFiles(files):\n def sortKey(file):\n dirFile = file.lower().rsplit('\\\\',1)\n if len(dirFile) == 1: dirFile.insert(0,'')\n return dirFile\n sortKeys = dict((x,sortKey(x)) for x in files)\n return sorted(files,key=lambda x: sortKeys[x])", "def _natsort_key_case_insensitive(item):\r\n # added the lower() call to allow for case-insensitive sorting\r\n item = str(item).lower()\r\n\r\n try:\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item)\r\n except TypeError:\r\n # if item is a tuple or list (i.e., indexable, but not a string)\r\n # work with the first element\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])\r\n for ii in range(len(chunks)):\r\n if chunks[ii] and chunks[ii][0] in '0123456789':\r\n if '.' in chunks[ii]:\r\n numtype = float\r\n else:\r\n numtype = int\r\n # wrap in tuple with '0' to explicitly specify numbers come first\r\n chunks[ii] = (0, numtype(chunks[ii]))\r\n else:\r\n chunks[ii] = (1, chunks[ii])\r\n return (chunks, item)", "def sort_nicely(l): \n import re\n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key=alphanum_key)", "def _to_order(key):\n return list(sorted(key).index(char) for char in key)", "def sort_words(words):\n return sorted(words)", "def sort_mss(ms_list):\n return sorted(ms_list, key=lambda x: witintify(x))", "def natural_sort( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )\n return l", "def get_ordered_adversary_names(self) -> List[str]:\n pass", "def sorted_tags(self):\n return sorted(self.tags, key=lambda x: x.name)", "def SortList(self, key: callable = str.lower):\n temp_list = self.Items\n temp_list.sort(key=key)\n # delete contents of present listbox\n self.delete(0, Tags.End.value)\n # load listbox with sorted data\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def format_words(words):\n return sorted(words, key=str.lower)", "def sort_words_case_insensitively(words):\n #temp = sorted(words, key=lambda test_str: test_str[:1].lower() + test_str[1:])\n temp = sorted(words, key=str.lower)\n temp1 = []\n for index, word in enumerate(temp):\n if not word[0].isdigit():\n temp1.append(temp[index])\n for index, word in enumerate(temp):\n if word[0].isdigit():\n temp1.append(temp[index])\n return temp1", "def natural_sort_case_insensitive_comparison(value1, value2):\n return natural_sort_comparison(value1.lower(), value2.lower())", "def orderPairs(self):\n pairsByTickers = {}\n for asset in self.availableTickers:\n holder = []\n for pair in self.allPairs:\n if asset.lower() in pair:\n holder.append(pair.upper())\n pairsByTickers[asset] = holder\n return pairsByTickers", "def _NaturalSortByName(node):\n # See: https://blog.codinghorror.com/sorting-for-humans-natural-sort-order/\n name = 
node.get('name').lower()\n convert = lambda text: int(text) if text.isdigit() else text\n return [convert(c) for c in re.split('([0-9]+)', name)]", "def naturalSortKey(s):\n return [(str, int)[k](\"\".join(v)) for k, v in groupby(s, str.isdigit)]", "def sortn(xs):\n return sorted(xs, key=sortnkey)", "def sort_members_in_alphabetical_order(interface):\n sorted_interface = OrderedDict()\n for member_type in EXTATTRIBUTES_AND_MEMBER_TYPES:\n member_names = []\n sorted_member_names = OrderedDict()\n sorted_members = []\n for member in interface[member_type]:\n if sorted_members:\n pointer = 0\n for sorted_member in sorted_members:\n if member['Name'] < sorted_member['Name']:\n sorted_members.insert(pointer, member)\n break\n elif pointer >= (len(sorted_members)-1):\n sorted_members.append(member)\n else:\n pointer += 1\n else:\n sorted_members.append(member)\n sorted_interface[member_type] = sorted_members\n return sorted_interface", "def _sorted_nicely(self, l):\n\n import re\n\n convert = lambda text: int(text) if text.isdigit() else \"\"\n\n alphanum_key = lambda key: [\n convert(c) for c in re.split(\n '([0-9]+)', key)]\n\n return sorted(l, key=alphanum_key)", "def NiceSort(values, key=None):\n if key is None:\n keyfunc = NiceSortKey\n else:\n keyfunc = lambda value: NiceSortKey(key(value))\n\n return sorted(values, key=keyfunc)", "def dedup_and_title_case_names(names):\n\tdictionary = list(dict.fromkeys(NAMES))\n\treturn [name.title() for name in dictionary]", "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def sort_title(self):\n return self.sort('title')", "def order_by(self, results, key_, direction=\"ASC\"):\n\n return sorted(results, key=lambda x: x.get(key_), reverse=direction==\"DESC\")", "def NiceSortKey(value):\n return [_NiceSortTryInt(grp)\n for grp in _SORTER_RE.match(value).groups()]", "def _natural_key_sort(string_to_sort):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', string_to_sort)]", "def sorted_nicely(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def _natural_sort(alphanumeric_data):\n try:\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n except Exception as e:\n logger.error(\"Exception in _natural_sort : \" + str(e))\n return sorted(alphanumeric_data, key=alphanum_key, reverse=True)", "def diffsort(self, key):\n # Append newlines because difflib works better with them\n a = [s + '\\n' for s in self.d[key]]\n b = sorted(a, key=str.lower)\n return difflib.unified_diff(a, b, fromfile=key+' unsorted',\n tofile=key+' sorted')", "def sorted_nicely( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def natsort_key_icase(s):\n return natsort_key(s.lower())", "def keys(self):\n return sorted(\n super().keys() + list(ttfont_dict_keys) + [text_s, case_s]\n )", "def alphabetize(value):\n if isinstance(value, list) and all(isinstance(s, str) for s in value):\n return sorted([s.lower() for s in value])\n else:\n raise TypeError(\"Argument must be a list of strings\")", "def sort(self, keys=None, reverse=False):\n import hxl.filters\n return hxl.filters.SortFilter(self, tags=keys, reverse=reverse)" ]
[ "0.7360461", "0.7314273", "0.6833507", "0.67649287", "0.66534966", "0.6629779", "0.6619612", "0.6566453", "0.65517783", "0.6549519", "0.6549519", "0.65458226", "0.65365845", "0.65276384", "0.6501143", "0.64999694", "0.64592296", "0.6409549", "0.6385534", "0.6382833", "0.63771147", "0.6361041", "0.63178575", "0.6309192", "0.6241156", "0.623927", "0.6195783", "0.61827636", "0.61751884", "0.61701214", "0.61338955", "0.6110647", "0.6104986", "0.6099877", "0.60973275", "0.60909784", "0.60861087", "0.60649085", "0.60631317", "0.6052488", "0.60216635", "0.6019358", "0.6012929", "0.597414", "0.59735996", "0.59511364", "0.5943593", "0.59224105", "0.5911005", "0.5909263", "0.59012926", "0.5891799", "0.5889448", "0.588909", "0.5875636", "0.5875301", "0.586806", "0.58586186", "0.58519095", "0.5848309", "0.5844296", "0.584376", "0.5821818", "0.5813956", "0.58111805", "0.58110243", "0.58071786", "0.5782412", "0.57804954", "0.57661563", "0.57464564", "0.57441384", "0.5728555", "0.57264155", "0.57255036", "0.57122123", "0.57059616", "0.5704069", "0.5701503", "0.57014143", "0.5691279", "0.568999", "0.56811166", "0.5678504", "0.5665047", "0.5662708", "0.566036", "0.5660263", "0.565651", "0.56538045", "0.56522065", "0.5651563", "0.56470275", "0.56395036", "0.5636111", "0.56309646", "0.56240225", "0.5612416", "0.56090176", "0.56085247" ]
0.818581
0
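
For reference, a minimal usage sketch of the keyListSort document above, assuming the TKey objects come from a ROOT file read through PyROOT; the file name and its contents are hypothetical:

import ROOT  # PyROOT bindings for the ROOT framework (assumed available)

def keyListSort(keyList):
    # Sort TKey objects in place by name, case-insensitively
    keyList.sort(key=lambda y: y.GetName().lower())

f = ROOT.TFile.Open("histograms.root")   # hypothetical input file
keys = list(f.GetListOfKeys())           # TFile.GetListOfKeys() yields TKey objects
keyListSort(keys)
print([k.GetName() for k in keys])       # key names in case-insensitive alphabetical order
f.Close()
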
Sort a list of tuples by their first elements, ignoring case
def tupleListSort(tupleList): tupleList.sort(key=lambda y: y[0].lower())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _mySort(self, alist):\n return sorted(alist, key=lambda x: (x[0].isdigit(), x.lower()))", "def sort_list_of_tuples(list):\n list.sort(key=lambda x: x[0])\n return list", "def sortCaseInsensitive(*args, **kwargs)->List[AnyStr]:\n pass", "def natsort_case_insensitive(seq):\r\n return natsort(seq, case_sensitive=False)", "def _natsort_key_case_insensitive(item):\r\n # added the lower() call to allow for case-insensitive sorting\r\n item = str(item).lower()\r\n\r\n try:\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item)\r\n except TypeError:\r\n # if item is a tuple or list (i.e., indexable, but not a string)\r\n # work with the first element\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])\r\n for ii in range(len(chunks)):\r\n if chunks[ii] and chunks[ii][0] in '0123456789':\r\n if '.' in chunks[ii]:\r\n numtype = float\r\n else:\r\n numtype = int\r\n # wrap in tuple with '0' to explicitly specify numbers come first\r\n chunks[ii] = (0, numtype(chunks[ii]))\r\n else:\r\n chunks[ii] = (1, chunks[ii])\r\n return (chunks, item)", "def string_sort(a_list):\n for index in range(1, len(a_list)): # indexing through the list\n value = a_list[index]\n pos = index - 1\n while pos >= 0 and a_list[pos].lower() > value.lower(): #case insensitive, compare words\n a_list[pos + 1] = a_list[pos]\n pos -= 1\n a_list[pos + 1] = value", "def human_sort(l):\n l.sort(key=alphanum_key)\n return l", "def tuple_sorted(a):\r\n if ((isinstance(a, int) == True) or (isinstance(a, str) == True)):\r\n return a\r\n if ((isinstance(a[0], int) == True) or (isinstance(a[0], str) == True)):\r\n return sorted(a)\r\n else:\r\n w = []\r\n for b in a:\r\n w.append(tuple(tuple_sorted(b)))\r\n return tuple(sorted(tuple(w)))", "def test_signed_sort(self):\r\n\r\n # an empty list must be returned when an empty list needs to be sorted\r\n self.assertEqual(signed_natsort([]), [])\r\n\r\n # tuples that can be sorted by type-casting the first element\r\n test_list = [('9', 'SampleA'), ('-1', 'SampleD'), ('7', 'SampleC'),\r\n ('-2', 'SampleE'), ('-0.11',\r\n 'SampleF'), ('17.11', 'SampleB'),\r\n ('100', 'SampleG'), ('13', 'SampleH')]\r\n expected_result = [('-2', 'SampleE'), ('-1', 'SampleD'),\r\n ('-0.11', 'SampleF'), ('7',\r\n 'SampleC'), ('9', 'SampleA'),\r\n ('13', 'SampleH'), ('17.11', 'SampleB'), ('100', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # tuples that must be sorted alphabetically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('Hydra', 'SampleF'),\r\n ('Carina', 'SampleB'), ('Orion', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('Auriga', 'SampleC'), ('Carina', 'SampleB'),\r\n ('Cepheus', 'SampleD'), ('Cygnus',\r\n 'SampleA'), ('Grus', 'SampleE'),\r\n ('Hydra', 'SampleF'), ('Lynx', 'SampleH'), ('Orion', 'SampleG')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed case, tuples will be sorted alpha-numerically\r\n test_list = [('Cygnus', 'SampleA'), ('Cepheus', 'SampleD'),\r\n ('Auriga', 'SampleC'), ('Grus',\r\n 'SampleE'), ('-0.11', 'SampleF'),\r\n ('17.11', 'SampleB'), ('100', 'SampleG'), ('Lynx', 'SampleH')]\r\n expected_result = [('17.11', 'SampleB'), ('100', 'SampleG'),\r\n ('-0.11', 'SampleF'), ('Auriga',\r\n 'SampleC'), ('Cepheus', 'SampleD'),\r\n ('Cygnus', 'SampleA'), ('Grus', 'SampleE'), ('Lynx', 'SampleH')]\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # 
mixed case just a list\r\n test_list = ['foo', 'bar', '-100', '12', 'spam', '4', '-1']\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # list of elements that can be type-casted\r\n test_list = ['0', '1', '14', '12', '-15', '4', '-1']\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_list)\r\n self.assertEquals(output, expected_result)\r\n\r\n # mixed dict case\r\n test_dict = {\r\n 'foo': 'a', 'bar': 'b', '-100': '1', '12': '11', 'spam': 'q',\r\n '4': '11', '-1': 'e'}\r\n expected_result = ['4', '12', '-1', '-100', 'bar', 'foo', 'spam']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)\r\n\r\n # dict where the keys can be type-casted\r\n test_dict = {\r\n '0': 'foo', '1': 'bar', '14': 'stand', '12': 'eggs', '-15': 'q',\r\n '4': 'b', '-1': 'h'}\r\n expected_result = ['-15', '-1', '0', '1', '4', '12', '14']\r\n\r\n output = signed_natsort(test_dict)\r\n self.assertEquals(output, expected_result)", "def sort_words_case_insensitively(words):\n #temp = sorted(words, key=lambda test_str: test_str[:1].lower() + test_str[1:])\n temp = sorted(words, key=str.lower)\n temp1 = []\n for index, word in enumerate(temp):\n if not word[0].isdigit():\n temp1.append(temp[index])\n for index, word in enumerate(temp):\n if word[0].isdigit():\n temp1.append(temp[index])\n return temp1", "def alpha_case_insensitive():\n# fill it out\n return sorted(STRING_LIST, key=lambda s: s.lower())", "def natsort_icase(lst: List[str]) -> None:\n lst.sort(key=natsort_key_icase)", "def keyListSort(keyList):\n keyList.sort(key=lambda y: y.GetName().lower())", "def alphabetical(lst):\n\treturn list(reversed(sorted(lst, key=lambda x: x[0])))", "def sort_fst(xs):\n return sorted(xs, key=lambda pair: pair[0])", "def langsort_tuples (lst, index, lang=None):\n\n reset_locale = _set_lang_locale(lang)\n lst.sort(lambda x, y: locale.strcoll(x[index], y[index]))\n reset_locale()", "def natsort(lst: List[str]) -> None:\n lst.sort(key=natsort_key)", "def sort_nicely(l):\r\n\tl.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort(self):\r\n self.list.sort(key=lambda x: ''.join(x))", "def sortTuple(lstTuples, element):\n\n lstTuples.sort(key=lambda x: x[element-1])\n return lstTuples", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def sort_nicely(l):\n l.sort(key=alphanum_key)", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', 
'0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def sort_nicely(l):\n l.sort(key=alphanum_key)\n return l", "def sort_1(l):\n pass", "def natural_sort(l):\n convert = lambda text: int(text) if text.isdigit() else text.lower()\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def sort_by_name(list_to_sort):\n return sorted(\n list_to_sort,\n key=lambda k: k['Name'].lower()\n )", "def natural_sort_case_insensitive_comparison(value1, value2):\n return natural_sort_comparison(value1.lower(), value2.lower())", "def humanSort(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )", "def process_tuples_sorted(self):\n return sorted(self.process_tuples, key=lambda process_tuple: process_tuple[0].name)", "def human_sort( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n alphanum_key = None\n try:\n l.sort( key=alphanum_key )\n except TypeError:\n l.sort()\n return l", "def sort(self):\n self.list.sort(key=lambda x: ''.join)", "def natural_sort( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n l.sort( key=alphanum_key )\n return l", "def natsorted(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key)", "def sorting(tokens: list):\n tokens.sort(key=lambda x: (x[0], x[1]))", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def natsorted_icase(lst: Sequence[str]) -> List[str]:\n return sorted(lst, key=natsort_key_icase)", "def sort_mixed(iterable):\n return sorted(iterable, key=lambda x: split_string_at_numbers(x))", "def _sort(self, groups):\n return sorted(groups, key=lambda group: (group.name.lower(), group.pubid))", "def natsort(seq, case_sensitive=True):\r\n if case_sensitive:\r\n natsort_key = _natsort_key\r\n else:\r\n natsort_key = _natsort_key_case_insensitive\r\n\r\n alist = list(seq)\r\n alist.sort(key=natsort_key)\r\n\r\n return alist", "def sort_words_case_insensitively(words):\r\n numbers = list()\r\n strings = list()\r\n for word in words:\r\n if word[0].isdigit():\r\n numbers.append(word)\r\n else:\r\n strings.append(word)\r\n\r\n numbers = sorted(numbers)\r\n strings = sorted(strings, key=str.casefold)\r\n return strings+numbers", "def sort(self, input):\n regex = re.compile('[%s]' % re.escape(string.punctuation))\n normal_input = regex.sub('', input.lower())\n array = list(normal_input.replace(' ',''))\n array.sort()\n return ''.join(array)", "def sort_nicely(l): \n import re\n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key=alphanum_key)", "def naturalSortKey(s):\n return [(str, int)[k](\"\".join(v)) for k, v in groupby(s, str.isdigit)]", "def alphabetize(value):\n if isinstance(value, list) and all(isinstance(s, str) for 
s in value):\n return sorted([s.lower() for s in value])\n else:\n raise TypeError(\"Argument must be a list of strings\")", "def sort_prices(list_of_tuples):\n list_of_tuples.sort(key = get_price, reverse = True)\n return list_of_tuples", "def natsort_icase(lst):\n lst.sort(key=natsort_key_icase)", "def sorted_nicely( l ): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def sorted(cls, tags: list, reverse: bool = False) -> list:\n return sorted(tags, key=lambda x: x.name.lower(), reverse=reverse)", "def sorting(recommendation: List[Tuple[str, int]]) -> None:\n \n for tup in range(len(recommendation)):\n score = recommendation[tup][1]\n alpha = recommendation[tup][0]\n for j in range(tup + 1, len(recommendation)):\n if recommendation[j][1] > score or \\\n (recommendation[j][1] == score and recommendation[j][0] < alpha):\n recommendation[j], recommendation[tup] = recommendation[tup], \\\n recommendation[j]", "def sort_words(words):\n return sorted(words)", "def name_mapper(tup):\n tup = tuple(ix2name[i] for i in tup)\n\n def s(t):\n return sorted(t, key=self.clade_order)\n left, right = sorted((s(tup[:2]), s(tup[2:])))\n return (*left, *right)", "def sort_by_unicode(self):\n utils.sort_unicode_word_list(self.words_new)", "def _sorted_occurrence_tuples(\n occurrences: Dict[str, List[int]]\n ) -> List[Tuple[str, int]]:\n return sorted(\n ((raw, idx) for raw in occurrences.keys() for idx in occurrences[raw]),\n # Sort first by position, then by lexical (for stability)\n key=lambda x: (x[1], x[0]),\n )", "def sort_string(raw_str):", "def sort_suggestions(\n suggestions: List[Tuple[Set[str], float]]\n) -> List[Tuple[Set[str], float]]:\n confidence_list = [suggestion[1] for suggestion in suggestions]\n sort_index = sorted(range(len(confidence_list)), key=lambda k: confidence_list[k])\n # Inverse the sort\n sort_index = sort_index[::-1]\n return [suggestions[i] for i in sort_index]", "def _alphanumeric_sort(iterable):\n convert = lambda text: int(text) if text.isdigit() else text\n sort_key = lambda k: [convert(c) for c in re.split('([0-9]+)', k)]\n return sorted(iterable, key=sort_key)", "def word_count_sort(word_count_list):\n\n for index in range(1, len(word_count_list)):\n # initialize pointers\n value = word_count_list[index] # starts at the tuple in index 1\n position = index - 1 # initialize to start at 0\n\n # move items to a higher index position while their value is less than the value at the next index\n # compare values in tuple[1] but swap entire tuple\n while position >= 0 and word_count_list[position][1] < value[1]:\n word_count_list[position + 1] = word_count_list[position] # swap the tuple at position into next index\n position -= 1 # decrement to fill lower index and break loop\n\n word_count_list[position + 1] = value # move higher number left one index\n\n return word_count_list", "def sorted_nicely(l, key):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda item: [ convert(c) for c in re.split('([0-9]+)', key(item)) ]\n return sorted(l, key = alphanum_key)", "def sorted_nicely(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key=alphanum_key)", "def sorted_nicely( l ):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in 
re.split('([0-9]+)', key)]\n return sorted(l, key = alphanum_key)", "def tuple_to_ordered(mytuple):\n ac_ord = OrderedDict()\n first = mytuple[0]\n if type(first) is str:\n if (len(mytuple)==2) and not (type(mytuple[1]) is str) :\n reval = tuple_to_ordered(mytuple[1])\n ac_ord[mytuple[0]] = reval\n else:\n return(mytuple)\n else:\n for (key,val) in mytuple:\n reval = tuple_to_ordered(val)\n ac_ord[key] = reval\n return(ac_ord)", "def sortednameslist(nameslist):\n sortednames = sorted(nameslist, key=lambda x: x[1])\n return sortednames", "def _sorted_items(x):\n return sorted(x.items(), key=lambda x: x[0])", "def _sort_by_name(bam_fn):", "def alphabetical_sorted(iterable, cmp=None, key=lambda x: x.lower(),\n reverse=False):\n return sorted(iterable, cmp, key, reverse)", "def natsorted_icase(lst):\n return sorted(lst, key=natsort_key_icase)", "def NaturalSort(l):\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n return sorted(l, key = alphanum_key)", "def _natsort_key(item, case_sensitivity=False):\r\n item = str(item)\r\n\r\n try:\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item)\r\n except TypeError:\r\n # if item is a tuple or list (i.e., indexable, but not a string)\r\n # work with the first element\r\n chunks = re.split('(\\d+(?:\\.\\d+)?)', item[0])\r\n for ii in range(len(chunks)):\r\n if chunks[ii] and chunks[ii][0] in '0123456789':\r\n if '.' in chunks[ii]:\r\n numtype = float\r\n else:\r\n numtype = int\r\n # wrap in tuple with '0' to explicitly specify numbers come first\r\n chunks[ii] = (0, numtype(chunks[ii]))\r\n else:\r\n chunks[ii] = (1, chunks[ii])\r\n return (chunks, item)", "def sort_4(l):\n l = list(set(l))\n l.sort()", "def mech_tuples_sorted(self):\n return sorted(self.mech_tuples, key=lambda mech_tuple: mech_tuple[0].name)", "def _sort_natural(names_list, reverse=False):\n def sort_key(val):\n return [int(s) if s.isdigit() else s for s in re.split(r'(\\d+)', val)]\n\n return sorted(names_list, key=sort_key, reverse=reverse)", "def sortTermsAlphabetically(terms):\n # Tutorial for sorting credit:\n # https://www.geeksforgeeks.org/ways-sort-list-dictionaries-values-python-using-lambda-function/\n sorted_list = sorted(terms, key=lambda i: (i[\"term_header\"], i[\"rating\"]))\n return sorted_list", "def front_x(some_list):\n #This funstion will sort every element in list but every element that start with \"x\" come first.\n new_list = []\n new_list_x = []\n for k in some_list:\n #print(k)\n if k.find(\"x\") == 0:\n #print(k.find(\"x\"))\n new_list_x.append(k) \n else:\n new_list.append(k)\n return sorted(new_list_x) + sorted(new_list)", "def sort_mss(ms_list):\n return sorted(ms_list, key=lambda x: witintify(x))", "def sort_slide_names(l): \n convert = lambda text: int(text) if text.isdigit() else text \n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key) ] \n return sorted(l, key = alphanum_key)", "def _sorted_nicely(self, l):\n\n import re\n\n convert = lambda text: int(text) if text.isdigit() else \"\"\n\n alphanum_key = lambda key: [\n convert(c) for c in re.split(\n '([0-9]+)', key)]\n\n return sorted(l, key=alphanum_key)", "def natsort(lst):\n lst.sort(key=natsort_key)", "def natsorted(lst):\n return sorted(lst, key=natsort_key)", "def sort_nicely(l):\n\n def tryint(s):\n try:\n return int(s)\n except:\n return s\n\n def alphanum_key(s):\n \"\"\" Turn a string into a list of string and number chunks.\n \"z23a\" -> [\"z\", 23, \"a\"]\n \"\"\"\n return 
[tryint(c) for c in re.split('([0-9]+)', s)]\n\n l.sort(key=alphanum_key)\n return l", "def process(self, element, **kwargs):\n word, next_list = element\n next_list = list(next_list) # per eseguire funzioni su liste su cloud (eg: sort) forzare i tipi!\n next_list.sort(key=lambda wc_tuple: (-wc_tuple[1], wc_tuple[0]))\n most_likely_successor = next_list[0][0]\n return [(word, most_likely_successor)]", "def format_words(words):\n return sorted(words, key=str.lower)", "def sortn(xs):\n return sorted(xs, key=sortnkey)", "def sort_by_surname_desc(names):\n names = dedup_and_title_case_names(names)\n names1 = []\n for n in names:\n x = n.split(\" \")\n names1.append(x[1] + \" \" + x[0])\n return names1\n # ...", "def anythingSort(L):\n return internalSort(L, 0, len(L) - 1)", "def _key_func_1(entry: tuple[str, list]) -> tuple[tuple[int, str], str]:\n key, (_targets, _sub_items, category_key) = entry\n if category_key:\n # using the specified category key to sort\n key = category_key\n lc_key = unicodedata.normalize('NFD', key.lower())\n if lc_key.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n lc_key = lc_key[1:]\n\n if not lc_key[0:1].isalpha() and not lc_key.startswith('_'):\n # put symbols at the front of the index (0)\n group = 0\n else:\n # put non-symbol characters at the following group (1)\n group = 1\n # ensure a deterministic order *within* letters by also sorting on\n # the entry itself\n return (group, lc_key), entry[0]", "def to_sorted_points(x):\n return tuple(sorted(x))", "def sort_col(col):\n return (col[0], sorted(col[1], key=lambda pair: pair[0]))", "def sort_L3():\n for item in d_list:\n item.sort(key=operator.itemgetter(1))", "def listify(words):\n word_list = []\n for word in words:\n if word:\n word = word.lower()\n if word not in word_list: # add it\n word_list.append(word)\n else:\n pass\n word_list.sort()\n return word_list", "def solve(words):\n result = defaultdict(list)\n for s in words:\n result[tuple(sorted(s))].append(s)\n print(result.values())", "def insertion_sort_single_alpha(arr:Sequence[AlphaList]) -> AlphaList:\n lsi = 1\n while lsi <= len(arr)-1:\n insert_into = 0\n compare = arr[lsi]\n for idx in range(lsi,-1,-1):\n if ord(compare) > ord(arr[idx]):break\n insert_into = idx\n del arr[lsi]\n arr.insert(insert_into,compare)\n lsi += 1\n return arr", "def keysort(*args, **kwargs): # real signature unknown\n pass", "def sort_unit_lst(self, attrname, lst2sort):\n comp = []\n for unit in lst2sort:\n importance = self._importance_rank(unit, attrname)\n comp.append((unit, importance))\n comp = sorted(comp, key= lambda x: x[1], reverse=True)\n\n return [x[0] for x in comp]", "def langsort (lst, lang=None):\n\n reset_locale = _set_lang_locale(lang)\n lst.sort(locale.strcoll)\n reset_locale()", "def sort(self,desc):\n\tself.__sort(\"\",\"\",desc)", "def _process_sorts(sorts: Iterable[Union[str, Tuple[str, bool]]]) -> List[str]:\n processed: List[str] = []\n for sort in sorts:\n # Plain strings can be kept as-is\n if isinstance(sort, str):\n processed.append(sort)\n # Non-string objects are expected to be tuples showing sort order\n else:\n try:\n field, sort_order = sort\n except ValueError as err:\n raise ValueError(f'Invalid sort key: {sort}') from err\n processed.append(field if sort_order else f'{field}:-1')\n return processed", "def _toposort_with_ordered_mech_tuples(self, data):\n result = []\n for dependency_set in toposort(data):\n d_iter = iter(dependency_set)\n result.extend(sorted(dependency_set, key=lambda item : next(d_iter).mechanism.name))\n return result", 
"def sort_nicely(alist, dict_key=None):\n convert = lambda text: int(text) if text.isdigit() else text\n if dict_key is None:\n alphanum_key = lambda key: [ convert(c) for c in re.split('([0-9]+)', key) ]\n else:\n alphanum_key = operator.itemgetter(dict_key)\n alist.sort(key=alphanum_key)" ]
[ "0.70666724", "0.6958094", "0.6883425", "0.68414825", "0.6625755", "0.65509486", "0.64947134", "0.64417565", "0.6294914", "0.62795585", "0.6264345", "0.6251216", "0.6234281", "0.62279594", "0.62186223", "0.62157", "0.6190618", "0.6180634", "0.6172756", "0.61627626", "0.6156476", "0.61263466", "0.61263466", "0.61226153", "0.6100449", "0.60908145", "0.60905695", "0.60773146", "0.606564", "0.6061432", "0.6058755", "0.6057032", "0.60505843", "0.6012426", "0.6004046", "0.5989858", "0.59822994", "0.59433496", "0.59293133", "0.58837384", "0.5865551", "0.58196056", "0.58106285", "0.5794779", "0.5777007", "0.57724935", "0.5755805", "0.57386863", "0.5723268", "0.57138246", "0.5712057", "0.57074285", "0.56986785", "0.56966525", "0.5691579", "0.5684014", "0.56797904", "0.56794673", "0.5674445", "0.5665229", "0.56547683", "0.5652357", "0.5642202", "0.56350046", "0.5627387", "0.56270653", "0.56187207", "0.5609969", "0.5603082", "0.55947506", "0.55927914", "0.55803496", "0.5551032", "0.55484533", "0.5547578", "0.5539722", "0.55396205", "0.5538735", "0.55372316", "0.55339473", "0.55337065", "0.55204713", "0.55195016", "0.5508061", "0.5502945", "0.5497608", "0.5486653", "0.54828024", "0.5480453", "0.5473431", "0.5463903", "0.5462426", "0.5459472", "0.54582566", "0.54528224", "0.5441359", "0.5435974", "0.5427096", "0.54173326", "0.54167205" ]
0.8253932
0
Use sys.stdout.write to write the string with an indentation equal to indent, followed by the specified end character
def write(string,indent=0,end=""):
    sys.stdout.write(" "*indent+string+end)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indentOut_ (stream, indent) :\r\n if indent == 0 :\r\n return\r\n else :\r\n stream.write(\" \"*indent)", "def out_indent(indent, *args):\n s = \"\"\n s += indent * \" \"\n s += \" \".join(args)\n return s", "def try_print_indent(self):\n if self.lasttoken[0] != lex.Token.NEWLINE:\n return\n\n if len(self.lasttoken[1]) > 0:\n self.buffer.scope_line(\"__io.write(u'\" + self.lasttoken[1] + \"')\")", "def indent(self):\n print (self.indent_size*self.n_indent*' ',end='',flush=True, file=self.fp)", "def printer(end,message):\n\n sys.stdout.write('\\r'+message+'\\t')\n sys.stdout.flush()\n if end: sys.stdout.write('\\n')", "def test_with_custom_indent(self):\n self.assertEqual(indent('foo', 3), ' foo')", "def write_text(self, token):\n self.try_print_indent()\n self.buffer.write_scope(\"__io.write(u'\")\n self.buffer.write(token)\n self.buffer.write_line(\"')\")", "def printIndent(s,lvl) :\n for line in s.split('\\n') :\n print('%s%s' % (' '*lvl,line))", "def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))", "def print(self, s, end='\\n'):\n self._output.write(str(s)+end)", "def console_print(out, *args, **kwargs):\n const_charset = stream_encoding(out)\n out.write(' '.join([a.encode(cons_charset, 'replace') for a in args]))\n if kwargs.get('newline', True):\n out.write('\\n')", "def indent(string, level=1):\n spaces = ' ' * (level * 4)\n return \"%s%s\" % (spaces, string)", "def indent(str, level):\n if level == 0: return str\n return \"\\n\".join(\"\\t\" * level + line for line in str.splitlines())", "def print_with_indent(*args):\n if INDENT_LEVEL:\n print(\"\\t\" * INDENT_LEVEL, end='')\n for arg in args:\n print(arg, end='')\n print()", "def print(*args, **kwargs):\n sep, file = kwargs.pop(\"sep\", b\" \"), kwargs.pop(\"file\", sys.stdout)\n at_start = True\n for x in args:\n if not at_start:\n file.write(sep)\n file.write(str(x))\n at_start = False\n file.write(kwargs.pop(\"end\", b\"\\n\"))\n if kwargs.pop(\"flush\", False):\n file.flush()", "def __indent_text_block(text):\n lines = text.splitlines()\n if len(lines) > 1:\n out = lines[0] + \"\\r\\n\"\n for i in range(1, len(lines)-1):\n out = out + \" \" + lines[i] + \"\\r\\n\"\n out = out + \" \" + lines[-1]\n return out\n return text", "def printc(txt):\n sys.stdout.write(txt)\n sys.stdout.write('\\n')", "def _newline(self):\n if prettyprint:\n return '\\n' + self._indent_spaces()\n else:\n return ''", "def indent(text, prefix, predicate=...): # -> str:\n ...", "def echo(string, end=\"\\n\"):\n\tprint(string, end=end)", "def tprint(msg, indent=0):\n\n print(\" \" * indent + \n \" \" * (indent+1) + \n \"'-- \" + msg)", "def Write(self, line='', *args, **kwargs):\n result = line % args\n offset = self._indent * 2 + kwargs.get('offset', 0)\n indent = ' ' * offset if result else ''\n self._out.write(indent + result + '\\n')", "def space():\n print(' ', end='')", "def indent(txt, indent_level):\n indent = \" \" * indent_level\n return \"\\n\".join(indent + x for x in txt.splitlines())", "def insert_indent(event):\n env = XSH.env\n event.cli.current_buffer.insert_text(env.get(\"INDENT\"))", "def write(string):\n\n\tsys.stdout.write(string)\n\tsys.stdout.flush()", "def print_substep(text, style=\"\"):\n console.print(text, style=style)", "def escaped_printer(to_write):\n # suppress(anomalous-backslash-in-string)\n to_write = to_write.replace(\";\", \"{c};\".format(c=char))\n to_write = to_write.replace(\"\\n\", \";\\n\") + \";\\n\"\n\n if file_object:\n 
file_object.write(to_write)\n else:\n sys.stdout.write(to_write)", "def get_indent(op):\n ret = \"\"\n for ii in range(op):\n # Would tab be better?\n ret += \" \"\n return ret", "def log(self, message, indent_amount=0):\n indent = \" \" * indent_amount\n text = \"{indent}{text}\\n\".format(indent=indent, text=message)\n sys.stdout.write(text)", "def test_with_multiple_lines(self):\n self.assertEqual(indent('foo\\nbar'),\n ' foo\\n bar')", "def write(msg, newline=True, flush=True):\n sys.stdout.write(msg)\n if newline:\n sys.stdout.write(\"\\n\")\n if flush:\n sys.stdout.flush()", "def indent(text, n=4):\n if not text:\n return \"\"\n i = \" \" * n\n return i + text.replace(\"\\n\", \"\\n\" + i)", "def tprint_raw(self, cmd, end='\\n'):\n self.fileHandle.write(cmd + end)", "def append_footer(cmd, marker):\n header = \"\"\n footer = \"\"\"\nEXIT_CODE=$?\necho {marker}code: ${{EXIT_CODE}}{marker}\necho {marker}pwd: $(pwd){marker}\necho {marker}env: $(cat -v <(env -0)){marker}\n\"\"\".format(\n marker=marker\n )\n\n full_command = \"\\n\".join([header, cmd, footer])\n\n return full_command", "def test_with_default_indent(self):\n self.assertEqual(indent('foo'), ' foo')", "def Indent(indents):\n return ' ' * (2 * indents)", "def block_indent(text, spaces=4):\n return '\\n'.join([(' ' * spaces) + l for l in pprint.pformat(text).splitlines()])", "def _indent_spaces(self):\n if prettyprint:\n return self.indentspace * self._indent_level\n else:\n return ''", "def p(s):\n\n out.write(s + \"\\n\")\n # sys.stdout.write(s + \"\\n\")", "def write_line(self, line):\n\n if line[0] != \"(\": # indent all non-label lines.\n line = \" \" * 4 + line\n self.fd.write(line)\n self.fd.write(os.linesep)", "def indent_cursor(cls):\n click.echo(' ' * cls.INDENT_SIZE, nl=False)", "def indent(value):\n return \" \" + value", "def Synopsis(self, line):\n nest = 0 # [...] nesting level.\n no_split = 0 # buf[no_split:i] should not be split across lines.\n # String append on buf used below because of no_split lookbehind.\n buf = ' ' * self._indent[0]\n n = len(buf) + 1\n i = 0\n while i < len(line):\n c = line[i]\n if c == self._csi_char:\n control_len = self._attr.GetControlSequenceLen(line[i:])\n if control_len:\n j = i\n i += control_len\n buf += line[j:i]\n continue\n if c == '[':\n # [...] nesting.\n nest += 1\n if nest == 1:\n # A new [...] group - don't split until the end of the group.\n no_split = len(buf)\n elif c in [']', ' ']:\n if c == ']':\n nest -= 1\n if not nest:\n # Outside [...]. OK to split at this point if needed.\n if n >= self._width:\n # Split the line up to no_split, eliminate trailing space and write\n # the line up to no_split.\n n = no_split\n while n > 0 and buf[n - 1] == ' ':\n n -= 1\n self._out.write(buf[:n] + '\\n')\n # Reset indentation for the next line which will start at no_split.\n buf = ' ' * self._indent[0] * 2 + buf[no_split:]\n n = len(buf) + 1\n elif c == ' ':\n # Space outside [...]. 
Set a new split point.\n no_split = len(buf)\n if c == ' ' and buf and buf[-1] == ' ':\n # Collapse adjacent spaces to one space.\n i += 1\n continue\n buf += c\n n += 1\n i += 1\n self._out.write(buf + '\\n\\n')", "def print_cmd(cmd):\n padding = \" \" * 80\n sys.stdout.write(\"\\r\"+padding)\n sys.stdout.write(\"\\r\"+prompt+cmd)\n sys.stdout.flush()", "def printSeq(startChar, endChar, seq, maxdepth):\n if maxdepth < 1: return '&'\n maxdepth -= 1\n return [startChar] + [cprint2(x, maxdepth) for x in seq] + [endChar]", "def uprint(pstr):\r\n\r\n sys.stdout.write(pstr)", "def StdOut(self, message):\n sys.stdout.write('{0:s}\\n'.format(message))\n sys.stdout.flush()", "def test_indent():\n\n multiline_string = \"\"\"test\ntest1\ntest2\ntest3\"\"\"\n\n indented_multiline_string = \"\"\" test\n test1\n test2\n test3\"\"\"\n\n assert indented_multiline_string == _indent(multiline_string, 4)", "def indent(text, indentation, width=None, pad_character=\" \"):\n\n text = pad_character * indentation + text\n length = len(text)\n if width is None or length >= width:\n return text\n else:\n return text + pad_character * (width - length)", "def write(self, command):\n if not command.endswith('\\n'):\n command += '\\n'\n self.rpc.call(MsfRpcMethod.ConsoleWrite, [self.cid, command])", "def test_newline_and_indent(self):\n before_b = \"\"\"\\\n first line\n line 1\n line a\n line b\n line c\n last line\n \"\"\"\n after_b = \"\"\"\\\n first line\n line 1\n \n line a\n line b\n line c\n last line\n \"\"\"\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.6\", \"2.6\"),\n after_sel=(\"3.4\", \"3.4\"),\n command_name=\"newline-and-indent\",\n )", "def pretty (value, stream=sys.stdout, starting_indent=0, indent_additive=4) :\r\n indentOut_(stream, starting_indent)\r\n pretty_print = 1\r\n specialStream_(value, stream, starting_indent-indent_additive, pretty_print, indent_additive)\r\n if type(value) in [list, dict, OrderedDict] :\r\n stream.write('\\n')", "def out(self, output, newline=True):\r\n self.stdout.write(output)\r\n if newline:\r\n self.stdout.write(os.linesep)", "def writec(text, color='black', style='normal'):\n\n sys.stdout.write(strc(text, color, style))", "def cool_print(self, text=str, newline=True, margin=21, rate=.02):\n print(\" \" * margin, end='')\n for letter in text:\n sleep(.02)\n stdout.write(letter)\n stdout.flush()\n if newline:\n print()", "def Write(self, data):\n print(data, end=u'')", "def output_sep_mark():\n print(sep_mark)", "def test_default_indent_and_newline(self):\n self.builder.dom_element.write_doc(self.iobytes, indent=True)\n self.assertEqual(\n u'<?xml version=\"1.0\" encoding=\"utf-8\"?>\\n'\n u'<DocRoot>\\n'\n u' <Elem1>默认جذ</Elem1>\\n'\n u' <Elem2/>\\n'\n u'</DocRoot>\\n'.encode('utf-8'),\n self.iobytes.getvalue())", "def Example(self, line):\n self._fill = self._indent[self._level] + self._INDENT\n self._out.write(' ' * self._fill + line + '\\n')\n self._blank = False\n self._fill = 0", "def _write_line(output, s):\n output.write(s)\n output.write(\"\\n\")", "def write(self, *args, **keys):\n output = self.format(*args, **keys)\n self.eol_pending = not output.endswith(\"\\n\")\n sys.stderr.flush()\n sys.stdout.write(output)\n sys.stdout.flush()", "def appendCode(dest, indent, text):\n\n dest.append(\"%s// START custom code\" % indent)\n for s in text.rstrip().split(\"\\n\"):\n dest.append(\"%s%s\" % (indent, s))\n dest.append(\"%s// END custom code\" % indent)", "def _spacer(self, msg):\n msg = str(msg)\n msg_len = len(msg)\n if msg_len == 
1:\n print(\" \", end=\"\")\n elif msg_len == 2:\n print(\" \", end=\"\")", "def reindent(text, indent):\n\n lines = textwrap.dedent(text).split('\\n')\n while lines and not lines[0].strip():\n lines.pop(0)\n while lines and not lines[-1].strip():\n lines.pop()\n return indent + ('\\n' + indent).join(lines)", "def adv_print(*args, start='', in_file = False, **kwargs):\n max_line = kwargs.pop('max_line', False)\n print(kwargs)\n old_stdout = sys.stdout\n value = StringIO()\n sys.stdout = value\n print(*args, **kwargs)\n sys.stdout = old_stdout\n value = value.getvalue()\n value = start + value\n if max_line:\n value = value[:max_line] + '\\n' + value[max_line:]\n if in_file:\n if 'filename' in kwargs:\n filename = kwargs['filename']\n else:\n filename = 'output.txt'\n with open(filename, 'w') as f:\n f.write(value)\n print(value)", "def write(self,data):\n\n if '\\n' in data:\n\n lines = data.split('\\n')\n write = self.stream.write\n indent = self._indent\n\n for l in lines[:-1]:\n if l:\n indent()\n write(l)\n write('\\n')\n self.needSpaces = 1\n\n if lines[-1]:\n indent()\n write(lines[-1])\n else:\n self._indent()\n self.stream.write(data)", "def indent(self):\n cursor = self.parent.textCursor()\n # Check if something is selected\n if cursor.hasSelection():\n # get the line/block nr\n temp = cursor.blockNumber()\n # Move to last line of the selection\n cursor.setPosition(cursor.selectionEnd())\n # calculate range of selection\n diff = cursor.blockNumber() - temp\n # Go over all the selected lines\n for n in range(diff + 1):\n cursor.movePosition(QTextCursor.StartOfLine)\n # insert tab\n cursor.insertText(\"\\t\")\n # move back up\n cursor.movePosition(QTextCursor.Up)\n else:\n # There is no selection, simply insert a TAB\n cursor.movePosition(QTextCursor.StartOfLine)\n cursor.insertText(\"\\t\")", "def _display(s):\n if not isinstance(s, unicode):\n s = s.decode(\"utf-8\")\n s = _indent(_escaped_text_from_text(s, \"whitespace\"), 4)\n if not s.endswith('\\n'):\n s += '\\n'\n return s", "def write(self, arg, move=False, align=\"left\", font=(\"Arial\", 8, \"normal\")):\n if self.undobuffer:\n self.undobuffer.push([\"seq\"])\n self.undobuffer.cumulate = True\n end = self._write(str(arg), align.lower(), font)\n if move:\n x, y = self.pos()\n # self.setpos(end, y)\n if self.undobuffer:\n self.undobuffer.cumulate = False", "def Indent( elem, level=0, indent=' ' ):\n i = \"\\n\" + level * indent\n if len( elem ):\n if not elem.text or not elem.text.strip():\n elem.text = i + indent\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n Indent( elem, level + 1, indent )\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and ( not elem.tail or not elem.tail.strip() ):\n elem.tail = i", "def indentation(self, pad, linepad, lang='c++', *args):\n pad.edit_separator()\n if lang == 'c++':\n curr = pad.get('1.0', GUI.INSERT)\n till_end = pad.get('1.0', GUI.END)\n indent = max(curr.count(\"{\") - curr.count('}'), 0)\n diff = till_end.count('{') - till_end.count('}')\n pad.insert(GUI.INSERT, ' ' * indent)\n cordinate = map(int, pad.index(GUI.INSERT).split('.'))\n if diff > 0:\n pad.insert(GUI.INSERT, '\\n' + ' ' * 4 * max(indent - 1, 0) + '}')\n pad.mark_set(GUI.INSERT, '%d.%d' % (cordinate[0], cordinate[1]))\n if lang == 'py':\n coordinates1 = map(int, pad.index(GUI.INSERT).split('.'))\n if coordinates1[0] != 1:\n coordinates = str(coordinates1[0] - 1) + '.0'\n r = pad.get(coordinates, coordinates + 'lineend')\n letters = list(str(r))\n cnt = 
0\n # find indentation level\n for i in letters:\n if i == ' ':\n cnt += 1\n else:\n break\n cnt = cnt / 4\n # check if indentation increasing keywords present\n f = 0\n for i in keywords['py']['loops']:\n if i in r:\n f = 1\n break\n\n if f:\n pad.insert(GUI.INSERT, (' ' * (cnt + 1) * 4))\n else:\n pad.insert(GUI.INSERT, (' ' * (cnt) * 4))\n self.linenumber(pad, linepad)", "def _print_separator():\n print(\n \"───── ──────────────── ──────────────────────────────────────────────────────────────────────────────── ──────── ───────── ───── ──────── ──── ──── ──── ──── ──── ──── ──── ──── ──── ────\"\n )", "def write(self, string):\n if self.out is not None:\n if self.first_write:\n self.first_write = False\n string = \"\\r\\n\" + string\n if self.color is not None:\n self.out.write(colored(string, self.color))\n else:\n self.out.write(string)\n # check for the split case\n if (\n len(self.parent.log) > 1\n and self.parent.log[-1] == \"\\r\"\n and string[0] == \"\\n\"\n ):\n tmp = \"\\n[%.6f]\" % time.process_time()\n tmp += string[1:]\n string = tmp\n to_log = re.sub(\"\\r\\n\", \"\\r\\n[%.6f]\" % time.process_time(), string)\n self.parent.log += to_log\n if hasattr(self.parent, \"test_to_log\"):\n self.parent.test_to_log.log += re.sub(\n r\"\\r\\n\\[\", \"\\r\\n%s: [\" % self.parent.test_prefix, to_log\n )", "def indent(self, amount: int = OutputFile.INDENT_WIDTH) -> Any:\n return self.output.indent(amount)", "def _print_with_depth(self, string, depth):\n print(\"{0}{1}\".format(\" \" * depth, string))", "def output_plain_sep_mark():\n print(plain_sep_mark)", "def hanging_indent(string: str, tab_width: int = 4) -> str:\n\n # Gets the terminal width\n num_col = shutil.get_terminal_size((80, 20)).columns\n\n if len(string) <= num_col:\n # Returns a clone of the string, not the original\n return string[:]\n\n # Creates a tab string\n tab = \" \"*tab_width\n\n result = string[:num_col]\n remaining = string[num_col:]\n while True:\n if len(remaining) > num_col - tab_width:\n result += tab + remaining[:num_col - tab_width]\n remaining = remaining[num_col - tab_width:]\n else:\n result += tab + remaining\n break\n\n return result", "def append(self, s):\n self.result += ' ' * self.indent + s + '\\n'", "def _write(message: Optional[str] = None) -> None:\n if message is not None:\n stdout(\"%s\\n\" % message)\n else:\n stdout(\"\\n\")", "def WriteLn(*args):\n for arg in args:\n sys.stdout.write(str(arg))\n sys.stdout.write('\\n')", "def indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i", "def printf(str):\r\n print(str, flush=True)", "def output(text):\n sys.stdout.write(text)", "def test_adjust_indent():\n hr.Element.indent = 2\n\n body = hr.Body()\n body.append(hr.P(\"some text\"))\n html = hr.Html(body)\n\n file_contents = render_result(html)\n\n print(file_contents)\n lines = file_contents.split(\"\\n\")\n for i in range(3): # this needed to be adapted to the <DOCTYPE> tag\n assert lines[i + 1].startswith(i * (\" \" * hr.Element.indent) + \"<\")\n\n assert lines[4].startswith(3 * (\" \" * hr.Element.indent) + \"some\")", "def test_writer_linebreak():\n GCMT(write=\"on\")\n write_message(100 * \"test\")\n write_message(100 * \" \")", "def _text_indent(text, indent):\n # 
type: (str, str) -> str\n lines = [line.strip() for line in text.strip().split('\\n')]\n return indent + indent.join(lines)", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def indent(self):\n self.indent_level += self.INDENT_STEP", "def py_repl_write(str):\n with Capturing() as output:\n interp.push(str)\n return \"\\n\".join(output)", "def putstr(s):\n print(s, end='', flush=True, file=tty)", "def printSeparator(count: int):\n if count == 0 or count == 3 or count == 6:\n print(\"|\", end='')\n return\n\n if count == 1 or count == 4 or count == 7:\n print(\"|\", end='')\n return\n\n if count == 2 or count == 5:\n print('')\n print(\"-+-+-\")\n return\n\n if count == 8:\n print('')\n return", "def WriteFooter(self):\n self.WriteText('}')", "def pretty_print(linenum, todo):\n\n global COUNTER\n comm_endings = ['\"\"\"', \"'''\", '*/', '-->', '#}', '--}}', '}}', '%>']\n for i in comm_endings:\n if todo.endswith(i):\n todo = todo[:-len(i)]\n print(' line', linenum.rjust(4), '>>\\t', todo )\n COUNTER += 1", "def indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i", "def indent(elem, level=0):\n i = \"\\n\" + level*\" \"\n if len(elem):\n if not elem.text or not elem.text.strip():\n elem.text = i + \" \"\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n for elem in elem:\n indent(elem, level+1)\n if not elem.tail or not elem.tail.strip():\n elem.tail = i\n else:\n if level and (not elem.tail or not elem.tail.strip()):\n elem.tail = i", "def _writeline(self, data):\n self._write(data+chr(13)+chr(10))", "def print( self, str, pos=None ):\n\t\tif pos:\n\t\t\tself.set_cursor( pos )\n\t\tself.write( str.encode(\"ASCII\") )", "def write(self, string):\n if self.out is not None:\n if self.first_write:\n self.first_write = False\n string = \"\\r\\n\" + string\n if self.color is not None:\n self.out.write(colored(string, self.color))\n else:\n self.out.write(string)\n current_time = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime())\n # check for the split case\n if (\n len(self.parent.log) > 1\n and self.parent.log[-1] == \"\\r\"\n and string[0] == \"\\n\"\n ):\n string = f\"\\n{current_time} {string[1:]}\"\n to_log = re.sub(\"\\r\\n\", f\"\\r\\n{current_time} \", string)\n self.parent.log += to_log\n if hasattr(self.parent, \"test_to_log\"):\n self.parent.test_to_log.log += re.sub(\n r\"\\r\\n\\[\", f\"\\r\\n{self.parent.test_prefix}: [\", to_log\n )", "def insert_newline():\r\n insert_char(\"\\n\")" ]
[ "0.7758625", "0.6414926", "0.6342674", "0.6169035", "0.6082922", "0.59543496", "0.5942104", "0.5933664", "0.58904225", "0.5866938", "0.5845034", "0.5830857", "0.5830536", "0.5825642", "0.5822062", "0.58123285", "0.58008015", "0.5715586", "0.5666716", "0.5651807", "0.56485546", "0.5639401", "0.5629479", "0.5616615", "0.56136316", "0.56098604", "0.5600371", "0.5595038", "0.5573551", "0.5537827", "0.5532421", "0.5529422", "0.5522187", "0.5520028", "0.5493092", "0.5487611", "0.5477603", "0.54662424", "0.54649454", "0.5446111", "0.54409033", "0.54408824", "0.54366004", "0.5428484", "0.54110485", "0.54046375", "0.53946716", "0.53932923", "0.5369866", "0.5367662", "0.53489876", "0.5348259", "0.5336288", "0.5330634", "0.532517", "0.5316772", "0.53147894", "0.53052515", "0.5303512", "0.52960074", "0.52896196", "0.52875954", "0.52863604", "0.5278465", "0.52779675", "0.52665216", "0.5261563", "0.5248451", "0.5240855", "0.52332306", "0.52266294", "0.5222768", "0.5221558", "0.5219342", "0.52176374", "0.5216597", "0.52133197", "0.5209587", "0.5201588", "0.51941234", "0.51904726", "0.5183275", "0.51776844", "0.5176583", "0.5173061", "0.51699305", "0.5168754", "0.516738", "0.516738", "0.5162344", "0.5159334", "0.5156149", "0.51517", "0.51515526", "0.51491994", "0.51491994", "0.51486015", "0.5137124", "0.5135488", "0.513078" ]
0.8376877
0
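A minimal usage sketch of the write helper shown in the document field of the row above. The definition is repeated only to make the snippet self-contained; the strings and indent values in the calls are illustrative assumptions, not part of the dataset.

import sys

def write(string,indent=0,end=""):
    # Emit `indent` leading spaces, then the string, then the end character.
    sys.stdout.write(" "*indent+string+end)

# Assumed sample calls: print a small two-level listing with increasing indentation.
write("tree", end="\n")
write("branch_a", indent=2, end="\n")
write("branch_b", indent=2, end="\n")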
Recursively print tree information
def recursifTreePrinter(tree,indent):
    listOfBranches = tree.GetListOfBranches()
    if len(listOfBranches) > 0: # Width informations
        maxCharName = max([len(branch.GetName()) \
            for branch in listOfBranches])
        maxCharTitle = max([len(branch.GetTitle()) \
            for branch in listOfBranches])
        dic = { \
            "nameWidth":maxCharName+2, \
            "titleWidth":maxCharTitle+4, \
            "memoryWidth":1}
    for branch in listOfBranches: # Print loop
        rec = \
            [branch.GetName(), \
            "\""+branch.GetTitle()+"\"", \
            str(branch.GetTotBytes())]
        write(TREE_TEMPLATE.format(*rec,**dic),indent,end="\n")
        recursifTreePrinter(branch,indent+2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def print_tree(self):\n\t\tself.root.print_recursive(0)", "def printTree(self):\n print(printTreeF(self, 0, self))", "def print_tree(node):\n print tree(node)", "def print_tree(self):\n return \"\"", "def print_tree(self):\n recur_print = self.recur_print(tree.root, '')[:-1]\n return recur_print", "def print_tree(tree, indent=0):\n for c in tree.children:\n print \" \" * indent, \"-->\", c.name\n \n if c.children != []:\n print_tree(c, indent+1)", "def print_tree(self):\n out = \"\"\n for i in range(self.level):\n out += ' |'\n out += '___'\n out += str(self.action)\n if self.action is None:\n print \"None\"\n else:\n print out\n for child in self.children:\n child.print_tree()", "def print_tree(tree):\n if not tree:\n print None\n return\n \n if tree.children:\n print 'Directory hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))\n print 'Contents:'\n for name, subtree in tree.children.iteritems():\n print\n print name\n print_tree(subtree)\n \n else:\n print 'File hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))", "def print_tree(self):\n self.__print_node(self.root, 0)", "def printTree(self):\n if self.left:\n self.left.printTree()\n print(self.data)\n if self.right:\n self.right.printTree()", "def print_tree(self):\n print(_Node.__print_tree(self))", "def print_tree(self, tree, nodes):\n\t\tprint(self.display(tree, nodes, '', True, ''))", "def print_tree(node):\r\n if node is None:\r\n return\r\n print_tree(node.left)\r\n print node.key\r\n print_tree(node.right)", "def printTree(self):\n\t\tprint 'Tree:'\n\t\tprint self.root.toString(0)\n\t\tprint '\\n'", "def print_tree(self, parser=None):\n for pre, _, node in RenderTree(self):\n print(pre + node._self_string(parser))", "def show_tree(obj,d=0):\n print \"%s%s\" % (\"-\"*d,obj.__class__.__name__)\n if 'get_children' in dir(obj):\n for a in obj.get_children(): show_tree(a,d+1)", "def print_tree(t, indent=0):\n print(' ' * indent + str(t.root))\n for b in t.branches:\n print_tree(b, indent + 1)", "def print_tree(node, depth=1):\n for child in node:\n print(\" \" * depth + child.get_name())\n print_tree(child, depth+1)", "def printTree(self):\r\n print(self.letter)\r\n if self.left:\r\n self.left.printTree()\r\n if self.right:\r\n self.right.printTree()", "def print_tree( root, level ):\n\n if not root.isleaf():\n print level*\"==\" + \"==> \", str(root), \"pointers\", len(root.pointers)\n for p in root.pointers:\n print_tree ( p, level+1 )\n else:\n print level*\"==\" + \"==> \", \n for t in root.tuples:\n print str(t), \n print \"\"", "def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' '*depth,'->',repr(tree[1]))\n return\n for subtree in tree[1]:\n print_tree(subtree, depth+1)", "def print_tree(t, indent=0):\n print(' ' * indent + str(label(t)))\n for b in branches(t):\n print_tree(b, indent + 1)", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def print_tree(t):\r\n if (t==None):\r\n return \r\n else:\r\n print_tree(left(t))\r\n print(value(t),end=\" \")\r\n print_tree(right(t))", "def print_tree(self, prefix=\"\"):\n print(\"%s%s\" % (prefix, self.node_label()))\n if self.left:\n self.left.print_tree(prefix + \" \")\n if self.right:\n self.right.print_tree(prefix + \" \")", 
"def print_tree(root):\n queue = [(root, [\"1\"])]\n while queue:\n this, depth = queue.pop(0)\n if isinstance(this, int):\n reprr = \"L %i\" % this\n else:\n reprr = str(this.attribute)\n for key, child in this.children.items():\n queue.append((child, depth + [\"%s\" % key]))\n print \"%s: %s\" % (\".\".join(depth), reprr)", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def print_tree(self):\n return self.preorder_print(self.root, \"\")[:-1]", "def recursive_print(root: Node, depth=0):\n if not root:\n return\n print(\n (\" \" * depth)\n + f\"({root.resource.order}, exec={root.resource.execution_ms:.3f}, \"\n + f\"ttfb={root.resource.time_to_first_byte_ms}, delay={root.resource.fetch_delay_ms:.3f}, \"\n + f\"size={root.resource.size} B, {ResourceType(root.resource.type).name}, {root.resource.url})\"\n )\n for next_node in root.children:\n recursive_print(next_node, depth + 1)", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def tree_print(clf, X):\n tlevel = _tree_rprint('', clf, X.columns, clf.classes_)\n print('<',end='')\n for i in range(3*tlevel - 2):\n print('-',end='')\n print('>')\n print('Tree Depth: ',tlevel)", "def print_node(node, depth=0):\n print(node.N, depth)\n if node.children:\n _, children = zip(*node.children.items())\n for child in children:\n Tree.print_node(child, depth=depth + 1)", "def print_tree(tree, indent=''):\n\n for branch in tree:\n if type(branch) == list and branch != []:\n print_tree(branch, indent + ' ')\n else:\n if branch != []:\n print(indent + str(branch))", "def print_bi_tree(self):\n\n to_print = [self]\n # current = None\n\n while to_print:\n current = to_print.pop(0)\n if current:\n print(f'\\t{current.data}')\n to_print.append(current.left)\n to_print.append(current.right)", "def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')", "def tree(self, depth_index=0):\r\n print(self.tree_str(depth_index))", "def tree(node):\n subtrees = []\n for arg in node.args:\n subtrees.append(tree(arg))\n s = print_node(node)+pprint_nodes(subtrees)\n return s", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def print_tree(t, indent=0, end='\\n'):\n if isinstance(t, Leaf):\n print(t, end='')\n else:\n s = '(' + t.tag + ' '\n indent += len(s)\n print(s, end='')\n print_tree(t.branches[0], indent, '')\n for b in t.branches[1:]:\n print('\\n' + ' '*indent, end='')\n print_tree(b, indent, '')\n print(')', end=end)", "def print_tree(self, root=None, level=0):\n if not root:\n root = self.root\n\n for lv in range(level):\n print ' ',\n try:\n print root.get_properties()\n except Exception as ex:\n print 'data: %s' % repr(root.get_properties())\n\n query = 'START s=node(%s)\\n' % root._id +\\\n 'MATCH (s)-[r]->(c)\\n' + \\\n 'RETURN c'\n records = neo4j.CypherQuery(self.db_handler, query).execute()\n\n nodes = [record.values[0] for record in records.data]\n for node in 
nodes:\n self.print_tree(node, level + 1)", "def print_tree(tree, indent=0, use_symbols=False):\n if use_symbols:\n if indent == 0:\n print_tree_symbol(tree, indent)\n indent += 1\n\n for c in tree.children:\n print_tree_symbol(c, indent)\n\n try:\n if c.children:\n print_tree(c, indent + 1, use_symbols)\n except:\n pass\n else:\n for c in tree.children:\n print(\" \" * indent, \"-->\", c.name)\n\n try:\n if c.children:\n print_tree(c, indent + 1)\n except:\n pass", "def printTree(self, tree, str):\n\n\t\tif type(tree) == dict:\n\t\t\tfor item in list(tree.values())[0].keys():\n\t\t\t\t\tprint(\"%s %s = %s \" % (str, list(tree.keys())[0], item))\n\t\t\t\t\tself.printTree(list(tree.values())[0][item], str + \"\\t\")\n\t\telse:\n\t\t\tprint(\"%s -> %s = %s\" % (str, self.targetName, tree))", "def PrintTree(self,num=0):\n self.ResetTarget()\n self.PrintTreeInt(num)\n return self.target", "def printTree(self):\n print self.storeTree.movies", "def __repr__(self):\n return show_tree(self, lambda node: node.name,\n lambda node: node.children)", "def _print_nodes(node):\n if node.has_left_child():\n BinarySearchTree._print_nodes(node.left)\n print(node.value)\n if node.has_right_child():\n BinarySearchTree._print_nodes(node.right)", "def printTree(tree, fromNode=None, printDirectory = False):\r\n if fromNode == None:\r\n fromNode = tree.root\r\n print fromNode.name\r\n tree.printChildrenOfNode(fromNode, printDirectory)", "def printChildrenOfNode(tree, node, printDirectory = False):\r\n if node.children:\r\n for child in node.children:\r\n tree.printDepth = tree.printDepth+1\r\n if printDirectory:\r\n print (\"| \"*tree.printDepth), child.directory\r\n else:\r\n print (\"| \"*tree.printDepth), child.name\r\n if child.children:\r\n tree.printChildrenOfNode(child, printDirectory)\r\n else:\r\n tree.printDepth = tree.printDepth-1\r\n \r\n tree.printDepth = tree.printDepth-1", "def print(self, root):\n\n depth = self.depth(root)\n for i in range(1, depth + 1):\n print(\"\\n***\", \"Level\", i, \"*********************************\")\n self.print_level(root, i)", "def draw_tree(self):\n\n print \"--- \" + str(self.name)\n \n def draw_child_tree(current, depth):\n \n for c in current.children:\n print depth * \" \" + \"|-- \" + str(c.name)\n if hasattr(c, 'children'):\n draw_child_tree(c, depth + 1)\n \n draw_child_tree(self, 1)\n \n return", "def print_tree(account, level=0):\r\n \"\"\" In the example output below, \"GE\" is the root account, \"Jet Engines\"\r\n and \"Appliances\" are first-degree ChildAccounts, and \"DoD Contracts\"\r\n and \"Washing Machines\" are second-degree ChildAccounts.\r\n\r\n > print_tree(general_electric)\r\n GE (Manufacturing, R&D): Daniel Testperson\r\n Jet Engines (Manufacturing, R&D, Aerospace): Daniel Testperson\r\n DoD Contracts (Defense, R&D, Aerospace): William Testperson\r\n Appliances (Manufacturing, Consumer Goods): Janet Testperson\r\n Washing Machines (Consumer Goods): Janet Testperson\r\n \"\"\"\r\n markets_output = \"\"\r\n # work a little magic to properly format the names of the market segments\r\n # specifically strip off the leading and trailing quotes and add a\r\n # separating comma\r\n for market in account.get_market_segments():\r\n markets_output += market.name.strip(\"\\'\") + \", \"\r\n markets_output = markets_output.strip(\"\\'\")\r\n\r\n # print a row to console\r\n print(\"{arrow}> {ac_name} ({markets}): {rep}\"\r\n .format(arrow=2*level*\"-\",\r\n ac_name=account.name,\r\n markets=markets_output[:-2],\r\n rep=account.get_sales_rep()))\r\n\r\n # 
recursively call print on the children (if any) Base Case: no children\r\n for child in account.get_children():\r\n print_tree(child, level=level+1)", "def print_tree(node, spacing=\"\"):\n\n # Base case: we've reached a leaf\n if isinstance(node, Leaf):\n print (spacing + \"Predict\", node.predictions)\n return\n\n # Print the question at this node\n print(spacing + node.question.text())\n\n # Call this function recursively on the true branch\n print (spacing + '--> True:')\n print_tree(node.trueBranch, spacing + \" \")\n\n # Call this function recursively on the false branch\n print (spacing + '--> False:')\n print_tree(node.falseBranch, spacing + \" \")", "def pretty_print(self):\n print(self.root)\n self.__pretty_print__(self.root, depth=1)", "def _print_tree(self, tree, current_depth=0):\n if 'surv' in tree:\n self._print_with_depth(tree['times'], current_depth)\n return\n self._print_with_depth(\n \"{0} > {1}\".format(self.column_names[tree['feature']],\n tree['threshold']),\n current_depth)\n self._print_tree(tree['left'], current_depth + 1)\n self._print_tree(tree['right'], current_depth + 1)", "def debug_node(fdt, node, depth, path):\n depth += 1\n path = path + node.get_name() + b'/'\n print()\n print(colored(\"Tree:\", 'cyan'), \"-> \", colored(path.decode('ascii'), 'green'), '{')\n for key in node.keys():\n print(colored(\"Node:\", 'cyan'), \"-> \", \" \" * depth, key, \"=\", colored(node[key], 'yellow'))\n for leaf in node.get_children():\n debug_node(fdt, leaf, depth, path)\n print(colored(\"Tree:\", 'cyan'), \"-> \", \" \" * depth, \"};\")", "def display(self, tree, level = 0):\n\t\tresult = \"\"\n\t\tfor name, node in tree.soon:\n\t\t\tresult += \" \"*level+repr(node)+\"\\n\"\n\t\t\tresult += self.display(tree.getSoon(name),level + 1)\n\t\treturn result", "def pprint_nodes(subtrees):\n def indent(s,type=1):\n x = s.split(\"\\n\")\n r = \"+-%s\\n\"%x[0]\n for a in x[1:]:\n if a==\"\": continue\n if type==1:\n r += \"| %s\\n\"%a\n else:\n r += \" %s\\n\"%a\n return r\n if len(subtrees)==0: return \"\"\n f=\"\";\n for a in subtrees[:-1]:\n f += indent(a)\n f += indent(subtrees[-1],2)\n return f", "def print_tree(self, feature_names, class_names, show_details=True):\n self.tree_.print_tree(feature_names, class_names, show_details)", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def pprint(self,indent=0,node=None):\n if node == None:\n node = self.root\n if node == None:\n print_indent(indent)\n print \"[empty tree]\"\n return\n if node.type == 'v':\n print_indent(indent)\n print node.value\n elif node.type == 's':\n for (val,c) in node.children.iteritems():\n print_indent(indent)\n print \"-\",self.keys[node.feature],\"=\",val,\":\"\n self.pprint(indent+1,c)\n elif node.type == 'i':\n print_indent(indent)\n print self.keys[node.feature],\"<=\",node.value,\":\"\n self.pprint(indent+1,node.children[0])\n print_indent(indent)\n print self.keys[node.feature],\">\",node.value,\":\"\n self.pprint(indent+1,node.children[1])", "def pprint(tree):\n p = PrettyPrinter(indent=2)\n p.pprint(tree)", "def _print_all_descendants_rec(self, node, level):\n if level == 0:\n print(\"|---\" + str(node))\n \n if node.get_children():\n level += 1\n for child in node.get_children():\n string = \"| \"*level\n print(string + \"|---\" + str(child))\n self._print_all_descendants_rec(child, level)\n return\n else:\n if level == 0:\n string = 
\"\"\n else:\n string = \"|\" + (\" \"*level)\n return", "def printTree(rootNode, level = 0):\n \n if rootNode:\n print(\" \" * level, rootNode.split, \"CLASS:\", rootNode.data)\n printTree(rootNode.lesser, level + 3)\n printTree(rootNode.greater, level + 3)", "def dft_print(self):\n #print(self.value)\n #if self.left:\n # self.left.dft_print()\n #if self.right:\n # self.right.dft_print()\n stack = []\n stack.append(self)\n while len(stack):\n current = stack.pop()\n print(current.value)\n if current.left:\n stack.append(current.left)\n if current.right:\n stack.append(current.right)", "def show_info(self):\r\n if not self.parent and self.key:\r\n print(\"######### ROOT #########\")\r\n print(\"------------------------\")\r\n print(\"key: %s\" % self.key)\r\n print(\"value: %s\" % self.value)\r\n print(\"color: %s\" % self.get_color())\r\n\r\n try:\r\n print(\"left_child: %s\" % self.left_child.key)\r\n print(\"right_child: %s\" % self.right_child.key)\r\n print(\"parent: %s\" % self.parent.key if self.parent else \"parent: None\")\r\n print(\"size_tree: %s\" % self.size_tree)\r\n except:\r\n pass\r\n print(\"------------------------\")", "def __repr__(self):\n return self.displayTree(0)", "def pretty_print(tree, depth=0):\r\n if depth == 0:\r\n print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1} {2}]'.format(split_criterion[0], split_criterion[1], split_criterion[2]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n pretty_print(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n \n print(\"=========================================================\")", "def print_tree(node, val='', tabs=0):\n align = get_tabs(tabs)\n if isinstance(node, Leaf):\n print(align + str(val))\n print(get_tabs(tabs), str(node))\n return\n print(align + str(val))\n print(align + str(node))\n print_tree(node.true_branch, True, tabs + 1)\n print_tree(node.false_branch, False, tabs + 1)", "def printGeneration(tree):\n for mod in tree:\n if mod.param != []:\n print(str(mod.symbol) + str(mod.param).replace(\"[\",\"(\").replace(\"]\",\")\"),end=\"\")\n else:\n print(str(mod.symbol),end=\"\")\n print(\"\")", "def print(self) -> None:\n\n print('')\n print(f\"{self.get_name()}, {self.get_description()}\")\n print('-------------')\n for child in self._children:\n child.print()", "def print_tree(self, use_short_ids=True):\r\n def short_id(node):\r\n return node.short_id\r\n def id(node):\r\n return node.data.id\r\n\r\n node_fn = short_id if use_short_ids else id\r\n self._logger.debug(\"deps = {\")\r\n for node in self.nodes:\r\n self._logger.debug(\r\n \"\"\" \"%s\": {\"num\": %d, \"children\": [%s]},\"\"\" % (\r\n node_fn(node),\r\n node.data.num_sources,\r\n ','.join(['\"%s\"' % node_fn(child) for child in node.children]))\r\n )\r\n self._logger.debug('}')\r\n self._logger.debug('')", "def print(self):\n dot = \"digraph G {\\nrankdir = UD\\n\"\n\n for i in range(len(self.allNodes)):\n if self.allNodes[i].left is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].left.key) + \"\\n\"\n if self.allNodes[i].right is not None:\n dot += str(self.allNodes[i].key) + \" -> \" + str(self.allNodes[i].right.key) + \"\\n\"\n\n dot += \"}\"\n\n file = 
open(\"outputfiles/BinTree.dot\", \"w\")\n file.write(dot)\n file.close()\n\n os.system(\"dot outputfiles/BinTree.dot -Tpng -o outputfiles/BinTree.png\")", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr = tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def print_tree(tree, pref=\"\"):\r\n leaf = \"|_____> \"\r\n top = \"|_______\"\r\n son1 = \"| \"\r\n son2 = \" \"\r\n width = len(top)\r\n\r\n a = \"\"\r\n if len(tree) == 3:\r\n if (pref == \"\"):\r\n a += pref + str(tree[0]) + \"\\n\"\r\n else:\r\n a += pref[:-width] + top + str(tree[0]) + \"\\n\"\r\n a += print_tree(tree[1], pref + son1)\r\n a += print_tree(tree[2], pref + son2)\r\n return a\r\n\r\n else:\r\n return (pref[:-width] + leaf + str(tree) + \"\\n\")", "def _print_structure(self):\n if self._isthisapropertree() is False:\n print(\"ERROR: this is not a proper Binary Search Tree. ++++++++++\")\n outstr = str(self._element) + \" (hgt=\" + str(self._height) + \")[\"\n if self._leftchild is not None:\n outstr = outstr + \"left: \" + str(self._leftchild._element)\n else:\n outstr = outstr + \"left: *\"\n if self._rightchild is not None:\n outstr += \"; right: \" + str(self._rightchild._element) + \"]\"\n else:\n outstr = outstr + \"; right: *]\"\n if self._parent is not None:\n outstr = outstr + \" -- parent: \" + str(self._parent._element)\n else:\n outstr = outstr + \" -- parent: *\"\n print(outstr)\n if self._leftchild is not None:\n self._leftchild._print_structure()\n if self._rightchild is not None:\n self._rightchild._print_structure()", "def visualize_tree(root):\n _visualize_tree(root, [], 0, '-')", "def tree(ctx):\n root_cmd = _build_command_tree(ctx.find_root().command)\n _print_tree(root_cmd)", "def print_cr_tree(self, tree):\n str = ''\n try:\n if not tree: return \"None\"\n else:\n for x in tree: str += \" \" + x.name\n except TypeError: return tree.name\n return str", "def print_phpsyntax_tree(tree):\n import queue\n q = queue.Queue()\n indent = 10\n\n q.put(tree)\n while q.not_empty:\n item = q.get()\n s = \"\"\n for c in item.children:\n s = s + \" \" * indent + c.name\n q.put(c)\n print(s)\n indent -= 1", "def printLevelOrder(root):\n print(\"---- printing below the level traversal of the tree -----\")\n h = height(root) \n for i in range(1, h+1): \n printGivenLevel(root, i) \n print(\"=========================================================\")", "def print_node_tree(node, level=0):\n str_builder = []\n if node:\n str_builder.append(print_node_tree(node.right, level + 1))\n str_builder.append(\"| \" * level)\n str_builder.append(\n ''.join([str(node.value), \" - \", str(node.level), \"\\n\"]))\n str_builder.append(print_node_tree(node.left, level + 1))\n return ''.join(str_builder)", "def paths_print(atree):\n\n l = atree.pathFromHere_explore('/')\n for d in l:\n print(d)", "def display_tree(self, tree_node, spacing=\"\"):\n if tree_node is None:\n return\n else:\n print(spacing + str(tree_node.val))\n spacing += \" \"\n self.display_tree(tree_node.left, spacing)\n self.display_tree(tree_node.right, spacing)", "def view_tree(self, node=None, prefix=' ', connection=None):\n\n 
connection = connection or self.engine.connect()\n\n if not node:\n # get roots\n roots = self.get_roots(connection)\n\n if not roots:\n print('No root nodes found.')\n return\n\n for node in roots:\n # print tree for each root\n self.view_tree(node, connection=connection)\n print()\n\n return\n else:\n node_title = node.title\n node_id = node.descendant\n\n # print the current node\n print('{}({}, {})'.format(prefix, node_id, node_title))\n\n # fetch the children for the current node\n children = self.get_descendants(node_id, connection)\n\n # print the tree for each node\n prefix += '. '\n for child in children:\n self.view_tree(child, prefix=prefix, connection=connection)", "def pretty_print(self):\n return self.tree.pretty_print()", "def printTree(tree):\n keys = tree.keys()\n keys.sort() #print in alphabetical order\n for key in keys: #each value dictionary only has one entry, so this works\n print key, tree[key].keys()[0], tree[key].values()[0]", "def showFileTree():\n\treturn 0", "def printTreeF(node, n, root):\n out = \"\"\n if isinstance(node, DecisionTree):\n out += \"..\" * n + \"[atr \" + str(node.i) + \" < %.2f\" % node.v + \"]\\n\"\n out += printTreeF(node.lt, n + 1, root)\n out += printTreeF(node.gt, n + 1, root)\n else:\n out += \"..\" * n + root.getString(node) + \"\\n\"\n return out", "def visualize(tree, depth=0):\r\n\r\n if depth == 0:\r\n print('TREE')\r\n\r\n for index, split_criterion in enumerate(tree):\r\n sub_trees = tree[split_criterion]\r\n\r\n # Print the current node: split criterion\r\n print('|\\t' * depth, end='')\r\n print('+-- [SPLIT: x{0} = {1}]'.format(split_criterion[0], split_criterion[1]))\r\n\r\n # Print the children\r\n if type(sub_trees) is dict:\r\n visualize(sub_trees, depth + 1)\r\n else:\r\n print('|\\t' * (depth + 1), end='')\r\n print('+-- [LABEL = {0}]'.format(sub_trees))", "def print_val(tree):\n if tree == None:\n return\n\n # Prints the inorder sequence of the tree\n print_val(tree.get_left())\n print(tree)\n print_val(tree.get_right())", "def printSubmissionTree(self, padding=\"\"):\r\n print padding + \"Author: %s\" %self.authorId\r\n print padding + \"Content: %s\" %self.content\r\n\r\n if self.comments:\r\n print padding + \"Children:\" \r\n for c in self.comments:\r\n c.printSubmissionTree(padding+\" \")", "def print_itype_tree(node, one_handers=None, two_handers=None, current_depth=0):\n if not node:\n return\n\n output_str = \" \" * (4 * current_depth) + node.code\n\n if one_handers and node.code in one_handers:\n output_str = Fore.GREEN + output_str + \" <-- 1h\" + Fore.RESET\n elif two_handers and node.code in two_handers:\n output_str = Fore.CYAN + output_str + \" <-- 2h\" + Fore.RESET\n\n print(output_str)\n\n for child in node.children:\n print_itype_tree(child, one_handers, two_handers, current_depth + 1)", "def tree():\n nobv.visual_tree()", "def _print_inorder(self):\n if not self.root:\n return None\n else:\n stack = []\n node = self.root\n while len(stack) or node:\n if node:\n stack.append(node)\n node=node.get_left()\n else:\n node = stack.pop()\n print(node.get_data())\n node=node.get_right()", "def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = 
{'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')", "def walkTree(self):\n if self.parentId:\n print self.parentId, self.id, self.value\n for child in self.children.itervalues():\n child.walkTree()", "def __print_node(self, node, depth):\n max_depth = 4\n\n # format label distribution for printing\n unique_labels = np.unique(node.labels, return_counts=True)\n labels = unique_labels[0]\n label_counts = unique_labels[1]\n lab_dists_format = {}\n for lab, cnt in zip(labels, label_counts):\n lab_dists_format[lab] = cnt\n\n # print the leaf info and go back up the tree\n if node.is_leaf:\n print(\" \" * (depth + 1), \"+\", \"----\" * (depth + 1), \"Leaf:\",\n node.predicted_class, \" (Entropy:\", round(node.entropy, 4),\n \")\", lab_dists_format)\n return\n\n # go back up the tree if you've reached printing depth limit\n elif depth >= max_depth:\n return\n\n # print the node info and continue with its childen\n print(\" \" * (depth + 1), \"+\", \"----\" * (depth + 1), \"IntNode\",\n \"[Split: feature \" + str(node.feature_index_split) + \":\" +\n str(label_names[node.feature_index_split]),\n \"< \", str(node.integer_splitting_rule) + \"] (Entropy:\",\n round(node.entropy, 4), \")\", lab_dists_format)\n\n self.__print_node(node.left_child, depth + 1)\n self.__print_node(node.right_child, depth + 1)", "def print_phpsyntax_tree(tree):\n for c in tree.children:\n print \"[\" + string.replace(c.name, \"_\", \".\"),\n if c.children != []:\n print_phpsyntax_tree(c),\n print \"]\"," ]
[ "0.8600898", "0.8487803", "0.8312677", "0.8166421", "0.80248964", "0.8010252", "0.7947379", "0.7931171", "0.7874574", "0.78502697", "0.7842588", "0.7769115", "0.7767501", "0.7752166", "0.77449286", "0.7713288", "0.77115613", "0.7703568", "0.7699183", "0.76673025", "0.76467204", "0.76147765", "0.7604308", "0.75777626", "0.7570994", "0.7566444", "0.7563901", "0.75540364", "0.75540364", "0.75399554", "0.75320077", "0.7520107", "0.74281085", "0.74225813", "0.74212396", "0.7402885", "0.73971236", "0.73341334", "0.7273434", "0.72420704", "0.7232572", "0.7210063", "0.7206524", "0.72043455", "0.719999", "0.71902394", "0.7188654", "0.7180638", "0.7163529", "0.7148941", "0.71356946", "0.7123613", "0.7122196", "0.7103451", "0.70938957", "0.70376575", "0.7017526", "0.7004915", "0.6990659", "0.69802344", "0.69617987", "0.6960739", "0.6952615", "0.69360733", "0.6930355", "0.6928271", "0.6922329", "0.691776", "0.69167984", "0.6913013", "0.69101125", "0.6903749", "0.688497", "0.6881485", "0.6840433", "0.6828513", "0.68151444", "0.6808425", "0.6801064", "0.679086", "0.67864466", "0.67699945", "0.672522", "0.67174494", "0.67172253", "0.6674458", "0.6651352", "0.66513264", "0.6642911", "0.6642293", "0.66377616", "0.66245896", "0.66140795", "0.66107655", "0.6600957", "0.6589624", "0.65775836", "0.6573864", "0.65640146", "0.6563938" ]
0.7243111
39
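The tree printer in the row above relies on a TREE_TEMPLATE format string (not included in this row) and on the write helper from the previous row. A minimal sketch of how such a template could lay out the name/title/size columns; the template string and the sample width and record values are assumptions, not taken from the dataset.

# Hypothetical template; the real definition is not part of this dataset row.
TREE_TEMPLATE = "{0:{nameWidth}}{1:{titleWidth}}{2:{memoryWidth}}"

# Width dictionary as recursifTreePrinter would build it (assumed sample values).
dic = {"nameWidth": 10, "titleWidth": 16, "memoryWidth": 1}
rec = ["px", "\"momentum x\"", "40816"]
print(TREE_TEMPLATE.format(*rec, **dic))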
Get the time in the proper shape (a zero-padded six-character string)
def prepareTime(time):
    time = str(time)
    time = '000000'+time
    time = time[len(time)-6:]
    return time
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time(self):\n return self.time_array", "def get_time(self) -> float:\n raise NotImplementedError()", "def get_time(self):\n start=''\n end=''\n time=''\n times=self.times\n print(times[self.istep])\n if self.istep > 0:\n start=ncEarth.beginstr % times[self.istep].isoformat()\n\n\n if self.istep < len(times)-2:\n end = ncEarth.endstr % times[self.istep+1].isoformat()\n\n if start is not '' or end is not '':\n time=ncEarth.timestr % {'begin':start,'end':end}\n\n return time", "def get_time(self):\n return numpy.linspace(self.header.time_gate_start, \\\n self.header.time_gate_stop, self.num_time_bins())", "def gettime(self):\n return self.t", "def time(self):\r\n raise NotImplementedError", "def time(self):\n return self[self.time_columns]", "def time(self):\n return self[self.time_columns]", "def get_time(self):\n clock = self.pipeline.get_clock()\n tm = clock.get_internal_time()\n return tm / 1.e9", "def get_time_info(self):\n\n raise NotImplementedError", "def get_time(self):\n return self.get_timed() / 10.0", "def get_time(self):\n return self._ticks", "def __get_time_span(self):\n\n nonzero = self.data[\"time\"].nonzero()\n return iso_time.time(self.data[\"time\"][nonzero[0][0]]), iso_time.time(\n self.data[\"time\"][nonzero[0][-1]])", "def get_time(self):\n x = time.localtime()\n return ''.join([\n str(x[0]).rjust(4, '0'), '/', str(x[1]).rjust(2, '0'), '/',\n str(x[2]).rjust(2, '0'), ' ', str(x[3]).rjust(2, '0'), ':',\n str(x[4]).rjust(2, '0'), ':', str(x[5]).rjust(2, '0')])", "def getTimes():", "def getTimes():", "def getTimes():", "def time(self):\n raise NotImplementedError()", "def time(self):\n try:\n if self.single_date:\n return self.stime\n else:\n return self.stime + (self.etime - self.stime) / 2\n except TypeError:\n return None", "def get_time_points(self):\n return self._time", "def arr_time(self):\n return self._arr_time", "def time(self) -> int:\n return self.raw[\"time\"]", "def timeTime(self):\n return self._micros / 1000000.0", "def get_time(cls):\n now = rospy.Time.now()\n return now.secs + now.nsecs*(10**-9) # time in seconds", "def getTime(self) -> float:\n return self.t", "def get_time(self):\r\n\t\tactual_time = '{}, {} de {} del {} ({}:{} {} (UTC))'\r\n\t\tda_taim = actual_time.format(self.wday, self.day, self.mon,\r\n\t\t\t\t\t\t\t\t\t self.year, self.hour, self.min,\r\n\t\t\t\t\t\t\t\t\t self.day_or_night)\r\n\t\treturn da_taim", "def time(self):\n return time(\n self.hour, self.minute, self.second, self.microsecond, fold=self.fold\n )", "def time_NEURON():\n recorded_time = h.Vector()\n recorded_time.record(h._ref_t)\n return recorded_time", "def getTime(self):\n return self.step / (self.max_step + int(self.include))", "def getTime(self):\n return _osgAnimation.Keyframe_getTime(self)", "def get_time(t):\n return [time.clock()-t[0], time.time()-t[1]]", "def get_time(self):\n return self.time", "def wall_time(self):", "def get_time(self):\n return self.time_param", "def getTime(self):\n return _osgAnimation.Motion_getTime(self)", "def startTime(self) -> float:\n try: return self.times[0]\n except IndexError: return 0.0", "def current_time(cls) -> float:", "def time(self):\n self.convert_window(\"Time\", \"seconds\", [\"centuries\", \"days\", \"decades\", \"femtoseconds\", \"fortnights\", \"hours\", \"microseconds\", \"millenia\", \"milliseconds\", \"minutes\", \"months(Common)\", \"months(Synodic)\", \"nanoseconds\", \"picoseconds\", \"quarters(Common)\", \"seconds\", \"shakes\", \"weeks\", \"years(Average Gregorian)\", \"years(Common)\", 
\"years(Julian)\", \"years(Leap)\", \"years(Tropical)\"])", "def rt_arr_time(self):\n return self._rt_arr_time", "def _get_half_time(self):\n return self.__half_time", "def getSimulationTime(self):\r\n raise NotImplementedError()", "def time(self):\n return self.raw[\"logTime\"]", "def __get_times(self):\n data = self.simulate_file.readlines()\n data = list(map(str.strip, data))\n data = list(map(float, data))\n start = data[0]\n times = data[1:]\n return (start, times)", "def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")", "def getisotime():\n ct = datetime.utcnow()\n return ct.strftime(\"%Y%m%d%H%M\")", "def time(self):\n\t\treturn self._time", "def time(self) -> float:\n return self.sim_scene.data.time", "def get_time() -> int:\n return store.time", "def initialTime(self):\n return self.params['t0']", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def GetTime(self):\n return self.hour, self.minute, self.second", "def get_time(self):\n return ''", "def get_time(self):\n return \"%02u:%02u:%02u (%d)\" % self.rtc.datetime()[4:8]", "def build_time_feature_vector(self, time):\n return time", "def get_time_sec(gpx_track):\n start_time = gpx_track.segments[0].points[0].time\n time_in_sec = np.empty([gpx_track.get_points_no(), 1])\n n = 1\n for segment in gpx_track.segments:\n for point in segment.points:\n curr_time = point.time\n time_in_sec[n] = curr_time - start_time\n n = n + 1\n\n return time_in_sec, start_time", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def time(self) -> int:\n pass", "def time(self):\n return Time(self.hour, self.minute, self.second)", "def time(self):\n return sum(self._interval) * .5", "def time(self):\n return parse_time(self['timestamp'])", "def getTime(self):\n return self.time", "def time(self):\n return self._begin", "def get_time(start, stop, step = 1):\n # reshape array into columns from row\n return numpy.reshape(numpy.array(range(start, stop, step)), (-1,1))", "def get_time(self):\n return self.widget().time()", "def get_colour_time_based(self):\n\t\treturn self.get_hex_from_rgb(0, 0, int(255.0 * (get_elapsed_seconds_simulated() / SECONDS_PER_DAY)))", "def PreciseTime(self):\n return '%2.2d:%2.2d:%06.3f' % (self._hour, self._minute, self._second)", "def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + 
time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return self._time", "def get_time(self):\n return time.strftime(\"%d/%m/%y %M:%H:%S\", self.time)", "def time(self) -> float:\n return self._time", "def timenow(self):\n return (datetime.now().strftime(\"%H:%M:%S\"))", "def time(self):\n return self._time", "def get_time(self, variables):\n if len(self.TIME_VARIABLE):\n return self._get_variable(variables, self.TIME_VARIABLE)\n else:\n return [0.]", "def get_time(self):\n return self.run_command('get_time')[0]", "def get_frame_time(self):\n return self.get_timings().frame_time", "def ctime(self): # real signature unknown; restored from __doc__\r\n pass", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])", "def time_position(self):\n rt_most_pixel = None\n lf_most_pixel = None\n time_position = []\n min_time_len = None\n for i in range (len(np.unique(self.pd.objid))):\n trajec = self.dataset.trajec(self.dataset.keys[i])\n times = trajec.time_epoch_secs + trajec.time_epoch_nsecs / 1e9\n time_pos = np.vstack([times, trajec.position_x])\n time_position.append(time_pos)\n if min_time_len == None:\n min_time_len = len(times)\n elif min_time_len > len(times):\n min_time_len = len(times)\n pixels = np.unique(trajec.position_x)\n if rt_most_pixel ==None:\n rt_most_pixel = pixels[-1]\n elif rt_most_pixel < pixels[-1]:\n rt_most_pixel = pixels[-1]\n if lf_most_pixel ==None:\n lf_most_pixel = pixels[0]\n elif lf_most_pixel > pixels[0]:\n lf_most_pixel = pixels[0]\n print min_time_len\n print rt_most_pixel\n print lf_most_pixel\n print rt_most_pixel - lf_most_pixel\n return time_position, rt_most_pixel, lf_most_pixel", "def start_time(self) -> float:\r\n ...", "def get_time(self):\n\t\treturn time.time()", "def _get_time(self, state: State) -> int:\n benchmark_time = {\n 'resnet': state.timestamp.epoch.value,\n 'bert': state.timestamp.sample.value,\n }\n return benchmark_time[self.benchmark]", "def Time(self):\n return '%2.2d:%2.2d:%2.2d' % (self._hour, self._minute, self._nearsec)", "def get_time(self):\n return 
self._total_time", "def t0(self):\n return self._time_axis.start", "def timeCalc(image):\n telheader = astropy.io.fits.open(image)\n UT = telheader[0].header['UT']\n secs = float(UT[6:10])\n mins = float(UT[3:5])\n hours = float(UT[0:2])\n time = secs+mins*60.+hours*(60.*60.)\n\n return time", "def construct_obstime(self, row):\n return time.Time(self['mjd'][row], format='mjd')", "def timeStep(self):\n return self.params['h']", "def time_axis(self):\n if self.axes_wcs.wcs.ctype[0] not in ['TIME', 'UTC']:\n raise cu.CubeError(1, 'No time axis present')\n delta = self.axes_wcs.wcs.cdelt[0]\n crpix = self.axes_wcs.wcs.crpix[0]\n crval = self.axes_wcs.wcs.crval[0]\n start = crval - crpix * delta\n stop = start + len(self.data) * delta\n cunit = u.Unit(self.axes_wcs.wcs.cunit[0])\n return np.linspace(start, stop, num=self.data.shape[-1]) * cunit", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def _time(self):\n return self.r.eval(self.LUA_TIME, 1, 1)", "def output(self):\n if self.after_sunrise:\n return \"%02d:%02d:%02dR\" % self.time\n if self.after_sunset:\n return \"%02d:%02d:%02dT\" % self.time\n return \"%02d:%02d:%02d\" % self.time", "def round_trip_time(self):\n ...", "def gettime():\n return libruss.russ_gettime()", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 :\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n 
self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def _TIME2STEPS(time):\n return int(time*1000)", "def get_tim(self):\n return self.sum(axis=0)", "def _get_time(self, _time: Optional[float] = None) -> float:\n if _time is None:\n return time.time()\n\n return _time" ]
[ "0.7274171", "0.70513386", "0.7026728", "0.69944495", "0.68678916", "0.68556535", "0.68395287", "0.68395287", "0.6821601", "0.679285", "0.67860985", "0.6783213", "0.6751718", "0.6702506", "0.66815585", "0.66815585", "0.66815585", "0.66792035", "0.6662519", "0.66364765", "0.6635678", "0.6634999", "0.6599993", "0.6579908", "0.6565989", "0.6564395", "0.6544554", "0.65401447", "0.65400606", "0.6538975", "0.6536383", "0.65234756", "0.65040606", "0.6489778", "0.64886135", "0.6462023", "0.6457483", "0.6448697", "0.6443608", "0.6435573", "0.6432302", "0.6415515", "0.64112926", "0.640924", "0.6400426", "0.63724774", "0.636095", "0.63591623", "0.6355987", "0.6352036", "0.63475525", "0.6343992", "0.6337802", "0.633484", "0.6318836", "0.6315671", "0.6313396", "0.63097936", "0.63093185", "0.63070047", "0.63045096", "0.6298989", "0.62972414", "0.628557", "0.62853277", "0.62745744", "0.6273968", "0.6263837", "0.62428975", "0.62428975", "0.62428975", "0.6233454", "0.62299865", "0.6229072", "0.622313", "0.6218675", "0.62178636", "0.62143546", "0.6213642", "0.6212986", "0.62075627", "0.62065125", "0.6201159", "0.61962336", "0.61950594", "0.61924356", "0.6187394", "0.6183382", "0.61827713", "0.6169987", "0.616921", "0.6168906", "0.6165318", "0.6161772", "0.616127", "0.61539274", "0.61538315", "0.61533034", "0.6147103", "0.61462724", "0.613292" ]
0.0
-1
Print a list of TKey in columns
def roolsPrintLongLs(keyList,optDict,indent): if len(keyList) > 0: # Width informations maxCharClass = max([len(key.GetClassName()) for key in keyList]) maxCharTime = 12 maxCharName = max([len(key.GetName()) for key in keyList]) dic = { \ "classWidth":maxCharClass+2, \ "timeWidth":maxCharTime+2, \ "nameWidth":maxCharName+2, \ "titleWidth":1} date = ROOT.Long(0) for key in keyList: time = ROOT.Long(0) key.GetDatime().GetDateTime(key.GetDatime().Get(),date,time) time = prepareTime(time) rec = \ [key.GetClassName(), \ MONTH[int(str(date)[4:6])]+" " +str(date)[6:]+ \ " "+time[:2]+":"+time[2:4], \ key.GetName(), \ "\""+key.GetTitle()+"\""] write(LONG_TEMPLATE.format(*rec,**dic),indent,end="\n") if optDict['tree'] and isTreeKey(key): tree = key.ReadObj() recursifTreePrinter(tree,indent+2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_table(data):\n for key in sorted(data):\n print \"%s: %s\" % (key.rjust(16), data[key])", "def show_table(self, keys=None, sort_keys_function=None):\n rows = []\n output_keys = keys or self.keys\n\n for item in self.__get_items(sort_keys_function):\n row = []\n for output_key in output_keys:\n row.append(getattr(item, self.mapping[output_key]))\n rows.append(row)\n print(tabulate(rows, output_keys))", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def print_table(listx):\r\n\tfor lists in listx:\r\n\t\tfor i in lists:\r\n\t\t\tprint str(i) , '\\t',\r\n\t\tprint()", "def print_columns(outfile):\r\n values = []\r\n for key in Output.R_COLUMNS:\r\n values.append(str(key))\r\n row = '\\t'.join(values)\r\n row = row + '\\n'\r\n outfile.write(row.encode(\"utf8\"))", "def output_columns(self) -> List[str]:", "def __print_work_table(table):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % ('Act', 'Pred', 'Block', 'Dummy', 'Succ', 'start', 'end')\n for k, col in sorted(table.items()):\n print \"%-5s %-30s %5s %5s %5s %5s %5s\" % tuple(\n [str(k)] + [list(col[0])] + [str(col[i]) for i in range(1, len(col))])", "def show_tables(self) -> List[str]:\n return list(self.tb.keys())", "def print_table(table):\n # transpose the table:\n table = map(list, zip(*table))\n # get the column width:\n col_width = [max(len(str(x)) for x in col) for col in zip(*table)]\n # print it to screen:\n print\n for line in table:\n print \"| \" + \" | \".join(\"{:{}}\".format(x, col_width[i]) for i, x in enumerate(line)) + \" |\"\n print", "def print_table(seqids, data, outputfile, separator='\\t'):\n\n tags = data.keys()\n with open(outputfile, 'w') as out:\n out.write(separator.join([\"#Sequence ID\"] + list(tags)) + \"\\n\")\n for s in seqids:\n out.write(s)\n for t in tags:\n out.write(\"{}{}\".format(separator, data[t].get(s, \"\")))\n out.write(\"\\n\")", "def display_taxis(taxis):\n for i, taxi in enumerate(taxis):\n print(\"{} - {}\".format(i, taxi))", "def basic_print(lista):\n for item in lista:\n print(\"{} \\t\\t {}\".format(item[0], item[1]))", "def show(matrix):\n print(\"\",end=\" \")\n for k in sorted(matrix.keys()):\n print(k,end=\" \")\n \n for i,row in sorted(matrix.items()):\n print(\"\\n\" + str(i),end=\" \")\n for j in row:\n print(matrix[i][j],end=\" \")\n print()", "def print_labels(self,labels):\n\t\tfor key in labels:\n\t\t\tprint key, ':\\t', labels[key]", "def print_table(table):\n rest = table[1:]\n fmt = \"%-28s %-9s %-16s %s\"\n for row in rest:\n print(fmt % tuple(row))", "def roolsPrintSimpleLs(keyList,indent):\n # This code is adaptated from the pprint_list function here :\n # http://stackoverflow.com/questions/25026556/output-list-like-ls\n # Thanks hawkjo !!\n if len(keyList) == 0: return\n (term_width, term_height) = getTerminalSize()\n term_width = term_width - indent\n min_chars_between = 2\n min_element_width = min( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n max_element_width = max( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n if max_element_width >= term_width: ncol,col_widths = 1,[1]\n else:\n # Start with max possible number of columns and reduce until it fits\n ncol = min( len(keyList), term_width / min_element_width )\n while True:\n col_widths = \\\n [ max( len(key.GetName()) + min_chars_between \\\n for j, key in enumerate(keyList) if j % ncol == i ) \\\n for i in range(ncol) ]\n if sum( col_widths ) <= term_width: break\n else: ncol -= 1\n for 
i, key in enumerate(keyList):\n if i%ncol == 0: write(\"\",indent) # indentation\n # Don't add spaces after the last element of the line or of the list\n if (i+1)%ncol != 0 and i != len(keyList)-1:\n if not IS_TERMINAL: write( \\\n key.GetName().ljust(col_widths[i%ncol]))\n elif isDirectoryKey(keyList[i]): write( \\\n isSpecial(ANSI_BLUE,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_BLUE_LENGTH))\n elif isTreeKey(keyList[i]): write( \\\n isSpecial(ANSI_GREEN,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_GREEN_LENGTH))\n else: write(key.GetName().ljust(col_widths[i%ncol]))\n else: # No spaces after the last element of the line or of the list\n if not IS_TERMINAL: write(key.GetName())\n elif isDirectoryKey(keyList[i]):\n write(isSpecial(ANSI_BLUE, key.GetName()))\n elif isTreeKey(keyList[i]):\n write(isSpecial(ANSI_GREEN, key.GetName()))\n else: write(key.GetName())\n write('\\n')", "def show(self, keys=None, sort_keys_function=None):\n output_keys = keys or self.keys\n if not self.items:\n print(\"No items to show\")\n else:\n for item in self.__get_items(sort_keys_function):\n for output_key in output_keys:\n print(\"{0:25}: {1!s}\".format(output_key, getattr(item, self.mapping[output_key])))\n print(\"-\" * 25)", "def display_taxis(taxis):\n for i, taxi in enumerate(taxis):\n print(f\"{i} - {taxi}\")", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print_column():\n print('+----+----+----+----+')", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print_map(self):\n y_max,x_max = map(max, zip(*self.mp.keys()))\n for row in range(0,y_max+1):\n msg = []\n for k in range(0,x_max+1):\n msg.append(chr(self.mp[row,k]))\n print(\"\".join(msg))", "def keys_for_tags(self):\r\n\r\n for counter, t_temp in enumerate(sorted(self.get_tags())):\r\n display.noteprint((labels.TAGS[3:]+POUND+BLANK+str(counter+1)\r\n +BLANK+COLON+BLANK+t_temp,\r\n formkeys(self.get_keys_for_tag(t_temp))))", "def print_key_freq(self,\r\n freq_list):\r\n\r\n for key, freq in freq_list:\r\n\r\n display.noteprint((EMPTYCHAR,key + alerts.APPEARS_BEG\\\r\n +len(self.get_indexes_for_key(key)\\\r\n +alerts.APPEARS_END+freq)))", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(),args[k]] for k in keys])\n print(t.draw())", "def displayPokedex(pokedex):\n for col1,col2 in zip(pokedex[::2],pokedex[1::2]):\n\t print(col1+\",\",col2+\",\")", "def key_columns(self):\n return [str(column) for id, column in self._columns.iteritems() if column.is_key]", "def display_stonks(stonks):\n order = sorted(stonks, key=lambda key: len(stonks[key]), reverse=True)\n i = 0\n for symbol in order:\n if i == 12:\n break\n print(symbol, \"\\t\", len(stonks[symbol]))\n i += 1\n return", "def print_column():\n print('+----+----+')", "def _print_table(stats):\n max_key_len = max([len(key) for key in stats])\n width_right = 15\n width_left = max(width_right, 
max_key_len)\n divider = '+-' + '-' * width_left + '-+-' + '-' * width_right + '-+'\n\n def get_format_char(value):\n if isinstance(value, int):\n return 'd'\n elif isinstance(value, float):\n return '.4f'\n else:\n return 's'\n\n print(divider)\n for name, value in stats.items():\n left_format = f':>{width_left}s'\n right_format = f':<{width_right}{get_format_char(value)}'\n line_format = f'| {{{left_format}}} | {{{right_format}}} |'\n line = line_format.format(name, value)\n print(line)\n print(divider)", "def print_table(table):\n for row in table:\n print(row)", "def print_table(table):\n for row in table:\n print(row)", "def print_q(q):\n for key in sorted(q.keys()):\n print(key, end=\" \")\n value = q[key]\n for i in range(len(value)):\n print(value[i], end=\" \")\n print()", "def print_table(table):\n for i in range(len(table)):\n print \"Row \", i, \"\\t\",\n for j in range(len(table[i])):\n print table[i][j],\n print \"\\n\"", "def view_keys(dict):\n claves=list(dict.keys())\n claves.sort()\n for line in claves:\n print(line.upper(),' = ',dict[line])", "def print_table(table):\n for row in table:\n # Header column left justified\n print(\"{:<19}\".format(row[0]), end='')\n # Remaining columns right justified\n for col in row[1:]:\n print(\"{:>4}\".format(col), end='')\n print(\"\", end='\\n')", "def print_table(table):\n for row in table:\n print(row)", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n tab = Texttable()\n tab.add_rows([[\"Parameter\", \"Value\"]])\n tab.add_rows([[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(tab.draw())", "def print(listing: typing.Iterable[typing.Any]) -> None:\n listing = tuple(str(i) for i in listing)\n if not listing:\n return\n width = max(len(i) for i in listing) + 2\n count = min(shutil.get_terminal_size().columns // width, len(listing))\n for row in itertools.zip_longest(*(listing[i::count] for i in range(count)), fillvalue=''):\n print(*(f'{c:<{width}}' for c in row), sep='')", "def show(arr2d):\n print (\"\\n\".join(\"\\t\".join(row) for row in arr2d))", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table, fieldnames):\n print(\"{:<19}\".format(fieldnames[0]), end='')\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(field), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for field in fieldnames[1:]:\n print(\"{:>6}\".format(row[field]), end='')\n print(\"\", end='\\n')", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def pretty_keys(dictionary):\n if not dictionary:\n return []\n # - number of keys printed per line\n num = 5\n # - turn into sorted 
list\n keys = list(dictionary.keys())\n keys.sort()\n # - fill with blank elements to width num\n missing = (len(keys) % num)\n if missing != 0:\n to_add = num - missing\n keys.extend([''] * to_add)\n # - turn into 2D matrix\n matrix = [[keys[i+j] for i in range(0, num)]\n for j in range(0, len(keys), num)]\n # - calculate max width for each column\n len_matrix = [[len(col) for col in row] for row in matrix]\n max_len_col = [max([row[j] for row in len_matrix])\n for j in range(0, num)]\n # - pad with spaces\n matrix = [[row[j].ljust(max_len_col[j]) for j in range(0, num)]\n for row in matrix]\n # - return list of lines to print\n matrix = [' '.join(row) for row in matrix]\n return matrix", "def print_table(source, count=False):\n table_value = []\n table_header = []\n for source_key, source_value in source.items():\n for item in source_value:\n table_value.append([v for v in item.values()])\n table_header.append([k for k in item.keys()])\n if not count:\n print(tabulate(table_value,\n headers=table_header[0],\n tablefmt='orgtbl'))\n else:\n print(tabulate([[len(source_value)]],\n headers=[source_key],\n tablefmt='orgtbl'))", "def printt(dictionnary):\n for key, value in dictionnary.iteritems():\n print('{key}, size: {size}, {values}'.format(key=key, \n size=len(value), values=value[0:4]))", "def metadata_print(metadata):\n\n print('{0:<10} {1}'.format('parameter', 'value'))\n for key in metadata:\n print('{0:<10} {1}'.format(key, metadata[key]))", "def pretty_list(incoming_keys, num=3):\n if not incoming_keys:\n return []\n keys = list(incoming_keys)\n # - turn into sorted list\n keys.sort()\n # - fill with blank elements to width num\n missing = (len(keys) % num)\n if missing != 0:\n to_add = num - missing\n keys.extend([''] * to_add)\n # - turn into 2D matrix\n matrix = [[keys[i+j] for i in range(0, num)]\n for j in range(0, len(keys), num)]\n # - calculate max width for each column\n len_matrix = [[len(col) for col in row] for row in matrix]\n max_len_col = [max([row[j] for row in len_matrix])\n for j in range(0, num)]\n # - pad with spaces\n matrix = [[row[j].ljust(max_len_col[j]) for j in range(0, num)]\n for row in matrix]\n # - return list of lines to print\n matrix = [' '.join(row) for row in matrix]\n return matrix", "def mediaWikiTable(leftmostTitle, array, formatFn=lambda x: str(x)):\n columnKeys = extractColumnKeys(array)\n print(\"{|\")\n for t in [leftmostTitle] + [str(k) for k in columnKeys]:\n print(\"!\" + \" !! 
\".join(titles))\n for k in sorted(array.keys, key=cmp_to_key(compareFn)):\n print(\"|-\")\n print(\"| \" + str(k))\n v = array[k]\n for ck in columnKeys:\n value = v.get(k, None)\n print(\"| \" + (formatFn(value) if value else \"\"))\n print(\"|}\")", "def print_table(self):\n print(\"%-12s%-12s%-12s%-12s%-12s\" % (\"index\",\"balance\",\"payment\",\"interest\",\"amortization\"))\n print(\"-------------------------------------------------------------\")\n for i in self.table[\"index\"]:\n print(\"%-12i%-12i%-12i%-12i%-12i\" % (self.table[\"index\"][i],self.table[\"balance\"][i]\\\n ,self.table[\"payment\"][i],self.table[\"interest\"][i],\\\n self.table[\"amortization\"][i]))", "def print_kraken_otu_table(linked_dict, tax_ids, out=\"contig_taxonomy_table.txt\"):\n unique_tax = get_unique_dict(tax_ids)\n\n\n sorted_tax = sorted(unique_tax.keys())\n with open(out, 'w') as OUT:\n OUT.write(\"\\t\".join([\"contig\"] + sorted_tax) + \"\\n\")\n\n \n for contig, tid_dict in linked_dict.items():\n to_print = []\n to_print.append(contig)\n\n \n for tax in sorted_tax:\n count = 0\n for tid in unique_tax[tax]:\n count += tid_dict.get(tid, 0)\n\n to_print.append(str(count))\n\n OUT.write(\"\\t\".join(to_print) + \"\\n\")", "def print_playing_field(d):\n print()\n print(\"1 2 3 4 5 6 7\")\n for row in d:\n line = \"\"\n for cell in row:\n line += cell + \" \"\n print(line)\n print()", "def tabout(things, file=sys.stdout):\n print(\"\\t\".join([str(x) for x in things]), file=file)\n file.flush()", "def listOptions(lst):\n for k, e in enumerate(lst,1):\n print(\"{:^15}{:<10}\".format(k,e))", "def print_PQ(q):\n for item in q:\n print(str(item), end=' ')\n print()", "def printvarindex(fname):\n cursor = eplussql.getcursor(fname)\n mtx1 = eplussql.get_varindex(cursor)\n mtx2 = [[str(item) for item in row] for row in mtx1]\n mtx3 = [','.join(row) for row in mtx2]\n for row in mtx3:\n print row", "def print_board(bd):\n print(\"-----------------\")\n for row in rows:\n to_print = \"\"\n for num in nums:\n to_print += str(bd[row[num]]) + \" \"\n print(to_print)\n print(\"-----------------\")", "def show_columns(df):\n\n if isinstance(df,str):\n df = pd.read_csv(df)\n\n ind = np.arange(len(df.columns))\n\n for entry in zip(ind,df.columns):\n print(entry)", "def printData (data):\n print(str(len(data)) + '\\t' + str(data))", "def print_tables(self):\n print \"------------------\\nTables\\n------------------\"\n cnt = 0\n for x in self.show_tables():\n cnt += 1\n print (\"{0}.) 
{1}\".format(cnt, x[0]))", "def print(self, frequencies: {'token': int}) -> [str]:\n # project2: return the key-value pairs as a list of str instead of print to console\n return [f\"{k}\\t{v}\" for k, v in sorted(frequencies.items(), key=lambda x: (-x[1], x[0]))]\n # sort first by value in decreasing order, then by key in alphabetical order", "def print_all(cls):\n [print('{0} = \"{1}\"'.format(k, v)) for (k, v) in cls.all()]", "def mostrar_tablero(mtx, n):\n # Cabecera de Columnas\n fila = \"/ |\"\n for i in range(n):\n fila = fila + \" \" + chr(65+i)\n print fila\n print \"-\"*(2*n+3)\n # Cabecera de Filas\n for i in range(n):\n fila = str(i+1)\n if i < 9 : fila += \" |\"\n else:\n fila+=\"|\"\n for e in range(n):\n fila = fila+\" \"+mtx[i][e]\n print fila\n fila = \"\"\n # Nueva linea\n print \"\"", "def displayHand(hand):\n for letter in hand.keys():\n for j in range(hand[letter]):\n print letter, # print all on the same line\n print # print an empty line", "def report_keyset(self):\n for i, matchset in enumerate(self.matches):\n if len(matchset) == 1:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), BLUE)\n elif len(matchset) != 0:\n print \"[%02d]\" % i, fmt(sorted([k for k, data in matchset.items()]), WHITE)\n else:\n print \"[%02d]\" % i, fmt(\"[X]\", RED)", "def print_row():\n print('| | |')", "def print_column_names(self):\n counter = 1\n try:\n for col_names in self.cursor.description:\n # print(self.cursor.description[col_names][0])\n print(\"\"\"Attribut{}: {:<5}, Typ: {:<5}, DisplaySize: {} InternalSize: {:<5}, Precision: {},\n \"Scale: {}, Null_Ok: {}\"\"\"\n .format(counter,\n col_names[0],\n col_names[1],\n col_names[2],\n col_names[3],\n col_names[4],\n col_names[5],\n col_names[6]))\n counter += 1\n except p.Error as exception:\n print(exception.pgerror)\n except Exception as general_exception:\n print(general_exception)", "def print_tree(self,root_key='',offset=''):\n itm = self._root\n if root_key:\n itm = self.get_data(root_key)\n tstr = os.linesep \n try: #if isinstance(itm,dict):\n for k in itm.keys():\n x_str = self.print_tree(root_key+'.'+k,offset+' ')\n tstr = tstr+offset+'{}: {}'.format(k,x_str)+os.linesep\n except:\n try: #elif isinstance(itm,list):\n for i,x in enumerate(itm):\n x_str = self.print_tree(root_key+'.'+str(i),offset+' ')\n tstr = tstr+offset+'{}: {}'.format(i,x_str)+os.linesep\n except:\n return '{}'.format(itm)\n return tstr", "def tabular_print(files_dict: dict):\r\n # create a list of file extensions\r\n file_extensions = []\r\n for filename in files_dict.keys():\r\n for file_ext in files_dict[filename].keys():\r\n # print(\"debug:::\", file_ext)\r\n file_extensions.append(file_ext)\r\n break\r\n # go through all the files and print them in a table with the file extension as the top row\r\n sep_line_len = 40 + 10 * len(file_extensions) # separator line length = max_filename_len [35] + 10*number of ext\r\n # print the first row\r\n print(\"filename\".ljust(40), end='')\r\n for ext in file_extensions:\r\n print(\"|\" + ext.center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '='))\r\n # print the rest of the files\r\n for filename, ext_dict in files_dict.items():\r\n print(filename.ljust(40), end='')\r\n for ext in ext_dict.keys():\r\n if ext_dict[ext]:\r\n print(\"|\" + \"V\".center(9), end='')\r\n else:\r\n print(\"|\" + \" \".center(9), end='')\r\n print()\r\n print(''.center(sep_line_len, '-'))", "def print_all_items_in_dict_for_csv(all_items):\n for item in sorted(all_items):\n 
print(f\"{item},{all_items[item]}\")", "def tabular_formatted_printing(data_list):\n n = len(data_list)\n max = 0\n for i in range(0,n):\n if int(len(data_list[i][0])) > max:\n max = len(data_list[i][0])\n for i in range(0,n):\n if int(len(data_list[i][0])) < max:\n space = max - len(data_list[i][0])\n else:\n space = 0\n print(data_list[i][0]+space*' '+' : '+str(data_list[i][1]))\n return", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()", "def print_list(things_to_print, prefix=\"\\t\", stream=sys.stdout):\n for item in things_to_print:\n print(f\"{prefix}{item}\", file=stream)", "def generate_table(results):\n keyslist = list(results[0].keys())\n table = PrettyTable(keyslist)\n for dct in results:\n table.add_row([dct.get(c, \"\") for c in keyslist])\n return table", "def html_keyvalue_tablerow(key, *values):\n return '<tr><td>', key, '</td><td>', list(values), '</td></tr>'", "def formkeys(entry_temp):\r\n\r\n return nformat.format_keys(transpose_keys(entry_temp,\r\n notebook=notebook))", "def print_table(rows, header=['Operation', 'OPS']):\n if len(rows) == 0:\n return\n col_max = [max([len(str(val[i])) for val in rows]) + 3 for i in range(len(rows[0]))]\n row_format = ''.join([\"{:<\" + str(length) + \"}\" for length in col_max])\n\n if len(header) > 0:\n print(row_format.format(*header))\n print(row_format.format(*['-' * (val - 2) for val in col_max]))\n\n for row in rows:\n print(row_format.format(*row))\n print(row_format.format(*['-' * (val - 3) for val in col_max]))", "def print(self):\n tiles = list(map(list, zip(*self.tiles))) # transposed\n print('tiles = [')\n for row in tiles:\n print('\\t' + repr(row))\n print(']')\n print('props = [')\n for prop in self.props:\n print('\\t' + repr(prop))\n print(']')", "def print_row():\n print('| | | | |')", "def print_seq(self):\n names, values = [], []\n for each in self.minions:\n names.append(each.name)\n values.append(f'{each.atk}/{each.dfs}')\n t = PrettyTable()\n t.add_row(names)\n t.add_row(values)\n print(t)", "def print_dataset_schema(outputs, name, columns):\n outputs.stdout.append(TextOutput(name + ' ('))\n for i in range(len(columns)):\n text = ' ' + str(columns[i])\n if i != len(columns) - 1:\n text += ','\n outputs.stdout.append(TextOutput(text))\n outputs.stdout.append(TextOutput(')'))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_table(dict_list=None, user_config_data=None):\r\n if user_config_data is not None:\r\n # print(tabulate.tabulate(user_config_data, headers=['Variable', 'Value'], tablefmt=\"grid\"))\r\n print(tabulate.tabulate(user_config_data, tablefmt=\"grid\"))\r\n return\r\n\r\n header = [\"idx\"] + list(dict_list[0].keys())\r\n rows = [[idx + 1] + list(x.values()) for idx, x in enumerate(dict_list)]\r\n print(tabulate.tabulate(rows, header, tablefmt=\"grid\"))", "def display_col_dp(dp_list, attr_name):\n\n print()\n print(\"---------- {:s} ----------\".format(attr_name))\n print([getattr(dp, attr_name) for dp in dp_list])", "def displayHand(hand):\r\n for letter in hand.keys():\r\n for j in 
range(hand[letter]):\r\n print(letter,end=\" \") # print all on the same line\r\n print() # print an empty line\r", "def print_sorted_table_by_value(table):\n\td_view = [ (v,k) for k,v in table.iteritems() ]\n\td_view.sort(reverse=True) # natively sort tuples by first element\n\tfor v,k in d_view:\n\t\tprint \"%d: %s\" % (v,k)", "def print_table(table):\n print(\"City \", end='')\n for month in MONTHS:\n print(\"{:>6}\".format(month), end='')\n print(\"\")\n for name, row in table.items():\n # Header column left justified\n print(\"{:<19}\".format(name), end='')\n # Remaining columns right justified\n for month in MONTHS:\n print(\"{:>6}\".format(row[month]), end='')\n print(\"\", end='\\n')", "def printUsersInGroup(group) -> None:\n click.echo(tabulate(listUsersInDict(group), headers=\"keys\", tablefmt=\"grid\"))", "def collatz_print (a) :\n return (str(i) + \" \" + str(j) + \" \" + str(v) + \"\\n\" for i, j, v in a)", "def print_tables(hash_table, f_output, l_samples):\n\n l_fields = ['chr', 'pos', 'ref', 'alt', 'QUAL', 'FILTER',\n 'Func.refGene', 'Gene.refGene', 'GeneDetail.refGene', 'ExonicFunc.refGene', 'AAChange.refGene',\n 'cytoBand', 'ExAC_ALL', 'ExAC_AFR', 'ExAC_AMR', 'ExAC_EAS', 'ExAC_FIN', 'ExAC_NFE', 'ExAC_OTH',\n 'ExAC_SAS',\n 'avsnp147', 'SIFT_score', 'SIFT_pred', 'Polyphen2_HDIV_score', 'Polyphen2_HDIV_pred',\n 'Polyphen2_HVAR_score',\n 'Polyphen2_HVAR_pred', 'LRT_score', 'LRT_pred', 'MutationTaster_score', 'MutationTaster_pred',\n 'MutationAssessor_score', 'MutationAssessor_pred', 'FATHMM_score', 'FATHMM_pred', 'PROVEAN_score',\n 'PROVEAN_pred', 'VEST3_score', 'CADD_raw', 'CADD_phred', 'DANN_score', 'fathmm-MKL_coding_score',\n 'fathmm-MKL_coding_pred', 'MetaSVM_score', 'MetaSVM_pred', 'MetaLR_score', 'MetaLR_pred',\n 'integrated_fitCons_score', 'integrated_confidence_value', 'GERP++_RS', 'phyloP7way_vertebrate',\n 'phyloP20way_mammalian', 'phastCons7way_vertebrate', 'phastCons20way_mammalian', 'SiPhy_29way_logOdds']\n l_fields = l_fields + l_samples\n \n l_chr = set([item[0] for item in hash_table.keys()])\n\n fo = open(f_output, 'w')\n fo.write(','.join(l_fields) + '\\n')\n for key in sorted(hash_table.keys(), key=itemgetter(1)):\n fo.write(','.join(map(lambda field: hash_table[key].get(field, '.'), l_fields)) + '\\n')\n fo.close()", "def show_directory_table(self):\n for row in range(self.directory_table.shape[0]):\n for column in range(self.directory_table.shape[1]):\n if column == self.directory_table.shape[1] - 1:\n print(self.directory_table[row][column])\n else:\n print(self.directory_table[row][column], end=',', sep='')", "def print_table(emojis):\n if len(emojis) > 0:\n table = []\n for i in emojis:\n table.append([i.get('id'), i.get('title'), i.get('emoji')])\n print(tabulate(table, headers=[\"ID\", \"Title\", \"Emoji\"]))\n else:\n print(\"¯\\_(ツ)_/¯ Nothing to see here...\")", "def dibujar_tablero(tablero):\n for fila in tablero:\n print(\"|\", \"|\".join(fila), \"|\", sep=\"\")\n print(\"\")", "def pprint_table(out, table):\n\n\tcol_paddings = []\n\n\tfor i in range(len(table[0])):\n\t\tcol_paddings.append(get_max_width(table, i))\n\n\tfor row in table:\n\t\t# left col\n\t\tout.write(str(row[0]).ljust(col_paddings[0] + 1))\n\t\t\n\t\t# rest of the cols\n\t\tfor i in range(1, len(row)):\n\t\t\tout.write(str(row[i]).rjust(col_paddings[i] + 2))\n\t\t\n\t\tout.write('\\n')", "def print_record(self, key):\n percentile_amount = str(int(round(self.running_percentile[key].get_percentile())))\n total_amount = str(int(round(self.running_percentile[key].total_amount)))\n 
count = str(len(self.running_percentile[key]))\n record = [key.recipient, key.zip_code, str(key.year), percentile_amount, total_amount, count]\n return '|'.join(record)", "def table_row_keys(exc_keys, seg_keys, bitmask_keys, check_keys):\n\n # first few columns are predefined\n cols = [\"IFO\", \"GraceDB IDs\", \"GraceDB Times\", \"Schedule\",\n \"All checks passed\"]\n\n # add checks keys\n for key in check_keys:\n cols.append(key)\n\n # add check_exc keys\n for key in exc_keys:\n cols.append(\"Excitations from \"+key)\n\n # add check_segdb keys\n for key in seg_keys:\n cols.append(\"Segments from \"+key)\n\n # add check_bitmask keys\n for key in bitmask_keys:\n cols.append(\"Bitmask segments from \"+key)\n\n return cols", "def display_map(map):\n for row in map:\n line = \"\"\n for point in row:\n line += point.display_point()\n print(line)", "def print_file(list):\n chr_name_list = ['SL2.40ch00','SL2.40ch01','SL2.40ch02','SL2.40ch03','SL2.40ch04','SL2.40ch05','SL2.40ch06','SL2.40ch07','SL2.40ch08','SL2.40ch09','SL2.40ch10','SL2.40ch11','SL2.40ch12']\n for index,chr_list in enumerate(list):\n if chr_list:\n chr = chr_name_list[index]\n for loci in chr_list:\n print \"%s\\t%d\\t%d\\t%s\\t%d\" % (chr,loci[0],loci[1],'\\t'.join(loci[2]),len(loci[2])-loci[2].count('0'))", "def pprint_grid(grid):\n print(\"\\n\".join(\" \".join(str(r) for r in g) for g in grid))", "def print_board(self, board):\n\n for i in range(0, len(self.row_map.keys())):\n for j in range(0, len(self.row_map.keys())):\n print(\" | {:>2}\".format(board[self.row_map[i + 1] + str(j + 1)]), end='')\n print(\"\\n\")\n print(\" --------------------- \")" ]
[ "0.69297874", "0.6871086", "0.6832247", "0.655662", "0.64423645", "0.63084143", "0.6236635", "0.6138035", "0.60768145", "0.6069733", "0.60688424", "0.60679513", "0.6044288", "0.60366815", "0.60195136", "0.59938", "0.5990676", "0.5981191", "0.59755695", "0.5975386", "0.59612054", "0.59542245", "0.59370786", "0.5933358", "0.59314215", "0.59314215", "0.5911349", "0.59074", "0.58973515", "0.5889101", "0.5876408", "0.5856921", "0.5856921", "0.5845204", "0.58431005", "0.58385354", "0.5837841", "0.58319306", "0.5803347", "0.5802923", "0.5796146", "0.5770788", "0.5770788", "0.5751032", "0.57409763", "0.57347035", "0.5734597", "0.57306457", "0.5727991", "0.5714615", "0.57055634", "0.57035184", "0.5700293", "0.5695863", "0.56853884", "0.5670796", "0.5669774", "0.56507033", "0.5638839", "0.56387633", "0.5635784", "0.56324404", "0.5626068", "0.5616112", "0.5612498", "0.56077635", "0.5607684", "0.5599175", "0.55972135", "0.5594984", "0.5589822", "0.5584056", "0.5579242", "0.55729216", "0.55722743", "0.55703163", "0.55672336", "0.5561017", "0.5559625", "0.5545978", "0.5545679", "0.5544007", "0.55414325", "0.5541363", "0.55403817", "0.5540192", "0.553611", "0.5528267", "0.5525408", "0.55187416", "0.5510099", "0.550829", "0.5507297", "0.5501062", "0.5491217", "0.5487079", "0.54769546", "0.5467723", "0.54561615", "0.5452587", "0.5442336" ]
0.0
-1
getTerminalSize(): get width and height of console; works on Linux, OS X, Windows, Cygwin (Windows)
def getTerminalSize(): current_os = platform.system() tuple_xy = None if current_os == 'Windows': tuple_xy = _get_terminal_size_windows() if tuple_xy is None: tuple_xy = _get_terminal_size_tput() # needed for window's python in cygwin's xterm! if current_os in ['Linux', 'Darwin'] or current_os.startswith('CYGWIN'): tuple_xy = _get_terminal_size_linux() if tuple_xy is None: #print "default" #_get_terminal_size_windows() or _get_terminal_size_tput don't work tuple_xy = (80, 25) # default value return tuple_xy
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_terminal_size_windows():\r\n\r\n\r\n #from https://gist.github.com/jtriley/1108174, thank you very much for this hard to produce code!\r\n\r\n # stdin handle is -10\r\n # stdout handle is -11\r\n # stderr handle is -12\r\n h = windll.kernel32.GetStdHandle(-12)\r\n csbi = create_string_buffer(22)\r\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\r\n if res:\r\n (bufx, bufy, curx, cury, wattr,\r\n left, top, right, bottom,\r\n maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\r\n sizex = right - left + 1\r\n sizey = bottom - top + 1\r\n return sizex, sizey", "def getTerminalSize():\n\n try: # works on Linux\n height, width = [int(x) for x in\n os.popen('stty size', 'r').read().split()]\n width -= 1 # leave a blank column on the right\n \n except Exception: # otherwise assume standard console size\n height = 25\n width = 80 - 1 # leave a blank column on the right\n\n return (height, width)", "def _get_term_size_windows():\n res = None\n try:\n from ctypes import windll, create_string_buffer # type: ignore[attr-defined]\n # stdin handle is -10, stdout -11, stderr -12\n handle = windll.kernel32.GetStdHandle(-12)\n csbi = create_string_buffer(22)\n res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)\n except: # pylint: disable=bare-except\n return None\n if res:\n import struct\n (_, _, _, _, _, left, top, right, bottom, _, _) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n sizex = right - left + 1\n sizey = bottom - top + 1\n return sizex, sizey\n return None", "def getTerminalSize():\r\n import os\r\n env = os.environ\r\n\r\n def ioctl_GWINSZ(fd):\r\n try:\r\n import fcntl, termios, struct\r\n cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\r\n except:\r\n return\r\n return cr\r\n\r\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\r\n\r\n if not cr:\r\n try:\r\n fd = os.open(os.ctermid(), os.O_RDONLY)\r\n cr = ioctl_GWINSZ(fd)\r\n os.close(fd)\r\n except:\r\n pass\r\n\r\n if not cr:\r\n cr = (env.get('LINES', 25), env.get('COLUMNS', 80))\r\n\r\n return int(cr[1]), int(cr[0])", "def get_terminal_size_wrapper():\n try:\n # python3.3+\n import shutil\n termsize = shutil.get_terminal_size()\n return termsize.columns, termsize.lines\n except AttributeError:\n return get_terminal_size()", "def getTerminalSize(self):\n\n # Let the COLUMNS and LINES environment variables override any actual terminal\n # dimensions.\n self.term_width=os.environ.get('COLUMNS')\n if self.term_width:\n self.term_width=int(self.term_width)\n self.term_height=os.environ.get('LINES')\n if self.term_height:\n self.term_height=int(self.term_height)\n\n # Get terminal dimensions from the terminal device IFF needed.\n for f in sys.stdin,sys.stdout,sys.stderr:\n if f.isatty():\n th,tw,_,_=struct.unpack(\n 'HHHH',\n fcntl.ioctl(f.fileno(),termios.TIOCGWINSZ,struct.pack('HHHH',0,0,0,0))\n )\n if not self.term_width:\n self.term_width=tw\n if not self.term_height:\n self.term_height=tw\n break\n else:\n # Lame though it is, use 80x25 for terminal dimensions if we can't figure\n # anything else out.\n if not self.term_width:\n self.term_width=80\n if not self.term_height:\n self.term_height=25\n\n return self.term_width,self.term_height", "def get_terminal_size():\n rows, columns = subprocess.check_output([\"stty\", \"size\"]).split()\n return (int(rows), int(columns))", "def get_console_size():\n return GetConsoleScreenBufferInfo().dwSize", "def terminal_width():\n return shutil.get_terminal_size().columns", "def get_terminal_size():\n def ioctl_gwinsz_fd(fd):\n 
\"\"\"Use GWINSZ ioctl on stdin, stdout, stderr.\n\n Args:\n fd: File descriptor.\n Returns:\n Pair (nlines, ncolumns) if the ioctl succeeded, or None.\n \"\"\"\n try:\n import fcntl\n import termios\n import struct\n return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\n except:\n return None\n\n def ioctl_gwinsz_path(path):\n try:\n fd = os.open(path, os.O_RDONLY)\n try:\n return ioctl_gwinsz_fd(fd)\n finally:\n os.close(fd)\n except:\n return None\n\n return ioctl_gwinsz_fd(0) \\\n or ioctl_gwinsz_fd(1) \\\n or ioctl_gwinsz_fd(2) \\\n or ioctl_gwinsz_path(os.ctermid()) \\\n or (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80))", "def GetTerminalSize():\n\t\tplatform = platform_system()\n\t\tif (platform == \"Windows\"):\n\t\t\tsize = Terminal.__GetTerminalSizeOnWindows()\n\t\telif ((platform in [\"Linux\", \"Darwin\"]) or\n\t\t platform.startswith(\"CYGWIN\") or\n\t\t platform.startswith(\"MINGW32\") or\n\t\t platform.startswith(\"MINGW64\")):\n\t\t\tsize = Terminal.__GetTerminalSizeOnLinux()\n\t\tif (size is None):\n\t\t\tsize = (80, 25) # default size\n\t\treturn size", "def get_terminal_width(default=80):\n twidth = os.popen('stty size', 'r').read().split()\n if len(twidth):\n rows, columns = twidth\n return int(columns)\n else:\n return 80", "def terminalWidth():\n return shutil.get_terminal_size().columns", "def get_terminal_size():\n # type: () -> Tuple[int, int]\n default_width = 80\n default_height = 25\n dims = _get_term_size_env()\n if not dims:\n current_os = platform.system()\n if current_os == 'Windows':\n dims = _get_term_size_windows()\n if not dims:\n # for window's python in cygwin's xterm\n dims = _get_term_size_tput()\n if current_os == 'Linux' or current_os == 'Darwin' or current_os.startswith('CYGWIN'):\n dims = _get_term_size_posix()\n if not dims:\n dims = default_width, default_height\n try:\n dims = list(map(int, dims))\n except ValueError:\n dims = default_width, default_height\n width = dims[0] if dims[0] >= 10 else default_width\n height = dims[1] if dims[1] >= 1 else default_height\n return width, height", "def get_terminal_size():\n\n with os.popen(\"stty size\", \"r\") as f:\n termsize = f.read().split()\n\n return int(termsize[0]), int(termsize[1])", "def get_term_dimensions():\n height, width = subprocess.check_output(SIZE).split()\n return int(width), int(height)", "def test_terminal_width_height():\n assert (80, 24) == terminal_size()", "def getTerminalSize():\n\n\timport os\n\tdef ioctl_GWINSZ(fd):\n\t\ttry:\n\t\t\timport fcntl, termios, struct\n\t\t\tcr = struct.unpack('hh', fcntl.ioctl(fd,\n\t\t\t\t\t\t\t\t\t\t\t\ttermios.TIOCGWINSZ,'1234'))\n\t\texcept:\n\t\t\treturn None\n\t\treturn cr\n\tcr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n\tif not cr:\n\t\ttry:\n\t\t\tfd = os.open(os.ctermid(), os.O_RDONLY)\n\t\t\tcr = ioctl_GWINSZ(fd)\n\t\t\tos.close(fd)\n\t\texcept:\n\t\t\tpass\n\tif not cr:\n\t\treturn 0, 0\n\treturn int(cr[1]), int(cr[0])", "def get_terminal_size(default=(80, 24)):\r\n try:\r\n import fcntl, termios\r\n except ImportError:\r\n return default\r\n try:\r\n ary = array('h', fcntl.fcntl(sys.stdin, termios.TIOCGWINSZ, chr(0) * 8))\r\n return ary[1], ary[0]\r\n except IOError:\r\n return default", "def get_terminal_size():\r\n\ttuple_xy = None\r\n\tif OS_NAME == \"Windows\":\r\n\t\ttuple_xy = _get_terminal_size_windows()\r\n\telif OS_NAME in [\"Linux\", \"Darwin\"] or OS_NAME.startswith(\"CYGWIN\"):\r\n\t\ttuple_xy = _get_terminal_size_linux()\r\n\telif tuple_xy is None:\r\n\t\ttuple_xy = 
_get_terminal_size_tput()\r\n\tif tuple_xy is None:\r\n\t\ttuple_xy = (80, 24)\r\n\treturn tuple_xy", "def terminal_size(self):\n try:\n _, columns = os.popen(\"stty size\", \"r\").read().split()\n return min(int(columns) - 10, 100)\n except ValueError:\n return self.default_terminal_size", "def getwinsize():\n if 'TIOCGWINSZ' in dir(termios):\n TIOCGWINSZ = termios.TIOCGWINSZ\n else:\n TIOCGWINSZ = 1074295912L\n s = struct.pack('HHHH', 0, 0, 0, 0)\n x = fcntl.ioctl(sys.stdout.fileno(), TIOCGWINSZ, s)\n return struct.unpack('HHHH', x)[0:2]", "def _size_term(self):\n curr_dim = self._screen.getmaxyx()\n dims = [50, 150]\n if curr_dim[0] < dims[0] or curr_dim[1] < dims[1]:\n sys.stdout.write(\"\\x1b[8;{rows};{cols}t\".format(rows=dims[0], cols=dims[1]))\n curses.resize_term(dims[0], dims[1])\n return dims\n else:\n return curr_dim", "def get_console_width(fallback=75):\n if test_if_ipython():\n return fallback\n try:\n _, width = subprocess.check_output(['stty', 'size'], stderr=subprocess.PIPE).split()\n except:\n width = fallback\n width = int(width)\n return width", "def console_width():\n try:\n return int(os.environ[\"COLUMNS\"])\n except (KeyError, ValueError):\n pass\n\n try:\n # Call the Windows API (requires ctypes library)\n from ctypes import windll, create_string_buffer\n h = windll.kernel32.GetStdHandle(-11)\n csbi = create_string_buffer(22)\n res = windll.kernel32.GetConsoleScreenBufferInfo(h, csbi)\n if res:\n import struct\n (bufx, bufy,\n curx, cury, wattr,\n left, top, right, bottom,\n maxx, maxy) = struct.unpack(\"hhhhHhhhhhh\", csbi.raw)\n return right - left + 1\n except ImportError:\n pass\n\n # Parse the output of stty -a\n if os.isatty(1):\n out = os.popen(\"stty -a\").read()\n m = re.search(r\"columns (\\d+);\", out)\n if m:\n return int(m.group(1))\n\n # sensible default\n return 80", "def termsize(self):\n try:\n with open(\"/dev/tty\") as tty:\n cmd = ['stty', 'size']\n lines, cols = Uprocess().check_output(cmd, stdin=tty).split()\n return (int(lines), int(cols))\n except (OSError, IOError):\n pass\n return (24, 80)", "def _get_term_size_tput():\n try:\n import subprocess\n proc = subprocess.Popen([\"tput\", \"cols\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n universal_newlines=True)\n output = proc.communicate(input=None)\n cols = int(output[0])\n proc = subprocess.Popen([\"tput\", \"lines\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE,\n universal_newlines=True)\n output = proc.communicate(input=None)\n rows = int(output[0])\n return (cols, rows)\n except: # pylint: disable=bare-except\n return None", "def _get_term_size_env():\n # type: () -> Optional[Any]\n try:\n return (int(os.environ['LINES']), int(os.environ['COLUMNS']))\n except (KeyError, ValueError):\n return None", "def get_console_width(default=80):\n # TODO(maruel): Implement Windows.\n try:\n _, columns = os.popen('stty size', 'r').read().split()\n except (IOError, OSError, ValueError):\n columns = default\n return int(columns)", "def setKnownConsoleSize(self, width, height):\n # Local import to avoid win32 issues.\n import tty\n class FakeFcntl(object):\n def ioctl(self, fd, opt, mutate):\n if opt != tty.TIOCGWINSZ:\n self.fail(\"Only window-size queries supported.\")\n return struct.pack(\"4H\", height, width, 0, 0)\n self.patch(cftp, \"fcntl\", FakeFcntl())", "def get_terminal_width(default_width=80):\n try:\n return int(os.environ[\"COLUMNS\"])\n except (KeyError, ValueError):\n return default_width", "def getwinsize(self):", "def get_size(self, defaults=None):\n def f():\n 
self.cursor_save()\n self.cursor_to(9999, 9999)\n x, y = self.cursor_get_pos()\n w = x + 1\n h = y + 1\n self.cursor_restore()\n return (w, h)\n tries = 0\n while tries < 3:\n try:\n return f()\n except:\n pass\n raise RuntimeError(\"Failed to get terminal size.\")", "def _get_term_size_posix():\n # type: () -> Union[Tuple[int, int], None]\n # This function follows a POSIX naming scheme, not Python's.\n # pylint: disable=invalid-name\n # Sometimes Pylint thinks termios doesn't exist or doesn't have certain members even when it does.\n # pylint: disable=no-member\n def ioctl_GWINSZ(fd):\n # type: (int) -> Union[Tuple[int, int], None]\n try:\n import fcntl\n import termios\n import struct\n dims = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'1234'))\n except: # pylint: disable=bare-except\n return None\n # pylint: disable=invalid-sequence-index\n result = cast(Optional[Tuple[int, int]], dims)\n return result\n dims = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not dims:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n dims = ioctl_GWINSZ(fd)\n os.close(fd)\n except: # pylint: disable=bare-except\n pass\n if not dims:\n return None\n return int(dims[1]), int(dims[0])", "def __calc_size(self):\r\n maxyx = self.stdscr.getmaxyx()\r\n self.termwidth = maxyx[1]\r\n self.termheight = maxyx[0]\r\n self.posx = 0\r\n self.posy = 0\r\n self.width = self.termwidth\r\n self.height = self.termheight\r\n self.calc_size()", "def get_screen_resolution() -> tuple:\n root = tkinter.Tk()\n root.withdraw()\n return root.winfo_screenwidth(), root.winfo_screenheight()", "def get_window_width_height(window_id):\n return commands.getoutput(\" xwininfo -id \"+window_id+\" | egrep \\\"Height|Width\\\" | cut -d: -f2 | tr -d \\\" \\\"\").split(\"\\n\")", "def get_display_size(screen_id: int = 0) -> Tuple[int, int]:\n display = pyglet.canvas.Display()\n screen = display.get_screens()[screen_id]\n return screen.width, screen.height", "def get_curr_screen_size():\n root = tk.Tk()\n root.update_idletasks()\n root.attributes('-fullscreen', True)\n root.state('iconic')\n size = (root.winfo_width(), root.winfo_height(),)\n root.destroy()\n return size", "def _get_screen_size():\n import PySide.QtGui\n rect = PySide.QtGui.QDesktopWidget().screenGeometry(-1)\n return [rect.width(), rect.height()]", "def get_size(self):\n result_str = subprocess.check_output([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'wm', 'size'\n ]).decode(DEFAULT_CHARSET)\n width, height = result_str.replace('\\n', '').replace('\\r', '').split(' ')[-1].split('x')\n return width, height", "def lines(self):\n return get_terminal_size()[0]", "def send_naws(self):\n return (self.shell.terminal_height, self.shell.terminal_width)", "def get_window_size(self):\n coordinate = self.run_command('shell wm size').replace(b'\\r\\r\\n', b'')\n coordinate = coordinate.decode('utf-8').replace('Physical size: ', '').split('x')\n if len(coordinate) != 2:\n raise ADBError('Error when detecting window size')\n return [int(x) for x in coordinate]", "def get_screen_size():\n screen = QDesktopWidget().screenGeometry()\n return screen.width(), screen.height()", "def columns(self):\n return get_terminal_size()[1]", "def getxy():\n if g.detectable_size:\n x, y = terminalsize.get_terminal_size()\n max_results = y - 4 if y < 54 else 50\n max_results = 1 if y <= 5 else max_results\n\n else:\n x, max_results = Config.CONSOLE_WIDTH.get, Config.MAX_RESULTS.get\n y = max_results + 4\n\n return XYTuple(x, y, max_results)", "def 
get_video_window_size(self):\n alloc = self.drawingarea.get_allocation()\n return (alloc.width, alloc.height)", "def show_terminal_warning():\n\n # clear terminal\n nuqql.win.MAIN_WINS[\"screen\"].clear()\n\n # check if terminal is big enough for at least one character\n max_y, max_x = nuqql.win.MAIN_WINS[\"screen\"].getmaxyx()\n if max_y < 1:\n return\n if max_x < 1:\n return\n\n # print as much of the error message as possible\n msg = \"Invalid terminal size. Please resize.\"[:max_x - 1]\n nuqql.win.MAIN_WINS[\"screen\"].addstr(0, 0, msg)", "def get_renderer_size() -> Tuple[int, int]:\n sdl_renderer = tcod.lib.TCOD_sys_get_sdl_renderer()\n assert sdl_renderer\n renderer_size = tcod.ffi.new(\"int[2]\")\n tcod.lib.SDL_GetRendererOutputSize(\n sdl_renderer, renderer_size, renderer_size + 1\n )\n return renderer_size[0], renderer_size[1]", "def getWindowSize(self, windowHandle='current'):\n cmdId = self.executeCommand(Command.GET_WINDOW_SIZE, {\"windowHandle\": windowHandle})\n return cmdId", "def width(self) -> int:\n return self.screen.getmaxyx()[1]", "def update_dimensions(self):\r\n # stores the old screen height for cleaning the screen\r\n old_w_height = self.w_height\r\n\r\n self.w_width, self.w_height = get_terminal_size()\r\n # see __init__\r\n self.w_width -= self.w_width % 2\r\n self.w_height -= self.w_height % 2\r\n\r\n # no need to clear screen if window size hasn't changed\r\n if old_w_height != self.w_height:\r\n self.clear_screen(old_w_height)", "def get_window_size():\n global windowSize\n windowSize = DRIVER.driver.get_window_size()\n return windowSize", "def _get_window_width(self):", "def getScreenDims(self):\n nes_lib.getScreenHeight.argtypes = [c_void_p]\n nes_lib.getScreenHeight.restype = c_int\n nes_lib.getScreenWidth.argtypes = [c_void_p]\n nes_lib.getScreenWidth.restype = c_int\n width = nes_lib.getScreenWidth(self.obj)\n height = nes_lib.getScreenHeight(self.obj)\n return (width, height)", "def get_window_size(self):\n raise NotImplementedError", "def screen_status(self):\n height, width = self.stdscr.getmaxyx()\n i_start = self.y_start * width + self.x_start\n\n return i_start, height, width", "def getDesktopSize(self):\n return convToUnits(self.desktopBytes, divby=1000)", "def get_width(vm_address):\n try:\n result = call_exec_daemon('getEnvVar', ['ProgramFiles(x86)'],\n host=vm_address)\n except Fault:\n return 32\n else:\n return 64 if '\\\\' in result else 32", "def _set_pty_size(self):\n assert self.master_fd is not None\n\n buf = array.array('h', [0, 0, 0, 0])\n fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)\n fcntl.ioctl(self.master_fd, termios.TIOCSWINSZ, buf)", "def getSize(self):\n return self.screen.get_size()", "def height(self) -> int:\n return self.screen.getmaxyx()[0]", "def get_reserved_space(self):\n reserved_space_ratio = .45\n max_reserved_space = 8\n _, height = click.get_terminal_size()\n return min(int(round(height * reserved_space_ratio)), max_reserved_space)", "def GetConsoleScreenBufferInfo(stream_id=STDOUT):\n handle = handles[stream_id]\n csbi = CONSOLE_SCREEN_BUFFER_INFO()\n success = windll.kernel32.GetConsoleScreenBufferInfo(\n handle, byref(csbi))\n if not success:\n raise WinError()\n return csbi", "def hr() -> None:\n width, _ = click.get_terminal_size()\n click.echo('-' * width)", "def get_term_height():\n return get_term_dimensions()[1]", "def _safe_pipe_properties(fd, use_tty=False):\n if not use_tty:\n return\n # due to some weird, long standing issue in Python, PTYs come out\n # replacing newline \\n with \\r\\n. 
This causes issues for raw unix\n # protocols, like git and ssh, which expect unix line endings.\n # see https://mail.python.org/pipermail/python-list/2013-June/650460.html\n # for more details and the following solution.\n props = xli.termios.tcgetattr(fd)\n props[1] = props[1] & (~xli.termios.ONLCR) | xli.termios.ONLRET\n xli.termios.tcsetattr(fd, xli.termios.TCSANOW, props)\n # newly created PTYs have a stardard size (24x80), set size to the same size\n # than the current terminal\n winsize = None\n if sys.stdin.isatty():\n winsize = xli.fcntl.ioctl(sys.stdin.fileno(), xli.termios.TIOCGWINSZ, b\"0000\")\n elif sys.stdout.isatty():\n winsize = xli.fcntl.ioctl(sys.stdout.fileno(), xli.termios.TIOCGWINSZ, b\"0000\")\n elif sys.stderr.isatty():\n winsize = xli.fcntl.ioctl(sys.stderr.fileno(), xli.termios.TIOCGWINSZ, b\"0000\")\n if winsize is not None:\n xli.fcntl.ioctl(fd, xli.termios.TIOCSWINSZ, winsize)", "def get_screen_size(self):\n return self.__screen_size", "def _window_size(self):\n width = self.cv.winfo_width()\n if width <= 1: # the window isn't managed by a geometry manager\n width = self.cv['width']\n height = self.cv.winfo_height()\n if height <= 1: # the window isn't managed by a geometry manager\n height = self.cv['height']\n return width, height", "def get_screen_height():\r\n\r\n logging.debug('get_screen_height()')\r\n\r\n height = win32api.GetSystemMetrics(1)\r\n logging.debug('get_screen_height - height = {}'.format(height))\r\n return height", "def getCanvasSize():\n\t\treturn canvas.winfo_width(), canvas.winfo_height()", "def get_curr_screen_geometry():\r\n root = tkinter.Tk()\r\n root.update_idletasks()\r\n root.attributes('-fullscreen', True)\r\n root.state('iconic')\r\n geometry = root.winfo_geometry()\r\n root.destroy()\r\n return geometry", "def maxSize():\n rect = pf.app.desktop().availableGeometry()\n maxh,maxw = rect.width(),rect.height()\n return maxh,maxw", "def measure_screen(screen_x=None, screen_y=None):\n env = os.environ\n def ioctl_GWINSZ(fd):\n try:\n cr = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))\n except:\n return\n return cr\n\n cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)\n if not cr:\n try:\n fd = os.open(os.ctermid(), os.O_RDONLY)\n cr = ioctl_GWINSZ(fd)\n os.close(fd)\n except:\n pass\n\n if not cr:\n cr = (env.get('LINES', 25), env.get('COLUMNS', 80))\n\n if screen_x == None:\n screen_x = int(cr[1])\n\n if screen_y == None:\n screen_y = int(cr[0])\n\n return screen_x, screen_y", "def height(self):\n return(self.SCREEN_H)", "def _GetViewportSize(self):\n return self.tab.EvaluateJavaScript(\n '[ window.innerWidth, window.innerHeight ]')", "def get_desktop_size(self):\n\n _ptr = ffi.new('SDL_DisplayMode *')\n check_int_err(lib.SDL_GetDesktopDisplayMode(self._index, _ptr))\n return (_ptr.w, _ptr.h)", "def get_current_resolution(self):\n return self.display_info[\"width\"], self.display_info[\"height\"]", "def output_size(self) -> int:\n return self.win_length", "def calc_size(self):\r\n self.height = HEIGHT_CON\r\n self.posy = self.termheight - self.height", "def size_with_window(self):\n return self.container['size_with_window']", "def get_term_width():\n return get_term_dimensions()[0]", "def check_window_size():\n \n wight = 870\n height = 519\n \n window = win32gui.FindWindow(MINECRAFT_CLASS_NAME, MINECRAFT_TITLE + MINECRAFT_VERSION)\n x0, y0, x1, y1 = win32gui.GetWindowRect(window)\n # x0 and y0 are initial points, upper left corner and lower left corner\n # then we need the difference between upper left 
corner and upper right corner to get the wight and\n # the difference between lower left corner and lower right corner to get the height\n \n w = x1 - x0\n h = y1 - y0\n \n if w is not wight or h is not height:\n win32gui.MoveWindow(window, x0, y0, wight, height, True)", "def image_size():\n return eval(subprocess(\"print camera_image_size()\"))", "def size():\n return int(os.environ['WORLD_SIZE'])", "def calc_size(self):\r\n self.height = self.termheight - HEIGHT_CON - HEIGHT_STATUS\r\n self.posy = HEIGHT_STATUS\r\n self.width = WIDTH_ORDERBOOK", "def check_size(height_max, width_max):\n def check_size_window(func):\n @wraps(func)\n def wrapped(self, *args, **kwargs):\n # Extract window size\n height, width = self.stdscr.getmaxyx()\n # Check size window\n if width >= width_max and height >= height_max:\n return func(self, *args, **kwargs)\n else:\n string_warning = \"jtop\"\n string_warning_msg = \"Change size window!\"\n size_window_width = \"Width: \" + str(width) + \" >= \" + str(width_max)\n size_window_height = \"Height: \" + str(height) + \" >= \" + str(height_max)\n try:\n height_c = int(height / 2)\n self.stdscr.addstr(height_c - 2, int((width - len(string_warning)) / 2), string_warning, curses.A_BOLD)\n self.stdscr.addstr(height_c - 1, int((width - len(string_warning_msg)) / 2), string_warning_msg, curses.A_BOLD)\n # Show size window\n if width < width_max:\n self.stdscr.addstr(height_c, int((width - len(size_window_width)) / 2), str(size_window_width), curses.color_pair(1))\n else:\n size_window_width = \"Width OK!\"\n self.stdscr.addstr(height_c, int((width - len(size_window_width)) / 2), size_window_width, curses.A_BOLD)\n if height < height_max:\n self.stdscr.addstr(height_c + 1, int((width - len(size_window_height)) / 2), str(size_window_height), curses.color_pair(1))\n else:\n size_window_height = \"Height OK!\"\n self.stdscr.addstr(height_c + 1, int((width - len(size_window_height)) / 2), str(size_window_height), curses.A_BOLD)\n # Set background for all menu line\n self.stdscr.addstr(height - 1, 0, (\"{0:<\" + str(width - 1) + \"}\").format(\" \"), curses.A_REVERSE)\n # Add close option menu\n self.stdscr.addstr(height - 1, 1, \"Q to close\", curses.A_REVERSE)\n except curses.error:\n pass\n return wrapped\n return check_size_window", "def main() -> None:\n window_flags = (\n tcod.lib.SDL_WINDOW_RESIZABLE | tcod.lib.SDL_WINDOW_MAXIMIZED\n )\n renderer_flags = tcod.lib.SDL_RENDERER_PRESENTVSYNC\n with init_sdl2(640, 480, None, window_flags, renderer_flags):\n console = tcod.console.Console(20, 4, order=\"F\")\n TEXT = \"Console with a fixed aspect ratio and integer scaling.\"\n console.print_box(0, 0, 0, 0, TEXT)\n while True:\n # Clear background with white.\n clear((255, 255, 255))\n # Draw the console to SDL's buffer.\n viewport = get_viewport(console, True, True)\n accumulate(console, viewport)\n # If you want you can use the FFI to do additional drawing here:\n ...\n # Present the SDL2 renderer to the display.\n present()\n\n for event in tcod.event.wait():\n if event.type == \"QUIT\":\n raise SystemExit()\n elif event.type == \"MOUSEMOTION\":\n # Mouse pixel coordinates will need to be converted to\n # tiles using the console and viewport as a reference.\n x, y = pixel_to_tile(event.pixel, console, viewport)\n console.tiles[\"bg\"] = (0, 0, 0, 255)\n console.tiles[\"fg\"] = (255, 255, 255, 255)\n if 0 <= x < console.width and 0 <= y < console.height:\n console.tiles[\"bg\"][x, y] = (255, 255, 255, 255)\n console.tiles[\"fg\"][x, y] = (0, 0, 0, 255)\n elif event.type 
== \"WINDOWRESIZED\":\n # You can change to a console of a different size in\n # response to a WINDOWRESIZED event if you want.\n ... # See resizable_console.py", "def winfo_screenheight(self):\n return self.height", "def get_screen_width():\r\n\r\n logging.debug('get_screen_width()')\r\n\r\n width = win32api.GetSystemMetrics(0)\r\n logging.debug('get_screen_width - width = {}'.format(width))\r\n return width", "def __init__(self, options=None):\n self.options = options\n if not self.options:\n self.options.border = '|~|'\n self.options.filler = ' '\n self.options.spacer = ' |#|'\n self.options.blocker = ' '\n self.working_res = False\n try:\n self.res_x, self.res_y = self._get_terminal_size_windows() if not None else self.getTerminalSize()\n self.working_res = True\n except Exception as exp:\n # cant know the screen size, might be visual studio or Jupyter\n # use backup printer that doesnt need screen size\n # not working in powershell either.\n # print(\"Failing to get screen size!\")\n pass", "def get_window_size(self):\n return self.__window_size", "def _raw_graph_window_dim(self):\n # self.y + 10: 1 for border, 9 for scrollbar\n return self.x+1., self.y+1.+9., self.w-2., self.h-2.-9.", "def get_dev_size(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetDevSize', self.handle)", "def columns(self):\n return int(ShellCommandOutput('tput cols'))", "def max_line(self):\n return self.screen.getmaxyx()[0] - 2 # Account for header size", "def resize(self):\r\n Win.resize(self)\r\n self.write(\"### console has been resized\")", "def get_terminal():\n terminal = Gtk.ScrolledWindow()\n terminal.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.ALWAYS)\n textview = Gtk.TextView()\n textview.set_wrap_mode(Gtk.WrapMode.WORD_CHAR)\n terminal.add(textview)\n return terminal, textview.get_buffer()", "def width(self):\n return self._vim.current.window.width" ]
[ "0.8626077", "0.8466061", "0.83579844", "0.82661015", "0.8189961", "0.8158154", "0.8115553", "0.8110067", "0.8009353", "0.79788095", "0.7913624", "0.78884804", "0.787112", "0.78315413", "0.7774899", "0.77129865", "0.7648327", "0.7609278", "0.760888", "0.7603599", "0.7578715", "0.7563344", "0.75603276", "0.7548958", "0.7497893", "0.74483687", "0.73373806", "0.7254422", "0.7252471", "0.7103836", "0.697771", "0.6953456", "0.6942451", "0.6749415", "0.67465496", "0.6578866", "0.6535182", "0.6519294", "0.64739966", "0.6449531", "0.64250445", "0.6394387", "0.6386315", "0.63168114", "0.63076526", "0.62492853", "0.6247961", "0.6206505", "0.61585516", "0.6121945", "0.611811", "0.60939854", "0.59913266", "0.5960133", "0.5934411", "0.59049267", "0.5904872", "0.58912605", "0.58835185", "0.58779025", "0.5842462", "0.58296597", "0.581086", "0.58048284", "0.5783103", "0.5767889", "0.57523733", "0.5742996", "0.5741215", "0.5719599", "0.57138306", "0.5671468", "0.567061", "0.56547546", "0.56336933", "0.563146", "0.5608016", "0.56053", "0.559316", "0.55844223", "0.55742973", "0.5570089", "0.5541088", "0.55352867", "0.552289", "0.5501867", "0.5490592", "0.5482082", "0.54641306", "0.5445315", "0.54306376", "0.54278386", "0.5411203", "0.54097986", "0.5400321", "0.53983086", "0.53949594", "0.5390832", "0.53802305", "0.53664815" ]
0.7640822
17
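The negatives in the row above all probe the terminal size through ioctl/TIOCGWINSZ, tput, stty or the COLUMNS/LINES environment variables, usually with an (80, 24) fallback. A minimal standard-library sketch of the same idea (assuming Python 3.3+; the wrapper name query_terminal_size is chosen purely for illustration):

import shutil


def query_terminal_size(fallback=(80, 24)):
    # shutil.get_terminal_size() honours COLUMNS/LINES first, then asks the
    # controlling terminal, and returns the fallback when both fail.
    size = shutil.get_terminal_size(fallback=fallback)
    return size.columns, size.lines


if __name__ == "__main__":
    cols, rows = query_terminal_size()
    print("terminal is %d columns x %d rows" % (cols, rows))

This single call covers the ioctl/tput/stty cascade that the hand-rolled helpers above implement by hand.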
Print a list of strings in columns, blue for directories, green for trees
def roolsPrintSimpleLs(keyList,indent): # This code is adaptated from the pprint_list function here : # http://stackoverflow.com/questions/25026556/output-list-like-ls # Thanks hawkjo !! if len(keyList) == 0: return (term_width, term_height) = getTerminalSize() term_width = term_width - indent min_chars_between = 2 min_element_width = min( len(key.GetName()) for key in keyList ) \ + min_chars_between max_element_width = max( len(key.GetName()) for key in keyList ) \ + min_chars_between if max_element_width >= term_width: ncol,col_widths = 1,[1] else: # Start with max possible number of columns and reduce until it fits ncol = min( len(keyList), term_width / min_element_width ) while True: col_widths = \ [ max( len(key.GetName()) + min_chars_between \ for j, key in enumerate(keyList) if j % ncol == i ) \ for i in range(ncol) ] if sum( col_widths ) <= term_width: break else: ncol -= 1 for i, key in enumerate(keyList): if i%ncol == 0: write("",indent) # indentation # Don't add spaces after the last element of the line or of the list if (i+1)%ncol != 0 and i != len(keyList)-1: if not IS_TERMINAL: write( \ key.GetName().ljust(col_widths[i%ncol])) elif isDirectoryKey(keyList[i]): write( \ isSpecial(ANSI_BLUE,key.GetName()).ljust( \ col_widths[i%ncol] + ANSI_BLUE_LENGTH)) elif isTreeKey(keyList[i]): write( \ isSpecial(ANSI_GREEN,key.GetName()).ljust( \ col_widths[i%ncol] + ANSI_GREEN_LENGTH)) else: write(key.GetName().ljust(col_widths[i%ncol])) else: # No spaces after the last element of the line or of the list if not IS_TERMINAL: write(key.GetName()) elif isDirectoryKey(keyList[i]): write(isSpecial(ANSI_BLUE, key.GetName())) elif isTreeKey(keyList[i]): write(isSpecial(ANSI_GREEN, key.GetName())) else: write(key.GetName()) write('\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_directory_table(self):\n for row in range(self.directory_table.shape[0]):\n for column in range(self.directory_table.shape[1]):\n if column == self.directory_table.shape[1] - 1:\n print(self.directory_table[row][column])\n else:\n print(self.directory_table[row][column], end=',', sep='')", "def printDirectoryLists():\n # Print the current directory of data being reduced.\n logging.info(\"\\n#################################################################################\")\n logging.info(\" \")\n logging.info(\" COMPLETE - sorting. I've updated scienceDirectoryList,\")\n logging.info(\" telluricDirectoryList and calibrationDirectoryList in\")\n logging.info(\" runtimeData/config.cfg with the following values:\")\n logging.info(\"\")\n logging.info(\"#################################################################################\\n\")\n\n with open('./config.cfg') as config_file:\n options = ConfigObj(config_file, unrepr=True)\n logging.info(\"\\nScience Directory List: \")\n for i in range(len(options['scienceDirectoryList'])):\n logging.info(options['scienceDirectoryList'][i])\n logging.info(\"\\nTelluric Directory List: \")\n for i in range(len(options['telluricDirectoryList'])):\n logging.info(options['telluricDirectoryList'][i])\n logging.info(\"\\nCalibration Directory List: \")\n for i in range(len(options['calibrationDirectoryList'])):\n logging.info(options['calibrationDirectoryList'][i])", "def paths_print(atree):\n\n l = atree.pathFromHere_explore('/')\n for d in l:\n print(d)", "def print_tree(self, paths=None, color=True):\n if not paths:\n paths = ('/', )\n is_first = True\n ns_color = COLOR_YELLOW if color else ''\n end_color = COLOR_NORM if color else ''\n for root_mount_id, namespaces in sorted(self.ns_for_root_id.items()):\n if is_first:\n is_first = False\n else:\n print(\"\")\n assert namespaces\n if len(namespaces) >= 2:\n root_ns = self.items[root_mount_id].mount_ns\n print(\"Namespaces {0}{2}{1} starting with {0}{3}{1}\".format(\n ns_color,\n end_color,\n ', '.join(sorted((format_ns(ns) for ns in namespaces))),\n self.format_ns_with_processes(root_ns),\n ))\n else:\n root_ns = list(namespaces)[0]\n print(\"{0}Namespace {2}{1}\".format(\n ns_color,\n end_color,\n self.format_ns_with_processes(root_ns),\n ))\n self.print_tree_entry(root_mount_id, root_ns, paths, color)", "def printtable(dict):\n for dirname in dict:\n if dict[dirname][2] != 0:\n sys.stdout.write(\"{0:4} {1:4} {2:4} {3}\\n\".format(dict[dirname][2], dict[dirname][0], dict[dirname][1], dirname))", "def show_paths(self):\r\n print(\"------------------------\")\r\n print(\"######### ALL PATHS #########\")\r\n\r\n if self.size == 0:\r\n print(\"Empty tree!\")\r\n else:\r\n for i in range(1, self.root.size_tree + 1):\r\n node = self.select(i)\r\n if node.size_tree == 1:\r\n print(\"|\" + self.str_single_path(node))\r\n\r\n print(\"------------------------\")", "def print_tree(tree, indent=''):\n\n for branch in tree:\n if type(branch) == list and branch != []:\n print_tree(branch, indent + ' ')\n else:\n if branch != []:\n print(indent + str(branch))", "def print_tree(self):\n return \"\"", "def print_tree(tree):\n if not tree:\n print None\n return\n \n if tree.children:\n print 'Directory hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))\n print 'Contents:'\n for name, subtree in tree.children.iteritems():\n print\n print name\n print_tree(subtree)\n \n else:\n print 'File hash = {}'.format(base64.urlsafe_b64encode(tree.dmt_hash))", "def 
print_tree(self):\n\t\tprint(self.__print_tree('', True, ''))", "def print_folders(conn):\n for f in conn.list():\n print (\"\\t\", f)", "def print_cr_tree(self, tree):\n str = ''\n try:\n if not tree: return \"None\"\n else:\n for x in tree: str += \" \" + x.name\n except TypeError: return tree.name\n return str", "def display_viz(self, width=60, label_max_len=3):\n output = ''\n last_children = [(self, width)] # Nodes to be added next loop\n for i in range(self.depth + 1):\n depth_output = ''\n depth_children = []\n for (node, subtree_width) in last_children:\n label = ' ' if node is None else str(node.label)[:label_max_len]\n this_output = label.center(subtree_width)\n this_children = [] # Children from this item\n cum_width = 0 # Cumulative character-width of all subtrees\n cum_cols = 0 # Cumulative maximum node-width of all subtrees\n # If no children, propogate the empty spaces below terminal\n if not node or not node.children:\n cum_cols += 1\n cum_width += subtree_width\n this_children.append((None, subtree_width))\n # If children, fill-in this_output with '_' to first/last child label\n else:\n children_cols = [c.n_cols for c in node.children]\n total_cols = sum(children_cols)\n for child, child_cols in zip(node.children, children_cols):\n # Convert each child's 'cols' into character spacing\n cum_cols += child_cols\n cum_ratio = cum_cols / total_cols\n target_width = math.ceil(cum_ratio * subtree_width) - cum_width\n remaining_width = subtree_width - cum_width\n child_width = min(target_width, remaining_width)\n # Add record and update tracked values\n this_children.append((child, child_width))\n cum_width += child_width\n # Add lines to the output\n start_padding = this_children[0][1] // 2 - 1 # Midpoint of first child\n end_padding = subtree_width - (this_children[-1][1] // 2) # ..of last child\n with_line = ''\n for i, v in enumerate(this_output):\n with_line += '_' if (i > start_padding and i < end_padding and v == ' ') else v\n this_output = with_line\n depth_output += this_output\n depth_children += this_children\n last_children = depth_children\n if last_children:\n depth_output += '\\n'\n output += depth_output\n return output", "def print_tree(self, tree, nodes):\n\t\tprint(self.display(tree, nodes, '', True, ''))", "def main(argv):\n\n opts = docopt.docopt(__doc__, version='ftree 0.1')\n\n dirs = opts['<dir>'] or ['.']\n for d in dirs:\n #print d\n print ListTree(d)\n\n return 0", "def show(dfs):\n\n for df in dfs:\n print('{} -> {}'.format(df[0], df[1]))", "def showFileTree():\n\treturn 0", "def print_tree(tree, pref=\"\"):\r\n leaf = \"|_____> \"\r\n top = \"|_______\"\r\n son1 = \"| \"\r\n son2 = \" \"\r\n width = len(top)\r\n\r\n a = \"\"\r\n if len(tree) == 3:\r\n if (pref == \"\"):\r\n a += pref + str(tree[0]) + \"\\n\"\r\n else:\r\n a += pref[:-width] + top + str(tree[0]) + \"\\n\"\r\n a += print_tree(tree[1], pref + son1)\r\n a += print_tree(tree[2], pref + son2)\r\n return a\r\n\r\n else:\r\n return (pref[:-width] + leaf + str(tree) + \"\\n\")", "def directory_tree(directory: pathlib.Path) -> None:\n directory_tree_string = ''\n # Turn directory into a pathlib.Path object\n # if not already one\n if not isinstance(directory, pathlib.Path):\n directory = pathlib.Path(directory)\n #print(f'+ {directory}')\n directory_tree_string += f'\\n+ {directory}'\n for path in sorted(directory.rglob('*')):\n depth = len(path.relative_to(directory).parts)\n spacer = ' ' * depth\n #print(f'{spacer}+ {path.name}')\n directory_tree_string += f'\\n{spacer}+ {path.name}'\n return 
directory_tree_string", "def __repr__ (self, depth=None):\n\t\ts=[];add=s.append\n\t\t\n\t\tadd (\"%s%s\" % (myglobals.getIndent(self.level), self.name))\n\t\tif depth is None or self.level < depth:\n\t\t\tfor status in self.selected:\n\t\t\t\tobj = status.fsObj\n\t\t\t\t# if obj.level > depth:\n\t\t\t\t\t# # print 'level (%d) exceeds depth, skipping' % obj.level\n\t\t\t\t\t# continue\n\t\t\t\tif isinstance (obj, WorkingDirectory):\n\t\t\t\t\t# print \"DIRECTORY %s\" % obj.name\n\t\t\t\t\tif not obj.selected.isempty():\n\t\t\t\t\t\tadd (str(obj))\n\t\t\t\telif isinstance (obj, JloFile):\n\t\t\t\t\tif os.path.exists(obj.path):\n\t\t\t\t\t\tadd (\"%s (%s)!!!\" % ( str(obj), status.flag))\n\t\t\t\t\t\t# add (\"%s%s (%s)!!!\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\t\telse:\n\t\t\t\t\t\tadd (\"%s%s (%s)???\" % (myglobals.getIndent(self.level), str(obj), status.flag))\n\t\t\t\telse:\n\t\t\t\t\t## missing directory\n\t\t\t\t\tadd (\"%s%s (missing)##\" % (myglobals.getIndent(self.level+1), obj.name))\n\t\treturn '\\n'.join (s)", "def recursifTreePrinter(tree,indent):\n listOfBranches = tree.GetListOfBranches()\n if len(listOfBranches) > 0: # Width informations\n maxCharName = max([len(branch.GetName()) \\\n for branch in listOfBranches])\n maxCharTitle = max([len(branch.GetTitle()) \\\n for branch in listOfBranches])\n dic = { \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":maxCharTitle+4, \\\n \"memoryWidth\":1}\n for branch in listOfBranches: # Print loop\n rec = \\\n [branch.GetName(), \\\n \"\\\"\"+branch.GetTitle()+\"\\\"\", \\\n str(branch.GetTotBytes())]\n write(TREE_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n recursifTreePrinter(branch,indent+2)", "def print_dir_tree(dir_path):\n top_path = Path(dir_path)\n if Path(top_path).exists() and Path(top_path).is_dir():\n print(f'+ {top_path}')\n paths = [p for p in sorted(top_path.rglob('*')) if not path_is_hidden(p)]\n for path in paths:\n depth = len(path.relative_to(top_path).parts)\n spacer = ' ' * depth\n print(f'{spacer}+ {path.name}')\n\n else:\n print(\"The path {} is not a directory.\".format(dir_path))", "def print_directory_content(path):\n print(path)\n for child in os.listdir(path):\n child_path = os.path.join(path, child)\n if os.path.isdir(child_path):\n print_directory_content(child_path)\n else:\n print(child)", "def printfoldertree(folder, indent=''):\n print indent + folder.name.get().encode('utf8')\n for folder in folder.folders.get():\n printfoldertree(folder, indent + '\\t')", "def print_tree(tree, depth=0):\n print('+','--'*depth,tree[0])\n if isinstance(tree[1], str):\n print('|',' '*depth,'->',tree[1])\n return\n if isinstance(tree[1],Terminal):\n print('|',' '*depth,'->',repr(tree[1]))\n return\n for subtree in tree[1]:\n print_tree(subtree, depth+1)", "def _display_aux(self):\n # Ref: https://stackoverflow.com/questions/34012886/print-binary-tree-level-by-level-in-python/40885162\n # No child.\n if self.right is None and self.left is None:\n line = '%s' % self.value\n width = len(line)\n height = 1\n middle = width // 2\n return [line], width, height, middle\n\n # Only left child.\n if self.right is None:\n lines, n, p, x = self.left._display_aux()\n s = '%s' % self.value\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s\n second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '\n shifted_lines = [line + u * ' ' for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2\n\n # Only right child.\n if self.left is None:\n lines, n, p, 
x = self.right._display_aux()\n s = '%s' % self.value\n u = len(s)\n first_line = s + x * '_' + (n - x) * ' '\n second_line = (u + x) * ' ' + '\\\\' + (n - x - 1) * ' '\n shifted_lines = [u * ' ' + line for line in lines]\n return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2\n\n # Two children.\n left, n, p, x = self.left._display_aux()\n right, m, q, y = self.right._display_aux()\n s = '%s' % self.value\n u = len(s)\n first_line = (x + 1) * ' ' + (n - x - 1) * '_' + s + y * '_' + (m - y) * ' '\n second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\\\' + (m - y - 1) * ' '\n if p < q:\n left += [n * ' '] * (q - p)\n elif q < p:\n right += [m * ' '] * (p - q)\n zipped_lines = zip(left, right)\n lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zipped_lines]\n return lines, n + m + u, max(p, q) + 2, n + u // 2", "def list_branches():\n a = App()\n print(tabulate(a.list_branches(), tablefmt=\"fancy_grid\"))", "def head():\r\n\tfor i in range(SIZE):\r\n\t\tfor j in range(SIZE*2+2):\r\n\t\t\tif j == (SIZE-i):\r\n\t\t\t\tfor k in range(i+1):\r\n\t\t\t\t\tprint('/', end='')\r\n\t\t\t\tfor k in range(i+1):\r\n\t\t\t\t\tprint('\\\\', end='')\r\n\t\t\tprint(' ', end='')\r\n\t\tprint('')", "def show_tree(self):\n G, vertex_dict = self.tree().graph()\n root = self.tree().root()\n vertical_list = []\n horizontal_list = []\n no_component_list = []\n for i, xi in vertex_dict.items():\n if xi.is_equal(root):\n root_index = i\n if self.is_component(xi):\n if xi.type() == \"II\":\n vertical_list.append(i)\n else:\n horizontal_list.append(i)\n print(i, \": \", xi)\n else:\n no_component_list.append(i)\n vertex_colors = {'red': vertical_list, 'blue': horizontal_list,\n 'grey': no_component_list}\n G.show(vertex_colors=vertex_colors, tree_root=root_index, layout='tree')", "def print_bfs(self):\n visit_order = self.bfs()\n s = \"Tree (from BFS)\\n\"\n previous_level = -1\n for i in range(len(visit_order)):\n node, level = visit_order[i]\n if level == previous_level:\n s += \" | \" + str(node) \n else:\n s += \"\\n\" + str(node)\n previous_level = level\n \n return s", "def display(self, tree, level = 0):\n\t\tresult = \"\"\n\t\tfor name, node in tree.soon:\n\t\t\tresult += \" \"*level+repr(node)+\"\\n\"\n\t\t\tresult += self.display(tree.getSoon(name),level + 1)\n\t\treturn result", "def tree_print(clf, X):\n tlevel = _tree_rprint('', clf, X.columns, clf.classes_)\n print('<',end='')\n for i in range(3*tlevel - 2):\n print('-',end='')\n print('>')\n print('Tree Depth: ',tlevel)", "def print_bi_tree(self):\n\n to_print = [self]\n # current = None\n\n while to_print:\n current = to_print.pop(0)\n if current:\n print(f'\\t{current.data}')\n to_print.append(current.left)\n to_print.append(current.right)", "def tree(ctx):\n hokusai.print_command_tree(ctx.find_root().command)", "def print_recursive(self, indents):\n\n\t\tind = \"\\t\"\n\t\toutput = indents * ind + self.name\n\t\tprint(output)\n\t\tfor i in self.children:\n\t\t\ti.print_recursive(indents+1)", "def tree(ctx):\n root_cmd = _build_command_tree(ctx.find_root().command)\n _print_tree(root_cmd)", "def print_directory_contents(path):\n if os.path.isdir(path):\n children = os.listdir(path)\n for child in children:\n child_path = os.path.join(path, child)\n print_directory_contents(child_path)\n else:\n print(path)\n directories.append(path)\n\n return directories", "def display(items):\n\n # LOC, COMMENT, ...\n # (same as keys of TYPE_OF_LINE, but better to only rely on items here)\n what = next(iter(items))[1]\n\n # 
Headers\n print(bcolors.BOLD\n +(\"{:<30}\"+\":{:>10}\"*len(what)).format(\"path\", *what)\n +bcolors.ENDC)\n\n # Lines\n for k,v in items:\n print((bcolors.OKGREEN if v[\"LOC\"] == 0\n else bcolors.FAIL if v[\"COMMENTS\"] == 0\n else bcolors.WARNING if v[\"COMMENTS\"]/v[\"LOC\"] < 0.2\n else bcolors.OKGREEN )\n +(\"{:<30}\"+\":{:>10}\"*len(v)).format(k, *v.values())\n + bcolors.ENDC)", "def label(item):\n if isinstance(item, Path):\n if item.is_dir():\n return f\"{Fore.BLUE}{Style.BRIGHT}{item}{Style.RESET_ALL}\"\n return f\"{Style.BRIGHT}{Fore.BLUE}{item.parent}/{Fore.MAGENTA}{item.name}{Style.RESET_ALL}\"\n return str(item)", "def pytree(directory: Path, hidden: bool):\n d = Path(directory)\n printDirContents(d, show_hidden=hidden)", "def path_show(args):\n print(header(\"$PATH Components\"))\n loop_fmt = \"{pad}{color}{path}\"\n pad = 4\n\n cnt = 0\n for part in os.environ[\"PATH\"].split(\":\"):\n color = u\"\"\n if args.color:\n color = CODES[cnt]\n cnt = (cnt + 1) % len(CODES)\n\n print(loop_fmt.format(pad=pad * \" \", color=color, path=part))\n if args.nowarn:\n continue\n\n for warn in check_path_folder(part):\n print(\"{}X {}\".format(pad * 2 * \" \", warn))", "def print_tree(self):\n out = \"\"\n for i in range(self.level):\n out += ' |'\n out += '___'\n out += str(self.action)\n if self.action is None:\n print \"None\"\n else:\n print out\n for child in self.children:\n child.print_tree()", "def trees (type, climate):\n formatted = f\"{type} {climate}\"\n return formatted.title()", "def pretty_runlist(runlist):\n if not runlist:\n return [' empty']\n output = list()\n current_kind = str()\n for i, entry in enumerate(runlist):\n if current_kind != entry['kind']:\n output.append(entry['kind'] + ':')\n current_kind = entry['kind']\n path = entry['command']\n basename = os.path.splitext(os.path.basename(path))[0]\n line = ' %d %s' % (i+1, basename)\n line = line.ljust(20)\n line += '\"%s\"' % pretty_path(path)\n output.append(line)\n return output", "def display_all_paths(taxonomy):\n for i,entry in enumerate(taxonomy):\n print \"For nodeId : {} :: NodeName : {} \" .format(entry['nodeId'], entry['nodeName'])\n parentId = entry['parentId']\n parentName = entry['parentName']\n while parentId != None:\n print \"ParentId : {} :: ParentName : {}\" .format(parentId, parentName)\n # Search for nodeId == parentId\n for temp in taxonomy:\n if temp['nodeId'] == parentId:\n parentId = temp['parentId']\n parentName = temp['parentName']\n break\n if i == 5:\n break", "def print_tree(t, indent=0):\n print(' ' * indent + str(label(t)))\n for b in branches(t):\n print_tree(b, indent + 1)", "def display_tree(self, tree_node, spacing=\"\"):\n if tree_node is None:\n return\n else:\n print(spacing + str(tree_node.val))\n spacing += \" \"\n self.display_tree(tree_node.left, spacing)\n self.display_tree(tree_node.right, spacing)", "def label_dir(self):\n for lblname in self._vallabs:\n print(lblname)", "def handle_short_info(files, directories, args):\n result_info = []\n # Define the width of columns\n max_length = 0\n if directories:\n max_length = max(max(len(item) for item in directories[d])\n for d in directories)\n if files:\n max_length = max(max_length, max(len(item) for item in files))\n col_width = max_length + 1\n terminal_width = shutil.get_terminal_size().columns\n if args.format == 'single-column' or args.one:\n columns = 1\n else:\n columns = terminal_width // (max_length + 1) or 1\n\n if not files and len(directories) == 1:\n d = list(directories.keys())[0]\n 
result_info.extend(handle_files_group(directories[d],\n columns, col_width, args))\n log.debug(result_info)\n return result_info\n\n if files:\n result_info.extend(handle_files_group(files, columns, col_width, args))\n for d in directories:\n result_info.append(f'{d}:')\n result_info.extend(handle_files_group(directories[d],\n columns, col_width, args))\n log.debug(result_info)\n return result_info", "def print_dfs_output(G, d, f, pi):\n V = G[0]\n print (\" d f pi\")\n for v in V:\n print (\" {: <5}{: <5}{: <5}{: <5}\".format(v, d[v], f[v], pi[v]))", "def get_list():\n\n print(f\"Корневой каталог: {config_tools.NAME_PATH}\")\n for dirpath, dirnames, filenames in os.walk(config_tools.NAME_PATH):\n # перебрать каталоги\n for dirname in dirnames:\n print(\"Каталог:\", os.path.join(dirpath, dirname))\n # перебрать файлы\n for filename in filenames:\n print(\"Файл:\", os.path.join(dirpath, filename))", "def render_board(board):\n for line in board:\n print(' '.join(line))", "def printDirContents(directory: Path, level=0, show_hidden=False):\n if show_hidden:\n children = directory.glob(\"./*\")\n else:\n children = directory.glob(\"./[!.]*\")\n dirs = []\n files = []\n for node in children:\n if node.is_dir():\n dirs.append(node)\n if node.is_file():\n files.append(node)\n for d in sorted(dirs):\n printSeperator(level)\n printItem(d.name)\n printDirContents(d, level + 1)\n for f in sorted(files):\n printSeperator(level)\n printItem(f.name)\n\n printSeperator(level, end='\\n')", "def treeview(data,style='unicode'):\n\t# note that dumping to YAML is a great alternative\n\tif style=='unicode': \n\t\t# protect against TeeMultiplexer here because it cannot print unicode to the log file\n\t\tdo_swap_stdout = sys.stdout.__class__.__name__=='TeeMultiplexer'\n\t\tdo_swap_stderr = sys.stderr.__class__.__name__=='TeeMultiplexer'\n\t\tif do_swap_stdout: \n\t\t\thold_stdout = sys.stdout\n\t\t\t#! assume fd1 is the original stream\n\t\t\tsys.stdout = sys.stdout.fd1\n\t\tif do_swap_stderr: \n\t\t\thold_stderr = sys.stderr\n\t\t\t#! 
assume fd1 is the original stream\n\t\t\tsys.stderr = sys.stderr.fd1\n\t\t# show the tree here\n\t\tasciitree(data)\n\t\t# swap back\n\t\tif do_swap_stderr: sys.stderr = hold_stderr\n\t\tif do_swap_stdout: sys.stdout = hold_stdout\n\telif style=='json': return print(json.dumps(data))\n\telif style=='pprint': \n\t\timport pprint\n\t\treturn pprint.pprint(data)\n\telse: raise Exception('invalid style %s'%style)", "def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp", "def getAllCWDs():\n return [\"%01d%01d%s\" % (card, pair, dom)\n for card in range(MAXCARDS)\n for pair in range(MAXPAIRS)\n for dom in DOMLABELS]", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def print_file(list):\n chr_name_list = ['SL2.40ch00','SL2.40ch01','SL2.40ch02','SL2.40ch03','SL2.40ch04','SL2.40ch05','SL2.40ch06','SL2.40ch07','SL2.40ch08','SL2.40ch09','SL2.40ch10','SL2.40ch11','SL2.40ch12']\n for index,chr_list in enumerate(list):\n if chr_list:\n chr = chr_name_list[index]\n for loci in chr_list:\n print \"%s\\t%d\\t%d\\t%s\\t%d\" % (chr,loci[0],loci[1],'\\t'.join(loci[2]),len(loci[2])-loci[2].count('0'))", "def df_print(p_df):\n l_gr = p_df['Group']\n l_number = p_df['Number']\n l_string = p_df['String']\n\n for i in range(l_gr.count()):\n print(\"=== {} ===\".format(str(l_gr[i])))\n for j in range(len(l_number[i])):\n print('{}. {}'.format(l_number[i][j], l_string[i][j]))", "def print_tree(t):\r\n if (t==None):\r\n return \r\n else:\r\n print_tree(left(t))\r\n print(value(t),end=\" \")\r\n print_tree(right(t))", "def pprint_grid(grid):\n print(\"\\n\".join(\" \".join(str(r) for r in g) for g in grid))", "def report_path_sanity(list_of_paths):\n sanity_truth_table = [[\n path,\n unicode_booleans(os.path.exists(path)),\n unicode_booleans(os.path.isfile(path)),\n unicode_booleans(os.path.isdir(path))\n ] for path in list_of_paths]\n\n cols = ['Path', 'Exists', 'File', 'Folder']\n\n sanity_table = pd.DataFrame(sanity_truth_table, columns=cols)\n sanity_table.set_index('Path')\n return tabulate(sanity_table, headers=cols)", "def tree():\n nobv.visual_tree()", "def visualize_tree(root):\n _visualize_tree(root, [], 0, '-')", "def tree(self, depth_index=0):\r\n print(self.tree_str(depth_index))", "def print_tree(self):\n stack = [(self.root, 0, 0)] # (node, child no., tabs)\n ntabs = 0\n while len(stack):\n n, i, tabs = stack.pop()\n if len(n.branch):\n if i>=1 and i==len(n.children)-1:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': >' + str(n.branch[i-1]))\n else:\n print(tabs*'\\t' + 'axis-' + str(n.axis) + ': <=' + str(n.branch[i]))\n stack.append((n, i+1, tabs))\n if i<len(n.children):\n stack.append((n.children[i], 0, tabs+1))\n else:\n avg = np.dot(n.probabilities[:,0], n.probabilities[:,1])\n print(tabs*'\\t' + 'Label: ' + str(avg) + '\\n')", "def _print_tree(self, tree, current_depth=0):\n if 'surv' in tree:\n self._print_with_depth(tree['times'], current_depth)\n return\n self._print_with_depth(\n \"{0} > {1}\".format(self.column_names[tree['feature']],\n tree['threshold']),\n current_depth)\n self._print_tree(tree['left'], current_depth + 1)\n self._print_tree(tree['right'], current_depth + 1)", "def print_tree(tree, indent=0):\n for c in tree.children:\n 
print \" \" * indent, \"-->\", c.name\n \n if c.children != []:\n print_tree(c, indent+1)", "def display(self, contents=False, recurse=False): # DirObj.display\n if recurse:\n for name, entry in self.subdirs.iteritems():\n entry.display(contents, recurse)\n if contents:\n for name, entry in self.files.iteritems():\n entry.display(contents, recurse);\n print '# Directory\\t' + str(self.deleted) + '\\t' + str(self.ignore) + '\\t' + str(self.depth) + '\\t' + self.hexdigest + ' ' + self.pathname", "def make_tree_doc(root):\n res = []\n\n desc = extract_descriptions(root)\n tree = make_tree(root)\n tree_full = make_tree(root, full=True)\n\n for (branch_full, branch) in zip(tree_full.split(\"\\n\"), tree.split(\"\\n\")):\n if branch_full:\n filename = branch_full.split()[-1].strip()\n if filename in desc:\n branch += \" \\t# \" + desc[filename]\n res.append(branch)\n return \"\\n\".join(res)", "def pprint_nodes(subtrees):\n def indent(s,type=1):\n x = s.split(\"\\n\")\n r = \"+-%s\\n\"%x[0]\n for a in x[1:]:\n if a==\"\": continue\n if type==1:\n r += \"| %s\\n\"%a\n else:\n r += \" %s\\n\"%a\n return r\n if len(subtrees)==0: return \"\"\n f=\"\";\n for a in subtrees[:-1]:\n f += indent(a)\n f += indent(subtrees[-1],2)\n return f", "def print_table(table):\r\n print('/-----------------------------------------------------------------------------------\\\\')\r\n for item in table:\r\n\r\n while len(item[1]) <= 22:\r\n item[1] += ' '\r\n\r\n while len(item[2]) <= 27:\r\n item[2] += ' '\r\n\r\n while len(item[0]) <= 15:\r\n item[0] += ' '\r\n\r\n print('| '+item[0]+' | '+item[1]+'| '+item[2]+' |')\r\n\r\n print('\\\\-----------------------------------------------------------------------------------/')", "def printPath(path):\n result =''\n for i in range(len(path)):\n result = result + str(path[i])\n if i != len(path) -1:\n result = result + '->'\n return result", "def print_grid(grid):\n\tprint(\"\")\n\twall = \"+------\"*len(grid[0])+\"+\"\n\tprint(wall)\n\tfor row in grid:\n\t\tmeat = \"|\".join(COLORS[val] if val else \" \"*6 for val in row)\n\t\tprint(\"|{}|\".format(meat))\n\t\tprint(wall)", "def lower():\r\n\tfor i in range(SIZE):\r\n\t\tfor j in range(SIZE*2+2):\r\n\t\t\tif j == 0 or j == SIZE*2+1:\r\n\t\t\t\tprint('|', end='')\r\n\t\t\telif (i+j) == SIZE*2:\r\n\t\t\t\tprint('/', end='')\r\n\t\t\telif j-i == 1:\r\n\t\t\t\tprint('\\\\', end='')\r\n\t\t\telif 1+i < j < SIZE*2-i:\r\n\t\t\t\tif (i+j) % 2 == 1:\r\n\t\t\t\t\tprint('\\\\', end='')\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint('/', end='')\r\n\t\t\telse:\r\n\t\t\t\tprint('.', end='')\r\n\t\tprint('')", "def __str__(self):\n if len(self.children) == 0:\n return self.val\n ret = [\n self.val]\n for child in self.children:\n ret += [ '\\t' + child_s for child_s in str(child).split('\\n') ]\n\n return ('\\n').join(ret)", "def __str__(self) -> str:\n\n if not self.root:\n return 'Empty RB Tree'\n\n root, bfs_queue, height = self.root, queue.SimpleQueue(), self.root.subtree_height()\n track = {i: [] for i in range(height + 1)}\n bfs_queue.put((root, 0, root.parent))\n\n while bfs_queue:\n n = bfs_queue.get()\n if n[1] > height:\n break\n track[n[1]].append(n)\n if n[0] is None:\n bfs_queue.put((None, n[1] + 1, None))\n bfs_queue.put((None, n[1] + 1, None))\n continue\n bfs_queue.put((None, n[1] + 1, None) if not n[0].left else (n[0].left, n[1] + 1, n[0]))\n bfs_queue.put((None, n[1] + 1, None) if not n[0].right else (n[0].right, n[1] + 1, n[0]))\n\n spaces = 12 * (2 ** (height))\n ans = '\\n' + '\\t\\tVisual Level Order Traversal of 
RBtree'.center(spaces) + '\\n\\n'\n for i in range(height):\n ans += f\"Level {i + 1}: \"\n for n in track[i]:\n space = int(round(spaces / (2 ** i)))\n if not n[0]:\n ans += ' ' * space\n continue\n ans += \"{} ({})\".format(n[0], n[2].value if n[2] else None).center(space, \" \")\n ans += '\\n'\n return ans", "def __str__(self):\r\n levels = tuple(self.generate_levels())\r\n self.compute_representation_positions()\r\n levels_to_strings = self.represent_tree_levels(levels)\r\n branches = self.represent_tree_branches(levels)\r\n\r\n return \"\".join(\"\".join((level, \"\\n\\n\", branch))\r\n for (level, branch) in zip(levels_to_strings, branches))", "def displayPathInfo():\n # TODO: Remove unwanted / unused functions\n\n dirpath = os.getcwd()\n logging.info(\"Current Directory is : \" + dirpath)\n foldername = os.path.basename(dirpath)\n logging.info(\"Directory name is : \" + foldername)", "def debug(self):\n\n lst = []\n self._debug_node(lst, 0, self.root)\n return \"\\r\".join(lst)", "def _create_directory_listing(directory):\n # Read content.\n content = sorted(os.listdir(directory))\n directories = []\n files = []\n\n for f in content:\n if os.path.isdir(os.path.join(directory, f)):\n directories.append(f)\n else:\n files.append(f)\n\n # Construct output.\n result = []\n result.append('\" ==================================\\n')\n result.append('\" Directory Listing\\n')\n result.append('\" %s\\n' % os.path.abspath(directory))\n result.append('\" ==================================\\n')\n\n for d in directories:\n result.append('%s/\\n' % d)\n\n for f in files:\n result.append('%s\\n' % f)\n\n return ''.join(result)", "def walk_through_dir(dir_path):\n for dirpath, dirnames, filenames in os.walk(dir_path):\n print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")", "def encompass(self):\n self._printer('Standard Walk')\n count = Counter(length=3)\n for directory in self.directory:\n for root, directories, files in os.walk(directory, topdown=self.topdown):\n root = root[len(str(directory)) + 1:]\n self._printer(str(count.up) + \": Explored path - \" + str(root), stream=True)\n for filename in files:\n fullname = os.path.join(root, filename)\n # Join the two strings in order to form the full filepath.\n self.add_path(directory, fullname)", "def print_tree_entry(self, mount_id, parent_ns, paths, color=True, line_prefix='', is_last=True):\n entry = self.items[mount_id]\n\n # Define colors\n color_gray = COLOR_GRAY if color else ''\n color_norm = COLOR_NORM if color else ''\n\n # Build the line\n line = color_gray + line_prefix + ('`-' if is_last else '|-') + color_norm\n line += entry.pretty_str(parent_ns, color=color)\n print(line)\n\n # Get children\n children_id = self.children_by_id.get(mount_id)\n if not children_id:\n return\n filtered_children = [\n (mntpnt, mntid) for mntid, mntpnt in children_id.items()\n if any(mntpnt.startswith(p) or p.startswith(mntpnt) for p in paths)\n ]\n filtered_children.sort()\n\n for idx, child in enumerate(filtered_children):\n self.print_tree_entry(\n mount_id=child[1],\n parent_ns=entry.mount_ns,\n paths=paths,\n color=color,\n line_prefix=line_prefix + (' ' if is_last else '| '),\n is_last=(idx == len(filtered_children) - 1),\n )", "def log_dir_stacks_contents(dir_stacks):\r\n for directory in dir_stacks:\r\n logging.info('-'*80)\r\n logging.info('Predicted directory contents of:\\n{0}'\r\n .format(directory.path))\r\n files = directory.file_names\r\n files = sorted(files)\r\n logging.info('Number of files: 
{0}'.format(len(files)))\r\n logging.info('Files:')\r\n logging.info('\\t'.join(files))", "def print_tree(self, parser=None):\n for pre, _, node in RenderTree(self):\n print(pre + node._self_string(parser))", "def printTree(self):\n print(printTreeF(self, 0, self))", "def printGraph(tree, filename):\n G = pgv.AGraph() #Constructs a graph object\n for key in tree.keys():\n G.add_node(key)\n for subkey in tree[key].keys():\n G.add_node(subkey)\n G.add_edge(key,subkey,label=str(tree[key][subkey]),\\\n len=max(1, tree[key][subkey]))\n #length can't be less than 1, so that labels are readable\n\n G.draw(filename,prog=\"neato\")", "def print_tree(self, prefix=\"\"):\n print(\"%s%s\" % (prefix, self.node_label()))\n if self.left:\n self.left.print_tree(prefix + \" \")\n if self.right:\n self.right.print_tree(prefix + \" \")", "def walk_through_dir(dir_path):\n for dirpath, dirnames, filenames in os.walk(dir_path):\n print(f\"There are {len(dirnames)} directories and {len(filenames)} images in '{dirpath}'.\")", "def dirtree(dir, index):\n filenames = os.listdir(dir)\n for filename in filenames:\n if not os.path.isdir(os.path.abspath(dir+'/'+filename)):\n if filename == filenames[-1]:\n print('| '*index+'\\--', filename)\n else:\n print('| '*index+'|--', filename)\n else:\n print('| '*index+'|--', filename)\n dir = dir + '/' + filename\n dirtree(dir, index+1)", "def print_output(tree):\n print_value(tree)\n print_tree(tree)", "def get_tree_string(self, node):\n string = \"\"\n for child in sorted(node.children):\n string += node.depth * \"\\t\"\n if node.depth > 0:\n string += \"|\"\n string += node.feature + \"=\" + child\n if node.children[child].is_leaf:\n string += \":\" + node.children[child].pred + \"\\n\"\n else:\n string += \"\\n\" + self.get_tree_string(node.children[child])\n\n return string", "def upper():\r\n\tfor i in range(SIZE):\r\n\t\tfor j in range(SIZE*2+2):\r\n\t\t\tif j == 0 or j == SIZE*2+1:\r\n\t\t\t\tprint('|', end='')\r\n\t\t\telif (i+j) == SIZE:\r\n\t\t\t\tprint('/', end='')\r\n\t\t\telif j == SIZE+i+1:\r\n\t\t\t\tprint('\\\\', end='')\r\n\t\t\telif (SIZE-i) < j < (SIZE+i+1):\r\n\t\t\t\tif (i+j) % 2 == 1:\r\n\t\t\t\t\tprint('\\\\', end='')\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint('/', end='')\r\n\t\t\telse:\r\n\t\t\t\tprint('.', end='')\r\n\t\tprint('')", "def draw_folders(output_dir, row):\n\n print(\n f\"[desenhando] {row['Record name']} {row['Pasta']} {row['Título'][:40]}...\"\n )\n\n im = Image.new(\"RGB\", (842, 1191), \"white\")\n draw = ImageDraw.Draw(im)\n\n arial_large = ImageFont.truetype(\"arial.ttf\", 20)\n\n arial_small = ImageFont.truetype(\"arial.ttf\", 14)\n\n draw.text((60, 80), str(row[\"collection\"]), fill=\"gray\", font=arial_small)\n\n draw.text((60, 120), str(row[\"Record name\"]),\n fill=\"black\", font=arial_large)\n\n draw.text(\n (700, 120), str(row[\"Pasta\"]), fill=\"black\", font=arial_large,\n )\n\n draw.text((60, 180), str(row[\"Título\"]), fill=\"black\", font=arial_large)\n\n draw.text(\n (60, 230),\n \"Número de fotografias: \" + str(row[\"Número de imagens\"]),\n fill=\"black\",\n font=arial_small,\n )\n\n filename = f'{row[\"Record name\"]} ({row[\"Pasta\"]}).pdf'\n im.save(os.path.join(output_dir, filename), \"PDF\", quality=100)", "def printChildrenOfNode(tree, node, printDirectory = False):\r\n if node.children:\r\n for child in node.children:\r\n tree.printDepth = tree.printDepth+1\r\n if printDirectory:\r\n print (\"| \"*tree.printDepth), child.directory\r\n else:\r\n print (\"| \"*tree.printDepth), child.name\r\n if child.children:\r\n 
tree.printChildrenOfNode(child, printDirectory)\r\n else:\r\n tree.printDepth = tree.printDepth-1\r\n \r\n tree.printDepth = tree.printDepth-1", "def pretty_print(self,depth=0):\n\t\tfor i in range(depth):\n\t\t\tprint \"\\t\",\n\t\t\t\t\n\t\tprint self.__str__()\n\t\t\n\t\tfor c in self.tree.children:\n\t\t\tc.viz.pretty_print(depth+1)", "def print_grid (grid):\r\n print('+--------------------+')\r\n for o in range(len(grid)):\r\n print('|',end='')\r\n for e in range(len(grid[o])):\r\n j=grid[o][e]\r\n if j==0:\r\n g=' '\r\n else:\r\n g=j\r\n print(g,end=' '*(5-len(str(grid[o][e]))))\r\n print('|')\r\n print('+--------------------+')", "def get_tree_contents(cls, tree_path, dirs, files, ref_table):\n\n tree_contents = ''\n\n for dir_name in dirs:\n dir_name_path = '%s/%s' % (tree_path, dir_name)\n dir_name_perm = ref_table[dir_name_path]['perm']\n tree_contents += '%s tree %s %s \\n' % (dir_name_perm, ref_table[dir_name_path]['hash'], dir_name)\n\n for file in files:\n file_path = '%s/%s' % (tree_path, file)\n file_perm = ref_table[file_path]['perm']\n tree_contents += '%s file %s %s \\n' % (file_perm, ref_table[file_path]['hash'], file)\n\n return tree_contents, cls.get_256_hash_from_string(tree_contents)" ]
[ "0.68927115", "0.62558967", "0.6205917", "0.611956", "0.5990437", "0.59898376", "0.59636194", "0.5947244", "0.5935697", "0.5933059", "0.59189725", "0.58366364", "0.58359617", "0.58307904", "0.5817027", "0.58034277", "0.579429", "0.577492", "0.5759573", "0.5752997", "0.57404804", "0.57389766", "0.5730393", "0.57103544", "0.56736445", "0.56677073", "0.56595165", "0.5656392", "0.56530076", "0.5645871", "0.557751", "0.55595464", "0.5558055", "0.5557551", "0.5544707", "0.5536296", "0.5509177", "0.54996747", "0.5478892", "0.54774475", "0.5475754", "0.5467906", "0.5459069", "0.5457991", "0.5453124", "0.5445331", "0.5442727", "0.54330844", "0.5419277", "0.54126275", "0.54060256", "0.5374955", "0.53729814", "0.53641886", "0.5352839", "0.5348812", "0.53415376", "0.53415376", "0.53382045", "0.5334618", "0.53308284", "0.5313093", "0.53084713", "0.53054744", "0.52920693", "0.5286263", "0.5277556", "0.52754265", "0.5275101", "0.52721614", "0.5270372", "0.52700144", "0.5269353", "0.5262767", "0.5260726", "0.52601266", "0.5255302", "0.5255088", "0.5252368", "0.52418774", "0.5241785", "0.5239172", "0.5238803", "0.52386564", "0.5237766", "0.5237578", "0.5230199", "0.52202076", "0.52184737", "0.52180874", "0.52165145", "0.5215202", "0.5214253", "0.5207562", "0.5206115", "0.5205872", "0.52044386", "0.5200516", "0.5187801", "0.51853937" ]
0.55308056
36
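The roolsPrintSimpleLs document above lays the key names out in as many columns as the terminal can hold, shrinking the column count until the summed per-column widths fit. A condensed sketch of that fitting loop (assuming Python 3; layout_in_columns and min_gap are illustrative names, and the colouring is left out):

import shutil


def layout_in_columns(names, min_gap=2):
    # Print names row-major in as many columns as fit the terminal width.
    if not names:
        return
    term_width = shutil.get_terminal_size((80, 24)).columns
    # Start from the most columns that could possibly fit, then shrink.
    ncol = max(1, min(len(names), term_width // (min(len(n) for n in names) + min_gap)))
    while ncol > 1:
        widths = [max(len(n) + min_gap
                      for i, n in enumerate(names) if i % ncol == c)
                  for c in range(ncol)]
        if sum(widths) <= term_width:
            break
        ncol -= 1
    else:
        widths = [max(len(n) for n in names) + min_gap]
    for i, name in enumerate(names):
        last_in_row = (i + 1) % ncol == 0 or i == len(names) - 1
        print(name if last_in_row else name.ljust(widths[i % ncol]),
              end="\n" if last_in_row else "")

The original adds the colours by wrapping directory and tree names in ANSI escape sequences and widening the ljust padding by the length of those escape codes so the columns stay aligned.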
Print information given by keyList with a rools style chosen with optDict
def roolsPrint(keyList,optDict,indent=0): if optDict['long'] or optDict['tree']: \ roolsPrintLongLs(keyList,optDict,indent) else: roolsPrintSimpleLs(keyList,indent)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def printDict(myDict):\n for key in myDict:\n print(f\"Version: --> {myDict[key]['version']} \")\n print(f\"Accuracy: --> {myDict[key]['accuracy']}\")\n print(f\"Time --> {myDict[key]['time_per_target']}\")\n print(f\"Penalty --> {myDict[key]['target_w_penalty']}\")\n print(f\"ID --> {myDict[key]['assessed_by']}\")\n print(f\"# --> {myDict[key]['attempt']}\")\n\n print()", "def roolsPrintSimpleLs(keyList,indent):\n # This code is adaptated from the pprint_list function here :\n # http://stackoverflow.com/questions/25026556/output-list-like-ls\n # Thanks hawkjo !!\n if len(keyList) == 0: return\n (term_width, term_height) = getTerminalSize()\n term_width = term_width - indent\n min_chars_between = 2\n min_element_width = min( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n max_element_width = max( len(key.GetName()) for key in keyList ) \\\n + min_chars_between\n if max_element_width >= term_width: ncol,col_widths = 1,[1]\n else:\n # Start with max possible number of columns and reduce until it fits\n ncol = min( len(keyList), term_width / min_element_width )\n while True:\n col_widths = \\\n [ max( len(key.GetName()) + min_chars_between \\\n for j, key in enumerate(keyList) if j % ncol == i ) \\\n for i in range(ncol) ]\n if sum( col_widths ) <= term_width: break\n else: ncol -= 1\n for i, key in enumerate(keyList):\n if i%ncol == 0: write(\"\",indent) # indentation\n # Don't add spaces after the last element of the line or of the list\n if (i+1)%ncol != 0 and i != len(keyList)-1:\n if not IS_TERMINAL: write( \\\n key.GetName().ljust(col_widths[i%ncol]))\n elif isDirectoryKey(keyList[i]): write( \\\n isSpecial(ANSI_BLUE,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_BLUE_LENGTH))\n elif isTreeKey(keyList[i]): write( \\\n isSpecial(ANSI_GREEN,key.GetName()).ljust( \\\n col_widths[i%ncol] + ANSI_GREEN_LENGTH))\n else: write(key.GetName().ljust(col_widths[i%ncol]))\n else: # No spaces after the last element of the line or of the list\n if not IS_TERMINAL: write(key.GetName())\n elif isDirectoryKey(keyList[i]):\n write(isSpecial(ANSI_BLUE, key.GetName()))\n elif isTreeKey(keyList[i]):\n write(isSpecial(ANSI_GREEN, key.GetName()))\n else: write(key.GetName())\n write('\\n')", "def DictFunction():\r\n print \"{name} is from {city}, and he likes {cake} cake, {fruit} fruit, {salad} salad and {pasta} pasta\".format(**food_prefs)", "def listOptions(lst):\n for k, e in enumerate(lst,1):\n print(\"{:^15}{:<10}\".format(k,e))", "def roolsPrintLongLs(keyList,optDict,indent):\n if len(keyList) > 0: # Width informations\n maxCharClass = max([len(key.GetClassName()) for key in keyList])\n maxCharTime = 12\n maxCharName = max([len(key.GetName()) for key in keyList])\n dic = { \\\n \"classWidth\":maxCharClass+2, \\\n \"timeWidth\":maxCharTime+2, \\\n \"nameWidth\":maxCharName+2, \\\n \"titleWidth\":1}\n date = ROOT.Long(0) \n for key in keyList:\n time = ROOT.Long(0)\n key.GetDatime().GetDateTime(key.GetDatime().Get(),date,time)\n time = prepareTime(time)\n rec = \\\n [key.GetClassName(), \\\n MONTH[int(str(date)[4:6])]+\" \" +str(date)[6:]+ \\\n \" \"+time[:2]+\":\"+time[2:4], \\\n key.GetName(), \\\n \"\\\"\"+key.GetTitle()+\"\\\"\"]\n write(LONG_TEMPLATE.format(*rec,**dic),indent,end=\"\\n\")\n if optDict['tree'] and isTreeKey(key):\n tree = key.ReadObj()\n recursifTreePrinter(tree,indent+2)", "def display_dict() -> None:\n for key in ascii_dict:\n print(key, ': ')\n for line in ascii_dict[key]:\n print(line)", "def _print_enum_opt(self, option, choices):\n for key in 
choices:\n if key == self.conf[option]:\n print(\"* %s\" % key)\n else:\n print(\" %s\" % key)", "def show(list_of_dicts, key):\n print(\"\\nHere are the stocks I have considered for you:\")\n for i in list_of_dicts: # iterates through list_of_dicts and prints Name and Market Cap\n print(f\" - {i['Name']} - {key} is {i[key]} \")", "def print_car(car):\n for key, value in car.items():\n print(f\"{key}: {value}\")", "def printPicnic(itemsDict: dict, leftWidth: int, rightWidth: int) -> None:\n print('PICNIC ITEMS'.center(leftWidth + rightWidth, '-'))\n for k, v in itemsDict.items():\n print(k.ljust(leftWidth, '.') + str(v).rjust(rightWidth))", "def show_key_options(json_dict, backtrack):\n print(\"Keys available:\")\n for key in json_dict:\n print(key, end=\" \"*5)\n key = input(\"\\nEnter next key: \")\n step_into(json_dict, key, backtrack)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def run_print_dict_examples():\n print()\n print_dict_keys(NAME_DICT)\n print()\n print_dict_items(NAME_DICT)", "def print_option_set(option_set, leader):\n for option in option_set:\n labels = \",\".join(option['labels'])\n option_set = leader + labels + \" \"*(20-len(labels)) + \"- \" + option['description']\n print(option_set)", "def print_individual(individual : Dict[str, str], keys: List[str], individualsDict):\n ind_str = \"\"\n for index, key in enumerate(keys):\n if index != 0:\n ind_str += \", \"\n\n if key == 'name':\n\n #US47 twins special symbol\n twins = {}\n for id, i in individualsDict.items():\n family = i[\"child\"]\n birthday = i[\"birthday\"]\n\n if family+birthday in twins:\n twins[family+birthday] = twins[family+birthday].append(i['id'])\n else:\n twins[family+birthday] = [i['id']]\n\n flatList = []\n for twin_lists in twins.values():\n if len(twin_lists) > 1:\n flatList = flatList + twin_lists\n\n # US44: underline if dead\n if not individual[\"alive\"]:\n ind_str += \"\\u001b[4m\"\n # blue for boy, red for girl\n ind_str += \"\\033[1;34;40m\" if individual[\"gender\"] == \"M\" else \"\\033[1;35;40m\"\n ind_str += f\"name = {individual['name']}\\033[0;37;40m\" # reset color\n ind_str += \"\\u001b[0m\" # reset text decoration\n \n if individual['id'] in flatList:\n ind_str += u'\\1071'\n else:\n ind_str += f\"{key} = {individual[key]}\"\n\n if key == 'birthday':\n ind_str += format_date(individual['birthday'])\n\n print(ind_str)", "def print_options(val, cur_matches):\n print val\n\n #skip one to print none at end\n for i,v in enumerate(cur_matches[1:]):\n print \"[%i] %s : %s \"%(i+1, v[0], v[1])\n print \"[%i] %s : %s \" % (0, cur_matches[0][0], cur_matches[0][1])\n\n print \n print 'Choice?'", "def printMap(values, klab, vlab, precision, offset=16):\n\tprint(klab.ljust(offset, \" \") + vlab)\n\tfor k in values.keys():\n\t\tv = values[k]\n\t\tks = toStr(k, precision).ljust(offset, \" \")\n\t\tvs = toStr(v, precision)\n\t\tprint(ks + vs)", "def print_options(order_list, option_list):\n menu = ''\n for order, text in zip(order_list, option_list):\n menu += (str(order) + ' - ' + text + '\\n')\n return menu", "def display(self):\r\n\t\tfor key, value in self.__dict__.items():\r\n\t\t\tprint(key.upper(), value, sep=': ')\r\n\r\n\t\tprint(\"\")", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n 
print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def listopt(opt, f=None):\n args = vars(opt)\n\n if f is not None:\n f.write('------------ Options -------------\\n')\n else:\n print('------------ Options -------------')\n\n for k, v in sorted(args.items()):\n if f is not None:\n f.write('%s: %s\\n' % (str(k), str(v)))\n else:\n print('%s: %s' % (str(k), str(v)))\n\n if f is not None:\n f.write('-------------- End ----------------\\n')\n else:\n print('-------------- End ----------------')", "def printdict(input_dict):\n for key in input_dict:\n print key, \":\", input_dict[key]", "def format_dict(kv_list):\n return '\\n'.join(['{} - {}'.format(key, value) for\n key, value in kv_list])", "def show_values():\n dic_drg = {}\n dic_age = {}\n dic_sex = {}\n dic_sline = {}\n for tup in all_data:\n drg = tup[7]\n age = tup[9]\n sex = tup[10]\n sline = tup[14]\n\n dic_drg[drg] = 1\n dic_age[age] = 1\n dic_sex[sex] = 1\n dic_sline[sline] = 1\n\n print \"Age values\"\n for key in sorted(dic_age.keys()):\n print key\n\n print \"Sex values\"\n for key in sorted(dic_sex.keys()):\n print key\n\n print \"Service line values\"\n for key in sorted(dic_sline.keys()):\n if key is None or len(key) == 0:\n continue\n print \"'\" + key + \"',\",\n print\n\n print \"Drg values\"\n for key in sorted(dic_drg.keys()):\n if key is None or len(key) == 0:\n continue\n print\"'\" + key + \"',\",\n print", "def show(self, keys=None, sort_keys_function=None):\n output_keys = keys or self.keys\n if not self.items:\n print(\"No items to show\")\n else:\n for item in self.__get_items(sort_keys_function):\n for output_key in output_keys:\n print(\"{0:25}: {1!s}\".format(output_key, getattr(item, self.mapping[output_key])))\n print(\"-\" * 25)", "def build_choices(header, dictionary, after):\n out = f\"{header}\\n\"\n for i, (key, item) in enumerate(dictionary.items(), start=1):\n out += f\"{INDENT_STRING}{i}. 
{item}\\n\"\n out += after\n return out", "def print_pairs(self, d, level=0):\n for k, v in d.iteritems():\n if type(v) is dict:\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self.print_pairs(v, level + 1)\n elif k == \"output\":\n self._write('%s%s :\\n' % (\"\\t\" * level, k.upper()))\n self._write('%s\\n' % v)\n else:\n self._write('%s%s : %s\\n' % (\"\\t\" * level, k.upper(), v))", "def recursive_dict_key_print(dict_in, spacer=\"\"):\n if type(dict_in) is not dict:\n return\n next_spacer = spacer + \" \"\n for key, value in dict_in.items():\n try:\n print(spacer, f\"{key} : {value.shape}\")\n except(AttributeError):\n print(spacer, key)\n recursive_dict_key_print(value, next_spacer)", "def print_verb_dict(verb):\n for keys in verb:\n print(f'{keys}: {verb[keys]}')", "def showd(d):\r\n return ' '.join([':%s %s' % (k,v)\r\n for k,v in\r\n sorted(d.items())\r\n if not \"_\" in k])", "def print_options(self):\n for option in self._options.items():\n print \"{0} = {1}\".format(option[0], option[1])", "def printOptions():\n\n # For each group, create a group option\n print(\"default\")", "def printsection(section):\n print('===')\n for key in section.keys():\n print(\"Key: %s\" % key)\n for item in section[key]:\n print(' %s' % item)", "def print_kwargs(**kwargs):\n for key in kwargs:\n print('%s %s' %(key, kwargs[key]))", "def print_options(self, options, describe=False, indent_level=0):\n indent = ' ' * indent_level\n for option in options.get_option_names():\n line = colorize(option + ': ', 'green') + str(options[option])\n if describe:\n line += ' (' + options.get_description(option) + ')'\n values = options.get_acceptable_values(option)\n if values is not None:\n line += ' (Acceptable Values: ' + str(values) + ')'\n eprint(indent + line)", "def print_dict(data):\n print data", "def print_dictionary(d, start_pos=0, end_pos=2):\n if type(d) is list: # end_pos will also act as limit for no. of results\n print(\"\\n\" + \"_\" * 37 + \"BEGIN\" + \"_\" * 37 + \"\\n\")\n for i in range(start_pos, end_pos + 1):\n if i == len(d):\n break\n if len(d) != 1: # Skip item number for single track dictionary\n print(\"Item no.: {}\".format(i + 1))\n for key, value in d[i].items():\n if type(value) is str and len(value) > 79:\n value = value[:40]\n value = value + '...'\n print(\"{0}: {1}\".format(key, value))\n print()\n\n inner_choice = input(\"Want more results? (y/n): \")\n if inner_choice.lower() in ['y', 'yes']:\n print_dictionary(d, start_pos=end_pos + 1, end_pos=end_pos + 5)\n\n if i == len(d):\n print(\"_\" * 38 + \"END\" + \"_\" * 38 + \"\\n\")\n return 1\n\n elif type(d) is dict:\n print()\n for key, value in d.items():\n if type(value) is str and len(value) > 79:\n value = value[:40]\n value = value + '...'\n print(\"{0}: {1}\".format(key, value))\n print()\n return 1", "def pprint(self):\r\n for i in self.items():\r\n print '%s => %r'%i", "def view_keys(dict):\n claves=list(dict.keys())\n claves.sort()\n for line in claves:\n print(line.upper(),' = ',dict[line])", "def print_dd_dict( self, ):\n print( self._dd_dict )", "def display_songs_in_scrapped_dict(scrapped_songs_dict):\n for i in range(1, len(scrapped_songs_dict) + 1):\n print(f\"{i}. 
{list(scrapped_songs_dict.keys())[i - 1]}\")", "def print_out_dict(word_dict):\n for word_class in WORD_CLASSES:\n \n last_word = \"\"\n if(word_class in word_dict):\n print(f\"{word_class}:\")\n for word in sorted(word_dict[word_class]):\n if(word != last_word):\n spacing = \" \"*(20-len(word))\n print(spacing+word)\n last_word = word", "def print_object(dict_to_print, *, name='', uppercase=False):\n string = '' if name == '' else name.ljust(10)\n for key, value in dict_to_print.items():\n string += f'{key.upper() if uppercase else key}: {\"\" if value < 0 else \" \"}{float(value):.4}'.ljust(\n len(key) + 10)\n\n print(string)", "def print_options(events):\n headers = get_keys('headers', events)\n context = get_keys('context', events)\n params = get_keys('params', events)\n variables = get_keys('vars', events)\n tags = get_keys('tags', events)\n\n table = PrettyTable(['Headers', 'Context', 'Params', 'Vars', 'Tags'])\n table.align = 'l'\n\n for header, context_var, param, var, tag in izip_longest(\n headers, context, params, variables, tags, fillvalue=''):\n table.add_row((header, context_var, param, var, tag))\n\n print table", "def show_menu():\r\n print(\"Write a number of the next options:\")\r\n for key, value in enumerate(options):\r\n print(\"{}. {}\".format(key, value))", "def printDicts():\n for k in key:\n print k, key[k]\n \n for f in freq:\n print f, freq[f]\n \n for e in english:\n print e, english[e]", "def print_dict(d):\n list1=list()\n for key,value in d.items():\n list1.append((value,key))\n list1.sort(reverse=True)\n for word,value in list1[:20]:\n print(word, value, sep='\\t')", "def print_dict(dictionary, format_=None):\n\n format_ = format_ or DEFAULT\n\n if format_ == TEXT:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s = %s\" % (key, value))\n elif format_ == DOCKERENV:\n for key, value in iter(sorted(dictionary.items())):\n print(\"%s=%s\" % (key, value))\n elif format_ == BASH:\n for key, value in iter(sorted(dictionary.items())):\n print(\"export %s=%s\" % (key, value))\n elif format_ == JSON:\n print(json.dumps(dictionary))\n elif format_ == NAME_VALUE_DICT:\n print(\"[\")\n for key, value in iter(sorted(dictionary.items())):\n print('{\"name\": \"%s\", \"value\": \"%s\"},' % (key, value))\n print(\"]\")", "def print_config(self, options=()):\n if len(options) == 0:\n options_to_print = sorted(self._config.keys())\n else:\n options_to_print = options\n\n for key in options_to_print:\n if key in self._config:\n config_value = self._config[key].get_highest_priority()\n actual_value = self._raw_get(key) # for multiple this is a combined value\n print(\n '{key}: {value} - prio: {priority}, source: {source}'.format(\n key=key,\n value=actual_value,\n priority=config_value.priority,\n source=config_value.source))", "def print_all_items_in_dict(all_items):\n if config.output.csv:\n print_all_items_in_dict_for_csv(all_items)\n else:\n print_all_items_in_dict_for_human(all_items)", "def printModifiedOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All Modified %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n if self.getOption(key) != self.defaultOptions[key][1]:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)", "def display(items):\n\n # LOC, COMMENT, ...\n # (same as keys of TYPE_OF_LINE, but better to only rely on items here)\n what = next(iter(items))[1]\n\n # Headers\n 
print(bcolors.BOLD\n +(\"{:<30}\"+\":{:>10}\"*len(what)).format(\"path\", *what)\n +bcolors.ENDC)\n\n # Lines\n for k,v in items:\n print((bcolors.OKGREEN if v[\"LOC\"] == 0\n else bcolors.FAIL if v[\"COMMENTS\"] == 0\n else bcolors.WARNING if v[\"COMMENTS\"]/v[\"LOC\"] < 0.2\n else bcolors.OKGREEN )\n +(\"{:<30}\"+\":{:>10}\"*len(v)).format(k, *v.values())\n + bcolors.ENDC)", "def _show(node: dict, prefix=\"\"):\n print(prefix)\n for key, value in node.items():\n if key in {ITEMSKEY, SUFFIXKEY}:\n print(f\"{prefix}{key}: {value}\")\n else:\n _show(value, prefix + key)", "def __repr__(self, *args, **kwargs):\n result ='{'\n for (k, v) in self.items(*args, **kwargs):\n result += repr(k) + \": \" + repr(v) + \",\"\n\n result = result[:-1] + '}'\n return result", "def click_echo_kvp(key, value, padding=20, color='green'):\n return click.echo(\n click.style('{key:<{padding}}'.format(\n key=key + ':',\n padding=padding\n ), fg=color) +\n str(value)\n )", "def ppdict(d):\n print '{'\n keys=d.keys()\n keys.sort()\n for k in keys:\n spacing=\" \" * (16-(len(repr(k))+1))\n print \"%s:%s%s,\" % (repr(k),spacing,repr(d[k]))\n print '}'", "def large_list_display(keyval: str, record: dict, title: str):\n if keyval in record:\n if len(record[keyval]):\n res = \", \".join(t[\"value\"].title() for t in record[keyval])\n res = f\"{chunk_long_description(res)}\"\n res = f\"{colored(title, attrs=['bold','underline'])}\\n{res}\"\n print(f\"{res}\\n\")", "def display(self):\n\n print('\\n')\n for key, val in self.option.items():\n print(key, val, '\\n') # make it more confortable to read\n self.get_choice() # launch automaticly the choice method after display", "def __str__(self):\n if len(self.keys()):\n return '{' + repr(self.keys()[0]) + ':' + repr(self[self.keys()[0]]) + ', ...'\n else:\n return super(FSDict, self).__str__()", "def print_key_freq(self,\r\n freq_list):\r\n\r\n for key, freq in freq_list:\r\n\r\n display.noteprint((EMPTYCHAR,key + alerts.APPEARS_BEG\\\r\n +len(self.get_indexes_for_key(key)\\\r\n +alerts.APPEARS_END+freq)))", "def print_dict_items(my_dict):\n print(\"Printing dictionary\", my_dict, \"in readable form\")\n for (key, value) in my_dict.items():\n print(\"Key =\", key, \"has value =\", value)", "def print_dict_items(my_dict):\n print(\"Printing dictionary\", my_dict, \"in readable form\")\n for (key, value) in my_dict.items():\n print(\"Key =\", key, \"has value =\", value)", "def multivalue():\n d = {1: \"George\", \"Prince\", \"Male\", 2: \"Margaret\", \"Queen\", \"Lizard\"}\n print(d)", "def print(self):\n str_items = [(str(v),str(p)) for v,p in sorted(self.items())]\n max_lens = [\n max(i[0] for i in str_items),\n max(i[1] for i in str_items)\n ]\n lena, lenb = max_lens\n print(\"\\n\".join(\n f\"{v:{lena}s} -> {p:{lenb}s}\"\n for v, p in str_items\n ))", "def possession_stringer(input_dict):\r\n\treturn ', '.join(' x'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def skill_stringer(input_dict): #input a dictionary\r\n\treturn ', '.join('-'.join((k, str(v))) for k,v in sorted(input_dict.items())) #output formatted skill list string\r", "def show_opt(self):\n print(\n ''\n '\\n\\t' + bc.OKBLUE + ('%-*s %-*s %-*s %s' % (15, 'OPTION', 8, 'RQ', 18, 'VALUE', 'DESCRIPTION')) + bc.ENDC +\n '\\n\\t' + ('%-*s %-*s %-*s %s' % (15, '------', 8, '--', 18, '-----', '-----------')) +\n '\\n\\t' + ('%-*s %-*s %-*s %s' % (15, 'ip:', 8, 'y', 18, self.ip, 'IP or subnet to scan (192.168.1.100 or 192.168.1.1/24')) +\n '\\n\\t' + ('%-*s %-*s 
%-*s %s' % (15, 'debug:', 8, 'n', 18, self.debug, 'Turn debugging on (y/n)')) +\n '\\n'\n )", "def print_options(self, opt):\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n # save to the disk\n expr_dir = os.path.join(opt.checkpoints_dir, opt.name)\n mkdirs(expr_dir)\n file_name = os.path.join(expr_dir, '{}_opt.txt'.format(opt.phase))\n with open(file_name, 'wt') as opt_file:\n opt_file.write(message)\n opt_file.write('\\n')", "def print_cutoffs(cutoffs: list, spaces: int = 8) -> str:\n lines = []\n for key, val in sorted(cutoffs.items()):\n lines.append(f'{spaces * \" \"}{key}: {val}')\n return \"\\n\".join(lines) if lines else \"\"", "def print(self):\n for fiction in self.fictions:\n print(fiction.__dict__)", "def print_config(config_dic, logger):\n for k, v in config_dic.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))", "def __repr__(self):\r\n return '{%s}' % ', '.join(['%r: %r' % (k, v) for k, v in self.items()])", "def display_present_ire_endings():\n ire_endings = {\"io\": \"-o\", \"tu\": \"-i\", \"lui\": \"-e\", \"lei\": \"-e\", \"noi\": \"-iamo\", \"voi\": \"-ite\", \"loro\": \"-ono\"}\n for keys in ire_endings:\n print(f'{keys}: {ire_endings[keys]}')", "def printDict(self):\n print str(self)", "def print_section( secName, dictName ):\n\n\t# Number of entries in IA dictionary.\n\tmaxitem = len(dictName)\n\n\toutFile.write('\\n\\\\begin{enumerate}\\n')\n\n\tfor m in xrange(0,maxitem):\n\n\t\toutFile.write('\\\\item ' + dictName[m])\n\n\t\tbeans = [bean for bean in secName if int(bean[1]) == (m+1)]\n\t\t\n \t\tif beans[0][-1] == 'N/A':\n \t\t\toutFile.write('\\n\\t\\\\begin{description}[font=\\\\normalfont]\\n')\n \t\t\toutFile.write('\\t\\t\\\\item[N/A]\\n')\n \t\t\toutFile.write('\\t\\\\end{description}\\n\\n')\n\t\telse:\n\t\t\tl = len(beans)\n\t\t\tif beans[0][2].startswith('AY'):\n\t\t\t\toutFile.write('\\t\\\\begin{description}[leftmargin=1.75cm, font=\\\\normalfont]\\n')\n\t\t\t\tbeans[0][2] = string.replace(beans[0][2],'AY','\\\\textsc{ay}')\n\t\t\telse:\n\t\t\t\toutFile.write('\\t\\\\begin{description}[font=\\\\normalfont]\\n')\n\t\t\tfor i in xrange(0,l):\n\t\t\t\tif beans[i][2].startswith('AY'):\n\t\t\t\t\tbeans[i][2] = string.replace(beans[i][2],'AY','\\\\textsc{ay}')\n\t\t\t\toutFile.write('\\t\\t\\\\item[\\\\small ' + beans[i][2] + '] ' + beans[i][3] + '\\n')\n\n\t\t\t\tif len(beans[i]) == 5:\n\t\t\t\t\toutFile.write('\\n\\n\\t\\t' + beans[i][4] + '\\n')\n\n\t\t\toutFile.write('\\t\\\\end{description}\\n\\n')\n\n\toutFile.write('\\\\end{enumerate}\\n')", "def print_table():\n for key in _op_table.keys():\n print(key)\n for sub_key in _op_table[key]:\n print('\\t--' + sub_key)", "def display_phrasewise_list(prob_dict):\n print(\"***********Phrase pairs and their ranks*****************\")\n for f_phrase in prob_dict:\n e_phrases = prob_dict[f_phrase]\n s = [(phrase, e_phrases[phrase]) for phrase in sorted(e_phrases, key=e_phrases.get, reverse=True)]\n print(f_phrase ,\"->\",s)\n print(\"----------------------------------------------------------------------\")", "def drawMenu(self):\n try:\n for key in self.order_of_keys:\n print(\"\\r[key {:8}] : {}\".format(key, self.keybindings[key][self.KEY_DESCRIPTION]))\n except KeyError:\n 
print(\"Error: Keys found GoPiGo3WithKeyboard.order_of_keys don't match with those in GoPiGo3WithKeyboard.keybindings.\")", "def _render_dict_to_string(self, adict):\n alist = [ \"%s:%s\" % (self._render_thing(k), \n self._render_thing(adict[k])\n ) for k in adict.keys()]\n return \",\".join(self._render_row(alist))", "def printt(dictionnary):\n for key, value in dictionnary.iteritems():\n print('{key}, size: {size}, {values}'.format(key=key, \n size=len(value), values=value[0:4]))", "def _repr_kwargs(self):\n\n ret = \"\"\n if self.options.growth:\n ret += \", growth=True\"\n elif self.options.circular:\n ret += \", circular=True\"\n\n return ret", "def simple_list_display(keyval: str, record: dict, title: str, no_val: bool = False):\n if keyval in record:\n if len(record[keyval]):\n if no_val:\n result = \", \".join(list(record[keyval]))\n else:\n result = \", \".join(m[\"value\"].title() for m in record[keyval])\n print(f\"{bold(title)}: {result}\\n\")", "def print_device_dict(device_dict):\n for device_id in device_dict:\n print(str(device_id) + ':', device_dict[device_id]['name'])", "def print_melon(dictionary):\n\n for melon, melon_info in dictionary.items():\n melon = melon.upper()\n print(f'{melon}') \n\n for key, value in melon_info.items():\n print(f'\\t {key}: {value}')", "def printfunc(self, params, iter, resid, *args, **kwargs):\n\n print(iter) \n print(params.valuesdict())", "def map_room_list():\n for room in map_rooms:\n print(f\"{room}: \")\n for description in map_rooms[room]:\n print(f\"{description} - {map_rooms[room][description]}\")", "def display(self):\n for value, prob in self.items():\n print(value, prob)", "def printCurrentOptions(self):\n if self.comm.rank == 0:\n print('+---------------------------------------+')\n print('| All %s Options: |' % self.name)\n print('+---------------------------------------+')\n # Need to assemble a temporary dictionary\n tmpDict = {}\n for key in self.options:\n tmpDict[key] = self.getOption(key)\n pp(tmpDict)", "def dispDic(dic):\n pass", "def display(self):\n # type: ()->None\n print('============')\n for key, value in self._ifAttributes.items():\n if isinstance(value, list):\n print(key + ': ')\n for item in value:\n print('\\t' + item)\n elif isinstance(value, dict):\n print(key + ': ')\n for item in value.keys():\n print('\\t' + item + ': ' + value[item])\n else:\n print(key + ': ' + str(value))\n print('============')", "def display(animal):\n for name, valeur in animal.items(): # boucle contenant deux variables pour le nom et la valeur de chaque clef dans le dictionaire\n print(\"donnée de votre animal: {} : {}\".format(name,valeur))", "def test17():\n\n\twardrobe = {\"shirt\":[\"red\",\"blue\",\"white\"], \"jeans\":[\"blue\",\"black\"]}\n\tfor cloth in wardrobe.keys():\n\t\tfor color in wardrobe[cloth]:\n\t\t\tprint(\"{} {}\".format(color,cloth))", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)", "def print_q(q):\n for key in sorted(q.keys()):\n print(key, end=\" \")\n value = q[key]\n for i in range(len(value)):\n print(value[i], end=\" \")\n print()", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\", \" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print_configs(opts, header=''):\n if header:\n 
print('\\n###############################################################\\n')\n print('\\t########\\t {} \\t########\\n'.format(header))\n print('###############################################################\\n')\n\n for field in opts._fields:\n if len(field) < 8:\n print('\\t{}\\t\\t\\t:\\t{}\\n'.format(field, getattr(opts, field)))\n else:\n print('\\t{}\\t\\t:\\t{}\\n'.format(field, getattr(opts, field)))", "def printResults(self):\n for key in self.mDict.keys():\n print ('for {:d}, entries = {:d} and exits = {:d}'.format (key, self.mDict.get(key).get ('entries'), self.mDict.get(key).get ('exits')))", "def tab_printer(args):\n args = vars(args)\n keys = sorted(args.keys())\n t = Texttable() \n t.add_rows([[\"Parameter\", \"Value\"]] + [[k.replace(\"_\",\" \").capitalize(), args[k]] for k in keys])\n print(t.draw())", "def print_map(self):\n y_max,x_max = map(max, zip(*self.mp.keys()))\n for row in range(0,y_max+1):\n msg = []\n for k in range(0,x_max+1):\n msg.append(chr(self.mp[row,k]))\n print(\"\".join(msg))", "def print_data():\r\n\r\n d = data()\r\n for i in d:\r\n for key, value in i.items():\r\n print(key, \" : \", value)\r\n print()" ]
[ "0.6518504", "0.6352941", "0.63204557", "0.6319985", "0.62665594", "0.61978984", "0.6115929", "0.6110398", "0.60612506", "0.59930515", "0.5966976", "0.59666437", "0.59666437", "0.59537727", "0.5947354", "0.59427744", "0.5914536", "0.5914091", "0.582117", "0.5819495", "0.5819495", "0.5787796", "0.57763624", "0.5764358", "0.57596225", "0.5741508", "0.5726192", "0.5722572", "0.5717987", "0.5707593", "0.56824607", "0.56670594", "0.56663716", "0.56491196", "0.5647951", "0.5624272", "0.56121874", "0.56056654", "0.559861", "0.55908287", "0.55894864", "0.5577536", "0.5576847", "0.5571922", "0.55667466", "0.55649936", "0.55638665", "0.55581194", "0.55475223", "0.55467707", "0.5539586", "0.5537563", "0.55275035", "0.55232316", "0.55221957", "0.55171704", "0.549218", "0.54869497", "0.54828227", "0.5473378", "0.54721284", "0.54721284", "0.546936", "0.54678077", "0.5461414", "0.5445227", "0.54411525", "0.5435677", "0.5435664", "0.54346937", "0.5430437", "0.5427048", "0.54268104", "0.54213655", "0.54178077", "0.5417523", "0.54142207", "0.5411624", "0.5407385", "0.540454", "0.540434", "0.53973114", "0.53958416", "0.5393177", "0.5390221", "0.53872514", "0.538373", "0.5377279", "0.5373595", "0.53721476", "0.53686076", "0.5368385", "0.5360875", "0.5357946", "0.5342779", "0.5337074", "0.533521", "0.53250265", "0.5324967", "0.5324042" ]
0.7562387
0
Callback function which checks if two rectangles have collided with each other
def collide_hit_rect(first, second): return first.hit_rect.colliderect(second.hit_rect)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True", "def overlap_rect(rec1, rec2):\n # true if rec2 is left of rec1\n a = rec2[2] <= rec1[0]\n \n # true if rec2 is right of rec1\n b = rec1[2] <= rec2[0]\n\n # true if rec2 is below rec1\n c = rec2[3] <= rec1[1]\n\n # true if rec2 is above rec1\n d = rec1[3] <= rec2[1]\n\n return not (a or b or c or d)", "def check_obstructed(r1,r2): \n \n if r1==r2:\n return False\n \n #Densely sample line connecting r1 and r2.\n #If any of those sampled points is inside the rectangle, then the \n #line of sight intersects the rectangle and the tower's view is\n #obstructed.\n NP = 1000\n sampled_x = np.linspace(r1[0],r2[0],NP)\n sampled_y = np.linspace(r1[1],r2[1],NP)\n for x,y,w,h in self.coordinates__obstacles:\n for pt in xrange(NP):\n if (sampled_x[pt] > x) and (sampled_x[pt] < x+w) and \\\n (sampled_y[pt] > y) and (sampled_y[pt] < y+h):\n return True\n return False", "def collides(self, other):\r\n for block in self.blocks:\r\n for obstacle in other.blocks:\r\n if block.col == obstacle.col and block.row == obstacle.row:\r\n return True\r\n return False", "def box_collision(self):\n border_box_pos_1 = self.box_1.x + self.box_1.width/2\n border_box_pos_2 = self.box_2.x - self.box_2.width/2\n\n if (border_box_pos_2 - border_box_pos_1) <= 0:\n return True\n else:\n return False", "def is_overlapping(box1, box2):\n if box1[2] <= box2[0]: # If box1 is to the left of box2\n return False\n elif box1[0] >= box2[2]: # If box1 is to the right of box2\n return False\n elif box1[3] <= box2[1]: # If box1 is below box2\n return False\n elif box1[1] >= box2[3]: # If box1 is above box2\n return False\n else:\n return True", "def doBoundingBoxesIntersect(self, other):\n if(self.upperLeft.x <= other.lowerRight.x and\n self.lowerRight.x >= other.upperLeft.x and\n self.upperLeft.y >= other.lowerRight.y and\n self.lowerRight.y <= other.upperLeft.y):\n return True\n return False", "def _rect_intersects(self, rect):\n\tb = (self.left() > rect.right() or \n\t\tself.right() < rect.left() or \n\t\tself.top() < rect.bottom() or \n\t\tself.bottom() > rect.top())\n\treturn not b", "def check_collision(self, a, b):\n\n dis_x = abs((a.x+a.r + a.dx)-(b.x+b.r + b.dx))\n dis_y = abs((a.y+a.r + a.dy)-(b.y+b.r + b.dy))\n distance = math.sqrt(dis_x*dis_x + dis_y*dis_y)\n\n if distance <= (b.r + a.r) and (a.colliding == False or b.colliding == False):\n\n return True", "def accurate_collision(self, other) -> bool:\r\n if self.collide:\r\n if self.bbox_intersect(other):\r\n offset = round(self.x - other.x), \\\r\n round(self.y - other.y)\r\n if self.mask.overlap(other.mask, offset): # Overlap returns None or 1 point\r\n return True\r\n return False\r\n else:\r\n return False", "def _overlapping(self, atom1, atom2):\n\n if np.linalg.norm(atom1.pos-atom2.pos) < (atom1.rad+atom2.rad):\n return True\n else:\n return False", "def check_collisions(self):\n for tail in self.tail:\n if tail.position == self.head.position:\n self.die()\n\n future_pos = Position(self.head_x + self.direction.move_x * Const.SQUARE_SIZE,\n self.head_y + self.direction.move_y * Const.SQUARE_SIZE)\n\n if future_pos.x < 0 or future_pos.x > Const.G_B_W - Const.SQUARE_SIZE or \\\n future_pos.y < 0 or future_pos.y > Const.G_B_H - 
Const.SQUARE_SIZE:\n self.die()", "def __check_if_symbol_is_over(rect1, rect2):\n\n rect_center_x_coord = rect1[4][0]\n rect2_center_x_coord = rect2[4][0]\n rect2_width = rect2[5]\n rect1_center_y_coord = rect1[4][1]\n rect2_center_y_coord = rect2[4][1]\n\n leftmost_x_coord = rect2_center_x_coord - (rect2_width // 2)\n rightmost_y_coord = rect2_center_x_coord + (rect2_width // 2)\n if (\n leftmost_x_coord <= rect_center_x_coord <= rightmost_y_coord\n and\n rect1_center_y_coord < rect2_center_y_coord\n ):\n return True\n else:\n return False", "def test_overlap(self):\r\n rect1 = Rectangle(10, 20, 30, 40)\r\n rect2 = Rectangle(50, 60, 70, 80)\r\n\r\n # overlap should be commutative\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect2.overlap_with(rect1)\r\n assert not Rectangle.overlap(rect1, rect2)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n\r\n rect1 = Rectangle(-10, -20, 10, 60)\r\n rect2 = Rectangle(0, 50, 100, 200)\r\n assert rect1.overlap_with(rect2)\r\n assert rect2.overlap_with(rect1)\r\n assert Rectangle.overlap(rect1, rect2)\r\n assert Rectangle.overlap(rect2, rect1)\r\n\r\n # rectangles with only same boarder are not considered overlapped\r\n rect1 = Rectangle(-30, -10, -20, 0)\r\n rect2 = Rectangle(-20, -5, 30, 20)\r\n rect3 = Rectangle(-40, 0, 30, 20)\r\n assert not rect1.overlap_with(rect2)\r\n assert not rect1.overlap_with(rect3)\r\n assert not Rectangle.overlap(rect2, rect1)\r\n assert not Rectangle.overlap(rect3, rect1)", "def intersects(self, other: RectangularRoom) -> bool:\n return (\n self.x1 <= other.x2\n and self.x2 >= other.x1\n and self.y1 <= other.y2\n and self.y2 >= other.y1\n )", "def check_overlap(self, a, b):\n return utils.is_point_in_circle(b.get_pos(), a.get_pos(), a.radius)", "def check_collision(self, footprint):\n return self.upperleft[0] < footprint.upperleft[0] < footprint.upperright[0] < self.upperright[0] and \\\n self.upperleft[1] < footprint.upperleft[1] < footprint.bottomleft[1] < self.bottomleft[1]", "def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1", "def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1", "def in_rect(a, b, c, x, y=None):\n x_in = a.x < x.x < b.x and a.y < x.y < c.y\n return x_in if y is None else x_in and a.x < y.x < b.x and a.y < y.y < c.y", "def collide(obj1, obj2):\n offset_x = obj2.x - obj1.x #The difference between obj1 and obj 2\n offset_y = obj2.y - obj1.y \n return obj1.mask.overlap(obj2.mask, (int(offset_x), int(offset_y))) != None # (x,y)", "def rectangles_intersect(r1, r2, shift1=(0, 0), shift2=(0, 0), extraSize=3):\r\n\r\n if ((min(r1[0] - extraSize + shift1[0], r1[2] + extraSize + shift1[0]) > max(r2[0] - extraSize + shift2[0],\r\n r2[2] + extraSize + shift2[0]))\r\n or (max(r1[0] - extraSize + shift1[0], r1[2] + extraSize + shift1[0]) < min(r2[0] - extraSize + shift2[0],\r\n r2[2] + extraSize + shift2[\r\n 0]))):\r\n return False\r\n\r\n if ((min(r1[1] - extraSize + shift1[1], r1[3] + extraSize + shift1[1]) > max(r2[1] - extraSize + shift2[1],\r\n r2[3] + extraSize + shift2[1]))\r\n or (max(r1[1] - extraSize + shift1[1], r1[3] + extraSize + shift1[1]) < min(r2[1] - extraSize + shift2[1],\r\n r2[3] + extraSize + shift2[\r\n 1]))):\r\n return False\r\n\r\n return True", "def rectContains(rect1, rect2):\n x1, 
y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n\n if x2 >= x1 and y2 >= y1 and x2 <= x1 + w1 and y2 <= y1 + h1 and x2 + w2 <= x1 + w1 and y2 + h2 <= y1 + h1:\n return True\n return False", "def is_rectangle_colliding(self, rectangle):\n for obstacle in self.obstacle_iterator():\n if rectangle.colliderect(obstacle.rect):\n return True\n return False", "def collide(b1,b2):\n if mag(b1.pos-b2.pos) < (b1.radius + b2.radius - .05):\n return True", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def circles_overlapping(x1, y1, x2, y2, r):\n # print(abs((x2-x1)**2 + (y2-y1)**2))\n # print((2*r)**2)\n if (abs((x2-x1)**2 + (y2-y1)**2) > (2*r)**2):\n return False\n else: return True", "def test_collision(quadtree1, quadtree2, shift1, shift2, stay_away):\r\n\r\n r1, r2 = quadtree1.root, quadtree2.root\r\n\r\n if (not r1) or (not r2):\r\n return False\r\n\r\n stack = [(r1, r2)]\r\n\r\n while stack:\r\n p1, p2 = stack.pop() # the nodes of the 1st and the 2nd trees\r\n\r\n if not rectangles_intersect(p1.value, p2.value, shift1, shift2, stay_away):\r\n # if larger rectangles do not collide, their children will not collide either\r\n # hence no need to go for sub-nodes\r\n continue\r\n\r\n if p1.is_leaf() and p2.is_leaf():\r\n # leaves collide, we're done\r\n return True\r\n\r\n c1, c2 = p1.get_children_list(), p2.get_children_list()\r\n\r\n if not c1:\r\n for x in c2:\r\n stack.append((p1, x))\r\n else:\r\n if not c2:\r\n for x in c1:\r\n stack.append((x, p2))\r\n else:\r\n # none are empty, i.e. the nodes are not leaves\r\n for x in c1:\r\n for y in c2:\r\n stack.append((x, y))\r\n\r\n return False", "def is_overlap(bb1, bb2):\n l1, t1, r1, b1 = bb1['x'], bb1['y'], bb1['x']+bb1['w'], bb1['y']+bb1['h']\n l2, t2, r2, b2 = bb2['x'], bb2['y'], bb2['x']+bb2['w'], bb2['y']+bb2['h']\n\n if r1 > l2 and r2 > l1 and b2 > t1 and b1 > t2:\n return True\n else:\n return False", "def overlaps(self, other):\n return (self.right > other.left and self.left < other.right and\n self.top < other.bottom and self.bottom > other.top)", "def intersects(self, other): # -> bool:\n ...", "def __is_similar(rect, another):\n area1 = rect[2]*rect[3]\n area2 = another[2]*another[3]\n intersect_width = min(rect[0]+rect[2], another[0]+another[2]) - max(rect[0],another[0])\n if not intersect_width > 0:\n return False\n intersect_height = min(rect[1]+rect[3], another[1]+another[3]) - max(rect[1],another[1])\n if not intersect_height > 0:\n return False\n intersect_area = intersect_width * intersect_height\n return (float(intersect_area) / float(min(area1,area2))) > 0.7", "def rOverlap (x1, y1, w1, h1, x2, y2, w2, h2):\n if x1<=x2<=(x1+w1) or y1<=y2<=(y1+h1):\n return True\n elif x1<=(x2+w2)<=(x1+w1):\n return True\n else:\n return False", "def overlaps(self, other): # -> bool:\n ...", "def do_box_overlap(coord1, coord2):\n return (\n (coord1[0] - 2 < coord2[0] and coord1[1] + 2 > coord2[0]\n or coord2[0] - 2 < coord1[0] and coord2[1] + 2 > coord1[0]) \n and (coord1[2] - 2 < coord2[2] and coord1[3] + 2 > coord2[2]\n or coord2[2] - 2 < coord1[2] and coord2[3] + 2 > coord1[2]))", "def is_collided(self, rect):\n # return self.get_hit_box().colliderect(rect)\n player_hitbox = self.get_hit_box()\n distance = math.sqrt((math.pow(rect[0]-player_hitbox[0],2) + (math.pow(rect[1]-player_hitbox[1],2))))\n # dont collide with objects passed you\n if 
distance < self.player_image_size[0] and rect[0] >= player_hitbox[0]:\n return True\n else:\n return False", "def rectIntersect(rect1, rect2):\n rect = np.zeros_like(rect1)\n rect[[0, 2]] = np.maximum(rect1[[0, 2]], rect2[[0, 2]])\n rect[[1, 3]] = np.minimum(rect1[[1, 3]], rect2[[1, 3]])\n return rect", "def _intersects_1D(A, B):\n return False if (B[1] <= A[0]) or (B[0] >= A[1]) else True", "def check_collisions(self):", "def collision_check(self):\n return True", "def _bbox_overlap(self, other):\n reg0 = self.bbox\n reg1 = other.bbox\n return (reg0[0] <= reg1[2] and reg1[0] <= reg0[2] and\n reg0[1] <= reg1[3] and reg1[1] <= reg0[3])", "def crosses(self, other): # -> bool:\n ...", "def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True", "def in_rectangle(x, y):\n return ((self.min_x <= x <= self.max_x) and\n (self.min_y <= y <= self.max_y))", "def bbox_collision(bbox1, bbox2):\n\n bbox1 = np.asarray(bbox1)\n bbox2 = np.asarray(bbox2)\n\n max1 = np.max(bbox1, axis=1)\n min1 = np.min(bbox1, axis=1)\n\n max2 = np.max(bbox2, axis=1)\n min2 = np.min(bbox2, axis=1)\n\n out = (min1 <= max2) & (max1 >= min2)\n return np.all(out)", "def is_overlap(box_1, box_2, iou_th):\n return box_1.iou(box_2) > iou_th", "def check_intersection(obj1, obj2):\n (x1, y1, w1, h1) = obj1.get_box()\n (x2, y2, w2, h2) = obj2.get_box()\n if x2 + w2 - 1 < x1 or x2 >= x1 + w1:\n return False\n if y2 + h2 - 1 < y1 or y2 >= y1 + h1:\n return False\n \n return True", "def rotated_rectangles_intersect(rect1: Tuple[Vector, float, float, float],\n rect2: Tuple[Vector, float, float, float]) -> bool:\n return has_corner_inside(rect1, rect2) or has_corner_inside(rect2, rect1)", "def pixelCollision(rect1, rect2, hitmask1, hitmask2):\n rect = rect1.clip(rect2)\n\n if rect.width == 0 or rect.height == 0:\n return False\n\n x1, y1 = rect.x - rect1.x, rect.y - rect1.y\n x2, y2 = rect.x - rect2.x, rect.y - rect2.y\n\n for x in xrange(rect.width):\n for y in xrange(rect.height):\n if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:\n return True\n return False", "def check_collision():\n global round_over\n\n if player1.alive:\n if pygame.sprite.spritecollide(\n player1,\n obstacles,\n False,\n pygame.sprite.collide_mask):\n collision_text()\n reset_players()\n else:\n if pygame.sprite.spritecollide(\n player2,\n obstacles,\n False,\n pygame.sprite.collide_mask):\n round_over = True\n collision_text()\n reset_players()", "def __has_similar_rect(rect, rect_list):\n for i in reversed(range(len(rect_list))):\n if Reference.__is_similar(rect_list[i], rect):\n del(rect_list[i])\n return True\n return False", "def intersects(self, rect):\n\t\treturn ( rect.right >= self.left and rect.left < self.right\n\t\t\tand rect.bottom >= self.top and rect.top < self.bottom )", "def can_overlap(self):\n return False", "def is_crossed(self):\n left_boundary_clusters = np.extract(self.cluster[0] > 0,\n self.cluster[0])\n right_boundary_clusters = np.extract(self.cluster[-1] > 0,\n self.cluster[-1])\n return np.in1d(left_boundary_clusters, right_boundary_clusters).any()", "def validate_collision(self):\n pass", "def pixelCollision(self,rect1, rect2, hitmask1, hitmask2):\n\t\trect = rect1.clip(rect2)\n\n\t\tif rect.width == 0 or rect.height == 0:\n\t\t\treturn False\n\n\t\tx1, y1 = rect.x - rect1.x, rect.y - rect1.y\n\t\tx2, y2 = rect.x - rect2.x, rect.y - rect2.y\n\n\t\tfor x in xrange(rect.width):\n\t\t\tfor y in 
xrange(rect.height):\n\t\t\t\tif hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:\n\t\t\t\t\treturn True\n\t\treturn False", "def crossover(x, y):\n return x[-1] > y[-1] and x[-2] < y[-2]", "def rectangle_already_tracked(rectangles, rectangle):\n for current_rectangle in rectangles:\n if rectangle_percentage_coincidence(current_rectangle, rectangle) > 0.6:\n return True \n return False", "def isdisjoint(self, other):\n self._check_title(other)\n\n # sort by top-left vertex\n if self.bounds > other.bounds:\n i = self\n self = other\n other = i\n\n return (self.max_col, self.max_row) < (other.min_col, other.max_row)", "def is_in_collision_line(self, a, b):\n return abs((b[0]-a[0])*self.x + (a[1]-b[1])*self.y + (a[0]-b[0])*b[1] + (b[1]-a[1])*a[0]) /\\\n sqrt((b[0]-b[1])**2 + (a[1]-b[1])**2 + 0.0000001)< self.r", "def has_collide(self, obj):\n rect1 = self.anim.getRect()\n rect2 = obj.anim.getRect()\n \n rect1.move_ip(self.pos)\n rect2.move_ip(obj.pos)\n \n return rect1.colliderect(rect2)", "def hasCollidedWith(self,otherEntity):\n distance=math.sqrt((otherEntity.xPos-self.xPos)**2+(otherEntity.yPos-self.yPos)**2)\n return distance < (self.hitboxRadius+otherEntity.hitboxRadius)", "def overlaps(self, other):\n\n if self.ll.x >= other.ur.x:\n return False\n \n if self.ll.y >= other.ur.y:\n return False\n \n if self.ur.x <= other.ll.x:\n return False\n \n if self.ur.y <= other.ll.y:\n return False\n \n return True", "def intersect(self, rectangle):\n return self.contains(rectangle.corner) or rectangle.contains(self.corner)", "def check_collide(self):\r\n for raindrop in self.overlapping_sprites:\r\n raindrop.handle_collide()", "def do_overlap(r1, r2):\n r1_s, r1_e = r1\n r2_s, r2_e = r2\n\n return r1_s <= r2_s <= r1_e or r2_s <= r1_s <= r2_e", "def overlaps(a, b):\n\n dx = a.x - b.x\n dy = a.y - b.y\n try:\n radius = a.radius + b.radius\n except AttributeError:\n radius = getattr(a, 'radius', 0.5) + getattr(b, 'radius', 0.5)\n\n return dx * dx + dy * dy <= radius * radius", "def hypercubes_overlap(hypercube1, hypercube2):\n if not isinstance(hypercube1, Volume) or \\\n not isinstance(hypercube2, Volume):\n raise TypeError()\n\n lowercorner1, uppercorner1 = hypercube1.get_corners()\n lowercorner2, uppercorner2 = hypercube2.get_corners()\n nb_dims = len(uppercorner1)\n \n for i in range(nb_dims):\n if not uppercorner1[i] > lowercorner2[i] or \\\n not uppercorner2[i] > lowercorner1[i]:\n return False\n\n return True", "def if_overlap(self, x, y) -> bool:\n if self.pos[y][x] != '-':\n print('此坐标已有棋子,请仔细观察棋盘')\n return True\n return False", "def __le__(self, other):\n\t\tif not isinstance(other, Rectangle):\n\t\t\traise NotImplementedError(\"Please use an object of class <Rectangle> only\")\n\n\t\treturn (self.width == other.width and self.height == other.height) or (self.area() < other.area())", "def check_collision(self, p1xy, p2xy):\n p1rc = xy2rc(p1xy)\n p2rc = xy2rc(p2xy)\n rr, cc = line(int(p1rc[0]), int(p1rc[1]), int(p2rc[0]), int(p2rc[1]))\n line_coords_rc = np.vstack([rr, cc]).T\n for line_coord_rc in line_coords_rc:\n if array_in_list(line_coord_rc, list(self.obstacles)):\n return True\n return False", "def collision(self, block):\n if self.pos_x == block.pos_x and self.pos_y+self.height == block.pos_y:\n self.col_d = True\n if self.pos_x == block.pos_x+block.width and self.pos_y == block.pos_y:\n self.col_l = True\n if self.pos_x == block.pos_x-self.width and self.pos_y == block.pos_y:\n self.col_r = True", "def check_position_for_same_colour(self, position1, position2):\n return (not 
self.check_position_free(position1)) and self.check_position_for_same_occupancy(position1, position2)", "def overlap(component1, component2):\n if component1[0].start <= component2[0].stop and component2[0].start <= component1[0].stop:\n if component1[1].start <= component2[1].stop and component2[1].start <= component1[1].stop:\n return True\n return False", "def _overlap(x1, w1, x2, w2):\r\n if x1+w1 < x2-w2: return False\r\n if x1-w1 > x2+w2: return False\r\n\r\n return True", "def intersects(self, cuboid):\n\t\treturn ( cuboid.front >= self.back and cuboid.back < self.front\n\t\t\tand cuboid.right >= self.left and cuboid.left < self.right\n\t\t\tand cuboid.bottom >= self.top and cuboid.top < self.bottom )", "def collision(self):\n # Check collision with walls\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n if x_coord <= EDGE or x_coord >= SCREEN_X - self.size - EDGE or \\\n y_coord <= EDGE or y_coord >= SCREEN_Y - self.size - EDGE:\n return True\n # Check collision with self\n corners = self.get_corners()\n if self.heading == \"right\":\n (frontleft_x, frontleft_y) = (corners[1][0], corners[1][1])\n (frontright_x, frontright_y) = (corners[2][0], corners[2][1])\n elif self.heading == \"left\":\n (frontleft_x, frontleft_y) = (corners[3][0], corners[3][1])\n (frontright_x, frontright_y) = (corners[0][0], corners[0][1])\n elif self.heading == \"up\":\n (frontleft_x, frontleft_y) = (corners[0][0], corners[0][1])\n (frontright_x, frontright_y) = (corners[1][0], corners[1][1])\n elif self.heading == \"down\":\n (frontleft_x, frontleft_y) = (corners[2][0], corners[2][1])\n (frontright_x, frontright_y) = (corners[3][0], corners[3][1])\n for i in range(len(self.x_coord)):\n if self.x_coord[i] < frontleft_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontleft_y < self.y_coord[i] + self.size:\n return True\n if self.x_coord[i] < frontright_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontright_y < self.y_coord[i] + self.size:\n return True\n return False", "def doesNotOverlap( self, other):\n return not self.overlaps( other)", "def update(self, stack):\n\t\t# the following two lines are a hack so that one can't overwrite squares\n\t\tif self.occupied == 1 or self.occupied == 2 or self.occupied == 3:\n\t\t\treturn False\n\t\tif (stack[0].pos == self.rect.topleft and stack[1].pos == self.rect.topright) or \\\n\t\t (stack[1].pos == self.rect.topleft and stack[0].pos == self.rect.topright):\n\t\t\t\tself.top_edge = True\n\t\t\t\tself.check_occupied()\n\t\t\t\treturn self.occupied\n\t\tif (stack[0].pos == self.rect.topright and stack[1].pos == self.rect.bottomright) or \\\n\t\t (stack[1].pos == self.rect.topright and stack[0].pos == self.rect.bottomright):\n\t\t\tself.right_edge = True\n\t\t\tself.check_occupied()\n\t\t\treturn self.occupied\n\t\tif (stack[0].pos == self.rect.bottomleft and stack[1].pos == self.rect.bottomright) or \\\n\t\t (stack[1].pos == self.rect.bottomleft and stack[0].pos == self.rect.bottomright):\n\t\t\tself.bottom_edge = True\n\t\t\tself.check_occupied()\n\t\t\treturn self.occupied\n\t\tif (stack[0].pos == self.rect.topleft and stack[1].pos == self.rect.bottomleft) or \\\n\t\t (stack[1].pos == self.rect.topleft and stack[0].pos == self.rect.bottomleft):\n\t\t\tself.left_edge = True\n\t\t\tself.check_occupied()\n\t\t\treturn self.occupied\n\t\treturn False", "def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n 
same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True", "def isIntvOverlapped(rOne, rTwo):\n\tclear = rOne[1] <= rTwo[0] or rOne[0] >= rTwo[1] \n\treturn not clear", "def check_collision(new_x, new_y):\n query_x = -1024 + (new_x * player.width)\n query_y = -1024 + (new_y * player.height)\n # print('queried %s,%s' % (query_x, query_y))\n zone_query = zone_map[player.current_zone].index.intersect(\n bbox=(query_x, query_y, query_x, query_y)\n )\n for i in zone_query:\n # print('found:%s which is %s' % (i.name, i.collision))\n if i.collision:\n return False\n return True", "def rect_overlap(rect1, rect2):\n a_x1, a_y1 = rect1[0], rect1[1]\n a_x2, a_y2 = (rect1[0] + rect1[2], rect1[1] + rect1[3])\n\n b_x1, b_y1 = rect2[0], rect2[1]\n b_x2, b_y2 = (rect2[0] + rect2[2], rect2[1] + rect2[3])\n\n x_overlap = max(0, min(a_x2, b_x2) - max(a_x1, b_x1))\n y_overlap = max(0, min(a_y2, b_y2) - max(a_y1, b_y1))\n return x_overlap * y_overlap", "def crossunder(x, y):\n return x[-1] < y[-1] and x[-2] > y[-2]", "def bbox_overlap(bbox_1: Sequence, bbox_2: Sequence) -> bool:\n if (bbox_1[0] > bbox_2[0]) or (bbox_1[1] > bbox_2[1]):\n return False\n if (bbox_1[2] < bbox_2[2]) or (bbox_1[3] < bbox_2[3]):\n return False\n\n return True", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def overlap(cir1x, cir1y, rad1, cir2x, cir2y, rad2):\n radius = rad1 + rad2\n compare = ((cir2y - cir1y)**2 + (cir2x - cir1x)**2)**0.5\n if compare > radius:\n print \"no overlapping\"\n else:\n print \"overlapping\"", "def overlaps(x1, x2, y1, y2):\n\n return x1 <= y2 and y1 <= x2", "def __contains__(self, other):\n if len(other) == 2:\n return self.collidepoint(*other)\n else:\n return self.contains(*other)", "def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False", "def overlap_with(self, other):", "def overlap(x,y):\n if (x[0]<=y[-1] and x[-1]>y[0]) or (y[0]<=x[-1] and y[-1]>x[0]):\n return 1\n else: return 0", "def is_collision_at(self, x, y):\n return self._on_post(x, y)", "def on_collision(self):", "def collision_2():\r\n tu.reset()\r\n print(\"collision_2\")\r\n r = 100\r\n b1 = Ball2D(r=r, x=r, y=tbl.y_min, vy=tbl.v_max, color=\"blue\")\r\n b2 = Ball2D(r=r, x=0, y=0, color=\"red\")\r\n bc = BallCollision2D(balls=[b1, b2])\r\n while (b1.x**2 + b1.y**2 < max_r_sq\r\n or b2.x**2 + b2.y**2 < max_r_sq):\r\n bc.ball_display()\r\n bc.ball_collision_update()\r\n time.sleep(t_update)\r\n bc.ball_update()\r\n if clear_at_end:\r\n bc.reset()", "def ifCollide( ball1, ball2 ):\n\t\n\tb1_x, b1_y = ball1.position.xy\n\tb2_x, b2_y = ball2.position.xy\n\t\n\t#vector connect center of particles\n\tdistant = Vector.from_points((b2_x, b2_y), (b1_x, b1_y))\n\t\n\t#if lenght of vector above is less( equal ) than sum of radius ( they overlapping )\n\tif ( ball1.r + ball2.r ) ** 2 >= distant.norm():\n\t\treturn True\n\telse:\n\t\treturn False", "def wall_collision(self):\n border_box_pos_1 = self.box_1.x - self.box_1.width/2\n\n if (border_box_pos_1) <= 0:\n return True\n else:\n return False", "def _check_collisions(self):\n\t\tif 
pygame.sprite.spritecollide(\n\t\t\tself.bolan, \n\t\t\tself.obstacles.obstacles,\n\t\t\tFalse, \n\t\t\tpygame.sprite.collide_mask):\n\t\t\t\tself.is_play = False\n\t\t\t\tself.is_gameover = True\n\t\t\t\tself.bolan.image = self.settings.bolan_dead_image", "def overlaps(self, other):\n\n #from .spherical import Arc\n from pyresample.spherical_geometry import Arc\n\n self_corners = self.corners\n\n other_corners = other.corners\n\n for i in self_corners:\n if i in other:\n return True\n for i in other_corners:\n if i in self:\n return True\n\n self_arc1 = Arc(self_corners[0], self_corners[1])\n self_arc2 = Arc(self_corners[1], self_corners[2])\n self_arc3 = Arc(self_corners[2], self_corners[3])\n self_arc4 = Arc(self_corners[3], self_corners[0])\n\n other_arc1 = Arc(other_corners[0], other_corners[1])\n other_arc2 = Arc(other_corners[1], other_corners[2])\n other_arc3 = Arc(other_corners[2], other_corners[3])\n other_arc4 = Arc(other_corners[3], other_corners[0])\n\n for i in (self_arc1, self_arc2, self_arc3, self_arc4):\n for j in (other_arc1, other_arc2, other_arc3, other_arc4):\n if i.intersects(j):\n return True\n return False", "def check_collide(self):\n\n\t\tfor pizza in self.overlapping_sprites:\n\t\t\tpizza.handle_collide()" ]
[ "0.703403", "0.70215464", "0.7011669", "0.69918287", "0.69465554", "0.6939659", "0.6890335", "0.6829306", "0.68109834", "0.6809146", "0.679998", "0.67945856", "0.6788503", "0.67841524", "0.66956073", "0.66782784", "0.6669898", "0.6662063", "0.6662063", "0.66499317", "0.6635392", "0.6623761", "0.6607536", "0.6606656", "0.65908676", "0.6561375", "0.65442085", "0.65413326", "0.6540936", "0.6532161", "0.65248257", "0.6513124", "0.6507755", "0.65034145", "0.6493069", "0.6492702", "0.64826775", "0.6482142", "0.6480893", "0.64740247", "0.6468279", "0.64544845", "0.64429486", "0.6423188", "0.64108574", "0.64096457", "0.64042217", "0.63934064", "0.6392827", "0.63874", "0.6374457", "0.6370505", "0.6367795", "0.6351585", "0.63398194", "0.6335506", "0.6329643", "0.6321309", "0.63174516", "0.6316893", "0.63053226", "0.62937254", "0.628405", "0.62832296", "0.6274613", "0.6273735", "0.62689453", "0.6267038", "0.6252779", "0.6209458", "0.62071544", "0.6191028", "0.6190876", "0.6183373", "0.6167503", "0.6166202", "0.61651194", "0.6164368", "0.61563146", "0.61337817", "0.6120896", "0.61098856", "0.61083573", "0.6106419", "0.610567", "0.6096521", "0.60954726", "0.6091961", "0.60892737", "0.6083123", "0.6077835", "0.6077728", "0.6076788", "0.6074972", "0.606993", "0.6057524", "0.60570425", "0.60472125", "0.6039457", "0.60290664" ]
0.64797306
39
Checks where the sprite has collided with an obstacle. Direction is needed to allow a sprite that collides horizontally to continue moving vertically if its movement was diagonal. The same goes for blocked vertical movement and continued horizontal movement.
def collide_with_obstacles(sprite, group, direction): collided = False if direction == 'x': collided = True hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect) if hits: # If the sprite is moving right, stop it and # set its right face on the left side of the object it collided with. if hits[0].rect.centerx > sprite.hit_rect.centerx: sprite.pos.x = hits[0].rect.left - sprite.hit_rect.width / 2 # If the sprite is moving right, stop it and # set its left face on the left side of the object it collided with. if hits[0].rect.centerx < sprite.hit_rect.centerx: sprite.pos.x = hits[0].rect.right + sprite.hit_rect.width / 2 # Completely stop the sprite sprite.vel.x = -sprite.vel.x # Update the sprite's center to the new position sprite.hit_rect.centerx = sprite.pos.x if direction == 'y': collided = True hits = pg.sprite.spritecollide(sprite, group, False, collide_hit_rect) if hits: # If the sprite is moving upwards, then # set its top to the bottom of the sprite it collided with. if hits[0].rect.centery < sprite.hit_rect.centery: sprite.pos.y = hits[0].rect.bottom + sprite.hit_rect.height / 2 # If the sprite is moving downwards, then # set its bottom to the top of the sprite it collided with. if hits[0].rect.centery > sprite.hit_rect.centery: sprite.pos.y = hits[0].rect.top - sprite.hit_rect.height / 2 # Completely stop the sprite sprite.vel.y = -sprite.vel.y sprite.hit_rect.centery = sprite.pos.y return collided
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_collisions(self, offset, index, obstacles):\n unaltered = True\n self.rect.move_ip(offset)\n while pygame.sprite.spritecollideany(self, obstacles):\n\n # First of all, check if it is a motile transparent block.\n # if so, do nothin\n col_spr = pygame.sprite.spritecollideany(self, obstacles)\n if hasattr(col_spr, \"inertia\"):\n if col_spr.inertia:\n break\n\n if self.climb:\n\t self.climb_mobility = False\n else:\n self.climb_mobility = True\n\n self.rect[index] += (1 if offset[index] < 0 else -1)\n unaltered = False\n #print(\"DEBUG: PLAYERCOL, {}\".format(index))\n\n # stop walking animation\n if index == 0:\n self.walk = False\n\n\n return unaltered", "def will_collide(self, direction=None):\n new_origin, new_positions = self.active_piece.try_move(direction=direction)\n for row, col in new_positions:\n y, x = new_origin[0] + row, new_origin[1] + col\n if y > 19 or y < 0 or x > 9 or x < 0 or self.board[y][x] == '#':\n return True\n return False", "def allow_to_move(self, direction, row, column):\n if self.valid_coverage_cell(row, column):\n if self.collision(direction) is False and \\\n self.cov_grid[row][column] == NOT_VISITED:\n return True\n else:\n return False", "def legal_move(marker, x, y, direction):\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False", "def verify_legal_move(self, direction):\n for b_x, b_y in self.get_block_positions(self.active_piece.FIGURE):\n\n if direction == \"LEFT\":\n b_x -= 1\n elif direction == \"RIGHT\":\n b_x += 1\n elif direction == \"DOWN\":\n b_y += 1\n else:\n raise ValueError\n\n if b_x < 0 or b_x >= self.WIDTH:\n return False\n\n if b_y < 0 or b_y >= self.HEIGHT:\n return False\n\n if self.board[b_y][b_x] != 0:\n return False\n return True", "def can_move(self):\r\n for wall in self.app.walls:\r\n if vec(self.grid_pos+self.direction) == wall:\r\n return False\r\n return True", "def _check_sonar_obstacles(self):\n # TODO: what's a good number?\n BLOCKED_THRESHOLD = 0.7\n\n rate = rospy.Rate(10) # 10 hz\n count = 10\n left = 0\n center = 0\n right = 0\n\n for i in range(count):\n obstacle = self.swarmie.get_obstacle_condition()\n\n if obstacle & Obstacle.SONAR_LEFT == Obstacle.SONAR_LEFT:\n left += 1\n if (obstacle & Obstacle.SONAR_CENTER ==\n Obstacle.SONAR_CENTER):\n center += 1\n if obstacle & Obstacle.SONAR_RIGHT == Obstacle.SONAR_RIGHT:\n right += 1\n\n rate.sleep()\n\n left_blocked = left / count > BLOCKED_THRESHOLD\n center_blocked = center / count > BLOCKED_THRESHOLD\n right_blocked = right / count > BLOCKED_THRESHOLD\n\n return left_blocked, center_blocked, right_blocked", "def collison(direction):\n if direction == 3 and screen.inch(head[0]-1,head[1]) !=ord(' '):\n return True\n elif direction == 2 and screen.inch(head[0]+1,head[1]) !=ord(' '):\n return True\n elif direction == 1 and screen.inch(head[0],head[1]-1) !=ord(' '):\n return True\n elif direction == 0 and screen.inch(head[0],head[1]+1) !=ord(' '):\n return True \n else:\n return False", "def detectWallCollision(self):\n if self.right >= GAME_WIDTH or self.left <= 
0:\n self._vx = -1.0 * self._vx\n if self.top >= GAME_HEIGHT:\n self._vy = -1.0 * self._vy", "def is_wall_collided(self)-> bool:\n # print('{} >= {} or {} <= 0'.format(self.x + self.width, self.windows_size))\n if self.x <= 0:\n self.velocity = -self.velocity\n return True\n return False", "def collision(self, direction):\n if direction == \"north\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row - 1,\n self.curr_cell.col):\n return True\n\n elif direction == \"south\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row + 1,\n self.curr_cell.col):\n return True\n\n elif direction == \"east\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row,\n self.curr_cell.col + 1):\n return True\n\n elif direction == \"west\":\n # valide north cell\n if self.check_coverage_collision(self.curr_cell.row,\n self.curr_cell.col - 1):\n return True\n\n return False", "def __check_col(self, x: int, y: int) -> bool:\n return not any([self.__maze[x + i, y] for i in (-1, 0, 1)])", "def collision(self, block):\n if self.pos_x == block.pos_x and self.pos_y+self.height == block.pos_y:\n self.col_d = True\n if self.pos_x == block.pos_x+block.width and self.pos_y == block.pos_y:\n self.col_l = True\n if self.pos_x == block.pos_x-self.width and self.pos_y == block.pos_y:\n self.col_r = True", "def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False", "def can_move_direction(entity, neighbor, game_map):\n new_x, new_y = neighbor\n if not game_map.in_bounds(x=new_x, y=new_y, margin=1):\n return False\n elif game_map.in_bounds(x=new_x, y=new_y) \\\n and game_map.terrain[new_x][new_y].elevation > Elevation.SHALLOWS \\\n and not entity.wings:\n return False\n return True", "def collision(self):\n # Check collision with walls\n (x_coord, y_coord) = (self.x_coord[0], self.y_coord[0])\n if x_coord <= EDGE or x_coord >= SCREEN_X - self.size - EDGE or \\\n y_coord <= EDGE or y_coord >= SCREEN_Y - self.size - EDGE:\n return True\n # Check collision with self\n corners = self.get_corners()\n if self.heading == \"right\":\n (frontleft_x, frontleft_y) = (corners[1][0], corners[1][1])\n (frontright_x, frontright_y) = (corners[2][0], corners[2][1])\n elif self.heading == \"left\":\n (frontleft_x, frontleft_y) = (corners[3][0], 
corners[3][1])\n (frontright_x, frontright_y) = (corners[0][0], corners[0][1])\n elif self.heading == \"up\":\n (frontleft_x, frontleft_y) = (corners[0][0], corners[0][1])\n (frontright_x, frontright_y) = (corners[1][0], corners[1][1])\n elif self.heading == \"down\":\n (frontleft_x, frontleft_y) = (corners[2][0], corners[2][1])\n (frontright_x, frontright_y) = (corners[3][0], corners[3][1])\n for i in range(len(self.x_coord)):\n if self.x_coord[i] < frontleft_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontleft_y < self.y_coord[i] + self.size:\n return True\n if self.x_coord[i] < frontright_x < self.x_coord[i] + self.size and \\\n self.y_coord[i] < frontright_y < self.y_coord[i] + self.size:\n return True\n return False", "def is_collided_vertical(self):\n # bounce of vertical borders -> y-axis-check\n if self.position[1] <= config['globals']['BALL_RADIUS']:\n self.velocity[1] *= -1\n elif self.position[1] >= config['globals']['HEIGHT'] + 1 - config['globals']['BALL_RADIUS']:\n self.velocity[1] *= -1", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n count = 0 # Count will track how many pieces are between start and end_pos\r\n\r\n if start_row != end_row and start_col != end_col: # Moving diagonally\r\n return False\r\n\r\n # If cannon moves to an empty position\r\n # if end_piece_player_id is None:\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n count += 1\r\n\r\n if start_col == end_col: # Moving vertically\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n\r\n if row_difference < 0: # Moving up the board\r\n for row in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n count += 1\r\n\r\n # 1 piece between start_pos and end_pos and end_pos contains a chess piece\r\n if count == 1 and end_piece_player_id is not None:\r\n return True\r\n # end_pos has no piece and there are no pieces to impede path\r\n elif end_piece_player_id is None and count == 0:\r\n return True\r\n # Returns False for all other scenarios\r\n else:\r\n return False", "def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the 
y-direction\r\n\r\n # checks to see if the player is still within the bounds\r", "def check_boundaries(self):\n # Checks if the enemy bar has gone of the net\n if self.rect.left <= self.settings.WINDOW_WIDTH / 2:\n self.rect.left = self.settings.WINDOW_WIDTH / 2\n self.isMovingUp = False\n\n # Checks if the enemy bar has gone out of bound to the right\n if self.rect.right >= self.settings.WINDOW_WIDTH:\n self.rect.right = self.settings.WINDOW_WIDTH\n self.isMovingUp = True", "def __check_boundary(self):\n\t\tif self.rect.left <= self.left_boundary:\n\t\t\tself.move_right()\n\t\tif self.rect.right >= self.right_boundary:\n\t\t\tself.move_left()", "def check_bounds(self):\n\n if self.bounds_action == self.BOUNCE:\n if self.hits_left_or_right():\n self.dx = self.dx * -1\n if self.hits_top_or_bottom():\n self.dy = self.dy * -1\n\n if self.bounds_action == self.STOP:\n if self.hits_left_or_right():\n self.dx = 0\n self.dy = 0\n if self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n\n if self.bounds_action == self.SKID:\n if self.hits_left_or_right():\n self.dx = 0\n if self.hits_top_or_bottom():\n self.dy = 0\n\n if self.bounds_action == self.DIE:\n if self.hits_left_or_right() or self.hits_top_or_bottom():\n self.dx = 0\n self.dy = 0\n self.visible = False", "def attack(self, somerow, somecol):\n valid_move = True\n for i in range(self.size):\n if self.is_valid_position(somerow, i):\n if self.board[somerow][i] != \"0\":\n #checks the same row\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(i, somecol):\n if self.board[i][somecol] != \"0\":\n #checks the same column\n valid_move = False \n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol+i):\n if self.board[somerow+i][somecol+i] != \"0\":\n #checks diagonal\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow+i, somecol-i):\n if self.board[somerow+i][somecol-i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol+i):\n if self.board[somerow-i][somecol+i] != \"0\":\n valid_move = False\n for i in range(self.size):\n if self.is_valid_position(somerow-i, somecol-i):\n if self.board[somerow-i][somecol-i] != \"0\":\n valid_move = False\n return valid_move", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n if start_row != end_row and start_col != end_col: # Moving non-orthogonally\r\n return False\r\n\r\n if start_row == end_row: # Moving horizontally\r\n col_difference = end_col - start_col\r\n\r\n if col_difference > 0: # Moving to the right of the board\r\n for col in range(start_col + 1, end_col): # Checks if there is a piece between start_col and end_col\r\n if board[start_row][col].get_piece() is not None:\r\n return False\r\n # When there is no piece to impede path, check if position is empty or piece is enemy piece\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if col_difference < 0: # Moving to the left of the board\r\n for col in range(start_col - 1, end_col, -1): # Checks to the left of the board\r\n # If there is a piece to block movement to the end_pos, return False\r\n if board[start_row][col].get_piece() is not None:\r\n return False\r\n if end_piece_player_id is None or 
start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if start_col == end_col: # Moving verticially\r\n row_difference = end_row - start_row\r\n\r\n if row_difference > 0: # Moving down the board\r\n for row in range(start_row + 1, end_row):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n return False\r\n # Checks if end_pos is empty or an enemy piece is on end_pos\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True\r\n\r\n if row_difference < 0:\r\n for row in range(start_row -1, end_row, -1):\r\n if board[row][start_col].get_piece() is not None: # If no piece is impeding path to end_pos\r\n return False\r\n # Checks if end_pos is empty or an enemy piece is on end_pos\r\n if end_piece_player_id is None or start_piece.get_player_id() != end_piece_player_id:\r\n return True", "def isValidMove(self, environment, currentCell, x2, y2, checkVisited = True):\n\n x1, y1 = currentCell.location.x, currentCell.location.y\n\n # Check if within bounds\n if x2 < 0 or x2 >= environment.length or y2 < 0 or y2 >= environment.breadth:\n return False\n\n # Check if cell is a wall\n nextCell = environment.grid[x2][y2]\n if nextCell.type == 'wall':\n return False\n\n # Check if cell is already visited\n if checkVisited and nextCell in self.visited:\n return False\n\n # Check for diagonal movement and corner cutting\n manhattanDistance = abs(x1-x2) + abs(y1-y2)\n if manhattanDistance == 2:\n if not environment.allowDiagonals:\n return False\n if not environment.cutCorners:\n if environment.grid[x1][y2].type == 'wall' or environment.grid[x2][y1].type == 'wall':\n return False\n else:\n if environment.grid[x1][y2].type == 'wall' and environment.grid[x2][y1].type == 'wall':\n return False\n\n return True", "def check_wall_collision(self):\r\n if self.head.xcor() > 280 or self.head.xcor() < -280 or \\\r\n self.head.ycor() > 280 or self.head.ycor() < -280:\r\n return False\r\n else:\r\n return True", "def check_obstacle_contact(board, x_player, y_player, button_pressed, health):\n\n red = '\\033[31m'\n reset_color = '\\033[0m'\n place_on_right_side = board[y_player][x_player + 1]\n place_on_left_side = board[y_player][x_player - 1]\n place_on_up_side = board[y_player - 1][x_player]\n place_on_down_side = board[y_player + 1][x_player]\n\n if button_pressed == 'd' and place_on_right_side == red + '#' + reset_color:\n health -= 5\n elif button_pressed == 'a' and place_on_left_side == red + '#' + reset_color:\n health -= 5\n elif button_pressed == 'w' and place_on_up_side == red + '#' + reset_color:\n health -= 5\n elif button_pressed == 's' and place_on_down_side == red + '#' + reset_color:\n health -= 5\n return health", "def check_collisions(self):\n for tail in self.tail:\n if tail.position == self.head.position:\n self.die()\n\n future_pos = Position(self.head_x + self.direction.move_x * Const.SQUARE_SIZE,\n self.head_y + self.direction.move_y * Const.SQUARE_SIZE)\n\n if future_pos.x < 0 or future_pos.x > Const.G_B_W - Const.SQUARE_SIZE or \\\n future_pos.y < 0 or future_pos.y > Const.G_B_H - Const.SQUARE_SIZE:\n self.die()", "def collides(self, other):\r\n for block in self.blocks:\r\n for obstacle in other.blocks:\r\n if block.col == obstacle.col and block.row == obstacle.row:\r\n return True\r\n return False", "def isValidPos(self, x, y, wallList, grid):\n if (x, y) not in wallList:\n return x > 0 and x < grid.width and y > 0 and y < grid.height", "def valid_move(x, y):\r\n if [x, y] 
in empty_cells(board):\r\n return True\r\n else:\r\n return False", "def propogate(self):\r\n X = len(grid[0])\r\n Y = len(grid)\r\n for DIR in [[1,0], [-1,0], [0,1], [0,-1]]:\r\n target_x, target_y = self.block_loc[0]+DIR[0], self.block_loc[1]+DIR[1]\r\n if 0 <= target_x < X and 0 <= target_y < Y: #if inbounds:\r\n target_block = grid[target_y][target_x]\r\n if not target_block.collapsed: #only ping uncollapsed blocks\r\n self.send_update(target_block,DIR)\r\n return", "def see_occupant(self, x, y, dx, dy):\r\n if dx == 0 and dy == 0: # Makes looping easier\r\n return False\r\n x += dx\r\n y += dy\r\n while 0 <= x < self.width and 0 <= y < self.height:\r\n if self.grid[y][x] == '#':\r\n return True\r\n if self.grid[y][x] == 'L':\r\n return False\r\n x += dx\r\n y += dy\r\n return False", "def _is_dead_end(self, i_row, i_col, direction):\n return (((i_row, i_col) in self._ts_cells and direction == \"s\") or\n ((i_row, i_col) in self._ts_cells and direction == \"se\") or\n ((i_row, i_col) in self._ts_cells and direction == \"sw\") or\n ((i_row, i_col) in self._ls_cells and direction == \"e\") or\n ((i_row, i_col) in self._ls_cells and direction == \"ne\") or\n ((i_row, i_col) in self._ls_cells and direction == \"se\") or\n ((i_row, i_col) in self._bs_cells and direction == \"n\") or\n ((i_row, i_col) in self._bs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._bs_cells and direction == \"ne\") or\n ((i_row, i_col) in self._rs_cells and direction == \"w\") or\n ((i_row, i_col) in self._rs_cells and direction == \"nw\") or\n ((i_row, i_col) in self._rs_cells and direction == \"sw\") or\n ((i_row, i_col) == self._tl_cell and direction == \"s\") or\n ((i_row, i_col) == self._tl_cell and direction == \"se\") or\n ((i_row, i_col) == self._tl_cell and direction == \"e\") or\n ((i_row, i_col) == self._bl_cell and direction == \"n\") or\n ((i_row, i_col) == self._bl_cell and direction == \"ne\") or\n ((i_row, i_col) == self._bl_cell and direction == \"e\") or\n ((i_row, i_col) == self._tr_cell and direction == \"w\") or\n ((i_row, i_col) == self._tr_cell and direction == \"sw\") or\n ((i_row, i_col) == self._tr_cell and direction == \"s\") or\n ((i_row, i_col) == self._br_cell and direction == \"w\") or\n ((i_row, i_col) == self._br_cell and direction == \"nw\") or\n ((i_row, i_col) == self._br_cell and direction == \"n\"))", "def check_movement(self):\n is_clear = True # default return value if no obstacles\n # !!! 
IR_SENSORS DISABLED\n if self.move_state == MOV_FORWARD:\n if self.l.look_for_obstacle(OBST_FRONT) == True:\n is_clear = False\n return is_clear", "def check_move(self, y, x):\n return 0 <= y < len(self.maze) \\\n and 0 <= x < len(self.maze[y]) \\\n and self.maze[y][x] != \"#\"", "def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True", "def is_legal_move(player, row_from, col_from, row_to, col_to):\r\n illegal_moves = [(0, 0), (2, 0), (0, 4), (2, 4)]\r\n\r\n \"\"\"special moves that are move available according to diagram\r\n List of tuples to and from values that are not possible\"\"\"\r\n moves_not_permitted = [[(0, 2), (1, 1)], [(0, 2), (1, 3)], [(1, 1), (2, 2)], [(1, 3), (2, 2)]]\r\n row_diff = abs(row_from - row_to)\r\n col_diff = abs(col_from - col_to)\r\n\r\n if player == 'hounds':\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and column difference isn't more than 1\r\n \"\"\"\r\n if (col_to - col_from) < 0: # no moves to the left of the board\r\n return False\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n \"\"\"When player is a hare\"\"\"\r\n\r\n if (row_to >= 0 and row_to < 3 and col_to >= 0 and col_to < 5):\r\n \"\"\"Check if the move is not out of bounds for the board with max col range 4 and row range 3\r\n and then check if it is a legal move\"\"\"\r\n\r\n if board[row_to][col_to] == 0 and (row_to, col_to) not in illegal_moves and row_diff <= 1 and col_diff <= 1:\r\n \"\"\" Check if the position is blank.\r\n Then check if the move is not one of the blank places\r\n Then check if the row difference and 
column difference isn't more than 1\"\"\"\r\n\r\n for item in moves_not_permitted:\r\n if len(set([(row_from, col_from), (row_to, col_to)]).intersection(set(item))) == 2:\r\n \"\"\" If to and from co-ordinates are present in the moves_not_permitted list then return False\"\"\"\r\n return False\r\n else:\r\n pass\r\n return True\r\n\r\n else:\r\n return False", "def isCollidingWithWall(self, vert, ent1Index, ent2, u, v):\n status = NOCOLLISION\n ent1 = self.listOfEntities[ent1Index]\n \n pt = vert - ent1.body.x.v\n \n vel = ent1.body.velocity.v + np.cross(ent1.body.omega.v, pt)\n \n# vel = QVRotation(ent.body.q,vel)\n \n n = np.cross(u,v)\n n = n/np.linalg.norm(n)\n \n Vr = vel\n Vrn = np.dot(Vr, n)\n \n if Vrn < 0:\n self.listOfCollisions.append(Collision(ent1Index,ent2,n,vert,Vr,-(Vr - (np.dot(np.dot(Vr,n),n)))))\n status = COLLISION\n \n return status", "def check_path_collision(self, X):\n\n #check collision with circular obstacles\n for i in range(X.shape[1]):\n p = X[0:3,i].flatten()\n\n for obs_loc in self.obs_locs:\n if np.linalg.norm(p[0:2] - obs_loc) < self.obs_rad:\n return True\n if i>0:\n p2 = X[0:3,i-1].flatten()\n #check collision with walls\n for win in self.windows:\n if win.check_collision(p, p2):\n return True\n\n return False", "def can_move(self, direction):\n assert direction\n return self._walls & direction == 0", "def checkAdjacent(self, direction, maze):\n\n y = self.y\n x = self.x\n\n # Shift x or y depending on the given direction.\n if direction in NS:\n y = shift[direction](y)\n elif direction in EW:\n x = shift[direction](x)\n\n # Check new location for obstacle or unwanted direction\n if maze[y][x] == 1 or ([x, y] in self.fullpath()) or (self.moved() is False and direction in self.dead_end_direction()[-1]):\n return False\n else:\n return True", "def detect_collision(self, other_sprite):\n\n # collision detection in case enemy is above or below player position\n if self.y_pos > other_sprite.y_pos + other_sprite.height or \\\n self.y_pos + self.height < other_sprite.y_pos:\n return False\n # collision detection in case enemy at the same Y but left or right of the\n # player's position\n if self.x_pos > other_sprite.x_pos + other_sprite.width or \\\n self.x_pos + self.width < other_sprite.x_pos:\n return False\n return True", "def CheckWinInDirection(self, pos, direction):\n\n block_owner = self.state[pos]\n\n if block_owner == EMPTY:\n return False\n\n pos_1 = self.ApplyDirection(pos, direction) # Moving To Next Position (1D)\n pos_2 = self.ApplyDirection(pos_1, direction) # Moving To Next Position (1D)\n\n if pos_1 == -1 or pos_2 == -2: # -2 Will Be Max Because You Have To Check With Three Strick-Throughs\n return False\n\n if block_owner == self.state[pos_1] and block_owner == self.state[pos_2]: # Check If There's A StrickThrough\n return True\n\n return False", "def is_movable(self, src_piece, dest_x, dest_y):\n if dest_x < 0 or dest_x >= self.width or dest_y < 0 or dest_y >= self.height:\n return False\n elif self.coordinates[dest_x][dest_y].status == 0:\n return False\n elif self.coordinates[dest_x][dest_y].player == src_piece.player:\n return False\n else:\n return True", "def IsTileBlocked(self, x, y, actor_moving):\r\n index = self.GetTileIndex([x, y])\r\n blocked = self.palette[index].get('blocking', 0)\r\n \r\n # If we're not blocked by an obstacle, check for an actor\r\n if not blocked:\r\n # Check against the player, if not the player\r\n if self.game.player and actor_moving != self.game.player and \\\r\n self.game.player.pos.IsSame([x, y]):\r\n 
blocked = True\r\n \r\n # Check against the actors, if not the same actor\r\n for name in self.actors:\r\n actor = self.actors[name]\r\n if actor_moving != actor and actor.pos.IsSame([x, y]):\r\n blocked = True\r\n \r\n return blocked", "def detectWallCollision(self): \n if self.posn_x > cw - self.ball_width: # Collision with right-hand container wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution # reverse direction. \n self.posn_x = cw - self.ball_width * 1.1 # anti-stick to the wall \n if self.posn_x < 1: # Collision with left-hand wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution \n self.posn_x = 2 # anti-stick to the wall \n if self.posn_y < self.ball_height: # Collision with ceiling. \n self.velocity_y = -self.velocity_y * self.coef_restitution \n self.posn_y = self.ball_height * 1.1 # ceiling collision anti-stick \n if self.posn_y > ch - self.ball_height * 1.1 : # Floor collision. \n self.velocity_y = - self.velocity_y * self.coef_restitution \n self.posn_y = ch - self.ball_height * 1.1 # anti-stick. Prevents out-of-bounds ball loss (stickiness) ", "def canMove(self, direction, robot, newPosX, newPosY):\n result = False\n if (newPosY < 0 or newPosY > len(self.map)):\n print (\"Déplacement impossible\")\n elif (newPosX < 0 or newPosX > len(self.map[newPosY])):\n print (\"Déplacement impossible\")\n else:\n if (self.isThereWallInDirection(direction, robot, \\\n newPosX, newPosY)):\n print(\"Déplacement impossible (mur sur le chemin)\")\n result = False\n else:\n car = self.map[newPosY][newPosX]\n logging.info(\"self.map[{}]={}\".format(newPosY, \\\n self.map[newPosY]))\n logging.info(\"new coord X={} : Y={} :: {}\".\\\n format(newPosX, newPosY, car))\n if (car == \"O\"):\n print(\"Déplacement impossible (mur)\")\n else:\n logging.info(\"Déplacement possible\")\n result = True\n return result", "def is_collided(self, rect):\n # return self.get_hit_box().colliderect(rect)\n player_hitbox = self.get_hit_box()\n distance = math.sqrt((math.pow(rect[0]-player_hitbox[0],2) + (math.pow(rect[1]-player_hitbox[1],2))))\n # dont collide with objects passed you\n if distance < self.player_image_size[0] and rect[0] >= player_hitbox[0]:\n return True\n else:\n return False", "def check_collision(self, sprite1, sprite2):\r\n col = pygame.sprite.collide_rect(sprite1, sprite2)\r\n if col == True:\r\n if step != 40 and step != 80:\r\n self.speed[0] = - self.speed[0]\r\n self.speed[1] = - self.speed[1]\r\n self.image = pygame.transform.flip(self.image, 1, 0)", "def _is_valid_move(self, vector, current_piece, other_piece):\n # If direction is forward and the space is non-empty, break\n if vector[0] == 0 and other_piece != \"empty\":\n return False\n # If direction is diagonal and space is empty, break\n if vector[0] != 0 and other_piece == \"empty\":\n return False\n # If moving by 2 spaces, check if in starting row\n if vector[1] == 2 and current_piece.position[1] != 1:\n return False\n if vector[1] == -2 and current_piece.position[1] != 6:\n return False\n\n return True", "def _check_inner_dirs(self, i_row, i_col, adj_opp_cells):\n opp_player = \"B\" if self._turn == \"W\" else \"W\"\n \n if self._board[i_row-1][i_col] == opp_player: #north, tile to be placed will enter from the south\n adj_opp_cells.append((i_row-1, i_col, \"s\")) \n if self._board[i_row-1][i_col+1] == opp_player: #northeast, tile to be placed will enter from the sw\n adj_opp_cells.append((i_row-1, i_col+1, \"sw\"))\n if self._board[i_row][i_col+1] == opp_player: #east, tile to be placed will enter 
from the west\n adj_opp_cells.append((i_row, i_col+1, \"w\"))\n if self._board[i_row+1][i_col+1] == opp_player: #southeast, tile to be placed will enter from the nw\n adj_opp_cells.append((i_row+1, i_col+1, \"nw\"))\n if self._board[i_row+1][i_col] == opp_player: #south, tile to be placed will enter from the north\n adj_opp_cells.append((i_row+1, i_col, \"n\"))\n if self._board[i_row+1][i_col-1] == opp_player: #southwest, tile to be placed will enter from the ne\n adj_opp_cells.append((i_row+1, i_col-1, \"ne\"))\n if self._board[i_row][i_col-1] == opp_player: #west, tile to be placed will enter from the east.\n adj_opp_cells.append((i_row, i_col-1, \"e\"))\n if self._board[i_row-1][i_col-1] == opp_player: #northwest, tile to be placed will enter from the se.\n adj_opp_cells.append((i_row-1, i_col-1, \"se\"))", "def _is_wall(self, pos):\r\n return self.course[pos[0], pos[1]] == -1", "def _detectDirection(self, obj1, obj2):\n rect1 = obj1.rect\n rect2 = obj2.rect\n\n if rect1.midtop[1] > rect2.midtop[1]:\n return CollisionDirection.Top\n elif rect1.midleft[0] > rect2.midleft[0]:\n return CollisionDirection.Left\n elif rect1.midright[0] < rect2.midright[0]:\n return CollisionDirection.Right\n else:\n return CollisionDirection.Bottom", "def is_collision_by_map_obstacle(self):\n for content in self.contents:\n if self.content.y == self.y and self.content.x == self.x:\n return True\n else:\n return False", "def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True", "def is_legal_move(self, start_pos, end_pos, start_piece, end_piece_player_id, board):\r\n parsed_positions = self.parse_positions(start_pos, end_pos)\r\n\r\n start_row = parsed_positions[0]\r\n start_col = parsed_positions[1]\r\n end_row = parsed_positions[2]\r\n end_col = parsed_positions[3]\r\n\r\n # For horizontal movements for the horse\r\n if abs(end_row - start_row) == 1 and abs(end_col - start_col) == 2:\r\n # For movement going left\r\n if end_col - start_col == -2:\r\n if board[start_row][start_col-1].get_piece() is None: # Checks if horse is blocked\r\n return True\r\n else:\r\n return False\r\n # For movement going right\r\n else:\r\n if board[start_row][start_col + 1].get_piece() is None: # Checks if horse is blocked\r\n return True\r\n else:\r\n return False\r\n\r\n # For vertical movement for the horse\r\n elif abs(end_row - start_row) == 2 and abs(end_col - start_col) == 1:\r\n # For movement going down\r\n if end_row - start_row == 2:\r\n if board[start_row + 1][start_col].get_piece() is None:\r\n return True\r\n else:\r\n return False\r\n # For movement going up\r\n if end_row - start_row == -2:\r\n if board[start_row - 1][start_col].get_piece() is None:\r\n return True\r\n else:\r\n return False\r\n\r\n # Returns False if invalid end_pos for the horse\r\n else:\r\n return False", "def _is_all_direct_next_moves_blocked(self, reference_board=None):\n # Use untraversed board if none is specified\n if reference_board is None:\n reference_board = BoardPath._untraversed_board\n\n # Case #1 - Goal and Current Location in the Same Row\n if self._current_loc.get_row() == self._goal_loc.get_row():\n # Case 1A - Need to move left but path is blocked\n if self._current_loc.get_column() > 
self._goal_loc.get_column() and\\\n not self.is_move_valid(\"l\", reference_board):\n return True\n # Case 1B - Need to move left but path is blocked\n elif self._current_loc.get_column() < self._goal_loc.get_column() and\\\n not self.is_move_valid(\"r\", reference_board):\n return True\n else:\n return False\n\n # Case #2 - Goal and Current Location in the Same Row\n if self._current_loc.get_column() == self._goal_loc.get_column():\n # Case 2A - Need to move left but path is blocked\n if self._current_loc.get_row() > self._goal_loc.get_row() and\\\n not self.is_move_valid(\"u\", reference_board):\n return True\n # Case 1B - Need to move left but path is blocked\n elif self._current_loc.get_row() < self._goal_loc.get_row() and\\\n not self.is_move_valid(\"d\", reference_board):\n return True\n else:\n return False\n # Case #3 - Goal and current location are diagonal from one another\n else:\n number_invalid_conditions = 0\n # Case 3A - Check if need to move down but it is blocked\n if self._current_loc.get_row() < self._goal_loc.get_row() \\\n and not self.is_move_valid(\"d\", reference_board):\n number_invalid_conditions += 1\n # Case 3B - Check if need to move up but it is blocked\n if self._current_loc.get_row() > self._goal_loc.get_row() \\\n and not self.is_move_valid(\"u\", reference_board):\n number_invalid_conditions += 1\n # Case 3C - Check if need to move right but it is blocked\n if self._current_loc.get_column() < self._goal_loc.get_column() \\\n and not self.is_move_valid(\"r\", reference_board):\n number_invalid_conditions += 1\n # Case 3D - Check if need to move left but it is blocked\n if self._current_loc.get_column() > self._goal_loc.get_column() \\\n and not self.is_move_valid(\"l\", reference_board):\n number_invalid_conditions += 1\n # Only two direct moves when need to move diagonal. 
If invalid\n # count equals two, then return true as condition met.\n if number_invalid_conditions == 2:\n return True\n return False", "def _check_ls_corners(self, i_row, i_col, adj_opp_cells, loc):\n shift = 1 if loc == \"tl\" else -1 #either top-left or bottom-left\n opp_player = \"B\" if self._turn == \"W\" else \"W\"\n\n #Note that loc corresponds to the position of the tile to be placed.\n #Also, the indices correspond to an adjacent opposing cell to be considered.\n #The compass direction corresponds to the direction in which the adjacent opposing\n #cell will be \"entered\" by the tile to be placed.\n if self._board[i_row+shift][i_col] == opp_player: #up/down\n if loc == \"tl\":\n adj_opp_cells.append((i_row+shift, i_col, \"n\"))\n elif loc == \"bl\":\n adj_opp_cells.append((i_row+shift, i_col, \"s\")) \n if self._board[i_row+shift][i_col+1] == opp_player: #down-diag/up-diag\n if loc == \"tl\":\n adj_opp_cells.append((i_row+shift, i_col+1, \"nw\")) \n elif loc == \"bl\":\n adj_opp_cells.append((i_row+shift, i_col+1, \"sw\")) \n if self._board[i_row][i_col+1] == opp_player: #right\n adj_opp_cells.append((i_row, i_col+1, \"w\"))", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n # if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n if alien.check_edges():\n print(\"alien.rect BEFORE\", alien.rect) # rect = <rect(x, y, width, height)> \n print(\"direction BEFORE \", self.settings.fleet_direction)\n self._change_fleet_direction()\n print(\"direction AFTER \", self.settings.fleet_direction)\n print(\"Change in y is \", alien.rect.y)\n break", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def isColliding(self, vert, ent1Index, ent2Index, norm): #u, v\n \n ent1 = self.listOfEntities[ent1Index]\n ent2 = self.listOfEntities[ent2Index]\n status = NOCOLLISION\n \n pt1 = vert - ent1.body.x.v\n pt2 = vert - ent2.body.x.v\n \n vel1 = 
ent1.body.velocity.v + np.cross(ent1.body.omega.v, pt1)\n vel2 = ent2.body.velocity.v + np.cross(ent2.body.omega.v, pt2)\n \n# norm = np.cross(u,v)\n# norm = -norm/np.linalg.norm(norm)\n norm = norm/np.linalg.norm(norm)\n \n Vr = vel1 - vel2\n Vrn = np.dot(Vr, norm)\n \n if Vrn < 0:\n self.listOfCollisions.append(Collision(ent1Index,ent2Index,norm,vert,Vr,-(Vr - (np.dot(np.dot(Vr,norm),norm)))))\n status = COLLISION\n\n return status", "def is_valid_move(self, somerow, somecol):\n bool_1 = self.board[somerow][somecol] != 1\n bool_2 = self.num_queens_placed < self.size \n bool_3 = self.attack(somerow, somecol)\n return bool_1 and bool_2 and bool_3", "def check_allowed(i,j,DIR):\r\n #DIR is a unit vector pointing from i to j (eg: DIR = [0,1] indicates that j is 1 to the right of i)\r\n #check only specific arrangement on-demand\r\n if DIR == [1,0]: #i j #RIGHTWARD\r\n if get_blocks(i)[1][2] == get_blocks(j)[1][0]:\r\n return True\r\n elif DIR == [-1,0]: #LEFTWARD\r\n if get_blocks(j)[1][2] == get_blocks(i)[1][0]: #reverse indices\r\n return True\r\n elif DIR == [0,-1]: #UPWARD\r\n if get_blocks(j)[2][1] == get_blocks(i)[0][1]:\r\n return True \r\n elif DIR == [0,1]: #DOWNWARD\r\n if get_blocks(i)[2][1] == get_blocks(j)[0][1]:\r\n return True\r\n else:\r\n raise ValueError (\"Invalid DIR vector!\")\r\n return False", "def is_valid_move(self, side_color, x, y):\n return self.disc_place(side_color, x, y, check_only=True)", "def collision_step(self, wall_collision:CollisionTypes, players_collision: CollisionTypes, collision_list: list,\n l_motor_speed: float, r_motor_speed: float, extra_action: BallActions = BallActions.NO):\n if players_collision == CollisionTypes.PLAYER:\n collision_object = collision_list[0] # TODO, handle collision with multiple players\n other_pos_x, other_pos_y = collision_object.get_position_components_wcs()\n self_pos_x, self_pos_y = self.get_position_components_wcs()\n diff_x, diff_y = self_pos_x - other_pos_x, self_pos_y - other_pos_y\n blocker_angle = np.arctan2(diff_y, diff_x)\n restricted_angle = np.pi / 2\n\n elif wall_collision != CollisionTypes.NO:\n if wall_collision == CollisionTypes.WALL_VERTICAL:\n if self._x_pos_EFCS > 0: blocker_angle, restricted_angle = 0, np.pi / 2\n else: blocker_angle, restricted_angle = -np.pi, np.pi/2\n self._x_pos_EFCS = np.round(self._x_pos_EFCS)\n elif wall_collision == CollisionTypes.WALL_HORIZONTAL:\n if self._y_pos_EFCS > 0: blocker_angle, restricted_angle = np.pi/2, np.pi / 2\n else: blocker_angle, restricted_angle = -np.pi/2, np.pi/2\n self._y_pos_EFCS = np.round(self._y_pos_EFCS)\n else:\n if self._x_pos_EFCS > 0 and self._y_pos_EFCS > 0: blocker_angle, restricted_angle = np.pi/4, 3*np.pi/4\n elif self._x_pos_EFCS > 0 and self._y_pos_EFCS < 0: blocker_angle, restricted_angle = -np.pi/4, 3*np.pi/4\n elif self._x_pos_EFCS < 0 and self._y_pos_EFCS > 0: blocker_angle, restricted_angle = 3*np.pi/4, 3*np.pi/4\n else: blocker_angle, restricted_angle = -3*np.pi/4, 3*np.pi/4\n self._x_pos_EFCS = np.round(self._x_pos_EFCS)\n self._y_pos_EFCS = np.round(self._y_pos_EFCS)\n else:\n raise ValueError\n self.step_with_restrictions(blocker_angle, restricted_angle, l_motor_speed, r_motor_speed)\n return extra_action", "def check_collision(self,node, obstacleList):\n if (node is None):\n return True\n\n for i in range(len(node.path_x)):\n if self.map[node.path_x[i]+self.mapwidth*node.path_y[i]]:\n return True\n for j in range(5):\n #check neighbouring\n if self.map[node.path_x[i]+self.mapwidth*node.path_y[i]+j]:\n return True\n if 
self.map[node.path_x[i]+self.mapwidth*node.path_y[i]-j]:\n return True\n # for i in range(len(node.path_x)):\n # if (node.path_x[i],node.path_y[i])in obstacleList:\n # return True\n # for i in range(len(node.path_x)):\n # for j in range(5):\n # if(node.path_x[i]+j,node.path_y[i]+j) in obstacleList:\n # return True\n # elif (node.path_x[i]-j,node.path_y[i]-j) in obstacleList:\n # return True\n\n return False # safe", "def will_snake_collide_with_itself_for_direction(self, direction):\n snake_head_x = self.x[0]\n snake_head_y = self.y[0]\n for itr in range(1, self.length):\n if self.y[itr] == snake_head_y:\n if direction == constants.RIGHT:\n if abs(snake_head_x + self.step - self.x[itr]) <= 1.5 * constants.STEP_SIZE:\n return True\n\n if direction == constants.LEFT:\n if abs(snake_head_x - self.step - self.x[itr]) <= 1.5 * constants.STEP_SIZE:\n return True\n\n elif self.x[itr] == snake_head_x:\n if direction == constants.UP:\n if abs(snake_head_y - self.step - self.y[itr]) <= 1.5 * constants.STEP_SIZE:\n return True\n\n if direction == constants.DOWN:\n if abs(snake_head_y + self.step - self.y[itr]) <= 1.5 * constants.STEP_SIZE:\n return True\n\n\n\n return False", "def possibleMove(self, dist, blockList):\r\n \r\n if self.orientation == \"v\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] + n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0], coords[1] +n) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.y += dist\r\n self.setCoords()\r\n \r\n elif self.orientation == \"h\":\r\n for block in blockList:\r\n if dist >= 0:\r\n for n in range(dist):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and (block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n else:\r\n for n in range(0, dist, -1):\r\n for coords in self.getCoords():\r\n if ((coords[0] + n, coords[1]) in\r\n block.getCoords()) and(block.getNum() !=\r\n self.getNum()):\r\n self.collidedPieces = 1\r\n \r\n self.x += dist\r\n self.setCoords()", "def collision_detect(self):\n\n # Check if the collision was with a map\n # Rect-based collision code\n for map_rect in Map.current_map.collision_rects:\n collision_time, norm_x, norm_y = collision.aabb_swept_collision(self.rect, (self.vx, self.vy), map_rect)\n if collision_time != 1:\n if DEBUG: print(\"[collision]\", collision_time)\n break\n self.x += self.vx * collision_time\n self.y += self.vy * collision_time\n\n remaining_time = 1 - collision_time\n \"\"\"\n if remaining_time > 0:\n self.vx *= remaining_time;\n self.vy *= remaining_time;\n \"\"\"\n if collision_time != 1:\n if abs(norm_x) > .0001:\n self.vx = -self.vx * COLLISION_DAMPING\n if abs(norm_y) > .0001:\n self.vy = -self.vy * COLLISION_DAMPING\n self.collision_counter += 1\n return True\n return False\n\n # Old, mask-based collision code\n \"\"\"\n self.mask = pygame.mask.from_surface(self.image)\n point = pygame.sprite.collide_mask(Map.current_map, self)\n if point:\n if COLLISION_ALGORITHM_EXPERIMENTAL:\n self.vx, self.vy = collision.calculate_reflection_angle(Map.current_map.mask, point, (self.vx, self.vy))\n else: \n self.vx, self.vy = collision.simple_collision(Map.current_map.mask, point, (self.vx, self.vy))\n self.vx, self.vy = self.vx * COLLISION_DAMPING, 
self.vy * COLLISION_DAMPING\n \n self.collision_counter += 1\n return True\n return False\n \"\"\"", "def move_cell(self):\n return self.abivars.optcell != 0", "def is_haltable_for(self, character, coords):\n\t\treturn self.is_walkable(coords) and coords not in [c.grid_pos for p in self.players for c in p.characters if c != character]", "def is_valid_direction(self,cell,dirNum):\r\n newCell = self.get_next_cell(cell,dirNum,2)\r\n tooSmall = newCell[0] < 0 or newCell[1] < 0\r\n tooBig = newCell[0] >= self.height or newCell[1] >= self.width\r\n return not (tooSmall or tooBig)", "def robotCanOccupy(self, (xIndex, yIndex)):\n for dx in range(0, self.growRadiusInCells + 1):\n for dy in range(0, self.growRadiusInCells + 1):\n xPlus = util.clip(xIndex+dx, 0, self.xN-1)\n xMinus = util.clip(xIndex-dx, 0, self.xN-1)\n yPlus = util.clip(yIndex+dy, 0, self.yN-1)\n yMinus = util.clip(yIndex-dy, 0, self.yN-1)\n if self.occupied((xPlus, yPlus)) or \\\n self.occupied((xPlus,yMinus)) or \\\n self.occupied((xMinus, yPlus)) or \\\n self.occupied((xMinus, yMinus)):\n return False\n return True", "def check_direction(self, x: int, y: int, dx: int, dy: int) -> Optional[List[Point]]:\n\t\tresult = [Point(x,y)]\n\t\tmark = self.tiles[x][y]\n\t\tfor i in range(self.end_count-1):\n\t\t\tx += dx\n\t\t\ty += dy\n\t\t\tif x >= 0 and y >= 0 and x < self.size and y < self.size:\n\t\t\t\tif not self.tiles[x][y] == mark:\n\t\t\t\t\treturn None\n\t\t\t\telse:\n\t\t\t\t\tresult.append(Point(x,y))\n\t\t\telse:\n\t\t\t\treturn None\n\t\treturn result", "def is_valid_move(x:int, y:int,board_length) -> bool:\n if x < 0 or y < 0 or x == board_length or y == board_length:\n return False\n return True", "def colision(self, X, Y):\n #ESTE IF COMPROBARA MEDIANTE LAS POSICIONES EN EL EJE SI HAN GOLPEADO AL SUBDITO\n if X <= self.x + self.width and X >= self.x:\n if Y <= self.y + self.height and Y >=self.y:\n return True\n return False", "def collide(piece, px, py):\n for (i, j) in piece:\n x = px + i\n y = py + j\n if not (0 <= x < BOARD_WIDTH):\n return True\n if y >= BOARD_HEIGHT:\n return True\n if y < 0:\n continue\n if board[y][x]:\n return True\n return False", "def isLineAt(self, x, y, dx, dy):\n\n initialValue = self.board[x][y]\n #checks a cell to see if there is a piece there\n if initialValue != 0:\n #loops though 3 times in a certain direction to see\n #if there is a winning configuration\n for i in range(3):\n xIndex = x + (dx * (i+1))\n yIndex = y + (dy * (i+1))\n if (-1 < xIndex < self.h) and (-1 < yIndex < self.w):\n if initialValue == self.board[xIndex][yIndex]:\n continue\n else:\n return False\n else:\n return False\n return True\n else:\n return False", "def can_move(self, next_x, next_y):\n\t\tif self.battery == 0:\n\t\t\tif self.planet.tiles[next_y][next_x].is_shaded():\n\t\t\t\treturn False\n\t\tif self.planet.tiles[next_y][next_x].elevation(self) == \"+\":\n\t\t\treturn False\n\t\tif self.planet.tiles[next_y][next_x].elevation(self) == \"-\":\n\t\t\treturn False\n\t\treturn True", "def any_possible_moves_vertical(self):\n for i in range(self.TILES_PER_ROW - 1):\n for j in range(self.TILES_PER_ROW):\n if self.main_grid_values[i][j] == self.main_grid_values[i+1][j]:\n return True\n\n return False", "def is_legal_move(self, current_player, move):\n\t\tstarting_pos = move[0]\n\t\tending_pos = move[1]\n\t\tif ending_pos[0] not in range(self.board_size) or ending_pos[1] not in range(self.board_size):\t# Discard any generated moves that fall off of the board\n\t\t\treturn False \n\t\tif 
self.board.repr[starting_pos[0]][starting_pos[1]]!=self.player_symbol[current_player]:\n\t\t\tprint \"this should never trigger and is redundant\"\n\t\t\treturn False\n\t\tif self.board.repr[ending_pos[0]][ending_pos[1]]!= '.':\t# Check that landing spot is empty\n\t\t\treturn False\n\t\tmiddle_pos = (starting_pos[0]-(starting_pos[0]-ending_pos[0])/2,starting_pos[1]-(starting_pos[1]-ending_pos[1])/2)\t# Check the middle spot is the other piece - this should in theory not matter because the pieces alternate\n\t\tother_player = 1 - current_player \n\t\tif self.board.repr[middle_pos[0]][middle_pos[1]] != self.player_symbol[other_player]:\n\t\t\treturn False \n\t\treturn True", "def test_maze_entrance_adjacent_are_blocked(self):\n maze = Maze(100, 100)\n\n row, col = maze.entrance\n if row - 1 >= 0:\n maze.grid[row - 1][col].blocked = True\n if col + 1 < 100:\n maze.grid[row][col + 1].blocked = True\n if row + 1 < 100:\n maze.grid[row + 1][col].blocked = True\n if col - 1 >= 0:\n maze.grid[row][col - 1].blocked = True\n\n self.assertFalse(maze.can_reach_exit([row, col]))", "def continuous(self, x, y, X, Y):\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)", "def checkCollision(self, x, y, silent=False):\n # BBB: use of the zindex info can be useful here in future (flying characters)?\n x, y = utils.normalizeXY(x, y)\n collide_rect = self.collide_rect.move(x,y)\n collideGroups = (self.currentLevel['physical'],)\n for group in collideGroups:\n for sprite in group.sprites():\n if sprite is self:\n continue\n if collide_rect.colliderect(sprite.collide_rect):\n if not silent:\n event = pygame.event.Event(cblocals.SPRITE_COLLISION_EVENT, {'source':self, 'to': sprite})\n pygame.event.post(event)\n return True\n return False", "def check_move(self, x, y):\n try:\n return self.map[self.y+y][self.x+x] == \" \" or [self.x+x, self.y+y] == self.end_pos\n except IndexError:\n return False", "def is_in_buffer(self, coords):\n # technically should add conditions to not let the piece be off to the side of the buffer\n # however, given the board size the piece couldn't possibly get there in time so it's a non-issue\n direction = self.current_direction\n if direction == \"down\":\n return coords[1] < 0\n elif direction == \"up\":\n return coords[1] >= self.size\n elif direction == \"right\":\n return coords[0] < 0\n elif direction == \"left\":\n return coords[0] >= self.size", "def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True", "def is_knight_move_valid(self, from_row, from_col, to_row, to_col):\n # check for valid move\n if ((abs(from_row - to_row) == 1 and abs(from_col - to_col) == 2) or\n (abs(from_row - to_row) == 2 and abs(from_col - to_col) == 1)):\n return True\n return False", "def can_move(self, p_pos, o_pos):\n pos_x, pos_y = p_pos\n\n possibles = [other[0] for other in o_pos if not other[1]]\n if self.is_black() and not self.is_crowned():\n moves = [m for m in possibles if m[1] > pos_y] # limit forward\n elif self.is_red() and not self.is_crowned():\n moves = [m for m in possibles if m[1] < pos_y] # limit forward\n elif self.is_crowned():\n moves = [m for m in possibles] # no limits on movement necessary\n else:\n moves = [] # how the fuck did the color not get set right?\n\n return moves if moves else None", "def player_physics(action, sprite, vector):\n area = []\n if ceil(sprite.width) > 1 or 
ceil(sprite.height) > 1:\n area = to_area(sprite.x, sprite.y, sprite.width, sprite.height)\n else:\n area.append(sprite.pos)\n\n # shift each point by the vector\n area = list((x + vector[0], y + vector[1]) for x,y in area)\n\n for pos in area:\n obj = at(pos)\n if obj and isinstance(obj, list):\n for x in obj:\n if x.tag == OBSTACLE:\n return False\n elif obj and obj.tag == OBSTACLE:\n return False\n elif not visible(pos):\n return False\n return True", "def handle_collide(self):\r\n if self.get_overlapping_sprites():\r\n self.dx = -self.dx", "def tooTight(self, row, col, i, j):\n return self.distanceToGoal[row + i][col] == self.infinity or \\\n self.distanceToGoal[row][col + j] == self.infinity", "def check_collision(self):\n self.collided = False\n\n for point in self.collision_points:\n\n try:\n if self.game_map.get_at((\n int(point[0]), int(point[1])\n )) == WHITE_COLOR:\n self.collided = True\n break\n except:\n self.collided = True", "def checkCollumn(self, y):\n used = []\n for x in range(len(self.board)):\n cur = self.board[x][y]\n if cur not in used:\n if cur!= 0:\n used += [cur]\n else:\n return False\n return True", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def _check_fleet_edges(self):\n for alien in self.aliens.sprites():\n if alien.check_edges():\n self._change_fleet_direction()\n break", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def collision_check(self, board, row, column, orientation, ship_length):\n if orientation == 'H':\n for i in range(column, column + ship_length):\n if board[row][i] == SHIP:\n if self.user == 'player':\n print('\\nA SHIP IS ALREADY WITHIN THESE CO-ORDINATES.')\n print('TRY AGAIN!\\n')\n return True\n else:\n return True\n else:\n for i in range(row, row + ship_length):\n if board[i][column] == SHIP:\n if self.user == 'player':\n print('\\nA SHIP IS ALREADY WITHIN THESE CO-ORDINATES.')\n print('TRY AGAIN!\\n')\n return True\n else:\n return True\n return False" ]
[ "0.67670953", "0.66960996", "0.663591", "0.6418021", "0.6344243", "0.63345575", "0.6334223", "0.6327699", "0.6312439", "0.63031816", "0.62835157", "0.6234458", "0.6216627", "0.6159888", "0.6156384", "0.61506426", "0.6149324", "0.61447644", "0.61446947", "0.6136143", "0.6082602", "0.60719675", "0.60708326", "0.60547876", "0.6039809", "0.60216624", "0.600615", "0.600212", "0.597993", "0.59705085", "0.59690076", "0.59640986", "0.5954988", "0.5934315", "0.5932524", "0.59294975", "0.59222585", "0.591524", "0.59103787", "0.5909226", "0.5901929", "0.588929", "0.58889234", "0.5873698", "0.58667403", "0.5865726", "0.586079", "0.585265", "0.5848888", "0.58395493", "0.58327776", "0.58321095", "0.58236855", "0.58229595", "0.5821898", "0.58174354", "0.581728", "0.5813783", "0.5806834", "0.5802926", "0.57943845", "0.57936275", "0.5788314", "0.5786301", "0.57790416", "0.57680166", "0.57671", "0.5757276", "0.5757102", "0.5750193", "0.5743443", "0.5736889", "0.5735488", "0.57344407", "0.5733882", "0.5729577", "0.57278335", "0.57168585", "0.57129747", "0.5709398", "0.56965524", "0.56927514", "0.5676594", "0.5669352", "0.5657629", "0.5650398", "0.56489474", "0.56447774", "0.56447256", "0.56386566", "0.563701", "0.5635803", "0.56355065", "0.5628643", "0.5628312", "0.5621273", "0.5621273", "0.5621273", "0.5620436", "0.5616724" ]
0.655872
3
Get a Producer queue instance
def getProducer():
    # get the config and a producer
    config = ecommerce.config.getConfig()
    return ecommerce.queue.queue(config, queuePrefix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_queue():\n\n return multiprocessing.Queue()", "def get(queue_name: str, **kwargs) -> Queue:\n return Queue(queue_name, **kwargs)", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def _get_queue(self):\n return self.__queue", "def producer(self):\n return Producer(app=self.app, client=self.client)", "def get_producer(conf_settings=None, address=Config.INSIGHTS_KAFKA_ADDRESS): # pragma: no cover\n if conf_settings is None:\n conf_settings = {}\n conf = _get_producer_config(address, conf_settings)\n return ProducerSingleton(conf)", "def new_queue() -> Queue:\n return multiprocessing.Queue()", "def get_queue(self):\n return self.queue", "def get_queue(self):\n return self.queue", "async def get_queue(self, ctx: commands.Context) -> Optional[QueueManager]:\n\n return self.queue[ctx.guild.id]", "def create(self):\n topic = self.__conn__.create_topic(self.__topic__)\n return topic.get_producer(*self.__args__, **self.__kargs__)", "def get_queue(self):\n if self.queue is not None:\n return self.queue\n state = self.get_state()\n self.queue = state.get_queue()\n # print(\"IQ\", self.queue)\n return self.queue", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def queue(self, sid):\r\n return queues.Queue(self, sid)", "def small_queue():\n que = Queue()\n que.enqueue(1)\n que.enqueue(2)\n que.enqueue(3)\n que.enqueue(4)\n que.enqueue(5)\n return que", "def get_message(cls):\n rp = cls.get()\n try:\n message = rp.queue_send.get_nowait()\n except Exception:\n return None\n\n return message", "def _create_queue(self):\n # Instantiate\n queue = pbs.queue(verbose=not self.quiet)\n\n if self.q == 'ember':\n # Submitting to Utah ember cluster\n ppn = 12\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n walltime = self.walltime if int(self.walltime.split(':')[0]) < 72 else '72:00:00'\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=walltime, ppn=ppn, cpus=cpus, partition='ember', alloc='sdss')\n elif self.q is not None:\n # All other self.q values expected for Portsmouth cluster,\n # sciama. In this case, the number of nodes is queue\n # dependent, and qos is not set\n if self.q == 'sciama1.q':\n ppn = 12\n elif self.q == 'sciama3.q':\n ppn = 20\n else:\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, umask=self.umask,\n walltime=self.walltime, queue=self.q, ppn=ppn, cpus=cpus)\n else:\n # self.q can be None when submitting to both the Portsmouth\n # and Utah clusters. In this case, the default queue\n # destination and ppn is correct. 
qos is also set, but this\n # should only be used when submitting to Utah.\n ppn = 16\n cpus = ppn if self.cpus is None else min(self.cpus, ppn)\n queue.create(label=self.label, nodes=self.nodes, qos=self.qos, umask=self.umask,\n walltime=self.walltime, ppn=ppn, cpus=cpus)\n\n return queue", "def queue(self):\n return self._queue", "def queue(self):\n return self._queue", "def queue(self):\n return self._queue", "def queue(self):\n return self._queue", "def get_queue(queue_name=\"\"):\n print(get_qstat_arg(queue_name))\n q = subprocess.Popen(\n _get_qstat_arg(queue_name), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, stdin=subprocess.PIPE\n )\n o, e = q.communicate()\n\n return o", "def get_worker_id_queue():\n global _WORKER_ID_QUEUE\n if _WORKER_ID_QUEUE is None:\n _WORKER_ID_QUEUE = multiprocessing.Queue()\n return _WORKER_ID_QUEUE", "def small_queue():\n queue = Queue()\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n queue.enqueue(4)\n return queue", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def priority_queue():\n from src.priorityq import PriorityQueue\n priority_queue = PriorityQueue()\n return priority_queue", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr)\n\n return self._queue", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def get_rabbit_queue():\n\n return \"metrics_queue\"", "def instantiate_queue(self):\n serialized_queue = self.cache.get('queue')\n queue = ast.literal_eval(serialized_queue.decode('utf-8'))\n return queue", "def get(self):\r\n try:\r\n # get with block=False returns an item if one\r\n # is immediately available, else raises the Empty exception\r\n return self._queue.get(block=False)\r\n except queue.Empty:\r\n return self._create_connection()", "def get_message_from_queue(self):\n message = None, None\n\n try:\n message = self.queue.get(block=True, timeout=3)\n except Empty:\n self.fail(msg='Queue get() failed empty')\n\n return message", "def magma_queue_get_device(queue):\n\n return _libmagma.magma_queue_get_device(queue)", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def _get_queue_type(self):\n return self.__queue_type", "def producer():\n\n connection = pika.BlockingConnection(pika.ConnectionParameters('rabbit'))\n channel = connection.channel()\n\n channel.queue_declare(queue=QUEUE_NAME)\n\n # Create two unique device ids to provide more example data\n timestamp = arrow.now().timestamp\n device_name = b'A' if timestamp % 2 == 0 else b'B'\n '''\n This creates the same hash value each time so we can use the Raspberry Pi\n serial number to create a unique ID for each 
device\n '''\n device_id = hashlib.sha1(device_name).hexdigest()\n\n # Currently a python dict\n data = {\n 'device_id': device_id,\n 'timestamp': timestamp,\n 'data': {\n 'key': 'value'\n }\n }\n\n channel.basic_publish(exchange='',\n routing_key=QUEUE_NAME,\n body=json.dumps(data)) # Encode as a JSON string\n msg = f' [x] Sent {data}'\n print(msg)\n logging.info(msg)\n connection.close()", "def support_queue(self, queue_id):\r\n return support_queues.SupportQueue(self, queue_id)", "def empty_queue():\n return Queue()", "def empty_queue():\n return Queue()", "def peek(self):\n return self.queue[0]", "def queue_peek(queue_instance, timeout=60):\r\n while True:\r\n try:\r\n yield queue_instance.get(timeout=timeout)\r\n except Empty:\r\n break", "def _get_nowait(self):\n # Fulfills a waiting producer, returning its value, or raising Empty if\n # no fulfillable producers are waiting.\n def fulfill_waiting_producer():\n while True:\n if self._waiting_producers:\n produce_wish = self._waiting_producers.pop(0)\n with produce_wish.group.lock:\n if not produce_wish.group.fulfilled:\n return produce_wish.fulfill()\n else:\n raise Empty()\n\n if self._buf is not None and not self._buf.empty:\n value = self._buf.pop()\n try:\n # Cycles a producer's value onto the buffer\n produced = fulfill_waiting_producer()\n self._buf.push(produced)\n except Empty:\n pass\n return value\n else:\n return fulfill_waiting_producer()", "def peek(self):\n return self.the_queue[0]", "def peek(self):\r\n return self.queue[0]", "def peek(self):\r\n return self.queue[0]", "def create_producer(self, topic_id: str) -> Producer:\n backend = None\n if self.vendor == 'kafka':\n backend = KafkaClient(topic_id, self.configs['kafka_servers'])\n else:\n project_id = os.getenv(\"GOOGLE_CLOUD_PROJECT\")\n subscription_id = os.getenv(\"GOOGLE_PUBSUB_SUB_ID\")\n backend = GooglePubSubClient(project_id=project_id, topic=topic_id,\n subscription_id=subscription_id, gcp_configs=self.configs)\n\n return Producer(backend)", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr, keys=self.QSTAT_KEYS)\n\n return self._queue", "def support_queue(self, queue_id):\n return support_queues.SupportQueue(self, queue_id)", "def _event_queue_factory(settings):\n return queue.Queue(settings[\"event_queue_max_size\"])", "def get(self):\n with self.__lock:\n while True:\n try:\n job = self.__queue.get(False)\n self.__lock.notify_all()\n return job\n except Queue.Empty:\n self.__lock.wait()", "def _getqueue(self):\n\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty():\n return self.outqueues[index]", "def kafka_get_connection(self):\n return self.kf_producer", "def register_producer(self):\n # Assigns the next available id\n # Adds the producer's queue to the buffer\n # Creates the producer's lock\n with self.prod_id_lock:\n self.prod_id += 1\n self.buff.append([])\n self.locks.append(Lock())\n return self.prod_id", "def _getqueue(self):\n go = self.tickqueue.get()\n for index in range(len(self.outqueues)):\n if not self.outqueues[index].empty(): return self.outqueues[index]", "def create_sender(self):\n sender = kafka.KafkaProducer(bootstrap_servers=['%s:%s' % (self._host, self._port)])\n return sender", "def peek(self):\n\n return self._queue[0]", "def getProducerIndex(self, name):\n\n self.ensureNotCreated()\n\n if not name in self.producers:\n raise Exception('Producer %r is not present in the framework configuration' % 
name)\n\n return self.producers.index(name)", "def use_queue():\n q = queue.Queue()\n for i in range(10):\n q.put_nowait(i)\n while q.qsize() > 0:\n element = q.get_nowait()\n sys.stdout.write(\"poping out from queue: {0}\\n\".format(element))", "def queue(self, name):\n # First create a queue\n queue = self.inbound_channel.declare_queue(name)\n\n # Create the registry for the queue\n registry = Registry(self, queue)\n\n # Prepare consuming queue with registry\n self.inbound_channel.consume(queue=queue, callback=registry)\n\n # Then, return the Registry object.\n return registry", "def peek(self):\n if self.isEmpty(): \n raise Exception(\"Queue underflow\")\n return self._q[self._first]", "def getQueue():\n dm_plugin_url = f\"https://api.zuri.chat/marketplace/plugins/{PLUGIN_ID}\"\n try:\n response = requests.get(url=dm_plugin_url)\n except requests.exceptions.RequestException as e:\n return e\n if response.status_code == 200:\n return response.json()[\"data\"][\"queue\"]\n else:\n return None", "def __init__(self):\n self.queue = Queue()", "def new_dque():\n from dqueue import Dque\n return Dque()", "def peek(self):\r\n if self.size():\r\n return self.queue[0]\r\n else:\r\n return None", "def query_queue(self, queue_name, alt_exchange_name=None):\n return self._query(queue_name, \"queue\", \"org.apache.qpid.broker\", alt_exchange_name)", "def get_publisher():\n return Publisher(\n topic=os.environ[\"TOPIC\"],\n **get_kafka_connection_params(),\n )", "def lookup(self, queue, project=None):\n\n try:\n pool_id = self._pool_id(queue, project)\n except errors.QueueNotMapped as ex:\n LOG.debug(ex)\n\n return self.get_default_pool(use_listing=False)\n\n return self.get_driver(pool_id)", "def _ns_queue(self, queue, consumer_id):\n return self._ns(queue, consumer_id, \"messages\")", "def get_queue_by_name(name):\n sqs = boto3.resource('sqs')\n return sqs.get_queue_by_name(QueueName=name)", "def getQueue(serverName: str, queueType: str):\n if queueType is \"k\":\n queue = kitchenQueue\n elif queueType is \"b\":\n queue = bathroomQueue\n else:\n raise Exception(\"Incorrect parameters\")\n\n if serverName in queue.keys():\n return queue.get(serverName)\n else:\n queue[serverName] = []\n return queue.get(serverName)", "def dequeue_message(self) -> MessageQueueItem:\n return heapq.heappop(self._message_queue)", "def get_queue(self, task_name):\n for name, queue in self.queues.items():\n if task_name in queue:\n return name\n return self.default_queue", "def testQueue():\n myQueue = Queue()\n myQueue.enqueue(1)\n myQueue.enqueue(2)\n myQueue.enqueue(3)\n print('Enqueue 1,2,3: ',myQueue)\n myQueue.enqueue(4)\n print('Peek: ',myQueue.peek())\n myQueue.dequeue()\n print('Enqueue 4+ dequeue: ',myQueue)\n myQueue.enqueue(5)\n print('Enqueue 5: ',myQueue)\n myQueue.enqueue(6)\n print('Enqueue 6: ',myQueue)\n myQueue.enqueue(7)\n print('Enqueue 7: ',myQueue)\n print('Peek: ',myQueue.peek())\n myQueue.dequeue()\n print('Dequeue: ',myQueue)", "def make_empty_queue():\n return Queue(0, None, None)" ]
[ "0.7450264", "0.7052122", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69740534", "0.69648343", "0.69545984", "0.6784196", "0.6645031", "0.6645031", "0.66199046", "0.6527877", "0.6523349", "0.63335377", "0.6271913", "0.6227262", "0.6198443", "0.6166942", "0.61613166", "0.61613166", "0.61613166", "0.61613166", "0.61494315", "0.6147359", "0.61294925", "0.61289525", "0.6071782", "0.6062432", "0.60469747", "0.6044276", "0.60429853", "0.6027902", "0.5977323", "0.59757787", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5942074", "0.5933152", "0.5933152", "0.5933152", "0.5933152", "0.5933152", "0.5933152", "0.5882835", "0.58698076", "0.5861618", "0.5861618", "0.5851738", "0.5802277", "0.5794053", "0.5790315", "0.5787559", "0.5787559", "0.575119", "0.57501554", "0.5750073", "0.5742813", "0.5703677", "0.5702011", "0.5696862", "0.5692726", "0.5691251", "0.5684521", "0.56833553", "0.56781065", "0.56772184", "0.56768477", "0.5611148", "0.56100976", "0.56059647", "0.559956", "0.5578197", "0.5573713", "0.5570997", "0.55671936", "0.5565562", "0.5553388", "0.5540799", "0.5525608", "0.55205", "0.55204904", "0.5512277" ]
0.8324251
0
Get the list of all entities of a given type from DB
def getEntityIds(type, subtype = None):

    # get a cursor
    conn = ecommerce.db.getConnection()
    cursor = conn.cursor()

    # decide the query to execute
    if type not in entityQueries:
        return [ ]

    # execute the query
    qparams = (type, )
    if subtype is not None:
        qparams = (type, subtype)
    cursor.execute(entityQueries[type], qparams)

    # fetch the ids
    elist = [ ]
    row = cursor.fetchone()
    while row is not None:
        elist.append(int(row[0]))
        row = cursor.fetchone()
    cursor.close()

    return elist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def query_all(cls)->List:\n database.cursor.execute(\"SELECT * FROM {}\".format(cls.table_name))\n items = database.cursor.fetchall()\n return [cls.to_object(item) for item in items]", "def get_all(cls):\n return db_session.query(cls).order_by(cls.name).all()", "def get_entities(self, type, offset=0, limit=20):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities?type={}'.format(self.url, type, offset, limit)\n r = requests.get(url, headers=self.headers_with_link)\n return r.json()", "def all(self, *args, **kwargs):\n list_to_return = []\n if not self.object_type:\n return list_to_return\n class_name = eval(self.object_type)\n if self.objects_id:\n for id in self.objects_id.split(';'):\n if id:\n list_to_return.append(class_name.objects.get(id=id))\n return list_to_return", "def get_all(class_name):\n result = class_name.query.all()\n return result", "def get_all(cls, order_by: Column = None):\n # Validate class before query\n cls.__class_validation()\n\n if order_by:\n entity_list = cls.query.order_by(order_by).all()\n else:\n entity_list = cls.query.all()\n\n if entity_list:\n return entity_list\n\n return None", "def fetch_all_sensor_types():\n query = db.session.query(\n TypeClass.id,\n TypeClass.sensor_type,\n )\n sensor_types = db.session.execute(query).fetchall()\n sensor_types = query_result_to_array(sensor_types)\n sensor_types = [st for st in sensor_types if is_valid_sensor_type(st[\"id\"])]\n return sensor_types", "def fetch_all(cls):\n return cls.query.all()", "def get_all_by_type(self, type):\n # Validation\n TrainerManager._str_validator(type)\n\n # Database Query\n session = self._db_session()\n if type == 'Regular Trainer':\n trainer_query = session.query(RegularTrainer).filter(\n RegularTrainer.type == \"Regular Trainer\").all()\n if type == 'Gym Leader':\n trainer_query = session.query(GymLeader).filter(\n GymLeader.type == \"Gym Leader\").all()\n session.close()\n\n return trainer_query", "def fetch_all(cls: Type[_T], session: Session, limit: int, offset: int) \\\n -> List[_T]:\n return Query(cls, session=session).limit(limit).offset(offset).all()", "def get_all_types():\n cnx, cursor = connect_db()\n query = \"\"\"select a.name, b.`order` from types a, types b\n where a.parent=b.guid\"\"\"\n cursor.execute(query)\n result = cursor.fetchall()\n result = pd.DataFrame(result, columns=['type', 'order'])\n cnx.close()\n return result", "def fetch_all_sensors(sensor_type):\n query = db.session.query(\n SensorClass.id,\n SensorClass.aranet_code,\n SensorClass.name,\n ).filter(SensorClass.type_id == sensor_type)\n sensors = db.session.execute(query).fetchall()\n sensors = query_result_to_array(sensors)\n sensors = {s[\"id\"]: s for s in sorted(sensors, key=lambda x: x[\"id\"])}\n return sensors", "def get_all_items(model, type):\n if(type == \"office\"):\n return model.get_all_offices()\n elif(type == \"party\"):\n return model.get_all_parties()\n return []", "def types():\n types = session.query(Type).all()\n return jsonify(types=[t.name for t in types])", "def find_objects_by_type():\n try:\n keyword = request.form[\"keyword\"]\n object_type = request.form[\"object_type\"]\n\n # Get entities based on the selection\n entities = g.user.get_api().get_by_object_types(keyword, object_type)\n\n # Parse response object into table data\n data = raw_entities_to_table_data(entities)\n\n # If no entities were found reutrn with failure state and message\n result = get_result_template()\n if 
len(data[\"data\"]) == 0:\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = 'No entities of type \"{TYPE}\" were found.'.format(\n TYPE=object_type\n )\n else:\n result[\"status\"] = \"SUCCESS\"\n result[\"data\"] = {\"table_field\": data}\n return jsonify(result_decorator(result))\n\n except Exception as e:\n result = get_result_template()\n result[\"status\"] = \"FAIL\"\n result[\"message\"] = str(e)\n return jsonify(result_decorator(result))", "def type_index(context, request):\n\n return {'types': db.DBSession.query(db.Type).order_by(db.Type.id).all()}", "def types_query(owner_name):\n query = Products.query.with_entities(Products.type_name.label('Type'))\\\n .filter_by(owner_name=owner_name)\\\n .distinct()\n return query", "def find_all(cls):\n return cls.dbm().modelclass_find_all(cls)", "def get_records(table, id=None):\n try:\n my_class = load_entity(table)\n except LoaderError as e:\n abort(400, e)\n\n if id is not None:\n try:\n r = my_class[id]\n except ObjectNotFound:\n abort(404)\n return serialize_entity(r)\n\n records = select(r for r in my_class)\n return serialize_entity_collection(records)", "def getResourcesByEntitytype(entitytype, srcentty):\n # Distinction is implemented by python set\n cursor.execute(\n '''SELECT r1.value FROM resource as r1\n JOIN resource as r2 ON r1.content_id = r2.content_id\n JOIN entitytype as e1 ON r1.entitytype_id = e1.id\n JOIN entitytype as e2 ON r2.entitytype_id = e2.id\n JOIN content ON r1.content_id = content.id\n WHERE e1.name = %s\n AND e2.name = %s\n AND in_dump = True\n ''',\n (entitytype, srcentty,)\n )\n return {c['value'] for c in cursor}", "def entities(self) -> List[Entity]:\n return [field for field in self._fields.values() if isinstance(field, Entity)]", "def _get_all_records(self) -> List[DBModelInstance]:\n return self.model.query.all()", "def get_all(self, context, type_):\n types = None\n if type_ and isinstance(type_, basestring):\n types = type_.strip(\",\").split(\",\")\n\n try:\n db_resource_mgrs_data = self.db_api.get_all_resource_managers(\n context, types=types)\n\n _resource_mgrs_data = []\n for db_resource_mgr_data in db_resource_mgrs_data:\n _resource_mgrs_data.append(_make_response(\n db_resource_mgr_data))\n except Exception as e:\n msg = (\"Error retrieving the 'resource managers' reason : %s\"\n % e.message)\n LOG.exception(msg)\n raise exception.RetrieveException(e.message)\n return _resource_mgrs_data", "def all(cls):\n return dbsession.query(cls).all()", "def all(cls):\n return dbsession.query(cls).all()", "async def get_metadata_for_object_type(\n dbcon: DBConnection, object_type: str) -> Iterable[object_models.ObjectMetadata]:\n q = '''select metadata.object_type, metadata.object_id, metadata.key, metadata.value\n from object_metadata as metadata\n where metadata.object_type=%s'''\n return [object_models.ObjectMetadata(*row) for row in await dbcon.fetch_all(q, (object_type,))]", "def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias", "def list(self):\n if not self.model:\n raise NameError('database model has not been set.')\n\n with self.session() as session:\n query = self.get_query(session)\n data = query.all()\n return data", "def _queryset(self):\n return self.type.objects.filter(id__in=self.ids)", "async def fetchall(entity, query: Union[ClauseElement, str], 
values: Dict = None) -> List[Mapping]:\n return await uvicore.db.fetchall(query=query, connection=entity.__connection__)", "def get_fuel_types(session: Session) -> CursorResult:\n return session.query(FuelType).order_by(FuelType.abbrev)", "def all(self, datastore):\n return datastore.query(self.__model__).all()", "def all(klass):\n return klass.find()", "def get_all():\n return SavedQuery.get_all()", "def get_all_by_incident_type(\n *, db_session, incident_type: str, skip=0, limit=100\n) -> List[Optional[Incident]]:\n return (\n db_session.query(Incident)\n .filter(Incident.incident_type.name == incident_type)\n .offset(skip)\n .limit(limit)\n .all()\n )", "def get_all(cls):\n return DataStore.get_all_instance(cls)", "def get_entities(self, clean=False):\n return list(self.iter_entities(clean=clean))", "def get_all_entities(self):\n return Artifact.get_all()", "def all(self, cls=None):\n results = {}\n if cls is not None:\n # TODO cls always is a class\n if type(cls) == str:\n cls = eval(cls)\n for instance in self.__session.query(cls):\n key = \"{}.{}\".format(cls.__name__, instance.id)\n results[key] = instance\n return results\n else:\n for table in self.__tables:\n for instance in self.__session.query(eval(table)):\n key = \"{}.{}\".format(table, instance.id)\n results[key] = instance\n return results", "def view_all(self): # -> Collection[TEntityDto]:\n raise NotImplementedError()", "def all(self, cls=None):\n if cls:\n objects = self.__session.query(cls).all()\n else:\n classes = [State, City] # , User, Place, Review, Amenity]\n objects = []\n for c in classes:\n objects += self.__session.query(c)\n return {\"{}.{}\".format(type(obj).__name__, obj.id): obj for obj in\n objects}", "def find(self, model_type=\"Model\", filter=None):\n collection = self._db[model_type]\n print 'mongo.list()'\n if filter: \n objs = list(collection.find(filter))\n else:\n objs = list(collection.find())\n print 'objs are {}'.format(objs)\n result = []\n # hack to convert uuid to string\n for obj in objs:\n obj['_id'] = str(obj['_id'])\n result += [obj, ]\n return objs", "def test_get_all_instance_types(self):\n session = sql_session.get_session()\n total_instance_types = session.query(models.InstanceTypes).count()\n inst_types = instance_types.get_all_types()\n self.assertEqual(total_instance_types, len(inst_types))", "def get_all(self, object):\n self.lock.acquire()\n result = self.__Session.query(object).all()\n self.lock.release()\n return result", "def get_objects_by_type(self, *types) -> List[TgnObject]:\n if not types:\n return list(self.objects.values())\n types_l = [o.lower() for o in types]\n return [o for o in self.objects.values() if o.type.lower() in types_l]", "def get_all_by_type(self, obj_type):\n objects = []\n node = None\n data = []\n with lzma.open(os.path.join('resources', self.game, 'dumps',\n '{}.dump.xz'.format(obj_type)), 'rt', encoding='latin1') as df:\n for line in df.readlines():\n match = re.match('^\\*\\*\\* Property dump for object \\'\\S+ (\\S+)\\'.*$', line)\n if match:\n objects.append(match.group(1))\n if node:\n node.load_from_string_list(data)\n data = [line]\n node = self.get_node_by_full_object(match.group(1))\n else:\n data.append(line)\n\n if node:\n node.load_from_string_list(data)\n\n return objects", "def all_entities_classes():\n return [\n PersonEntity, CustomAttributeEntity, ProgramEntity, ControlEntity,\n AuditEntity, AssessmentEntity, AssessmentTemplateEntity, IssueEntity,\n CommentEntity]", "def all(self, cls=None):\n if cls is not None:\n if type(cls) == 
str:\n cls = eval(cls)\n valueQuery = self.__session.query(cls)\n else:\n valueQuery = self.__session.query(State).all()\n valueQuery.extend(self.__session.query(City).all())\n valueQuery.extend(self.__session.query(User).all())\n valueQuery.extend(self.__session.query(Place).all())\n valueQuery.extend(self.__session.query(Review).all())\n valueQuery.extend(self.__session.query(Amenity).all())\n\n return {\"{}.{}\".format(type(obj).__name__, obj.id):\n obj for obj in valueQuery}", "def get_many(cls, limit: int = 100, offset: int = 0):\n if limit > 100:\n raise ModelExceptions(\"It is not possible to list more than 100 resources.\")\n\n instance_list = DBSESSION.query(cls)\n instance_list = instance_list.order_by(cls.id)\n instance_list = instance_list.offset(offset)\n instance_list = instance_list.limit(limit)\n instance_list = instance_list.all()\n if not instance_list:\n raise ObjectNotFound(f\"No registers of {cls.str_representation} found\")\n\n return instance_list", "def select_entities(table):\n with db.connect() as conn:\n order_field = getattr(\n table.c,\n flask.request.args.get('order_by', list(table.c)[0].name)\n )\n order_fn = db.order_directions[flask.request.args.get('order_direction', 'asc')]\n order_by = order_fn(order_field)\n offset = int(flask.request.args.get('offset', 0))\n limit = int(flask.request.args.get('limit', 10))\n return conn.execute(\n sa.select([table])\n .limit(limit)\n .offset(offset)\n .order_by(order_by)\n ).fetchall()", "def _get_revisions_by_type():\n valid_types = [model.__name__ for model in all_models.all_models]\n revisions_table = all_models.Revision.__table__\n id_query = select([\n func.max(revisions_table.c.id),\n ]).group_by(\n revisions_table.c.resource_type,\n revisions_table.c.resource_id,\n )\n ids = [row for (row,) in db.session.execute(id_query)]\n query = select([\n revisions_table.c.id,\n revisions_table.c.resource_type,\n revisions_table.c.resource_id,\n ]).where(\n revisions_table.c.resource_type.in_(valid_types)\n ).where(\n revisions_table.c.action != \"deleted\"\n ).where(\n revisions_table.c.id.in_(ids)\n ).order_by(\n revisions_table.c.resource_type,\n )\n\n rows_by_type = defaultdict(list)\n for row in db.session.execute(query):\n rows_by_type[row.resource_type].append(row)\n\n return rows_by_type", "def _getData(self, entity, params):\n\n res = []\n entity_code = entity.code\n conn = self._connect(entity)\n try:\n conn.create_function(\"INLIST\", 2, self._inlist)\n\n conn.row_factory = sqlite3.Row\n cursor = conn.cursor()\n\n if not self.exists(entity_code, cursor):\n self.generate_entity(entity)\n\n my_departments = \"\"\n my_users = \"\"\n for column in entity.definition[\"columns\"]:\n if \"entityFilterByDepartment\" in column or column[\"type\"] == \"departmentSelector\":\n my_departments = self.getMyDepartments()\n if \"entityFilterByUser\" in column or column[\"type\"] == \"userSelector\":\n my_users = self.getMyUsers()\n\n # Create columnames for each column in entity metadata. 
Adding too related fields\n columnNames = \"A.id\"\n leftJoin = \"\"\n letter = \"B\"\n thisEntityHaveDepartmentFilter = False\n thisEntityHaveUserFilter = False\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\"]:\n columnNames += f\", A.[{column['field']}]\"\n\n elif column[\"type\"] == \"dateTime\":\n columnNames += f\", strftime('%Y-%m-%d',{column['field']}) as [{column['field']}]\"\n\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n columnNames += f\", A.[{column['field']}]\"\n columnNames += f\", {letter}.[{column['entityLabel']}] as {letter}_label\"\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['field']} \"\n\n if \"entityFilterByDepartment\" in column:\n leftJoin += f' AND ( {letter}.departments is null or INLIST({letter}.departments,\"{my_departments}\") = 1 ) '\n if \"entityFilterByUser\" in column:\n leftJoin += f' AND ( {letter}.users is null or INLIST({letter}.users,\"{my_users}\") = 1 ) '\n\n letter = self.getNextLetter(letter)\n\n elif column[\"type\"] == \"departmentSelector\":\n columnNames += f\", A.[departments]\"\n thisEntityHaveDepartmentFilter = True\n\n elif column[\"type\"] == \"userSelector\":\n columnNames += f\", A.[users]\"\n thisEntityHaveUserFilter = True\n\n elif column[\"type\"] == \"relatedEntity\":\n columnNames += f\", {letter}.[{column['entityLabel']}] as {column.field}\"\n if \"relatedColumnRelation\" in column and column[\"relatedColumnRelation\"]:\n left_on = str(column['relatedColumnRelation']).replace(\n \"#entity#\", \"A\").replace(\"#relatedEntity#\", letter)\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {left_on} \"\n else:\n leftJoin += f\" LEFT JOIN [{column['entity']}] as {letter} ON {letter}.id = A.{column['relatedForeignKey']} \"\n letter = self.getNextLetter(letter)\n\n sortBy = \"A.ID\"\n if \"sortBy\" in params and params[\"sortBy\"]:\n sortBy = f'A.{params[\"sortBy\"]}'\n elif \"sortBy\" in entity.definition and entity.definition[\"sortBy\"]:\n sortBy = f'A.{entity.definition[\"sortBy\"]}'\n where = \"\"\n letter = \"B\"\n\n if thisEntityHaveDepartmentFilter:\n where = f' WHERE ( A.departments is null or INLIST(A.departments,\"{my_departments}\") = 1 ) '\n if thisEntityHaveUserFilter:\n where = f' WHERE ( A.users is null or INLIST(A.users,\"{my_users}\") = 1 ) '\n\n # Add filter for group in related entities\n for column in entity.definition[\"columns\"]:\n if column[\"type\"] in [\"dropdown\", \"remoteDropdown\"] and (\"entityFilterByDepartment\" in column or \"entityFilterByUser\" in column):\n where += \" AND \" if where else \" WHERE \"\n where += f'A.{column[\"field\"]} is null or A.{column[\"field\"]} is not null and {letter}.id is not null '\n letter = self.getNextLetter(letter)\n\n param_list = tuple()\n if \"filters\" in params and params[\"filters\"] and len(params[\"filters\"]) > 0:\n for filter_item in params[\"filters\"]:\n if \"values\" in filter_item and filter_item[\"values\"] and len(filter_item[\"values\"]) > 0:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n\n if \".\" in str(filter_item[\"field\"]):\n mm_entity = \"MM\" + str(filter_item[\"field\"]).split(\".\")[0]\n mm_field = str(filter_item[\"field\"]).split(\".\")[1]\n if len(filter_item[\"values\"]) == 1:\n where += f\" {mm_entity}.[{mm_field}] = ?\"\n param_list += (append(filter_item[\"values\"][0]),)\n else:\n where += f\" {mm_entity}.[{mm_field}] IN ({','.join( filter_item['values'])})\"\n\n leftJoin += f\" INNER 
JOIN [{filter_item['field'].split('.')[0]}] as {mm_entity} ON {mm_entity}.{filter_item['relatedManyToManyKey']} = A.id \"\n else:\n if len(filter_item[\"values\"]) == 1:\n if filter_item[\"useLike\"]:\n where += f\" A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_item['values'][0]}%\",)\n else:\n where += f\" A.[{filter_item['field']}] = ?\"\n param_list += (filter_item[\"values\"][0],)\n else:\n if filter_item[\"useLike\"]:\n where += \" ( 1=2 \"\n for filter_value in filter_item[\"values\"]:\n if filter_value:\n where += f\" OR A.[{filter_item['field']}] LIKE ?\"\n param_list += (f\"%{filter_value}%\",)\n where += \" ) \"\n else:\n where += f\" A.[{filter_item['field']}] IN ({','.join( filter_item['values'])})\"\n\n # Add fixed condition\n if \"condition\" in entity.definition and entity.definition[\"condition\"]:\n if where == \"\":\n where = \" WHERE \"\n else:\n where += \" AND \"\n where += entity.definition[\"condition\"]\n\n sql = f\"SELECT {columnNames} FROM {entity_code} as A {leftJoin}\"\n if where != \"\":\n sql += where\n\n sql += f\" ORDER BY {sortBy}\"\n\n if \"fromReg\" in params and params[\"fromReg\"] > 0 and \"toReg\" in params and params[\"toReg\"] > 0:\n sql += F\" LIMIT {params['fromReg']-1}, {params['toReg']-params['fromReg']+1} \"\n\n cursor.execute(sql, param_list)\n for row in cursor:\n dic = {\"id\": row[\"id\"]}\n letter = \"B\"\n\n for column in entity.definition[\"columns\"]:\n\n if column[\"type\"] in [\"numeric\", \"text\", \"dateTime\", \"date\"]:\n dic[column[\"field\"]] = row[column[\"field\"]]\n elif column[\"type\"] in [\"dropdown\", \"remoteDropdown\"]:\n dic[column[\"field\"]] = f\"{row[column['field']]}|-|{row[f'{letter}_label']}\"\n letter = self.getNextLetter(letter)\n elif column[\"type\"] == \"departmentSelector\":\n dic[\"departments\"] = row[\"departments\"]\n elif column[\"type\"] == \"userSelector\":\n dic[\"users\"] = row[\"users\"]\n elif column[\"type\"] == \"relatedEntity\":\n dic[column[\"field\"]] = row[column[\"field\"]]\n letter = self.getNextLetter(letter)\n\n res.append(dic)\n\n finally:\n conn.close()\n\n return res", "def all(cls):\n with sqlite3.connect(cls.dbpath) as connection:\n connection.row_factory = sqlite3.Row\n cursor = connection.cursor()\n SELECTSQL = \"SELECT * FROM accounts;\"\n cursor.execute(SELECTSQL)\n result = []\n for dictrow in cursor.fetchall():\n result.append(cls(**dictrow))\n return result", "def type_list():\n for type_ in orm.DataFlagType.select():\n click.echo(type_.name)", "def all(self, cls=None):\n if not self.__session:\n self.reload()\n objects = {}\n if type(cls) == str:\n cls = classes.get(cls, None)\n if cls:\n for obj in self.__session.query(cls):\n objects[obj.__class__.__name__ + '.' + obj.id] = obj\n else:\n for cls in classes.values():\n for obj in self.__session.query(cls):\n objects[obj.__class__.__name__ + '.' 
+ obj.id] = obj\n return objects", "def netsuite_get_all(self, record_type: str) -> list:\n if record_type is None:\n raise ValueError(\n \"Parameter 'record_type' is required for kw: netsuite_get_all\"\n )\n return self.client.getAll(recordType=record_type)", "def get_list(self):\n return self.__repository.get_all()", "def _get_objects(self, object_type, **kwargs):\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n **kwargs)", "def get_all_categories(type=None):\n \n categories = None\n if type is None:\n categories = get_db().execute('SELECT * FROM categories').fetchall()\n else:\n categories = get_db().execute('SELECT * FROM categories WHERE type = ?', (type,)).fetchall()\n\n return categories", "def view_all(entities, table, db):\n print \n print \"TABLE:\",table\n for ii in entities:\n print ii\n print", "def find_all(cls, **kwargs):\n return cls.query.filter_by(**kwargs).all()", "def get_all(self):\n return self.db", "def fetch_all(self):\n return list(iter(self))", "def list(self):\n return self.objects.all()", "def list_entity_types(\n self,\n ) -> Callable[\n [featurestore_service.ListEntityTypesRequest],\n Awaitable[featurestore_service.ListEntityTypesResponse],\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_entity_types\" not in self._stubs:\n self._stubs[\"list_entity_types\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.aiplatform.v1beta1.FeaturestoreService/ListEntityTypes\",\n request_serializer=featurestore_service.ListEntityTypesRequest.serialize,\n response_deserializer=featurestore_service.ListEntityTypesResponse.deserialize,\n )\n return self._stubs[\"list_entity_types\"]", "def fetch_all(self):\n with self.__connection.cursor() as cursor:\n query = \"SELECT * FROM %s\" % self.__schema\n cursor.execute(query)\n return cursor.fetchall()", "def getAllCategory():\n return session.query(Category).all()", "def document_types(db: Session = Depends(get_db)):\n return get_document_types(db)", "async def get_many(self, **query):\n\n return [await self._expand(obj) for obj in await self.db.get_many(**query)]", "def get_all(cls):\n if Model.data_connector:\n with Model.data_connector.u_lock:\n return Model.data_connector.get_all_objects(cls)\n \n return []", "def entities(self):\n for f in self._children(EntityData):\n log.debug(\"RecordTypeData.entities: f %s\"%f) \n e = EntityData.load(self, f)\n if e:\n yield e\n return", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n jbod_id=self.jbod_id,\r\n **kwargs)", "async def retrieve_all(cls) -> List[ExampleResource]:\n return await ExampleDAO.all()", "def get_entities_of_type(self, typename, type_system):\n if type_system not in self._type_systems:\n raise ValueError(\n f\"The type system {type_system} is not one of {self._type_systems.keys()}\"\n )\n return self._type_systems[type_system].get_entities_of_type(typename)", "def fetchall(self):\n rows = self.cursor.fetchall()\n\n if self.model.single:\n for row in rows:\n yield self.__instance_from_db(self.model, row)\n else:\n for row in rows:\n yield tuple(self.__instance_from_db(m, row) for m in self.model.models)", "def entity_types(self, eid):\n types = self._load_entity_types()\n return types[eid]", "def entities(self, params=None, **kwargs):\n entities = entity_map()\n\n # Sort entities 
into type => <set of aliases>.\n type_to_aliases = {}\n for alias in entities:\n entity = entities[alias]\n\n if isinstance(entity, Facility):\n type_name = 'Facilities'\n elif isinstance(entity, Ship):\n type_name = 'Ships'\n elif isinstance(entity, Defense):\n type_name = 'Defense'\n elif isinstance(entity, Technology):\n type_name = 'Technology'\n\n if type_name not in type_to_aliases:\n type_to_aliases[type_name] = set()\n type_to_aliases[type_name].add(alias)\n\n nick = self.irc.source.split('!')[0]\n self.irc.reply('Sending list of entities to %s.' % nick)\n\n for type_name in type_to_aliases:\n aliases = sorted(list(type_to_aliases[type_name]))\n self.irc.privmsg(nick, '%s: %s' % (type_name, ', '.join(aliases)))", "def list_modified_entities(entity_type, max_date):\r\n\r\n # get a cursor\r\n conn = ecommerce.db.getConnection()\r\n cursor = conn.cursor()\r\n\r\n # execute the query\r\n cursor.execute(\"\"\"\r\n SELECT EntityId\r\n FROM Stage0_Delta\r\n WHERE EntityType = ? AND\r\n FlagUpdated = 1 AND\r\n LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS')\r\n \"\"\", (entity_type, max_date) )\r\n\r\n # fetch the ids\r\n elist = [ ]\r\n row = cursor.fetchone()\r\n while row is not None:\r\n elist.append(int(row[0]))\r\n row = cursor.fetchone()\r\n cursor.close()\r\n\r\n return elist", "def servicemanage_type_get_all(context, inactive=False, filters=None):\n filters = filters or {}\n\n read_deleted = \"yes\" if inactive else \"no\"\n rows = model_query(context, models.ServiceManageTypes,\n read_deleted=read_deleted).\\\n options(joinedload('extra_specs')).\\\n order_by(\"name\").\\\n all()\n\n # TODO(sirp): this patern of converting rows to a result with extra_specs\n # is repeated quite a bit, might be worth creating a method for it\n result = {}\n for row in rows:\n result[row['name']] = _dict_with_extra_specs(row)\n\n return result", "def _get_objects(self, object_type, **kwargs):\r\n\r\n return self.parent_connection._get_objects(object_type,\r\n sys_id=self.sys_id,\r\n brick_id=self.brick_id,\r\n **kwargs)", "def get_models_query():\n query = db.session.query(Products.model).distinct()\n return query", "def getentities(self):\n entities = {}\n\n # The following will create lots of errors in suds.client, one\n # for every type that is not an entity. 
Disable their logger\n # temporarily to avoid cluttering the log.\n sudslog = logging.getLogger('suds.client')\n sudssav = sudslog.disabled\n sudslog.disabled = True\n for t in self.gettypes():\n try:\n info = EntityInfo(t, self.client)\n except ICATError:\n continue\n entities[t] = info\n sudslog.disabled = sudssav\n\n return entities", "def _entity_paginator(namespace, workspace, etype, page_size=500,\n filter_terms=None, sort_direction=\"asc\"):\n\n page = 1\n all_entities = []\n # Make initial request\n r = fapi.get_entities_query(namespace, workspace, etype, page=page,\n page_size=page_size, sort_direction=sort_direction,\n filter_terms=filter_terms)\n fapi._check_response_code(r, 200)\n\n response_body = r.json()\n # Get the total number of pages\n total_pages = response_body['resultMetadata']['filteredPageCount']\n\n # append the first set of results\n entities = response_body['results']\n all_entities.extend(entities)\n # Now iterate over remaining pages to retrieve all the results\n page = 2\n while page <= total_pages:\n r = fapi.get_entities_query(namespace, workspace, etype, page=page,\n page_size=page_size, sort_direction=sort_direction,\n filter_terms=filter_terms)\n fapi._check_response_code(r, 200)\n entities = r.json()['results']\n all_entities.extend(entities)\n page += 1\n\n return all_entities", "def get_departments() -> list:\n return Department.query.all()", "def get_node_ids_by_entity_type(self, entity_name):\n try:\n with closing(self.connection) as con:\n with con:\n with closing(con.cursor()) as cursor:\n cursor.execute(\"\"\"\n SELECT type, id\n FROM nodes\n WHERE name == (?)\n \"\"\", (entity_name,))\n node_ids_by_type = dict()\n for x in cursor.fetchall():\n ids = node_ids_by_type.setdefault(x[0], [])\n ids.append(x[1])\n node_ids_by_type[x[0]] = ids\n return node_ids_by_type\n\n except sqlite3.OperationalError as e:\n print(\"ERROR: Could not retrieve ids for entity with name '{}': {}\".format(entity_name, str(e)))\n return None", "def all(self, cursor: sqlite3.Cursor) -> List[ModelledTable]:\n\n sql = f\"SELECT {self.id_field} FROM [{self.table}]\"\n\n _LOGGER.debug(sql)\n\n cursor.execute(sql)\n\n ids = [x[0] for x in cursor.fetchall()]\n\n return list(self.get_many(cursor, *ids).values())", "def get_all(cls):\n result = cls.query.all()\n if not result:\n return {\n \"message\": \"The class of objects do not exist\",\n \"help\": \"Ensure the class required has objects.\"\n }\n return result", "def all(self, cls=None):\n query_data = {}\n\n if cls is None:\n for valid_key, valid_class in DBStorage.CNC.items():\n for instance in self.__session.query(valid_class):\n key = type(instance).__name__ + \".\" + instance.id\n query_data.update({key: instance})\n return query_data\n else:\n for instance in self.__session.query(DBStorage.CNC[cls]):\n key = type(instance).__name__ + \".\" + instance.id\n query_data.update({key: instance})\n return query_data", "async def get_objects(conn: Database, query):\n return await conn.fetch_all(query=query)", "def getItemsOfType(typeId):\n return Gw2Spidy._request('all-items', str(typeId))['results']", "def get_all(cls, request, page=None, limit=None):\n session = get_session(request)\n\n query = session.query(cls)\n\n if limit:\n query = query.limit(limit)\n\n if page and limit:\n offset = (page - 1) * limit\n query = query.offset(offset)\n\n return query", "def fetchall(self) -> list:\n return self.cursor.fetchall()", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_find_all_entities_action(self):\n pass", 
"def getinstancelist():\n dbcursor_dict.execute(dbq.get_all_instance_list, )\n db_instance_list = dbcursor_dict.fetchall()\n return db_instance_list", "def entity_data(self, entity_name, limit=10):\n from sagas.ofbiz.entities import OfEntity as e, finder, record_list_df\n # limit = 10\n offset = 0\n result = finder.find_list(entity_name, limit, offset)\n result = record_list_df(entity_name, result, drop_null_cols=True, contains_internal=False)\n print(result)", "def as_list(self, db):\n keys = self.__table__.columns.keys()\n list = []\n if not db.session:\n logger.error(\"no database session\")\n return list\n\n for key in keys:\n if key in [\"external_id\", \"sport_id\", \"is_duplicate_with\", \"manual_check_required_with\"]:\n continue\n elif key == \"id\":\n list.append(getattr(self, \"external_id\"))\n elif key == \"sportstype_id\":\n list.append(db.session.query(SportsType.name).filter(\n SportsType.id == getattr(self, key)).first()[0])\n else:\n list.append(getattr(self, key))\n return list", "def get(self, currency, entity):\n check_inputs(currency=currency, entity=entity)\n tags = entitiesDAO.list_entity_tags(currency, entity)\n return tags", "def get(self):\n self.startTime = time.time()\n result = models.Type.Type()\n total = 0\n action = request.args.get('action')\n id_type = request.args.get('id')\n type_model = models.Type.Type(\n value=request.args.get('value'),\n description=request.args.get('description'))\n page_size = None\n if request.args.get('pageSize'):\n page_size = int(request.args.get('pageSize'))\n else:\n page_size = 10\n\n offset = None\n if request.args.get('offset'):\n offset = int(request.args.get('offset'))\n else:\n offset = 0\n repository = TypeRepository(\n FLASK_APP.config[\"DBUSER\"],\n FLASK_APP.config[\"DBPASS\"],\n FLASK_APP.config[\"DBHOST\"],\n FLASK_APP.config[\"DBPORT\"],\n FLASK_APP.config[\"DBNAME\"])\n try:\n if action == 'searchByID':\n result = repository.searchByID(id_type)\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Ok',\n 'get()',\n str(result.__dict__),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=result,\n message=\"Ok\",\n status=200)\n elif action == 'search':\n result = repository.search(type_model, page_size, offset)\n total = result['total']\n result = result['content']\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Informative',\n 'Ok',\n 'get()',\n str(result),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=result,\n message=\"Ok\",\n status=200,\n total=total,\n offset=offset,\n pageSize=page_size), 200\n except (exc.SQLAlchemyError, Exception) as sqlerr:\n Logger.Logger.create(FLASK_APP.config[\"ELASTICURL\"],\n 'Error',\n 'SQL Error',\n 'get()',\n str(sqlerr),\n FLASK_APP.config[\"TYPE\"])\n return self.okResponse(\n response=sqlerr,\n message=\"SQL error: \"+str(sqlerr),\n status=500)", "def query(cls):\r\n return cls._session.query(cls)", "def types_clients_view(request):\n query = request.dbsession.query(ClientType).all()\n return Utils.serialize_many(query)" ]
[ "0.7030536", "0.6770714", "0.66363615", "0.6616682", "0.6602713", "0.6575097", "0.64512324", "0.6423671", "0.64117724", "0.6411315", "0.63493234", "0.634775", "0.63454515", "0.6322174", "0.62615913", "0.6218871", "0.6200281", "0.61640894", "0.6134082", "0.613314", "0.6114528", "0.6106322", "0.6104303", "0.60861796", "0.60861796", "0.60642195", "0.6038581", "0.6036301", "0.6004704", "0.5985883", "0.5964532", "0.59410536", "0.59314805", "0.592942", "0.59274745", "0.5901614", "0.5901437", "0.58878297", "0.5885424", "0.5879337", "0.58636713", "0.5859314", "0.5844364", "0.5839544", "0.5836307", "0.5831095", "0.58160895", "0.58091915", "0.58070165", "0.58067083", "0.57963514", "0.5784833", "0.5784763", "0.57844895", "0.578206", "0.5779471", "0.5772605", "0.57685626", "0.5761142", "0.57401735", "0.572208", "0.5718815", "0.57152146", "0.57130647", "0.57128257", "0.57015026", "0.5691884", "0.5679731", "0.56764483", "0.5676006", "0.5675532", "0.5661822", "0.5655888", "0.56515276", "0.5650148", "0.5646868", "0.56455433", "0.5643685", "0.5636821", "0.5633607", "0.5618599", "0.56130534", "0.56079984", "0.56021756", "0.5589203", "0.5587918", "0.55753994", "0.5566395", "0.5563705", "0.5557096", "0.5555191", "0.55547494", "0.55494446", "0.5546674", "0.55277646", "0.5526527", "0.552313", "0.5514649", "0.55087537", "0.5507679" ]
0.6784044
1
Puts generate commands into the queue
def cmd_add(arguments):

    # build the list of entities
    entities = { type : [ ] for type in entityTypes }
    type = ""
    param = 1       # skip "add" command
    while param < len(arguments):

        # get an entity type and check it's valid
        fullType = arguments[param]
        type = fullType
        subtype = None
        if "/" in type:
            (type, waste, subtype) = fullType.partition("/")
        param += 1
        if type not in entityTypes:
            print "ERROR: entity type [%s] is not valid" % type
            return -1
        if subtype is not None and subtype not in subtypes[type]:
            print "ERROR: entity subtype [%s] for type [%s] is not valid" % (subtype, type)
            return -1

        # build the list of values
        eList = [ ]
        if arguments[param] == "*":

            # if the entity has subtypes and we have no subtype, iterate
            if subtype is None and subtypes[type]:

                # iterate subtypes
                for subtype in subtypes[type]:

                    # set the fullType
                    fullType = type + "/" + subtype

                    # get the list of (ALL) ids from the database
                    eList = getEntityIds(type, subtype)
                    param += 1

                    # attach the list to the type
                    entities[fullType] = eList
            else:

                # no subtypes or subtype specified
                # get the list of (ALL) ids from the database
                eList = getEntityIds(type, subtype)
                param += 1

                # attach the list to the type
                entities[fullType] = eList
        else:

            # process the params
            while param < len(arguments):
                try:
                    eList.append(int(arguments[param]))
                    param += 1
                except ValueError:
                    break

            # attach the list to the type
            entities[fullType] = eList

    # get a producer
    producer = getProducer()

    # start creating jobs
    jobCount = 0
    for fullType in entities:

        # separate type/subtype
        type = fullType
        if "/" in type:
            (type, waste, subtype) = fullType.partition("/")

        # get the list
        elist = entities[fullType]

        # build jobs of up to 64 entries
        partialJobCount = generateJobs(producer, type, elist)

        # report
        if partialJobCount > 0:
            print "added %5d jobs for entity %s" % (partialJobCount, fullType)
        jobCount += partialJobCount

    # report the number of jobs created
    print ""
    print "Added a total of %5d jobs in queue" % jobCount
    print ""

    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()", "def _manager_main(self, queue):\r\n for task in self._task_generator():\r\n queue.put(task)", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def send_command_queue(command_queue):\n for command in command_queue:\n Game._send_string(command)\n\n Game._done_sending()", "def _send_possible_commands(self): # pylint: disable=too-many-branches\n active_ids = deepcopy(self._currently_allocated_ids)\n for logical_id in self._current_row_major_mapping:\n # So that loop doesn't stop before AllocateGate applied\n active_ids.add(logical_id)\n\n new_stored_commands = []\n for i, cmd in enumerate(self._stored_commands):\n if len(active_ids) == 0:\n new_stored_commands += self._stored_commands[i:]\n break\n if isinstance(cmd.gate, AllocateQubitGate):\n if cmd.qubits[0][0].id in self._current_row_major_mapping:\n self._currently_allocated_ids.add(cmd.qubits[0][0].id)\n\n mapped_id = self._current_row_major_mapping[cmd.qubits[0][0].id]\n qb = WeakQubitRef(engine=self, idx=self._mapped_ids_to_backend_ids[mapped_id])\n new_cmd = Command(\n engine=self,\n gate=AllocateQubitGate(),\n qubits=([qb],),\n tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)],\n )\n self.send([new_cmd])\n else:\n new_stored_commands.append(cmd)\n elif isinstance(cmd.gate, DeallocateQubitGate):\n if cmd.qubits[0][0].id in active_ids:\n mapped_id = self._current_row_major_mapping[cmd.qubits[0][0].id]\n qb = WeakQubitRef(engine=self, idx=self._mapped_ids_to_backend_ids[mapped_id])\n new_cmd = Command(\n engine=self,\n gate=DeallocateQubitGate(),\n qubits=([qb],),\n tags=[LogicalQubitIDTag(cmd.qubits[0][0].id)],\n )\n self._currently_allocated_ids.remove(cmd.qubits[0][0].id)\n active_ids.remove(cmd.qubits[0][0].id)\n self._current_row_major_mapping.pop(cmd.qubits[0][0].id)\n self._current_mapping.pop(cmd.qubits[0][0].id)\n self.send([new_cmd])\n else:\n new_stored_commands.append(cmd)\n else:\n send_gate = True\n mapped_ids = set()\n for qureg in cmd.all_qubits:\n for qubit in qureg:\n if qubit.id not in active_ids:\n send_gate = False\n break\n mapped_ids.add(self._current_row_major_mapping[qubit.id])\n # Check that mapped ids are nearest neighbour on 2D grid\n if len(mapped_ids) == 2:\n qb0, qb1 = sorted(mapped_ids)\n send_gate = False\n if qb1 - qb0 == self.num_columns:\n send_gate = True\n elif qb1 - qb0 == 1 and qb1 % self.num_columns != 0:\n send_gate = True\n if send_gate:\n # Note: This sends the cmd correctly with the backend ids as it looks up the mapping in\n # self.current_mapping and not our internal mapping self._current_row_major_mapping\n self._send_cmd_with_mapped_ids(cmd)\n else:\n for qureg in cmd.all_qubits:\n for qubit in qureg:\n active_ids.discard(qubit.id)\n new_stored_commands.append(cmd)\n self._stored_commands = new_stored_commands", "def generate (self, options=None):\n\n with self.__t.steps():\n import json\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n\n def queue (process_id, generated):\n from sqlalchemy.exc import IntegrityError\n queued = schema.table.process_queue()\n queued.process = process_id\n queued.tags = generated[\"output_tags\"]\n queued.depend = generated[\"output_depend\"]\n try:\n schema.save(queued)\n except IntegrityError:\n pass\n\n with schema.select(\"process\") as select:\n for process_def in select.all():\n\n if options and \"process_id\" in options:\n if process_def.id != 
options[\"process_id\"]: continue\n\n proc = process.get(process_def)\n for generated in proc.generate():\n generate = False\n print(generated)\n try:\n output_tags_ids = data.resolve_tags(generated[\"output_tags\"], create=True)\n generated_series = schema.select_one( \"series\"\n , schema.table.series.tags == output_tags_ids)\n if generated_series:\n generate = generated[\"input_last_modified\"] > generated_series.last_modified\n if not generate: #check modified config\n try:\n config = proc.get_config(generated[\"output_tags\"])\n generate = config.last_modified > generated_series.last_modified\n except (exception.NoConfigException, exception.NoConfigKeyException):\n pass\n else:\n print(\"create\")\n generated_series = schema.table.series()\n generated_series.symbol = generated[\"output_tags\"][\"symbol\"]\n generated_series.tags = output_tags_ids\n schema.save(generated_series)\n generate = True\n except exception.MissingTagException:\n generate = True\n if generate or (options and \"force\" in options and options[\"force\"]):\n print(\"queue %s\" % (generated))\n queue(proc.id, generated)\n\n #self.__t.progress_end(30)\n #self.__t.progress()\n #self.apply_async(queue=\"control\", countdown=30) #queue next\n self.__t.ok()", "def get_cmds_queue(self):\n\t\t\n\t\treturn Queue()", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def add_command(self, cmd):\n self.command_queue.put(cmd)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def test_appended(self):\n genFn = Mock(return_value=None)\n expected = 123\n \n wrapper = KaoGenerator(genFn)\n wrapper.queue(expected)\n actual = wrapper._queue.pop()\n self.assertEqual(expected, actual)", "def _push_queue(self):\n\n self.add_cons_vars(self._var_queue, sloppy=self.sloppy)\n self.add_cons_vars(self._cons_queue, sloppy = self.sloppy)\n\n if len(self._var_queue) > 0:\n self.regenerate_variables()\n if len(self._cons_queue) > 0:\n self.regenerate_constraints()\n\n self._var_queue = list()\n self._cons_queue = list()", "async def queue_commands(decoy: Decoy, state_store: StateStore) -> None:\n\n def get_next_to_execute() -> Generator[str, None, None]:\n yield \"command-id-1\"\n yield \"command-id-2\"\n raise RunStoppedError()\n\n get_next_to_execute_results = get_next_to_execute()\n\n decoy.when(\n await state_store.wait_for(condition=state_store.commands.get_next_to_execute)\n ).then_do(lambda *args, **kwargs: next(get_next_to_execute_results))", "def send_command_queue(self, command_queue):\n for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "def send_command_queue(self, command_queue):\n 
for command in command_queue:\n self._send_string(command)\n\n self._done_sending()", "def _run(self):\n self._send_sequence() # Share the initial generator\n with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:\n while True:\n if self.stop_signal.is_set():\n return\n\n self.queue.put(\n executor.apply_async(next_sample, (self.uid,)), block=True)", "def Enqueue(self, command):\n\n self.queue.put(command)", "def data_generator():\n msg = Message(Message.ADD, queue.uuid, queue)\n PROVIDER_MQ.put(msg)\n keep_running = True\n while keep_running:\n try:\n chunk = queue.get()\n yield chunk\n except Empty:\n app.logger.info('Queue empty. Ending stream')\n keep_running = False", "def bqm_move_queue(self):\n self.bqm.turn_once()", "def __init__(self):\n self.queues=[]", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def controls():\n\n context = zmq.Context()\n\n print(\"Transmitting commands to process.\")\n socket = context.socket(zmq.REQ)\n rc = socket.connect(\"ipc:///tmp/mail_queue_ipc\")\n #print(rc)\n\n\n for request in range(2):\n print(\"Sending request %s\" % request)\n socket.send(b\"insert\")\n\n message = socket.recv()\n print(\"Recieved reply %s [ %s ]\" % (request, message))\n time.sleep(1)", "def command(self, command_string):\n self.__command_queue.append(command_string)", "def task(self, chunk_filename_queue, writer):\n self.init_structs()\n while True:\n filename = chunk_filename_queue.get()\n for item in self.single_file_gen(filename):\n writer.send_bytes(item)", "def task_gen(self):\n pass", "def populatereadyqueue():\n readyQueue.put(Process(\"P1\", time(0, 0, 1), time(0, 0, 4)))\n readyQueue.put(Process(\"P2\", time(0, 0, 2), time(0, 0, 6)))\n readyQueue.put(Process(\"P3\", time(0, 0, 3), time(0, 0, 2)))", "def subscribe_to_commands(self):\n self.basic_consume(self.process_command, queue=self.name)", "def putonqueue(self, nr, *args):\n self.outqueues[10-nr].put_nowait(args)\n self.tickqueue.put_nowait('go')", "def generate():", "def q_mgr(HyS, HyQ):\r\n q_file = os.path.join(HyS.var_dir, 'Queue.txt')\r\n while True:\r\n run = ''\r\n lines = open(q_file, 'r').readlines()\r\n if lines:\r\n run = lines[0].rstrip()\r\n open(q_file, 'w').writelines(lines[1:])\r\n if run:\r\n HyQ.put(run)\r\n else:\r\n break\r\n return True", "def main():\n produce()", "def __init__(self):\r\n self.queue = []", "def __init__(self):\r\n self.queue = []", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def _process_command_queue(self, command_queue):\n while True:\n if len(command_queue) > 0:\n command_tuple = command_queue.pop()\n func, kwargs = command_tuple[0], command_tuple[1]\n getattr(self, func)(**kwargs)\n time.sleep(.5)", "def declare(self):\n self.channel.queue_declare(queue='files_to_database')", "def produce(self, ctx):\n pass", "def run(self):\n logger.info('Starting PathoGen supervisor')\n\n # We defer creating the Couchbase object until we are actually\n # 'in' the separate process here.\n self._connect()\n\n # Create initial list of documents on the 'finished' queue\n finished_items = list()\n for i in 
range(self.num_items):\n finished_items.append((i, 0))\n\n for iteration in range(self.num_iterations):\n\n # Create a tuple for each item in the finished queue, of\n # (doc_id, generator, doc_size). For the first iteration\n # this will be all items, for subsequent iterations it may\n # be fewer if some have been frozen.\n # Spread these across the input queues of all workers, to ensure\n # that each worker operates on different sizes.\n expected_items = len(finished_items)\n num_queues = len(self.queues)\n for (i, size) in list(finished_items):\n queue_index = i % num_queues\n self.queues[queue_index].put(\n (i,\n self.promotion_policy.build_generator(i),\n 0))\n finished_items = list()\n\n while expected_items > 0:\n (i, doc, size) = self.in_queue.get()\n try:\n next_size = doc.next()\n value = self.buffer[:next_size]\n self._set_with_retry('doc_' + str(i), value)\n size = next_size\n self.out_queue.put((i, doc, size))\n except StopIteration:\n # Note: Items are not put back on out_queue at end of an\n # iteration (unlike Worker), instead we keep for the next\n # iteration, to build the new generators.\n finished_items.append((i, size))\n if len(finished_items) == expected_items:\n # Got all items, end of iteration.\n break\n\n assert self.in_queue.empty()\n assert self.out_queue.empty()\n\n # Any finished items which didn't reach max size should be\n # removed from the next iteration - we want to leave them\n # frozen at their last size.\n finished_items = [(ii, sz) for (ii, sz) in finished_items if sz == self.max_size]\n\n logger.info('Completed iteration {}/{}, frozen {}/{} documents (aggregate)'.format(\n iteration + 1, self.num_iterations,\n self.num_items - len(finished_items), self.num_items))\n # Sleep at end of iteration to give disk write queue chance to drain.\n logger.info('Sleeping for {}s'.format(self.SLEEP_TIME))\n time.sleep(self.SLEEP_TIME)\n\n # All iterations complete. 
Send a special null generator\n # document around the ring - this tells the workers to shutdown.\n self.out_queue.put((-1, None, 0))\n\n # Finally, set all remaining documents back to size zero.\n for (i, size) in list(finished_items):\n self._set_with_retry('doc_' + str(i), self.buffer[:0])", "def __init__(self): \n self.queue = []", "def enqueue(self, command):\n\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n q = []\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if command not in q:\n q.append(command)\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def __init__(self):\n self.queue = []", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def test_queueUsed(self):\n items = [1,2,3,4,5]\n queueValues = ['a', 'b', 'c', 'd', 'e']\n \n generator = Mock()\n generator.send = Mock(side_effect=items)\n genFn = Mock(return_value=generator)\n \n wrapper = KaoGenerator(genFn)\n for v in queueValues:\n wrapper.queue(v)\n \n for i, yieldedValue in enumerate(wrapper):\n self.assertEqual(items[i], yieldedValue)\n generator.send.assert_called_with(queueValues[i])", "def test_delivery_of_queued_messages(self):\n yield self.connect('127.0.0.1', self.pbPort)\n\n localConfig = copy.copy(self.defaultConfig)\n localConfig.id = str(randint(10, 99))\n localConfig.requeue_delay = 2\n localConfig.submit_sm_throughput = 20\n yield self.add(localConfig)\n\n # Send 150 messages to the queue\n submitCounter = 0\n submit_sm_pdu = copy.copy(self.SubmitSmPDU)\n while submitCounter < 150:\n submit_sm_pdu.params['short_message'] = '%s' % submitCounter\n yield self.submit_sm(localConfig.id, submit_sm_pdu, self.SubmitSmBill.user.uid)\n submitCounter += 1\n\n # Wait for 20 seconds\n yield waitFor(20)\n\n # Start the connector again\n yield self.start(localConfig.id)\n\n # Wait for 30 seconds, all the rest of the queue must be sent\n yield waitFor(50)\n\n yield self.stop(localConfig.id)\n\n # Wait for unbound state\n yield waitFor(20)\n\n # Assertions\n # Take the lastClient (and unique one) and assert received message\n self.assertEqual(len(self.SMSCPort.factory.lastClient.submitRecords), 150)", "def putonqueue(self, nr, *args):\n\n self.outqueues[nr].put_nowait(*args)\n self.tickqueue.put_nowait('go')", "def dump_queue(self):\n self.set_polling_many(self.queue)\n self.queue = []", "def vendedorBehavior(queue):\n gr = register_message()", "async def _queue(self, msg):\n if msg.voice_client is not None:\n if msg.guild.id in self.player:\n if self.player[msg.guild.id]['queue']:\n emb = discord.Embed(\n colour=self.random_color, title='queue')\n emb.set_footer(\n text=f'Command used by {msg.author.name}', icon_url=msg.author.avatar_url)\n for i in self.player[msg.guild.id]['queue']:\n emb.add_field(\n name=f\"**{i['author'].author.name}**\", value=i['title'], inline=False)\n return await msg.send(embed=emb, delete_after=120)\n\n return await msg.send(\"No songs in queue\")", "def creator(data, q):\n print('Creating data and putting it on the queue')\n for item in data:\n q.put(item)", "def generate_tasks(self, task):", "def generate(self):\n pass", "def execute(self):\n for move in self._queue:\n move.execute()", "def worker(self):\n while True: # Feed forever. 
Enqueue will block when queue is full.\n while len(self.memory) < self.min_memory:\n time.sleep(1)\n batch = self.memory.sample(self.batchsize)\n states, actions, rewards, terminals = zip(*batch)\n self.session.run(self.enqueue_op, {\n self.states: states, self.actions: actions,\n self.rewards: rewards, self.terminals: terminals,\n })", "def _queue_create(self, **kwargs):\n name = self.generate_random_name()\n return self.clients(\"zaqar\").queue(name, **kwargs)", "def _setup_tubes(self):\n chan = self.channel\n inp = self.config[self.MODULE_NAME]['amqp']['in']\n out = self.config[self.MODULE_NAME]['amqp']['out']\n if inp['exchange']:\n log.info('generating Input Queue'+ str(inp))\n chan.exchange_declare(**inp)\n self.qname = chan.queue_declare(exclusive=True).queue\n chan.queue_bind(exchange=inp['exchange'],queue=self.qname)\n self.consume = lambda cb : chan.basic_consume(cb,queue=self.qname,no_ack=True)\n self.start_loop = lambda : pika.asyncore_loop()\n\n if out['exchange']:\n log.info('generating Output Exchange'+ str(out))\n chan.exchange_declare(**out)\n self.publish = lambda msg: self.channel.basic_publish(exchange=out['exchange'],routing_key='',body=msg)", "def do_all(self):\r\n self.frame_gen.start()\r\n\r\n while True:\r\n msg = self.rec_queue.get()\r\n if msg[0] == 'sync':\r\n self.send_queue.put(('sync', time.time()))\r\n continue\r\n if msg[0] == 'finish':\r\n break\r\n if msg[0] != 'img':\r\n raise ValueError(f'strange msg: {msg}')\r\n\r\n frame_num = msg[1]\r\n time_ms = self.ms_per_frame * frame_num\r\n rawimg = self.frame_gen.generate_at(time_ms)\r\n self.img_queue.put((frame_num, rawimg))\r\n self.send_queue.put(('post', frame_num))\r\n rawimg = None\r\n\r\n self.frame_gen.finish()\r\n\r\n self.img_queue.close()\r\n self.rec_queue.close()\r\n self.send_queue.close()", "async def __track_commands(self):\n while True:\n done_cids = []\n for cid in self.__running_commands.keys():\n if self.__running_commands[cid].done():\n self._id_pool.add(cid)\n done_cids.append(cid)\n for cid in done_cids:\n self.__running_commands.pop(cid)\n await asyncio.sleep(0.5)", "def prepare_gen(self, targets):\r\n pass", "def generate_all(self):\n self._menu_select('Generate->Generate All BB Tracks')\n self.wait_ready()", "def worker(scenes, cap_templates, ques_templates, worker_id, out_q):\n\n dialogs = []\n for index, scene in enumerate(scenes):\n cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())\n print('Generating [ %s ] [ Worker: %d, Progress: %d/%d Scene: %d ]' % \\\n (cur_time, worker_id, index, len(scenes), scene['image_index']))\n try:\n gen_dialog = generate_dialog_bfs(scene, cap_templates, ques_templates)\n dialogs.append(json.loads(json.dumps(gen_dialog)))\n except:\n print('NOTE: Missing data for %d' % scene['image_index'])\n out_q.put({worker_id: dialogs})", "def setup(bot):\n bot.add_cog(Queue(bot))", "def commands():", "def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()", "def generate(self):", "def send_command(self, cmd):\n self.mgen_pipe.Send(cmd)", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def at_cmdset_creation(self):\n self.add(Command())", "def 
_producer(self) -> None:\n while (gtex_path := self.gtex.pop(0)) is not None and (\n bm_path := self.bm.pop(0)\n ) is not None:\n data = merge_data(gtex_path, bm_path, self.mane)\n self._q.put(data)\n logger.info(f\"Contents of file {gtex_path} added to queue\")\n else:\n self._q.put(None) # Send end signal to consumer\n logger.info(\"All files added. None signal sent. Producer returns\")\n return", "def queue_communication(self, session):\n\n # Here we can queue all communication to be sent to the Client\n # Examples follow...\n session['queue'].append(GetObjects())\n session['queue'].append(DeleteObjects())\n session['queue'].append(RpcExecute())\n session['queue'].append(GetDeviceInfo())", "def run(self):\n print(f\"{self._name} is ready to go!\")\n self._pc = 0\n while self._pc < len(self.commands):\n self.run_command(self.commands[self._pc])\n self._pc += 1\n yield (self._pc < len(self.commands))\n print(f\"{self._name} is finished!\")", "def write(self):\n # build up all commands into a single request to increase network perf\n connection = self.connection\n commands = self.commands\n try:\n connection.send_packed_command(connection.pack_commands([c.args for c in commands]))\n except ConnectionError as e:\n for c in commands:\n c.result = e", "async def main(queue: Queue):\n # TODO: in the long term stop relying on https://twitchtokengenerator.com/ to populate ACCESS TOKEN\n\n curr_access_token = constants.TWITCH_ACCESS_TOKEN\n curr_refresh_token = constants.TWITCH_REFRESH_TOKEN\n\n try:\n bot = await Bot.new(curr_access_token)\n except AuthenticationError:\n curr_access_token, curr_refresh_token = await auth.refresh_token(curr_refresh_token)\n bot = await Bot.new(curr_access_token)\n while True:\n data = await queue.get()\n try:\n for chan in bot.connected_channels:\n await chan.send(data)\n except AuthenticationError:\n await queue.put(data)\n curr_access_token, curr_refresh_token = await auth.refresh_token(curr_refresh_token)\n bot = await Bot.new(curr_access_token)", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def commands():\n pass", "def processCommands(self):\n if self.in_q.empty() is False and self.die_pending is False:\n cmdstr = self.in_q.get()\n cmd = cmdstr.split(\"|\")\n # print \"Thread received command \" + cmd[0]\n\n # process the commands\n if cmd[0] == \"die\":\n self.die_pending = True\n self.stop()\n \n elif cmd[0] == \"start\":\n self.start(cmd[1])\n\n elif cmd[0] == \"stop\":\n if len(cmd[1]) > 0:\n if self.stop(cmd[1]) is True:\n self.out_q.put(\"stopped|\" + cmd[1])\n else:\n self.stop()\n self.out_q.put(\"stopall\")\n\n elif cmd[0] == \"toggle\": # toggle the run state of the sequence\n if len(cmd[1]) > 0:\n #print \"processing toggle\"\n if (self.isRunning(cmd[1]) == True):\n if (self.stop(cmd[1]) == True):\n self.out_q.put(\"stopped|\" + cmd[1])\n else:\n self.start(cmd[1]) \n\n elif cmd[0] == \"tap\":\n try:\n tap_time = float(cmd[1])\n self.btic.BeatRecorder(tap_time)\n # Testing only - should be in a better location\n self.out_q.put(\"message|beat period: \" + str(self.btic.getPeriod()))\n except Exception as e:\n self.out_q.put(\"exception|\" + e.strerror + \", errno: \" + str(e.errno)) \n elif cmd[0] == \"align\":\n self.btic.align(float(cmd[1]))\n\n elif cmd[0] == \"loadbank\":\n self.next_bank = cmd[1]\n self.bank_load_pending = True\n self.stop()\n\n elif cmd[0] == \"clearbank\":\n self.bank_clear_pending = True\n self.stop()\n\n elif cmd[0] == \"usebeat\":\n if cmd[1] == \"yes\":\n self.use_beat = True\n #print 
\"Using Beat\"\n else:\n self.use_beat = False\n for seq in self.sequences:\n seq.stopSynching()\n #print \"Not Using Beat\"\n elif cmd[0] == \"settempo\":\n self.btic.setPeriod(cmd[1])\n\n #clear or load bank\n if self.bank_clear_pending == True and self.allClear() == True:\n self.bank_clear_pending = False\n self.clearBank()\n elif self.bank_load_pending == True and self.allClear() == True:\n self.bank_load_pending = False\n self.loadBank(self.next_bank)", "def onCommandGenerationStart(self, event):\n self.turnOffTimerTrans()\n self.turnOffTimerXML()\n logging.info(\"Start command generation\")\n self.turnOnTimerItem()\n self.turnOnTimerCmd()\n\n xcor_text = self.XMLCorr.GetValue().strip()\n self.formerCorrCommand = self.CorrCommand.GetValue().strip()\n if len(xcor_text) > 0:\n commands = self.conceptGenerator.recognizeString(xcor_text, isStrict=False)\n if len(commands) > 0:\n cmdText = '\\n'.join(commands)\n else:\n cmdText = 'NO_COMMAND'\n\n if cmdText.strip() == self.formerCorrCommand:\n logging.info(\"Command generation failed\")\n self.Btn_CMD.SetLabel('Get Command')\n self.cmdButtonMode = BUTTON_GENERATE\n self.formerCorrCommand = None\n else:\n self.CorrCommand.SetValue(cmdText)\n logging.info(\"Command generation completed\")\n self.Btn_CMD.SetLabel('Undo Command')\n self.cmdButtonMode = BUTTON_REVERT\n\n self.OnUpdatePlantCtrl(event)", "def start(self, wait=False):\n start_nodes = self._get_start_nodes()\n\n for node in start_nodes:\n if not node.init_generator:\n self.queue.put((node, (), {}))\n else:\n node_copy = copy.deepcopy(node)\n node_copy.init_generator = None\n for args, kwargs in node.init_generator:\n print(args, kwargs)\n self.queue.put((node_copy, args, kwargs))\n\n if wait:\n self.wait_for_completion()", "def build_commands(self):\r\n for tag in self.bmark.tags.keys():\r\n # if this tag is a command then return true\r\n if tag in COMMANDLIST:\r\n self.commands.append(tag)", "def run(self) -> None:\n iterable = any(hasattr(self.generator, key)\n for key in ('__iter__', '__getitem__'))\n if iterable and not self.args and not self.kwargs:\n self.__gen = self.generator\n else:\n self.__gen = self.generator(*self.args, **self.kwargs)\n for result in self.__gen:\n while True:\n if self.finished.is_set():\n return\n try:\n self.queue.put_nowait(result)\n except queue.Full:\n time.sleep(0.25)\n continue\n break\n # wait for queue to be emptied, then kill the thread\n while not self.finished.is_set() and not self.queue.empty():\n time.sleep(0.25)\n self.stop()", "def run_setup_commands(self):\n if not hasattr(self, 'commands') or not self.commands:\n return\n print('{GREEN}Running setup commands...{NC}'.format(**colors))\n for c in self.commands:\n self.mqtt.connect(self.mqtt_host)\n command = \"{c_topic}/{cmnd}\".format(**self, cmnd=c['command'])\n payload = ''\n if 'concat' in c: #It's a set of rules; so do fancy shit\n payload = ' '.join(c['concat'])\n else: #payload is the correct thing\n payload=c['payload']\n print(\"Sending {c} {p}\".format(c=command, p=payload))\n self.mqtt.publish(command, payload)\n self.mqtt.disconnect()\n sleep(1)\n if \"restart\" in c and c['restart'] == 1:\n self.online_check()", "def sendCommand(self, command:str=\"?\"):\n self.commandQueue.put(command)\n #self.queueLock.release()\n pass", "def produce(queue):\n data = ('image.xpm', 'scaled_image.xpm')\n queue.put(data) # producer adds data to the queue", "def traceQueueContents(self):\n from typhon.objects.printers import toString\n debug_print(\"Pending queue for \" + 
self.name.encode(\"utf-8\"))\n for (resolver, target, atom, args, namedArgs) in self._pending:\n debug_print(toString(target).encode('utf-8') +\n \".\" + atom.verb.encode('utf-8') + \"(\" +\n ', '.join([toString(a).encode('utf-8')\n for a in args]) + \")\")", "def init_queues(self):\n for step in self.get_children():\n if step.kind == StepKinds.queue:\n step.init_object(self.context, None)", "def _submit_to_queue(self, script_file):", "def queue_maker(queue, bucket_name):\n scraper = key_scraper.KaleidoscopeKeyScraper(\n bucket_name=bucket_name,\n queue=queue,\n )\n scraper.add_keys_to_queue()\n\n return None", "def get_from_queue(self):\n while not self.receive_queue.empty():\n cmd, kwargs = bcp.decode_command_string(\n self.receive_queue.get(False))\n self._process_command(cmd, **kwargs)", "def send_emission(self):\n if self._emit_queue.empty():\n return\n emit = self._emit_queue.get()\n emit()", "def initialize_commands(self) -> None:\n\n @self.command(name=\"snr\")\n @logger(\"all\")\n async def snr(ctx, *args):\n await ctx.message.channel.send(str(indie_seq.Seq([int(k) for k in args]).f()))\n\n @self.command(name=\"oeis\")\n @logger(\"all\")\n async def oeis(ctx, *args):\n global oeis_in_progress\n if not oeis_in_progress:\n oeis_in_progress = True\n if len(args) > 0:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(args[0]))\n else:\n await ctx.message.channel.send(indie_oeis.get_sequence_from_b_file(str(random.randint(1, 341962))))\n oeis_in_progress = False\n else:\n await ctx.message.add_reaction(\"❌\")\n\n @self.command(name=\"collatz\")\n @logger(\"all\")\n async def collatz(ctx, *args):\n num = int(args[0])\n inity = \"\" if len(args) < 2 else args[1]\n\n collatz_results = indie_collatz.collatz_info(num)\n if len(inity) == 1:\n if inity == \"e\":\n await ctx.message.channel.send(f\"Evenity trajectory of {num}: {collatz_results.evenity_trajectory}\")\n elif inity == \"o\":\n await ctx.message.channel.send(f\"Oddinity trajectory of {num}: {collatz_results.oddinity_trajectory}\")\n else:\n await ctx.message.channel.send(f\"Collatz trajectory of {num}: {collatz_results.collatz_trajectory}\")\n\n @self.group(name=\"pig\")\n @logger(\"pig-math\")\n async def pig(ctx, *args):\n if ctx.invoked_subcommand is None:\n await ctx.message.add_reaction(\"❌\")\n\n def get_user_id_from_mention(user_id):\n user_id = user_id.replace(\"<\", \"\")\n user_id = user_id.replace(\">\", \"\")\n user_id = user_id.replace(\"@\", \"\")\n user_id = user_id.replace(\"!\", \"\")\n return user_id\n\n # Pig Math commands\n\n @pig.command(name=\"challenge\")\n @logger(\"pig-math\")\n async def pig_challenge(ctx, *args):\n challengee = get_user_id_from_mention(args[1])\n challengee = (await self.fetch_user(challengee)).name\n if len(args) > 2:\n point_target = int(args[2])\n else:\n point_target = 100\n pig_challenge = indie_pig.PigChallenge.create_challenge(ctx.message.author.name, challengee, point_target)\n await ctx.message.channel.send(pig_challenge.status)\n\n @pig.command(name=\"accept\")\n @logger(\"pig-math\")\n async def pig_accept(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.accept_challenge(ctx.message.author.name))\n\n @pig.command(name=\"reject\")\n @logger(\"pig-math\")\n async def pig_reject(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigChallenge.reject_challenge(ctx.message.author.name))\n\n @pig.command(name=\"roll\")\n @logger(\"pig-math\")\n async def pig_roll(ctx, *args):\n await 
ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"roll\"))\n\n @pig.command(name=\"bank\")\n @logger(\"pig-math\")\n async def pig_bank(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"bank\"))\n\n @pig.command(name=\"score\")\n @logger(\"pig-math\")\n async def pig_score(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"score\"))\n\n @pig.command(name=\"quit\")\n @logger(\"pig-math\")\n async def pig_quit(ctx, *args):\n await ctx.message.channel.send(indie_pig.PigGame.play(ctx.message.author.name, \"quit\"))\n\n @self.command(name=\"save\")\n @logger(\"modonly\")\n async def save(ctx, *args):\n self.save_data_files()\n await ctx.message.channel.send(\"Saved.\")\n\n @self.command(name=\"balance\")\n @logger(\"all\")\n async def balance(ctx, *args):\n bals = self.data[\"balances.json\"]\n user = ctx.message.author.id\n bal = 0\n if user in bals:\n bal = bals[user]\n else:\n bals[user] = 0 \n await ctx.message.channel.send(ctx.message.author.name+\", your balance is \"+str(bal)+\".\")\n\n @self.command(name=\"credit\")\n @logger(\"modonly\")\n async def credit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n credit = 0\n for arg in args:\n try:\n credit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] += credit\n else:\n bals[user.id] = credit\n\n @self.command(name=\"debit\")\n @logger(\"modonly\")\n async def debit(ctx, *args):\n \"\"\"\n Command with credit users mentioned with first float arg detected\n \"\"\"\n users_mentioned = ctx.message.mentions\n user_mention = ctx.author.mention\n debit = 0\n for arg in args:\n try:\n debit = float(arg)\n await ctx.message.channel.send(user_mention+\", we have successfully debited as you commanded.\")\n break\n except:\n pass\n bals = self.data[\"balances.json\"]\n for user in users_mentioned:\n if user.id in bals:\n bals[user.id] -= debit\n else:\n bals[user.id] = -debit\n\n @self.command(name=\"register\")\n @logger(\"all\")\n async def register(ctx, *args):\n \"\"\"\n This command will trigger a check if the user is registered,\n if not, the bot will ask them to review the terms and conditions and accept,\n if they accept, the bot will consider them registered\n \"\"\"\n user = ctx.message.author\n user_mention = ctx.author.mention\n chan_mention = \"<#876850365730021386>\"\n \n if user in self.data[\"users.json\"]:\n await ctx.message.channel.send(user_mention+\", you are already registered. :blue_heart:\")\n else:\n self.data[\"users_asked_to_be_registered.json\"].append(user)\n await ctx.message.channel.send(user_mention+\", do you accept the \"+chan_mention+\n \" (Indie Library Terms of Service). Command .accept if you do. 
:blue_heart:\")\n \n @self.command(name=\"accept\")\n @logger(\"all\")\n async def accept(ctx, *args):\n \"\"\"\n This command will trigger a check if the user has asked to be registered.\n If they have, then calling this triggers adding them to registered users.\n If they have not, they will be asked to type .register first.\n \"\"\"\n user = ctx.message.author\n user_mention = \"<@\"+str(user.id)+\">\"\n\n if user in self.data[\"users_asked_to_be_registered.json\"]:\n self.data[\"users.json\"].append(user)\n self.data[\"users_asked_to_be_registered.json\"].remove(user)\n await ctx.message.channel.send(user_mention+\", you have been successfully registered. :blue_heart:\")\n else:\n await ctx.message.channel.send(user_mention+\", have not commanded .register yet. \"\n \"Please do so first. :blue_heart:\")", "def __init__(self):\n self.queue = Queue()", "def __post_init__(self) -> None:\n self.gtex += [None]\n self.bm += [None]\n self._q: queue.Queue = queue.Queue(maxsize=self.maxsize)", "async def send(self) -> None:\n await self._mutations.send()\n await self._counters.send()", "def encoding_loop(self, commands):\n try:\n enc_path = self.temp / 'split'\n done_path = self.temp / 'done.json'\n\n if self.resume and done_path.exists():\n log('Resuming...\\n')\n\n with open(done_path) as f:\n data = json.load(f)\n\n total = data['total']\n done = len(data['done'])\n initial = sum(data['done'].values())\n\n log(f'Resumed with {done} encoded clips done\\n\\n')\n else:\n initial = 0\n total = frame_probe_fast(self.input)\n\n if total == 0:\n total = frame_probe(self.input)\n\n d = {'total': total, 'done': {}}\n with open(done_path, 'w') as f:\n json.dump(d, f)\n\n clips = len([x for x in enc_path.iterdir() if x.suffix == \".mkv\"])\n self.workers = min(self.workers, clips)\n\n print(f'\\rQueue: {clips} Workers: {self.workers} Passes: {self.passes}\\n'\n f'Params: {self.video_params.strip()}')\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.workers) as executor:\n counter = Manager().Counter(total, initial)\n future_cmd = {executor.submit(self.encode, (cmd, counter)): cmd for cmd in commands}\n for future in concurrent.futures.as_completed(future_cmd):\n future_cmd[future]\n try:\n future.result()\n except Exception as exc:\n _, _, exc_tb = sys.exc_info()\n print(f'Encoding error {exc}\\nAt line {exc_tb.tb_lineno}')\n terminate()\n except KeyboardInterrupt:\n terminate()" ]
[ "0.65800864", "0.6378465", "0.61124223", "0.60805756", "0.60594606", "0.60329777", "0.5986247", "0.5985811", "0.58339554", "0.58075696", "0.5778677", "0.5776468", "0.5760314", "0.57391536", "0.57391536", "0.5685093", "0.56670094", "0.56434864", "0.5631529", "0.56295687", "0.5614686", "0.5594162", "0.557577", "0.5570109", "0.55525804", "0.55450594", "0.5503126", "0.5489246", "0.548566", "0.5481836", "0.54590666", "0.54480594", "0.54480594", "0.54472136", "0.5447072", "0.54418033", "0.5434153", "0.5427479", "0.54240936", "0.5422773", "0.5418151", "0.5418151", "0.5418151", "0.5418151", "0.5418151", "0.5415319", "0.540323", "0.5401557", "0.53951997", "0.53921264", "0.5370748", "0.5368897", "0.53672594", "0.53599125", "0.5359023", "0.53551054", "0.5352821", "0.5349933", "0.5341866", "0.53404737", "0.5340465", "0.53376514", "0.5334387", "0.5332139", "0.5326844", "0.5298666", "0.52927774", "0.52864355", "0.528101", "0.5280612", "0.5280612", "0.5280612", "0.52646774", "0.52556276", "0.52548265", "0.5254608", "0.525158", "0.524812", "0.5242713", "0.5242713", "0.5242713", "0.5242713", "0.52386606", "0.52265334", "0.52259535", "0.5223336", "0.5213259", "0.52117187", "0.52079594", "0.5202673", "0.51986593", "0.51974565", "0.5190591", "0.5183412", "0.5172842", "0.5153917", "0.5153035", "0.5150937", "0.51446956", "0.5140039", "0.5135916" ]
0.0
-1
Get the LastUpdateDate for this type of entity
def max_entity_date(entity_type): # assume empty date max_date = None try: # get a cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # execute the query cursor.execute(""" SELECT TO_CHAR(LastUpdateDate, 'YYYY-MM-DD HH24:MI:SS') FROM Stage0_DeltaControl WHERE EntityType = ? """, (entity_type, ) ) # fetch the max date row = cursor.fetchone() if row is not None: max_date = row[0] cursor.close() except: pass return max_date
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dt_last_update(self):\n return self.last_update", "def get_last_updated_at(self):\n return self.last_updated", "def get_last_update_time(self):\n return self.last_update_time", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_update_datetime(self):\n return datetime.strptime(self.last_update, \"%Y-%m-%d %H:%M:%S.%f\")", "def last_update_time(self):\n return self._last_update_time", "def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time", "def last_updated_time(self) -> datetime.datetime:\n return self.__last_updated_time", "def last_update(self):\n return self._last_update", "def last_update(self):\n return self._last_update", "def updated_date(self):\n return self._updated_date", "def updated_date(self):\n return self._updated_date", "def last_updated(self):\n try:\n date_ = parse(self._data.get('last_updated'))\n except (ValueError, TypeError):\n date_ = None\n return date_", "def last_updated(self):\n return self._last_updated", "def last_update(self): # TOFIX model the job and return an object instead of dictionary\n return self._data.get('summary_fields', {}).get('last_update')", "def last_update(self):\n date, time = self.data.get(\"update_date\"), self.data.get(\"update_time\")\n if date is not None and time is not None:\n return datetime.strptime(date + time, \"%d-%m-%Y%H:%M\").replace(\n tzinfo=VIENNA_TIME_ZONE\n )", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetLastModifiedDate', self.handle)", "def get_last_modified_date(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetLastModifiedDate', self.handle)", "def last_updated_time(self) -> str:\n return pulumi.get(self, \"last_updated_time\")", "def lastdate(self):\n if hasattr(self, \"_lastdate\"):\n return self._lastdate\n else:\n return None", "def updated_datetime(self) -> datetime:\n return utc_to_local(self._db_data.updated_datetime)", "def last_updated(self) -> str:\n return self._last_updated", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> str:\n return pulumi.get(self, \"last_modified_at\")", "def last_edit(self) -> datetime.datetime:\n self.update_status()\n return datetime.datetime.fromtimestamp(self._last_edit)", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def updated_at(self) -> \"datetime\":\n return self._attrs.get(\"updatedAt\")", "def last_modified_at(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def last_modified_at(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_at\")", "def get_last_update(self):\n last_update = os.path.getmtime(self.parent_filepath)\n return last_update", "def last_ownership_update_time(self) -> str:\n return pulumi.get(self, \"last_ownership_update_time\")", "def last_updated(self):\n try:\n return max(self.station_usage, key=lambda x: 
x.last_update).dt_last_update\n except ValueError:\n return datetime.fromtimestamp(0)", "def get_last_update(self):\n facility_data = self.get_raw_facilities_data()\n if facility_data is None:\n return None\n else:\n return datetime.strptime(facility_data['lastUpdate'], \"%Y-%m-%dT%H:%M:%SZ\")", "def last_modified_date_time(self):\n if \"lastModifiedDateTime\" in self._prop_dict:\n return datetime.strptime(self._prop_dict[\"lastModifiedDateTime\"].replace(\"Z\", \"\"), \"%Y-%m-%dT%H:%M:%S.%f\")\n else:\n return None", "def last_update(cls):\n\n import datetime, os\n from s3 import S3DateTime\n\n # Probe file (probing one is good enough since update_data\n # writes them all at the same time)\n filename = os.path.join(current.request.folder,\n \"static\", \"themes\", \"SHARE\", \"data\",\n \"people_affected.json\",\n )\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n last_update = None\n else:\n dt = datetime.datetime.utcfromtimestamp(mtime)\n last_update = S3DateTime.datetime_represent(dt, utc=True)\n\n return last_update", "def last_update(cls):\n\n import datetime, os\n from s3 import S3DateTime\n\n # Probe file (probing one is good enough since update_data\n # writes them all at the same time)\n filename = os.path.join(current.request.folder,\n \"static\", \"themes\", \"SHARE\", \"data\",\n \"people_affected.json\",\n )\n try:\n mtime = os.path.getmtime(filename)\n except OSError:\n last_update = None\n else:\n dt = datetime.datetime.utcfromtimestamp(mtime)\n last_update = S3DateTime.datetime_represent(dt, utc=True)\n\n return last_update", "def last_updated_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_updated_time\")", "def with_last_update(self):\n return self.annotate(last_update=Coalesce(F('modified'), F('created')))", "def get_last_update(self):\n return self.ticker.all().order_by('-created').first()", "def get_latest_date(cls):\n\n return cls.query.order_by(desc(cls.date)).first().date", "def last_timestamp(self):\n return self._last_timestamp", "def application_last_update_date(self) -> Optional[int]:\n return pulumi.get(self, \"application_last_update_date\")", "def get_last_update(self, engine=None, name=None, file_type=None, path=None):\n if engine==None and file_type == None:\n return self.last_entry_date\n elif engine:\n self.table_name(name=name)\n df = pd.read_sql_table(table_name=self.table, con=engine,\n parse_dates='DateTime', index_col='DateTime')\n else:\n if file_type.lower() not in ['pickle', 'csv']:\n raise ValueError('Incorrect file_type input, must be pickle or csv.')\n elif file_type.lower()=='pickle':\n df = pd.read_pickle(path=path)\n else:\n df = pd.read_csv(path, index_col=0)\n\n self.last_entry_date = return_datetime(df.sort_index().index.values[-1])\n return self.last_entry_date", "def time_last_modified(self):\n return self.properties.get(\"TimeLastModified\", None)", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified_time(self) -> str:\n return pulumi.get(self, \"last_modified_time\")", "def last_updated_user(self):\n return self._last_updated_user", "def last_modified_at(self):\n return self.viztrail.last_modified_at", "def last_modified_time(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_time\")", "def last_update(cls):\n\n score = Score.query.with_entities(Score.updated_on).order_by(desc(Score.updated_on)).first()\n if score:\n return score[0]\n else:\n return None", "def last_change_date(self) -> Optional[str]:\n return 
pulumi.get(self, \"last_change_date\")", "def get_last_time(self):\n \n return self._last", "def last_modified_by_type(self) -> str:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_updated_time_utc(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_updated_time_utc\")", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def updated_at(self):\n return self._updated_at", "def last_status_update(self):\n try:\n return StatusUpdate.objects.filter(section=self).latest(\"created_at\")\n except StatusUpdate.DoesNotExist:\n return None", "def last_updated_time_utc(self) -> Optional[str]:\n return pulumi.get(self, \"last_updated_time_utc\")", "def last_update(self):\r\n request = http.Request('GET', '/metadata/last_update.json')\r\n return request, parsers.parse_json", "def last_timestamp(self):\n LOGGER.debug('Getting last_timestamp as: %s', self._last_timestamp)\n return self._last_timestamp", "def update_date_time(self) -> Optional[str]:\n return pulumi.get(self, \"update_date_time\")", "def last_modified(self):\n return os.path.getmtime(self.filename)", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def updated(self) -> datetime:\n return self._updated", "def last_count_update_time(self):\n return self.__last_count_update_time", "def last_date(self):\n if self._last_date is None:\n raise ValueError(\"Run pick() method before access this property\")\n return self._last_date", "def last_modified_time(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified_time\")", "def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")", "def last_modified(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified\")", "def DateUpdated(self, default=None):\n return self.data.get('metadata', {}).get('_updated', default)", "def last_updated(self) -> float:\n raise NotImplementedError()", "def get_max_update_dt():\r\n from .utils import connect_to_pg\r\n conn = connect_to_pg()\r\n query = \"select max(updated_at) from scf.issues\"\r\n res = conn.execute(query)\r\n max_dt = res.fetchone()[0]\r\n print(max_dt)\r\n return max_dt", "def last_os_update_utc(self) -> str:\n return pulumi.get(self, \"last_os_update_utc\")", "def svn_info_t_last_changed_date_get(svn_info_t_self): # real signature unknown; restored from __doc__\n pass", "def last_modified(self) -> str:\n\t\tif not self._closed:\n\t\t\ttimestamp = self.ds.last_modified()\n\t\t\treturn timestamp\n\t\treturn None", "def last_update(self):\n # get modification time of QWC2 themes config file\n config_updated_at = None\n if os.path.isfile(self.themes_config_path):\n config_updated_at = datetime.utcfromtimestamp(\n os.path.getmtime(self.themes_config_path)\n )\n\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query timestamp\n LastUpdate = self.config_models.model('last_update')\n query = 
session.query(LastUpdate.updated_at)\n last_update = query.first()\n if last_update is not None:\n if config_updated_at is not None:\n # use latest of both timestamps\n updated_at = max(last_update.updated_at, config_updated_at)\n else:\n # use timestamp from ConfigDB\n updated_at = last_update.updated_at\n else:\n # no entry in ConfigDB, use config timestamp or now\n updated_at = config_updated_at or datetime.utcnow()\n\n # close session\n session.close()\n\n return {\n 'permissions_updated_at': updated_at.strftime(\"%Y-%m-%d %H:%M:%S\")\n }", "def last_updated_by_id(self) -> str:\n return self.__last_updated_by_id", "def last_update(self, value):\n if self._last_update != value:\n self._last_update = value\n return self._last_update", "def last_updated(self) -> Optional[datetime]:\n try:\n with open(self.last_updated_file) as f:\n dt = datetime.fromisoformat(f.read())\n if dt.tzinfo is None:\n dt = dt.astimezone(timezone.utc)\n return dt\n except FileNotFoundError:\n return None", "def date_modified(self):\n return self._date_modified", "def updated_at(self) -> str:\n return pulumi.get(self, \"updated_at\")", "def last_modified_dts(self):\n return self._last_modified_dts", "def last_edited(self):\n return self._last_edited", "def get_rates_grid_last_modified_date(self):\n return self.get_specific_column_value_from_grid(self.rates_grid_div_id, self.rates_grid_row_count, self.last_modified_column_name)", "def last_post_date(self):\n last_reply = Reply.query.filter_by(\n thread_id=self.id).order_by(Reply.id.desc()).first()\n\n if last_reply:\n return last_reply.date_created\n\n return self.date_created", "def last_modified(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"last_modified\")", "def getLastModifiedTime(self): #$NON-NLS-1$\r", "def last_document_time(self) -> datetime.datetime:\n with self.lock:\n return self._last_document_time", "def creation_timestamp(self):\n\n return self.getThisUpdate()" ]
[ "0.7807228", "0.7612656", "0.75777316", "0.74499947", "0.74499947", "0.7426561", "0.73883593", "0.73883593", "0.721249", "0.721249", "0.7182299", "0.7182299", "0.71680653", "0.71449316", "0.7084291", "0.70218515", "0.69548345", "0.6950892", "0.6945901", "0.686151", "0.68582565", "0.685549", "0.6846457", "0.6846457", "0.6835954", "0.6830678", "0.6830678", "0.6830678", "0.6821901", "0.678749", "0.678749", "0.678749", "0.678749", "0.678749", "0.678749", "0.6775058", "0.67742765", "0.6761493", "0.6745586", "0.67352986", "0.67295706", "0.67295706", "0.6692052", "0.6679671", "0.66747093", "0.66563284", "0.6649193", "0.6628532", "0.66144156", "0.65893954", "0.65839666", "0.65839666", "0.6575727", "0.65755355", "0.65691", "0.65638274", "0.65595245", "0.65388614", "0.652233", "0.65220034", "0.6517801", "0.6517801", "0.6517801", "0.65026504", "0.6496397", "0.64842355", "0.6468354", "0.64509803", "0.6435268", "0.6429157", "0.6429157", "0.6429157", "0.6429157", "0.6429157", "0.6429157", "0.64290965", "0.63918525", "0.63915217", "0.6387136", "0.6379905", "0.6379905", "0.63783723", "0.6372772", "0.63506836", "0.6324309", "0.6320676", "0.6319045", "0.6316337", "0.6306181", "0.63009703", "0.6275311", "0.6269876", "0.6269135", "0.6268164", "0.6264437", "0.6253999", "0.62527764", "0.62359035", "0.6222261", "0.6216549", "0.62003404" ]
0.0
-1
Get the list of modified entities before a specified date
def list_modified_entities(entity_type, max_date): # get a cursor conn = ecommerce.db.getConnection() cursor = conn.cursor() # execute the query cursor.execute(""" SELECT EntityId FROM Stage0_Delta WHERE EntityType = ? AND FlagUpdated = 1 AND LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS') """, (entity_type, max_date) ) # fetch the ids elist = [ ] row = cursor.fetchone() while row is not None: elist.append(int(row[0])) row = cursor.fetchone() cursor.close() return elist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def created_before(self, date: datetime):\n return self.created_search(date, search_type=\"before\")", "def is_before(self,other_date):", "def getListModifiedDates(self):\n return _libsbml.ModelHistory_getListModifiedDates(self)", "def get_records_created_before(self, when):\n return self.get_records_created_in_date_range('1970-01-01 00:00:01', when)", "def created_after(self, date: datetime):\n return self.created_search(date, search_type=\"after\")", "def is_modified_since(thing, action, date):\r\n from pylons import g\r\n\r\n prop = 'last_' + action\r\n if not hasattr(thing, prop):\r\n last_modified = make_last_modified()\r\n setattr(thing, prop, last_modified)\r\n thing._commit()\r\n else:\r\n last_modified = getattr(thing, prop)\r\n\r\n if not date or date < last_modified:\r\n return last_modified\r\n \r\n #if a date was passed in and it's equal to last modified\r\n return True", "def get_order_dates(after_date):\n query = BreadOrderDate.query \\\n .filter(BreadOrderDate.date > after_date)\n return query.all()", "def _entries_after_date(cls, entries, date):\n new_entries = []\n max_date = date\n\n for entry in entries:\n entry_date = cls._time_to_date(entry.get(\"published_parsed\"))\n if not max_date:\n # `max_date` could be None if target's last modified date is not\n # initialized yet.\n max_date = entry_date\n if all([entry_date, date]) and entry_date <= date:\n continue\n\n new_entries.append(entry)\n if entry_date and entry_date > max_date:\n max_date = entry_date\n\n return new_entries, max_date", "def get_queryset(self):\n kwargs = {}\n if self.ends_at:\n kwargs.update({'%s__lt' % self.date_field: self.ends_at})\n return super(BeforeMixin, self).get_queryset().filter(**kwargs)", "def changes(since=\"\"):\n\n last_yielded = None\n\n while True:\n resp_json = fetch_changes(since)\n has_changes = False\n\n for result in resp_json[\"results\"]:\n last_yielded = result\n has_changes = True\n yield result\n\n if not has_changes:\n return\n else:\n since = last_yielded[\"timestamp\"]", "def visitBefore(self, date):\n raise NotImplementedError()", "def _filter_by_date(from_date, until_date):\n qlist = []\n\n if from_date:\n qlist.append(Q(oai_date_stamp__gte=from_date))\n\n if until_date:\n qlist.append(Q(oai_date_stamp__lte=until_date))\n\n return qlist", "def recently_modified(request):\n pages = models.Page.all().order('modified').fetch(10)\n return utility.respond(request, 'admin/recently_modified', {'pages': pages})", "def __lt__(self, other):\n return self.date < other.date", "def getModifiedDate(self, *args):\n return _libsbml.ModelHistory_getModifiedDate(self, *args)", "def recently(self):\n items = []\n for item in self.p.entries:\n dt = datetime.fromtimestamp(mktime(item.published_parsed))\n delta = datetime.today() - dt\n\n if delta.days > self.days:\n continue\n items.append(item)\n if 'verbose' in self.args and self.args['verbose']:\n print delta.days, dt\n self.items = items\n return items", "async def get_changed_lessons(\n self,\n last_sync: datetime = None,\n deleted=False,\n date_from=None,\n date_to=None,\n **kwargs,\n ) -> Union[AsyncIterator[ChangedLesson], List[int]]:\n return ChangedLesson.get(\n self._api, last_sync, deleted, date_from, date_to, **kwargs\n )", "def get_previous_versions(self):\n return self.study.sourcestudyversion_set.filter(\n i_version__lte=self.i_version,\n i_date_added__lt=self.i_date_added\n ).order_by(\n '-i_version',\n '-i_date_added'\n )", "def addModifiedDate(self, *args):\n return _libsbml.ModelHistory_addModifiedDate(self, 
*args)", "def get_fixables(\n model, from_date=USE_DEFAULT, to_date=USE_DEFAULT, status=USE_DEFAULT,\n nnow=None):\n nnow = nnow or now()\n if from_date == USE_DEFAULT:\n from_date = default_from_date(nnow=nnow)\n if to_date == USE_DEFAULT:\n to_date = nnow.date()\n query_set = model.objects.filter(\n created__gte=beginning_of_day(from_date),\n created__lte=end_of_day(to_date)).order_by('-created')\n return do_fixed_filter(query_set, status)", "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def recent(self):\n return self.filter(\n start_date__lte=self.current().end_date + timezone.timedelta(days=1),\n end_date__gte=self.current().start_date - timezone.timedelta(days=1),\n )", "def split_changes(self, since):\n url = _SPLIT_CHANGES_URL_TEMPLATE.format(base_url=self._sdk_api_url_base)\n params = {\n 'since': since\n }\n\n return self._get(url, params)", "def on_or_before(self, date):\n return (date - mod(date - self.CORRELATION - self.to_ordinal(), 260))", "def find_months_needing_update(\n self,\n product_name: str,\n only_those_newer_than: datetime,\n ) -> Iterable[Tuple[date, int]]:\n dataset_type = self.get_dataset_type(product_name)\n\n # Find the most-recently updated datasets and group them by month.\n return sorted(\n (month.date(), count)\n for month, count in self._engine.execute(\n select(\n [\n func.date_trunc(\n \"month\", datetime_expression(dataset_type.metadata_type)\n ).label(\"month\"),\n func.count(),\n ]\n )\n .where(ODC_DATASET.c.dataset_type_ref == dataset_type.id)\n .where(dataset_changed_expression() > only_those_newer_than)\n .group_by(\"month\")\n .order_by(\"month\")\n )\n )", "def _update_modified_since(self, timestamp):\n pass", "def get_history_since(self, start=0):\n hist = self.service.users().history()\n try:\n results = hist.list(userId='me', startHistoryId=start).execute()\n if 'history' in results:\n yield results['history']\n while 'nextPageToken' in results:\n results = hist.list(userId='me',\n pageToken=results['nextPageToken'],\n startHistoryId=start).execute()\n if 'history' in results:\n yield results['history']\n\n except googleapiclient.errors.HttpError as ex:\n if ex.resp.status == 404:\n raise Gmail.NoHistoryException\n elif ex.resp.status == 403:\n raise Gmail.UserRateException(ex)\n else:\n raise Gmail.GenericException(ex)", "def cmpArtistsByDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])", "def datesBeforeCurrentDate(individual):\n birthdate = individual.get_birth_data()[0]\n deathdate = individual.get_death_data()[0]\n marriageDates = gedcom_parser.get_marriages(individual)\n\n fams = gedcom_parser.get_families(individual)\n childElements = [(fam.get_child_elements()) for fam in fams]\n\n divorceDates = []\n for elements in childElements:\n for element in elements:\n if element.get_tag() == \"DIV\":\n divorceDates.append(element.get_child_elements()[0].get_value())\n\n\n latestDivorceDate = max(convertGedcomDate(date)\n for date in divorceDates) if divorceDates else None\n latestMarriageDate = max(convertGedcomDate(\n 
date[0]) for date in marriageDates) if marriageDates else None\n birthdate = convertGedcomDate(birthdate) if birthdate else None\n deathdate = convertGedcomDate(deathdate) if deathdate else None\n\n comparisonDates = [birthdate, deathdate,\n latestMarriageDate, latestDivorceDate]\n\n if any(day > dt.now() for day in comparisonDates if day):\n print(\n f\"Error US05: Date associated with {individual.get_name()[0]} {individual.get_name()[1]} ({individual.get_pointer()}) occurs after current date\")\n return False\n else:\n return True", "def _get_entities_updated_between_times(from_time, to_time, table_names):\n LOGGER.info(\"Finding entities updated between '{from_time}' and '{to_time}'...\".format(\n from_time=from_time,\n to_time=to_time\n ))\n select_expression = \"updat_time >= '{from_time}' and updat_time <= '{to_time}'\".format(\n from_time=acm.Time.LocalToUtc(from_time),\n to_time=acm.Time.LocalToUtc(to_time)\n )\n updated_entities = acm.FArray()\n for table_name in table_names:\n table = acm.FTable['ADM.{table_name}'.format(\n table_name=table_name\n )]\n entities = table.Select(select_expression).AsArray()\n LOGGER.info(\"Found {number_of_entities} {table_name} entities updated since '{from_time}'.\".format(\n number_of_entities=len(entities),\n table_name=table_name.lower(),\n from_time=from_time\n ))\n updated_entities.AddAll(entities)\n updated_entities.SortByProperty('UpdateTime')\n LOGGER.info(\"Found {number_of_entities} total entities updated since '{from_time}'.\".format(\n number_of_entities=len(updated_entities),\n from_time=from_time\n ))\n return updated_entities", "def change_modified_date(sbml):\n history = sbml.getModel().getModelHistory()\n if history:\n history.setModifiedDate(libsbml.Date(w3c_time()))\n # remove all but final modified date\n while history.getListModifiedDates().getSize() > 1:\n history.getListModifiedDates().remove(0)", "def filter_creation_date(groups, start, end):\n results = []\n for g in groups:\n created = datetime.fromtimestamp(g['creationTime'] / 1000.0)\n if created > end:\n continue\n if created > start:\n g['exportStart'] = created\n else:\n g['exportStart'] = start\n results.append(g)\n return results", "def filter_by_date(df, date_string, before_or_after):\n filter_date = pd.to_datetime(date_string)\n if before_or_after == \"before\":\n return df[df['timestamp'] < filter_date].copy()\n elif before_or_after == \"after\":\n return df[df['timestamp'] >= filter_date + pd.DateOffset(days=1)].copy()\n else:\n return print(\"Invalid argument.\")", "def get_records_created_after(self, when):\n bi = BuiltIn()\n now = bi.get_time()\n return self.get_records_created_in_date_range(when, now)", "def test_since(self):\n import datetime\n dt1 = datetime.datetime(2013, 12, 15, 10, 10, 10)\n dt2 = datetime.datetime(2013, 12, 15, 10, 11, 10)\n\n check_list = health.CheckList(refresh=1)\n check_list._refreshed_at = dt1\n\n mock_datetime = self.mocker.replace(datetime)\n mock_datetime.datetime.now()\n self.mocker.result(dt2)\n self.mocker.replay()\n\n self.assertEqual(check_list.since(), '0:01:00')", "def qry(cls, entity, order_by_date='-modified', compare_date=None,\n date=None, time_offset=None,\n compare_version=None, version=None,\n **kwargs):\n qry = entity.query(**kwargs)\n if order_by_date == '-modified':\n qry = qry.order(-cls.modified)\n elif order_by_date == 'modified':\n qry = qry.order(cls.modified)\n elif order_by_date == 'created':\n qry = qry.order(cls.created)\n elif order_by_date == '-created':\n qry = qry.order(-cls.created)\n\n if 
date:\n if isinstance(date, basestring):\n date = datetime.datetime.strptime(date,\"%Y-%m-%d %H:%M:%S\")\n if time_offset:\n date = date + datetime.timedelta(seconds=time_offset)\n if compare_date == '>modified' :\n qry = qry.filter(cls.modified > date)\n elif compare_date == '>=modified' :\n qry = qry.filter(cls.modified >= date)\n elif compare_date == '<modified' :\n qry = qry.filter(cls.modified < date)\n elif compare_date == '<=modified' :\n qry = qry.filter(cls.modified <= date)\n elif compare_date == '>created' :\n qry = qry.filter(cls.created > date)\n elif compare_date == '>=created' :\n qry = qry.filter(cls.created >= date)\n elif compare_date == '<created' :\n qry = qry.filter(cls.created < date)\n elif compare_date == '<=created' :\n qry = qry.filter(cls.created <= date)\n\n if version:\n if compare_version == '>' :\n qry = qry.filter(cls.version > version)\n elif compare_version == '>=' :\n qry = qry.filter(cls.version >= version)\n elif compare_version == '<' :\n qry = qry.filter(cls.version < version)\n elif compare_version == '<=' :\n qry = qry.filter(cls.version <= version)\n elif compare_version == '==' :\n qry = qry.filter(cls.version == version)\n return qry", "def getChanges():", "def get_events() -> list[Event]:\n g.ledger.changed()\n return [e for e in g.filtered.entries if isinstance(e, Event)]", "def filter_status_as_of(self, status_date: datetime.date) -> QuerySet:\n return self.model._filter_queryset_status_as_of(self, status_date)", "def precipitation_from_now(self) -> List[PrecipitationAt]:\n now = datetime.now()\n return [\n precip for precip in self.precipitation\n if precip.timestamp >= now\n ]", "def get_past_reminders(self, now=None):\n now = now or datetime.datetime.now()\n store = self.load_data(default=[])\n return [\n reminder_info\n for reminder_info in store\n if reminder_info['datetime'] < now\n ]", "def filter_transactions_by_date(self, request):\n transactions = Transaction.objects.all().filter(date=request.data[\"date\"])\n serializer = TransactionSerializer(transactions, many=True)\n return Response(serializer.data)", "def earlier_date(date1, date2):\r\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def get_same_or_newer(start_date):\n#\tdata = get_file_lines(FILE_URL) ## Moved up & out of the function\n#\treader = csv.reader(data[1:])\n\n\treader1 = csv.reader(data[1:])\t## Changed the above to two lines to these two\n\treader = sorted(reader1, key=operator.itemgetter(3))\n\t\n # We want all employees that started at the same date or the closest newer\n # date. 
To calculate that, we go through all the data and find the\n # employees that started on the smallest date that's equal or bigger than\n # the given start date.\n\tmin_date = datetime.datetime.today()\n\tmin_date_employees = []\n\tfor row in reader: \n\t\trow_date = datetime.datetime.strptime(row[3], '%Y-%m-%d')\n\n # If this date is smaller than the one we're looking for,\n # we skip this row\n\t\tif row_date < start_date:\n\t\t\tcontinue\n\n # If this date is smaller than the current minimum,\n # we pick it as the new minimum, resetting the list of\n # employees at the minimal date.\n\t\tif row_date < min_date:\n\t\t\tmin_date = row_date\n\t\t\tmin_date_employees = []\n\t\n\treturn min_date, min_date_employees", "def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n items_sold_between = []\n index = 0\n start_date = str(year_from) + str(month_from) + str(day_from)\n end_date = str(year_to) + str(month_to) + str(day_to)\n for record in table:\n if end_date > record[-1] > start_date:\n items_sold_between.append(record)\n\n return items_sold_between", "def getChangeIdsLessThanIdNow(self, new_changeid):\n change_obj = rpc.RpcProxy('software_dev.commit')\n cids = change_obj.search([('id', '<', new_changeid)])\n t = Token()\n changes = self.runInteractionNow(self._get_change_num, cids)\n changes.sort(key=lambda c: c.number)\n return changes", "def setModifiedDate(self, *args):\n return _libsbml.ModelHistory_setModifiedDate(self, *args)", "def to_be_deleted(self):\n return self.filter(start__lte=timezone.now() - datetime.timedelta(days=1))", "def ls(config, args):\n\tresult = read()\n\tif args == []:\n\t\t_prettyprint(result)\n\telse:\n\t\tfor arg in args:\n\t\t\tif \"@\" == arg[0]:\n\t\t\t\tresult = list(filter(lambda x: _is_in(arg[1:], x[\"context\"]),\n\t\t\t\t\tresult))\n\t\tif \"since\" in args:\n\t\t\ttry:\n\t\t\t\tdate = datetime.datetime.strptime(args[args.index(\"since\")+1],\n\t\t\t\t\"%Y-%m-%d\").strftime(\"%Y-%m-%d\")\n\t\t\t\tresult = list(filter(lambda x: _is_newer(x[\"date\"], date), result))\n\t\t\texcept(IndexError, ValueError):\n\t\t\t\tprint(\"You must use a valid date-format after since (YYYY-MM-DD)\")\n\t\t\t\thelp()\n\t\t\t\treturn\n\t\telif \"month\" in args:\n\t\t\ttry:\n\t\t\t\traw_start_date = datetime.datetime.strptime(\n\t\t\t\t\t\targs[args.index(\"month\")+1],\n\t\t\t\t\t\t\"%Y-%m\")\n\t\t\t\tstart_date = raw_start_date.strftime(\"%Y-%m-%d\")\n\t\t\t\tend_date = (raw_start_date +\n\t\t\t\t\t relativedelta(months=1, days=-1)).strftime(\"%Y-%m-%d\")\n\t\t\t\tresult = list(filter(\n\t\t\t\t\tlambda x: _is_newer(x[\"date\"], start_date), result))\n\t\t\t\tresult = list(filter(\n\t\t\t\t\tlambda x: _is_newer(end_date, x[\"date\"]), result))\n\t\t\texcept(IndexError, ValueError):\n\t\t\t\tprint(\"You must use a valid date-format after month (YYYY-MM)\")\n\t\t\t\thelp()\n\t\t\t\treturn\n\t\t_prettyprint(result)", "def get_items_sold_between(table, month_from, day_from, year_from, month_to, day_to, year_to):\n\n min_date = common.dtime(year_from, month_from, day_from)\n max_date = common.dtime(year_to, month_to, day_to)\n\n return [[line[ID], line[TITLE], int(line[PRICE]), int(line[MONTH]), int(line[DAY]), int(line[YEAR])]\n for line in table if min_date < common.dtime(line[YEAR], line[MONTH], line[DAY]) < max_date]", "def since(self, ts):\n while True:\n items = super(TailingOplog, self).since(ts)\n for doc in items:\n yield doc\n ts = doc['ts']", "def recent(self):\n now = timezone.now()\n # construct a datetime based on now 
but with zero hour/minute/second\n today = datetime(\n now.year, now.month, now.day, tzinfo=timezone.get_default_timezone()\n )\n return self.filter(end_time__lt=today).order_by(\"-start_time\")", "def latest_updated(self, chef, date):\n added_recipes = chef.recipes_added.values_list('id', flat=True)\n return self.filter(Q(recipe__chef=chef) |\n Q(recipe__in=added_recipes, recipe__draft=False, recipe__private=False)) \\\n .filter(edit_date__gt=date) \\\n .order_by('edit_date')", "def get_updates(cls, date, team):\n return cls.query(\n cls.date == date,\n cls.team == team.lower()\n ).order(-cls.name).fetch(100)", "def transactions_since(self, since_unix_time, only_look_at=10):\n\n\n acc = self.ec.account(self.accounts[0])\n raw_txns = acc.transactions(length=only_look_at, direction='in')\n return self._process_history(raw_txns, since_unix_time, self._timestamp_getter)", "def created_between(self, date_a: datetime, date_b: datetime):\n return self.created_search(date_a, date_b, search_type=\"between\")", "def test_query_events_with_start_date_before_end_date(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_with_start_date_before_end_date,\n \"Start date must be lower than end date\"\n )", "def _update_modified_since(self, timestamp):\n new_data_sources = [\n source\n for provider in self.data_source_providers\n for source in provider.get_data_sources_modified_since(timestamp)\n ]\n filtered_data_sources = self.get_filtered_configs(new_data_sources)\n invalid_data_sources = {ds._id for ds in new_data_sources} - {ds._id for ds in filtered_data_sources}\n self._add_data_sources_to_table_adapters(filtered_data_sources, invalid_data_sources)", "def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def earlier_date(date1, date2):\n return (time.strptime(date1, \"%b %d %Y\") < time.strptime(date2, \"%b %d %Y\"))", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def cmpArtworkByDate(artwork1, artwork2):\n return (lt.firstElement(artwork1)['Date'] < lt.firstElement(artwork2)['Date'])", "def get_now2(self, tag, zeit, date=None):\n if date is None:\n now = datetime.datetime.now()\n liste = self.get_next(tag, zeit, False, now)\n if liste:\n if liste[0].get(\"delta\") < datetime.timedelta(hours=0, minutes=1, seconds=0):\n return liste\n return []", "def getElemBeforeTime(self, stamp):\n older = [msg for (msg, time) in zip(self.cache_msgs, self.cache_times)\n if time <= stamp]\n if not older:\n return None\n return older[-1]", "def dateReview(soup: str, nb:int):\n dateR = []\n for span in soup.findAll('article', attrs={'itemprop': 'review'}):\n dat = str(recovTextBetweenTags(str(span.findAll('time', attrs={\n 'itemprop': 'datePublished'})), ',')).replace(\"['[\", '').replace(\"]']\", '')\n dat = (format_date(dat))\n\n if (dat) > (datetime.now() - timedelta(nb)):\n top = span.findAll('time', attrs={'itemprop': 'datePublished'})\n dateR.append(recovTextBetweenTags(str(top), ','))\n\n return dateR", "def modified(self):\n return self.properties.get(\"Modified\", datetime.min)", "def filter_by_date(sequence, _min, _max):\r\n _max, _min = [_convert_date(x) for x in (_max, _min)]\r\n return {x for x in sequence if _max >= x.date >= _min}", "def set_modified_since(self, data):\n self.add_payload('modifiedSince', data)", "def get_queryset(self):\n\t\treturn Event.objects.filter(eDate__gte= timezone.now()).order_by('-eDate')", "def remove_by_date():\n start_date = 
request.args.get(\"start\", default=None, type=str)\n start_date = datetime.datetime.fromisoformat(start_date)\n end_date = request.args.get(\"end\", default=None, type=str)\n end_date = datetime.datetime.fromisoformat(end_date)\n\n removed = []\n for key in rd.keys(\"*\"):\n animal = json.loads(rd.get(key))\n if (\n start_date\n <= datetime.datetime.fromisoformat(animal[\"created-on\"])\n <= end_date\n ):\n removed.append(animal)\n\n for animal in removed:\n rd.delete(animal[\"uuid\"])\n\n return jsonify(removed)", "def get_all_nda_past_date(self, date_):\n if isinstance(self._orange_book_sort_by_date_df, pd.DataFrame):\n mask = self._orange_book_sort_by_date_df[\"date\"] >= date_\n return self._orange_book_sort_by_date_df.loc[mask][\"nda\"].to_list()\n else:\n _logger.warning(\n \"Not getting Orange Book data from Mongo. There \"\n \"is no last retrieval date field in Orange Book csv!\"\n )\n return []", "def get_same_or_newer(this_date):\n data = requests.get(FILE_URL, allow_redirects=True)\n open(\"employees-with-date.csv\", \"wb\").write(data.content)\n with open(\"employees-with-date.csv\") as csv_file:\n reader = csv.DictReader(csv_file)\n \n # We want all employees that started at the same date or the closest newer\n # date. To calculate that, we go through all the data and find the\n # employees that started on the smallest date that's equal or bigger than\n # the given start date.\n this_date_employees = []\n reader = sorted(reader, key=lambda d: d['Start Date'], reverse=True)\n \n row_date = datetime.datetime.strptime(row[\"Start Date\"], '%Y-%m-%d')\n \n final_date_employees.append((row[\"Name\"], row[\"Surname\"]))\n\n return final_date_employees", "def retrieve_recently_changed_orders(self, **kwargs):\n return self.client.execute(\"order/multi-get\", \"GET\", kwargs)", "def test_since(self):\n self.create_logs(self.user1, num=50, start=self.year2000)\n self.create_logs(self.user1, num=50, start=self.year2001)\n\n response = self.client.get(telemetry_url, {\n 'since': self.year2001.isoformat(),\n })\n self.assertEqual(200, response.status_code)\n\n data = json.loads(response.content)\n\n # since is non-inclusive, so the first entry is not included\n self.assertEqual(49, len(data))\n\n for entry in data:\n time = iso8601.parse_date(entry['timestamp'])\n self.assertGreater(time, self.year2001)", "def cronWaitingList(self, date):\n match = {\"task_type\": \"crontab\", \"task_day\": date, \"status\": \"waiting\"}\n l = []\n for doc in self.search(match):\n l.append(doc)\n return l", "def __iter__(self) -> Iterator[Date]:\n return iter((self.since + TimeDelta(days=i) for i in range(0, (self.until - self.since).days + 1)))", "def log_dates(repo, start_date, days):\n # http://svnbook.red-bean.com/nightly/en/svn-book.html#svn.tour.revs.dates\n end_inclusive = start_date + datetime.timedelta(days=days)\n print('Getting data from %s for %d days' % (start_date, days))\n range_str = (\n '{%s 00:00:00 +0000}:{%s 00:00:00 +0000}' % (start_date, end_inclusive))\n data = log(repo, ['-r', range_str])\n # Strip off everything outside the range.\n start_date_time = datetime.datetime(*start_date.timetuple()[:6])\n if data:\n first = sorted(data.keys())[0]\n if data[first]['date'] < start_date_time:\n del data[first]\n return data", "def get_comments_on_date(self, from_, to):\n # Implemented from template for\n # osid.relationship.RelationshipLookupSession.get_relationships_on_date\n comment_list = []\n for comment in self.get_comments():\n if overlap(from_, to, comment.start_date, 
comment.end_date):\n comment_list.append(comment)\n return objects.CommentList(comment_list, runtime=self._runtime)", "def _filter_by_date(self, date: datetime.datetime) -> bool:\n if (self._date_from and date < self._date_from) or (self._date_to and date > self._date_to):\n return False\n return True", "def getNumModifiedDates(self):\n return _libsbml.ModelHistory_getNumModifiedDates(self)", "def get_state_before(course_key, date):\n previous_stat = (\n EnrollmentTabCache.objects\n .filter(course_id=course_key, created__lt=date)\n .values('unenroll', 'enroll', 'total')\n .order_by('-created')\n )\n return previous_stat.first() if previous_stat.exists() else {'unenroll': 0, 'enroll': 0, 'total': 0}", "def test_before(self):\n q = Entry.objects.before(timezone.now())\n\n self.assertEqual(q.count(), 1)\n self.assertIn(self.e1, q)", "def getOldCodeList(self):\n tmp = []\n for child in self.children:\n tmp.extend(child.getOldCodeList())\n return tmp", "def _get_changes_metadata(document):\n return ((el.get(author_attrib),\n datetime.datetime.strptime(el.get(date_attrib), date_format))\n for el in _get_comments(document))", "def get_step_changes_after(\n project: 'projects.Project',\n timestamp: float,\n write_running: bool = False\n) -> typing.List[dict]:\n return [\n _get_step_changes(project, step, write_running)\n for step in project.steps\n if step.report.last_update_time >= timestamp\n or (step.last_modified or 0) >= timestamp\n ]", "def test_listing_incidents_invalid_date_rage(self):\n resp = self.client.get(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}), {\"since\": \"05-01-2019\", \"until\": \"01-01-2019\"}\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'since cannot be newer than until'})", "def current_objs(self, oid):\n\n t = get_time()\n return self.query.filter(self.oid == oid, self.start <= t, t <= self.end).all()", "def sort_by_date(self, date):\n return self.articles.select().where(Article.pubDate >= date)", "def split_by_date(df):\n\n split_date = df.ix['Fahrenheit 9/11'].RelDate\n early = df[df.RelDate < split_date]\n late = df[df.RelDate > split_date]\n\n return early, late", "def currentAbove(requestContext, seriesList, n):\n return [ series for series in seriesList if safeLast(series) >= n ]", "def get_recent_history(session=None): \n from model_old_schema.reference import Reference, RefBad\n\n def f(session):\n min_date = datetime.date.today() - datetime.timedelta(days=10)\n refs = session.query(Reference).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n refbads = session.query(RefBad).filter_by(created_by = session.user).filter(Reference.date_created >= min_date)\n \n history = {}\n today = datetime.date.today()\n for i in range(10):\n new_date = today - datetime.timedelta(days=i)\n history[new_date] = HistoryEntry(new_date)\n \n for ref in refs:\n if ref.date_created in history:\n history[ref.date_created].inc_ref_count()\n \n for refbad in refbads:\n if refbad.date_created in history:\n history[refbad.date_created].inc_refbad_count()\n \n return history\n \n return f if session is None else f(session)", "def _find_update_docs_since(since: str):\n delta = since_to_delta(since)\n earliest_dt = datetime.now(timezone.utc) - delta\n query = get_db().collection_group(\"updates\").where(\"date\", \">\", earliest_dt)\n return (doc.to_dict() for doc in query.stream())", "def get_history(history, whitelist, reference_time, min_age=0, quiet=False, sort_by='age', 
reverse=False):\n history = filter_images(history, whitelist)\n\n # Get the image information form history - filter by min age\n data = [(image, reference_time - timestamp) for image, timestamp in history.items()\n if reference_time - timestamp >= min_age]\n\n if sort_by:\n # Sort by required attribute - supported attributes (age, image)\n data = sorted(data, key=SORT_BY_TO_KEY[sort_by], reverse=reverse)\n\n if quiet:\n # Keep only image names\n return [image for image, age in data]\n\n return [(image, humanize.naturaltime(datetime.timedelta(seconds=age))) for image, age in data]", "def get_checkpoint_list_by_date(cls, date):\n return cls.create_checkpoint_list_by_date(date)", "def remove_old_entries(self, expires_before):\n if expires_before.tzinfo is None:\n # if expires_before is not timezone-aware, assume local time\n expires_before = expires_before.astimezone()\n\n keys_to_delete = set()\n for key, (response, _) in self.responses.items():\n if response.expiration_date is not None and response.expiration_date < expires_before:\n keys_to_delete.add(key)\n\n for key in keys_to_delete:\n self.delete(key)", "def queryset(self, request):\r\n TODAY = datetime.date.today()\r\n YEAR = int(TODAY.year)\r\n qs = super(PresentationAdmin, self).queryset(request)\r\n start_date = datetime.date(YEAR, 1, 1)\r\n return qs.filter(date_created__gte=start_date)", "def cmpBeginDate(artist1, artist2):\n return int(artist1['BeginDate']) < int(artist2['BeginDate'])", "def get_order_dates_extended(user, after_date):\n result = {}\n dates = get_order_dates(after_date)\n for order_date in dates:\n result[order_date.id] = {\n 'id': order_date.id,\n 'date': order_date.date,\n 'is_active': order_date.is_active,\n 'is_editable': order_date.is_editable,\n 'orders': [],\n 'total_price': 0\n }\n\n orders = get_orders(user, after_date)\n for order in orders:\n if order.date.id not in result:\n continue\n data = result[order.date.id]\n data['orders'].append({\n 'id': order.id,\n 'type': order.type.name\n })\n data['total_price'] += order.type.price\n return result.values()", "def get_recently_articles(cls, num):\n return cls.objects.values('title', 'view_times', 'update_time', 'author')\\\n .filter(status=0).order_by('-update_time')[:num]", "def filter_events_before_infection(events, admittime, infection_time, preceding_time,\n datetime_pattern=DATETIME_PATTERN, time_key=\"charttime\"):\n admittime_datetime = datetime.strptime(admittime, datetime_pattern)\n infection_datetime = datetime.strptime(infection_time, datetime_pattern) - timedelta(hours=preceding_time)\n new_events = []\n for event in events:\n # Pega a data do evento e o transforma em datetime\n event_datetime = datetime.strptime(event[time_key], datetime_pattern)\n # Compara se o evento aconteceu entre a data de adimissão e a data de infecção (já alterada)\n if event_datetime > admittime_datetime and event_datetime <= infection_datetime:\n new_events.append(event)\n return new_events" ]
[ "0.6760683", "0.59212226", "0.58793044", "0.58698404", "0.573927", "0.5540924", "0.5527508", "0.5492065", "0.5454459", "0.5417637", "0.5409974", "0.53736883", "0.5361394", "0.5243023", "0.52254313", "0.5220781", "0.5217296", "0.5216376", "0.51910895", "0.51544005", "0.5142516", "0.5133774", "0.5118226", "0.5090929", "0.5059159", "0.50566256", "0.5042763", "0.50417924", "0.503993", "0.5038319", "0.50272274", "0.5022974", "0.49926853", "0.49775642", "0.49697512", "0.4965181", "0.49368986", "0.49362212", "0.49323475", "0.49226293", "0.4921518", "0.4917019", "0.49149913", "0.49015504", "0.48983896", "0.48858795", "0.4884692", "0.48817524", "0.48811564", "0.48787063", "0.48768416", "0.48764655", "0.4873091", "0.48489064", "0.4843495", "0.4842259", "0.48338526", "0.4822601", "0.482183", "0.482183", "0.48186418", "0.48138034", "0.4813047", "0.48113498", "0.48020995", "0.478866", "0.4776767", "0.47702673", "0.47673926", "0.47668496", "0.4764709", "0.4762644", "0.4761384", "0.47572148", "0.47533122", "0.4752669", "0.4745974", "0.4743739", "0.473669", "0.47328734", "0.4724041", "0.47230408", "0.47174606", "0.47161356", "0.47085804", "0.47080415", "0.4705637", "0.47029927", "0.4702288", "0.47005513", "0.46989992", "0.46952516", "0.4686469", "0.46861345", "0.46816134", "0.468023", "0.4679521", "0.46784776", "0.46779597", "0.46632862" ]
0.5889012
2
Mark the entities modified before a specific date as processed
def mark_processed_entities(entity_type, max_date):

    try:

        # get a connection and cursor
        conn = ecommerce.db.getConnection()
        cursor = conn.cursor()

        # execute the query
        cursor.execute("""
            UPDATE Stage0_Delta
            SET    FlagUpdated = 0
            WHERE  EntityType = ?  AND
                   FlagUpdated = 1 AND
                   LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS')
        """, (entity_type, max_date) )

        # commit changes
        conn.commit()
    except:
        conn.rollback()
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visitBefore(self, date):\n raise NotImplementedError()", "def is_before(self,other_date):", "def modified(self):\r\n\t\treturn self.last_modified > self.last_processed", "def modified(self):\n\t\treturn self.last_modified > self.last_processed", "def _update_modified_since(self, timestamp):\n pass", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_date(self, modified_date):\n\n self._modified_date = modified_date", "def modified_object(obj, event):\n now = datetime.now(tz=_zone)\n obj.modification_date = now", "def set_modified_since(self, data):\n self.add_payload('modifiedSince', data)", "def is_modified_since(thing, action, date):\r\n from pylons import g\r\n\r\n prop = 'last_' + action\r\n if not hasattr(thing, prop):\r\n last_modified = make_last_modified()\r\n setattr(thing, prop, last_modified)\r\n thing._commit()\r\n else:\r\n last_modified = getattr(thing, prop)\r\n\r\n if not date or date < last_modified:\r\n return last_modified\r\n \r\n #if a date was passed in and it's equal to last modified\r\n return True", "def date_modified(self, date_modified):\n \n self._date_modified = date_modified", "def test_modification_date(self):\n form_data = {'seo_title': 'New Title',\n 'seo_title_override:int': 1,\n 'form.submitted:int': 1}\n\n md_before = self.my_doc.modification_date\n self.publish(path=self.mydoc_path+'/@@seo-context-properties',\n basic=self.basic_auth, request_method='POST',\n stdin=StringIO(urllib.urlencode(form_data)))\n md_after = self.my_doc.modification_date\n\n self.assertNotEqual(md_before, md_after)", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def date_modified(self, date_modified):\n\n self._date_modified = date_modified", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def set_modified(self, dt):\n self.modified = dt_to_iso(dt)", "def created_before(self, date: datetime):\n return self.created_search(date, search_type=\"before\")", "def preProcess(self, datum):\n pass", "def before_revision(self, before_revision):\n\n self._before_revision = before_revision", "def setModifiedDate(self, *args):\n return _libsbml.ModelHistory_setModifiedDate(self, *args)", "def change_modified_date(sbml):\n history = sbml.getModel().getModelHistory()\n if history:\n history.setModifiedDate(libsbml.Date(w3c_time()))\n # remove all but final modified date\n while history.getListModifiedDates().getSize() > 1:\n history.getListModifiedDates().remove(0)", "def mark_started(self):\n self.started = datetime.now()\n self.save()", "def modified(self):\n raise NotImplementedError", "def mark_preprocessed(self, processor):\n self.__preprocessed[processor] = True", "def modified(self, modified):\n\n self._modified = modified", "def pre_modify(self):\n return 0", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def modified_at(self, modified_at):\n\n self._modified_at = modified_at", "def addModifiedDate(self, *args):\n return _libsbml.ModelHistory_addModifiedDate(self, *args)", "def setLastModified(when):", "def set_LastUpdatedBefore(self, value):\n 
super(ListOrdersInputSet, self)._set_input('LastUpdatedBefore', value)", "def original_modified(self):\n if self.modified > self.created:\n return True\n else:\n return False", "def set_CreatedBefore(self, value):\n super(ListOrdersInputSet, self)._set_input('CreatedBefore', value)", "def on_or_before(self, date):\n return (date - mod(date - self.CORRELATION - self.to_ordinal(), 260))", "def save(self, *args, **kwargs):\n self.modify_ts = datetime.now()\n super(ModelBase, self).save(*args, **kwargs)", "def pre_save(cls: any, sender: any, document: Document, **kwargs: dict) -> None:\n document.updated_at = datetime.now()", "def task_instance_pre_save_handler(instance, **_):\n if instance.state in (SUCCESSFUL, FAILED):\n instance.datetime_finished = timezone.now()", "def pre_process_before_count(self, document_lines):\n start_time = time.time()\n normalize_lines = list()\n for line in document_lines:\n line = lower_sentence(line)\n date_match_objects, replace_line = find_date_entity_by_pattern(line)\n time_match_objects, replace_line = find_url_entity_by_pattern(replace_line)\n replace_line = add_space_to_unwanted_char(replace_line)\n normalize_line = convert_entity_in_sen_to_normal(replace_line, date_match_objects + time_match_objects)\n normalize_lines.append(normalize_line)\n\n normalize_document = \"\\n\".join(normalize_lines)\n self.log.info(\"Duration pre process before count: {duration}\".format(duration=float(time.time() - start_time)))\n return normalize_document", "def visitInterpreted(self, date):\n raise NotImplementedError()", "def not_modified(self):\n self.status = 304", "def onchange_rh_date(self):\n if self._context.get('load_from_rh'):\n self.onchange_rh_job()", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.modified_at = datetime.datetime.utcnow()\n return super().save(*args, **kwargs)", "def modified(self):\n return self.properties.get(\"Modified\", datetime.min)", "def add_to_timeline(new_status):\n timeline = CacheManager.get_timeline()\n index = [timeline.index(status) for status in timeline \\\n if parse(status['pubdate']) < parse(new_status['pubdate'])]\n if len(index) == 0:\n timeline.insert(0, new_status)\n else:\n timeline.insert(index[0], new_status)\n\n if len(timeline) > 1000:\n timeline = timeline[0:999]\n to_cache(CacheManager.cache_file_location, 'timeline', timeline)", "def _update_modified_since(self, timestamp):\n for data_source in self.data_source_provider.get_data_sources_modified_since(timestamp):\n pillow_logging.info(f'updating modified registry data source: {data_source.domain}: {data_source._id}')\n self._add_or_update_data_source(data_source)", "def set_due_date(node):\r\n try:\r\n student_module = StudentModule.objects.get(\r\n student_id=student.id,\r\n course_id=course.id,\r\n module_state_key=node.location\r\n )\r\n\r\n state = json.loads(student_module.state)\r\n state['extended_due'] = DATE_FIELD.to_json(due_date)\r\n student_module.state = json.dumps(state)\r\n student_module.save()\r\n except StudentModule.DoesNotExist:\r\n pass\r\n\r\n for child in node.get_children():\r\n set_due_date(child)", "def isEffective( self, date ):\n return 1", "def markChangedPublicationsFromSymplectic(modified_since):\r\n #description\r\n #date needs to be in form of yyyy-mm-dd\r\n # will then append string \"T00:00:00Z\" as we are in UTC-0 timezone in which : becomes %3A\r\n #symplectic api url and local file path\r\n url = SYMPLECTIC_API_URL 
+ 'search-publications?modified-since-when=' + modified_since + 'T00%3A00%3A00Z'\r\n tmp_filename = SYMPLECTIC_LOCAL_XML_FOLDER + SYMPLECTIC_LOCAL_PUBSMODIFIED_FOLDER + modified_since + '.xml'\r\n #get xml document from symplectic api and store on hd\r\n (tmp_filename, http_headers,) = urllib.urlretrieve(url, tmp_filename)\r\n #parse xml file\r\n search_publications_etree = ElementTree(file=tmp_filename)\r\n #delete local file from hd\r\n #try:\r\n os.remove(tmp_filename)\r\n #except:\r\n #pass \r\n #publication lite elements are held in a subtree BUT the subtree is the root element\r\n #search_publications_subtree = search_publications_etree.find(SYMPLECTIC_NAMESPACE + 'search-publications-response')\r\n search_publications_subtree = search_publications_etree.getroot()\r\n #check if any publication elements in subtree\r\n if search_publications_subtree is None or len(search_publications_subtree) < 1:\r\n return\r\n #for each publication element in subtree\r\n for search_publication_element in search_publications_subtree.getchildren():\r\n SymplecticXMLPubs.__flagPublicationAsNeedsRefetch(search_publication_element)", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def pre_process(self):\n pass", "def _set_status_db_sample_sequenced_at(\n status_db_sample: Sample, flow_cell_sequenced_at: datetime\n) -> None:\n is_newer_date: datetime = (status_db_sample.sequenced_at is None) or (\n flow_cell_sequenced_at > status_db_sample.sequenced_at\n )\n if is_newer_date:\n status_db_sample.sequenced_at: datetime = flow_cell_sequenced_at", "def created_at_lt(self, created_at_lt):\n\n self._created_at_lt = created_at_lt", "def set_recent(self):\n for sub in Submission.objects.filter(assignment=self.assignment, student=self.student).exclude(status=self.CH_PREVIOUS):\n if sub != self:\n sub.status = self.CH_PREVIOUS\n sub.save()", "def __lt__(self, other):\n return self.date < other.date", "def _modificationStatusChanged(self, m, editor):\n raise RuntimeError('Not implemented')", "def notify_modification(self):\n self._trigger_modification(done=True)", "def test_before(self):\n q = Entry.objects.before(timezone.now())\n\n self.assertEqual(q.count(), 1)\n self.assertIn(self.e1, q)", "def save(self, *args, **kwargs):\n if self.is_published and not self.published_on:\n self.published_on = timezone.now()\n else:\n try:\n # Get the old object currently in the database\n old_object = Contribution.objects.get(pk=self.pk)\n except Contribution.DoesNotExist:\n pass\n else:\n # If the object was republished, change the datetime\n if not old_object.is_published and self.is_published:\n self.published_on = timezone.now()\n \"\"\" Always add last_modified_on date \"\"\"\n self.last_modified_on = timezone.now()\n super(Contribution, self).save(*args, **kwargs)", "def mark(self, job, status='succeeded'):\n pass", "def mark_assembly_as_modified(self):\n self._assemblyIsModified = True", "def test_modified_by_without_admin(self):\n self.subject.title = 'Subject 1(modified)'\n\n self.now += second\n self.subject.save(update_fields=['title'])\n\n self.assert_object_fields(\n self.subject,\n modified_by=None,\n modified=self.now)", "def create_posted_on_property(self):\n self.posted_on = self.added_on.date", "def hasBeenModified(self):\n return _libsbml.Date_hasBeenModified(self)", "def prepare_actor_modified_date(self, object):\n if object.actor_modified is not None:\n return object.actor_modified.date()\n else:\n return ''", "def 
set_valid_before(self, before=18446744073709551615):\n self.valid_before = before", "def local_created_at_lt(self, local_created_at_lt):\n\n self._local_created_at_lt = local_created_at_lt", "def calendarPageChanged(self, year, month):\n success = self.porker_thread.extendDates(datetime.date(year, month, 1))\n #if not success:\n # self.alertMessage(\"Failure!\",\"Unable to extend the thread's dates for some reason.\")\n #efficiency = self.porker_thread.getEfficiencyFor(self.getActiveDate())\n #self.porker_thread.sentDatesData = False", "def is_modified(self):\n return self._tag == 'modified'", "def touched_files(self, parent):", "def _update_modified_data_sources(self):\n new_last_imported = datetime.utcnow()\n self._update_modified_since(self.last_imported)\n self.last_imported = new_last_imported", "def on_action_time_changed(self, content):\n time = parse_iso_dt(content['time']).time()\n self.set_guarded(time=time)", "def update(self, date):\r\n self.date = date", "def visitAfter(self, date):\n raise NotImplementedError()", "def _set_date(line, dirtydate, date):\n line = re.sub(dirtydate, date, line, 2)\n return line", "def _before_execute(self, db, entity):\n pass", "def _before_execute(self, db, entity):\n pass", "def do_before(self):\r\n pass", "def isSetModifiedDate(self):\n return _libsbml.ModelHistory_isSetModifiedDate(self)", "def is_before(self, other):\n if self.year > other.year:\n return False\n if self.year == other.year:\n if self.month > other.month:\n return False\n if self.year == other.year:\n if self.month == other.month:\n if self.day >= other.day:\n return False\n return True", "def modified_flag(self, event):\n text = self.get_current()\n text.modified = 1", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def timestamp_one(self, path):\n stat = path.stat()\n sde = self.manager.source_date_epoch\n if stat.st_mtime > sde:\n cls = self.__class__.__name__\n self.log.debug(\n f\"[lite][base] <{cls}> set time to source_date_epoch {sde} on {path}\"\n )\n os.utime(path, (sde, sde))\n return\n return", "def test_searchBefore(self):\n self.assertFalse(\n self.server.search_BEFORE(self.earlierQuery, self.seq, self.msg))\n self.assertFalse(\n self.server.search_BEFORE(self.sameDateQuery, self.seq, self.msg))\n self.assertTrue(\n self.server.search_BEFORE(self.laterQuery, self.seq, self.msg))", "def cmt_changed(self, *args):\n return _ida_hexrays.Hexrays_Hooks_cmt_changed(self, *args)", "def mark_seen(self):\r\n self.seen_at = now()\r\n return self", "def sanitise_dates(note):\n now = time.time()\n max_delta = 31536000 # a year of seconds\n\n if 'modificationDate' in note:\n if float(note['modificationDate']) - now > max_delta:\n note['modificationDate'] = now\n\n if 'creationDate' in note:\n if float(note['creationDate']) - now > max_delta:\n note['creationDate'] = now", "def test_notBeforeBefore(self):\n dbpool, qpool, clock, performerChosen = self._setupPools()\n\n @transactionally(dbpool.pool.connection)\n def check(txn):\n return qpool.enqueueWork(\n txn, DummyWorkItem, a=3, b=9,\n notBefore=datetime.datetime(2012, 12, 12, 12, 12, 0)\n )\n\n yield check\n\n clock.advance(1000)\n # Advance far beyond the given timestamp.\n self.assertEquals(performerChosen, [True])\n\n # Wait for job\n while (yield 
inTransaction(dbpool.pool.connection, lambda txn: JobItem.all(txn))):\n clock.advance(1)\n\n # Work item complete\n self.assertTrue(DummyWorkItem.results == {1: 12})", "def _edit(self, from_date=None, to_date=None,\n event_type=None, transparent=None, document=None, **kw):\n setdirty = 0\n old_status = self.event_status\n CPSBaseDocument.edit(self, **kw)\n new_status = self.event_status\n if new_status != old_status:\n if new_status == 'canceled':\n calendar = self.getCalendar()\n calendar.cancelEvent(self)\n if old_status == 'canceled':\n calendar = self.getCalendar()\n calendar.unCancelEvent(self)\n setdirty = 1\n\n if event_type is not None and event_type != self.event_type:\n setdirty = 1\n self.event_type = event_type\n if transparent is not None:\n self.transparent = transparent\n if from_date is not None and self.from_date != from_date:\n setdirty = 1\n self.from_date = from_date\n if to_date is not None and self.to_date != to_date:\n setdirty = 1\n self.to_date = to_date\n if document is not None and document != self.document:\n setdirty = 1\n self.document = document\n self._normalize()\n return setdirty", "def before_update(self, obj, st):\n pass", "def mark_no_changes(self):", "def check_modification_dates(record):\n tz = get_localzone()\n\n head = requests.head(record['remote_url'])\n if head.status_code != 200:\n logger.warn('Got status code: %s' % (head.status_code))\n record['modified'] = False\n return record\n record['remote_datemod'] = parse(head.headers['last-modified'])\n if os.path.isfile(record['local_file']):\n record['local_datemod'] = datetime.datetime.fromtimestamp(os.path.getmtime(record['local_file']))\n else:\n # use some date in the past\n record['local_datemod'] = datetime.datetime(year=2014, month=1, day=1)\n record['local_datemod'] = tz.normalize(tz.localize(record['local_datemod'])).astimezone(pytz.utc)\n\n logger.info(' Remote file modified: %s' % (record['remote_datemod'].isoformat()))\n logger.info(' Local file modified: %s' % (record['local_datemod'].isoformat()))\n\n if record['remote_datemod'] < record['local_datemod']:\n logger.info(' -> Local data are up-to-date.')\n record['modified'] = False\n return record\n\n logger.info(' -> Fetching updated data...')\n fetch(record['remote_url'], record['local_file'])\n record['modified'] = True\n return record" ]
[ "0.6026466", "0.60223776", "0.6005824", "0.58785576", "0.5820918", "0.5775946", "0.5775946", "0.57705164", "0.56849986", "0.5605239", "0.5544186", "0.55204594", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.54331017", "0.5399244", "0.5399244", "0.5380063", "0.5378017", "0.5366152", "0.5307051", "0.53008634", "0.5261901", "0.52558434", "0.5210573", "0.5206624", "0.5199639", "0.5196136", "0.5196136", "0.51903325", "0.51821554", "0.51811075", "0.5167892", "0.51443976", "0.51134163", "0.5110457", "0.50640154", "0.50458205", "0.5040725", "0.5021446", "0.49833837", "0.49602464", "0.4955092", "0.4955092", "0.49507916", "0.49453175", "0.4919225", "0.49133426", "0.4912428", "0.48987493", "0.4898082", "0.4898082", "0.4898082", "0.4898082", "0.4898082", "0.48957822", "0.4881925", "0.4878331", "0.48733932", "0.48596534", "0.48556915", "0.48509035", "0.48438013", "0.48368752", "0.48236248", "0.48188102", "0.48129827", "0.48018026", "0.47868988", "0.47758263", "0.47754383", "0.47724786", "0.4767936", "0.47650585", "0.47550124", "0.47507155", "0.4731659", "0.47308528", "0.4727151", "0.47261426", "0.47261426", "0.47247308", "0.4722912", "0.47211635", "0.47170705", "0.47073978", "0.47073978", "0.46949592", "0.46888164", "0.4686082", "0.46852833", "0.46849662", "0.46830687", "0.46773705", "0.46761087", "0.46649984" ]
0.6365167
0
Process the Stage0_Delta table and create queue commands to process each record that has FlagUpdated = 1
def cmd_delta(arguments):

    cmd_delta.entities = [
        # each entry is a 2-uple, stating the EntityType and
        # wheter if queue jobs must be generated or not
        ( "CONT", False ),
        ( "IMPR", False ),
        ( "_DSP", False ),
        ( "SUBJ", True ),
        ( "PROD", True ),
        ( "PAGE", True )
    ]

    # iterate each entity type
    for e in range(len(cmd_delta.entities)):

        # get handy values
        entity_type = cmd_delta.entities[e][0]
        generate = cmd_delta.entities[e][1]

        print "Processing Entity %s" % (entity_type),
        logger.info("Processing Entity %s" % (entity_type))

        # get the max date
        max_date = max_entity_date(entity_type)
        if max_date is not None:

            # get the list of modified entities (if we need to generate)
            entities = [ ]
            if generate:
                # get the list of entities
                entities = list_modified_entities(entity_type, max_date)

            # generate queue jobs
            producer = getProducer()
            partialJobCount = generateJobs(producer, entity_type, entities)

        # mark entities as processed
        if max_date is not None:
            mark_processed_entities(entity_type, max_date)
            print "- DONE (%d entities)" % len(entities)
            logger.info("Processed Entity %s OK (%d entities)" % (entity_type, len(entities)))
        else:
            print "- ERROR"
            logger.info("Processed Entity %s with ERROR" % (entity_type))

    return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def queueUpdate(self):\n packts = 1\n self.ueLst = list(self.ues.keys())\n self.resAlloc(self.nrbUEmax)\n sym = 0\n if self.nrbUEmax == 0:\n self.sm_lim = 0\n else:\n if self.mimomd == 'MU':\n self.sm_lim = self.symMax*self.nlayers\n else:\n self.sm_lim = self.symMax\n\n while len(self.ueLst)>0 and packts>0 and sym < self.sm_lim:\n ue = self.ueLst[self.ind_u]\n self.printDebDataDM('---------------- '+ue+' ------------------<br>') # print more info in debbug mode\n if self.ues[ue].symb>0:\n if len(self.ues[ue].bearers)>0 and sym < self.sm_lim:\n if len(self.ues[ue].pendingTB)==0: # No TB to reTX\n sym = sym + self.rrcUncstSigIn(ue)\n if sym < self.sm_lim and len(self.ues[ue].bearers[0].buffer.pckts)>0:\n sym = sym + self.dataPtoTB(ue)\n else: # There are TB to reTX\n self.printPendTB()\n sym = sym + self.retransmitTB(ue)\n if self.dbMd:\n self.printQtb() # Print TB queue in debbug mode\n self.updIndUE()\n packts = self.updSumPcks()", "def process_updates():\n print \"[{x}] Processing Requests\".format(x=dates.now())\n WorkflowApi.process_requests()\n WorkflowApi.process_enhancements()", "def process_queued_msg(self):\n try:\n while not self.queue.empty():\n port, tbl = self.queue.get()\n reveived_port = self.switches[port.neighbor_switch_dpid].ports[port.neighbor_port_no]\n self.tbl.update_by_neighbor(reveived_port, port, tbl)\n self.deploy_routing_table()\n except:\n pass", "def _queue_delta_table_job(self, executor, futures_to_cb, fn, description):\n self._logger.debug('Storing delta in table containing {0} [QUEUED]...'.format(description))\n futures_to_cb[executor.submit(fn, executor)] = partial(self._process_delta_table_job_result,\n description)", "def queue (self):\n\n with self.__t.steps():\n import exception\n from lib import schema\n from lib import common\n from lib import process\n from lib import data\n from sqlalchemy import and_\n import json\n from collections import OrderedDict\n\n with schema.select(\"process_queue\", schema.table.process_queue.status==None) as select:\n for queued in select.limit(1000).all():\n blocked = False\n if len(queued.depend) > 0:\n for depend_id in queued.depend:\n depend = schema.select_one(\"series\", schema.table.series.id==depend_id)\n match_tags = json.dumps(OrderedDict(sorted(data.decode_tags(depend.tags).items())))\n if depend and schema.select_one(\"process_queue\", schema.table.process_queue.tags==match_tags):\n blocked = True\n break # queued dependencies\n if not blocked:\n queued.status = \"queued\"\n schema.save(queued)\n run.apply_async([queued.tags]) #queue process\n self.__t.ok()\n self.apply_async(queue=\"control\", countdown=30) #queue next", "def queueStatusAll():", "def stage_and_commit(self):\n self.stage_all()\n self.commit()", "def requeue_changes(cls, queue):\n for c in sorted(cls.get_changes(), key=lambda c: 1 if fnmatch.fnmatch(c, \"*mini-buildd-build*\") else 0):\n LOG.info(\"Incoming: Re-queuing: {c}\".format(c=c))\n queue.put(c)", "def mark_processed_entities(entity_type, max_date):\r\n\r\n try:\r\n \r\n # get a connection and cursor\r\n conn = ecommerce.db.getConnection()\r\n cursor = conn.cursor()\r\n \r\n # execute the query\r\n cursor.execute(\"\"\"\r\n UPDATE Stage0_Delta\r\n SET FlagUpdated = 0\r\n WHERE EntityType = ? 
AND\r\n FlagUpdated = 1 AND\r\n LastUpdate <= TO_DATE(?, 'YYYY-MM-DD HH24:MI:SS')\r\n \"\"\", (entity_type, max_date) )\r\n \r\n # commit changes\r\n conn.commit()\r\n except:\r\n conn.rollback()\r\n pass", "def finish_stager_tasks(self):\n\n update_files = {}\n messages = []\n while not self.finished_queue.empty():\n file = self.finished_queue.get()\n update_files[file['content_id']] = {'status': ContentStatus.AVAILABLE,\n 'pfn_size': file['pfn_size'],\n 'pfn': file['pfn']}\n msg = {'event_type': 'FILE_AVAILABLE',\n 'payload': {'scope': file['scope'],\n 'name': file['name'],\n 'startEvent': file['min_id'],\n 'lastEvent': file['max_id'],\n 'pfn': file['pfn']},\n 'created_at': date_to_str(datetime.datetime.utcnow())}\n messages.append(msg)\n\n self.logger.info('Got %s staged outputs' % len(update_files))\n update_contents_by_id(update_files)\n\n if self.send_messaging:\n for msg in messages:\n self.messaging_queue.put(msg)", "def _process_incoming_queue_messages(self):\n while self._queue.qsize():\n msg = self._queue.get()\n if msg == MAP_UPDATE:\n self._clear_measurement_progress_label()\n self._presenter.update_map(self.chosen_value.get())", "def _do(self):\n # Get all the messages in queue\n msgs = self.RPC.query.all()\n for msg in msgs:\n # Find the first msg marked as enqueued.\n\n if msg.working and \\\n (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n > self.conf.messaging_server.response_timeout:\n msg.status = message.Message.ENQUEUED\n msg.update(condition=self.working_status_condition)\n\n if not msg.enqueued:\n continue\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n\n # Change the status to WORKING (operation with a lock)\n msg.status = message.Message.WORKING\n msg.owner = socket.gethostname()\n # All update should have a condition (status == enqueued)\n _is_updated = msg.update(condition=self.enqueued_status_condition)\n\n if not _is_updated or 'FAILURE' in _is_updated:\n continue\n\n # RPC methods must not start/end with an underscore.\n if msg.method.startswith('_') or msg.method.endswith('_'):\n error_msg = _LE(\"Method {} must not start or end\"\n \"with underscores\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # The first endpoint that supports the method wins.\n method = None\n for endpoint in self.endpoints:\n if msg.method not in dir(endpoint):\n continue\n endpoint_method = getattr(endpoint, msg.method)\n if callable(endpoint_method):\n method = endpoint_method\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {} is \"\n \"handled by endpoint {}\".\n format(msg.id, msg.method,\n method.__str__.__name__))\n break\n if not method:\n error_msg = _LE(\"Message {} method {} unsupported \"\n \"in endpoints.\").format(msg.id, msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n # All methods must take a ctxt and args param.\n if inspect.getfullargspec(method).args != ['self', 'ctx', 'arg']:\n error_msg = _LE(\"Method {} must take three args: \"\n \"self, ctx, arg\").format(msg.method)\n self._log_error_and_update_msg(msg, error_msg)\n return\n\n LOG.info(_LI(\"Message {} method {} received\").format(\n msg.id, msg.method))\n if self.conf.messaging_server.debug:\n LOG.debug(\n _LI(\"Message {} method {} context: {}, 
args: {}\").format(\n msg.id, msg.method, msg.ctxt, msg.args))\n\n failure = None\n try:\n\n # Add the template to conductor.plan table\n # Methods return an opaque dictionary\n result = method(msg.ctxt, msg.args)\n\n # FIXME(jdandrea): Remove response/error and make it opaque.\n # That means this would just be assigned result outright.\n msg.response = result.get('response', result)\n except Exception:\n # Current sys.exc_info() content can be overridden\n # by another exception raised by a log handler during\n # LOG.exception(). So keep a copy and delete it later.\n failure = sys.exc_info()\n\n # Do not log details about the failure here. It will\n # be returned later upstream.\n LOG.exception(_LE('Exception during message handling'))\n\n try:\n if failure is None:\n msg.status = message.Message.COMPLETED\n else:\n msg.failure = \\\n rpc_common.serialize_remote_exception(failure)\n msg.status = message.Message.ERROR\n LOG.info(_LI(\"Message {} method {}, status: {}\").format(\n msg.id, msg.method, msg.status))\n if self.conf.messaging_server.debug:\n LOG.debug(\"Message {} method {}, response: {}\".format(\n msg.id, msg.method, msg.response))\n\n _is_success = 'FAILURE'\n while 'FAILURE' in _is_success and (self.current_time_seconds() - self.millisec_to_sec(msg.updated)) \\\n <= self.conf.messaging_server.response_timeout:\n _is_success = msg.update()\n LOG.info(_LI(\"updating the message status from working to {}, \"\n \"atomic update response from MUSIC {}\").format(msg.status, _is_success))\n\n except Exception:\n LOG.exception(_LE(\"Can not send reply for message {} \"\n \"method {}\").\n format(msg.id, msg.method))\n finally:\n # Remove circular object reference between the current\n # stack frame and the traceback in exc_info.\n del failure", "def process(rec, conn):\n try:\n # Changes members from distinguished name to next_id for roles\n if \"members\" in rec[\"data\"]:\n rec = translate_field_to_next(rec, \"members\")\n if \"owners\" in rec[\"data\"]:\n rec = translate_field_to_next(rec, \"owners\")\n\n add_transaction(rec)\n if \"batch\" not in rec or not rec[\"batch\"]:\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n rec[\"sync_direction\"] = \"inbound\"\n r.table(\"sync_errors\").insert(rec).run(conn)\n return\n\n batch = batch_pb2.Batch()\n batch.ParseFromString(rec[\"batch\"])\n batch_list = batch_to_list(batch=batch)\n client = ClientSync()\n status = client.send_batches_get_status(batch_list=batch_list)\n while status[0][\"status\"] == \"PENDING\":\n LOGGER.info(\"Batch status is %s\", status)\n status = client.status_recheck(batch_list)\n if status[0][\"status\"] == \"COMMITTED\":\n if rec[\"data_type\"] == \"user\":\n insert_to_user_mapping(rec)\n if \"metadata\" in rec and rec[\"metadata\"]:\n data = {\n \"address\": rec[\"address\"],\n \"object_type\": rec[\"object_type\"],\n \"object_id\": rec[\"object_id\"],\n \"provider_id\": rec[\"provider_id\"],\n \"created_at\": r.now(),\n \"updated_at\": r.now(),\n **rec[\"metadata\"],\n }\n\n query = (\n r.table(\"metadata\")\n .get(rec[\"address\"])\n .replace(\n lambda doc: r.branch(\n # pylint: disable=singleton-comparison\n (doc == None), # noqa\n r.expr(data),\n doc.merge(\n {\"metadata\": rec[\"metadata\"], \"updated_at\": r.now()}\n ),\n )\n )\n )\n result = query.run(conn)\n if (not result[\"inserted\"] and not result[\"replaced\"]) or result[\n \"errors\"\n ] > 0:\n LOGGER.warning(\n \"error updating metadata record:\\n%s\\n%s\", result, query\n )\n rec[\"sync_direction\"] = \"inbound\"\n 
r.table(\"changelog\").insert(rec).run(conn)\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n else:\n rec[\"error\"] = get_status_error(status)\n rec[\"sync_direction\"] = \"inbound\"\n r.table(\"sync_errors\").insert(rec).run(conn)\n r.table(\"inbound_queue\").get(rec[\"id\"]).delete().run(conn)\n\n except Exception as err: # pylint: disable=broad-except\n LOGGER.exception(\n \"%s exception processing inbound record:\\n%s\", type(err).__name__, rec\n )\n LOGGER.exception(err)", "def _run(self) -> None:\n while True:\n args: MigrationArgs = self._queue.get(block=True)\n with self._lock:\n if args.collection in self._chunks:\n if args.shard_key not in self._chunks[args.collection]:\n self._split_chunk(args.collection, args.shard_key)\n self._move_chunk(args)", "def step060():\n logger.logMessage('Begin: updating database')\n update_sql = 'update weather_work set tsa=$1, esDocId = $2 where time = $3;'\n pgConn = pg.connect(host=host,user=user,password=password,database=database) \n c = pgConn.cursor()\n# c.execute('drop table weather_work')\n# c.execute('create table weather_work (like weather excluding constraints)')\n# c.execute('insert into weather_work select * from weather_dupes')\n# c.execute('create index weather_work_time on weather_work(time)')\n pgConn.commit()\n c.execute('prepare updtDocid as {0}'.format(update_sql))\n numUpdates = 0\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n tsa = int(fields[0])\n time = fields[1].rstrip() \n docid = fields[2].rstrip()\n try:\n dic = { 'esDocId': docid, 'tsa': tsa , 'time': time+\"+00:00\" }\n c.execute('execute updtDocid (%(tsa)s,%(esDocId)s,%(time)s)',dic)\n numUpdates += 1\n if numUpdates % 250 == 0:\n pgConn.commit()\n logger.logMessage(level='DEBUG',message=\"{0:9d} commited updates\".format(numUpdates))\n except:\n logger.logException('Exception while updating database')\n pgConn.rollback()\n raise\n line = f.readline().rstrip()\n pgConn.commit()\n logger.logMessage(\"Total updates: {0:d}\".format(numUpdates))\n c.close()\n pgConn.close()\n logger.logMessage('End : updating database')", "def run(self):\n list_count = self.queue_list.count()\n for i in range(list_count):\n if self._isRunning:\n currentItem = self.queue_list.item(0)\n self.statusChange.emit(currentItem.fName, currentItem.video, currentItem.audio)\n self.func(self.queue_list, 0)\n self.notifyProgress.emit((i+1)/list_count * 100) # current progress = completed / total jobs\n self.revertButton.emit(\"Convert\")\n # self.notifyProgress.emit(0)", "def _process_batch(self, subqueue):\n try:\n timeoutCall = None\n jo = None\n if self.max_batch_size == 1:\n #At time of writing, the regular nodes have broken JSON-RPC batch handling.\n #So when max_batch_size is set to one, we assume we need to work around this fact.\n jo = json.dumps(self.entries[subqueue[0]]._get_rpc_call_object())\n else:\n #The api.steemitstage.com node properly supports JSON-RPC batches, and so, hopefully soon, will the other nodes.\n qarr = list()\n for num in subqueue:\n qarr.append(self.entries[num]._get_rpc_call_object())\n jo = json.dumps(qarr)\n url = \"https://\" + self.nodes[self.node_index] + \"/\"\n url = str.encode(url)\n deferred = self.agent.request('POST',\n url,\n Headers({\"User-Agent\" : ['Async Steem for Python v0.6.1'],\n \"Content-Type\": [\"application/json\"]}),\n _StringProducer(jo))\n def process_one_result(reply):\n \"\"\"Process a single response from an JSON-RPC command.\"\"\"\n try:\n if \"id\" in 
reply:\n reply_id = reply[\"id\"]\n if reply_id in self.entries:\n match = self.entries[reply_id]\n if \"result\" in reply:\n #Call the proper result handler for the request that this response belongs to.\n match._handle_result(reply[\"result\"])\n else:\n if \"error\" in reply and \"code\" in reply[\"error\"]:\n msg = \"No message included with error\"\n if \"message\" in reply[\"error\"]:\n msg = reply[\"error\"][\"message\"]\n #Call the proper error handler for the request that this response belongs to.\n match._handle_error(reply[\"error\"][\"code\"], msg)\n else:\n self.log.error(\"Error: Invalid JSON-RPC response entry. {node!r}.\",node = self.nodes[self.node_index])\n #del self.entries[reply_id]\n else:\n self.log.error(\"Error: Invalid JSON-RPC id in entry {rid!r}. {node!r}\",rid=reply_id, node = self.nodes[self.node_index])\n else:\n self.log.error(\"Error: Invalid JSON-RPC response without id in entry: {reply!r}: {node!r}\",reply=reply, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in _process_one_result {err!r}, {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n def handle_response(response):\n \"\"\"Handle response for JSON-RPC batch query invocation.\"\"\"\n try:\n #Cancel any active timeout for this HTTPS call.\n if timeoutCall.active():\n timeoutCall.cancel()\n def cbBody(bodystring):\n \"\"\"Process response body for JSON-RPC batch query invocation.\"\"\"\n try:\n results = None\n #The bosy SHOULD be JSON, it not always is.\n try:\n results = json.loads(bodystring)\n except Exception as ex:\n #If the result is NON-JSON, may want to move to the next node in the node list\n self.log.error(\"Non-JSON response from server {node!r}\", node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if results != None:\n ok = False\n if isinstance(results, dict):\n #Running in legacy single JSON-RPC call mode (no batches), process the result of the single call.\n process_one_result(results)\n ok = True\n else:\n if isinstance(results, list):\n #Running in batch mode, process the batch result, one response at a time\n for reply in results:\n process_one_result(reply)\n ok = True\n else:\n #Completely unexpected result type, may want to move to the next node in the node list.\n self.log.error(\"Error: Invalid JSON-RPC response, expecting list as response on batch. {node!r}\",node = self.nodes[self.node_index])\n self._next_node()\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n if ok == True:\n #Clean up the entries dict by removing all fully processed commands that now are no longer in the queu.\n for request_id in subqueue:\n if request_id in self.entries:\n del self.entries[request_id]\n else:\n self.log.error(\"Error: No response entry for request entry in result: {rid!r}. {node!r}\",rid=request_id, node = self.nodes[self.node_index])\n except Exception as ex:\n self.log.failure(\"Error in cbBody {err!r}. {node!r}\",err=str(ex), node = self.nodes[self.node_index])\n #This HTTPS POST is now fully processed.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred2 = readBody(response)\n deferred2.addCallback(cbBody)\n return deferred2\n except Exception as ex:\n self.log.failure(\"Error in handle_response {err!r}. 
{node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addCallback(handle_response)\n def _handle_error(error):\n \"\"\"Handle network level error for JSON-RPC request.\"\"\"\n try:\n #Abandon any active timeout triggers\n if timeoutCall.active():\n timeoutCall.cancel()\n #Unexpected error on HTTPS POST, we may want to move to the next node.\n self.log.error(\"Error on HTTPS POST : {cls!r} : {err!r}. {node!r}\",cls=error.type.__name__,err=error.getErrorMessage(),node = self.nodes[self.node_index])\n self._next_node()\n except Exception as ex:\n self.log.failure(\"Error in _handle_error {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])\n #Add the failed sub-queue back to the command queue, we shall try again soon.\n self.queue = subqueue + self.queue\n ##If something went wrong, the HTTPS POST isn't active anymore.\n self.active_call_count = self.active_call_count - 1\n #Invoke self, possibly sending new queues RPC calls to the current node\n self()\n deferred.addErrback(_handle_error)\n timeoutCall = self.reactor.callLater(self.rpc_timeout, deferred.cancel)\n #Keep track of the number of active parallel HTTPS posts.\n self.active_call_count = self.active_call_count + 1\n return deferred\n except Exception as ex:\n self.log.failure(\"Error in _process_batch {err!r}. {node!r}\",err=str(ex),node = self.nodes[self.node_index])", "def process(self):\n\t\tif self.update_check():\n\t\t\tself.ingest_all()\n\t\t\tself.update_totals()\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')", "def _create_intermediate_delta_tables(self):\n with self._conn as conn:\n self._run_intermediate_table_job(conn, self._create_intermediate_delta_tables_structure)\n\n with self._conn as conn, futures.ThreadPoolExecutor(max_workers=self._nworkers) as executor:\n futures_to_cb = {}\n self._queue_intermediate_table_job(executor,\n futures_to_cb,\n self._populate_delta_blacklist,\n 'delta blacklist')\n for op in self._operators:\n self._queue_intermediate_table_job(executor,\n futures_to_cb,\n partial(self._populate_delta_notifications_list, op.id),\n 'delta notifications for {0}'.format(op.id))\n self._queue_intermediate_table_job(executor,\n futures_to_cb,\n partial(self._populate_delta_exceptions_list, op.id),\n 'delta exceptions for {0}'.format(op.id))\n self._wait_for_futures(futures_to_cb)\n\n # ANALYZE parent tables, which analyzes children as well\n with conn.cursor() as cursor:\n self._analyze_helper(cursor, self._notifications_lists_delta_tblname)\n self._analyze_helper(cursor, self._exceptions_lists_delta_tblname)", "def _store_list_deltas(self):\n # Create tables and commit immediately\n with self._conn as conn:\n self._run_intermediate_table_job(conn, self._create_missing_delta_storage_partitions)\n\n with self._conn as conn, futures.ThreadPoolExecutor(max_workers=self._nworkers) as executor:\n futures_to_cb = {}\n self._queue_delta_table_job(executor,\n futures_to_cb,\n self._store_blacklist_delta,\n 'blacklist')\n for op in self._operators:\n self._queue_delta_table_job(executor,\n futures_to_cb,\n partial(self._store_notifications_list_delta, op.id),\n 'notifications for {0}'.format(op.id))\n self._queue_delta_table_job(executor,\n futures_to_cb,\n partial(self._store_exceptions_list_delta, op.id),\n 'exceptions for {0}'.format(op.id))\n\n 
self._wait_for_futures(futures_to_cb)\n\n # ANALYZE parent tables, which analyzes children as well\n with conn.cursor() as cursor:\n self._analyze_helper(cursor, self._notifications_lists_tblname)\n self._analyze_helper(cursor, self._exceptions_lists_tblname)", "def handle_updates(self, update):\r\n self.__manage_pump()", "def trigger_update(self):\n update_thread = Thread(target=self.process_queued_msg)\n update_thread.setDaemon(True)\n update_thread.start()", "def _patch_update_stages(\n self,\n stage_changes_list: list[dict[str, Any]],\n changed_fields: CHANGED_FIELDS_LIST_TYPE\n ) -> bool:\n stages: list[Stage] = []\n for change_info in stage_changes_list:\n stage_was_updated = False\n # Check if valid ID is provided and fetch stage if it exists.\n if 'id' not in change_info:\n self.abort(400, msg='Missing stage ID in stage updates')\n id = change_info['id']\n stage = Stage.get_by_id(id)\n if not stage:\n self.abort(400, msg=f'Stage not found for ID {id}')\n\n # Update stage fields.\n for field, field_type in api_specs.STAGE_FIELD_DATA_TYPES:\n if field not in change_info:\n continue\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(stage, field)\n new_value = change_info[field]['value']\n self._update_field_value(stage, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n\n # Update milestone fields.\n milestones = stage.milestones\n for field, field_type in api_specs.MILESTONESET_FIELD_DATA_TYPES:\n if field not in change_info:\n continue\n if milestones is None:\n milestones = MilestoneSet()\n form_field_name = change_info[field]['form_field_name']\n old_value = getattr(milestones, field)\n new_value = change_info[field]['value']\n self._update_field_value(milestones, field, field_type, new_value)\n changed_fields.append((form_field_name, old_value, new_value))\n stage_was_updated = True\n stage.milestones = milestones\n\n if stage_was_updated:\n stages.append(stage)\n\n # Save all of the updates made.\n # Return a boolean representing if any changes were made to any stages.\n if stages:\n ndb.put_multi(stages)\n return True\n return False", "def _flush_enqueued(self):\n\n msgs = self.RPC.query.all()\n for msg in msgs:\n if msg.enqueued:\n if 'plan_name' in list(msg.ctxt.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.ctxt['plan_name']))\n elif 'plan_name' in list(msg.args.keys()): # Python 3 Conversion -- dict object to list object\n LOG.info('Plan name: {}'.format(msg.args['plan_name']))\n msg.delete()", "def queueStatus(targets):", "def updateJobsTable(self):\n self.checkJobsDict()\n jobdict = self.DB.meta.peatsa_jobs \n M = TableModel()\n #open job log from file\n f=open('jobstates.log','r')\n jl = pickle.load(f) \n for j in jobdict: \n jobid = jobdict[j] \n try:\n M.addRecord(j,state=jl[jobid]['State'],date=jl[jobid]['Date'])\n except:\n M.addRecord(j,state='Not in DB')\n self.jobstable = TableCanvas(self.tf, model=M, height=100, editable=False)\n self.jobstable.createTableFrame() \n self.log.yview('moveto', 1)\n f.close()\n return", "def update_all_queues(batchserver_name):\n server,created = getBatchServer(batchserver_name)\n if server.queues_lastupdate and (datetime.datetime.now()-server.queues_lastupdate).total_seconds()<GlobalConfiguration.objects.get(pk=1).max_lastupdate:\n logging.debug(\"Queue info is new enough for server: %s\" % batchserver_name)\n return\n\n conn = pbs.pbs_connect(batchserver_name.encode('iso-8859-1', 
'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statqueues = pbs.pbs_statque(conn, \"\" , [], \"\")\n pbs.pbs_disconnect(conn)\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n \n for sq in statqueues:\n queue,created = getQueue(sq.name, server)\n attr_dict = dict([ (x.name,x.value) for x in sq.attribs])\n update_one_queue_from_pbs_data(queue, attr_dict)\n queue.save()\n server.queues_lastupdate = datetime.datetime.now()\n server.save()", "def update_db(self):\n for tool in self.watchdb:\n if 'jobs' not in self.watchdb[tool]:\n continue\n for jname in self.watchdb[tool]['jobs']:\n job = self.watchdb[tool]['jobs'][jname]\n if 'timeout' in job:\n # Waiting on a restart or throttled,\n # leave the current state alone\n continue\n # Mark as dead pending verification of state from qstat\n job['state'] = 'DEAD'\n\n # Update the known state of all jobs from qstat data\n xml = ET.fromstring(subprocess.check_output(\n ['/usr/bin/qstat', '-u', '*', '-xml']))\n for j in xml.iter('job_list'):\n tool = j.find('JB_owner').text\n try:\n self.read_config(tool)\n except IOError:\n logger.exception('Failed to read config for %s', tool)\n continue\n\n if tool not in self.watchdb or 'jobs' not in self.watchdb[tool]:\n # Not watching any jobs for this tool\n continue\n\n jname = j.find('JB_name').text\n if jname not in self.watchdb[tool]['jobs']:\n # Not watching this job for this tool\n continue\n\n # Update the watched job's state\n job = self.watchdb[tool]['jobs'][jname]\n job['jname'] = jname\n job['state'] = j.find('state').text\n\n since_xml = j.find('JAT_start_time')\n if since_xml is None:\n since_xml = j.find('JB_submission_time')\n job['since'] = datetime.datetime.strptime(\n since_xml.text, '%Y-%m-%dT%H:%M:%S')\n\n if 'timeout' in job:\n del job['timeout']", "def _process_json(self, json_content):\n if self._ns_sqlcon.connection is None:\n LOG.error(f'failed to open connection to DB')\n return\n entries = [entry for entry in json_content]\n LOG.info('started updating DB')\n num_of_entries = len(entries)\n for x in range(num_of_entries):\n entry = entries[x]\n try:\n self._ns_sqlcon.update_plugins_table(entry['_source'])\n except AttributeError:\n LOG.exception(f'malformed entry: {entry}')\n if x % 2000 != 0:\n continue\n LOG.info(f'Updated {x} records')\n\n LOG.info(f'Updated {num_of_entries} records')\n try:\n LOG.info('Commit started')\n self._ns_sqlcon.session.commit()\n LOG.info('Commit finished')\n except sqlalchemy.exc.IntegrityError:\n LOG.exception('failed committing updates to DB')\n self._ns_sqlcon.session.rollback()\n\n LOG.info('Finished updating DB')", "def process_messages(self):\r\n for p in self._platforms.values():\r\n if p.received_messages > 0:\r\n p.queue_received_messages()\r\n for p in self._platforms.values():\r\n if p.queued_messages > 0:\r\n p.process_queued_messages()", "def __process_table_event(self, item):\n if item is not None:\n if isinstance(item, TableEvent) \\\n and item.table_event_type == TableEventType.INSERT \\\n and item.records is not None \\\n and len(item.records) > 0:\n for index, message in enumerate(item.records):\n print(message)\n elif isinstance(item, TableEvent) \\\n and (item.table_event_type in [TableEventType.DELETE, TableEventType.UPDATE]) \\\n and item.count is not None \\\n and item.count > 0:\n if item.table_event_type == TableEventType.DELETE:\n print(\"Records deleted = %s\" % item.count)\n else:\n 
print(\"Records updated = %s\" % item.count)", "def update_updated_data_sqlite_db(self, table_name: str):\n # go through indicators and get updated data in dataframe\n print('start downloading queries')\n df = self.__get_updated_data(table_name)\n print('api download completed')\n\n # get list of sql queries to insert to sqlite db\n print('start creating queries')\n q_list = self.__get_sql_insert_query_list(df, table_name)\n\n # insert data to sqlite\n print('start inserting data')\n AccessDB().run_insert_query(q_list)\n return 'Process Completed'", "def process_records(records):\n changes = defaultdict(int)\n cascaded_create_records = set()\n cascaded_publish_records = set()\n cascaded_unpublish_records = set()\n cascaded_undelete_records = set()\n cascaded_update_records = set()\n cascaded_delete_records = set()\n cascaded_location_changes = set()\n\n for record in records:\n if record.change != ChangeType.deleted and record.object is None:\n # Skip entries which are not deletions but have no corresponding objects.\n # Probably they are updates for objects that got deleted afterwards.\n continue\n if record.change == ChangeType.created:\n assert record.type != EntryType.category\n cascaded_create_records.add(record)\n elif record.change == ChangeType.published:\n cascaded_publish_records.add(record)\n elif record.change == ChangeType.unpublished:\n cascaded_unpublish_records.add(record)\n elif record.change == ChangeType.undeleted:\n assert record.type != EntryType.category\n cascaded_undelete_records.add(record)\n elif record.change == ChangeType.deleted:\n assert record.type != EntryType.category\n cascaded_delete_records.add(record)\n elif record.change in {ChangeType.moved, ChangeType.protection_changed}:\n cascaded_update_records.add(record)\n elif record.change == ChangeType.data_changed:\n assert record.type != EntryType.category\n changes[record.object] |= SimpleChange.updated\n # subcontributions have their parent's time information, so we need to\n # cascade contribution updates to them\n if record.type == EntryType.contribution:\n for subcontrib in record.object.subcontributions:\n changes[subcontrib] |= SimpleChange.updated\n elif record.change == ChangeType.location_changed:\n assert record.type in (EntryType.event, EntryType.contribution, EntryType.session)\n cascaded_location_changes.add(record)\n\n for obj in _process_cascaded_category_contents(cascaded_update_records):\n changes[obj] |= SimpleChange.updated\n\n for obj in _process_cascaded_category_contents(cascaded_unpublish_records):\n changes[obj] |= SimpleChange.deleted\n\n for obj in _process_cascaded_category_contents(cascaded_publish_records):\n changes[obj] |= SimpleChange.created\n\n for obj in _process_cascaded_event_contents(cascaded_delete_records):\n changes[obj] |= SimpleChange.deleted\n\n for obj in _process_cascaded_event_contents(cascaded_create_records, include_deleted=True):\n changes[obj] |= SimpleChange.created\n\n for obj in _process_cascaded_locations(cascaded_location_changes):\n changes[obj] |= SimpleChange.updated\n\n for obj in _process_cascaded_event_contents(cascaded_undelete_records, skip_all_deleted=True):\n # This may result in a create for an object which is already created - in the (somewhat rare)\n # case of a deletion being followed by a restore in the same set of records.\n # However, since we expect backends to either convert those operations to an update or skip\n # them altogether this shouldn't be a problem\n changes[obj] |= SimpleChange.created\n changes[obj] &= 
~SimpleChange.deleted\n\n created_and_deleted = {obj for obj, flags in changes.items() if (flags & CREATED_DELETED) == CREATED_DELETED}\n for obj in created_and_deleted:\n # discard any change where the object was both created and deleted\n del changes[obj]\n\n return {obj: _get_final_change(flags) for obj, flags in changes.items()}", "def queue_handler(self):\n work_queue = []\n query_count = 0\n\n while query_count < self.count:\n work_queue.append(self.build_packet(self.record))\n query_count += 1\n\n self.send_queries(work_queue)", "def continuous_migration():\n from redis import StrictRedis\n redis_url = current_app.config.get('CACHE_REDIS_URL')\n r = StrictRedis.from_url(redis_url)\n\n try:\n while r.llen('legacy_records'):\n raw_record = r.lpop('legacy_records')\n if raw_record:\n # The record might be None, in case a parallel\n # continuous_migration task has already consumed the queue.\n raw_record = zlib.decompress(raw_record)\n record = marc_create_record(raw_record, keep_singletons=False)\n recid = int(record['001'][0])\n prod_record = InspireProdRecords(recid=recid)\n prod_record.marcxml = raw_record\n try:\n with db.session.begin_nested():\n errors, dummy = create_record(\n record, force=True, validation=True\n )\n logger.info(\"Successfully migrated record {}\".format(recid))\n prod_record.successful = True\n prod_record.valid = not errors\n prod_record.errors = errors\n db.session.merge(prod_record)\n except Exception as err:\n logger.error(\"Error when migrating record {}\".format(recid))\n logger.exception(err)\n prod_record.successful = False\n db.session.merge(prod_record)\n finally:\n db.session.commit()\n db.session.close()", "def run(self):\n lineage_csv_gz = self.input_files_local[0][0]\n output_db = self.output_files_local()[0]\n log.write(f\"input: {lineage_csv_gz} output: {output_db}\")\n\n with IdSeqDictForUpdate(output_db, IdSeqDictValue.VALUE_TYPE_ARRAY) as lineage_dict:\n batch_list = {}\n with gzip.open(lineage_csv_gz, \"rt\") as gzf:\n for line in gzf:\n fields = line.rstrip().split(\",\")\n taxid = fields[0]\n species, genus, family = fields[-1:-4:-1]\n batch_list[taxid] = [species, genus, family]\n if len(batch_list) >= BATCH_INSERT_SIZE:\n lineage_dict.batch_inserts(batch_list.items())\n batch_list = {}\n lineage_dict.batch_inserts(batch_list.items())", "async def migrate_tables(self) -> int:\n for table in self._migration_queue:\n current_level = await self._get_migration_level(table['name'])\n assert current_level is not None\n await self._run_migrations(table['name'], current_level)\n return len(self._migration_queue)", "def updateFCFS_queue(self, junc):\n for tl_combination in junc.tl_combinations:\n for lane in tl_combination.corresponding_lanes:\n for vehicle in traci.lane.getLastStepVehicleIDs(lane.ID):\n junc.FCFS_queue[vehicle] = tl_combination.ryg_state", "def queueLoader(input_queue, blockm_df, config, start_time):\n continue_run = True\n try: \n # create the column names that will go in each numprov file\n column_names = [t for t in (''.join(tech) \\\n for tech in powerset(config['techlist']))]\n except:\n print('ERROR - STEP 2 (MASTER): FAILED CALLING POWERSET')\n print(traceback.format_exc())\n return False, None \n \n # build the data strings that will go into the queue\n if continue_run:\n # initialize the list that holds the paths to all of the temporary area \n # tables\n append_list = []\n try:\n temp_time = time.localtime()\n for i in range(len(config['speedList'])):\n numprov_file_path = config['temp_csvs_dir_path']\\\n + 
'block_numprov/block_numprov_%s_%s.csv'\\\n % (config['speedList'][i], config['fbd_vintage'])\n numprov_zero_file_path = config['temp_csvs_dir_path']\\\n +'block_numprov/block_numprov_%s_with_zero_%s.csv'\\\n % (config['speedList'][i], config['fbd_vintage'])\n temp_area_table_file_path = config['output_dir_path']\\\n +'area_table_%s.csv' % (config['speedList'][i])\n workerSpeed = config['speedList'][i]\n fbd_df = config['temp_pickles'] + 'enhanced_fbd_df.pkl'\n d_speed, u_speed = config['down_speed'][i], config['up_speed'][i]\n \n # insert the information into the queue\n temp_tuple = (numprov_file_path, numprov_zero_file_path, \n temp_area_table_file_path, workerSpeed, d_speed, \n u_speed, column_names, fbd_df, blockm_df, start_time)\n input_queue.put(temp_tuple) \n append_list.append(temp_area_table_file_path)\n\n my_message = \"\"\"\n INFO - STEP 2 (MASTER): TASK 4 of 5 - COMPLETED LOADING THE QUEUE TO MAKE BLOCK\n NUMPROV FILES,\n \"\"\"\n my_message = ' '.join(my_message.split())\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return True, append_list\n\n except:\n my_message = \"\"\"\n ERROR - STEP 2 (MASTER): TASK 4 of 5 - FAILED LOADING THE QUEUE TO MAKE BLOCK\n NUMPROV FILES,\n \"\"\"\n my_message = ' '.join(my_message.split())\n my_message += '\\n' + traceback.format_exc()\n print(nbmf.logMessage(my_message, temp_time, time.localtime(),\n time.mktime(time.localtime())-time.mktime(start_time)))\n return False, None", "def control(batch_info):\n \n sbatch, script, cps, mem = batch_info\n qsub_name = qsub_prep(sbatch, script, cps, mem)\n\n \n batch_id = os.popen(\"sbatch \" + qsub_name).read()\n batch_id = batch_id.strip().split()[-1]\n\n \n while True:\n output = os.popen(\"squeue -u koerstz\").read()\n \n if batch_id in output:\n time.sleep(5)\n continue \n \n else:\n break\n\n # create data frame:\n df = pd.read_pickle(sbatch + \".pkl\")\n \n return df", "def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0", "def process_system(self):\n if self.already_processed or self.dont_run or not self.system_valid:\n return\n\n self.reorder_udev_rules()\n self.update_fcoe_configs()\n self.update_ifaces_configs()\n\n self.commit()", "def process_deferred_queue(self):\n\n self.process_queue(self.deferred_queue)\n\n if self.depth_counter == 0:\n self.process_queue(self.complex_deferred_queue)", "def lambda_handler(event, context):\n raw_kinesis_records 
= event['Records']\n\n # Deaggregate all records in one call\n records = deaggregate_records(raw_kinesis_records)\n for record in records:\n # Kinesis data in Python Lambdas is base64 encoded\n payload = base64.b64decode(record['kinesis']['data'])\n # payload is the actual ion binary record published by QLDB to the stream\n ion_record = ion.loads(payload)\n print(\"Ion reocord: \", (ion.dumps(ion_record, binary=False)))\n\n if ((\"recordType\" in ion_record) and (ion_record[\"recordType\"] == \"REVISION_DETAILS\")):\n revision_data, revision_metadata = get_data_metdata_from_revision_record(ion_record)\n print(revision_metadata[\"version\"])\n table_info = get_table_info_from_revision_record(ion_record)\n\n # Check if new wallet is being created or balance update.\n if (revision_metadata[\"version\"] == 0): # a new wallet created\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo DB insertion\n print(\"Proceed to create wallet in dynamo userwallet table\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'Balance': revision_data[\"Balance\"],\n 'last_txn_source': revision_data[\"last_txn_source\"],\n 'last_txn_ref': revision_data[\"last_txn_ref\"],\n 'last_txn_type': revision_data[\"last_txn_type\"],\n 'last_txn_amount': revision_data[\"last_txn_amount\"],\n 'last_txn_date': revision_data[\"last_txn_date\"],\n 'version' : 0\n }\n )\n else: # Balance updates\n if (table_info and table_info[\"tableName\"] == \"Wallet\" and wallet_data_has_required_fields(\n revision_data)):\n # add dynamo db logic to update the balance\n print(\"Dyanmo update balance\")\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Wallet')\n response = table.update_item(\n Key={\n 'walletid': revision_data[\"walletid\"]\n },\n UpdateExpression=\"set Balance=:a , last_txn_source=:b , last_txn_ref=:c, last_txn_type=:d ,last_txn_amount=:e ,last_txn_date=:f ,version=:g\",\n ExpressionAttributeValues={\n ':a': revision_data[\"Balance\"],\n ':b': revision_data[\"last_txn_source\"],\n ':c': revision_data[\"last_txn_ref\"],\n ':d': revision_data[\"last_txn_type\"],\n ':e': revision_data[\"last_txn_amount\"],\n ':f': revision_data[\"last_txn_date\"] ,\n ':g': revision_metadata[\"version\"],\n },\n ConditionExpression=\"version < :g\",\n ReturnValues=\"UPDATED_NEW\"\n )\n\n # update all transactions to dynamodb except for getfunds\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('Transactions')\n response = table.put_item(\n Item={\n 'walletid': revision_data[\"walletid\"],\n 'updated_balance': revision_data[\"Balance\"],\n 'txn_source': revision_data[\"last_txn_source\"],\n 'txn_ref': revision_data[\"last_txn_ref\"],\n 'txn_type': revision_data[\"last_txn_type\"],\n 'txn_amount': revision_data[\"last_txn_amount\"],\n 'txn_date': revision_data[\"last_txn_date\"],\n 'version' : revision_metadata[\"version\"]\n }\n )\n\n return {\n 'statusCode': 200\n }", "def _process_change(self):\n self._sort_records()\n self._store_writer.to_file(self.records)\n self._store_writer.to_csv_file(self.records)", "def processEvents(self):\n self.framelist = sorted(self.framelist, key=lambda event: event.timestamp, reverse=True)\n self.framequeue = sorted(self.framequeue, key=lambda event: event.timestamp, reverse=True)\n self.packetqueue = sorted(self.packetqueue, key=lambda event: event.timestamp, reverse=True)\n \n print 
len(self.framequeue)\n print len(self.packetqueue)\n \n while len(self.framequeue) > 0 or len(self.packetqueue) > 0:\n self.getNextEvent().processEvent(self, self.decisionAlg)", "def post_apply(self): #pragma no cover\n for e in self.obs_queue:\n\n # translate operation wire labels to the device's wire labels\n device_wires = self.map_wires(e.wires)\n\n self.measure += \"set resultArray w/= {wires[0]} <- \".format(wires=device_wires.tolist())\n self.measure += self._observable_map[e.name].format(wires=device_wires.tolist())\n self.measure += \" \"\n\n self._source_code = PROGRAM.format(wires=self.num_wires, operations=self.prog, measurements=self.measure)\n self.qs = qsharp.compile(self._source_code)", "def update_issue_tracker():\n # Only process flakes that happened at least MIN_REQUIRED_FLAKY_RUNS times in\n # the last 24 hours.\n for flake in Flake.query(Flake.count_day >= MIN_REQUIRED_FLAKY_RUNS,\n projection=[Flake.count_day]):\n logging.info('Created processing task for %s' % flake.key)\n taskqueue.add(queue_name='issue-updates',\n url='/issues/process/%s' % flake.key.urlsafe())", "def process_product_queue_line_data(self):\n shopify_product_template_obj = self.env['shopify.product.template.ept']\n comman_log_obj = self.env[\"common.log.book.ept\"]\n shopify_tmpl_id = False\n\n product_queue_dict = {}\n queue_id = self.product_data_queue_id if len(self.product_data_queue_id) == 1 else False\n if queue_id:\n if queue_id.common_log_book_id:\n log_book_id=queue_id.common_log_book_id\n else:\n log_book_id=comman_log_obj.create({'type': 'import',\n 'module':'shopify_ept',\n 'shopify_instance_id':queue_id.shopify_instance_id.id,\n 'active':True})\n commit_count = 0\n for product_queue_line in self:\n commit_count += 1\n shopify_product_template_obj.shopify_sync_products(product_queue_line,shopify_tmpl_id,\n product_queue_line.shopify_instance_id,log_book_id)\n if commit_count == 10:\n self._cr.commit()\n commit_count = 0\n queue_id.common_log_book_id = log_book_id\n # draft_or_failed_queue_line = self.filtered(lambda line: line.state in ['draft', 'failed'])\n # if draft_or_failed_queue_line:\n # queue_id.write({'state': \"partially_completed\"})\n # else:\n # queue_id.write({'state': \"completed\"})\n if queue_id.common_log_book_id and not queue_id.common_log_book_id.log_lines:\n queue_id.common_log_book_id.unlink()\n return True", "def update_all_fa_tag():\n failed_dict = {}\n mb_remaining = 100\n requests_remaining = 100\n\n fa_table_ids = pybea.get_parameter_values(UserID, 'FixedAssets', ParameterName='TableName', ResultFormat='JSON')\n tablenames = fa_table_ids['TableName'].values\n\n table_name_col = []\n series_code_col = []\n period_col = []\n data_val_col = []\n line_description_col = []\n\n for x in tablenames:\n temp = pybea.get_data(UserID, 'FixedAssets', TableName=x, Year='ALL')\n # Compute how many megabytes each request is\n size = sys.getsizeof(temp) / 1000000\n mb_remaining -= size\n requests_remaining -= 1\n\n table_name = temp['TableName']\n series_code = temp['SeriesCode']\n period = temp['TimePeriod']\n data_val = temp['DataValue']\n line_description = temp['LineDescription']\n\n table_name_col.extend(table_name)\n series_code_col.extend(series_code)\n period_col.extend(period)\n data_val_col.extend(data_val)\n line_description_col.extend(line_description)\n\n time.sleep(1)\n if mb_remaining < 5:\n time.sleep(55)\n mb_remaining = 100\n requests_remaining = 100\n if requests_remaining < 2:\n time.sleep(45)\n mb_remaining = 100\n requests_remaining = 100\n if 
pybea.JSON_ERROR:\n failed_dict[x] = pybea.JSON_ERROR\n time.sleep(1)\n\n aggregate_fa = pd.DataFrame()\n aggregate_fa['line_number'] = table_name_col\n aggregate_fa['line_name_short'] = line_description_col\n aggregate_fa['series_code'] = series_code_col\n aggregate_fa['year'] = period_col\n aggregate_fa['value'] = data_val_col\n\n aggregate_fa.to_csv('../FA_ALL/aggregate_fa.csv', index=False)\n aggregate_fa.to_csv('aggregate_fa.csv', index=False)\n\n\n return failed_dict", "def preprocess_job(self):\r\n while not self._coordinator.should_stop():\r\n raw_entry = self._fread_queue.get()\r\n if raw_entry is None:\r\n return\r\n preprocessed_entry_dict = self.preprocess_entry(raw_entry)\r\n if preprocessed_entry_dict is not None:\r\n feed_dict = dict([(self._tensors_to_enqueue[label], value)\r\n for label, value in preprocessed_entry_dict.items()])\r\n try:\r\n self._tensorflow_session.run(self._enqueue_op, feed_dict=feed_dict)\r\n except (tf.errors.CancelledError, RuntimeError):\r\n break\r\n logger.debug('Exiting thread %s' % threading.current_thread().name)", "def _catchup(self, generator): \n\n dbmanager = self.engine.db_binder.get_manager(self.data_binding)\n # Find out when the database was last updated.\n lastgood_ts = dbmanager.lastGoodStamp()\n\n try:\n # Now ask the console for any new records since then.\n # (Not all consoles support this feature).\n for record in generator(lastgood_ts):\n self.engine.dispatchEvent(weewx.Event(weewx.NEW_ARCHIVE_RECORD,\n record=record,\n origin='hardware'))\n except weewx.HardwareError, e:\n syslog.syslog(syslog.LOG_ERR, \"engine: Internal error detected. Catchup abandoned\")\n syslog.syslog(syslog.LOG_ERR, \"**** %s\" % e)", "def onObjectUpdateCompressed(self, packet):\n\n object_list = []\n\n # ToDo: handle these 2 variables properly\n _RegionHandle = packet['RegionData'][0]['RegionHandle']\n _TimeDilation = packet['RegionData'][0]['TimeDilation']\n\n for ObjectData_block in packet['ObjectData']:\n\n object_properties = {}\n\n object_properties['UpdateFlags'] = ObjectData_block['UpdateFlags']\n object_properties['Data'] = ObjectData_block['Data']\n _Data = object_properties['Data']\n\n pos = 0 # position in the binary string\n object_properties['FullID'] = UUID(bytes = _Data, offset = 0) # LLUUID\n pos += 16\n object_properties['LocalID'] = struct.unpack(\"<I\", _Data[pos:pos+4])[0]\n pos += 4\n object_properties['PCode'] = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n\n if object_properties['PCode'] != PCodeEnum.Primitive: # if it is not a prim, stop.\n logger.warning('Fix Me!! 
Skipping parsing of ObjectUpdateCompressed packet when it is not a prim.')\n # we ought to parse it and make sense of the data...\n continue\n\n object_properties['State'] = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n object_properties['CRC'] = struct.unpack(\"<I\", _Data[pos:pos+4])[0]\n pos += 4\n object_properties['Material'] = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n object_properties['ClickAction'] = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n object_properties['Scale'] = Vector3(_Data, pos)\n pos += 12\n object_properties['Position'] = Vector3(_Data, pos)\n pos += 12\n object_properties['Rotation'] = Vector3(_Data, pos)\n pos += 12\n object_properties['Flags'] = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n object_properties['OwnerID'] = UUID(bytes = _Data, offset = pos)\n pos += 16\n\n # Placeholder vars, to be populated via flags if present\n object_properties['AngularVelocity'] = Vector3()\n object_properties['ParentID'] = UUID()\n object_properties['Text'] = ''\n object_properties['TextColor'] = None\n object_properties['MediaURL'] = ''\n object_properties['Sound'] = UUID()\n object_properties['Gain'] = 0\n object_properties['Flags'] = 0\n object_properties['Radius'] = 0\n object_properties['NameValue'] = NameValueList(None)\n object_properties['ExtraParams'] = None\n\n if object_properties['Flags'] != 0:\n\n logger.warning(\"FixMe! Quiting parsing an ObjectUpdateCompressed packet with flags due to incomplete implemention. Storing a partial representation of an object with uuid of %s\" % (object_properties['FullID']))\n\n # the commented code is not working right, we need to figure out why!\n # ExtraParams in particular seemed troublesome\n\n '''\n print 'Flags: ', Flags\n\n if (Flags & CompressedUpdateFlags.contains_AngularVelocity) != 0:\n _AngularVelocity = Vector3(_Data, pos)\n pos += 12\n print 'AngularVelocity: ', _AngularVelocity\n else:\n _AngularVelocity = None\n\n if (Flags & CompressedUpdateFlags.contains_Parent) != 0:\n _ParentID = UUID(_Data, pos)\n pos += 16\n print 'ParentID: ', _ParentID\n else:\n _ParentID = None\n\n if (Flags & CompressedUpdateFlags.Tree) != 0:\n # skip it, only iterate the position\n pos += 1\n print 'Tree'\n\n if (Flags & CompressedUpdateFlags.ScratchPad) != 0:\n # skip it, only iterate the position\n size = struct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n pos += size\n print 'Scratchpad size'\n\n if (Flags & CompressedUpdateFlags.contains_Text) != 0:\n # skip it, only iterate the position\n _Text = ''\n while struct.unpack(\">B\", _Data[pos:pos+1])[0] != 0:\n pos += 1\n pos += 1\n _TextColor = struct.unpack(\"<I\", _Data[pos:pos+4])[0]\n pos += 4\n print '_TextColor: ', _TextColor\n\n if (Flags & CompressedUpdateFlags.MediaURL) != 0:\n # skip it, only iterate the position\n _MediaURL = ''\n while struct.unpack(\">B\", _Data[pos:pos+1])[0] != 0:\n pos += 1\n pos += 1\n print '_MediaURL: ', _MediaURL\n\n if (Flags & CompressedUpdateFlags.contains_Particles) != 0:\n # skip it, only iterate the position\n ParticleData = _Data[pos:pos+86]\n pos += 86\n print 'Particles'\n\n # parse ExtraParams\n # ToDo: finish this up, for now we are just incrementing the position and not dealing with the data\n\n _Flexible = None\n _Light = None\n _Sculpt = None\n\n num_extra_params = struct.unpack(\">b\", _Data[pos:pos+1])[0]\n print 'Number of extra params: ', num_extra_params\n pos += 1\n\n for i in range(num_extra_params):\n\n # ExtraParam type\n extraparam_type = struct.unpack(\"<H\", 
_Data[pos:pos+2])[0]\n pos += 2\n\n datalength = struct.unpack(\"<I\", _Data[pos:pos+4])[0]\n print 'ExtraParams type: %s length: %s' % (extraparam_type, datalength)\n pos += 4\n\n pos += int(datalength)\n\n # ToDo: Deal with extra parameters\n #logger.warning(\"Incomplete implementation in onObjectUpdateCompressed when flags are present. Skipping parsing this object...\")\n #continue\n\n if (Flags & CompressedUpdateFlags.contains_Sound) != 0:\n # skip it, only iterate the position\n #_Sound = UUID(bytes = _Data[pos:pos+16])\n pos += 16\n print 'Sound'\n\n #_Gain = struct.unpack(\">f\", _Data[pos:pos+4])[0]\n pos += 4\n\n #_Flags = stuct.unpack(\">B\", _Data[pos:pos+1])[0]\n pos += 1\n\n #_Radius = struct.unpack(\">f\", _Data[pos:pos+4])[0]\n pos += 4\n\n if (Flags & CompressedUpdateFlags.contains_NameValues) != 0:\n # skip it, only iterate the position\n _NameValue = ''\n\n while _Data[pos:pos+1] != 0:\n #_NameValue += struct.unpack(\">c\", _Data[pos:pos+1])[0]\n pos += 1\n pos += 1\n '''\n\n object_properties['PathCurve'] = None\n object_properties['PathBegin'] = None\n object_properties['PathEnd'] = None\n object_properties['PathScaleX'] = None\n object_properties['PathScaleY'] = None\n object_properties['PathShearX'] = None\n object_properties['PathShearY'] = None\n object_properties['PathTwist'] = None\n object_properties['PathTwistBegin'] = None\n object_properties['PathRadiusOffset'] = None\n object_properties['PathTaperX'] = None\n object_properties['PathTaperY'] = None\n object_properties['PathRevolutions'] = None\n object_properties['PathSkew'] = None\n object_properties['ProfileCurve'] = None\n object_properties['ProfileBegin'] = None\n object_properties['ProfileEnd'] = None\n object_properties['ProfileHollow'] = None\n object_properties['TextureEntry'] = None\n object_properties['TextureAnim'] = None\n\n else:\n properties = [\n ('PathCurve', \">B\"),\n ('PathBegin', \"<H\"),\n ('PathEnd', \"<H\"),\n ('PathScaleX', \">B\"),\n ('PathScaleY', \">B\"),\n ('PathShearX', \">B\"),\n ('PathShearY', \">B\"),\n ('PathTwist', \">B\"),\n ('PathTwistBegin', \">B\"),\n ('PathRadiusOffset', \">B\"),\n ('PathTaperX', \">B\"),\n ('PathTaperY', \">B\"),\n ('PathRevolutions', \">B\"),\n ('PathSkew', \">B\"),\n ('ProfileCurve', \">B\"),\n ('ProfileBegin', \">B\"),\n ('ProfileEnd', \">B\"),\n ('ProfileHollow', \">B\")\n ]\n\n for prop, pack in properties:\n packsize = struct.calcsize(pack)\n object_properties[prop] = struct.unpack(pack, _Data[pos:pos+packsize])[0]\n pos += packsize\n\n # Texture handling\n size = struct.unpack(\"<H\", _Data[pos:pos+2])[0]\n pos += 2\n object_properties['TextureEntry'] = _Data[pos:pos+size]\n pos += size\n\n if (object_properties['Flags'] & CompressedUpdateFlags.TextureAnim) != 0:\n object_properties['TextureAnim'] = struct.unpack(\"<H\", _Data[pos:pos+2])[0]\n pos += 2\n else:\n object_properties['TextureAnim'] = None\n\n object_list.append(object_properties)\n\n self.update_multiple_objects_properties(object_list)", "def submit_to_queue(queue_df, conn, table_name):\n queue_df.to_sql(con=conn, name=table_name, if_exists='replace', index=False)\n print 'Inserted ' + str(len(queue_df)) + ' records to the task_queue'", "def stage(branch=None, role='dev'):\n update_function = get_update_function()\n branch = branch or get_git_branch()\n\n project_path = fb_env.role(role, 'project_path')\n\n with cd(project_path):\n previous_head = update_function(branch)\n puts('Previous remote HEAD: {0}'.format(previous_head))\n run('./fbmvc dbdump')\n run('./fbmvc migrate latest')", 
"def monitor_queue(queue_id):\n current = dt.datetime.now()\n queue_log = {}\n for sub_id in get_submissions(queue_id=queue_id):\n submission = get_submission_bundle(queue_id, sub_id)\n if submission['status'] == 'RECEIVED':\n queue_log[sub_id] = {'status': 'PENDING'}\n continue\n run_log = submission['run_log']\n if run_log['run_id'] == 'failed':\n queue_log[sub_id] = {'status': 'FAILED'}\n continue\n run_log['wes_id'] = submission['wes_id']\n if run_log['status'] in ['COMPLETE', 'CANCELED', 'EXECUTOR_ERROR']:\n queue_log[sub_id] = run_log\n continue\n wes_instance = WESService(submission['wes_id'])\n run_status = wes_instance.get_run_status(run_log['run_id'])\n\n if run_status['state'] in ['QUEUED', 'INITIALIZING', 'RUNNING']:\n etime = convert_timedelta(\n current - ctime2datetime(run_log['start_time'])\n )\n elif 'elapsed_time' not in run_log:\n etime = 0\n else:\n etime = run_log['elapsed_time']\n\n run_log['status'] = run_status['state']\n run_log['elapsed_time'] = etime\n\n update_submission(queue_id, sub_id, 'run_log', run_log)\n\n if run_log['status'] == 'COMPLETE':\n wf_config = queue_config()[queue_id]\n sub_status = run_log['status']\n if wf_config['target_queue']:\n # store_verification(wf_config['target_queue'],\n # submission['wes_id'])\n sub_status = 'VALIDATED'\n update_submission(queue_id, sub_id, 'status', sub_status)\n\n queue_log[sub_id] = run_log\n\n return queue_log", "def _postprocess_staging_data(self):\n super()._postprocess_staging_data()\n with self._conn, self._conn.cursor() as cursor:\n cursor.execute(sql.SQL(\"\"\"UPDATE {0} SET rat_bitmask = translate_bands_to_rat_bitmask(bands)\"\"\")\n .format(self._staging_tbl_identifier))", "def upgrade():\n with op.batch_alter_table(\"dag_run\") as batch_op:\n batch_op.create_index(\n \"idx_dag_run_queued_dags\",\n [\"state\", \"dag_id\"],\n postgresql_where=text(\"state='queued'\"),\n mssql_where=text(\"state='queued'\"),\n sqlite_where=text(\"state='queued'\"),\n )", "def update_status(cls):\n for job in cls.query.filter(cls.finished == False):\n num_hits_left = session.query(BoxHit).filter_by(training_job_id = job.id, outstanding=True).count()\n urls_left = session.query(VideoTrainingURL).filter_by(training_job_id=job.id, processed = False)\n dynamo = DynamoIngestionStatusClient()\n num_urls_left = 0\n for url in urls_left:\n dynamo_url = dynamo.get(url.url)\n if dynamo_url is None or dynamo_url['status'] == 'Failed':\n # will never be processed, so ignore for our purposes\n url.processed = True\n else:\n num_urls_left += 1\n if num_hits_left+num_urls_left == 0:\n job.finished = True\n print '*** Job ID: %s is complete ***' % str(job.id)\n\n print '------------- Stats for Job ID: %s -------------' % str(job.id)\n print 'Total URLs : %i' % VideoTrainingURL.query.filter_by(training_job_id = job.id).count()\n print 'Total HITs : %i' % BoxHit.query.filter_by(training_job_id = job.id).count()\n if not job.finished:\n print 'unprocessed URLs: %i' % num_urls_left\n print 'outstanding HITs: %i\\n' % num_hits_left\n session.flush()", "def emit(self, record):\n self.eng.dispose()\n trys = 3\n info = {key: value for key, value in record.__dict__.items() if not key.startswith(\"__\")}\n for t in range(trys):\n # Having what i think is an issue to reflect so try a couple times and don't complain if breaks\n failed = False\n try:\n with self.eng.connect() as con:\n # formating values to convert from python or parsl to db standards\n info['task_fail_history'] = str(info.get('task_fail_history', None))\n info['timestamp'] = 
time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(record.created))\n run_id = info['run_id']\n\n # if workflow or task has completed, update their entries with the time.\n if 'time_completed' in info.keys() and info['time_completed'] != 'None':\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(time_completed=info['time_completed']).where(workflows.c.run_id == run_id)\n con.execute(up)\n return\n if 'task_time_returned' in info.keys() and info['task_time_returned'] is not None:\n workflow = self.meta.tables['task']\n up = workflow.update().values(task_time_returned=info['task_time_returned']).where(workflow.c.task_id == info['task_id'])\\\n .where(workflow.c.run_id == run_id)\n con.execute(up)\n\n # create workflows table if this is a new database without one\n if 'workflows' not in self.meta.tables.keys():\n workflows = create_workflows_table(self.meta)\n self.meta.create_all(con)\n # if this is the first sight of the workflow, add it to the workflows table\n if len(con.execute(self.meta.tables['workflows'].select(self.meta.tables['workflows'].c.run_id == run_id)).fetchall()) == 0:\n workflows = self.meta.tables['workflows']\n ins = workflows.insert().values(**{k: v for k, v in info.items() if k in workflows.c})\n con.execute(ins)\n\n # if log has task counts, update the workflow entry in the workflows table\n if 'tasks_completed_count' in info.keys():\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(tasks_completed_count=info['tasks_completed_count']).where(workflows.c.run_id == run_id)\n con.execute(up)\n if 'tasks_failed_count' in info.keys():\n workflows = self.meta.tables['workflows']\n up = workflows.update().values(tasks_failed_count=info['tasks_failed_count']).where(workflows.c.run_id == run_id)\n con.execute(up)\n\n # create task table if this is a new run without one\n if 'task' not in self.meta.tables.keys():\n workflow = create_task_table(self.meta)\n self.meta.create_all(con)\n\n # check to make sure it is a task log and not just a workflow overview log\n if info.get('task_id', None) is not None:\n # if this is the first sight of the task in the workflow, add it to the workflow table\n if len(con.execute(self.meta.tables['task'].select(self.meta.tables['task'].c.task_id == info['task_id'])\n .where(self.meta.tables['task'].c.run_id == run_id)).fetchall()) == 0:\n if 'psutil_process_pid' in info.keys():\n # this is the race condition that a resource log is before a status log so ignore this resource update\n return\n\n workflow = self.meta.tables['task']\n ins = workflow.insert().values(**{k: v for k, v in info.items() if k in workflow.c})\n con.execute(ins)\n\n if 'task_status' in info.keys():\n # if this is the first sight of a task, create a task_status_table to hold this task's updates\n if 'task_status' not in self.meta.tables.keys():\n task_status_table = create_task_status_table(self.meta)\n self.meta.create_all(con)\n con.execute(task_status_table.insert().values(**{k: v for k, v in info.items() if k in task_status_table.c}))\n # if this status table already exists, just insert the update\n else:\n task_status_table = self.meta.tables['task_status']\n con.execute(task_status_table.insert().values(**{k: v for k, v in info.items() if k in task_status_table.c}))\n return\n\n if 'psutil_process_pid' in info.keys():\n # if this is a task resource update then handle that, if the resource table DNE then create it\n if 'task_resources' not in self.meta.tables.keys():\n task_resource_table = 
create_task_resource_table(self.meta)\n self.meta.create_all(con)\n con.execute(task_resource_table.insert().values(**{k: v for k, v in info.items() if k in task_resource_table.c}))\n # if this resource table already exists, just insert the update\n else:\n task_resource_table = self.meta.tables['task_resources']\n con.execute(task_resource_table.insert().values(**{k: v for k, v in info.items() if k in task_resource_table.c}))\n return\n\n except Exception as e:\n logger.error(\"Try a couple times since some known issues can occur. Number of Failures: {} Error: {}\".format(t, str(e)))\n failed = True\n time.sleep(5)\n if not failed:\n return", "async def update_model(model_updates):\n async for model_update in model_updates:\n model_location = model_update['model_location']\n print(f\"Updating model to: {model_location}\")\n\n # using incrementing version number to keep track of live model\n # but obviously doesnt work for a real distributed system \n model_table['live_version'] += 1\n model_table['model_location'] = model_location", "def process_todo_q(self):\n self.logger.debug(\"==> %s files to process\" % len(self.todo_queue))\n\n while len(self.todo_queue) > 0:\n if len(self.active_queue) == 0:\n # add job to [active] queue...\n self.active_queue.append(self.todo_queue.pop(0))\n job_id = self.active_queue[0][\"id\"]\n # ...log his 'id'...\n self.logger.info(\"[active/%s] processing file '%s'\"\n % (job_id,\n self.active_queue[0][\"objects_filename\"]))\n # ...and process it\n has_config, cfg_file = self._check_object_config()\n if has_config:\n self.logger.debug(\"[active/%s] config file '%s' is present\"\n % (job_id,\n cfg_file))\n self._set_target_symlinks()\n self._run_operations()\n else:\n self.logger.error(\"[active/%s] config file '%s' is absent\"\n % (job_id,\n cfg_file))\n self._send_alert(\"the configuration file is absent '%s'\" %\n cfg_file)\n\n # remove the job from the [active] queue\n self.active_queue = []\n else:\n raise ProfileProcessingError(\"only one job is permitted \\\n in [active] queue\")\n\n self.logger.info(\"all files has been processed\")", "def main():\r\n # Process fsevents\r\n FSEventHandler()\r\n\r\n # Commit transaction\r\n SQL_CON.commit()\r\n\r\n # Close database connection\r\n SQL_CON.close()", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def process(self):\n\t\tif self.update_check() or self.force_update:\n\t\t\tself.district_check() #pull all local data and regions\n\t\t\tself.fix() #fix data 
anomalies - e.g add in Bucks.\n\t\t\tself.save_all() #store a copy of the data\n\t\t\tself.ingest() #add data to models\n\t\t\tself.update_totals() #calculate weekly data\n\t\telse:\n\t\t\tlog.info('PHE cases up to date')", "def startUpdates(self):\r\n # Analytics stream\r\n self.blptsAnalytics = blpapiwrapper.BLPTS()\r\n self.streamWatcherAnalytics = StreamWatcher(self, BloombergQuery.ANALYTICS)\r\n self.blptsAnalytics.register(self.streamWatcherAnalytics)\r\n # Price only stream\r\n self.blptsPriceOnly = blpapiwrapper.BLPTS()\r\n self.streamWatcherPriceOnly = StreamWatcher(self, BloombergQuery.PRICEONLY)\r\n self.blptsPriceOnly.register(self.streamWatcherPriceOnly)\r\n # Price change subscription\r\n self.streamWatcherBID = StreamWatcher(self,BloombergQuery.BID)\r\n self.bbgstreamBIDEM = blpapiwrapper.BLPStream(list((self.embondsisins + BBGHand + ' Corp').astype(str)), 'BID', 0)\r\n self.bbgstreamBIDEM.register(self.streamWatcherBID)\r\n self.bbgstreamBIDEM.start()\r\n # Risk free bonds: no streaming as too many updates - poll every 15 minutes\r\n rfRequest = blpapiwrapper.BLPTS(list((self.rfbondsisins + '@CBBT' + ' Corp').astype(str)), self.bbgPriceRFQuery)\r\n self.RFtimer = RFdata(900, rfRequest, self)\r\n self.BDMdata = BDMdata(900, self) #15 MINUTES\r\n self.BDMEODsave = BDMEODsave(self)", "def processInterfaceStatusUpdate(iTag, status): #@NoSelf", "def on_doctype_update():\n\tfrappe.db.add_index(\n\t\t\"Email Queue\", (\"status\", \"send_after\", \"priority\", \"creation\"), \"index_bulk_flush\"\n\t)\n\n\tfrappe.db.add_index(\"Email Queue\", [\"message_id(140)\"])", "def update_compdatabase():\n for comp_group in comp_entry:\n#\n#--- read the last set of the input data and find the last entry \n#\n past = house_keeping + comp_group + '_past'\n past = mcf.read_data_file(past)\n\n last = past[-1]\n#\n#--- find today's data entry\n#\n cmd = 'ls /data/mta_www/mp_reports/*/' + comp_group + '/data/mta*fits* >' + zspace\n os.system(cmd)\n current = mcf.read_data_file(zspace)\n\n cmd = 'mv '+ zspace + ' ' + house_keeping + comp_group + '_past'\n os.system(cmd)\n#\n#--- find the data which are not read\n#\n new_fits = []\n chk = 0\n for ent in current:\n if chk == 0:\n if ent == last:\n chk = 1\n continue\n new_fits.append(ent)\n#\n#--- uppend the data to the local fits data files\n#\n for fits in new_fits:\n [cols, tbdata] = ecf.read_fits_file(fits)\n\n time = tbdata['time']\n\n for col in cols:\n#\n#--- ignore columns with \"ST_\" (standard dev) and time\n#\n if col.lower() == 'time':\n continue\n\n mc = re.search('st_', col.lower())\n if mc is not None:\n continue\n\n mdata = tbdata[col]\n cdata = [time, mdata]\n ocols = ['time', col.lower()]\n\n ofits = out_dir + col.lower()+ '_full_data.fits'\n if os.path.isfile(ofits):\n update_fits_file(ofits, ocols, cdata)\n else:\n create_fits_file(ofits, ocols, cdata)", "def handle_wps_update(self, data):\n\n self.jobs = data", "def check_on_updates(rqueue: RQueue, bot: telegram.Bot):\n updates = rqueue.get_keys(\"updates:*\")\n if len(updates) != 0:\n for update_key in updates:\n update = rqueue.get_key(update_key)\n edit_updated(update, update_key.decode('utf-8'), bot)\n rqueue.delete_key(update_key)\n time.sleep(2)\n time.sleep(0.1)", "def process_queue(self):\n while self.input_processing_running:\n\n # Process everything in the queue.\n while self.input_queue.qsize() > 0:\n try:\n _telem = self.input_queue.get_nowait()\n self.process_telemetry(_telem)\n\n except Exception as e:\n self.log_error(\"Error processing telemetry dict - %s\" % 
str(e))\n\n # Sleep while waiting for some new data.\n time.sleep(0.5)", "def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')", "def payload_status(conn):\r\n time_old = 0\r\n global _status_old\r\n cur = conn.cursor()\r\n cur.execute(\"SELECT * FROM PLFDataLog\")\r\n rows = cur.fetchall()\r\n\r\n for row in rows:\r\n status = (row[2][23] << 24)|(row[2][22] << 16)|(row[2][21] << 8)|(row[2][20])\r\n speed = (row[2][19] << 24) | (row[2][18] << 16) | (row[2][17] << 8) | (row[2][16])\r\n filter = (row[2][15] << 24) | (row[2][14] << 16) | (row[2][13] << 8) | (row[2][12])\r\n ton = (row[2][11] << 24) | (row[2][10] << 16) | (row[2][9] << 8) | (row[2][8])\r\n time_stamp = (row[2][7] << 56) | (row[2][6] << 48) | (row[2][5] << 40) | (row[2][4]|row[2][23] << 32) | (row[2][3] << 24) | (row[2][2] << 16) | (row[2][1])<<8 |(row[2][0])\r\n if status == 0:\r\n st_status = 'STATE_UNKNOWN'\r\n elif status ==1:\r\n st_status = 'EMPTY_STOP'\r\n elif status ==2:\r\n st_status = 'EMPTY_MOVE'\r\n elif status ==3:\r\n st_status = 'LOADING'\r\n elif status ==4:\r\n st_status = 'LOADED_MOVE'\r\n elif status ==5:\r\n st_status = 'LOADED_STOP'\r\n elif status ==6:\r\n st_status = 'DUMPING'\r\n time_stamp=round((time_stamp/1000.),2)\r\n time_diff=round(((time_stamp-time_old)),2)\r\n ton = (ton/1000)\r\n filter = (filter/1000)\r\n speed = (speed/1000)\r\n slopecal(ton)#get the slope curve\r\n tonfilter(ton,speed)#get the right tonnage and status\r\n print(\"time_stamp (seg): \", time_stamp,\" time_diff: \",time_diff,\" tons: \", ton,\" ton filtered: \", filter,\" speed: \",speed ,\" status: \", st_status,\" filter ton: \", _ton,\" new status: \", _status, \" slope: \", _slope )\r\n _time.append(time_stamp)\r\n _raw_ton.append(ton)\r\n _filt_ton.append(_ton)\r\n _filt_status.append(_status)\r\n with open('/Users/jav/Desktop/komatsu.csv', 'a', newline='') as file:\r\n writer = csv.writer(file)\r\n writer.writerow([\"time_stamp (seg): \", time_stamp,\" time_diff: \",time_diff,\" tons: \", ton,\" ton filtered: \", filter,\" speed: \", speed,\" status: \", st_status, \" Filter ton: \",_ton, \" new Status: \", _status, \" slope: \", _slope])\r\n time_old = time_stamp\r\n #drawnow(makeFig)\r\n #break\r\n drawnow(makeFig)\r\n plt.pause(30)", "def update_one_queue(queue):\n conn = pbs.pbs_connect(queue.server.name.encode('iso-8859-1', 'replace'))\n if conn==-1:\n logging.error(\"Cannot connect to %s - live data will be missing\" % server.name)\n return\n statqueues = pbs.pbs_statque(conn, queue.name.encode('iso-8859-1', 'replace') , [], \"\")\n pbs.pbs_disconnect(conn)\n if len(statqueues)==0:\n logging.error(\"pbs_statque failed for queue: %s\" % queue.name)\n return\n if len(statqueues)>1:\n logging.warning(\"pbs_statque returned more than one records for queue: %s\" % queue.name)\n\n attr_dict = dict([ (x.name,x.value) for x in statqueues[0].attribs])\n update_one_queue_from_pbs_data(queue, attr_dict)\n queue.save()", "def test13(self):\n ###get a block to migrate from global dbs\n 
dest_datasets = set((dataset['dataset'] for dataset in self.api.listDatasets()))\n ###only dataset after last DBS2->3 because of the parentage issue in DBS 2 min_cdate=1368162000 =10May2013\n src_datasets = set((dataset['dataset'] for dataset in self.cmsweb_api.listDatasets(min_cdate=1368162000)))\n dataset_to_migrate = choice(list(src_datasets.difference(dest_datasets)))\n block_to_migrate = choice([block['block_name']\n for block in self.cmsweb_api.listBlocks(dataset=dataset_to_migrate)])\n\n ###submit migration request\n toMigrate = {'migration_url': self.source_url,\n 'migration_input': block_to_migrate}\n migration_request = self.migration_api.submitMigration(toMigrate)\n self.assertTrue('migration_request_id' in migration_request['migration_details'])\n migration_request_id = migration_request['migration_details']['migration_request_id']\n print(\"____toMigrate___\")\n print(toMigrate)\n print(\"----------migration_request -----------\")\n print(migration_request) \n\n ###check migration status for max. 300s (should be enough time to migrate the dataset)\n with Timeout(300):\n while True:\n request_status = self.migration_api.statusMigration(migration_rqst_id=migration_request_id)\n if request_status[0]['migration_status'] == 2:\n break\n\n ###validate block migration\n def check(input, output):\n non_comparable_keys = ('block_id', 'dataset_id', 'last_modification_date',\n 'parent_file_id', 'primary_ds_id')\n if isinstance(input, dict):\n for key, value in input.items():\n if key in non_comparable_keys:\n continue ###do not compare id's\n if key in ('processing_era',): ###do compare create_by, creation_date for re-used entries\n for key2remove in ('create_by', 'creation_date',):\n try:\n del input[key][key2remove]\n del output[key][key2remove]\n except KeyError:\n pass\n self.assertTrue(key in output)\n check(value, output[key])\n elif isinstance(input, list):\n for element_in, element_out in zip(sorted(remove_non_comparable_keys(input, non_comparable_keys)),\n sorted(remove_non_comparable_keys(output, non_comparable_keys))):\n check(element_in, element_out)\n else:\n self.assertEqual(str(input), str(output))\n\n block_dump_src = self.cmsweb_api.blockDump(block_name=block_to_migrate)\n block_dump_dest = self.api.blockDump(block_name=block_to_migrate)\n check(block_dump_src, block_dump_dest)\n\n ###try to delete successfully executed migration request\n toDelete = {'migration_rqst_id': migration_request_id}\n self.assertRaises(HTTPError, self.migration_api.removeMigration, toDelete)", "def _compute_queue_line_record(self):\n for product_queue in self:\n queue_lines = product_queue.product_data_queue_lines\n product_queue.queue_line_total_records = len(queue_lines)\n product_queue.queue_line_draft_records = len(\n queue_lines.filtered(lambda x:x.state == 'draft'))\n product_queue.queue_line_fail_records = len(\n queue_lines.filtered(lambda x:x.state == 'failed'))\n product_queue.queue_line_done_records = len(\n queue_lines.filtered(lambda x:x.state == 'done'))\n product_queue.queue_line_cancel_records = len(\n queue_lines.filtered(lambda x:x.state == 'cancel'))", "def execute(self):\r\n global db_runtime_context\r\n if db_runtime_context.current_db is None:\r\n print(\"!Failed to execute query because no database is selected!\")\r\n return None \r\n\r\n self.tableName = self.tableName.lower()\r\n \r\n if self.tableName is not None:\r\n\r\n update_table = db_runtime_context.current_db.getTableByName(self.tableName)\r\n\r\n if update_table is not None:\r\n pass\r\n else:\r\n 
print(\"!Failed to execute query on table\", self.tableName, \"because it does not exist!\")\r\n return None \r\n\r\n # Check for a lock\r\n if not db_runtime_context.current_db.isWritable(update_table.tableName):\r\n print(f\"Error: Table {update_table.tableName} is locked!\")\r\n return\r\n\r\n\r\n\r\n db_runtime_context.current_db.tables[self.tableName].update(self.targets, self.conditions)\r\n\r\n db_runtime_context.current_db.successfulTransactions += 1", "def run(self):\n\n # driver=\"H5FD_CORE\" another driver for Solid State devs?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(where=theFile.root,\n name=\"log\",\n atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n\n try:\n # do a loop!\n while True:\n try:\n msg = self.transitionsPipe.recv()\n # msg=messagequeue.get()\n except EOFError:\n break\n cmd = msg[0]\n if cmd == \"parameters\":\n # expect two dictionaries\n parameters, runParameters = msg[1], msg[2]\n\n if type(parameters) is dict:\n if \"/parameters\" in theFile:\n parameterTable = theFile.root.parameters\n else:\n parameterTable = theFile.create_table(\n \"/\",\n \"parameters\",\n HDFLoggingProcess.parameterTableFormat)\n parameterRow = parameterTable.row\n varTypeEnum = parameterTable.coldescrs[\"varType\"].enum\n varTypeDict = {int: varTypeEnum[\"INT\"],\n str: varTypeEnum[\"STR\"],\n float: varTypeEnum[\"FLOAT\"],\n bool: varTypeEnum[\"BOOL\"]}\n runType = varTypeEnum[\"RUN\"]\n\n for k, v in parameters.items():\n varType = varTypeDict[type(v)]\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = varType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n for k, v in runParameters.items():\n parameterRow[\"varName\"] = str(k)\n parameterRow[\"varType\"] = runType\n parameterRow[\"varValue\"] = str(v)\n parameterRow.append()\n\n parameterTable.close()\n del parameterRow, parameterTable\n elif type(parameters) is scenario:\n print(\"writing scenarios\")\n parameters.writeToHDF(theFile.root, 'scenario')\n else:\n print(\"unsupported type: {}\".format(type(parameters)))\n\n # need a table def and a transition log\n elif cmd == \"registerTransitionType\":\n # change lists to enumerations!\n # expect list of extra columns as msg[2]\n theColumns = {}\n for name, col in msg[2].items():\n if type(col) is dict:\n # this is an enumeration type used\n # for the from/to state\n col = tables.EnumCol(tables.Enum(col),\n \"start\",\n \"uint16\")\n elif type(col) is str:\n # column of type defined by string\n col = eval(col) # ToDo: remove eval\n theColumns[name] = col\n\n # gets species name and table format as dict\n transitions = type(\"transitions\",\n (tables.IsDescription,),\n theColumns)\n speciesTables[msg[1]] = theFile.create_table(\n \"/transitionLogs\",\n msg[1],\n transitions,\n filters=tables.Filters(\n complevel=9,\n complib=\"lzo\",\n least_significant_digit=3))\n\n elif cmd == \"changeFile\":\n # close tables and file\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile\n\n # set new file name\n self.hdfFileName = msg[1]\n # open new one\n # potentially a driver=\"H5FD_CORE\" ?\n theFile = tables.open_file(self.hdfFileName, \"w\")\n theFile.create_group(\"/\", \"transitionLogs\")\n theLog = theFile.create_earray(\n where=theFile.root,\n name=\"log\",\n 
atom=tables.StringAtom(itemsize=120),\n shape=(0,),\n title=\"log messages\",\n filters=tables.Filters(complevel=9,\n complib='zlib'))\n speciesTables = {}\n # expecting replay of species tables\n\n elif cmd == \"logTransition\":\n # gets species name and values in order as defined by the\n # table format\n # todo: check the format!\n table = speciesTables[msg[1]]\n row = table.row\n agentId, t1, t2, fromState, toState, effort = msg[2]\n row[\"agentId\"] = agentId\n row[\"timeStamp\"] = t2\n row[\"fromState\"] = fromState\n row[\"toState\"] = toState\n row[\"dwellTime\"] = t2-t1\n row[\"effort\"] = effort\n\n if len(msg) > 2:\n # are there any extra parameters?\n for name, value in msg[3].items():\n if type(value) is str:\n row[name] = numpy.array(value.encode(),\n dtype=\"S\")\n else:\n row[name] = value\n row.append()\n del table, row\n\n # also a progress table\n elif cmd == \"progress\":\n # if not there, create new table\n if \"/progress\" not in theFile:\n theFile.create_table(\n '/',\n 'progress',\n HDFLoggingProcess.hdfProgressTable)\n # add values as they are...\n theFile.root.progress.append([msg[1]])\n\n elif cmd == \"message\":\n theLog.append(numpy.array([str(msg[1])], dtype=\"S120\"))\n\n elif cmd == \"end\":\n break\n\n else:\n print(\"unknown type {}\".format(msg[0]))\n except:\n raise\n finally:\n # messagequeue.close()\n self.transitionsPipe.close()\n del self.transitionsPipe\n # print(\"finished \", messagepipe)\n # done, be pedantic about closing all resources\n for t in speciesTables.values():\n t.close()\n del t\n del speciesTables\n theLog.close()\n del theLog\n theFile.close()\n del theFile", "def run(self):\r\n counter = 0\r\n counter_increment = 1000 # Reporting frequency\r\n\r\n last_time = 0\r\n \r\n if get_param(\"record_queue_state\"):\r\n # Add event to query queue state.\r\n query_interval = 1\r\n report_queue_state = RecordQueueState(self.servers,\r\n self.stats_manager,\r\n query_interval)\r\n self.event_queue.put((query_interval, report_queue_state))\r\n while len(self.stats_manager.completed_jobs) < self.total_jobs:\r\n assert(not self.event_queue.empty())\r\n current_time, event = self.event_queue.get()\r\n \r\n #if current_time >= 3.0 * get_param(\"total_time\") / 4.0:\r\n # set_param(\"relative_weights\", \"1,2\")\r\n #elif current_time >= 1.0 * get_param(\"total_time\") / 2.0:\r\n # set_param(\"relative_weights\", \"1,4\")\r\n\r\n assert(current_time >= last_time)\r\n last_time = current_time\r\n\r\n if current_time > counter:\r\n counter = counter + counter_increment\r\n new_events = event.run(current_time)\r\n if new_events:\r\n for new_event in new_events:\r\n self.event_queue.put(new_event)\r\n \r\n self.stats_manager.output_stats()\r\n \r\n output_params()", "def update(self):\n\n\t\tif not self.complete:\n\t\t\tfor vasp_run in self.vasp_run_list:\n\t\t\t\tvasp_run.update()", "def run(self):\n with self.get_connection() as conn:\n #self._create_new_batch_and_step_audit(conn)\n \"\"\"Kick off the transformation job.\"\"\"\n if self.run_all_steps:\n self.run_all()\n else:\n self.skipped_steps = []\n self.skipped_tables = []\n step_runner = self.step_lambda_map[self.etl_step]\n step_runner()", "def update(self, changesList: DataChangesList, doPersist: bool = False) -> bool:\n for changeLog in changesList.changeLogsList:\n if changeLog.operation == Operation.CREATE:\n item = pickle.loads(changeLog.serializedObject)\n if changeLog.objectType == ObjectType.TASKSLIST:\n self.tlr.tasksListsDict[item.uuid] = item\n elif changeLog.objectType == 
ObjectType.TASK:\n self.insertTask_atPos(item.tluuid, item, item.pos)\n else:\n logger.error(\"Wrong object type used for creation\")\n return False\n\n elif changeLog.operation == Operation.UPDATE:\n if changeLog.objectType == ObjectType.TASKSLIST:\n if ((changeLog.fieldName == \"title\") \n and (changeLog.tluuid in self.tlr.tasksListsDict)):\n setattr(self.tlr.tasksListsDict[changeLog.tluuid], changeLog.fieldName, changeLog.fieldChange)\n else:\n logger.error(\"Wrong field name or uuid used for update\")\n\n elif changeLog.objectType == ObjectType.TASK:\n if (((changeLog.fieldName == \"title\") or (changeLog.fieldName == \"description\")) \n and (changeLog.tluuid in self.tlr.tasksListsDict) \n and (changeLog.tuuid in self.tlr.tasksListsDict[changeLog.tluuid].tasks)):\n setattr(self.tlr.tasksListsDict[changeLog.tluuid].tasks[changeLog.tuuid], changeLog.fieldName, changeLog.fieldChange)\n elif (changeLog.fieldName == \"priority\"):\n self.moveTask_atPos(changeLog.tluuid, changeLog.tuuid, int(changeLog.fieldChange))\n else:\n logger.error(\"Wrong field name or uuid used for update\")\n else:\n logger.error(\"Wrong object type used for creation\")\n return False\n\n elif changeLog.operation == Operation.PATCH:\n if changeLog.objectType == ObjectType.TASKSLIST:\n if ((changeLog.fieldName == \"title\") \n and (changeLog.tluuid in self.tlr.tasksListsDict)):\n current = getattr(self.tlr.tasksListsDict[changeLog.tluuid], changeLog.fieldName)\n setattr(\n self.tlr.tasksListsDict[changeLog.tluuid], \n changeLog.fieldName, \n current + changeLog.fieldChange\n )\n else:\n logger.error(\"Wrong field name or uuid used for update\")\n\n elif changeLog.objectType == ObjectType.TASK:\n if (((changeLog.fieldName == \"title\") or (changeLog.fieldName == \"description\")) \n and (changeLog.tluuid in self.tlr.tasksListsDict) \n and (changeLog.tuuid in self.tlr.tasksListsDict[changeLog.tluuid].tasks)):\n current = getattr(self.tlr.tasksListsDict[changeLog.tluuid].tasks[changeLog.tuuid], changeLog.fieldName)\n setattr(\n self.tlr.tasksListsDict[changeLog.tluuid].tasks[changeLog.tuuid], \n changeLog.fieldName, \n current + changeLog.fieldChange)\n else:\n logger.error(\"Wrong field name or uuid used for update\")\n else:\n logger.error(\"Wrong object type used for creation\")\n return False\n\n elif changeLog.operation == Operation.DELETE:\n if changeLog.objectType == ObjectType.TASKSLIST:\n if (changeLog.tluuid in self.tlr.tasksListsDict):\n del self.tlr.tasksListsDict[changeLog.tluuid]\n else:\n logger.error(\"Wrong uuid used for deletion\")\n\n elif changeLog.objectType == ObjectType.TASK:\n if ((changeLog.tluuid in self.tlr.tasksListsDict) \n and (changeLog.tuuid in self.tlr.tasksListsDict[changeLog.tluuid].tasks)):\n del self.tlr.tasksListsDict[changeLog.tluuid].tasks[changeLog.tuuid]\n else:\n logger.error(\"Wrong uuid used for deletion\")\n else:\n logger.error(\"Wrong object type used for creation\")\n return False\n print(\"delete\")\n\n else:\n return False\n \n # Persist cache if required\n if doPersist:\n self.tlrFile.persist(self.tlr)\n\n return True", "def run():\n\n # establish connection\n with sqlite3.connect(DB_PATH) as conn:\n db = conn.cursor()\n\n # run reset queries\n db.execute(\"\"\"\n WITH toReset AS (\n SELECT DISTINCT table_id\n FROM cea\n WHERE mapped IS NULL\n )\n\n UPDATE tables\n SET returned=0\n WHERE table_id IN toReset\n \"\"\")", "def update_status_pipeline(recording_key_dict, status, update_field=None, update_value=None):\n\n print('recording_key_dict', recording_key_dict)\n 
print('status', status)\n print('update_field', update_field)\n print('update_value', update_value)\n\n if update_field is not None:\n update_task_id_dict = recording_key_dict.copy()\n update_task_id_dict[update_field] = update_value\n print('update_task_id_dict', update_task_id_dict)\n recording.Recording.update1(update_task_id_dict)\n \n update_status_dict = recording_key_dict.copy()\n update_status_dict['status_recording_id'] = status\n print('update_status_dict', update_status_dict)\n recording.Recording.update1(update_status_dict)", "def sync(self):\n self._start_slow_sync()\n self._ask_for_all_records()\n self._process_events()\n self._process_reminders()\n self._process_recurrences()\n #self._write_events()", "def deep_processing_rerun_all(self):\r\n sql = \"\"\"SELECT * FROM emails \r\n WHERE email_status = 'processing' \r\n AND clean_type = 1\"\"\"\r\n df = self.db.read_sql(sql)\r\n\r\n for i in range(df.index.size):\r\n rec = df.loc[i, :]\r\n self.deep_clean_one(rec[EMAIL], dealno=rec['dealno'])\r\n self.db.con.commit()\r\n print('Reprocessed {} records that were stuck in the processing status'.format(df.index.size))", "def run(self):\n assert self.queue is not None, \"Must specify queue or override run()\"\n\n while not self.terminated():\n qs = self.queue.objects.filter(status=self.queue.UNSUBMITTED,).order_by(\n \"-seq\"\n )[: django.conf.settings.DAEMONS_MAX_BATCH_SIZE]\n if not qs:\n self.sleep(django.conf.settings.DAEMONS_IDLE_SLEEP)\n continue\n\n for task_model in qs:\n try:\n self.do_task(task_model)\n task_model.status = self.queue.SUCCESS\n except AsyncProcessingIgnored:\n task_model.status = self.queue.IGNORED\n except Exception as e:\n if isinstance(e, AsyncProcessingRemoteError):\n # This is a bit messy. Do not log a trace when the\n # error is due to the remote service rejecting the request.\n # Such an error is still permanent for the task though.\n self.log.error(e)\n else:\n self.log.error('#' * 100)\n self.log.exception(f'Exception when handling task \"{task_model}\"')\n\n task_model.error = str(e)\n # if self.is_permanent_error(e):\n task_model.status = self.queue.FAILURE\n task_model.errorIsPermanent = True\n # raise\n else:\n task_model.submitTime = self.now_int()\n\n task_model.save()\n\n self.sleep(django.conf.settings.DAEMONS_BATCH_SLEEP)\n self.log.info(\"Exiting run loop.\")", "def update(self):\n\n if not self.db: self.validate()\n\n self.logging.debug( \"update(%s)\" % (self.db) )\n\n for name in self.tables:\n self.dbs_tables[name]['md5'] = get_md5( self.dbs_tables[name]['path'] )\n\n self._get_magnitudes()\n self._get_events()", "def update_table(table_name):\n for filename in table_name_to_funcs[table_name][\"filename\"]:\n choose_file_to_get(table_name_to_funcs[table_name][\"file_type\"], filename)\n\n for process_func in table_name_to_funcs[table_name][\"process\"]:\n process_func()\n for to_sql_func in table_name_to_funcs[table_name][\"to_sql\"]:\n to_sql_func(update=True)", "def auto_import_product_queue_line_data(self):\n # change by bhavesh jadav 03/12/2019 for process only one queue data at a time\n query = \"\"\"select product_data_queue_id from shopify_product_data_queue_line_ept where state='draft' ORDER BY create_date ASC limit 1\"\"\"\n self._cr.execute(query)\n product_data_queue_id = self._cr.fetchone()\n product_data_queue_line_ids = self.env['shopify.product.data.queue.ept'].browse(product_data_queue_id).product_data_queue_lines\n product_data_queue_line_ids.process_product_queue_line_data()", "def OnUpdate(self, event):\n # Check 
remote - TODO\n # Query database for status of processing\n # 2018-04-11 13:25:56.914000\n self.controller.checkRemote()\n seriesprocesses = self.controller.db.getActiveProcesses()\n self.m_dataViewListCtrlCloud.DeleteAllItems()\n for series in seriesprocesses:\n # time delta\n t1 = datetime.datetime.strptime(series[4], '%Y-%m-%d %H:%M:%S.%f')\n if series[5] is not None:\n t2 = datetime.datetime.strptime(series[5], '%Y-%m-%d %H:%M:%S.%f')\n else:\n t2 = datetime.datetime.now()\n tdiff = t2 - t1\n # Load to window\n self.m_dataViewListCtrlCloud.AppendItem(\n [False, series[0], series[1], series[2].upper(), self.getStatus(series[3]), str(tdiff)])", "def _update_time_delivered(self, time_delivered):\n # Update db record's time_delivered field\n update = {'time_delivered': time_delivered}\n datebase.update_transaction_record(filter=self.filter, update=update)\n \n # Update db record's estimated_time field\n datebase.update_transaction_record(filter=self.filter, {estimated_time:'0'})\n \n # Update db record's transaction status to delivered\n self._update_transaction_status(transaction_status='delivered')\n \t\t self.transaction_info.update(delivery_status='delivered')\n \n # Update object\n \t\tself.transaction_info.update(time_delivered=time_delivered)\n self.transaction_info.update(estimated_time=0)\n self.transaction_info(transaction_status='delivered')\n\n \tdef _update_transaction_status(self, transaction_status, photo=None):\n \"\"\"\n Update record's transaction_status and send sms msg to update seeker\n \"\"\"\n # Send text message when status changes \n self.send_text(message_type=transaction_status)\n\n # Update db record's transaction status\n update = {'transaction_status': transaction_status}\n datebase.update_transaction_record(filter=self.filter, update=update)\n\n # Update object\n self.transaction_info.update('transaction_seeker': transaction_status)\n\n # If delivered ... TODO: do we actually want to remove from db? 
\n \t\t# if transaction_status == 'delivered':\n # datebase.delete_transaction_record()\n # return 1 \n # arguments against: we wont be able to access delivered photo if we want to do that", "def new_changes(self):\n with ChangesStream(self.couch_db, feed='continuous', heartbeat=True, since=self.since,\n filter=self.couch_filter, **self.extra_args) as st:\n for c in st:\n self.processor(c)", "def _processDeviceUpdated(self, action: UpdateAppliedAction) -> List[Tuple]:\n session = self._dbSessionCreator()\n try:\n deviceInfo = (\n session.query(DeviceInfoTuple)\n .filter(DeviceInfoTuple.deviceId == action.deviceId)\n .one()\n )\n\n deviceId = deviceInfo.deviceId\n\n if action.appVersion is not None:\n deviceInfo.appVersion = action.appVersion\n\n if action.updateVersion is not None:\n deviceInfo.updateVersion = action.updateVersion\n\n session.commit()\n\n self._notifierController.notifyDeviceInfo(deviceId=deviceId)\n\n return []\n\n finally:\n session.close()", "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "def acUpdate(deltaT):#-------------------------------- AC UPDATE\n pass # -> Delete this line if you do something here !", "def window_updated(self, stream_id, delta):\n if stream_id and stream_id in self.flow_control_futures:\n f = self.flow_control_futures.pop(stream_id)\n f.set_result(delta)\n elif not stream_id:\n for f in self.flow_control_futures.values():\n f.set_result(delta)\n\n self.flow_control_futures = {}", "def process(self, exc, update):\n assert exc\n assert update\n for proc in self._processors:\n continue_ = True\n if proc.responsible_for(update):\n continue_ = proc.process(exc, update)\n if not continue_:\n break", "def process_queue(\n *,\n stac_bucket: str,\n cog_pds_meta_pds: Dict[str, str],\n queue: str,\n message_batch_size: int,\n sns_reconcile_target_arn: str,\n catalog_update_queue: str,\n catalog_update_table: str,\n corrupted_xml_queue: str,\n delete_processed_messages: bool = False,\n):\n\n buckets = {\n \"stac\": stac_bucket,\n }\n processed_messages = 0\n for msg in sqs_messages(queue):\n\n try:\n process_message(\n msg,\n {\n **buckets,\n **{\n \"cog\": msg[\"bucket\"],\n \"metadata\": cog_pds_meta_pds[msg[\"bucket\"]],\n },\n },\n sns_reconcile_target_arn,\n catalog_update_queue,\n catalog_update_table,\n )\n except ParseError:\n LOGGER.info(\"Corrupted XML for %s quicklook.\", msg[\"key\"].split(\"/\")[-1])\n get_client(\"sqs\").send_message(\n QueueUrl=corrupted_xml_queue, MessageBody=msg[\"key\"]\n )\n\n # Remove message from queue\n if delete_processed_messages:\n get_client(\"sqs\").delete_message(\n QueueUrl=queue, ReceiptHandle=msg[\"ReceiptHandle\"]\n )\n\n processed_messages += 1\n if processed_messages == message_batch_size:\n break" ]
[ "0.57603353", "0.5615798", "0.5543773", "0.5470545", "0.5338651", "0.52531934", "0.5228426", "0.5206048", "0.5201316", "0.51806426", "0.51614136", "0.51220685", "0.50776464", "0.50263834", "0.4995501", "0.49954292", "0.4989829", "0.49809688", "0.4969443", "0.4967038", "0.49626768", "0.49619496", "0.4940859", "0.49380878", "0.49333394", "0.49143663", "0.49105018", "0.49027413", "0.4887578", "0.4886638", "0.4884164", "0.48653978", "0.48637447", "0.4853914", "0.4830119", "0.48296088", "0.4822889", "0.48082843", "0.48060322", "0.4801448", "0.47945586", "0.47929236", "0.47925222", "0.4774729", "0.4773355", "0.47711116", "0.47657776", "0.47562507", "0.47330934", "0.4729215", "0.47254455", "0.4721342", "0.4720016", "0.47175303", "0.4711226", "0.4710211", "0.4684595", "0.46840942", "0.46800363", "0.4678797", "0.46772504", "0.46737477", "0.46734968", "0.4669384", "0.46665964", "0.46640164", "0.46601188", "0.46593472", "0.4657367", "0.4651509", "0.46507633", "0.46471772", "0.46457437", "0.46448803", "0.46429405", "0.4642444", "0.46420565", "0.46400058", "0.46365938", "0.46336794", "0.46258977", "0.46253666", "0.46160173", "0.46087736", "0.46043083", "0.46008742", "0.45933887", "0.45831716", "0.45820972", "0.45799318", "0.4574661", "0.4574189", "0.45730668", "0.45693085", "0.45614952", "0.4560091", "0.45594448", "0.45550475", "0.4553295", "0.4551936" ]
0.5197049
9
List the job queue
def cmd_list(arguments): # get a producer producer = getProducer() # get the list of pending jobs jobs = producer.list() # print que list size and the entries print "Number of jobs: %d" % len(jobs) for j in range(len(jobs)): print " job %06d - %s" % (j, jobs[j]) return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ls(self):\n server = jenkins_server.get_jenkins_server()\n queue = server.get_queue_info()\n print('任务ID\\t%s\\t原因' % '任务链接'.ljust(50))\n for q in queue:\n print('%d\\t%s\\t%s' % (q['id'], q['task']['url'].ljust(50), q['why']))", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def get_queue_list(self):\n return self.manager.get_queue_list()", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def list(self):\n self.background_scheduler.print_jobs()", "def queue_list(self, req):\n project_id = req._headers.get('X-Project-ID')\n\n LOG.debug(u'Queue list - project: %(project)s',\n {'project': project_id})\n\n kwargs = {}\n\n if req._body.get('marker') is not None:\n kwargs['marker'] = req._body.get('marker')\n\n if req._body.get('limit') is not None:\n kwargs['limit'] = req._body.get('limit')\n\n if req._body.get('detailed') is not None:\n kwargs['detailed'] = req._body.get('detailed')\n\n try:\n self._validate.queue_listing(**kwargs)\n results = self._queue_controller.list(\n project=project_id, **kwargs)\n except validation.ValidationFailed as ex:\n LOG.debug(ex)\n headers = {'status': 400}\n return api_utils.error_response(req, ex, headers)\n except storage_errors.ExceptionBase as ex:\n LOG.exception(ex)\n error = 'Queues could not be listed.'\n headers = {'status': 503}\n return api_utils.error_response(req, ex, error, headers)\n\n # Buffer list of queues\n queues = list(next(results))\n\n # Got some. Prepare the response.\n body = {'queues': queues}\n headers = {'status': 200}\n\n resp = response.Response(req, body, headers)\n\n return resp", "def list(self, jobguid=\"\", executionparams=None):", "def list_jobs(arn=None, nextToken=None):\n pass", "def GetQueueList(handler, query):\n json_config = {}\n if 'TiVo' in query:\n tivoIP = query['TiVo'][0]\n with active_tivos_lock:\n if tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['urls'] = [ status['url'] for status in active_tivos[tivoIP]['queue'] ]\n\n handler.send_json(json.dumps(json_config))", "async def get_jobs(): \n return mngr.getAllJobs()", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def list(self, jobguid=\"\", executionparams=dict()):", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()", "def queue_job_names(self):\n return [attrs[self.QCOL_NAME] for attrs in self.queue.values()]", "def list_queues():\n url = urlparse(Config.RABBIT_MQ_URL)\n response = requests.get(f'http://{url.hostname}:{15672}/api/queues?columns=name,messages,'\\\n 'messages_ready,messages_unacknowledged',\n auth=(url.username, url.password))\n\n tasks = dict()\n\n for task in response.json():\n if 'cube' in task['name']:\n tasks[task['name']] = 
dict(total=task['messages'],\n ready=task['messages_ready'],\n unacked=task['messages_unacknowledged'])\n\n return tasks", "def get_all_jobs():\n fq = get_failed_queue(connection=conn)\n job_data = {'queued_jobs': q.job_ids,\n 'failed_jobs': fq.job_ids}\n return jsonify(job_data), 200", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def queue_job_ids(self):\n return list(self.queue.keys())", "def get_jobs_list(self, response):\n pass", "def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))", "def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())", "def _messages_list(self, queue):\n\n return queue.messages()", "def queues(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"queues\")", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def jobs(self):\n raise NotImplementedError()", "def jobs(self):\n return self.get_jobs()", "def get_list():\r\n qry = ImportQueue.query\r\n qry = qry.order_by(ImportQueue.id)\r\n return qry.all()", "def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results", "def list_queues(region: str = \"\", verbose: bool = False) -> List[str]:\n sqs_client = _client(region=region)\n return [\n (x if verbose else x.split(\"/\")[-1])\n for x in sqs_client.list_queues()[\"QueueUrls\"]\n ]", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_job_names(self):\n return []", "def queue(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Queuing job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.queue',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() == tasks.TASK_FAILED:\n init_state = 'FAILED'\n else:\n self.winstance.send_event('.. 
job queued')\n init_state = 'PENDING'\n self.set_status(init_state)\n return result.task", "def get_history_queue():\n response = houston.get(\"/history/queue\")\n houston.raise_for_status_with_json(response)\n return response.json()", "def get_jobs(self):\n return list(self._jobs.values())", "def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def list_jobs(user_data, cache):\n user = cache.ensure_user(user_data)\n\n jobs = []\n for job in cache.get_jobs(user):\n try:\n if job.project_id:\n job.project = cache.get_project(user, job.project_id)\n except IntermittentProjectIdError:\n continue\n\n jobs.append(job)\n\n return result_response(JobListResponseRPC(), {\"jobs\": jobs})", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def queue_names(self) -> pulumi.Output[ServiceQueueNames]:\n return pulumi.Output.all(\n self._name,\n self.queue.name,\n self.retry_queue.name,\n self.dead_letter_queue.name,\n ).apply(lambda args: ServiceQueueNames(*args))", "def queue_names(self) -> pulumi.Output[ServiceQueueNames]:\n return pulumi.Output.all(\n self._name,\n self.queue.name,\n self.retry_queue.name,\n self.dead_letter_queue.name,\n ).apply(lambda args: ServiceQueueNames(*args))", "def queue_all_instances(self):\n if not self.is_job:\n return []\n\n tasks_list = []\n for job_instance in self.instances:\n tasks_list.append(job_instance.queue())\n\n self.status = 'QUEUED'\n return tasks_list", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def list_command(ctx: click.Context, user_ids: Tuple[int], queue_ids: Tuple[int]) -> None:\n with RossumClient(context=ctx.obj) as rossum:\n queue_users = rossum.get_queues((USERS,), users=user_ids)\n\n user_queues: Dict[int, List[List[Optional[str]]]] = {}\n for queue in queue_users:\n if queue_ids and int(queue[\"id\"]) not in queue_ids:\n continue\n for user in queue[\"users\"]:\n user_id 
= int(user[\"id\"])\n if user_ids and user_id not in user_ids:\n continue\n\n if user_id not in user_queues:\n user_queues[user_id] = [[user[\"id\"], user[\"username\"], queue[\"id\"], queue[\"name\"]]]\n else:\n user_queues[user_id].append([None, None, queue[\"id\"], queue[\"name\"]])\n user_queues = dict(sorted(user_queues.items()))\n click.echo(\n tabulate(\n chain.from_iterable(user_queues.values()),\n headers=[\"id\", \"username\", \"queue id\", \"queue name\"],\n )\n )", "def queueStatusAll():", "def get_waiting_jobs(self):\n return []", "def queue_tabnav(context):\n counts = queue_counts()\n return [('apps', 'queue_pending',\n ngettext('Apps ({0})', 'Apps ({0})', counts['pending'])\n .format(counts['pending']))]", "def get_queues(self, tags=None):\r\n params = {}\r\n if tags:\r\n params['tags'] = ','.join(tags)\r\n resp = self._make_request('get', 'queues', params=params)\r\n return resp.json()", "def handle_list(self, job):\n print(\"User requested list of files.\")\n # Send LIST job to all servers\n self.put_job_in_all_queues(job)\n list_job_results = self.get_internal_results_from_all_servers()\n if len(list_job_results) == 0:\n # There were no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n # Concatenate the lists of files\n total_files_list = []\n for result in list_job_results:\n files_list = result.result[\"files_list\"]\n for each_file in files_list:\n if each_file not in total_files_list:\n total_files_list.append(each_file)\n\n # Return the files list\n response_result = copy.deepcopy(list_job_results[0])\n response_result.result[\"files_list\"] = total_files_list\n\n self.put_external_result(response_result)", "def get_queues_info() -> List[QueueInfo]:\n from src.server.oasisapi.analyses.models import AnalysisTaskStatus\n\n # setup an entry for every element in the broker (this will include\n # queues with no workers yet)\n res = [\n {\n 'name': q,\n 'pending_count': 0,\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 0,\n } for q in _get_broker_queue_names()\n ]\n\n # increment the number of workers available for each queue\n queues = _get_active_queues()\n if queues:\n for worker in queues.values():\n for queue in worker:\n try:\n next(r for r in res if r['name'] == queue['routing_key'])['worker_count'] += 1\n except StopIteration:\n # in case there are workers around still for inactive queues add it here\n res.append({\n 'name': queue['routing_key'],\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 1,\n })\n\n # get the stats of the running and queued tasks\n pending = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.PENDING,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n running = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.STARTED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n queued = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.QUEUED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n for entry in res:\n entry['pending_count'] = pending.get(entry['name'], 0)\n entry['queued_count'] = 
queued.get(entry['name'], 0)\n entry['running_count'] = running.get(entry['name'], 0)\n\n return res", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def read_queue(self):\n query = \"\"\"SELECT server,\n otp,\n modified,\n info,\n server_nonce\n FROM queue\"\"\"\n self._execute(query)\n return self._dictfetchall()", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def _get_njobs_in_queue(self, username):", "def jobs(self):\n return self._jobs", "def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos", "def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass", "def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)", "def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def qstat(jobids: T.List[str], show_finished: bool=False):\n\n extra_args = []\n if show_finished:\n extra_args.append('-x')\n\n subp = subprocess.run(\n [\"/opt/pbs/default/bin/qstat\", *extra_args, 
\"-f\", \"-F\", \"json\", *jobids],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n )\n\n jobs = clean_qstat_json(subp.stdout)[\"Jobs\"]\n\n return jobs", "def get_job_list():\n\tdirlist = os.listdir(\".\")\n\tjoblist = [x for x in dirlist if \"job.sh\" in x and x in job_dict]\n\ttmplist = [x for x in dirlist if \"job.sh\" in x and x not in job_dict]\n\tdef compare_function(s: str):\n\t\treturn job_dict[s].order\n\tjoblist.sort(key=compare_function)\n\tjoblist.extend(tmplist)\n\treturn joblist", "def queue_to_list(queue):\n result = []\n while queue.qsize() != 0:\n result.append(queue.get())\n return result", "def list(address: Optional[str], headers: Optional[str], verify: Union[bool, str]):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n # Set no_format to True because the logs may have unescaped \"{\" and \"}\"\n # and the CLILogger calls str.format().\n cli_logger.print(pprint.pformat(client.list_jobs()), no_format=True)", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def __init__(self):\n self.queues=[]", "def running_jobs_sherlock():\n user = os.environ['USER']\n\n return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr, keys=self.QSTAT_KEYS)\n\n return self._queue", "def html_queued_job_table(self, job_list):\n ## class QueuePage()\n queued_list = []\n for jdict in job_list:\n if jdict.get(\"state\") == \"queued\":\n ## Populate queued list for XHTML table below\n queued_list.append(jdict)\n\n l = ['<center>',\n '<b>%d Queued Jobs</b>' % (len(queued_list)),\n '<table class=\"status_table\">',\n '<tr class=\"status_table_head\">',\n '<th>Job ID</th>',\n '<th>Struct ID</th>',\n '<th>Chain:Num Res</th>',\n '<th>Submission Date</th>',\n '</tr>']\n\n row1 = True\n for jdict in queued_list:\n if row1:\n l.append('<tr class=\"status_table_row1\">')\n else:\n l.append('<tr class=\"status_table_row2\">')\n row1 = not row1\n\n l += ['<td>%s</td>' % (self.explore_href(jdict[\"job_id\"])),\n '<td>%s</td>' % (self.rcsb_href(jdict)),\n '<td>%s</td>' % (self.chain_size_string(jdict)),\n '<td>%s</td>' % (timestring(jdict[\"submit_time\"])),\n '</tr>' ]\n\n if len(queued_list) == 0:\n l += ['<tr>',\n '<td colspan=\"4\" class=\"c\">',\n 'No Jobs Queued',\n '</td>',\n '</tr>']\n\n l.append('</table></center>')\n\n return \"\".join(l)", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)", "def getJobListFromDB(self):\n\t\tsql = \"SELECT jobname from hudson_jobs\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql)\n\t\tdata = [ x[0] for x in csr.fetchall() ]\n\t\treturn data", "def test_list_queues(self, rabbitmq):\n assert rabbitmq.list_queues() == []\n self.declare_queue(rabbitmq, \"q1\")\n self.declare_queue(rabbitmq, \"q2\")\n assert 
sorted(rabbitmq.list_queues()) == [('q1', '0'), ('q2', '0')]", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def query_queue(self, job_name=None, user=None, qformat=None,\n skip_rows=None):", "def __str__(self):\r\n return f\"Queue object: {self.queue}\"", "def jobs(self) -> List[Job]:\n return self._jobs.values()", "def showqueue(self, irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()", "def queues(self):\r\n return queues.Queues(self)", "def traceQueueContents(self):\n from typhon.objects.printers import toString\n debug_print(\"Pending queue for \" + self.name.encode(\"utf-8\"))\n for (resolver, target, atom, args, namedArgs) in self._pending:\n debug_print(toString(target).encode('utf-8') +\n \".\" + atom.verb.encode('utf-8') + \"(\" +\n ', '.join([toString(a).encode('utf-8')\n for a in args]) + \")\")", "async def list_tasks():" ]
[ "0.7661594", "0.7647107", "0.762154", "0.75825155", "0.74639344", "0.7404997", "0.71474695", "0.7096951", "0.7051303", "0.69853973", "0.69715", "0.69715", "0.69136894", "0.68906635", "0.6835532", "0.683014", "0.6773141", "0.6738064", "0.6734032", "0.6690407", "0.6667332", "0.66628206", "0.66418636", "0.6626909", "0.6607109", "0.66056544", "0.65889394", "0.6514371", "0.6479179", "0.6478803", "0.6469853", "0.6467462", "0.64595705", "0.6459176", "0.6435052", "0.6420893", "0.64189005", "0.64140457", "0.64030176", "0.6373613", "0.6372211", "0.635855", "0.6331892", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.629657", "0.629657", "0.6287086", "0.62842304", "0.62823766", "0.6282016", "0.6277141", "0.6237394", "0.62242556", "0.6220853", "0.6216549", "0.62131065", "0.62131065", "0.62084717", "0.62005985", "0.61960965", "0.6186906", "0.61822087", "0.617739", "0.61707264", "0.6170529", "0.6159845", "0.614614", "0.6091975", "0.6090486", "0.608464", "0.60818493", "0.6080778", "0.6079356", "0.6074576", "0.60712534", "0.6067895", "0.60660803", "0.60441107", "0.6041804", "0.6041154", "0.60367304", "0.6027864", "0.6018476", "0.6013016", "0.60105157", "0.5993234", "0.5975003", "0.59716153", "0.5961054", "0.59550273", "0.5940798", "0.59390455" ]
0.7498685
4
List the job queue
def cmd_sitemap(arguments): # create a sitemap job job = jobs.encode(jobs.sitemap()) # get a producer producer = getProducer() # get an item, set content and make it ready item = producer.item() item.content = job producer.ready(item) print "Sitemap command put in queue" return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ls(self):\n server = jenkins_server.get_jenkins_server()\n queue = server.get_queue_info()\n print('任务ID\\t%s\\t原因' % '任务链接'.ljust(50))\n for q in queue:\n print('%d\\t%s\\t%s' % (q['id'], q['task']['url'].ljust(50), q['why']))", "def list_jobs():\n\n name_to_job_details = redis_controller.get_name_to_job_details()\n return list(name_to_job_details.values())", "def get_queue_list(self):\n return self.manager.get_queue_list()", "def list(self):\n return self.rpc.call(MsfRpcMethod.JobList)", "def cmd_list(arguments):\r\n\r\n # get a producer\r\n producer = getProducer()\r\n\r\n # get the list of pending jobs\r\n jobs = producer.list()\r\n\r\n # print que list size and the entries\r\n print \"Number of jobs: %d\" % len(jobs)\r\n for j in range(len(jobs)):\r\n print \" job %06d - %s\" % (j, jobs[j])\r\n\r\n return 0", "def list(self):\n self.background_scheduler.print_jobs()", "def queue_list(self, req):\n project_id = req._headers.get('X-Project-ID')\n\n LOG.debug(u'Queue list - project: %(project)s',\n {'project': project_id})\n\n kwargs = {}\n\n if req._body.get('marker') is not None:\n kwargs['marker'] = req._body.get('marker')\n\n if req._body.get('limit') is not None:\n kwargs['limit'] = req._body.get('limit')\n\n if req._body.get('detailed') is not None:\n kwargs['detailed'] = req._body.get('detailed')\n\n try:\n self._validate.queue_listing(**kwargs)\n results = self._queue_controller.list(\n project=project_id, **kwargs)\n except validation.ValidationFailed as ex:\n LOG.debug(ex)\n headers = {'status': 400}\n return api_utils.error_response(req, ex, headers)\n except storage_errors.ExceptionBase as ex:\n LOG.exception(ex)\n error = 'Queues could not be listed.'\n headers = {'status': 503}\n return api_utils.error_response(req, ex, error, headers)\n\n # Buffer list of queues\n queues = list(next(results))\n\n # Got some. 
Prepare the response.\n body = {'queues': queues}\n headers = {'status': 200}\n\n resp = response.Response(req, body, headers)\n\n return resp", "def list(self, jobguid=\"\", executionparams=None):", "def list_jobs(arn=None, nextToken=None):\n pass", "def GetQueueList(handler, query):\n json_config = {}\n if 'TiVo' in query:\n tivoIP = query['TiVo'][0]\n with active_tivos_lock:\n if tivoIP in active_tivos:\n with active_tivos[tivoIP]['lock']:\n json_config['urls'] = [ status['url'] for status in active_tivos[tivoIP]['queue'] ]\n\n handler.send_json(json.dumps(json_config))", "async def get_jobs(): \n return mngr.getAllJobs()", "def get_job_list(self):\n return self.job_list", "def get_job_list(self):\n return self.job_list", "def list(self, jobguid=\"\", executionparams=dict()):", "def get_jobs():\n \n rate_limit()\n command = [\"bjobs\", \"-o\", \"\\\"JOBID\", \"USER\", \"STAT\", \"QUEUE\", \"JOB_NAME\", \\\n \"delimiter=';'\\\"\"]\n command = \" \".join(command)\n jobs = subprocess.check_output(command, shell=True, stderr=open(os.devnull))\n \n # if there aren't any currently running or pending jobs, then the output\n if jobs == \"\":\n return set([])\n \n jobs = jobs.decode().strip().split(\"\\n\")\n \n current_jobs = set([])\n for line in jobs:\n if line.startswith(\"JOBID\"): # ignore the header line\n continue\n \n line = line.split(\";\")\n job_name = line[4]\n current_jobs.add(job_name)\n \n return current_jobs", "def ListJobs(self, token=None):\n return aff4.FACTORY.Open(self.CRON_JOBS_PATH, token=token).ListChildren()", "def queue_job_names(self):\n return [attrs[self.QCOL_NAME] for attrs in self.queue.values()]", "def list_queues():\n url = urlparse(Config.RABBIT_MQ_URL)\n response = requests.get(f'http://{url.hostname}:{15672}/api/queues?columns=name,messages,'\\\n 'messages_ready,messages_unacknowledged',\n auth=(url.username, url.password))\n\n tasks = dict()\n\n for task in response.json():\n if 'cube' in task['name']:\n tasks[task['name']] = dict(total=task['messages'],\n ready=task['messages_ready'],\n unacked=task['messages_unacknowledged'])\n\n return tasks", "def get_all_jobs():\n fq = get_failed_queue(connection=conn)\n job_data = {'queued_jobs': q.job_ids,\n 'failed_jobs': fq.job_ids}\n return jsonify(job_data), 200", "def get_job_list(self):\n job_list = []\n if mysql.job_list() == None:\n return job_list\n return mysql.job_list()", "def get_jobs_in_queue() -> List[int]:\n output = subprocess.check_output([\"qstat\"]).decode().splitlines()\n job_ids = []\n for line in output:\n m = REGEX_QSTAT.match(line)\n if m:\n job_ids.append(int(m.group(1)))\n return job_ids", "def queue_job_ids(self):\n return list(self.queue.keys())", "def get_jobs_list(self, response):\n pass", "def ListJobs(cls):\n return [key.parent().string_id() for key in cls.query().fetch(\n 100, keys_only=True)]", "def genJobList():\n nit=10\n reply=[]\n while len(reply)<10: #assume qstat fails if less that 10 jobs on cluster\n reply=chomp(os.popen('qstat|expand|tr -s \\' \\'|cut -d\\' \\' -f 1,2,5').readlines())\n nit+=1\n if nit>10: break\n return reply", "def show_queue(Q):\n print(\"(Size of the queue:\", Q.qsize(), \")\", end=\" \")\n for n in list(Q.queue):\n print(n, end=\" \")\n print()", "def list_jobs(self):\n\n return dict(self._from_json(self.manage.run(override=\"list-jobs\")))", "def get(self):\n # TODO: auth\n return list(self.app.db.jobs.find())", "def _messages_list(self, queue):\n\n return queue.messages()", "def queues(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return 
pulumi.get(self, \"queues\")", "def process_list(self):\n for p in self._queue:\n print \"%-5d %-10s %-10s %2d %10s %10s\" % (p.id, p.name,\n p.status['type'], p.priority, \n self.print_cr_tree(p.creation_tree['parent']), \n self.print_cr_tree(p.creation_tree['child']))", "def jobs(self):\n raise NotImplementedError()", "def jobs(self):\n return self.get_jobs()", "def get_list():\r\n qry = ImportQueue.query\r\n qry = qry.order_by(ImportQueue.id)\r\n return qry.all()", "def list_jobs(exproot, **kwargs):\n for jobname, args, results in load_all(exproot):\n print jobname, args, results", "def list_queues(region: str = \"\", verbose: bool = False) -> List[str]:\n sqs_client = _client(region=region)\n return [\n (x if verbose else x.split(\"/\")[-1])\n for x in sqs_client.list_queues()[\"QueueUrls\"]\n ]", "def test_get_job_queue(self):\n response = self.client.open(\n '/tx-queue/2/scheduler/job',\n method='GET')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_job_names(self):\n return []", "def queue(self):\n if not self.parent_node.is_job:\n return\n\n self.winstance.send_event('Queuing job..')\n result = self.winstance.execute_operation('hpc.interfaces.'\n 'lifecycle.queue',\n kwargs={\"name\": self.name})\n result.task.wait_for_terminated()\n if result.task.get_state() == tasks.TASK_FAILED:\n init_state = 'FAILED'\n else:\n self.winstance.send_event('.. job queued')\n init_state = 'PENDING'\n self.set_status(init_state)\n return result.task", "def get_history_queue():\n response = houston.get(\"/history/queue\")\n houston.raise_for_status_with_json(response)\n return response.json()", "def get_jobs(self):\n return list(self._jobs.values())", "def listJobs():\n logger.debug('[FLASKWEB /jobs] Request for job listing')\n jobs = db.getJobs(numdays=2)\n for job in jobs:\n job['time'] = datetime.datetime.strptime(job['time'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if job['complete']:\n job['complete'] = datetime.datetime.strptime(job['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n\n # Garbage Collect Orpahened jobs\n compiles = db.getCompiles()\n for compile in compiles:\n if compile['submit']:\n compile['submit'] = datetime.datetime.strptime(compile['submit'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n if compile['complete']:\n compile['complete'] = datetime.datetime.strptime(compile['complete'], db.TS_FMT).replace(tzinfo=db.timezone('UTC')).isoformat()\n # for c in compiles:\n # if c['uid'] not in compile_tasks.keys():\n # db.updateCompile(c['uid'], status='KILLED', done=True)\n # compiles = db.getCompiles()\n\n if request.headers['Accept'] == 'application/json':\n return jsonify(dict(LaunchJobs=jobs, CompilingJobs=compiles)), 200\n else:\n return render_template(\"jobs.html\", joblist=jobs, compilelist=compiles)", "def jobs():\n result = []\n out = subprocess.check_output([\"/bin/launchctl\", \"list\"]).decode()\n for row in out.splitlines()[1:]:\n result.append(Job(row))\n return result", "def list_jobs(user_data, cache):\n user = cache.ensure_user(user_data)\n\n jobs = []\n for job in cache.get_jobs(user):\n try:\n if job.project_id:\n job.project = cache.get_project(user, job.project_id)\n except IntermittentProjectIdError:\n continue\n\n jobs.append(job)\n\n return result_response(JobListResponseRPC(), {\"jobs\": jobs})", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def 
_get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def _get_queues(self):\n return self.__queues", "def queue_names(self) -> pulumi.Output[ServiceQueueNames]:\n return pulumi.Output.all(\n self._name,\n self.queue.name,\n self.retry_queue.name,\n self.dead_letter_queue.name,\n ).apply(lambda args: ServiceQueueNames(*args))", "def queue_names(self) -> pulumi.Output[ServiceQueueNames]:\n return pulumi.Output.all(\n self._name,\n self.queue.name,\n self.retry_queue.name,\n self.dead_letter_queue.name,\n ).apply(lambda args: ServiceQueueNames(*args))", "def queue_all_instances(self):\n if not self.is_job:\n return []\n\n tasks_list = []\n for job_instance in self.instances:\n tasks_list.append(job_instance.queue())\n\n self.status = 'QUEUED'\n return tasks_list", "async def get_jobs(jobId: int) -> str: \n return mngr.getJob(str(jobId))", "def list_command(ctx: click.Context, user_ids: Tuple[int], queue_ids: Tuple[int]) -> None:\n with RossumClient(context=ctx.obj) as rossum:\n queue_users = rossum.get_queues((USERS,), users=user_ids)\n\n user_queues: Dict[int, List[List[Optional[str]]]] = {}\n for queue in queue_users:\n if queue_ids and int(queue[\"id\"]) not in queue_ids:\n continue\n for user in queue[\"users\"]:\n user_id = int(user[\"id\"])\n if user_ids and user_id not in user_ids:\n continue\n\n if user_id not in user_queues:\n user_queues[user_id] = [[user[\"id\"], user[\"username\"], queue[\"id\"], queue[\"name\"]]]\n else:\n user_queues[user_id].append([None, None, queue[\"id\"], queue[\"name\"]])\n user_queues = dict(sorted(user_queues.items()))\n click.echo(\n tabulate(\n chain.from_iterable(user_queues.values()),\n headers=[\"id\", \"username\", \"queue id\", \"queue name\"],\n )\n )", "def queueStatusAll():", "def get_waiting_jobs(self):\n return []", "def queue_tabnav(context):\n counts = queue_counts()\n return [('apps', 'queue_pending',\n ngettext('Apps ({0})', 'Apps ({0})', counts['pending'])\n .format(counts['pending']))]", "def get_queues(self, tags=None):\r\n params = {}\r\n if tags:\r\n params['tags'] = ','.join(tags)\r\n resp = self._make_request('get', 'queues', params=params)\r\n return resp.json()", "def handle_list(self, job):\n print(\"User requested list of files.\")\n # Send LIST job to all servers\n self.put_job_in_all_queues(job)\n list_job_results = self.get_internal_results_from_all_servers()\n if len(list_job_results) == 0:\n # There were no servers active\n self.put_external_result(self.generate_failure_job(\"Unsuccessful, no servers running\"))\n return\n\n # Concatenate the lists of files\n total_files_list = []\n for result in list_job_results:\n files_list = result.result[\"files_list\"]\n for each_file in files_list:\n if each_file not in total_files_list:\n total_files_list.append(each_file)\n\n # Return the files list\n response_result = copy.deepcopy(list_job_results[0])\n response_result.result[\"files_list\"] = total_files_list\n\n self.put_external_result(response_result)", "def get_queues_info() -> List[QueueInfo]:\n from src.server.oasisapi.analyses.models import AnalysisTaskStatus\n\n # setup an entry for every element in the broker (this will include\n # queues with no workers yet)\n res = [\n {\n 'name': q,\n 'pending_count': 0,\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 0,\n } for q in _get_broker_queue_names()\n ]\n\n # increment the 
number of workers available for each queue\n queues = _get_active_queues()\n if queues:\n for worker in queues.values():\n for queue in worker:\n try:\n next(r for r in res if r['name'] == queue['routing_key'])['worker_count'] += 1\n except StopIteration:\n # in case there are workers around still for inactive queues add it here\n res.append({\n 'name': queue['routing_key'],\n 'queued_count': 0,\n 'running_count': 0,\n 'worker_count': 1,\n })\n\n # get the stats of the running and queued tasks\n pending = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.PENDING,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n running = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.STARTED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n queued = reduce(\n lambda current, value: _add_to_dict(current, value['queue_name'], value['count']),\n AnalysisTaskStatus.objects.filter(\n status=AnalysisTaskStatus.status_choices.QUEUED,\n ).values(\n 'queue_name',\n ).annotate(\n count=Count('pk'),\n ),\n {}\n )\n\n for entry in res:\n entry['pending_count'] = pending.get(entry['name'], 0)\n entry['queued_count'] = queued.get(entry['name'], 0)\n entry['running_count'] = running.get(entry['name'], 0)\n\n return res", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def dump_queue(queue):\n result = []\n queue.put(\"STOP\")\n for i in iter(queue.get, 'STOP'):\n result.append(i)\n # time.sleep(.1)\n return result", "def read_queue(self):\n query = \"\"\"SELECT server,\n otp,\n modified,\n info,\n server_nonce\n FROM queue\"\"\"\n self._execute(query)\n return self._dictfetchall()", "async def queue(self, ctx):\n srv = self.get_server_dict(ctx.message.server.id)\n que = srv['queue']\n msg = self.format_song_display('▶', srv['song'][1], srv['song'][2], srv['song'][3])\n i = 1\n for item in que:\n line = self.format_song_display(i, item[1], item[2], item[3])\n i += 1\n msg += line\n await ctx.bot.send_message(ctx.message.channel, msg)", "def _get_njobs_in_queue(self, username):", "def jobs(self):\n return self._jobs", "def getJobList_impl(self):\n my_infos = TestJob.objects.filter(\n (Q(job_status='Running')|Q(job_status='Submitted')|Q(job_status='Incomplete'))\n &Q(check_or_not=True)\n )\n\n if not connection.in_atomic_block:\n self._commit_transaction(src='getInfosList_impl')\n print(\"###\", my_infos)\n logger.info(my_infos)\n return my_infos", "def job_info(self):\n def _sortkey(x):\n return x['job_name']\n\n resp = self._cmd(uri = '/jenkins_jobs')\n jobs = resp.get('jobs', [])\n return sorted(jobs, key=_sortkey)", "def _messages(self):\n q = [json.loads(i)['message'] for i in self.client.kv.get(\n 'rhumba.q.testqueue', [])]\n return q", "def search_jobs(self, bill_id: int = 0, limit: int = 0) -> List[Job]:\n pass", "def get_queue(queue_limits):\n\n queues, limits = queue_limits.items()\n queues.pop('')\n\n while(True): \n \n queued_jobs = qstat_plain()\n jobs = {queue : [j for j in queued_jobs if j.queue == queue] for queue in queues} \n jobs[''] = [j for j in queued_jobs if j.queue not in queues]\n\n for queue in queues:\n if len(jobs[queue]) < queue_limits[queue]:\n yield queue\n else:\n time.sleep(30)", 
"def list(self):\n\n for job_name in self.upstart.get_all_jobs():\n yield self.get_service(job_name)", "def monitor_queue(self):\n\n while True:\n job = self.queue.next()\n if job:\n # print(\"found %s\" % (job.job_id))\n\n job_name = job.payload[\"job_name\"]\n\n if job_name in self.mul_func_map:\n\n t = self.mul_func_map[job_name]\n p = multiprocessing.Process(target=t, args=(job,))\n p.daemon = True\n p.start()\n\n elif job_name in self.th_func_map:\n\n t = self.th_func_map[job_name]\n # create a thread to process the job\n p = threading.Thread(target=t, args=(job,))\n p.daemon = True\n # start the thread, going into the worker function\n p.start()\n\n elif job_name in self.fk_func_map:\n t = self.fk_func_map[job_name]\n if not os.fork():\n os.setsid()\n t(job)\n exit()\n else:\n # jobs in this queue that are unknown are presently being skipped\n # however they could probably get moved to a 'dead letter' queue\n # for closer examination\n print(\"unknown job name %s, skipping\" % (job_name))\n\n # throttle so that other worker subscribers get a chance\n time.sleep(self.queue_delay)\n else:\n time.sleep(self.poll_delay)\n\n # prints the number of threads\n # print len(threading.enumerate())", "def qstat(jobids: T.List[str], show_finished: bool=False):\n\n extra_args = []\n if show_finished:\n extra_args.append('-x')\n\n subp = subprocess.run(\n [\"/opt/pbs/default/bin/qstat\", *extra_args, \"-f\", \"-F\", \"json\", *jobids],\n text=True,\n check=True,\n stdout=subprocess.PIPE,\n )\n\n jobs = clean_qstat_json(subp.stdout)[\"Jobs\"]\n\n return jobs", "def get_job_list():\n\tdirlist = os.listdir(\".\")\n\tjoblist = [x for x in dirlist if \"job.sh\" in x and x in job_dict]\n\ttmplist = [x for x in dirlist if \"job.sh\" in x and x not in job_dict]\n\tdef compare_function(s: str):\n\t\treturn job_dict[s].order\n\tjoblist.sort(key=compare_function)\n\tjoblist.extend(tmplist)\n\treturn joblist", "def queue_to_list(queue):\n result = []\n while queue.qsize() != 0:\n result.append(queue.get())\n return result", "def list(address: Optional[str], headers: Optional[str], verify: Union[bool, str]):\n client = _get_sdk_client(address, headers=headers, verify=verify)\n # Set no_format to True because the logs may have unescaped \"{\" and \"}\"\n # and the CLILogger calls str.format().\n cli_logger.print(pprint.pformat(client.list_jobs()), no_format=True)", "def list():\n manager = Actions()\n tasks_list = manager.get_tasks_list()\n console_utils.print_tree(manager, tasks_list)", "def get_current_jobs(ssh):\n stdin, stdout, stderr = ssh.exec_command('qstat')\n\n running_jobs = []\n for line in stdout.readlines():\n if '.awonmgr2' in line:\n jobid = line.split('.awonmgr2')[0]\n running_jobs.append(jobid)\n \n return running_jobs", "def __init__(self):\n self.queues=[]", "def running_jobs_sherlock():\n user = os.environ['USER']\n\n return subprocess.check_output(['squeue', '-u',user,'-o','%Z']).split()[1:]", "def queue(self):\n if self._queue is None:\n qstr = self.query_queue(user=self._user)\n self._queue = self.parse_queue_str(qstr, keys=self.QSTAT_KEYS)\n\n return self._queue", "def html_queued_job_table(self, job_list):\n ## class QueuePage()\n queued_list = []\n for jdict in job_list:\n if jdict.get(\"state\") == \"queued\":\n ## Populate queued list for XHTML table below\n queued_list.append(jdict)\n\n l = ['<center>',\n '<b>%d Queued Jobs</b>' % (len(queued_list)),\n '<table class=\"status_table\">',\n '<tr class=\"status_table_head\">',\n '<th>Job ID</th>',\n '<th>Struct ID</th>',\n '<th>Chain:Num 
Res</th>',\n '<th>Submission Date</th>',\n '</tr>']\n\n row1 = True\n for jdict in queued_list:\n if row1:\n l.append('<tr class=\"status_table_row1\">')\n else:\n l.append('<tr class=\"status_table_row2\">')\n row1 = not row1\n\n l += ['<td>%s</td>' % (self.explore_href(jdict[\"job_id\"])),\n '<td>%s</td>' % (self.rcsb_href(jdict)),\n '<td>%s</td>' % (self.chain_size_string(jdict)),\n '<td>%s</td>' % (timestring(jdict[\"submit_time\"])),\n '</tr>' ]\n\n if len(queued_list) == 0:\n l += ['<tr>',\n '<td colspan=\"4\" class=\"c\">',\n 'No Jobs Queued',\n '</td>',\n '</tr>']\n\n l.append('</table></center>')\n\n return \"\".join(l)", "def get_queue_items(self, queue_name):\n proc = start_proc([\"/usr/bin/sudo\", \"rabbitmqctl\", \"list_queues\"],\n shell=False)\n for line in iter(proc.stdout.readline, \"\"):\n print(\"LIST QUEUES:\" + line)\n m = re.search(r\"%s\\s+([0-9]+)\" % queue_name, line)\n if m:\n return int(m.group(1))\n return None", "def get_registered_jobs(self):\n with self.__lock:\n return list(self.__registered_jobs)", "def getJobListFromDB(self):\n\t\tsql = \"SELECT jobname from hudson_jobs\"\n\t\tcsr = self.db.cursor()\n\t\tcsr.execute(sql)\n\t\tdata = [ x[0] for x in csr.fetchall() ]\n\t\treturn data", "def test_list_queues(self, rabbitmq):\n assert rabbitmq.list_queues() == []\n self.declare_queue(rabbitmq, \"q1\")\n self.declare_queue(rabbitmq, \"q2\")\n assert sorted(rabbitmq.list_queues()) == [('q1', '0'), ('q2', '0')]", "def refresh_queue_status(self):\n \n # Get the jobid and state for all jobs pending/running/completed for the current user\n qacct_stdout=self.run_grid_command_resubmit([\"qacct\",\"-o\",getpass.getuser(),\"-j\",\"*\"])\n \n # info list should include jobid, state, cpus, time, and maxrss\n info=[]\n job_status=[]\n for line in qacct_stdout.split(\"\\n\"):\n if line.startswith(\"jobnumber\") or line.startswith(\"job_number\"):\n if job_status:\n info.append(job_status)\n job_status=[line.rstrip().split()[-1],\"NA\",\"NA\",\"NA\",\"NA\"]\n # get the states for completed jobs\n elif line.startswith(\"failed\"):\n failed_code = line.rstrip().split()[1]\n if failed_code != \"0\":\n if failed_code in [\"37\",\"100\"]:\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n elif line.startswith(\"deleted_by\"):\n if line.rstrip().split()[-1] != \"NONE\" and job_status[1] == self.job_code_terminated:\n job_status[1]=self.job_code_deleted\n elif line.startswith(\"exit_status\"):\n # only record if status has not yet been set\n if job_status[1] == \"NA\":\n exit_status = line.rstrip().split()[-1]\n if exit_status == \"0\":\n job_status[1]=self.job_code_completed\n elif exit_status == \"137\":\n job_status[1]=self.job_code_terminated\n else:\n job_status[1]=self.job_code_error\n # get the current state for running jobs\n elif line.startswith(\"job_state\"):\n job_status[1]=line.rstrip().split()[-1]\n elif line.startswith(\"slots\"):\n job_status[2]=line.rstrip().split()[-1]\n elif line.startswith(\"ru_wallclock\"):\n try:\n # get the elapsed time in minutes\n job_status[3]=str(float(line.rstrip().split()[-1])/60.0)\n except ValueError:\n job_status[3]=\"NA\"\n elif line.startswith(\"ru_maxrss\"):\n job_status[4]=line.rstrip().split()[-1]+\"K\"\n \n if job_status:\n info.append(job_status)\n\n return info", "def query_queue(self, job_name=None, user=None, qformat=None,\n skip_rows=None):", "def __str__(self):\r\n return f\"Queue object: {self.queue}\"", "def jobs(self) -> List[Job]:\n return self._jobs.values()", "def showqueue(self, 
irc, msg, args):\n if len(self._queue) == 0:\n irc.reply(\"The queue is empty\", private=True)\n return\n pos = self._find_in_queue(msg.nick)\n if pos < 0:\n irc.reply(\"You're not in the queue, did your nick change?\",\n private=True)\n return\n irc.reply(\"You are queued at position %d\" % (pos + 1), private=True)", "def get_queue(self):\r\n return _channeldata[self.chan].queue", "def get_jobs(self, type = None):\n joblist = JobList()\n for jobs in self.sm.get_jobs(type = type):\n joblist.add_job(jobs['identifier'], jobs['phase'])\n return joblist.tostring()", "def queues(self):\r\n return queues.Queues(self)", "def traceQueueContents(self):\n from typhon.objects.printers import toString\n debug_print(\"Pending queue for \" + self.name.encode(\"utf-8\"))\n for (resolver, target, atom, args, namedArgs) in self._pending:\n debug_print(toString(target).encode('utf-8') +\n \".\" + atom.verb.encode('utf-8') + \"(\" +\n ', '.join([toString(a).encode('utf-8')\n for a in args]) + \")\")", "async def list_tasks():" ]
[ "0.7661594", "0.7647107", "0.762154", "0.75825155", "0.7498685", "0.74639344", "0.7404997", "0.71474695", "0.7096951", "0.7051303", "0.69853973", "0.69715", "0.69715", "0.69136894", "0.68906635", "0.6835532", "0.683014", "0.6773141", "0.6738064", "0.6734032", "0.6690407", "0.6667332", "0.66628206", "0.66418636", "0.6626909", "0.6607109", "0.66056544", "0.65889394", "0.6514371", "0.6479179", "0.6478803", "0.6469853", "0.6467462", "0.64595705", "0.6459176", "0.6435052", "0.6420893", "0.64189005", "0.64140457", "0.64030176", "0.6373613", "0.6372211", "0.635855", "0.6331892", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.6298411", "0.629657", "0.629657", "0.6287086", "0.62842304", "0.62823766", "0.6282016", "0.6277141", "0.6237394", "0.62242556", "0.6220853", "0.6216549", "0.62131065", "0.62131065", "0.62084717", "0.62005985", "0.61960965", "0.6186906", "0.61822087", "0.617739", "0.61707264", "0.6170529", "0.6159845", "0.614614", "0.6091975", "0.6090486", "0.608464", "0.60818493", "0.6080778", "0.6079356", "0.6074576", "0.60712534", "0.6067895", "0.60660803", "0.60441107", "0.6041804", "0.6041154", "0.60367304", "0.6027864", "0.6018476", "0.6013016", "0.60105157", "0.5993234", "0.5975003", "0.59716153", "0.5961054", "0.59550273", "0.5940798", "0.59390455" ]
0.0
-1
A generator that can be used to iterate over all of the message handlers that belong to this instance.
def iter_message_handlers(self):
    for name in dir(self):
        attr = getattr(self, name)
        if isinstance(attr, MessageHandler):
            yield attr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def get_handlers(self):\n\n # Get handlers \n logger.debug(\"%s: Returned %d handlers.\" % \\\n (self.__class__.__name__, len(self._handlers)))\n return self._handlers[:]", "def __iter__(self):\n return _iterEvents(self._eventHandlers)", "def get_handlers(self):\n return self._handlers", "def get_handlers(self):\n raise NotImplementedError()", "def _handlers(self):\n if not self.__handlers:\n handlers = {}\n for key in dir(self):\n # Underscores are protected\n if key.startswith('_'):\n continue\n attr = getattr(self, key)\n # Tree syntax\n if issubclass(type(attr), Handler) and attr != self:\n for name, handler in attr._handlers.iteritems():\n name = '%s.%s' % (key, name)\n handlers[name] = handler\n # Normal syntax\n elif hasattr(attr, '__call__'):\n handlers[key] = attr\n self.__handlers = handlers\n return self.__handlers", "def send(self, *args, **kw):\n result = []\n for handler in self.registry.values():\n result.append(handler(*args, **kw))\n return result", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "def u2handlers(self):\n return []", "def get_handlers(self):\n svs = []\n paths = self.get_paths()\n for p in paths:\n s = re.sub(r\"(?<={)\\w+}\", \".*\", p).replace(\"{\", \"\")\n o = re.sub(r\"(?<=<)\\w+\", \"\", s).replace(\"<\", \"\").replace(\">\",\"\").replace(\"&\", \"\").replace(\"?\", \"\")\n svs.append((o, self))\n\n return svs", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def event_handlers(self):\n if self._event_handlers is not None:\n return self._event_handlers\n\n # Get event handlers for self\n ordered = []\n unordered = []\n cls = type(self)\n for cls_name in dir(cls):\n cls_item = getattr(cls, cls_name, None)\n if isinstance(cls_item, HandlerDecorator):\n bound_handler = getattr(self, cls_name)\n if cls_item.priority is not None:\n ordered.append((cls_item, bound_handler))\n else:\n unordered.append((cls_item, bound_handler))\n ordered.sort(key=lambda h: h[0].priority)\n\n # get parent event handlers\n try:\n parent = self.parent.acquire.event_handlers\n except AttributeError:\n parent = []\n\n # Combine, cache and return\n handlers = [*ordered, *unordered, *parent]\n self._event_handlers = handlers\n return handlers", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers 
= []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def getSimulationEventHandlers(self): \r\n return self.__eventHandlers.values()", "def signal_callbacks(self):\n for name in self.lookup_dict[self.__class__]:\n yield name, getattr(self, name)", "def get_handlers(self):\n # TODO(eric.cousineau): Consider just using `OrderedDict`.\n return map(self._handlers.get, self._frame_names)", "def get_registered_handlers(self):\n return list(self._registry.values())", "def handles(self) -> Union[Callable, Sequence]:\n return self._handles", "def get_command_handlers(self):\n\t\treturn self.command_handlers", "def _handlers(self) -> tuple:\n return self._classname2handlers[self.deco_class.__name__]", "def __iter__(self):\n while True:\n m = self.recv(timeout=1.0)\n if m is not None:\n yield m\n logger.debug(\"done iterating over bus messages\")", "def sender_iter(self):\n while 1:\n yield self.send_next()", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def event_handlers(self):\n if self.is_flow:\n return self._event_handlers\n\n try:\n return self._event_handlers\n except AttributeError:\n return self.flow._event_handlers", "def messageCollector():\n\ttempui = CatchallUI()\n\trealui = base.ui\n\ttry:\n\t\tbase.ui = tempui\n\t\tyield tempui\n\tfinally:\n\t\tbase.ui = realui", "def _get_instance_handlers ( self, name ):\n return [ ( getattr( self, method_name ), item_name )\n for method_name, item_name in\n self.__class__.__instance_traits__[ name ] ]", "def send_all(self, service, payload):\n for handler in self.partyline[service]:\n try:\n yield handler(payload)\n except HighAndDry:\n pass", "def _iterate_messages(self):\n if not self.targets:\n raise sex.SullyRuntimeError(\"No targets specified in session\")\n\n if not self.edges_from(self.root.id):\n raise sex.SullyRuntimeError(\"No requests specified in session\")\n\n self._reset_fuzz_state()\n\n for x in self._iterate_messages_recursive(this_node=self.root, path=[]):\n yield x", "def ReadMessageHandlerRequests(self):\n res = []\n leases = self.message_handler_leases\n for requests in self.message_handler_requests.values():\n for r in requests.values():\n res.append(r.Copy())\n existing_lease = leases.get(r.handler_name, {}).get(r.request_id, None)\n res[-1].leased_until = existing_lease\n\n return sorted(res, key=lambda r: r.timestamp, reverse=True)", "def messages(self):\n return list(iter(self))", "def u2handlers(self):\n handlers = suds.transport.http.HttpTransport.u2handlers(self)\n if self.ssl_context:\n try:\n handlers.append(HTTPSHandler(context=self.ssl_context,\n check_hostname=self.verify))\n except TypeError:\n # Python 2.7.9 HTTPSHandler does not accept the\n # check_hostname keyword argument.\n #\n # Note that even older Python versions would also\n # croak on the context keyword argument. 
But these\n # old versions do not have SSLContext either, so we\n # will not end up here in the first place.\n handlers.append(HTTPSHandler(context=self.ssl_context))\n return handlers", "def flush(self):\n\n for handler in self.handlers:\n handler.flush()", "def get_rest_handlers(self):\n return self._rest_handlers", "def get_app_handlers(self):\n return []", "def makeHandlers(self):\n\n yield self.loadGrids.start(funcSelf=self)\n yield self.updateClientWatchedGrids.start(funcSelf=self)\n logger.debug(\"RPCs started\")", "def __iter__(self):\n return iter([self.format_message(record) for record in self._messages])", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def handler_mappings(self):\n return {}", "def handles(self) -> Generator[dict, None, None]:\n\n for handle in self.session.plugins.handles().collect():\n\n # Regardless of if we use this handle or not, can make a process node\n # if this is a process that wasnt in pslist/pstree.\n e_proc = handle[\"_EPROCESS\"]\n pid = int(e_proc.pid)\n\n if pid in self.processes:\n proc_data = self.processes[pid]\n else:\n logger.warn(f\"Previously unseen PID={pid} showed up in handles\")\n\n if handle[\"obj_type\"] == \"File\":\n\n full_file_path = handle[\"details\"]\n\n if full_file_path.startswith(\"\\\\\"):\n full_file_path = full_file_path[1:]\n\n file_path, file_name = split_path(full_file_path)\n yield {\n FieldNames.FILE_PATH: file_path,\n FieldNames.FILE_NAME: file_name,\n **proc_data,\n FieldNames.EVENT_TYPE: EventTypes.FILE_OPENED,\n }\n elif handle[\"obj_type\"] == \"Key\":\n\n key_path = handle[\"details\"]\n if key_path.startswith(\"MACHINE\\\\\"):\n start_indx = len(\"MACHINE\\\\\")\n key_path = key_path[start_indx:]\n\n hive = key_path.split(\"\\\\\")[0]\n key = key_path.split(\"\\\\\")[-1]\n key_path = key_path[len(hive) : len(key)]\n\n yield {\n FieldNames.HIVE: hive,\n FieldNames.REG_KEY: key,\n FieldNames.REG_KEY_PATH: key_path,\n **proc_data,\n FieldNames.EVENT_TYPE: EventTypes.REG_KEY_OPENED,\n }", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def connection_handler(self):\n\t\tyield", "def fileHandlers(self):\n fileHandlers = list()\n handlers = self.logger.handlers\n for handler in handlers:\n try:\n if handler._name.startswith(\"LogFile-\"):\n fileHandlers.append(handler)\n except:\n pass\n return fileHandlers", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def get_file_handlers(self):\n return []", "def get_manager_with_least_processed_events_generator(self):\n try:\n managers = self.db_handler.get_manager_with_least_processed_events()\n\n for manager in managers:\n self.logger.write_to_log(f'returned manager for notification', 'model')\n yield (len(managers) - managers.index(manager)) - 1, manager[0]\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def _generator(self):\n\t\twhile(1):\n\t\t\ttry:\n\t\t\t\tm = self.messages.pop(0) # pop 
the first Flash2Message in the list\n\t\t\t\tyield m\n\t\t\texcept IndexError:\n\t\t\t\traise StopIteration", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def consume(self):\n try:\n callback = self._handlers[type(self)]\n _LOGGER.debug(\"Calling handler %r\", callback)\n callback(self)\n except KeyError:\n _LOGGER.debug(\"%r has no handlers\", self)", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def get_handlers():\n handlers = list()\n\n #login\n handlers.append((r'/login', Login))\n handlers.append((r'/logout', Logout))\n\n # main\n handlers.append((r'/', Index))\n\n\n #user\n handlers.extend(get_routes(UserController))\n\n #role\n handlers.extend(get_routes(RoleController))\n\n\n handlers.extend(get_routes(ApiServiceController))\n\n handlers.extend(get_routes(InventarioController))\n\n return handlers", "def import_all_handlers(self):\n import os\n exclude_list=[\"base\"]\n\n #\n # the list of handlers (excluding base. Add more you dont want\n # to be loaded or inspected to exclude_list above.)\n #\n mods=[]\n module_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), 'handlers'))\n #print(\"importing handlers from: \" + module_path)\n for mod in os.listdir( module_path ):\n mod = mod.split(\".\")[0]\n if not mod.startswith(\"_\") and not mod in exclude_list:\n #print(\" now processing: \" + str(mod))\n mods.append(mod)\n \n #print(\"mods: \" + str(mods))\n class_list = []\n # load all the models from their modules (mods)\n #print(str(mods))\n import importlib\n for m in mods:\n #print(\"importing: \" + 'pow_comments.handlers.' + m) \n try:\n mod = importlib.import_module('pow_comments.handlers.' 
+ m)\n except:\n pass\n #print(dir(mod))", "def release_handlers(self):\n for handler in self.handlers:\n handler.close()", "def __iter__(self):\n for channel in self.channels.itervalues():\n yield channel", "def __iter__(self):\n self.enable_receiving()\n with closing(select.epoll()) as notifier:\n notifier.register(self, select.EPOLLIN)\n while True:\n events = eintr_retry_call(notifier.poll)\n for event in events:\n yield self.receive_device()", "def import_handlers(self):\n if not self._import_handlers:\n self._initialize_handlers()\n\n return self._import_handlers", "def cleanup_handlers():\n # There's nothing to set up so we immediately yield control.\n yield\n # After the with block ends we cleanup any output handlers.\n for match_func in match_stream_handler, match_syslog_handler:\n handler, logger = find_handler(logging.getLogger(), match_func)\n if handler and logger:\n logger.removeHandler(handler)", "def __iter__(self):\n yield from self.gen", "def listeners_iter(self):\n topics = set(six.iterkeys(self._topics))\n while topics:\n event_type = topics.pop()\n try:\n yield event_type, self._topics[event_type]\n except KeyError:\n pass", "def __iter__(self):\n for mapping in self._mappings.values():\n yield mapping", "def map_all(self, policies: Iterable[Any]) -> Sequence[HandlerFactory]:\n return [self.map(policy) for policy in policies]", "def __iter__(self):\n for key in self._ctx:\n yield key", "def __iter__(self):\n yield from self.calls", "def event_stream(self):\n for message in self.subscribe():\n event = message_to_sse(message[\"data\"])\n yield event", "def _handle(self, content):\n for func, args, kwargs in self.handlers:\n func(content, *args, **kwargs)\n if not self.handlers:\n self.error = \"No handlers specified\"\n logger.error(self.error)\n raise Exception(self.error)", "def get_handler(self):\n return self._Handler(self)", "def get_handlers(self, event, **kwargs):\n name = event.name\n handlers = set()\n\n _handlers = set()\n _handlers.update(self._handlers.get(name, []))\n\n for _handler in _handlers:\n handlers.add(_handler)\n\n for c in self.components.copy():\n handlers.update(c.get_handlers(event, **kwargs))\n\n return handlers", "def generators(self):\n return self._generators", "def probe(self):\n\n # make sure the filehandler is still valid\n # (e.g. 
file stat hasnt changed, file exists etc.)\n if not self.validate_file_handler():\n return []\n\n messages = []\n\n # read any new lines and push them onto the stack\n for line in self.fh.readlines(self.max_lines):\n data = {\"path\":self.path}\n msg = self.new_message()\n\n # process the line - this is where parsing happens\n parsed = self.process_line(line, data)\n if not parsed:\n continue\n data.update(parsed)\n\n # process the probe - this is where data assignment\n # happens\n data = self.process_probe(data)\n msg[\"data\"] = [data]\n messages.append(msg)\n\n\n # process all new messages before returning them\n # for emission\n messages = self.process_messages(messages)\n\n return messages", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def __iter__(self):\n for node in self.grammar.walk():\n yield node", "def get_handlers():\n\n js_path_opts = {\"path\": abspath(join(dirname(__file__), \"js\"))}\n\n return [\n (\"/networktables/ws\", NetworkTablesWebSocket),\n (\"/networktables/(.*)\", NonCachingStaticFileHandler, js_path_opts),\n ]", "def __iter__(self) -> Generator[str, None, None]:\n\n yield from self.__dict__[\"members\"]", "def __next__(self):\n for child in self.children:\n yield child", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def subscribers(self) -> Iterator[Any]:\n yield from self.get_subscribers()", "def Hrep_generator(self):\n for H in self.Hrepresentation():\n yield H", "def __init__(self, fsm):\n self._handlers = {}\n for methodName, method in inspect.getmembers(self, inspect.ismethod):\n if methodName.startswith('on'):\n eventName = _NameHelper.eventNameFromHandlerName(methodName)\n event = fsm._events.__dict__[eventName]\n self._handlers[event] = method", "def get_handlers(self, component_context, instance):\n # Extract information from the context\n requirements = component_context.get_handler(\n ipopo_constants.HANDLER_REQUIRES_BEST\n )\n requires_filters = component_context.properties.get(\n ipopo_constants.IPOPO_REQUIRES_FILTERS, None\n )\n\n # Prepare requirements\n requirements = self._prepare_requirements(\n requirements, requires_filters\n )\n\n # Set up the runtime dependency handlers\n return [\n BestDependency(field, requirement)\n for field, requirement in requirements.items()\n ]", "def process_messages(self, messages):\n\n return messages", "def getHandler(self):\n raise NotImplementedError(\"Shouldn't be called\")", "def get_routes(self):\n return [\n (route, handler.handler_class, handler.init_kwargs)\n for route, handler in self._routes.iteritems()\n ]", "def get_verb_handler_extensions():\n extensions = instantiate_extensions(__name__)\n for name, extension in extensions.items():\n extension.VERB_HANDLER_NAME = name\n return order_extensions_by_name(extensions)", "def get_andesite_socket_response_handlers(obj: Any) -> Dict[andesite.WebSocketInterface, SocketResponseHandler]:\n try:\n handlers = getattr(obj, SOCKET_RESPONSE_HANDLERS_ATTR)\n except AttributeError:\n handlers = {}\n setattr(obj, SOCKET_RESPONSE_HANDLERS_ATTR, handlers)\n\n return handlers", "def server_message_iterator(self) -> Iterator[ServerMessage]:\n while not self._is_closed():\n with self._cv:\n self._cv.wait_for(\n lambda: self._status\n in [Status.CLOSED, Status.SERVER_MESSAGE_AVAILABLE]\n )\n\n self._raise_if_closed()\n\n 
server_message = self._server_message # Read\n self._server_message = None # Reset\n\n # Transition before yielding as after the yield the execution of this\n # function is paused and will resume when next is called again.\n # Also release condition variable by exiting the context\n self._transition(Status.AWAITING_CLIENT_MESSAGE)\n\n if server_message is None:\n raise Exception(\"Server message can not be None\")\n\n yield server_message", "def events_iter(self):\n for event_type in self._watchable_events:\n yield event_type", "def get_message_handler(self, taxii_message):\n raise NotImplementedError()", "def iterkeys(self):\n self.proto.iterinit()\n try:\n while True:\n yield wait(self.proto.iternext())\n except TyrantError:\n pass", "def sqs_messages(queue: str) -> Generator[Dict[str, Any], None, None]:\n\n while True:\n response = get_client(\"sqs\").receive_message(QueueUrl=queue)\n if \"Messages\" not in response:\n break\n msg = json.loads(response[\"Messages\"][0][\"Body\"])\n records = json.loads(msg[\"Message\"])\n retd = {}\n retd[\"key\"] = records[\"Records\"][0][\"s3\"][\"object\"][\"key\"]\n retd[\"bucket\"] = records[\"Records\"][0][\"s3\"][\"bucket\"][\"name\"]\n retd[\"ReceiptHandle\"] = response[\"Messages\"][0][\"ReceiptHandle\"]\n yield retd", "def __iter__(self):\n for value in self.__dict__.values():\n yield value", "def iterator(self):\n yield", "def __iter__(self):\n handle = self.parent.handle\n cur = getattr(gv, \"first%s\" % self.type)(handle)\n nextitem = getattr(gv, \"next%s\" % self.type)\n while gv.ok(cur):\n yield self.get(gv.nameof(cur))\n cur = nextitem(handle, cur)", "def root_handlers(desc=True):\n rl = get_root_logger()\n sort_hdlrs = sorted(rl.handlers, key=attrgetter(\"level\"), reverse=desc)\n\n return sort_hdlrs", "def receive(self):\n while True:\n if self.pending_request:\n request = self.unpack(self.pending_request)\n self.pending_request = None\n else: \n request = self.unpack(self.mh.receive_message())\n if request:\n yield request\n else: break", "def __iter__(self):\n\n for each in list(self.keys()):\n yield each", "def image_generator(self, some_messages):\n offset = 0\n outer = 0\n inner = 0\n\n for a_message in some_messages:\n msg_id = a_message.gmail_id\n for att in a_message.attachments():\n if att.type in ATTACHMENT_MIMES:\n att_type = att.type.split(\"/\")[1]\n an_image = Image(a_message, att)\n\n # map each image id with a corresponding message id for later parsing\n if an_image.id in self.mapping:\n self.mapping[msg_id].append(a_message)\n else:\n self.mapping[msg_id] = [a_message]\n\n self.num_attachments = self.count_attachments(self.num_attachments)\n\n yield an_image", "def dispatch(greps):\n try:\n while True:\n line = (yield)\n for grep in greps:\n grep.send(line)\n except GeneratorExit:\n for grep in greps:\n grep.close()", "def events(self) -> Generator[dict, None, None]:\n\n for audit_file, audit_type in self.identified_files.items():\n temp_file_path = f\"{self.tempdir.name}/{audit_file}\"\n\n if audit_type == \"stateagentinspector\":\n yield from self.parse_agent_events(temp_file_path)\n\n # If we have atleast the hits.json file, we can make alert nodes\n if self.alert_files[\"hits.json\"]:\n yield from self.parse_alert_files(self.tempdir.name)\n\n self.tempdir.cleanup()", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "def WriteMessageHandlerRequests(self, requests):\n now = rdfvalue.RDFDatetime.Now()\n for r in 
requests:\n flow_dict = self.message_handler_requests.setdefault(r.handler_name, {})\n cloned_request = r.Copy()\n cloned_request.timestamp = now\n flow_dict[cloned_request.request_id] = cloned_request", "def notifies(self) -> Iterator[Notify]:\n while 1:\n with self.lock:\n ns = self.wait(notifies(self.pgconn))\n enc = self.client_encoding\n for pgn in ns:\n n = Notify(\n pgn.relname.decode(enc), pgn.extra.decode(enc), pgn.be_pid\n )\n yield n" ]
[ "0.75313586", "0.7092574", "0.689368", "0.6885732", "0.6611093", "0.64954853", "0.6490498", "0.6438109", "0.6379151", "0.6324771", "0.6322066", "0.62982225", "0.62764996", "0.6234379", "0.6224543", "0.62200075", "0.61538786", "0.60254633", "0.6023188", "0.60213643", "0.594284", "0.59243566", "0.5918119", "0.5899713", "0.5887125", "0.5881722", "0.5860365", "0.582422", "0.5808762", "0.57450217", "0.5715846", "0.56983757", "0.56947565", "0.56886715", "0.56661105", "0.5665099", "0.56650937", "0.566431", "0.5633507", "0.56281924", "0.5611216", "0.56067723", "0.5589687", "0.5568326", "0.5543127", "0.553703", "0.55055356", "0.5495081", "0.5444765", "0.5437905", "0.5435149", "0.5417303", "0.5390345", "0.53878576", "0.53873134", "0.5384583", "0.53740853", "0.53608686", "0.5353947", "0.53534406", "0.5351508", "0.5340303", "0.530496", "0.52915555", "0.5253283", "0.524616", "0.523766", "0.523478", "0.5231831", "0.5223661", "0.52233666", "0.52177787", "0.5215051", "0.51973784", "0.5186831", "0.5171305", "0.51694924", "0.51476794", "0.51447064", "0.51422334", "0.51340497", "0.5130978", "0.512901", "0.5126341", "0.5125852", "0.5106811", "0.5097804", "0.5093981", "0.508445", "0.507805", "0.5066226", "0.5055539", "0.50534374", "0.50518066", "0.5050308", "0.50488216", "0.5040632", "0.50312746", "0.50264055", "0.5023751" ]
0.86009705
0
Adds the given service's message handlers to our managed message handlers.
def register_service(self, service):
    for message_handler in service.iter_message_handlers():
        self.message_handlers[message_handler.name] = message_handler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connect(self, service, handler):\n self.partyline.setdefault(service, []).append(handler)", "def register_websock_handlers(self, service, new_client, new_message, close_client):\n if service in self.websock_handlers:\n L.error(\"Error: service:\" + service + \" is already registered\")\n return False\n handlers = {\n \"new_client\":new_client,\n \"new_message\":new_message,\n \"close_client\":close_client\n }\n self.websock_handlers[service] = handlers\n return True", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def add_handler(self, handler, backtrack = False):\n\n # Add Handler\n self._handlers.append(handler)\n logger.debug(\"%s: handler %s added.\" % \\\n (self.__class__.__name__, handler.__name__))\n \n # Backtrack\n if backtrack:\n for message in self.get_waiting(): handler(message)\n logger.debug(\"%s: handler %s backtracked.\" % \\\n (self.__class__.__name__, handler.__name__))", "def addAllStatics(self, module=None):\n module = module or sys.modules[self.__module__]\n\n servicehandler_classes = inspect.getmembers(module, is_ServiceHandler)\n for servicehandler in servicehandler_classes:\n self.add(servicehandler[1])", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def add_handler(self, handler):\n pass", "def add(self, handler, on_error=None):\n self.handlers.append(handler)", "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def bind(self, svc, svc_ref):\n with self._lock:\n if ORDER_HANDLER in svc_ref.get_property(pelix.OBJECTCLASS):\n targets = svc_ref.get_property(ORDER_TARGETS)\n if isinstance(targets, (list, tuple)):\n for target in targets:\n self._target_handlers.setdefault(target, []).append(svc)\n\n else:\n self._target_handlers.setdefault(str(targets), []).append(svc)", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "async def async_service_handler(service):\n _LOGGER.info(\"%s service called\", service.service)\n method = SERVICE_TO_METHOD.get(service.service)\n if not method:\n _LOGGER.warning(\"Unknown service method %s\", service.service)\n return\n\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n component = hass.data.get(SWITCH_DOMAIN)\n if entity_ids:\n target_switches = [component.get_entity(entity) for entity in entity_ids]\n else:\n return\n\n method_name = method[\"method\"]\n _LOGGER.debug(\"Service handler: %s %s\", method_name, params)\n\n for entity in target_switches:\n if not hasattr(entity, method_name):\n _LOGGER.error(\"Service not implemented: %s\", method_name)\n return\n await getattr(entity, method_name)(**params)", "def _register_services(self) -> None:\n\n for isr in self.immediate_services_with_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n isr_instance = isr()\n for handler_type in isr.message_handler_types():\n # for each explicitly supported type, add it to the router\n 
self.immediate_msg_with_reply_router[handler_type] = isr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_with_reply_router[\n handler_type_subclass\n ] = isr_instance\n\n for iswr in self.immediate_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n iswr_instance = iswr()\n for handler_type in iswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.immediate_msg_without_reply_router[handler_type] = iswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.immediate_msg_without_reply_router[\n handler_type_subclass\n ] = iswr_instance\n\n for eswr in self.eventual_services_without_reply:\n # Create a single instance of the service to cache in the router corresponding\n # to one or more message types.\n eswr_instance = eswr()\n for handler_type in eswr.message_handler_types():\n\n # for each explicitly supported type, add it to the router\n self.eventual_msg_without_reply_router[handler_type] = eswr_instance\n\n # for all sub-classes of the explicitly supported type, add them\n # to the router as well.\n for handler_type_subclass in get_subclasses(obj_type=handler_type):\n self.eventual_msg_without_reply_router[\n handler_type_subclass\n ] = eswr_instance\n\n # Set the services_registered flag to true so that we know that all services\n # have been properly registered. This mostly exists because someone might\n # accidentally delete (forget to call) this method inside the __init__ function\n # of a sub-class of Node.\n self.services_registered = True", "def register_message_handlers(journal):\n journal.dispatcher.register_message_handler(\n DumpQuorumMessage, _dumpquorumhandler)", "def get_message_handlers(self):\n\t\treturn self.message_handlers", "def registerMessageHandler(self, message_handler, message_priority_list):\n if isinstance(message_handler, MessageHandler):\n for key in message_priority_list:\n rule = (message_priority_list[key], message_handler)\n self.message_handlers[key].append(rule)\n self.message_handlers[key].sort() # Keep priority order\n else:\n self.logger.critical(\n \"MessageHandler registration failed. 
Object \" +\n repr(message_handler) +\" is invalid type.\")\n raise TypeError(\"Only MessageHandlers can be registered!\")\n self.logger.debug(\"MessageHandler '\" + str(message_handler) +\n \"' registered to the message bus.\")", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def register_handlers(dp, di_container: di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def addhandler(self, txt, handler):\n self.handlers[txt] = handler\n rlog(0, 'webserver', '%s handler added' % txt)", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def addHandler(self, fn):\n self.handlers.append(fn)", "def set_added_handler(self, handler):\n self._added_handler = handler", "def add_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.add_service(self, service=service)", "def add_package_handler(self, package_name, cls):\n for module in messages.MESSAGES:\n if self._fuzzy_module_name_eq(module, package_name):\n for name in module.DESCRIPTOR.message_types_by_name:\n self.add_handler(name, getattr(cls, 'on_' + name.lower()))", "def register_handler(self, method, handler):\n self.handlers[method] = handler", "async def reload_service_handler(service: ServiceCall) -> None:\n auto = [e for e in component.entities if not e.user_defined]\n\n if (conf := await component.async_prepare_reload()) is None:\n return\n await _async_process_config(hass, conf)\n\n await component.async_add_entities(auto)\n\n await async_reload_integration_platforms(hass, DOMAIN, PLATFORMS)", "def register_handler(self, handler):\n if handler.key in self.handlers.keys():\n raise ValueError(f'Key {handler.key} already registered')\n self.handlers[handler.key] = handler", "def register_handler(self, topic, handler):\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def media_player_service_handler(service):\n target_players = component.extract_from_service(service)\n\n method = SERVICE_TO_METHOD[service.service]\n\n for player in target_players:\n getattr(player, method)()\n\n if player.should_poll:\n player.update_ha_state(True)", "def send_all(self, service, payload):\n for handler in self.partyline[service]:\n try:\n yield handler(payload)\n except HighAndDry:\n pass", "def add_topic_handlers(self):\n self.client.message_callback_add(deployment_topic, self.on_deployment_topic)\n self.client.message_callback_add(illumination_topic, self.on_illumination_topic)", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def register_arrived_message_handler(self, arrived_message_class, 
handler):\n message_handler = IMessageHandler()\n message_handler.on_message = handler\n self.__connection.get_processor().set_arrived_msg_handler(arrived_message_class, message_handler)", "def add(self, service: AbstractService):\n self.services.append(service)", "def add_message_handler(self,message_handler,message_filter=Filters.text):\n\t\tif(callable(message_handler)):\n\t\t\tself.message_handlers.append((message_handler,message_filter))\n\t\telse:\n\t\t\traise NotCallableException(\"{} is not callable\".format(type(message_handler)))", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def provide_services(self, services):\n for conv in self.conversations():\n conv.set_remote('services', json.dumps(services))", "def register(self, handler):\n self.handlers.add(handler)\n return self", "def register_handler(self, handler):\r\n self.handler = handler", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def add_handler(self, handler):\n self.register(abcs.AHandler, handler, handler)", "def register(self, msg_type, handler):\n # Should check type is valid\n if not handler and msg_type in self.handlers.keys():\n del self.handlers[msg_type]\n return\n self.handlers[msg_type] = handler", "def service_ids(self, service_ids):\n\n self._service_ids = service_ids", "def add(self, handler):\n\n if not self.__limit or len(self.__handlers) < self.__limit:\n self.__handlers.append(handler)\n else:\n raise toolkit.PylonLimitError(\n toolkit.PylonLimitError.TOO_MANY_SUBSCRIBERS,\n '{0} supports only {1} subscriber'.format(\n type(self), self.__limit\n )\n )\n return self", "def add_service(self, service):\n self.app.add_service(service)", "def _handle(self, content):\n for func, args, kwargs in self.handlers:\n func(content, *args, **kwargs)\n if not self.handlers:\n self.error = \"No handlers specified\"\n logger.error(self.error)\n raise Exception(self.error)", "def addService(self, service):\n\t\tself.services.append(service)\n\t\treturn self", "def add_handler(handler_list, handler_function):\n if not handler_function in handler_list:\n handler_list.append(handler_function)", "def u2handlers(self):\n handlers = suds.transport.http.HttpTransport.u2handlers(self)\n if self.ssl_context:\n try:\n handlers.append(HTTPSHandler(context=self.ssl_context,\n check_hostname=self.verify))\n except TypeError:\n # Python 2.7.9 
HTTPSHandler does not accept the\n # check_hostname keyword argument.\n #\n # Note that even older Python versions would also\n # croak on the context keyword argument. But these\n # old versions do not have SSLContext either, so we\n # will not end up here in the first place.\n handlers.append(HTTPSHandler(context=self.ssl_context))\n return handlers", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def load_services(service_store):\n service_store.register_service(GetDrugStoreService)\n service_store.register_service(FuelLevelService)\n service_store.register_service(SetFuelLevelService)\n service_store.register_service(GetRobotPosition)\n service_store.register_service(SetRobotPosition)", "def add_handler(self, handler):\n if not isinstance(handler, EventHandler):\n raise TypeError(\"Not an EventHandler\")\n with self.lock:\n if handler in self.handlers:\n return\n self.handlers.append(handler)\n self._update_handlers()", "def __create_handler():\n if not ServiceHandler.instance:\n ServiceHandler.instance = ServiceHandler()\n return ServiceHandler.instance", "def get_handlers(self):\n raise NotImplementedError()", "def add_handler ( handler_list, handler_function ):\n if not (handler_function in handler_list):\n handler_list.append ( handler_function )\n \n #cellblender_added_handlers", "def AddHandler(self, evt_id, handler):\n self._handlers[evt_id] = handler", "def exposed_services(self, exposed_services):\n\n self._exposed_services = exposed_services", "def register_handler(cls, handler):\n with cls._lock:\n cls._handlers[cls] = handler", "def handle_message(self, sender, message):\n self.logger.debug('handle_message(%r, %r)', sender, message.handler)\n\n message_handler = self.message_handlers.get(message.handler)\n if message_handler is None:\n self.logger.warning(\"sender=%r, No handler found: '%s'\",\n sender, message.handler)\n return\n\n message_handler(sender, message)", "async def locked_service_handler(service: ServiceCall) -> None:\n async with service_lock:\n await groups_service_handler(service)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def register_handler(self, token, handler):\r\n self._handlers[token] = handler", "def u2handlers(self):\n return []", "def RegisterService():\n hooks.RegisterHook(SERVICE_NAME, 'file-exists', hook_class=HookForExists)\n hooks.RegisterHook(SERVICE_NAME, 'file-write',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-touch',\n hook_class=HookForWriteAndTouch)\n hooks.RegisterHook(SERVICE_NAME, 'file-get', hook_class=HookForGet)\n hooks.RegisterHook(SERVICE_NAME, 'list-files', 
hook_class=HookForListFiles)\n hooks.RegisterHook(SERVICE_NAME, 'list-dir', hook_class=HookForListDir)", "def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "async def async_service_handler(service):\n method = SERVICE_TO_METHOD.get(service.service)\n params = {\n key: value for key, value in service.data.items() if key != ATTR_ENTITY_ID\n }\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n if entity_ids:\n devices = [\n device\n for device in hass.data[DATA_KEY].values()\n if device.entity_id in entity_ids\n ]\n else:\n devices = hass.data[DATA_KEY].values()\n\n update_tasks = []\n for device in devices:\n if not hasattr(device, method[\"method\"]):\n continue\n await getattr(device, method[\"method\"])(**params)\n update_tasks.append(asyncio.create_task(device.async_update_ha_state(True)))\n\n if update_tasks:\n await asyncio.wait(update_tasks)", "def add_handler(self, name, callback):\n if not (name in self.handlers):\n self.handlers[name] = []\n self.handlers[name].append(callback)", "def _handle_topic(self, topic, messages):\n if topic not in self._topic_handlers:\n return self._default_handler(topic, messages)\n else:\n for handler in self._topic_handlers[topic]:\n handler.handle(self.manager, messages)", "def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass", "def add_handler(self, path, handler) -> None:\n if self.__test_path(path) and self.__test_path(handler):\n path_parts = self.__split_path(path) # Splits parts into constituent components\n self.route_trie.insert(path_parts, handler) # Passes parts on for addition to the trie", "def cacheHandlers(self):\n\n def collect_handlers(module):\n\n def wanted(member):\n return (isclass(member) and\n issubclass(member, handlers.HandlerBase) and\n member.__name__.endswith('Handler'))\n\n m = {}\n for name, obj in getmembers(module, wanted):\n m[name] = obj(self.skype)\n m[name].init()\n return m\n\n self.handlers = collect_handlers(handlers)\n if custom_handlers:\n self.handlers.update(collect_handlers(custom_handlers))", "def execute_handlers(self, handlers, transfer_batch, dilution_settings, robot_settings):\n for handler in handlers:\n self.execute_handler(handler, transfer_batch, dilution_settings, robot_settings)", "def service_handler(service):\n entity_id = ENTITY_ID_FORMAT.format(service.service)\n script = component.entities.get(entity_id)\n if script:\n script.turn_on()", "def async_service_handle(service):\n entity_ids = service.data.get(ATTR_ENTITY_ID)\n\n if entity_ids:\n devices = [device for device in manager.entities\n if device.entity_id in entity_ids]\n else:\n devices = manager.entities\n\n tasks = []\n for device in devices:\n if service.service == SERVICE_START:\n tasks.append(device.async_start_ffmpeg())\n elif service.service == SERVICE_STOP:\n tasks.append(device.async_stop_ffmpeg())\n else:\n tasks.append(device.async_restart_ffmpeg())\n\n if tasks:\n yield from asyncio.wait(tasks, loop=hass.loop)\n\n tasks.clear()\n for device in devices:\n tasks.append(device.async_update_ha_state())\n\n if tasks:\n yield from asyncio.wait(tasks, loop=hass.loop)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n 
if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "def registered(self, status_handler):\n self.__status_handler = status_handler", "def register_handler(config):\n\n @respond_to(\".*\")\n def handle(message):\n \"\"\"Respond to every Slack message and dispatch to another handler based\n on the contents of the message.\n\n This duplicates a little bit of the work that slackbot does, but allows\n us to define handlers dynamically based on the job config.\n \"\"\"\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def _forward_message(self, name, message):\n unhashed = self.message_hashes[repr(name)]\n if unhashed in self.handlers:\n for handler in self.handlers[unhashed]:\n handler(message)", "def send(self, *args, **kw):\n result = []\n for handler in self.registry.values():\n result.append(handler(*args, **kw))\n return result", "def _PushHandlerMessage(self, message):\n\n # We only accept messages of type MESSAGE.\n if message.type != rdf_flows.GrrMessage.Type.MESSAGE:\n raise ValueError(\"Unexpected message type: %s\" % type(message))\n\n if not message.session_id:\n raise ValueError(\"Message without session_id: %s\" % message)\n\n # Assume the message is authenticated and comes from this client.\n message.source = self.client_id\n\n message.auth_state = \"AUTHENTICATED\"\n session_id = message.session_id\n\n handler_name = message_handlers.session_id_map.get(session_id, None)\n if handler_name is None:\n raise ValueError(\"Unknown well known session id in msg %s\" % message)\n\n logging.info(\"Running message handler: %s\", handler_name)\n handler_cls = handler_registry.handler_name_map.get(handler_name)\n handler_request = rdf_objects.MessageHandlerRequest(\n client_id=self.client_id,\n handler_name=handler_name,\n request_id=message.response_id,\n request=message.payload)\n\n handler_cls().ProcessMessages([handler_request])", "def install_event_handlers(self, categories=None, handlers=None):\n if categories is not None and handlers is not None:\n raise ValueError(\"categories and handlers are mutually exclusive!\")\n\n from .events import get_event_handler_classes\n if categories:\n raise NotImplementedError()\n handlers = [cls() for cls in get_event_handler_classes(categories=categories)]\n else:\n handlers = handlers or [cls() for cls in get_event_handler_classes()]\n\n self._event_handlers = handlers", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in 
handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def _initChangeHandlers(self, handlers):\n if hasattr(self, \"_changeHandlerSet\") :\n return\n if isinstance(handlers, BaseChangeHandler):\n self._changeHandlerSet = set([handlers])\n elif hasattr(handlers, '__iter__'):\n self._changeHandlerSet = set(\n [h for h in handlers if isinstance(h, BaseChangeHandler)])\n else: \n self._changeHandlerSet = set()", "def setup_handlers(web_app):\n\n mlw_handlers = [\n ('/mlw/load_workspace', MLW_load_workspace_handler),\n ('/mlw/save_workspace', MLW_save_workspace_handler),\n ('/mlw/install_requirements', MLW_install_requirements_handler),\n ('/mlw/notify_still_alive', MLW_notify_still_alive_handler)\n ]\n\n # add the baseurl to our paths\n base_url = web_app.settings['base_url']\n mlw_handlers = [\n (ujoin(base_url, x[0]), x[1])\n for x in mlw_handlers\n ]\n print(\"base_url: {}\".format(base_url))\n print(mlw_handlers)\n\n web_app.add_handlers('.*', mlw_handlers)", "def RegisterFlowProcessingHandler(self, handler):\n self.UnregisterFlowProcessingHandler()\n\n # For the in memory db, we just call the handler straight away if there is\n # no delay in starting times so we don't run the thread here.\n self.flow_handler_target = handler\n\n for request in self._GetFlowRequestsReadyForProcessing():\n handler(request)\n with self.lock:\n self.flow_processing_requests.pop((request.client_id, request.flow_id),\n None)", "def _send_signals(self, svc_names: List[str], sig: str):\n pass", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def iter_message_handlers(self):\n for name in dir(self):\n attr = getattr(self, name)\n if isinstance(attr, MessageHandler):\n yield attr", "def register_handler(self, method, path, fn):\n if not(method in self.handlers):\n self.handlers[method] = {}\n self.handlers[method][path] = fn", "def add_service(self, load_balancer, service):\n # type: (Union[LoadBalancer, BoundLoadBalancer], LoadBalancerService) -> List[BoundAction]\n data = self.get_service_parameters(service)\n\n response = self._client.request(\n url=\"/load_balancers/{load_balancer_id}/actions/add_service\".format(load_balancer_id=load_balancer.id),\n method=\"POST\", json=data)\n return BoundAction(self._client.actions, response['action'])", "def get_message_handler(self, taxii_message):\n raise NotImplementedError()", "def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def delegate(self, services, args=None, kwargs=None):\n if not args:\n args = ()\n if not kwargs:\n kwargs = {}\n\n order = self._get_service_order()\n for srv in order:\n if srv in services:\n try:\n return services[srv](*args, **kwargs)\n except exceptions.ServiceError:\n continue\n raise exceptions.NoServiceError(services)", "def _set_handler_data(\r\n self, handler: Handler,\r\n handlers_dict: Dict[str, HandlerData],\r\n options: Optional[Dict[str, Any]]) -> None:\r\n from apysc.event.handler import get_handler_name\r\n name: str = get_handler_name(handler=handler, instance=self)\r\n if options is None:\r\n options = {}\r\n 
handlers_dict[name] = {\r\n 'handler': handler,\r\n 'options': options,\r\n }", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()" ]
[ "0.6415957", "0.63830936", "0.60461724", "0.6046069", "0.6006367", "0.58971536", "0.5866512", "0.5834788", "0.5806009", "0.5753481", "0.5696065", "0.5631438", "0.56120425", "0.5598932", "0.55874574", "0.55768156", "0.55757207", "0.55562836", "0.5551294", "0.5539715", "0.5536428", "0.5506033", "0.550355", "0.54882044", "0.5427758", "0.5393952", "0.53927433", "0.53868055", "0.5379734", "0.53586674", "0.5358557", "0.5346157", "0.5332317", "0.5315934", "0.53008574", "0.5258789", "0.5249054", "0.5240452", "0.52403784", "0.52385074", "0.52308136", "0.52308136", "0.5230378", "0.5225529", "0.52135575", "0.52105397", "0.52090585", "0.52066106", "0.520336", "0.51963466", "0.51723135", "0.51375866", "0.5134863", "0.5111421", "0.51072973", "0.50816435", "0.5081424", "0.50683874", "0.5067784", "0.5051351", "0.5046229", "0.50277174", "0.5014525", "0.5013006", "0.49873024", "0.49777392", "0.4972615", "0.4956381", "0.4945203", "0.4940264", "0.4928342", "0.492523", "0.4912787", "0.49089235", "0.49058878", "0.49031323", "0.48762807", "0.48648444", "0.48603097", "0.48601374", "0.48587713", "0.48576742", "0.48459938", "0.48213574", "0.48177204", "0.48016506", "0.4796191", "0.4790078", "0.47898257", "0.4785517", "0.4778462", "0.47687525", "0.47683963", "0.47571054", "0.4754087", "0.47517142", "0.47473457", "0.47441867", "0.47425535", "0.47381842" ]
0.8374331
0
Invokes the correct message handler for the given message.
def handle_message(self, sender, message):
    self.logger.debug('handle_message(%r, %r)', sender, message.handler)

    message_handler = self.message_handlers.get(message.handler)
    if message_handler is None:
        self.logger.warning("sender=%r, No handler found: '%s'",
                            sender, message.handler)
        return

    message_handler(sender, message)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_message(self, message):\n\n\t\tself.log.debug(\"%s handle_message %s\", self.name, message)\n\n\t\tif message[\"Type\"] == \"command\":\n\t\t\ttry:\n\t\t\t\tcommand_callable = \"command_%s\" % message[\"Message\"][\"command\"]\n\t\t\t\tif hasattr(self, command_callable) and callable(getattr(self, command_callable)):\n\t\t\t\t\tcall = getattr(self, command_callable)\n\t\t\t\t\tcall(message[\"Message\"][\"arguments\"])\n\t\t\texcept Exception as e:\n\t\t\t\tself.log.error(\"%s invalid command %s %s\", self.name, message, e)", "def handle_message(**payload):\n handler_instance = message.MessageHandler(payload)\n handler_instance.handle()", "def _handle_message(self, msg):\n self.event('message', msg)", "def handle(self, message: Message) -> None:\n self.handled_message = message", "def process_message(self, message):\n processors = {\n \"^org.chicago.cta.stations.\": self._handle_station,\n \"^org.chicago.cta.arrivals.\": self._handle_arrival,\n \"org.chicago.cta.turnstiles\": self._handle_turnstiles\n }\n processor = processors.get(message.topic, False)\n if processor:\n processor(message)\n else:\n logger.debug(\n \"unable to find handler for message from topic %s\", message.topic\n )", "def handle(self, message: InternalMessage) -> None:\n if isinstance(message, TransactionMessage):\n self._handle_tx_message(message)\n elif isinstance(message, StateUpdateMessage):\n self._handle_state_update_message(message)", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def dispatch(self, message):\n data = ujson.loads(message)\n command = data.get(\"command\", \"no command field!\")\n if command in self._command_hash_views:\n self._command_hash_views[command](self, data)\n else:\n # handler.send(\"404 Error\")\n logger.warning(\"[Local] System don't understand command[%s]\" % command)", "def _PushHandlerMessage(self, message):\n\n # We only accept messages of type MESSAGE.\n if message.type != rdf_flows.GrrMessage.Type.MESSAGE:\n raise ValueError(\"Unexpected message type: %s\" % type(message))\n\n if not message.session_id:\n raise ValueError(\"Message without session_id: %s\" % message)\n\n # Assume the message is authenticated and comes from this client.\n message.source = self.client_id\n\n message.auth_state = \"AUTHENTICATED\"\n session_id = message.session_id\n\n handler_name = message_handlers.session_id_map.get(session_id, None)\n if handler_name is None:\n raise ValueError(\"Unknown well known session id in msg %s\" % message)\n\n logging.info(\"Running message handler: %s\", handler_name)\n handler_cls = handler_registry.handler_name_map.get(handler_name)\n handler_request = rdf_objects.MessageHandlerRequest(\n client_id=self.client_id,\n handler_name=handler_name,\n request_id=message.response_id,\n request=message.payload)\n\n handler_cls().ProcessMessages([handler_request])", "def messageHandler(self, source, message, messageId):\n try:\n type, params, data = message.split(':',2)\n except:\n # 
Not a real message\n return\n \n try:\n getattr(self, \"thive_%s\" % type)(messageId, params.split(), data)\n except exceptions.AttributeError, c:\n raise c\n print \"[HIVE] No method bound for command '%s'\" % type", "def handle(self, message):\n print(\"You received a message:\")\n print(message)\n # Overwrite this function to do something with the message!", "def run(self):\n alogger.info(\"Recieved message from %s, Message: (%d) %s\" % (self.client.getaddress(), self.action_type, self.message))\n \n #Try to call th function associated with this message type.\n #format = \"handle_<type>\" (eg: handle_100)\n fn = globals().get(\"handle_\" + str(self.action_type))\n if fn and callable(fn):\n fn(self.message, self.address, self.client)\n else:\n alogger.info(\"Received unknown message from %d, type: %d\" % (self.client.getaddress(), self.action_type))", "def handle_message(self, message):\n\n try:\n controller_func = get_controller_func(message.code)\n\n if controller_func:\n response = get_controller_func(message.code)(message.payload)\n self.send_message(response)\n else:\n self.send_bad_request()\n except Exception as e:\n Logger.log_error(e)\n self.send_server_error()", "def message_callback(self, message):\n message_data = json.loads(message)\n\n if message_data.get('command') == 'error':\n return self.command_error(message_data)\n\n if 'device_type' in message_data and not message_data['device_type'].startswith(self.device_filter):\n return\n\n # Try to find a matching command and execute it\n command_name = message_data['command']\n command_data = message_data.get('data', {})\n device_name = message_data.get('name')\n\n command_handler_name = 'command_{}'.format(command_name)\n if not hasattr(self, command_handler_name):\n logging.info(\"{} does not support command {}\".format(\n self,\n command_name\n ))\n return\n\n command_handler = getattr(self, command_handler_name)\n return command_handler(device_name, command_data)", "def handle_message(self, msg):\n\n if msg.error != None:\n return\n else:\n try:\n method = self.get_service_method(msg.method_name)\n params = getattr(msg, 'params', None)\n msg.result = self.execute_method(method, params)\n except (MethodNotFoundError, InvalidParamsError, ServerError), ex:\n logging.error(ex)\n msg.error = ex\n except Exception, ex:\n logging.error(ex)\n ex = InternalError(\"Error executing service method\")\n ex.data = ''.join(traceback.format_exception(*sys.exc_info()))\n msg.error = ex", "def handle_message(self, mxmsg):\n if self._handler is None:\n raise NotImplementedError()\n\n self.notify_started()\n response = self._handler(mxmsg)\n if response == ():\n self.no_response()\n elif isinstance(response, str):\n self.send_message(message=response, type=MessageTypes.PING)\n elif isinstance(response, dict):\n self.send_message(**response)\n else:\n raise ValueError(\"Unsupported handler return type %r\" %\n type(response))", "def handle(self, message):", "def _incoming_handler(self, context, message, fake_reply):\r\n return self._map[message.method](context, fake_reply, *message.args, **message.kwargs)", "def handle(self, message):\n for callback in self.callbacks:\n callback(message['data'])", "def handle_message(self, validated_message: dict):\n self.logger.debug(f'Sensor received message {validated_message}')\n if (validated_message['messageType'] !=\n model.MessageTypes.Control.value):\n self.logger.debug(\n 'Sensor ignoring because messageType was not control'\n )\n return\n if validated_message['messageBody']['target'] != 
self.component_id:\n self.logger.debug(\n 'Sensor ignoring because not targeted at me'\n )\n return\n\n subtype = validated_message['messageSubtype']\n try:\n self.logger.debug(f'Dispatching message with subtype {subtype}')\n self.message_handler_table[subtype](validated_message)\n except KeyError:\n self.logger.warning(f'No handler for with subtype {subtype}')\n pass", "def execute_message_received(self, message_received):\n pass", "def call(self, message: Message) -> None:\n self.fn(message)", "def on_message(client, userdata, msg):\n TOPIC_DISPATCH_DICTIONARY[msg.topic][\"method\"](msg)", "def _on_message(self, raw_msg):\n strmsg = raw_msg.decode()\n msg = json.loads(strmsg)\n\n print(msg)\n\n if self._handlers.get(msg['msgid']):\n for handler in self._handlers[msg['msgid']]:\n handler.handle(msg)", "async def process(self, message):\n return await self.dispatcher.dispatch(message)", "def handle_message(self, msg, identity=None):\n\n if (self._supervisor and\n not isinstance(msg, mplane.model.Envelope)):\n self._exporter.put_nowait([msg, identity])\n\n if isinstance(msg, mplane.model.Capability):\n self._add_capability(msg, identity)\n elif isinstance(msg, mplane.model.Withdrawal):\n self._withdraw_capability(msg, identity)\n elif isinstance(msg, mplane.model.Receipt):\n self._handle_receipt(msg, identity)\n elif isinstance(msg, mplane.model.Result):\n self._handle_result(msg, identity)\n elif isinstance(msg, mplane.model.Exception):\n self._handle_exception(msg, identity)\n elif isinstance(msg, mplane.model.Envelope):\n if msg.get_token() in self._receipts:\n self._handle_result(msg, identity)\n else:\n for imsg in msg.messages():\n self.handle_message(imsg, identity)\n else:\n raise ValueError(\"Internal error: unknown message \"+repr(msg))", "def handle_message(self, message):", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "async def handle(self, message: discord.Message):\n raise NotImplementedError()", "def handle_message(self, message):\n\n\t\tself.console.handle_message(message)", "def handle_message(self, message):\n print \"[WARNING] No message handling implemented!\"", "def dispatch_message(self, addr, message_dict, kind):\n try:\n yield from self.dispatcher.dispatch_message(addr, message_dict, kind)\n except Exception as e:\n self.logger.error(\n \"Failed to dispatch mochad message {}: {}\".format(\n message_dict, e))", "def _forward_message(self, name, message):\n unhashed = self.message_hashes[repr(name)]\n if unhashed in self.handlers:\n for handler in self.handlers[unhashed]:\n handler(message)", "def handle_message(self, msg):\n pass", "def handle_message(self, msg):\n Logger.debug(\"Slave: Trying to parse\")\n if MessageKeys.command_key in msg.fields:\n Logger.info(\"Slave: Message command: %s\", str(msg.get_command()))\n return self.messagehandler[msg.get_command()](self, msg)\n return self.handle_invalid_command(msg)", "def processMessage(self, *args, **kwargs):\r\n pass", "def process(self, msg):\n print \"HANDLER: received a msg: %s\" % msg", "def get_message_handler(self, taxii_message):\n raise NotImplementedError()", "def message(self, msg):\n if (AZMessage.is_agilezen_xmpp_message(msg)):\n try:\n az_message = AZMessage(msg)\n 
except (MessageCreationException, api.APIException) as ex:\n print ex\n return None\n for handler in self.handlers:\n handler.handle(az_message)", "def message_handle(ws, message):\n try:\n data = json.loads(message)\n method = data['method']\n params = data['params']\n except json.JSONDecodeError:\n ws.close((1003, 'Message `{}` is invalid'.format(message)))\n except KeyError:\n keys = str(list(data.keys()))\n ws.close((1003, 'Message keys {} are missing or invalid'.format(keys)))\n else:\n try:\n public[method](ws, **params)\n except KeyError:\n ws.close((1007, 'Method `{}` not found'.format(method)))\n except TypeError:\n ws.close((1007, 'Parameters `{}` are wrong'.format(data['params'])))\n except InstanceNotFound as instance_id:\n ws.close((1007, 'Instance `{}` not found'.format(instance_id)))\n except EnvironmentMalformed as env_id:\n ws.close((1007, 'Environment `{}` is malformed'.format(env_id)))\n except EnvironmentNotFound as env_id:\n ws.close((1007, 'Environment `{}` not found'.format(env_id)))\n except WrongAction as action:\n ws.close((1007, 'Action `{}` is wrong'.format(action)))\n except Exception as err:\n ws.close((1007, 'Unknonwn error: {}'.format(err)))", "def handle_msg(self, msg):\n self.log.debug(\"handle_msg[%s](%s)\", self.comm_id, msg)\n if self._msg_callback:\n shell = self.kernel.shell\n if shell:\n shell.events.trigger('pre_execute')\n self._msg_callback(msg)\n if shell:\n shell.events.trigger('post_execute')", "def process(self, message: Message, **kwargs: Any) -> None:", "def handleMessage(msg):", "def on_message(self, handler: Callable[[Request], Coroutine[Any, Any, Any]]):\n self.on_message_handler = handler", "def message_received(self, message_header, message):\n\t\t# reset the ping counter\n\t\tself.running = time.time()\n\t\tself.logger.receive(\"{0} - {1} - {2}\".format(self.peerip, message_header.command, str(message)))\n\t\thandle_func_name = \"handle_\" + message_header.command\n\t\thandle_func = getattr(self, handle_func_name, None)\n\t\tif handle_func:\n\t\t\thandle_func(message_header, message)", "def on_message(self, message):\n log.debug(\"Protocol got message {message}\", message=message)\n if message['type'] == \"change\":\n self.handler.process_packet(message['packet'])\n self.send_packet()\n elif message['type'] == \"chat\":\n self.on_chat_message(message)\n elif message['type'] == \"action\":\n self.on_action(message)\n else:\n log.warn(\"Unrecognized message type {type}\", type=message['type'])", "def handle_message(self, message):\n print(f\"Got message {message}\")\n if message >> 7 == 1:\n # String\n self.receive_char_array(message)\n elif message >> 3 == 0b00000:\n # Servo position\n self.receive_servo_position(message)\n elif message == 0b00001000:\n # All servo positions\n self.receive_all_servo_positions()\n elif message == 0b00001001:\n # All servo limits\n self.receive_all_servo_limits()\n elif message == 0b00001010:\n # Raw force reading\n self.receive_raw_force()\n print(f\"Handled message {message}\")", "def onMessage(self, message):\n raise NotImplementedError", "def __msg_handler(self, bot, update):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def _handle_msg(self, msg):\n data = msg['content']['data']\n method = data['method']\n\n if method == 'update':\n if 'state' in data:\n state = data['state']\n if 'buffer_paths' in data:\n _put_buffers(state, data['buffer_paths'], msg['buffers'])\n self.set_state(state)\n\n # Handle a state request.\n elif method == 'request_state':\n self.send_state()\n\n 
# Handle a custom msg from the front-end.\n elif method == 'custom':\n if 'content' in data:\n self._handle_custom_msg(data['content'], msg['buffers'])\n\n # Catch remainder.\n else:\n self.log.error('Unknown front-end to back-end widget msg with method \"%s\"' % method)", "def __msg_handler(self, update, bot):\n trigger = update.message.text\n self.__handler(bot, update, trigger)", "def handle(message):\n\n text = message.body[\"text\"]\n logger.info(\"Received message\", message=text)\n\n if text == \"status\":\n handle_status(message)\n return\n\n for slack_config in config[\"slack\"]:\n if slack_config[\"regex\"].match(text):\n handle_command(message, slack_config)\n return\n\n for namespace, help_config in config[\"help\"].items():\n for pattern in [\"^{} help$\", \"^help {}$\"]:\n if re.match(pattern.format(namespace), text):\n handle_namespace_help(message, help_config)\n return\n\n include_apology = text != \"help\"\n handle_help(message, config[\"help\"], include_apology)", "def process(self, message: Message, **kwargs: Any) -> None:\n pass", "def dispatch_user_message(self, message):\n session_event = message.get('session_event')\n handler = self._session_handlers.get(session_event,\n self.consume_user_message)\n return handler(message)", "def handle(msg):\n\n # glance to get some meta on the message\n content_type, chat_type, chat_id = telepot.glance(msg)\n chat_id = str(chat_id)\n\n # we only want to process text messages from our specified chat\n if (content_type == 'text') and (chat_id in allowed_chat_ids):\n command = msg['text']\n try:\n _cmd = get_command(command)\n except UserWarning as ex:\n logger.error(ex)\n raise\n _cmd.execute(chat_id)", "def message_received(self, message):\n \n # Routing\n if self.route_message(message) == True:\n return\n \n # Handlers?\n if len(self._handlers) > 0:\n for handler in self._handlers:\n handler(message)\n \n # Storage?\n else:\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n self._messages.put(tuple([timestamp,message]))", "def process_message(self, message):\n\n if 'id' in message:\n logger.debug(\"Processing message {0}: {1!r}\",\n message['id'], message['method'])\n else:\n logger.debug(\"Processing method {0!r}\", message['method'])\n\n response = self.get_response(message.get('id', None),\n self.registry,\n message['method'],\n *message['params'])\n return response", "def handle_msg(msg):\n if comm._msg_callback:\n comm._msg_callback(msg)", "def process_message(self, message):\n self.post_to_redis(message)\n return", "def processMessage(self, msg):\r\n LOG(\"Received message: \" + msg.getId())\r\n \r\n # Process messages incoming from child executor, if any\r\n procId = msg[FIELD_PROC_ID]\r\n if procId != self.procId:\r\n if self.childManager.hasChild():\r\n self.childManager.processChildMessage(msg)\r\n else:\r\n LOG(\"Unexpected child message: \" + msg.getId(), LOG_ERROR)\r\n elif msg.getType() == MSG_TYPE_COMMAND:\r\n if msg.getId() == Messages.MSG_ADD_CLIENT:\r\n self.addClient(msg)\r\n elif msg.getId() == Messages.MSG_REMOVE_CLIENT:\r\n self.removeClient(msg)\r\n elif msg.getId() == Messages.CMD_CLOSE:\r\n self.cleanup()\r\n elif msg.getId() == Messages.CMD_RELOAD:\r\n REGISTRY['CIF'].clearAsRun()\r\n self.cleanup( executionOnly = True )\r\n self.setupResources()\r\n self.prepareExecution()\r\n else:\r\n cmdId = msg[\"Id\"]\r\n if cmdId in [ 
Messages.CMD_ABORT, Messages.CMD_PAUSE ]:\r\n self.mailbox.push( msg, high_priority = True )\r\n else:\r\n self.mailbox.push( msg )\r\n else:\r\n LOG(\"Unexpected message: \" + msg.getId() + \"/\" + msg.getType(), LOG_ERROR)", "def apply_handler(self):\n tmp = self.event_type\n if hasattr(self, tmp):\n getattr(self, tmp)()\n elif(self.target):\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)", "async def route_message(self, msg):\n raise NotImplementedError", "def handle_msg(self, msg):\n self.logger.debug(\"Received: {}\".format(msg))\n\n try:\n msg_type = msg[\"type\"]\n except KeyError as e:\n return msgs.error(e)\n\n if msg_type == \"ping_req\":\n reply = msgs.ping_reply()\n elif msg_type == \"list_req\":\n reply = self.list_callables()\n elif msg_type == \"call_req\":\n try:\n obj_name = msg[\"obj_name\"]\n method = msg[\"method\"]\n params = msg[\"params\"]\n reply = self.call_method(obj_name, method, params)\n except KeyError as e:\n return msgs.error(e)\n elif msg_type == \"exit_req\":\n self.logger.info(\"Received message to die. Bye!\")\n reply = msgs.exit_reply()\n # Need to actually send reply here as we're about to exit\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n self.clean_up()\n sys.exit(0)\n else:\n err_msg = \"Unrecognized message: {}\".format(msg)\n self.logger.warning(err_msg)\n reply = msgs.error(err_msg)\n return reply", "def handleMessage(self, e):\n if len(self.messages) == 0:\n return\n messageInfo = self.messages[0]\n requestInfo = self.helpers.analyzeRequest(messageInfo)\n # run cheker\n self.run_replace_param_with_a_previously_valid_one(requestInfo, messageInfo)\n self.run_replace_param_with_modifying_with_one_different_character(requestInfo, messageInfo)\n self.run_replace_param_with_radamsa_output(requestInfo, messageInfo)\n self.run_remove_param(requestInfo, messageInfo)\n self.run_replace_body_with_radamsa_output(requestInfo, messageInfo)\n self.updatedb(messageInfo.getHttpService().getHost(), requestInfo.getParameters())", "def dispatch(self, event: str, message: str) -> None:\n\t\tfor subscriber, callback in self.get_subscribers(event).items():\n\t\t\tcallback(event, message)", "def onMessage(self, msg, binary):\r\n self._assembler.processMessage(msg, binary)", "def handle_message(self, session, message):\n # Handle an RPC call\n # Reason should come from inform call.\n response = {}\n if message['method'] == 'done' and message['id'] is None:\n # Here we switch roles, becoming RPC Client\n next_state, response = RPCS.SendingRpc, None\n else:\n # We have a valid method.\n # (VALID_METHODS checked in rpcsd:parse_message)\n next_state = RPCS.ExpectRpc\n response['error'] = {'code': -31998, 'message': 'Wrong request'}\n response['id'] = message['id']\n\n return next_state, response", "def execute(self, message: ACLMessage):\n super().execute(message)\n\n # Filter for protocol\n if not message.protocol == ACLMessage.FIPA_REQUEST_PROTOCOL:\n return\n\n # Filter for session_id (conversation_id)\n session_id = message.conversation_id\n if session_id not in self.open_sessions:\n return\n\n # Resume generator\n generator = self.open_sessions[session_id]\n handlers = {\n ACLMessage.INFORM: lambda: generator.send(message),\n ACLMessage.AGREE: lambda: generator.throw(FipaAgreeHandler, message),\n ACLMessage.REFUSE: lambda: generator.throw(FipaRefuseHandler, message),\n ACLMessage.FAILURE: lambda: generator.throw(\n FipaFailureHandler, message)\n }\n try:\n handlers[message.performative]()\n except 
StopIteration:\n pass\n except KeyError:\n return\n\n # Clear session if final message was received\n if message.performative in (ACLMessage.REFUSE, ACLMessage.INFORM, ACLMessage.FAILURE):\n self.delete_session(session_id)", "def _on_message_handler(client, callback_dict, message):\n # If the message topic is in the subscribed list, handle it\n if message.topic in callback_dict:\n callback_dict[message.topic](message)", "def _command(self, handlers, args, msg):\n com, arg = self._command_split(args)\n if com in handlers.subcommands:\n msg.inc_handlers()\n self._command(handlers.subcommands[com], arg, msg)\n for handler in handlers.handlers:\n msg.inc_handlers()\n handler.callback(msg, args)\n msg.dec_handlers()", "def handleMessage(self, channels, sender, code, datagram):\n self.stateServer.handle(channels, sender, code, datagram)\n self.clientAgent.handle(channels, sender, code, datagram)\n self.databaseServer.handle(channels, sender, code, datagram)", "def handle(self, msg, peer_protocol):\n msg_id = msg[0]\n if msg_id == 0:\n self._handle_handshake(msg, peer_protocol)\n elif msg_id == 1: #update\n print(msg, len(msg))\n self._handle_update(msg)", "def received_message(self, m):\n self.receiver.handle_message(m)", "def process(self, message):\n try:\n self.messages.remove(message)\n except ValueError:\n pass # nothing to see here, just a message that was already processed and is not on the list any more\n except Exception as e:\n print('error removing message from self.message:', e)\n \n try:\n if message['type'] in [\"ticker\"]:\n self.process_tickers(message)\n elif message['type'] in [\"snapshot\", \"l2update\"]:\n self.process_orderbook(message)\n elif message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] and 'user' in self.data:\n self.process_orders(message)\n except Exception as e:\n raise Exception(\"Process raised an error: {}\\n\\t{}\".format(e,message))", "def handleMessage(self, message):\n\n if 'started' in message.tags:\n self.handleMessage_started(message)\n\n elif 'deployment_computed' in message.tags:\n self.handleMessage_computed(message)\n\n elif 'deployment_end' in message.tags:\n self.handleMessage_end(message)", "def _dispatch_messages(self):\n while True:\n select_obj = (yield)\n if select_obj == self._message_queue.selobj:\n msg = self._message_queue.get_nowait()\n if msg is not None:\n msg_type = msg.get('type', None)\n if msg_type is not None:\n msg_handler = self._message_handlers.get(msg_type, None)\n if msg_handler is not None:\n msg_handler(msg['data'])", "def processMessage(self, msg, binary):\r\n if binary:\r\n self._handleBinary(msg)\r\n else:\r\n try:\r\n msg = json.loads(msg)\r\n except ValueError:\r\n raise InvalidRequest('Message is not in valid JSON format.')\r\n\r\n uris = self._recursiveURISearch(msg)\r\n\r\n if uris:\r\n self._handleString(msg, uris)\r\n else:\r\n self._protocol.processCompleteMessage(msg)", "def handle(self, message):\n\n tokens = message['text'].split()\n _class = self._factory(tokens[0])\n\n if ' ' in message['text']:\n if len(tokens) >= 2:\n self.reply(message, _class(tokens[1:len(tokens)]).run(),\n self._handle_opts(message))\n else:\n self.reply(message, 'Usage :' + _class(None).getHelp(),\n self._handle_opts(message))", "def message_callback(self, message):\n pass", "def dispatch_message(self, message):\n\n self.log.debug(\"Incoming message %r\", message)\n if message.code.is_request():\n # Responses don't get deduplication because they \"are idempotent or\n # can be handled in an idempotent 
fashion\" (RFC 7252 Section 4.5).\n # This means that a separate response may get a RST when it is\n # arrives at the aiocoap client twice. Note that this does not\n # impede the operation of observations: Their token is still active\n # so they are ACK'd, and deduplication based on observation numbers\n # filters out the rest.\n #\n # This saves memory, and allows stateful transports to be shut down\n # expeditiously unless kept alive by something else (otherwise,\n # they'd linger for EXCHANGE_LIFETIME with no good reason).\n if self._deduplicate_message(message) is True:\n return\n\n if message.mtype in (ACK, RST):\n self._remove_exchange(message)\n\n if message.code is EMPTY and message.mtype is CON:\n self._process_ping(message)\n elif message.code is EMPTY and message.mtype in (ACK, RST):\n pass # empty ack has already been handled above\n elif message.code.is_request() and message.mtype in (CON, NON):\n # the request handler will have to deal with sending ACK itself, as\n # it might be timeout-related\n self._process_request(message)\n elif message.code.is_response() and message.mtype in (CON, NON, ACK):\n success = self._process_response(message)\n if success:\n if message.mtype is CON:\n self._send_empty_ack(message.remote, message.mid, reason=\"acknowledging incoming response\")\n else:\n # A peer mustn't send a CON to multicast, but if a malicious\n # peer does, we better not answer\n if message.mtype == CON and not message.remote.is_multicast_locally:\n self.log.info(\"Response not recognized - sending RST.\")\n rst = Message(mtype=RST, mid=message.mid, code=EMPTY, payload='')\n rst.remote = message.remote.as_response_address()\n self._send_initially(rst)\n else:\n self.log.info(\"Ignoring unknown response (which is not a unicast CON)\")\n else:\n self.log.warning(\"Received a message with code %s and type %s (those don't fit) from %s, ignoring it.\", message.code, message.mtype, message.remote)", "def messageReceived(self, message):\n raise NotImplementedError(self)", "def handle_message(self, data):\n message = Message.from_text(data)\n if message is not None:\n print(message.username, message.action, message.channel, message.content)\n self._callback(\"message\", message) # TODO: add additional callbacks", "def handle(self, body):\n event_type = body['event_type']\n method_name = event_type.replace('.', '_')\n try:\n method = getattr(self, method_name)\n method(body)\n except AttributeError:\n LOG.debug('%s needs a method called `%s` to handle %s' %\n (self.__class__.__name__, method_name, event_type))", "def on_receive(self, message):\n\n if message.get('command') == 'dispatch':\n self.dispatch(message['filename'])\n\n else:\n log.error('Dispatcher received unexpected message type: {}'.format(\n message))", "def handle_message(self, data, channel):\n pass", "def process_message(self, context, message):\r\n r = self._process_message_general(context, message)\r\n if r is True:\r\n return\r\n elif r is not False:\r\n self._interface.incoming(context, message, r)\r\n else:\r\n self._interface.incoming(context, message, None)", "def handle(self, msg, options):\n raise NotImplementedError()", "def handle(self, m):\n\n\t\tline = m.split(\" \")\n\n\t\tif line[0] == \"PING\":\n\t\t\tself(\"PONG\", line[1])\n\t\telif len(line) > 1 and line[1] == \"001\":\n\t\t\tself.callbacks[\"loggedin\"](self, *line)\n\t\telif len(line) > 1 and line[1] == \"JOIN\":\n\t\t\tself.callbacks[\"joined\"](self, *line)\n\t\telif len(line) > 1 and line[1] == 
\"PRIVMSG\":\n\t\t\tself.callbacks[\"messaged\"](self, *line)", "def _method_call(self, msg):\n #print(\"Performing service: %s, method_name: %s\" % (msg.service_name, msg.method_name))\n service = self._services.get(msg.service_name)\n if service is None:\n raise MessageHandleError(MessageHandleError.RESULT_UNKNOWN_SERVICE, msg)\n\n try:\n return execute_remote_method_call(service, msg.method_name, *msg.pargs, **msg.kwargs)\n #return service.call(msg.method_name, *msg.pargs, **msg.kwargs)\n except MessageHandleError as error:\n error.original_message = msg\n raise error", "def process(self, msg):\n raise NotImplemented", "def handle_message(self, msg: mqtt.MQTTMessage) -> None:\n payload = json.loads(msg.payload.decode(\"utf-8\"))\n logging.info(f\"Received a new message: {payload}\")\n if \"volume\" in payload:\n validate(payload, schema=self.volume_schema)\n self.volume = payload[\"volume\"]\n elif \"volumeCtrl\" in payload:\n validate(payload, schema=self.volume_ctrl_schema)\n self.volume_up() if payload[\"volumeCtrl\"] == \"+\" else self.volume_down()\n elif \"mute\" in payload:\n validate(payload, schema=self.mute_schema)\n self.mute = payload[\"mute\"]\n elif \"toggle\" in payload:\n validate(payload, schema=self.toggle_schema)\n self.toggle_mute() if payload[\"toggle\"] == \"mute\" else self.toggle_pause()\n elif \"ctrl\" in payload:\n validate(payload, schema=self.ctrl_schema)\n self.skip_forward() if payload[\"ctrl\"] == \">>\" else self.skip_backward()\n else:\n raise ValueError(f\"Cannot handle message: {payload}, not a valid command\")", "def _message(self, msg):\n\n self.log('Message received:', msg['body'], pretty=True)\n\n if msg['type'] in ('chat', 'normal'):\n body = str(msg['body'])\n if body.startswith('/'):\n cmd, arg_string = body.split(' ', maxsplit=1)\n cmd = cmd.lstrip('/')\n\n if arg_string:\n args = arg_string.split(' ')\n else:\n args = None\n\n self.log('IRC remote command received:', cmd, args)\n return\n else:\n if True:\n msg.reply(\"Sorry, I did not understand that:\\n%s\" % body).send()", "def handle_msg(self, state_id, msg):\n pass", "def _r_handle_message_contents(self, msg, protocol):\n if isinstance(msg, ResponseMessage):\n d = self._waiting_messages.pop(msg.response_to, None)\n if d is not None:\n d.callback(msg)\n elif isinstance(msg, ServerMotdMessage):\n print(\"Connected: %s\" % msg.motd)\n self._r_successful_connection()\n elif isinstance(msg, EventMessage):\n callback = self._event_callbacks.get((msg.service_name, msg.event_name))\n if callback is not None:\n threads.deferToThread(callback, *msg.pargs, **msg.kwargs)", "def handler(self, input_message: FSPluginMessageBase, context: FSContext) -> FSPluginOutput:\n raise NotImplementedError()", "def _apply_msg_filter(self,message):\n \n for h in self._message_handlers:\n if h.filter(message):\n h.handler(message)\n break", "def _respond(self, message):\n try:\n if self.callback:\n self.callback(message)\n except Exception as e:\n LOG.error(e)\n self.service.respond(message)", "def handle_message(event):\n intention = parse_intention(event.message.text)\n if intention == config.QUERY_INTENTION:\n handle_query_weather_message(event)\n elif intention == config.SUBSCRIBE_INTENTION:\n handle_subscribe_message(event)\n else:\n handle_unknown_message(event)", "def consume_user_message(self, message):\n pass", "def request(self, msg):\n\t\tif msg.command in ('AUTH', 'EXIT', 'GET', 'SET', 'VERSION', 'COMMAND', 'UPLOAD'):\n\t\t\tmethod = 'handle_request_%s' % (msg.command.lower(),)\n\t\telse:\n\t\t\tmethod = 
'handle_request_unknown'\n\n\t\tself.execute(method, msg)" ]
[ "0.7761963", "0.72658277", "0.7105738", "0.7102408", "0.7060995", "0.7036604", "0.70310044", "0.7029397", "0.699162", "0.69913566", "0.698211", "0.6949379", "0.69273627", "0.6888938", "0.68215775", "0.6818507", "0.67997396", "0.6774335", "0.67683744", "0.6749502", "0.6715756", "0.6680169", "0.665071", "0.66503346", "0.6640639", "0.6629455", "0.66195524", "0.660921", "0.659287", "0.6557109", "0.65394634", "0.65267134", "0.65153754", "0.6512464", "0.6499018", "0.6457884", "0.64223015", "0.64102453", "0.6406858", "0.6363455", "0.6351401", "0.6342919", "0.632991", "0.6323977", "0.6314623", "0.6311673", "0.6270669", "0.62693036", "0.62513834", "0.6246574", "0.6236949", "0.6226942", "0.62195843", "0.6203534", "0.6176212", "0.61749667", "0.61331993", "0.61310565", "0.6125525", "0.6123258", "0.6111707", "0.60893875", "0.60837317", "0.60616225", "0.6047916", "0.6043607", "0.60391855", "0.60350263", "0.6033995", "0.60303605", "0.60259426", "0.6019005", "0.5999167", "0.5994203", "0.5973955", "0.5953808", "0.5946433", "0.5944979", "0.5900756", "0.5899139", "0.5893713", "0.5887292", "0.58696395", "0.58636165", "0.58628964", "0.58577174", "0.5856006", "0.5849418", "0.58392006", "0.5837737", "0.5832778", "0.5826482", "0.5826015", "0.58249354", "0.5814343", "0.58126116", "0.58049864", "0.5802741", "0.57809967", "0.5778838" ]
0.73108006
1
Do not return anything, modify matrix inplace instead.
def setZeroes(self, matrix): for i in range(len(matrix)): for j in range(len(matrix[i])): if matrix[i][j] == 0 and (i, j) not in self.visited: for neighbor in self.setter(matrix, i, j): matrix[neighbor[0]][neighbor[1]] = 0 self.visited.add((neighbor[0], neighbor[1])) print(matrix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __update_matrix(self, old_matrix_view):\n # if we've cleaned dirt - we will see it on our next move, so we substitute only unseen cells\n # which are marked with \"o\"\n new_matrix_view = []\n for row in range(self.matrix_rows):\n new_matrix_view.append([char for char in input()])\n\n if old_matrix_view:\n for row in range(self.matrix_rows):\n for col in range(self.matrix_cols):\n if new_matrix_view[row][col] == \"o\":\n new_matrix_view[row][col] = old_matrix_view[row][col]\n\n return new_matrix_view", "def update(mat) -> np.ndarray:\n return mat", "def copy_matrix(matrix):\n import numpy as np\n copy_of_m = np.copy(matrix)\n return copy_of_m", "def transform_mat(matrix):\n delta = 1e-5\n matrix = matrix + delta\n return matrix", "def normalize(self,matrix):\n for i in range(self.N):\n matrix[self.N-1][i] = 0\n for i in range(self.n):\n matrix[self.N - 1][self.index(i,i)] = 1\n return matrix", "def matNew(mat):\n return matCopy(mat)", "def newMatrix(self):\n self.matrix = makeMatrix()\n for row in range(self.matrix.getHeight()):\n for column in range(self.matrix.getWidth()):\n self.canvasGrid[row][column].draw(self.matrix[row][column])", "def _setMatrixRow(self, row):\n item = self._item()\n if item is not None:\n matrix = item.getMatrix()\n matrix[self._index, :] = row.x(), row.y(), row.z()\n item.setMatrix(matrix)", "def vec_matrix_update(self, A, y, P, evecr):\n\n l = y.shape[1]\n A_old = A[:, :(P) * l]\n\n y = np.matrix(y)\n\n if evecr.size:\n new_cols = y * evecr\n A_old = np.concatenate((A_old, new_cols[P:-1]), axis=1)\n\n N = A.shape[0]\n A_old = np.concatenate((A_old, np.ones([N, 1])), axis=1)\n\n return np.array(A_old)", "def rowReduce(self):\n myMatrix = Matrix(self.Matrix)\n print(\"This is the row reduced echelon form of your matrix: \\n\", myMatrix.rref())", "def reiniciarMatrix(self):\n self.matrixMAPA = []\n self.rellenarMatrix()", "def update_F_matrix(self, F_matrix):\n self.F_matrix = F_matrix", "def _clear_matrix(self):\n\t\tself._w2i_matrix = self._i2w_matrix = None", "def separate_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.result[row] = self.matrix[row][-1]\r\n self.matrix[row].pop()", "def matrix(self):\n try:\n return self.__matrix\n except AttributeError:\n old_matrix_view = self.__read_matrix_file()\n new_matrix_view = self.__update_matrix(old_matrix_view)\n self.__write_matrix_to_file(new_matrix_view)\n self.__matrix = new_matrix_view\n return self.__matrix", "def matrix_add():", "def _apply_correction(self):\n np.copyto(self.network.weights, self.correction_matrix)", "def deepercopy(matrix):\n if isinstance(matrix[0], int) or isinstance(matrix[0], float):\n newmat = [0 for x in range(len(matrix))]\n for i in range(len(matrix)):\n newmat[i] = matrix[i]\n return newmat\n else:\n newmat = [0 for x in range(len(matrix))]\n for i in range(len(matrix)):\n newmat[i] = deepercopy(matrix[i])\n return newmat", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n not_fixed = set( ((i, j)) for i in range(0,n) for j in range(0,n))\n\n while not_fixed:\n i, j = not_fixed.pop()\n not_fixed.add((i,j))\n old_value = matrix[i][j]\n\n while True: # complete the cycle of fixes beginning at (i,j)\n i, j = j, n - i - 1\n if (i, j) not in not_fixed:\n break\n tmp = matrix[i][j]\n matrix[i][j] = old_value\n not_fixed.remove((i, j)) \n old_value = tmp", "def rotate(self, matrix: List[List[int]]) -> None:\n length = len(matrix)\n for row in range(length//2):\n for col in range(row, length-row-1):\n # matrix[row][col], 
matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row], matrix[row][col] = matrix[row][col], matrix[col][length-1-row], matrix[length-1-row][length-1-col], matrix[length-1-col][row]\n return", "def process(self, mat):", "def mutate_matrix(matrix):\n L = len(matrix)\n r_i = random.randrange(L)\n r_j = random.randrange(4)\n r = random.gauss(0,1)\n return [[matrix[i][j]+r*(i==r_i)*(j==r_j)\n for j in range(4)] for i in range(L)]", "def overlayMatrix(board_object, item_object, x, y):\n board_matrix = board_object.returnMatrixBoard()\n item_matrix = item_object.returnMatrix()\n k = 0\n l = 0\n for i in range(x, x + item_object.length):\n for j in range(y, y + item_object.width):\n board_matrix[i][j] = item_matrix[k][l]\n l += 1\n k += 1\n l = 0\n board_object.editBoard(board_matrix)", "def _transform(self, matrix):\n for x in list(self.keys()):\n ar = self[x]\n if len(ar.shape) == 2 and ar.shape[1] == 3:\n self[x] = np.dot(matrix, ar.transpose()).transpose()", "def apply_mask(mask_matrix_df, original_matrix_df):\n\n print(\"Applying the mask ...\")\n\n original_matrix_columns = list(original_matrix_df)\n original_matrix_rows = list(original_matrix_df.index)\n\n mask_array = mask_matrix_df.to_numpy()\n original_array = original_matrix_df.to_numpy().astype(float)\n\n\n # Note: np.nan cannot be inserted into an array of type int. The array needs to be float.\n np.putmask(original_array, mask_array, np.nan)\n\n\n after_masking_df = pd.DataFrame(original_array, columns=original_matrix_columns, index=original_matrix_rows)\n return after_masking_df", "def clone_matrix(mat):\n return [[x for x in row] for row in mat]", "def shrinkTrackMatrix(self):\n self.tracksMatrix = self.tracksMatrix[0:(len(self.tracksMatrix)-1)]\n self.attributesMatrix = self.attributesMatrix[0:(len(self.attributesMatrix)-1)]", "def copy(self):\n rdd = self._data.map(\n lambda m: m\n )\n\n return Matrix(rdd, self._shape,\n dtype=self._dtype, coord_format=self._coord_format, nelem=self._nelem)", "def forward_substitution(self):\r\n for col in range(0, self.SIZE):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col + 1, self.SIZE):\r\n self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result", "def removeMatrixTranslate(matrix):\n\n float_matrix = [matrix(i, j) for i in xrange(4) for j in xrange(4)]\n for idx in range(12, 15):\n float_matrix[idx] = 0.0\n \n outMatrix = OpenMaya.MFloatMatrix()\n OpenMaya.MScriptUtil.createFloatMatrixFromList(float_matrix , outMatrix)\n\n return outMatrix", "def iteration_improve(opt_matrix, over_alloc_pct, under_alloc_pct, can_add, can_remove):\n for idx in prange(opt_matrix.shape[0]):\n row_values = opt_matrix[idx, :]\n improve_single_row(row_values, over_alloc_pct, under_alloc_pct, can_add, can_remove)\n\n return opt_matrix", "def rotate(self, matrix: list) -> None:\n for i in range(len(matrix)):\n for j in range(i):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n # matrix[i].reverse()\n print(matrix)\n for i in range(len(matrix)):\n matrix[i].reverse()\n print(matrix)", "def flush(self):\n\n if hasattr(self, 'trilinosMatrix'):\n if hasattr(self.matrix, 'storeZeros'):\n self.trilinosMatrix.flush(cacheStencil=self.matrix.storeZeros)\n else:\n self.trilinosMatrix.flush(cacheStencil=False)\n\n if (not hasattr(self, 
'cache')) or (self.cache is False):\n del self.matrix", "def matSet(mat, r, c, v):\n mat[r][c]=v", "def change_basis(self, U_global):\n self.matrix = U_global @ self.matrix @ np.conj(U_global).T", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix) # 行\n\n # 以x=y为轴翻转\n # [[1,2,3],\n # [4,5,6],\n # [7,8,9]]\n # 变为\n # [1 4 7]\n # [2 5 8]\n # [3 6 9]\n for i in range(n):\n for j in range(i, n):\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n # 以中点为轴翻转\n for i in range(n):\n for j in range(n // 2):\n matrix[i][j], matrix[i][n - j - 1] = matrix[i][n - j - 1], \\\n matrix[i][j]\n\n # 非原地修改写法,先上下翻转,再以x=y为轴复制对应数字\n # n = len(matrix)\n # r = list(zip(*matrix[::-1]))\n # for i in range(n):\n # for j in range(n):\n # matrix[i][j] = r[i][j]", "def reverse_matrix(self):\n return SWAP.matrix @ self.matrix @ SWAP.matrix", "def make_immutable(mat):\n if issparse(mat):\n mat.data.flags.writeable = False\n if mat.format in {\"csr\", \"csc\", \"bsr\"}:\n mat.indices.flags.writeable = False\n mat.indptr.flags.writeable = False\n elif mat.format == \"coo\":\n mat.row.flags.writeable = False\n mat.col.flags.writeable = False\n else:\n mat.flags.writeable = False", "def reset(self):\n self.mat = np.zeros(9).reshape(3,3).astype(np.int32)\n return self.mat", "def copy_matrix(self, M):\r\n # Section 1: Get matrix dimensions\r\n rows = len(M)\r\n cols = len(M[0])\r\n \r\n # Section 2: Create a new matrix of zeros\r\n MC = self.zeros_matrix(rows, cols)\r\n \r\n # Section 3: Copy values of M into the copy\r\n for i in range(rows):\r\n for j in range(cols):\r\n MC[i][j] = M[i][j]\r\n \r\n return MC", "def dirty_square(self, row: int, column: int) -> None:\n self.__squares[row][column] = Floor._dirty", "def copy(self) -> 'MatrixBoolean':\n\t\treturn MatrixBoolean(matrix=self.matrix)", "def to_matrix(self, normalize: bool = True) -> jnp.ndarray:\n return NotImplemented # pragma: no cover", "def mrotate(self):\n result_matrix = [[0 for col in range(len(self.matrix[0]))] for row in range(len(self.matrix))]\n for i in range(len(self.matrix)):\n for j in range(len(self.matrix[0])):\n result_matrix[i][j] = self.matrix[i][len(self.matrix[0]) - 1 - j]\n # left turn -> result_matrix[i][j] = self.matrix[len(self.matrix) - 1 - i][j]\n self.matrix = result_matrix\n pass", "def copy_input_pattern(self, matrix, output_neuron, input):\n matrix[output_neuron, :] = input", "def setZeroes(self, matrix: List[List[int]]) -> None:\n new_matrix = [row.copy() for row in matrix]\n for i in range(len(matrix)):\n for j in range(len(matrix[0])):\n if matrix[i][j] == 0 and new_matrix[i][j] == 0:\n self.setZero(matrix, i, j)", "def fill_matr(matrix):\n for j in range(len(matrix)):\n for i in range(len(matrix[0])):\n print(\"enter another element\")\n matrix[j][i] = int(input()) \n print(\"matrix is full\")\n return matrix", "def copy(self):\n # Warning: Because we use memcpy and thus copy memory internally, we have to be careful to always update this method\n # whenever the CSRSparseMatrix class changes...\n\n cdef CSRSparseMatrix_INT64_t_FLOAT32_t self_copy\n\n # we copy manually the C-arrays\n cdef:\n FLOAT32_t * val\n INT64_t * col\n INT64_t * ind\n INT64_t nnz\n\n nnz = self.nnz\n\n self_copy = CSRSparseMatrix_INT64_t_FLOAT32_t(control_object=unexposed_value, nrow=self.__nrow, ncol=self.__ncol, store_zero=self.__store_zero, store_symmetric=self.__store_symmetric)\n\n val = <FLOAT32_t *> PyMem_Malloc(nnz * sizeof(FLOAT32_t))\n if not val:\n raise MemoryError()\n memcpy(val, self.val, nnz * sizeof(FLOAT32_t))\n 
self_copy.val = val\n\n col = <INT64_t *> PyMem_Malloc(nnz * sizeof(INT64_t))\n if not col:\n PyMem_Free(self_copy.val)\n raise MemoryError()\n memcpy(col, self.col, nnz * sizeof(INT64_t))\n self_copy.col = col\n\n ind = <INT64_t *> PyMem_Malloc((self.__nrow + 1) * sizeof(INT64_t))\n if not ind:\n PyMem_Free(self_copy.val)\n PyMem_Free(self_copy.col)\n raise MemoryError()\n memcpy(ind, self.ind, (self.__nrow + 1) * sizeof(INT64_t))\n self_copy.ind = ind\n\n self_copy.__nnz = nnz\n\n self_copy.__col_indices_sorted_test_done = self.__col_indices_sorted_test_done\n self_copy.__col_indices_sorted = self.__col_indices_sorted\n self_copy.__first_row_not_ordered = self.__first_row_not_ordered\n\n return self_copy", "def _matrix(*params):\n raise NotImplementedError", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for dig in range(n):\n row = dig\n for col in range(dig+1, n):\n matrix[row][col] , matrix[col][row] = matrix[col][row], matrix[row][col]\n print(matrix)\n left = 0\n right = n-1\n while left < right:\n for row in range(n):\n matrix[row][left], matrix[row][right] = matrix[row][right], matrix[row][left]\n left+=1\n right-=1", "def rotate(self, matrix) -> None:\n c = len(matrix)\n matrix[:] = [[matrix[c-i-1][j] for i in range(c)] for j in range(c)]", "def add_to_row(M, i, j):\n N = copy.deepcopy(M)\n N[i] = 1 * np.logical_xor(N[i], N[j])\n return N", "def dup_matrix(self):\n maxtrixcalc = importr('matrixcalc') # load matrixcalc library\n rscript = 'D.matrix( )'.replace(' ', str(self.k)) # generate R script\n rmatrix = robjects.r(rscript) # run R script\n dup_mat = np.array(rmatrix) # convert to ndarray\n return dup_mat # ndarray", "def rotate(self, matrix: List[List[int]]) -> None:\n for r in range(len(matrix)):\n for c in range(r):\n matrix[r][c], matrix[c][r] = matrix[c][r], matrix[r][c]\n for row in matrix:\n row.reverse()", "def add_row(matrix):\n\tl = len(matrix[0])\n\ttemp = matrix[:]\n\ttemp += [[0]*l]\n\treturn temp", "def copy(self):\n data = self.data.copy()\n return MPMatrix(self.shape, data)", "def rotate1(self, matrix: List[List[int]]) -> None:\n matrixLen = len(matrix)\n\n for i in range(matrixLen):\n for j in range(i, matrixLen):\n print(i, j)\n matrix[i][j], matrix[j][i] = matrix[j][i], matrix[i][j]\n\n for i in range(matrixLen):\n for j in range(matrixLen // 2):\n matrix[i][j], matrix[i][matrixLen - 1 - j] = matrix[i][matrixLen - 1 - j], matrix[i][j]", "def rotate(self, matrix: List[List[int]]) -> None:\n if(matrix == None or len(matrix) == 1): return\n n = len(matrix)\n for i in range(0, n//2 + 1):\n for j in range(i, n-1-i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n-1-j][i]\n matrix[n-1-j][i] = matrix[n-1-i][n-1-j]\n matrix[n-1-i][n-1-j] = matrix[j][n-1-i]\n matrix[j][n-1-i] = tmp\n \n return", "def clear(self):\n for y in range(len(self.matrix)):\n for x in range(len(self.matrix[0])):\n self.matrix[y-1][x-1] = (0,0,0)", "def update_kb(self, row, col, falseNeg, result: bool = False):\n pass", "def second_inplace(a):", "def perform_gauss_jordan_elimination_(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n \n r = 0\n c = 0\n rows, cols = len(m), len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n _swap = False\n if m[r,c] == 0:\n for i in range(r+1,rows):\n if m[i,c] == 1:# If new pivot found... 
swap\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n m[[i,r]] = m[[r,i]] ## Swap\n _swap = True\n if show:\n print_matrix(m)\n break # No more swapping in this column\n if not _swap: ## No swap, move to the next column, same row\n c+=1\n\n if m[r,c] == 1:\n ## XOR\n for i in range(rows):\n indexes = np.setdiff1d(np.where(m[:,c] == 1),r) # Get all the ones to XOR in the same column\n for i in indexes:\n m[i] = np.bitwise_xor(m[i],m[r]) # Bitwise XOR\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column) are treated\n if r == rows or c >= cols-1:\n break\n\n if show:\n print(\"Final State\")\n print_matrix(m)\n \n return m", "def setZeroes(self, matrix: List[List[int]]) -> None:\r\n import copy\r\n m=len(matrix)\r\n n=len(matrix[0])\r\n m_copy=copy.deepcopy(matrix)\r\n for i in range(m):\r\n for j in range(n):\r\n if m_copy[i][j]==0:\r\n matrix[i]=[0]*n\r\n for x in range(m):\r\n matrix[x][j]=0", "def inv_inplace(a):", "def setZeroes(matrix):\r\n \r\n #An average O(n^2) time traversal solution with memoization\r\n #for each 0 we encounter, we update the entire row and column to 0s, but on the condition that the row/column has not been updated yet\r\n \r\n row_cache = {}\r\n column_cache = {}\r\n \r\n for r in range(0,rows := len(matrix)):\r\n for c in range(0,cols := len(matrix[0])):\r\n \r\n if matrix[r][c] == 0:\r\n \r\n if not row_cache.get(r):\r\n for i in range(0,cols):\r\n if matrix[r][i] != 0:\r\n matrix[r][i] = '0' #we use strings so we only consider the initial 0s\r\n row_cache[r] = True\r\n \r\n if not column_cache.get(c):\r\n for i in range(0,rows):\r\n if matrix[i][c] != 0:\r\n matrix[i][c] = '0'\r\n column_cache[c] = True\r\n return", "def relax(self):\n # print(\"putin\", self.level.rhs.reshape(-1)[:])\n # print(\"getout\", self.solver(self.level.rhs.reshape(-1)))\n\n self.level.mid[:] = self.solver(self.level.rhs.reshape(-1)).reshape(self.level.mid.shape)", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for col in range(n):\n tmp = []\n for row in range(n):\n tmp.append(matrix[n-1-row][col])\n matrix.append(tmp)\n del(matrix[:n])", "def update_E(self):\n self.grid.E[:, 0, :, :] = self.grid.E[:, -1, :, :]", "def update_rec(self):\n import copy\n \n self.leftrec, self.rightrec = copy.copy(self.rec), copy.copy(self.rec)\n self.leftrec[2*self.dim + 1], self.rightrec[2*self.dim] = self.node.dimension[self.dim], self.node.dimension[self.dim]", "def add_dummy_location_to_matrix(matrix):\n matrix = [row + [0] for row in matrix]\n last_row = [0 for _ in range(len(matrix) + 1)]\n matrix.append(last_row)\n return matrix", "def setNeedToComputeMatrix(self, *args):\n return _osgAnimation.RigGeometry_setNeedToComputeMatrix(self, *args)", "def add_entry(matrix,i,j,replace=False):\n if j not in matrix[i].keys():\n matrix[i][j] = abs(i - j)\n else:\n if replace:\n matrix[i][j] = abs(i - j)", "def update_matrix(self, ope, mat):\n ope_coord = []\n for coord in self.coord_name:\n if np.isnan(ope[coord]):\n return\n ope_coord.append(int(ope[coord]))\n mat[tuple(ope_coord)] += 1", "def transform(self, transformer):\n\t\tnew_matrix = Matrix(self.dims)\n\t\tnew_matrix.data = [transformer(copy.deepcopy(c)) for c in self.data]\n\t\treturn new_matrix", "def fast_update_col(self,j,vals):\n dataptr = self.col_view[:,j].data\n self.X.data[dataptr] = vals", "def rotate(self, matrix: 
List[List[int]]) -> None:\r\n n = len(matrix)\r\n for j in range((n+1)//2):\r\n for i in range(n-2*j-1):\r\n matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i], matrix[n-1-j-i][j] = matrix[n-1-j-i][j], matrix[j][j+i], matrix[j+i][n-1-j], matrix[n-1-j][n-1-j-i]", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def perform_gauss_jordan_elimination(m, show):\n if show:\n print(\"Initial State\")\n print_matrix(m)\n\n r, c = 0, 0\n rows = len(m)\n cols = len(m[0])\n\n if show:\n print(\"rows: %s cols: %s\"%(rows, cols))\n\n while True:\n _swap = False\n\n if show:\n print(\"r %s c %s\"%(r, c))\n\n ## Check Pivot\n if m[r][c] == 0:\n ## Swap\n for i in range(rows):\n if r != i and i > r: ## Avoid comparing the same row and do not swap to upper rows\n if m[i][c] == 1 and not _swap: ## Check if a swap is not performed before in the same column\n if show:\n print(\"Swapping %s %s and %s %s\"%(r, m[r], i, m[i]))\n #m = swap(m,r,i)\n temp = m[r]\n m[r] = m[i]\n m[i] = temp\n _swap = True\n if show:\n print_matrix(m)\n if not _swap: ## If not swap, means there is no 1 to swap, so go to the next column\n c+=1\n\n if m[r][c] == 1:\n ## XOR\n for i in range(rows):\n if r != i: ## Avoid comparing the same row\n if m[i][c] == 1:\n if show:\n print(\"XOR Row %s: %s into Row %s: %s\"%(r, m[r], i, m[i]))\n for e in range(len(m[0])):\n m[i][e] ^= m[r][e]\n if show:\n print_matrix(m)\n\n ## Increase row and column\n r+=1\n c+=1\n\n ## break condition if all rows or all columns (except the augmented column are treated)\n if r == rows or c >= cols-1:\n break\n \n return m", "def rebuildMatrixCache(self):\n self.converterYUR = Mat4.convertMat(CSYupRight, self.lens.getCoordinateSystem()) * self.lens.getProjectionMat()", "def copy_matrix(M):\n rows = len(M)\n cols = len(M[0])\n\n MC = zeros_matrix(rows, cols)\n\n for i in range(rows):\n for j in range(rows):\n MC[i][j] = M[i][j]\n\n return MC", "def wrapDBMatrix(self,mat):\n return mat.todense()", "def setZeroes(self, matrix: List[List[int]]) -> None:\n m, n = len(matrix), len(matrix[0])\n\n col_0_flag = any(matrix[i][0] == 0 for i in range(m))\n row_0_flag = any(matrix[0][i] == 0 for i in range(n))\n\n # 第 0 列, 第 0 行可以作为 [i,j] 的指示存储\n # 例如,如果 [i,j] 为 0, 那么 [0, j], [i, 0] 可以置 0 \n # 再次遍历矩阵,通过判断 [0, j], [i, 0] 去把该行,该列置 0\n # 这样的话就不需要一个额外的矩阵去 for i,j 遍历是否为 0 了\n\n # 注意是从 1 开始的。 \n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][j] == 0:\n matrix[i][0] = matrix[0][j] = 0 \n\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n if col_0_flag:\n for i in range(m):\n matrix[i][0] = 0\n\n if row_0_flag:\n for j in range(n):\n matrix[0][j] = 0", "def copy(self):\n data = dict()\n m, n = self.shape\n for i in range(m):\n for j in range(n):\n data[i, j] = self[i, j]\n return MPMatrix(self.shape, data)", "def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat", "def rotate(self, matrix: List[List[int]]) -> None:\n n 
= len(matrix)\n if n <= 1:\n return\n\n for i in range((n + 1)//2):\n for j in range(i, n - 1 - i):\n tmp = matrix[i][j]\n matrix[i][j] = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - 1 - j]\n matrix[n - 1 - i][n - 1 - j] = matrix[j][n - 1 - i]\n matrix[j][n - 1 - i] = tmp", "def update_E(self):\n self.grid.E[:, :, 0, :] = self.grid.E[:, :, -1, :]", "def __neg__(self):\n #\n # TODO - your code here\n #\n matrix_neg = []\n for i in range(self.h):\n row = []\n for j in range(self.w):\n row.append(0-self.g[i][j])\n matrix_neg.append(row)\n return Matrix(matrix_neg)\n # TODO - your code here", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix[0])\n for i in range(n // 2 + n % 2):\n for j in range(n // 2):\n tmp = matrix[n - 1 - j][i]\n matrix[n - 1 - j][i] = matrix[n - 1 - i][n - j - 1]\n matrix[n - 1 - i][n - j - 1] = matrix[j][n - 1 -i]\n matrix[j][n - 1 - i] = matrix[i][j]\n matrix[i][j] = tmp", "def rotate(self, matrix: List[List[int]]) -> None:\n n = len(matrix)\n for i in range(n - 1):\n for j in range(n - 1 - i):\n matrix[i][j], matrix[n-1-j][n-1-i] = matrix[n-1-j][n-1-i], matrix[i][j]\n for i in range(n):\n for j in range(n // 2):\n matrix[j][i], matrix[n-1-j][i] = matrix[n-1-j][i], matrix[j][i]", "def getMatrix(self, frame):\n self.matrix[3, 0]=self.getValue(frame)\n return self.matrix", "def inverse(self) -> 'Matrix':\n num_R, num_C = self.shape()\n assert num_R == num_C, f\"Must be a square matrix. This one is {self.shape()}.\"\n # -------------------------------------------------------\n # TODO: You write this one.\n\n # 1) Construct the minor_matrix. Feel free to make this a separate method.\n minor_matrix_times_cofactor = Matrix.zeros(self.shape())\n\n for i in range (num_R):\n for j in range(num_C):\n minor_matrix_times_cofactor.mat[i][j] = self.get_minor(i,j).determinant() * (-1)**(i+j)\n\n minor_matrix_times_cofactor.display(message=\"minor\")\n # 2) Calculate the determinant, either by calling the determinant() method or by using the minor_matrix (faster)\n det = 0\n for i in range (num_R):\n det += self.mat[i][0] * minor_matrix_times_cofactor.mat[i][0]\n #print (f\"determinant: {self.determinant()}\")\n # 3) The inverse is the transpose of the minor matrix, divided by the determinant. 
Make sure that the determinant\n # isn't zero!\n if det == 0:\n return None\n return minor_matrix_times_cofactor.transpose().times(1/det)\n\n return Matrix([[\"Not yet written\"]]) # remove this when you add your code.\n # -------------------------------------------------------", "def rotate(self, matrix: list[list[int]]) -> None:", "def build_augmented_matrix(self):\r\n for row in range(self.SIZE):\r\n self.matrix[row].append(self.result[row])", "def rotate(self, matrix: List[List[int]]) -> None:\n height=len(matrix)\n for h in range(math.ceil(height/2)):\n for i in range(h,height-h-1):\n # print((h,i), (height-i-1,h))\n temp=matrix[h][i]\n matrix[h][i] = matrix[height-i-1][h]\n matrix[height-i-1][h] = matrix[height-h-1][height-i-1]\n matrix[height-h-1][height-i-1] = matrix[i][height-h-1]\n matrix[i][height-h-1] = temp", "def setZeroes(self, matrix: List[List[int]]) -> None:\n row_num, col_num = len(matrix), len(matrix[0])\n # 创建集合set()用于存放需要置零的行和列\n row_set, col_set = set(), set()\n for row in range(row_num):\n for col in range(col_num):\n if matrix[row][col]==0:\n row_set.add(row)\n col_set.add(col)\n # 将记录的行、列中的元素赋值为0\n # 再次遍历赋值\n for row in range(row_num):\n for col in range(col_num):\n if row in row_set or col in col_set:\n matrix[row][col] = 0\n # # 或者行列单独赋值均可\n # for row in row_set:\n # for col in range(col_num):\n # matrix[row][col] = 0\n # for col in col_set:\n # for row in range(row_num):\n # matrix[row][col] = 0", "def __setitem__(self, idx, value):\n row, col = idx\n\n if row < 0 or row >= self.num_rows:\n raise IndexError(\"Row out of bounds\")\n\n if col < 0 or col >= self.num_cols:\n raise IndexError(\"Col out of bounds\")\n\n if value == self.default:\n del self[row, col]\n return\n\n array_row = self._find_row_before(row)\n\n if (array_row.next_row == None or array_row.next_row.row_number > row):\n new_row = SparseMatrix.MatrixRow()\n new_row.row_number = row\n new_row.next_row = array_row.next_row\n array_row.next_row = new_row\n\n sentinel_entry = SparseMatrix.MatrixEntry()\n new_row.row_sentinel = sentinel_entry\n\n array_row = array_row.next_row\n array_entry = self._find_column_before(array_row, col)\n\n if (array_entry == None or array_entry.next_entry == None or\n array_entry.next_entry.column_number > col):\n new_entry = SparseMatrix.MatrixEntry()\n new_entry.column_number = col\n if array_entry == None:\n new_entry.next_entry = None\n else:\n new_entry.next_entry = array_entry.next_entry\n array_entry.next_entry = new_entry\n\n array_entry = array_entry.next_entry\n array_entry.value = value", "def inv(M):\n\t#clone the matrix and append the identity matrix\n\t# [int(i==j) for j in range_M] is nothing but the i(th row of the identity matrix\n\tm2 = [row[:]+[int(i==j) for j in range(len(M) )] for i,row in enumerate(M) ]\n\t# extract the appended matrix (kind of m2[m:,...]\n\treturn [row[len(M[0]):] for row in m2] if gauss_jordan(m2) else None", "def getMatrix(self, frame):\n self.matrix[3, 1]=self.getValue(frame)\n return self.matrix", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def back_substitution(self):\r\n for col in range(self.SIZE - 1, -1, -1):\r\n self.check_solvability(self.matrix[col][col], self.result[col])\r\n self.result[col] = self.divide(self.result[col], self.matrix[col][col])\r\n for row in range(col - 1, -1, -1):\r\n 
self.result[row] -= (self.result[col] * self.matrix[row][col])\r\n return self.result", "def rotate(self, matrix: List[List[int]]) -> None:\n for i in range(len(matrix)):\n matrix[i] = matrix[i][::-1]\n for i in range(len(matrix)):\n for j in range(i, len(matrix[0])):\n temp = matrix[i][len(matrix[0])-1-j]\n matrix[i][len(matrix[0])-1-j] = matrix[j][len(matrix[0])-1-i]\n matrix[j][len(matrix[0])-1-i] = temp" ]
[ "0.7066283", "0.6845453", "0.6360038", "0.62143975", "0.6191982", "0.61279464", "0.6040232", "0.59938055", "0.5967881", "0.59397227", "0.59288263", "0.5918774", "0.58981085", "0.5853208", "0.58428276", "0.58091146", "0.57997805", "0.5791091", "0.57368064", "0.57231134", "0.571024", "0.567267", "0.5623311", "0.5616256", "0.5611096", "0.559556", "0.5591859", "0.55860806", "0.5582572", "0.55817497", "0.55699295", "0.5563453", "0.5561985", "0.55513245", "0.5542011", "0.55417335", "0.55417037", "0.5537588", "0.5535802", "0.55128753", "0.54937816", "0.5493724", "0.5487094", "0.5486901", "0.54838663", "0.54806215", "0.5478396", "0.5473689", "0.5452827", "0.545131", "0.545126", "0.5437012", "0.54251355", "0.54204136", "0.5415713", "0.5412535", "0.5411605", "0.54115677", "0.5411041", "0.54007477", "0.540057", "0.53956455", "0.5394152", "0.53922236", "0.5390152", "0.53897125", "0.5382547", "0.5368958", "0.5367961", "0.5364521", "0.5363985", "0.53639716", "0.5355073", "0.5352674", "0.53513575", "0.53471494", "0.5344442", "0.53364575", "0.5330477", "0.53231734", "0.53231454", "0.53146696", "0.53145325", "0.5306455", "0.5289743", "0.5276828", "0.52762246", "0.5271188", "0.52703875", "0.5270288", "0.5267794", "0.5264162", "0.5259863", "0.5258797", "0.52578264", "0.5257774", "0.52469987", "0.5246209", "0.5244357", "0.5236758", "0.52356297" ]
0.0
-1
This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`.
def plot_confusion_matrix(cm, classes,
                          normalize=False,
                          title='Confusion matrix',
                          cmap=plt.cm.Blues):
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')

    print(cm)

    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)

    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
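A minimal usage sketch, assuming scikit-learn is available and that y_test, y_pred, and class_names are hypothetical placeholders for labels and class names that already exist:

import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

# y_test, y_pred and class_names are hypothetical placeholders for existing data
cnf_matrix = confusion_matrix(y_test, y_pred)
np.set_printoptions(precision=2)

plt.figure()
plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                      title='Normalized confusion matrix')
plt.show()

Calling the function once with normalize=False and once with normalize=True produces the raw-count and normalized views that the printed messages refer to.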
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_confusion_matrix(cm, classes=[0,1], normalize=False, title='Confusion matrix', print_matrix=False):\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n if print_matrix:\n print(cm)", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',saveas='cm', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n\n plt.figure() \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n foo_fig = plt.gcf() # 'get current figure'\n# foo_fig.savefig('confusion_matrix.eps', format='eps', dpi=1000) \n foo_fig.savefig(saveas, dpi=1000, bbox_inches='tight')\n plt.show()", "def plot_confusion_matrix(cm, y_test, y_pred, class_names,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('\\n')\n print(\"Normalized confusion matrix\")\n else:\n print('\\n')\n print('Confusion matrix, without normalization')\n print_cm(cm, class_names)\n text_labels = [['True Negative', 'False Positive'],\n ['False Negative', 'True Positive']]\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(class_names))\n plt.xticks(tick_marks, class_names, rotation=45)\n plt.yticks(tick_marks, class_names)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i - 0.1, format(cm[i, j], fmt),\n verticalalignment='bottom',\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.text(j, i + 0.1, text_labels[i][j],\n verticalalignment='top',\n horizontalalignment=\"center\",\n fontsize=12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n # Print accuracy and precision\n print('Accuracy: ', accuracy_score(y_test, y_pred, normalize=True))\n print('Precision: ', precision_score(y_test, y_pred, average='macro'))\n print('Roc-Auc: ', roc_auc_score(y_test, y_pred))\n # Plot non-normalized confusion matrix", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n #cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = cm.astype('float') / np.sum(cm.ravel())\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig=plt.figure\n plt.imshow(cm, interpolation='nearest', cmap=cmap )\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in 
itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()\n return fig", "def plot_confusion_matrix(\n y_true, y_pred, classes, normalize=True, title=\"Confusion matrix\", cmap=plt.cm.Blues\n):\n cm = confusion_matrix(y_true, y_pred)\n\n if normalize:\n cm = cm.astype(\"float\") / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(\"Confusion matrix, without normalization\")\n\n plt.imshow(cm, interpolation=\"nearest\", cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = \".2f\" if normalize else \"d\"\n thresh = cm.max() / 2.0\n for i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",\n )\n\n plt.tight_layout()\n plt.ylabel(\"True label\")\n plt.xlabel(\"Predicted label\")", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n #else:\n\n #print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n# for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n# plt.text(j, i, format(cm[i, j], fmt),\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n cm = confusion_matrix(y_test,predictions)\n plt.figure()\n plot_confusion_matrix(cm, classes=[0,1,2], normalize=True,\n title='Confusion Matrix')", "def plot_confusion_matrix(y_test, y_pred, classes,\n normalize=True,\n title='Average accuracy \\n',\n cmap=plt.cm.Blues, verbose = 0, precision = 0):\n from sklearn.metrics import confusion_matrix\n import itertools\n \n cm = confusion_matrix(y_test, y_pred)\n accuracy = (np.sum(np.diag(cm)) / np.sum(cm)) * 100.0\n\n if normalize:\n cm = (cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]) * 100.0\n if verbose == 1:\n print(\"Normalized confusion matrix\")\n else:\n if verbose 
== 1:\n print('Confusion matrix, without normalization')\n \n if verbose == 1:\n print(cm)\n\n plt.figure(figsize=(18, 9))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.format_map({'acc':accuracy}), fontsize=25)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45, fontsize=20)\n plt.yticks(tick_marks, classes, fontsize=20)\n\n fmt = '{:.'+ '%d'%(precision) +'f} %' if normalize else '{:d}'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, fmt.format(cm[i, j]),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\", fontsize=16)\n plt.tight_layout()\n plt.ylabel('True label', fontsize=20)\n plt.xlabel('Predicted label', fontsize=20)", "def plot_confusion_matrix(self, cm, classes, normalize, cmap=plt.cm.Blues, title='confusin Matrix'):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n tick_marks = np.arange(len(classes))\r\n\r\n self.subplt.set_xlabel(\"Predicted label\")\r\n self.subplt.set_ylabel(\"True Label\")\r\n self.subplt.set_title(\"Confusion Matrix\")\r\n self.subplt.set_xticks(tick_marks,classes)\r\n self.subplt.set_yticks(tick_marks,classes)\r\n\r\n self.canvas2.show()", "def showConfusionMatrix(self): \r\n sn.heatmap(self.conf_matrix, annot=True)\r\n plt.plot( label=\"Accuracy\")\r\n plt.plot( label=\"Error\")\r\n plt.figtext(0,0,'Accuracy: {}\\nError: {}\\nRecall: {}\\nPrecision: {}'.format(self.accuracy,\r\n self.error,\r\n self.recall,\r\n self.precision))\r\n plt.title('Confusion Matrix')\r\n plt.show()\r\n return None", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n \n plt.title(title)\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.colorbar()\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, 
np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, \"%.2f\" % cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print('Normalized confusion matrix')\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment='center',\n color='white' if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n #based on http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n cmap=plt.cm.Blues\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n np.set_printoptions(precision=2)\n \n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%1.2f' % cm[i, j],\n horizontalalignment=\"center\",\n fontsize =12,\n color=\"white\" if cm[i, j] > thresh else \"black\")\n #plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes)) \n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 
2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.axis('auto')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n print('Confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(confusion_matrix, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(confusion_matrix, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n confusion_matrix = confusion_matrix.astype(\n 'float') / confusion_matrix.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(confusion_matrix)\n\n thresh = confusion_matrix.max() / 2.\n for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):\n plt.text(j, i, confusion_matrix[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if confusion_matrix[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n # print(cm)\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n confusion_matrix_dir = './confusion_matrix_plots'\n if not os.path.exists(confusion_matrix_dir):\n os.mkdir(confusion_matrix_dir)\n\n plt.cla()\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"#BFD1D4\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n if normalize:\n plt.savefig(os.path.join(confusion_matrix_dir, 'normalized.jpg'))\n else:\n plt.savefig(os.path.join(confusion_matrix_dir, 'without_normalization.jpg'))", "def plot_confusion_matrix(cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n\n plt.figure(figsize=(10,10))\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n cm = np.around(cm, decimals=2)\n cm[np.isnan(cm)] = 0.0\n print(\"Normalized confusion matrix\")\n\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n t = \"(%.2f)\"%(cm[i, j])\n #print t\n# plt.text(j, i, t,\n# horizontalalignment=\"center\",\n# color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('IOB-Confusion-Matrix-SVM.png')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n # 1. find out how many samples per class have received their correct label\n # 计算真正类别为k的样本被预测成各个类别的比例\n # e.g. 有25个样本的 true label 是 6,其中10个样本被预测为类别7,那么在混淆矩阵中 true label = 6 并且 predicted label = 7 的一个格子中的值为 0.4\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n # 2. get the precision (fraction of class-k predictions that have ground truth label k)\n # 计算预测的准确率\n # e.g. 
预测为类别k的有12个,但其中只有9个的真正类别是k,那么准确率为 0.75\n # cm = cm.astype('float') / cm.sum(axis=0)[:, np.newaxis]\n \n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n # tick_marks = np.arange(len(classes))\n # plt.xticks(tick_marks, classes, rotation=45)\n # plt.yticks(tick_marks, classes)\n\n # fmt = '.2f' if normalize else 'd'\n # thresh = cm.max() / 2.\n # for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # plt.text(j, i, format(cm[i, j], fmt),\n # horizontalalignment=\"center\",\n # color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n \n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n \n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n \n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('confusion_matrix.png')", "def plotConfusionMatrix(self, cm, classes, normalize=True, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig('confusion_matrix.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if 
normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion Matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion Matrix, without normalization')\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, weight='bold')\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n if i == 0:\n plt.text(j-0.1, i+0.3, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n if i == 1:\n plt.text(j-0.1, i-0.2, format(cm[i, j], fmt), color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True Label', weight='bold')\n plt.xlabel('Predicted Label', weight='bold')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n #pdb.set_trace()\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def sam_plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n plots_dims = itertools.product(list(range(cm.shape[0])),\n list(range(cm.shape[1])))\n for i, j in plots_dims:\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion 
matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \n print(a)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap, vmin=0.0, vmax=1.0)\n\n plt.title(title)\n\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.3f'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n # plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n # Tweak spacing to prevent clipping of tick-labels\n plt.subplots_adjust(bottom=0.2)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.xlabel('Predicted label') \n plt.ylabel('True label') \n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, 
interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label') \n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Purples):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n # plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n # plt.grid('off')\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=True,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n# print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n return plt.gcf()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title + \"Confusion matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print(title + ' confusion matrix')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=None):\n if normalize:\n # cm = cm.T\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n # cm = cm.T\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure(figsize=(4, 4))\n plt.imshow(cm, interpolation='nearest', cmap=cmap or plt.cm.Blues)\n plt.title(('Normalized ' if normalize else '') + title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(list(range(cm.shape[0])), list(range(cm.shape[1]))):\n plt.text(\n j,\n i,\n format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n 1#print('Confusion matrix, without normalization')\n\n #print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n print(cm)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title, fontsize=14)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title,fontsize=20)\n# plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, fontsize=15)\n plt.yticks(tick_marks, classes,rotation=30,fontsize=15)\n\n fmt = '.2f'\n 
thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\",fontsize=20)\n\n plt.tight_layout()\n plt.ylabel('True label',fontsize=20)\n plt.xlabel('Predicted label',fontsize=20)", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Confusion matrix\")\n else:\n print('Confusion matrix')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Greens):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, '%.02f'%cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else 
\"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"red\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n 
print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.show()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n 
horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title.split('/')[-1])\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n # plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n if title:\n plt.savefig(title+'.png')\n\n plt.close()", "def plot_confusion_matrix(cm, classes=[],\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n plt.figure()\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n #print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=90)\n plt.yticks(tick_marks, classes)\n\n fmt = '.1f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n tick_marks = np.arange(len(classes))\r\n plt.xticks(tick_marks, classes, rotation=45)\r\n plt.yticks(tick_marks, classes)\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')\r\n plt.savefig('Logistik.png')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='',\n cmap=plt.cm.Blues, file_name='cm_plot'):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = FONT_SIZE\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=0)\n plt.yticks(tick_marks, classes)\n\n fmt = '.6f' if 
normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label', fontsize=FONT_SIZE)\n plt.xlabel('Predicted label', fontsize=FONT_SIZE)\n plt.subplots_adjust(bottom=0.13)\n with PdfPages(file_name) as pdf:\n pdf.savefig()\n plt.close()", "def plot_confusion_matrix(self):\r\n interp = ClassificationInterpretation.from_learner(self.learn)\r\n interp.plot_confusion_matrix()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n 
plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, 
classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), 
range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > 
thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes=None, normalize=False,\n title='Confusion matrix', cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n\n if classes:\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.show()", "def plot_confusion_matrix(cm,\r\n normalize=False,\r\n title='Confusion matrix',\r\n cmap=plt.cm.Blues):\r\n if normalize:\r\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\r\n print(\"Normalized confusion matrix\")\r\n else:\r\n print('Confusion matrix, without normalization')\r\n\r\n# print(cm)\r\n\r\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\r\n plt.title(title)\r\n plt.colorbar()\r\n\r\n fmt = '.2f' if normalize else 'd'\r\n thresh = cm.max() / 2.\r\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\r\n plt.text(j, i, format(cm[i, j], fmt),\r\n horizontalalignment=\"center\",\r\n color=\"white\" if cm[i, j] > thresh else \"black\")\r\n\r\n plt.tight_layout()\r\n plt.ylabel('True label')\r\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, 
normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.figure()\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(cm)\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j], horizontalalignment=\"center\", color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')", "def plot_confusion_matrix(y_true, y_pred, classes,\n normalize=False,\n title=None,\n cmap=plt.cm.Blues):\n if not title:\n if normalize:\n title = 'Normalized confusion matrix'\n else:\n title = 'Confusion matrix, without normalization'\n\n # Compute confusion matrix\n cm = confusion_matrix(y_true, y_pred)\n # Only use the labels that appear in the data\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n fig, ax = plt.subplots(figsize=(8, 8))\n im = ax.imshow(cm, interpolation='nearest', cmap=cmap)\n ax.figure.colorbar(im, ax=ax)\n # We want to show all ticks...\n ax.set(xticks=np.arange(cm.shape[1]),\n yticks=np.arange(cm.shape[0]),\n # ... 
and label them with the respective list entries\n xticklabels=classes, yticklabels=classes)\n ax.set_title(title,size = 20)\n ax.set_ylabel('True label',size = 20)\n ax.set_xlabel('Predicted label',size = 20)\n\n # Rotate the tick labels and set their alignment.\n plt.setp(ax.get_xticklabels(), rotation=45, ha=\"right\",\n rotation_mode=\"anchor\",size = 18)\n plt.setp(ax.get_yticklabels(),size = 18)\n\n # Loop over data dimensions and create text annotations.\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i in range(cm.shape[0]):\n for j in range(cm.shape[1]):\n ax.text(j, i, format(cm[i, j], fmt),\n ha=\"center\", va=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n fig.tight_layout()\n name = OUTFOLDER + \"/confusion_matrix_batch%d_layers%d_epochs%d_f1%d\" % (BATCH_SIZE,LAYERS,EPOCHS,f1_mean_test*100)\n if normalize:\n name = name + \"_norm\"\n plt.savefig(name)\n plt.close()\n return ax", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t cmap=plt.cm.Blues):\n\tif normalize:\n\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\tprint(\"Normalized confusion matrix\")\n\telse:\n\t\tprint('Confusion matrix, without normalization')\n\n\tprint(cm)\n\n\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\tplt.title(title)\n\tplt.colorbar()\n\ttick_marks = np.arange(len(classes))\n\tplt.xticks(tick_marks, classes, rotation=45)\n\tplt.yticks(tick_marks, classes)\n\n\tfmt = '.2f' if normalize else 'd'\n\tthresh = cm.max() / 2.\n\tfor i, j in product(range(cm.shape[0]), range(cm.shape[1])):\n\t\tplt.text(j, i, format(cm[i, j], fmt),\n\t\t\t\t horizontalalignment=\"center\",\n\t\t\t\t color=\"white\" if cm[i, j] > thresh else \"black\")\n\n\tplt.tight_layout()\n\tplt.ylabel('True label')\n\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes, normalize=False, title='Confusion matrix', cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, cm[i, j],\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.savefig('../results/conf_matr.png')\n\n return cm", "def plot_confusion_matrix(cm, classes,\n\t\t\t\t\t\t\t normalize=False,\n\t\t\t\t\t\t\t title='Confusion matrix',\n\t\t\t\t\t\t\t cmap=plt.cm.Blues):\n\t\tif normalize:\n\t\t\tcm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\t\t\tprint(\"Normalized confusion matrix\")\n\t\telse:\n\t\t\tprint('Confusion matrix, without normalization')\n\n\t\tplt.imshow(cm, interpolation='nearest', cmap=cmap)\n\t\tplt.title(title)\n\t\tplt.colorbar()\n\t\ttick_marks = np.arange(len(classes))\n\t\tplt.xticks(tick_marks, classes, rotation=45)\n\t\tplt.yticks(tick_marks, classes)\n\n\t\tplt.tight_layout()\n\t\tplt.ylabel('True label')\n\t\tplt.xlabel('Predicted label')", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / 
cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without 
normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n 
tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()", "def plot(self):\n plt.imshow(self.cm, interpolation='nearest', cmap=self.cmap)\n plt.title(self.title)\n plt.colorbar()\n tick_marks = np.arange(len(self.classes))\n plt.xticks(tick_marks, self.classes, rotation=45)\n plt.yticks(tick_marks, self.classes)\n \n if self.normalize:\n self.cm = self.cm.astype('float') / self.cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n \n print(self.cm)\n \n thresh = self.cm.max() / 2.\n for i, j in itertools.product(range(self.cm.shape[0]), range(self.cm.shape[1])):\n plt.text(j, i, self.cm[i, j], horizontalalignment=\"center\", color=\"white\" if self.cm[i, j] > thresh else \"black\")\n plt.tight_layout()\n plt.ylabel('True Label')\n plt.xlabel('Predicted label')" ]
[ "0.8194862", "0.80949175", "0.8029915", "0.8019153", "0.79941195", "0.7991258", "0.7980955", "0.7976606", "0.79610753", "0.79590565", "0.79378676", "0.7934962", "0.7934504", "0.79313844", "0.7926313", "0.7924577", "0.79241234", "0.7923211", "0.7923023", "0.7921931", "0.7917871", "0.7916092", "0.79083747", "0.7907475", "0.79068965", "0.7904398", "0.7900711", "0.7900422", "0.7896704", "0.7894559", "0.7893862", "0.7891639", "0.78906786", "0.78895235", "0.7886698", "0.7884568", "0.78841054", "0.78773123", "0.78745896", "0.7869866", "0.7860299", "0.78572506", "0.7856715", "0.7853253", "0.7852508", "0.78493565", "0.78482205", "0.7847642", "0.7845746", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.78436774", "0.7842821", "0.783704", "0.7836942", "0.7836734", "0.78358006", "0.78322923", "0.7831496", "0.78314656", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.78289187", "0.7822435", "0.7822236", "0.7820784", "0.7820784", "0.7820304", "0.7817516", "0.78159386", "0.78157204", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7814644", "0.7813965", "0.7813563" ]
0.0
-1
Export ECU data to excel format
def extract_ecu(self, vin_list=None):
    corvets = Corvet.objects.filter(vin__in=vin_list)
    self.header, self.fields = self.get_header_fields(CORVET_DICT.get("extract_ecu", []))
    values_list = corvets.values_list(*self.fields).distinct()
    return values_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_excel(self, filename):\n self.data.to_excel(filename)", "def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n 
host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n \"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, 
host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n 
host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 
1\n\n detail_row += 1\n report.close()\n session.close()", "def export_to_file(final_data_fetch):\r\n\r\n # Column names for data\r\n header_fields = ['Course', 'University', 'GPA', 'GRE', 'TOEFL', 'Work Experience', 'UG Course', 'UG College','Admit Status']\r\n with xlsxwriter.Workbook('yocket_data.xlsx') as workbook:\r\n worksheet = workbook.add_worksheet()\r\n\r\n # Write Header Fields\r\n worksheet.write_row(0, 0, header_fields)\r\n # Write data fields\r\n for row_num, data in enumerate(final_data_fetch):\r\n worksheet.write_row(row_num+1, 0, data)\r\n\r\n # Store as binary data\r\n with open('yocket_data.data', 'wb') as f:\r\n pickle.dump(final_data_fetch, f)", "def _write2excel(self, sheet: object, data: list, start_row: int, start_col: int):\n for r in range(0,len(data)):\n for c in range(0,len(data[0])):\n sheet.cell(r+start_row,c+start_col).value=data[r][c]", "def excel_output(df):\n output = io.BytesIO()\n #time = str(date.today())\n #filename = \"output \"+time+\".xlsx\"\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n #writer.book.filename = io\n df.to_excel(writer,'Sheet1', index=False, header=True)\n writer.save()\n xlsx_data = output.getvalue()\n return xlsx_data", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if 
cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig excel -ruu horvuulne\n\t\t\t\t[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n 
index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def to_excel(self, filename, **kwargs):\n self.data.to_excel(filename, **kwargs)", "def write_to_excel(self, fileNameNoExtension):\n self.data.to_excel(fileNameNoExtension + '.xlsx', engine='xlsxwriter')", "def write_excel(self, filename):\n writer = pd.ExcelWriter(filename)\n self.df_avg.to_excel(writer, 'Simulation')\n self.manager_df.to_excel(writer, 'FleetManagers')\n self.customer_df.to_excel(writer, 'Customers')\n self.transport_df.to_excel(writer, 'Transports')\n writer.save()", "def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()", "def export(self):\n\n rpt_date = datetime.now()\n filename = 
'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export_data_and_class_df_to_excel(data_df, class_df, excel_filename=None):\n from pandas import ExcelWriter\n print \"==========start exporting data and class dataframe to excel================\"\n if excel_filename == None:\n session = class_df.ix[0,s_info.session_col]\n sensor = class_df.ix[0, s_info.sensor_col]\n print \"session: %d, sensor: %s\" % (session, sensor)\n excel_filename = s_info.feature_dataset_folder + \"/session\" + str(session) + \"_\" + sensor + \".feature.xlsx\"\n writer = ExcelWriter(excel_filename)\n data_df.to_excel(writer, sheet_name=\"data(features)\")\n class_df.to_excel(writer, sheet_name=\"class(other information)\")\n writer.save()\n print excel_filename + \" exported\"\n return excel_filename", "def dumptoexcel(source_html, output_excel):\r\n\r\n arguments = {'srcName' : source_html, 'desName' :output_excel }\r\n\r\n #Reading from HTML file.\r\n soup = BeautifulSoup(open(arguments['srcName']))\r\n table = soup.find('table')\r\n table_rows = table.find_all('tr')\r\n\r\n \r\n #Opening Excel File.\r\n desWorkBook = openpyxl.Workbook()\r\n desSheet = desWorkBook.active\r\n\r\n\r\n #Getting data ready to write.\r\n all_rows = []\r\n\r\n table_head = table.find_all('th')\r\n row = [i.text for i in table_head]\r\n all_rows.append(row)\r\n \r\n for tr in table_rows:\r\n td = tr.find_all('td')\r\n row = [i.text for i in td]\r\n if(len(row) != 0):\r\n all_rows.append(row)\r\n\r\n rowLen = len(all_rows[0])\r\n maxColWidths = [0]*rowLen\r\n \r\n for row in all_rows:\r\n for i in range(0,rowLen):\r\n temp = len(row[i])\r\n if(maxColWidths[i]<temp):\r\n maxColWidths[i] = temp\r\n\r\n \r\n #Writing to Excel File.\r\n rowNo = 1\r\n for row in all_rows:\r\n colNo = 1\r\n row_len = len(row)\r\n for i in xrange(1,row_len):\r\n\r\n desSheet.cell(row=rowNo, column=colNo).value = row[i]\r\n desSheet.column_dimensions[get_column_letter(colNo)].width = maxColWidths[i] \r\n colNo = colNo+1\r\n \r\n rowNo = rowNo+1\r\n\r\n #Saving Excel File.\r\n \r\n desWorkBook.save(arguments['desName'])", "def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb", 
"def export_excel(header, data):\n tmp = NamedTemporaryFile()\n wb = Workbook()\n ws = wb.active\n\n ws.append(header)\n for row in export_data(data, header):\n ws.append(row)\n\n wb.save(tmp.name)\n tmp.seek(0)\n\n return tmp", "def rite2xl(df, file_name):\r\n print('writing dataframe to excel',)\r\n writer = pd.ExcelWriter(file_name ,engine = 'xlsxwriter')\r\n df.to_excel(writer,file_name)\r\n writer.save()\r\n print('writing to excel sheet completed')\r\n return(df)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + 
filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(exp_data: ExportData) -> None:\n pass", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def click_export_to_excel_button(self):\n self.click_element(self.export_to_excel_button_locator, True)", "def click_export_to_excel_button(self):\n self.click_element(self.export_to_excel_button_locator, True)", "def save_new_excel_data(df, req_file_name, sheet):\r\n try:\r\n # select rows for a specific column and save a excel file\r\n dtc_table_ext = ['SW_DTC', 'Diagnosis_IDENTIFIER', 'Symptom', 'SW_Module', 'ISO_Pcode',\r\n 'Cust_Pcode', 'ScanT_Pcode', 'Description', 'Lamp_Manager', 'EPC_Lamp',\r\n 'SnapShot', 'MIL_FUEL_CONF', 'Diagnosis_Enabled', 'Diagnosis_presence',\r\n 'Severity', 'Priority', 'Diag_Call_task', 'Diag_Validation', 'Unit',\r\n 'Diag_DeValidation', 'DTC_available', 'EPC', 'MIL_FuelConf_bit1',\r\n 'MIL_FuelConf_bit0', 'Lamp_Manager_bit2', 'Lamp_Manager_bit1', 'Lamp_Manager_bit0',\r\n 'AUTOyyy', 'Prio_bit3', 'Prio_bit2', 'Prio_bit1', 'Prio_bit0',\r\n 'Snapshot_bit2', 'Snapshot_bit1', 'Snapshot_bit0', 'empty', 'ETC_highbit', 'ETC_lowbit']\r\n # Save df_all_cols extracted to a new excel file\r\n file_to_save = sheet+'_'+req_file_name\r\n with pd.ExcelWriter(file_to_save) as writer: # for writing more than 1 sheet\r\n df.to_excel(writer, sheet_name=sheet, index=False)\r\n # df.to_excel(writer, sheet_name=sheet, columns=dtc_table_ext, index=False)\r\n except PermissionError:\r\n print('DEBUG-->save_new_excel_data: exception raised: ', sys.exc_info())", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def write_xlsx(data):\n workbook = xlsxwriter.Workbook('MyWorkbook.xlsx')\n main_sheet = workbook.add_worksheet('MySheet')\n\n date_format = workbook.add_format(\n {'num_format': 'mm/dd/yy hh:mm:ss AM/PM'})\n length = str(len(data) + 1)\n \n main_sheet.add_table(('A1:D' + length), \n {'data': data,\n 'columns': [{'header': 'Department'}, {'header': 'Students'},\n {'header': 'Cumulative GPA'},\n {'header': 'Final Date',\n 'format': date_format}]})\n\n department_grades = workbook.add_chart({'type':'column'})\n department_grades.set_title(\n {'name':'Department and Grade distribution'})\n department_grades.add_series(\n {'categories':'=MySheet!$A$2:$A$5',\n 'values':'=MySheet!$C$2:$C$5'})\n main_sheet.insert_chart('A8', department_grades)\n workbook.close()", "def export(self):\n\n 
rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def generate_excel(structure:dict, output:str):\t\n\n\tstructure_columns = identify_columns(structure)\n\n\tworkbook = xlsxwriter.Workbook(output)\n\tworksheet = workbook.add_worksheet()\n\n\tcol = 0\n\tfor column in structure_columns:\n\t\tworksheet.write(0, col, column)\n\t\tcol += 1\n\n\trow = 1\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif isinstance(day[key], list):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), ', '.join(day[key]))\n\t\t\telif isinstance(day[key], dict):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), str(day[key]))\n\t\t\telse:\n\t\t\t\tworksheet.write(row, structure_columns.index(key), day[key])\n\t\trow += 1\n\t\n\tworksheet.freeze_panes(1, 1)\n\tworkbook.close()", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def exportToCsv(self, filepath):\n table = list()\n table.append(list(self.__header))\n for a in self.__assays:\n table.append(\n [unicode(a.timestamp.isoformat()),\n unicode(a.dab_cell_count),\n unicode(a.hem_cell_count),\n unicode(a.dab_dabhemfraction),\n unicode(a.img_path)])\n # File encoding will be same as it expected by Excel on machine where\n # this file was created.\n encoding = locale.getpreferredencoding()\n with open(filepath, mode='wb') as f:\n writer = UnicodeWriter(f, encoding=encoding, delimiter=';')\n writer.writerows(table)", "def export(self):\r\n self.prices[\"returns\"] = self.returns\r\n self.prices.columns = ['prices', 'returns']\r\n self.prices = self.prices.dropna()\r\n \r\n name = QFileDialog.getSaveFileName(None, 'Save File', filter='*.xlsx')\r\n if(name[0] == ''):\r\n # if name empty\r\n pass\r\n else:\r\n self.prices.to_excel(name[0])", "def CCF_toExcel(self, data_set, ccf_inputs):\n file_name = self.file_path(target_filename=\"LEICode_CCF_ModelID_EndOfObservationPeriod_versionNumber.xlsx\")\n oxl = openpyxl.load_workbook(file_name)\n\n # Information missing from test results:\n start_date\t = datetime.date(2007, 1, 1)\n end_date\t = datetime.date(2015, 1, 1)\n nb_customer = len(data_set.id.unique())\n grade_nb = data_set.Bin_CCF.unique()\n grade_name = []\n grade_counts = []\n avCCFE_perGrade = []\n avCCFR_perGrade = []\n minCCFR_perGrade = []\n maxCCFR_perGrade = []\n q5CCFR_perGrade = []\n q10CCFR_perGrade = []\n q25CCFR_perGrade = []\n q50CCFR_perGrade = []\n q75CCFR_perGrade = []\n q90CCFR_perGrade = []\n q95CCFR_perGrade = []\n for g in range(1, len(grade_nb) + 1):\n 
grade_name.append( self.grade_mapping(grade_num = g) )\n grade_counts.append( data_set[data_set.Default_Binary == 1][\"Bin_CCF\"].value_counts()[g] )\n avCCFE_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF.mean()[g] )\n avCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.mean()[g] )\n minCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.min()[g])\n maxCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.max()[g])\n q5CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.05)[g])\n q10CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.10)[g])\n q25CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.25)[g])\n q50CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.50)[g])\n q75CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.75)[g])\n q90CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.90)[g])\n q95CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.95)[g])\n\n bcktesting_ccf_ptf = [\"N/A\", #Name of facility grade/pool or segment\n len(data_set.id.unique()), # Number of facilities (R)\n data_set.CCF.mean(), # Average estimated CCF (CCF^E)\n data_set.CCF_realised.mean(), # Average realised CCF (CCF^R)\n 0.0, # Floor used (if applicable)\n 0.0, # Number of CCF realisations floored\n data_set.CCF_realised.min(), # Minimum CCF^R\n data_set.CCF_realised.quantile(0.05), # Quantiles\n data_set.CCF_realised.quantile(0.10), #\n data_set.CCF_realised.quantile(0.25), #\n data_set.CCF_realised.quantile(0.50), #\n data_set.CCF_realised.quantile(0.75), #\n data_set.CCF_realised.quantile(0.90), #\n data_set.CCF_realised.quantile(0.95), #\n data_set.CCF_realised.max(), # Maximum CCF^R\n 0 # Exposure-weighted average of CCF^R (to be created)\n ]\n\n # Predictive ability\n ## CCF back-testing using a t-test (§ 2.9.3.1) - sheet 3.1\n wbk31 = oxl.get_sheet_by_name(\"3.1\")\n # Grade Lvl\n self.array_toExcel(wb=wbk31, stat_array = grade_name, row_pos=10, col_pos=4, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = grade_counts, row_pos=10, col_pos=5, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFE_perGrade, row_pos=10, col_pos=6, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFR_perGrade, row_pos=10, col_pos=7, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=8, row_wise=True) # Floor used (if applicable)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=9, row_wise=True) # Number of CCF realisations floored\n self.array_toExcel(wb=wbk31, stat_array= minCCFR_perGrade, row_pos=10, col_pos=10, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= maxCCFR_perGrade, row_pos=10, col_pos=18, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=19, row_wise=True) # Exposure-weighted average of CCF^R (to be created)\n self.array_toExcel(wb=wbk31, stat_array= q5CCFR_perGrade, row_pos=10, col_pos=11, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q10CCFR_perGrade, row_pos=10, col_pos=12, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q25CCFR_perGrade, row_pos=10, col_pos=13, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q50CCFR_perGrade, row_pos=10, col_pos=14, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q75CCFR_perGrade, row_pos=10, col_pos=15, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q90CCFR_perGrade, 
row_pos=10, col_pos=16, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q95CCFR_perGrade, row_pos=10, col_pos=17, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= [0] * 7, row_pos=10, col_pos=23, row_wise=True) # Number of facilities excluded due to outlier handling (set to zero)\n\n # Ptf Lvl\n self.df_toExcel(wb=wbk31, df = pd.DataFrame(ccf_inputs[\"predictive_ability\"][1]).T, row_pos=10, col_pos=20)\n self.array_toExcel(wb=wbk31, stat_array=ccf_inputs[\"predictive_ability\"][0], row_pos=8, col_pos=20, row_wise=False)\n self.array_toExcel(wb=wbk31, stat_array=bcktesting_ccf_ptf, row_pos=8, col_pos=4, row_wise=False)\n wbk31.cell(row=8, column=23).value = 0 # Number of facilities excluded due to outlier handling\n\n # Discriminatory Power\n ## Current gAUC vs gAUC at initial validation/development (§ 2.9.3.1) - sheet 4.0\n wbk40 = oxl.get_sheet_by_name(\"4.0\")\n self.array_toExcel(wb=wbk40, stat_array=ccf_inputs[\"AUC\"][:-1], row_pos=7, col_pos=4, row_wise=False)\n wbk40.cell(row= 7, column= 10).value = start_date # start date\n wbk40.cell(row=7, column=11).value = end_date # end date\n wbk40.cell(row=7, column=12).value = nb_customer # nb of customers\n wbk40.cell(row=7, column=13).value = ccf_inputs[\"AUC\"][-1] # Variance (gAUC_init)\n\n # Save file\n oxl.save(file_name)\n oxl.close()\n return \"CCF results saved to Excel.\"", "def export_helped_table(db):\r\n # Get current date.\r\n date = datetime.datetime.today().strftime('%Y-%m-%d')\r\n # Create directory and file.\r\n if not os.path.exists(backup_dir):\r\n os.makedirs(backup_dir)\r\n backup_file = backup_dir + \"backup_\" + date + \".xlsx\"\r\n # Create workbook and add worksheet.\r\n workbook = xlsxwriter.Workbook(backup_file)\r\n worksheet = workbook.add_worksheet()\r\n # Add bold format to highlight cells.\r\n bold = workbook.add_format({'bold': True})\r\n # Create data headers.\r\n worksheet.write('A1', 'Customer Number', bold)\r\n worksheet.write('B1', 'Name', bold)\r\n worksheet.write('C1', 'Username', bold)\r\n worksheet.write('D1', 'RU_ID', bold)\r\n worksheet.write('E1', 'OS_Platform', bold)\r\n worksheet.write('F1', 'Description', bold)\r\n # Get number of rows in table.\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM helped\")\r\n customers = c.fetchall()\r\n # Loop through the data and write it row by row.\r\n for row in range(0, len(customers)):\r\n for col in range(0, 6):\r\n worksheet.write((row + 1), col, customers[row][col])\r\n workbook.close()", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def generate_xlsx_report(self, workbook, data, parts_data):\n worksheet = workbook.add_worksheet(\"daily_parts_issuance_wizard\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 20)\n worksheet.set_column(3, 3, 
15)\n worksheet.set_column(4, 4, 10)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 10)\n worksheet.set_column(8, 8, 15)\n worksheet.set_column(9, 9, 10)\n worksheet.set_column(10, 10, 15)\n worksheet.set_column(11, 11, 10)\n worksheet.set_column(12, 12, 20)\n worksheet.set_column(13, 13, 5)\n worksheet.set_column(14, 14, 5)\n worksheet.set_column(15, 15, 5)\n\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n border = workbook.add_format(\n {\"border\": 2, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n merge_format = workbook.add_format({\"border\": 2, \"align\": \"center\"})\n format1 = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n format1.set_bg_color(\"gray\")\n date = workbook.add_format({\"num_format\": \"dd/mm/yy\"})\n\n worksheet.merge_range(\"C3:F3\", \"Merged Cells\", merge_format)\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"DAILY PARTS ISSUANCE\", tot)\n row += 1\n worksheet.write(row, 2, \"Date From:\", tot)\n worksheet.write(row, 3, data[\"form\"][\"date_from\"] or \"\", border)\n worksheet.write(row, 4, \"To:\", tot)\n worksheet.write(row, 5, data[\"form\"][\"date_to\"] or \"\", border)\n row += 2\n worksheet.write(row, 0, \"CMF\", bold)\n row = 3\n\n for objec in self.get_work_order_detail(data[\"form\"]):\n row += 3\n worksheet.write(row, 0, \"DATE ISSUED :\", bold)\n worksheet.write(row, 1, objec.get(\"date\") or \"\", date)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"WO NO.\", format1)\n worksheet.write(row, 2, \"VEHICLE ID\", format1)\n worksheet.write(row, 3, \"PART NO.\", format1)\n worksheet.write(row, 4, \"PART NAME\", format1)\n worksheet.write(row, 5, \"VEHICLE MAKE\", format1)\n worksheet.write(row, 6, \"USED\", format1)\n worksheet.write(row, 7, \"UNIT TYPE\", format1)\n worksheet.write(row, 8, \"OLD PART RETURND\", format1)\n worksheet.write(row, 9, \"ISSUED BY\", format1)\n worksheet.write(row, 10, \"REMARKS\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in objec.get(\"value\"):\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"wo_name\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"vehicle_id\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_no\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_name\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"vehicle_make\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"qty\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"uom\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"old_part_return\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"issued_by\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"remarks\") or \"\", border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)", "def export(self):\n rpt_date = datetime.now()\n filename = 
'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")", "def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = 
NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = 
queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def to_excel(self, filename, keep_raw=True):\n writer = pd.ExcelWriter(filename)\n\n for group in self.core_groups:\n label = \"Core %s\" % group\n df = self.core_set(group)\n del (df[\"Core\"])\n df.to_excel(writer, sheet_name=label, index=False)\n\n if keep_raw:\n self.data.to_excel(writer, sheet_name=\"raw\", index=False)\n writer.close()", "def 
D_Base_to_Exel(self):\n# for item in sorted(self.dbase.keys()): # for every key/cell add to a dataFRAME\n# self.dataFRAME[item]=self.dbase[item]\n \n self.dataFRAME = self.Dbase_to_DF()\n writer = ExcelWriter(self.path+'/ALLwells'+ self.filetype) # assign a path for the file\n self.dataFRAME.to_excel(writer, 'Sheet1') # create a file in the same path the original files came from\n writer.save()", "def click_vendor_price_list_detail_dial_digits_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_dial_digits_grid_div_id)", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def write_to_xls_file(self,xls_filename,sheet_name):\r\n rb = xlrd.open_workbook(xls_filename,formatting_info=True)\r\n workbook = copy(rb) #a writable copy (I can't read values out of this, only write to it)\r\n\r\n ''' get all sheetnames '''\r\n list_of_sheetnames = []\r\n list_of_sheetnames = rb.sheet_names()\r\n ''' make a set of sheetnames without duplication '''\r\n sheet_names = set(list_of_sheetnames)\r\n ''' verify if a given ticker existed or not '''\r\n if (sheet_name in sheetnames) == True:\r\n flag = True\r\n else:\r\n flag = False\r\n\r\n if flag == True:\r\n print \"The data sheet named \" + ticker_name + \" existed.\"\r\n else:\r\n print \"No data sheet named \" + ticker_name + \", created new\"\r\n w_sheet = workbook.add_sheet(ticker_name)\r\n w_sheet.write(0,0,'Eod_C_Action')\r\n w_sheet.write(0,1,'Eod_I_Version')\r\n w_sheet.write(0,2,'UsrId')\r\n w_sheet.write(0,3,'Eod_D_Creation')\r\n w_sheet.write(0,4,'Eod_D_Quote')\r\n w_sheet.write(0,5,'InsId')\r\n w_sheet.write(0,6,'Eod_I_ProviderId')\r\n w_sheet.write(0,7,'Eod_N_Open')\r\n w_sheet.write(0,8,'Eod_N_High')\r\n w_sheet.write(0,9,'Eod_N_Low')\r\n w_sheet.write(0,10,'Eod_N_Close')\r\n w_sheet.write(0,11,'Eod_I_Volume')\r\n \r\n for row_index in range(1,len(self.close)+1):\r\n w_sheet.write(row_index,0,'A')\r\n w_sheet.write(row_index,1,0)\r\n w_sheet.write(row_index,2,8)\r\n w_sheet.write(row_index,3,datetime.datetime.now().strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,4,self.date[row_index-1].strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,5,1)\r\n w_sheet.write(row_index,6,1)\r\n w_sheet.write(row_index,7,self.open_[row_index-1])\r\n w_sheet.write(row_index,8,self.high[row_index-1])\r\n w_sheet.write(row_index,9,self.low[row_index-1])\r\n w_sheet.write(row_index,10,self.close[row_index-1])\r\n w_sheet.write(row_index,11,self.volume[row_index-1])\r\n\r\n workbook.save(xls_filename)", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if 
self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ws_merval.range('A1').expand().value = df_arg_stocks\n # SHEET BONOS\n ws_bonds = wb.sheets('Bonos')\n ws_bonds.range('A1').expand().value = df_bonds\n # SHEET CCL MERVAL\n ws_ccl = wb.sheets('CCL ADRs')\n ws_ccl.range('A1').expand().value = df_arg_stocks_ccl\n\n tiempo = time.asctime()\n print('Carga exitosa de datos. 
Ultima ejecución: ',tiempo)", "def writeToExcel(self, filename = \"Interfaces.xlsx\", idx = None, prec = 4,\\\n verbose = 1):\n\n if idx is None:\n idx = np.arange(self.atoms.shape[0])\n elif type(idx) is int: \n idx = np.array([idx])\n else:\n idx = np.array(idx)\n \n\n dataDict = {\"Index\": idx, \"Original Rotation\": self.ang[idx],\\\n \"Length a\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 0], prec),\\\n \"Length b\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 1], prec),\\\n \"Angle a/b\": np.round(self.getBaseAngles(cell = 1)[idx], prec),\\\n \"Atoms\": self.atoms[idx],\\\n \"Area\": self.getAreas()[idx],\\\n \"Strain 11\": np.round(self.eps_11[idx], prec),\\\n \"Strain 22\": np.round(self.eps_22[idx], prec),\\\n \"Strain 12\": np.round(self.eps_12[idx], prec),\\\n \"Strain MAS\": np.round(self.eps_mas[idx], prec),\\\n \"Base 1 ax\": np.round(self.cell_1[idx, 0, 0], prec),\\\n \"Base 1 ay\": np.round(self.cell_1[idx, 1, 0], prec),\\\n \"Base 1 bx\": np.round(self.cell_1[idx, 0, 1], prec),\\\n \"Base 1 by\": np.round(self.cell_1[idx, 1, 1], prec),\\\n \"Base 2 ax\": np.round(self.cell_2[idx, 0, 0], prec),\\\n \"Base 2 ay\": np.round(self.cell_2[idx, 1, 0], prec),\\\n \"Base 2 bx\": np.round(self.cell_2[idx, 0, 1], prec),\\\n \"Base 2 by\": np.round(self.cell_2[idx, 1, 1], prec),\\\n \"Rep 1 ax\": np.round(self.rep_1[idx, 0, 0], prec),\\\n \"Rep 1 ay\": np.round(self.rep_1[idx, 1, 0], prec),\\\n \"Rep 1 bx\": np.round(self.rep_1[idx, 0, 1], prec),\\\n \"Rep 1 by\": np.round(self.rep_1[idx, 1, 1], prec),\\\n \"Rep 2 ax\": np.round(self.rep_2[idx, 0, 0], prec),\\\n \"Rep 2 ay\": np.round(self.rep_2[idx, 1, 0], prec),\\\n \"Rep 2 bx\": np.round(self.rep_2[idx, 0, 1], prec),\\\n \"Rep 2 by\": np.round(self.rep_2[idx, 1, 1], prec)}\n\n for i in range(self.e_int_c.shape[1]):\n key = \"E_int_c_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_c[idx, i], prec)\n\n for i in range(self.w_sep_c.shape[1]):\n key = \"W_sep_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_c[idx, i], prec)\n\n for i in range(self.w_seps_c.shape[1]):\n key = \"W_seps_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_c[idx, i], prec)\n\n for i in range(self.e_int_d.shape[1]):\n key = \"E_int_d_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_d[idx, i], prec)\n\n for i in range(self.w_sep_d.shape[1]):\n key = \"W_sep_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_d[idx, i], prec)\n\n for i in range(self.w_seps_d.shape[1]):\n key = \"W_seps_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_d[idx, i], prec)\n\n\n data = pd.DataFrame(dataDict)\n data.to_excel(filename)\n\n if verbose > 0:\n string = \"Data written to Excel file: %s\" % filename\n ut.infoPrint(string)", "def on_show_eqp_datasheet_export(self):\n from EqpDatasheetExportDialog import QEqpDatasheetExportDialog\n\n dlg = QEqpDatasheetExportDialog(self)\n dlg.exec_()", "def generate_service_odometer_xlsx_report(self, res, next_service):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"next_service_by_odometer\")\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 12500\n worksheet.col(2).width = 10000\n worksheet.col(3).width = 6000\n worksheet.col(4).width = 7500\n worksheet.col(5).width = 7500\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 7500\n worksheet.col(8).width = 10000\n\n font = xlwt.Font()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n border = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n format1 = xlwt.easyxf(\n 
\"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"Scheduled Maintenance By Mileage\", format1)\n row += 3\n worksheet.write(row, 7, \"Date :\", format1)\n worksheet.write(row, 8, time.strftime(\"%d-%B-%Y\"), format1)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"VEHICLE ID\", format1)\n worksheet.write(row, 2, \"VIN NO.\", format1)\n worksheet.write(row, 3, \"MAKE\", format1)\n worksheet.write(row, 4, \"MODEL\", format1)\n worksheet.write(row, 5, \"LAST SERVICE DATE\", format1)\n worksheet.write(row, 6, \"LAST MILEAGE\", format1)\n worksheet.write(row, 7, \"NEXT MILEAGE\", format1)\n worksheet.write(row, 8, \"REGISTRATION STATE\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in next_service:\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.name or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.vin_sn or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.f_brand_id and obj.f_brand_id.name or \"\", border\n )\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.model_id and obj.model_id.name or \"\", border\n )\n line_col += 1\n date = \"\"\n if obj.last_service_date:\n date = format_date(\n self.env,\n obj.last_service_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(line_row, line_col, date or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.odometer or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.due_odometer or \"\", border)\n line_col += 1\n # worksheet.write(line_row, line_col,\n # obj.vechical_location_id and\n # obj.vechical_location_id.name or '', border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res", "def ListDataToExcel(listdata,filename):\n\n # file_backup=f = codecs.open(parent+info.QryPositionExchangeID+\"/\"+filename,'wb','utf-8')\n csvfile = file(filename.decode(\"utf-8\"), 'wb')\n csvfile.write(codecs.BOM_UTF8)\n writer=csv.writer(csvfile)\n writer.writerows(listdata)\n csvfile.close()\n df_new = pd.read_csv(filename, encoding='utf-8')\n writer = pd.ExcelWriter(filename.replace(\".csv\",\".xlsx\"))\n df_new.to_excel(writer, index=False)\n writer.save()\n os.remove(filename)", "def click_vendor_price_list_detail_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_rates_grid_div_id)", "def toExcel(self, outFileName):\n workbook = Workbook(outFileName, {'constant_memory': True})\n workbook.use_zip64() # allow large size Excels just in case\n\n wks = workbook.add_worksheet('Distribution Fitting')\n hdrFmt = workbook.add_format({'bold' : True,\n 'underline' : True,\n 'align' : 'center'})\n resultFormats = [workbook.add_format({'num_format' : fmtStr}) \\\n for fmtStr in ['0.000000', '0.0000%']]\n\n row = 0\n wks.set_column(0, 0, 11)\n wks.set_column(1, 1, 8, resultFormats[0])\n wks.set_column(2, 2, 10.6, resultFormats[1])\n for col, headerName in enumerate(self.getHeaderList()):\n wks.write_string(row, col, headerName, hdrFmt)\n\n for distrName, (results, params) 
in self.result.iteritems():\n row += 1\n col = 0\n wks.write_string(row, col, distrName)\n for col, (result, outFormat) in \\\n enumerate(itertools.izip(results, resultFormats), col+1):\n wks.write_number(row, col, result, outFormat)\n for col, paramValue in enumerate(params, col+1):\n wks.write_number(row, col, paramValue)\n\n workbook.close()", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()", "def convert_sheet(filename, output):\n r2dt.write_converted_sheet(filename, output)", "def to_xls(self,ws,start_row = 0,start_col = 0,width_ratio = 1): \n if self.col_width_dict: \n for c in range(self.no_of_columns()):\n ws.col(start_col+c).width = int(35*self.col_width(c)*width_ratio); \n \n boldstyle = xlwt.XFStyle()\n boldstyle.font.bold = True\n \n for r in range(self.no_of_rows()):\n for c in range(self.no_of_columns()):\n if r == 0:\n ws.write(start_row + r,start_col + c,self.cell(r,c),boldstyle)\n else:\n ws.write(start_row + r,start_col + c,self.cell(r,c))", "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_time\", \"note\"]\n display_names = [\"Tên hộ dân\", \"Tình trạng\", \"Vị trí\", \"Tỉnh\", \"Xã\",\n \"Huyện\", \"Sdt\", \"hỗ trợ\", \"Thời gian cuối cùng cập nhật\", \"Ghi chú\"]\n file_name = \"Danh_sach_ho_dan\"\n\n output = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n row = 0\n if header:\n write_a_row(worksheet, row, display_names)\n row += 1\n for obj in queryset:\n arr = []\n for field in field_names:\n if field == \"status\" and obj.status:\n arr.append(obj.status.name)\n elif field == \"update_time\":\n utc_time = getattr(obj, field)\n local_datetime = utc_to_local(utc_time)\n arr.append(local_datetime.strftime(\"%d/%m/%Y %H:%M:%S\"))\n else:\n arr.append(str(getattr(obj, field) or \"\"))\n write_a_row(worksheet, row, arr)\n row += 1\n\n workbook.close()\n\n output.seek(0)\n\n response = HttpResponse(output.read(\n ), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = f\"attachment; filename={file_name}.xlsx\"\n\n output.close()\n\n return response\n\n export_as_excel.short_description = \"Xuất file excel\"\n return export_as_excel", "def dataframe_to_excel(df, sheet_title, project_constants_lst, \n current_date=str(date.today()), force_flag = False, freeze_column='A'):\n \n project_steps_df, max_title, _, report_requisites_sr, *_ = project_constants_lst\n report_type, export_flag, df_decription = project_steps_df.loc[sheet_title, ['report_type', 'export_to_excel', 'description']].values\n \n # check DataFrame report type to save\n if report_type == 'report':\n report_mark = report_requisites_sr['project_title'] + '_tables'\n else:\n report_mark = report_type\n \n # construct excel filename\n file_name = report_requisites_sr['customer_name'] + '_' + report_mark + '_' + current_date + '.xlsx'\n\n # information string\n info = f'Exporting {sheet_title} table to {report_mark} file'\n print(info, end =\" \")\n file_path = os.path.join(report_requisites_sr['today_report_folder'], file_name)\n \n # save DataFrame to excel file if export_to_excel trigger is ON\n # and DataFrame is not empty\n if (force_flag or export_flag) and not df.empty:\n 
fsop.create_folder(report_requisites_sr['today_report_folder'], max_title, display_status=False)\n file_mode = 'a' if os.path.isfile(file_path) else 'w'\n df = df.apply(pd.to_numeric, errors='ignore')\n try:\n if_sheet_exists_param = 'replace' if file_mode == 'a' else None\n content_df, item_exist = generate_table_of_contents(file_path, file_mode, sheet_title, df_decription)\n df_flat = drop_multindex(df)\n # write table of contents and data dataframe to the excel file\n with pd.ExcelWriter(file_path, mode=file_mode, if_sheet_exists=if_sheet_exists_param, engine='openpyxl') as writer:\n if file_mode == 'w' or not item_exist:\n content_df.to_excel(writer, sheet_name='Содержание', index=False)\n df_flat.to_excel(writer, sheet_name=sheet_title, startrow=2, index=False)\n # format table of contents and data worksheets\n workbook = openpyxl.load_workbook(file_path)\n format_workbook(workbook, sheet_title, df_decription, freeze_column)\n workbook.save(file_path)\n except PermissionError:\n status_info('fail', max_title, len(info))\n print('\\nPermission denied. Close the file.\\n')\n sys.exit()\n else:\n status_info('ok', max_title, len(info))\n return file_path \n else:\n # if save key is on but DataFrame empty\n if project_steps_df.loc[sheet_title, 'export_to_excel'] and df.empty:\n status_info('no data', max_title, len(info))\n else: \n status_info('skip', max_title, len(info))\n return None", "def export_rep(name):\r\n attendance_list = read_rep()\r\n try:\r\n with open(name + '.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n # makes table in Excel by employee and attendance dates\r\n writer.writerow([\"Employee\", \"Attendance\"])\r\n for worker in attendance_list:\r\n count = 0\r\n for date in worker[1]:\r\n if not count:\r\n # first date needs to add name of worker\r\n writer.writerow([worker[0], date])\r\n count += 1\r\n # write only date\r\n else:\r\n writer.writerow(['', date])\r\n print(\"csv file made\")\r\n return attendance_list\r\n except PermissionError:\r\n print(\"file is opened, please close and try again\")\r\n return attendance_list", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += \"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data = self.get_career_results(career)\n\n # Generates the CSV with the results of the career,then return as downloadable file.\n response = self.get_teacher_results_excel(data)\n return response", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 
'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")", "def ortra_export(request):\n export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(Q(klass__name__contains='ASAFE') |\n Q(klass__name__contains='ASEFE') |\n Q(klass__name__contains='ASSCFE'),\n archived=False).order_by('klass__name',\n 'last_name',\n 'first_name')\n\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('ortra_export')", "def save_EGRID( self , filename , output_unit = EclUnitTypeEnum.ERT_ECL_METRIC_UNITS):\n self._fwrite_EGRID2( filename, output_unit )", "def save_dataframe_to_excel(df,path,filename):\n \n path_and_file_name = path.joinpath('output',filename)\n df.to_csv(path_or_buf = path_and_file_name, sep=';',index=False)", "def write_dataframe_to_excel(d, name, path=''):\n name = name if len(name.split()) == 1 else name\n filepath = join(path, name)\n d.to_excel(filepath)", "def saveAll(self):\r\n path = saveFile(ftype='xlsx')\r\n writer = pd.ExcelWriter(path)\r\n df = pd.DataFrame(self.saveAll)\r\n df.to_excel(writer, header=False, index=False)\r\n writer.save()\r\n \r\n #Format the excel file\r\n try:\r\n import openpyxl\r\n from openpyxl.styles import Alignment, Font, Border, Side\r\n #Load the workbook and worksheet\r\n wb = openpyxl.load_workbook(filename=path)\r\n ws = wb.get_sheet_by_name(\"Sheet1\")\r\n cells = ['E1','H1','K1','N1','Q1','T1','W1','Z1']\r\n ws.merge_cells('E1:G1')\r\n ws.merge_cells('H1:J1')\r\n ws.merge_cells('K1:M1')\r\n ws.merge_cells('N1:P1')\r\n ws.merge_cells('Q1:S1')\r\n ws.merge_cells('T1:V1')\r\n ws.merge_cells('W1:Y1')\r\n ws.merge_cells('Z1:AB1')\r\n #Bold and center the headers\r\n ft = Font(bold=True)\r\n for cell in cells:\r\n ws[cell].alignment = Alignment(horizontal=\"center\")\r\n ws[cell].font = ft\r\n #Add borders\r\n rows,_ = self.saveAll.shape\r\n for i in range(rows):\r\n for cell in cells:\r\n c = cell[0]+str(i+1)\r\n ws[c].border = Border(left=Side(style='thin'))\r\n\r\n \r\n \r\n wb.save(path)\r\n \r\n except ImportError:\r\n pass", "def export_data(self):\n return self.export_all_data()", "def to_xlsx(self, filename):\n # create path if it does 
not exist\n suffix = filename.split(\".\")[-1]\n if not suffix == \"xlsx\":\n filename = filename + \".xlsx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n writer = pd.ExcelWriter(filename)\n for name, df in sorted(self.input_data.items()):\n df.to_excel(writer, name)\n writer.save()\n logging.info(\"Scenario saved as excel file to %s\", filename)", "def writeResult(outfilename, blocks, data_per_group):\n allfile = open(\"\".join([\"res_\", outfilename, \".xls\"]), \"w\")\n meandevfile = open(\"\".join([\"res_\", outfilename, \"_meandev.xls\"]), \"w\")\n\n # write titles\n i = 0\n for letter in range(len(blocks[0])/12):\n for number in range(12):\n i += 1\n allfile.write(\"\".join([chr(ord('A') + letter), str(number+1), '\\t']))\n if (i % data_per_group == 0):\n allfile.write(\"\\t\")\n if ((i-1) % data_per_group == 0):\n meandevfile.write(\"\".join([chr(ord('A') + letter), str(number+1), '\\t']))\n else:\n meandevfile.write(\"\\t\")\n allfile.write(\"\\n\")\n meandevfile.write(\"\\n\")\n\n\n for block in blocks:\n i = 0\n thl = []\n for n in block:\n thl.append(n)\n allfile.write(\"%f\\t\" % n)\n i += 1\n if (i % data_per_group == 0):\n allfile.write(\"\\t\")\n m, d = meandev(thl)\n meandevfile.write(\"%f\\t\" % m)\n meandevfile.write(\"%f\" % d)\n meandevfile.write(\"\\t\\t\")\n thl = []\n allfile.write(\"\\n\")\n meandevfile.write(\"\\n\")", "def modsecSaveXLSX(modsecDict, outputXLSXFileName, outputWithGraphs):\r\n modsec_header_xlsx = ['transaction_id', 'event_time', 'remote_address', 'request_host',\r\n 'request_useragent','request_line', 'request_line_method', 'request_line_url', 'request_line_protocol',\r\n 'response_protocol', 'response_status',\r\n 'action','action_phase', 'action_message',\r\n 'message_type', 'message_description', 'message_rule_id', 'message_rule_file',\r\n 'message_msg', 'message_severity', 'message_accuracy', 'message_maturity', 'full_message_line'\r\n ]\r\n wb = openpyxl.Workbook()\r\n ws1 = wb.active\r\n ws1.title = 'Modsec_entries'\r\n ws1.append(modsec_header_xlsx)\r\n\r\n for entry_mod in modsecDict:\r\n try:\r\n transaction_id = entry_mod['transaction']['transaction_id']\r\n event_time = entry_mod['transaction']['time']\r\n remote_address = entry_mod['transaction']['remote_address']\r\n request_line = entry_mod['request']['request_line']\r\n request_line_method, request_line_url, request_line_protocol = get_params(string_in=request_line, defaultmissing='-', params_to_get=3)\r\n request_headers_useragent = safedictkey(entry_mod, ['request','headers','User-Agent'], '-')\r\n request_headers_host = safedictkey(entry_mod, ['request','headers','Host'], '-')\r\n response_protocol = safedictkey(entry_mod, ['response', 'protocol'], '-')\r\n response_status = safedictkey(entry_mod, ['response','status'], '-')\r\n audit_data_producer = safedictkey(entry_mod, ['audit_data','producer'], '-')\r\n audit_data_server = safedictkey(entry_mod, ['audit_data', 'server'], '-')\r\n audit_data_enginemode = safedictkey(entry_mod, ['audit_data','Engine-Mode'], '-')\r\n audit_data_action_intercepted = 'intercepted' if (safedictkey(entry_mod, ['audit_data','action','intercepted'], '-') == True) else '-'\r\n audit_data_action_message = safedictkey(entry_mod, ['audit_data','action','message'], '-')\r\n audit_data_action_phase = safedictkey(entry_mod, ['audit_data','action','phase'], '-')\r\n\r\n if ('messages' in entry_mod['audit_data']) and (len(entry_mod['audit_data']) > 0):\r\n if len(entry_mod['audit_data']['messages']) > 1:\r\n audit_data_message_type = 
'multiple'\r\n else:\r\n audit_data_message_type = 'single'\r\n for each in entry_mod['audit_data']['messages']:\r\n audit_data_message_message = regular_expression_evaluate(each, modsec_message_message_pattern)\r\n audit_data_message_file = regular_expression_evaluate(each, modsec_message_file_pattern, to_split=True, to_split_value='/', to_split_column=-1)\r\n audit_data_message_id = regular_expression_evaluate(each, modsec_message_id_pattern)\r\n audit_data_message_msg = regular_expression_evaluate(each, modsec_message_msg_pattern)\r\n audit_data_message_severity = regular_expression_evaluate(each, modsec_message_severity_pattern)\r\n audit_data_message_maturity = regular_expression_evaluate(each, modsec_message_maturity_pattern)\r\n audit_data_message_accuracy = regular_expression_evaluate(each, modsec_message_accuracy_pattern)\r\n #audit_data_message_tags = [] # TAGS not in use currently\r\n ws1.append([transaction_id, event_time, remote_address, request_headers_host, request_headers_useragent,\r\n request_line, request_line_method, request_line_url, request_line_protocol,\r\n response_protocol, response_status,\r\n audit_data_action_intercepted, audit_data_action_phase, audit_data_action_message,\r\n audit_data_message_type,audit_data_message_message, audit_data_message_id, audit_data_message_file,\r\n audit_data_message_msg, audit_data_message_severity, audit_data_message_accuracy, audit_data_message_maturity,\r\n each\r\n ])\r\n else:\r\n audit_data_message_type = 'None'\r\n each = 'None'\r\n #print('M error - message not found for transaction_id :', transaction_id)\r\n audit_data_message_message = audit_data_message_file = audit_data_message_id = audit_data_message_msg = \\\r\n audit_data_message_severity = audit_data_message_maturity = audit_data_message_accuracy = '-'\r\n ws1.append([transaction_id, event_time, remote_address, request_headers_host, request_headers_useragent,\r\n request_line, request_line_method, request_line_url, request_line_protocol,\r\n response_protocol, response_status,\r\n audit_data_action_intercepted, audit_data_action_phase, audit_data_action_message,\r\n audit_data_message_type, audit_data_message_message, audit_data_message_id, audit_data_message_file,\r\n audit_data_message_msg, audit_data_message_severity, audit_data_message_accuracy, audit_data_message_maturity,\r\n each\r\n ])\r\n except Exception as e:\r\n print('Exception at modsecSaveXLSX() :', e , ' , transaction_id :', transaction_id)\r\n\r\n if not 'error' in outputWithGraphs:\r\n img = openpyxl.drawing.image.Image(outputWithGraphs)\r\n ws2 = wb.create_sheet('Graphs')\r\n ws2.add_image(img)\r\n\r\n try:\r\n if not os.path.isdir(fileBaseOutputDir):\r\n os.mkdir(fileBaseOutputDir)\r\n fOut = os.path.join(fileBaseOutputDir, outputXLSXFileName)\r\n wb.save(filename=fOut)\r\n except Exception as e:\r\n print('modsecSaveXLSX() has thrown exception: %s', e)\r\n\r\n pass", "def click_buy_and_sell_management_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.buy_and_sell_management_grid_div_id)", "def write2file(self, save_to):\n headerstyle = xlwt.easyxf(self.header_style.get_style_string())\n missing_val_style = xlwt.easyxf(\n self.missing_value_style.get_style_string())\n row_styles = [xlwt.easyxf(self.first_style.get_style_string()),\n xlwt.easyxf(self.second_style.get_style_string())]\n\n properties, sections, table = self._build_table()\n\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(self.sheet_name)\n\n if os.path.splitext(save_to)[-1] == '':\n save_to 
+= '.xls'\n\n max_col_len = []\n\n if (self.switch):\n\n for i, prop in enumerate([''] + properties):\n sheet.write(0, i, prop, headerstyle)\n max_col_len.append(len(str(prop)))\n\n for row_num, sec in enumerate(sections):\n sheet.write(row_num + 1, 0, sec, headerstyle)\n if len(str(sec)) > max_col_len[0]:\n max_col_len[0] = len(str(sec))\n\n for row_num, row in enumerate(table):\n for col_num, elem in enumerate(row):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n else:\n\n for i, sec in enumerate([''] + sections):\n sheet.write(0, i, sec, headerstyle)\n max_col_len.append(len(str(sec)))\n\n for row_num, prop in enumerate(properties):\n sheet.write(row_num + 1, 0, prop, headerstyle)\n if len(str(prop)) > max_col_len[0]:\n max_col_len[0] = len(str(prop))\n\n for col_num, col in enumerate(table):\n for row_num, elem in enumerate(col):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n # adjust width of he columns\n for col_id, col_len in enumerate(max_col_len):\n sheet.col(col_id).width = (256 * (col_len+1))\n\n workbook.save(save_to)", "def export(self, desc):\n self.training_data.fillup_x()\n self.training_data.fillup_a()\n self.training_data.fillup_y()\n self.training_data.export(desc + \"_train.csv\")\n \n self.testing_data.fillup_x()\n self.testing_data.export(desc + \"_test_X.csv\")\n \n self.testing_data.reset_df()\n self.testing_data.fillup_ys()\n self.testing_data.fillup_azero()\n self.testing_data.export(desc + \"_test_Ys.csv\")", "def click_vendor_price_list_detail_reference_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_reference_rates_grid_div_id)", "def export_16(text_col, processed_col, input_filepath,\n output_filepath, country):\n processed_list_8 = process_text(text_col, processed_col, input_filepath)\n processed_list_16 = []\n for name in processed_list_8:\n name, _ = split_half(name)\n processed_list_16.append(name)\n processed_list_16.append(_)\n\n for i in range(len(processed_list_16)):\n processed_list_16[i].to_excel(output_filepath +\n country + '_processed_' +\n str(i+1) + '.xlsx',\n index=False)\n return True", "def convert_to_an_excel_sheet(app, trsfrm_no, sfilname, srcfil_delim_char, dest_fname, temp_fname):\n\n global progress\n\n # #delete files found in download directory\n # for dirpath, dirname, files in os.walk(app.config[\"DOWNLOAD_FOLDER\"]):\n # print(dirpath, dirname, 
files)\n # for filename in files:\n # try:\n # os.remove(os.path.join(dirpath, filename))\n # except Exception as e:\n # print(str(e))\n\n dest_file = os.path.join(app.config[\"DOWNLOAD_FOLDER\"], dest_fname)\n dest_wb = Workbook(dest_file, {'strings_to_numbers': True, 'constant_memory': True})\n sheet_name = f\"file1\"\n dest_ws = dest_wb.add_worksheet(name=sheet_name)\n\n src_file = os.path.join(app.config[\"UPLOAD_FOLDER\"], sfilname)\n\n with open(src_file, mode=\"r\") as filhdlr:\n for idx, _ in enumerate(filhdlr):\n pass\n\n total_rows = idx + 1\n\n percent_1 = False\n percent_5 = False\n percent_10 = False\n percent_20 = False\n percent_30 = False\n percent_40 = False\n percent_50 = False\n percent_60 = False\n percent_70 = False\n percent_80 = False\n percent_90 = False\n percent_100 = False\n\n with open(src_file, mode=\"r\") as filhdlr:\n csvReader = csv.reader(filhdlr, delimiter=srcfil_delim_char)\n for idx1, row in enumerate(csvReader):\n\n percent_1, percent_5, percent_10, percent_20, percent_30, percent_40, percent_50, \\\n percent_60, percent_70, percent_80, percent_90, percent_100 = determine_progress_value(idx1, total_rows,\n percent_1, percent_5,\n percent_10,\n percent_20,\n percent_30,\n percent_40,\n percent_50, \\\n percent_60,\n percent_70,\n percent_80,\n percent_90,\n percent_100)\n\n for idx2, value in enumerate(row):\n dest_ws.write(idx1, idx2, value)\n\n dest_wb.close()\n\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 100\n\n # delete the uploaded file\n try:\n os.remove(os.path.join(app.config[\"UPLOAD_FOLDER\"], sfilname))\n except Exception as e:\n print(str(e))\n\n return", "def to_excel(self, filename, recommended_only=False, include_io=True):\n df = self.to_df(recommended_only, include_io)\n if isinstance(filename, str):\n filename = os.path.expanduser(filename)\n df.to_excel(filename, index=False)", "def export(self, exdata = True, exlights = True, exaovs = True, exshaders = True, exmaster = True):\n\t\tif exdata:\n\t\t\tself.exportData()\n\t\tif exshaders:\n\t\t\tself.exportShaders()\n\t\tif exlights:\n\t\t\tself.exportLights()\n\t\tif exaovs:\n\t\t\tself.exportAovs()\n\t\tif exmaster:\n\t\t\tself.exportMasterLayerSettings()", "def output_1cell(self, filename):\n\n date_concat = \"{0} to {1}\".format(self.startDate, self.endDate)\n if active_restaurant_loop:\n column_name = [\"range\", \"start_date\",\n \"end_date\", \"location_id\", \"content\"]\n data = [date_concat, self.startDate, self.endDate, str(\n self.payload[\"locationGroupID\"]), \"{0}\".format(self.content)]\n data_out = [column_name, data]\n else:\n column_name = [\"range\", \"start_date\", \"end_date\", \"content\"]\n data = [date_concat, self.startDate,\n self.endDate, \"{0}\".format(self.content)]\n data_out = [column_name, data]\n\n # If active restaurant loop is true\n if not os.path.isfile(filename):\n with open(filename, \"w\") as f:\n writer = csv.writer(f)\n #writer.writerow([\"range\", \"start_date\", \"end_date\", \"content\"])\n #writer.writerow([date_concat, start_date, end_date, \"{0}\".format(self.content)])\n writer.writerows(data_out)\n # f.write([\"content\"])\n # f.write([\"{0}\"].format(self.content))\n f.close()\n else:\n with open(filename, \"a\") as f:\n writer = csv.writer(f)\n writer.writerows([data])\n f.close()\n\n logging.info(\"Outputting... 
\")\n self.produce_manifest(filename)", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def save_feedback_xlsx(\n df_summary: pd.DataFrame,\n df_city_no_zip: pd.DataFrame,\n df_zip_no_city: pd.DataFrame,\n df_zipCity_no_address: pd.DataFrame,\n df_address_no_zipCity: pd.DataFrame,\n df_no_address_at_all: pd.DataFrame,\n df_invalid_matrices: pd.DataFrame,\n df_employees: pd.DataFrame,\n path: str,\n):\n full_path = os.path.join(\n path,\n f\"feedback_{dt.datetime.strftime(dt.datetime.now(), '%Y-%m-%d-%H-%M-%S')}.xlsx\",\n )\n writer = pd.ExcelWriter(full_path, engine=\"xlsxwriter\")\n df_summary.to_excel(writer, sheet_name=\"SUMMARY\", index=False)\n df_invalid_matrices.to_excel(writer, sheet_name=\"invalid_matrices\", index=False)\n df_address_no_zipCity.to_excel(writer, sheet_name=\"address_no_zipCity\", index=False)\n df_no_address_at_all.to_excel(writer, sheet_name=\"no_address_at_all\", index=False)\n df_zipCity_no_address.to_excel(writer, sheet_name=\"zipCity_no_address\", index=False)\n df_zip_no_city.to_excel(writer, sheet_name=\"zip_no_city\", index=False)\n df_city_no_zip.to_excel(writer, sheet_name=\"city_no_zip\", index=False)\n df_employees.to_excel(writer, sheet_name=\"employees\", index=False)\n\n for sheet in writer.sheets.values():\n sheet.set_column(\"A:E\", 35)\n\n writer.save()", "def createcsv(fileName):\n fileName = os.path.join(pathtofolder(), 'datas', fileName)\n fileFormat = '.csv'\n file = f'{fileName + fileFormat}'\n\n csvKeys = [\"product_page_url\", \"universal_product_code\", \"title\",\n \"price_including_tax\", \"price_excluding_tax\", \"number_available\",\n \"product_description\", \"category\", \"review_rating\", \"image_url\"]\n\n addon = excelExport('excel')\n\n with open(file, 'w', newline=\"\", encoding='utf-8') as csvFile:\n csvFile.write(addon) # Define the separator as <\">.\n resultWriter = csv.writer(csvFile, delimiter = '|', dialect = \"excel\")\n resultWriter.writerow(csvKeys)\n pass", "def write_data(qids, conditions, outputs, data_path):\n data_set = pd.DataFrame(list(zip(qids, conditions, outputs)),\n columns=[\"QID\", \"CONDITION\", \"OUTPUT\"])\n data_set.to_excel(data_path)", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def get_excel(self, file_name):\n global download_component\n\n download_soup = BeautifulSoup(self.res.text, 'lxml')\n download_component = get_download_component(download_soup)\n\n #Start excel session\n xsess = requests.Session()\n xsess.headers = EXCEL_HEADERS\n \n #prepare excel session\n self.data['SAPEVENTQUEUE'] = \"Button_Press~E002Id~E004\" + \\\n download_component + \"~E003~E002ResponseData~E004delta~E005ClientAction~E004submit~E003~E002~E003\"\n self.res = self.sess.post(self.url, data=self.data)\n\n #parse data from prepared excel session\n fileid, action = get_excel_url(BeautifulSoup(self.res.text,'lxml-xml')) \n \n #replace\n xurl = HOST_URL + action\n xurl = xurl.replace(\"\\\\x2f\",\"/\")\n xurl = 
xurl.replace(\"\\\\x7e\",\"~\")\n xurl = xurl.replace(\"\\\\x3f\", \"?\")\n xurl = xurl.replace(\"\\\\x2d\",\"-\")\n xurl = xurl.replace(\"\\\\x3d\",\"=\")\n xurl = xurl.replace(\"\\\\x253a\",\":\")\n xurl = xurl.replace(\"\\\\x26\",\"&\")\n xres = xsess.post(xurl)\n \n #write file\n with open(file_name,'wb') as f:\n f.write(xres.content)", "def export_event_design(self):\n try:\n self.sa.export_event_design()\n QMessageBox.information(self,\n 'info',\n '导出已完成,请查看当前文件夹下文件 export_event_design.xlsx',\n QMessageBox.Yes)\n except Exception as e:\n QMessageBox.warning(self,\n \"error\",\n str(e),\n QMessageBox.Yes)", "def excel_out(employees_dict, path):\n # Create workbook and worksheet\n try:\n workbook = xlsxwriter.Workbook(path)\n except:\n return False\n worksheet = workbook.add_worksheet(name='Прокуратура')\n # Add format to workbook\n format_headers_po = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 14,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFCA28',\n 'border': 2})\n format_headers_department = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 13,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFD54F',\n 'border': 2})\n format_headers_division = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFE082',\n 'border': 2})\n format_header = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'bg_color': '#FFF59D',\n 'border': 2})\n employee_format_b = workbook.add_format( {'align': 'left',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'bold': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n employee_format = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 12,\n 'font_name': 'Times New Roman',\n 'border': 2})\n format_attribute = workbook.add_format( {'align': 'center',\n 'valign': 'vcenter',\n 'text_wrap': True,\n 'font_size': 10,\n 'font_name': 'Times New Roman',\n 'border': 1})\n\n # Set width of columns and height of rows\n worksheet.set_default_row(40, False)\n worksheet.set_column(0, 0, 5)\n worksheet.set_column(1, 1, 25)\n worksheet.set_column(2, 2, 21)\n worksheet.set_column(3, 3, 21)\n worksheet.set_column(4, 4, 21)\n\n # Begin from row\n row = 0\n\n # Parser for employees dictionary\n for po in employees_dict:\n # Прокуратура\n worksheet.merge_range(row, 0, row, 4, data=po.name, cell_format=format_headers_po)\n row += 1\n # Атрибуты Прокуратуры\n row = add_attribute(po, worksheet, row, format_attribute)\n # Header\n row = add_header(worksheet, row, format_header)\n # Работники Прокуратуры\n if 'employees' in employees_dict[po]:\n for num, employee in enumerate(employees_dict[po]['employees'], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Управление\n if 'departments' in employees_dict[po]:\n for department in employees_dict[po]['departments']:\n worksheet.merge_range(row, 0, row, 4, data=department.name, cell_format=format_headers_department)\n row += 1\n # Атрибуты Управления\n row = add_attribute(department, worksheet, row, format_attribute)\n # Работники Управления\n if 'employees' in employees_dict[po]['departments'][department]:\n for num, employee in enumerate(employees_dict[po]['departments'][department]['employees'], 1):\n row = add_employee(worksheet, row, 
employee, num, employee_format, employee_format_b)\n # Отдел Управления\n if 'divisions' in employees_dict[po]['departments'][department]:\n for division in employees_dict[po]['departments'][department]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['departments'][department]['divisions'][division], 1):\n row = add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n\n # Отдел Прокуратуры\n if 'divisions' in employees_dict[po]:\n for division in employees_dict[po]['divisions']:\n worksheet.merge_range(row, 0, row, 4, data=division.name, cell_format=format_headers_division)\n row += 1\n # Атрибуты Отдела\n row = add_attribute(division, worksheet, row, format_attribute)\n # Работники Отдела\n for num, employee in enumerate(employees_dict[po]['divisions'][division], 1):\n row += add_employee(worksheet, row, employee, num, employee_format, employee_format_b)\n try:\n workbook.close()\n except:\n return False\n return True", "def ecase_data_download():\n if file_available(rf'{constants.MAIN_DATA_DIR}\\Clinical Data\\eCaseData.xlsx'):\n ecase_driver = ecase_downloader.ecase_login()\n try:\n ecase_downloader.ecase_data(ecase_driver)\n except NoSuchElementException:\n print(\"Data report can't be downloaded\")\n\n ecase_driver.quit()\n\n try:\n ecase_data_import.ecase_data_import()\n except FileNotFoundError:\n pass", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def SaveToExcel(self, Row, line, Data):\r\n self.sh.Cells(Row, line).Value = Data", "def writeCourseesWorksheet(wb: xlsxwriter.Workbook) -> None:\n\n global knowledgeAreas\n global coursesWsRow\n\n\n ws = wb.add_worksheet()\n ws.name = \"Courses\"\n\n row = coursesWsRow\n col = 0\n\n ws.write(row, col, 'LearningObjective')\n col += 1\n\n ws.write(row, col, 'CourseId')\n col += 1\n\n ws.write(row, col, 'CourseTitle - As Extracted')\n col += 1\n\n ws.write(row, col, 'CourseTitle - As Revised')\n col += 1\n\n ws.write(row, col, 'CourseDescription - As Extracted')\n col += 1\n\n ws.write(row, col, 'CourseDescription - As Revised')\n col += 1\n\n ws.write(row, col, 'Instructor - As Extracted')\n col += 1\n\n ws.write(row, col, 'Instructor - As Revised')\n col += 1\n\n ws.write(row, col, 'Fee - As Extracted')\n col += 1\n\n ws.write(row, col, 'Fee - As Revised')\n\n coursesWsRow += 1\n col = 0\n\n for knowledgeArea in knowledgeAreas:\n writeCourseRows(ws, knowledgeArea)", "def click_country_groups_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.country_groups_grid_div_id)", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n 
Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n \r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()" ]
[ "0.6653394", "0.6451767", "0.643158", "0.63429654", "0.6334339", "0.6333897", "0.6286794", "0.62807244", "0.6235302", "0.62333274", "0.6191427", "0.6170842", "0.60931844", "0.60574377", "0.605236", "0.6052189", "0.6044034", "0.60123223", "0.6012149", "0.6002858", "0.5997272", "0.5995605", "0.59929526", "0.59727436", "0.5972204", "0.5967934", "0.5951535", "0.5951535", "0.59485954", "0.59445024", "0.5939689", "0.59197336", "0.59140086", "0.59105104", "0.5899089", "0.5884632", "0.58824784", "0.58735627", "0.58599097", "0.5846684", "0.58446467", "0.58318424", "0.5827482", "0.58268815", "0.5815063", "0.58122337", "0.5810122", "0.58094114", "0.5797282", "0.57829547", "0.5767511", "0.5753774", "0.5753317", "0.5727619", "0.5726155", "0.572093", "0.5718621", "0.57080024", "0.5694606", "0.5688117", "0.5674056", "0.5666553", "0.5659674", "0.56576836", "0.5649922", "0.5641021", "0.5632749", "0.56223977", "0.56216383", "0.5615314", "0.55983984", "0.5580228", "0.5566367", "0.5561907", "0.55594707", "0.55564785", "0.5542079", "0.55286235", "0.552664", "0.5516163", "0.5513876", "0.55132174", "0.5511224", "0.5502875", "0.54973507", "0.5487273", "0.5485124", "0.5472005", "0.54609895", "0.54587203", "0.544986", "0.54466087", "0.5444059", "0.5426901", "0.54227626", "0.54212654", "0.54143834", "0.54131913", "0.54058397", "0.5401825", "0.53946245" ]
0.0
-1
Export CORVET data to excel format
def extract_corvet(self, *args, **kwargs):
    self._product_filter(**kwargs)
    self._vehicle_filter(**kwargs)
    self._select_columns(**kwargs)
    queryset = self.queryset.annotate(
        date_debut_garantie=Cast(TruncSecond('donnee_date_debut_garantie', DateTimeField()), CharField()))
    if kwargs.get('tag', None):
        queryset = queryset.filter(opts__tag=kwargs.get('tag'))
    if kwargs.get('vins', None):
        vin_list = kwargs.get('vins').split('\r\n')
        queryset = queryset.filter(vin__in=vin_list)
    if kwargs.get('xelon_model', None):
        queryset = queryset.select_related().filter(xelon__modele_produit__startswith=kwargs.get('xelon_model'))
    if kwargs.get('xelon_vehicle', None):
        queryset = queryset.select_related().filter(xelon__modele_vehicule__startswith=kwargs.get('xelon_vehicle'))
    if kwargs.get('start_date', None):
        queryset = queryset.filter(donnee_date_debut_garantie__gte=kwargs.get('start_date'))
    if kwargs.get('end_date', None):
        queryset = queryset.filter(donnee_date_debut_garantie__lte=kwargs.get('end_date'))
    values_list = queryset.values_list(*self.fields).distinct()
    return values_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_excel(self, filename):\n self.data.to_excel(filename)", "def export_to_file(final_data_fetch):\r\n\r\n # Column names for data\r\n header_fields = ['Course', 'University', 'GPA', 'GRE', 'TOEFL', 'Work Experience', 'UG Course', 'UG College','Admit Status']\r\n with xlsxwriter.Workbook('yocket_data.xlsx') as workbook:\r\n worksheet = workbook.add_worksheet()\r\n\r\n # Write Header Fields\r\n worksheet.write_row(0, 0, header_fields)\r\n # Write data fields\r\n for row_num, data in enumerate(final_data_fetch):\r\n worksheet.write_row(row_num+1, 0, data)\r\n\r\n # Store as binary data\r\n with open('yocket_data.data', 'wb') as f:\r\n pickle.dump(final_data_fetch, f)", "def to_excel(self, filename, **kwargs):\n self.data.to_excel(filename, **kwargs)", "def export_excel(self, filename):\n # convert table to array of rows\n rows = [self.headings]\n for y in range(self.rowcount):\n row = []\n for h in self.headings:\n row.append(self.table[h][y])\n rows.append(row)\n \n sheet = pyexcel.Sheet(rows, self.name, name_columns_by_row=0)\n sheet.save_as(filename)", "def export_data(self):\r\n stocks = {}\r\n headings = ['Security', 'Price', 'Change', 'Change %', '52 Week', 'Market Cap']\r\n\r\n for data in range(6):\r\n for items in self.root.main.treeview.get_children():\r\n values = self.root.main.treeview.item(items, 'values')\r\n if headings[data] not in stocks:\r\n stocks[headings[data]] = []\r\n stocks.get(headings[data]).append(values[data])\r\n\r\n df = pd.DataFrame(stocks, columns=headings)\r\n path = tk.filedialog.asksaveasfilename(title='Save File As...',\r\n filetypes=((\"CComma-separated values (.csv)\", \"*.csv\"), (\"Text Document(.txt)\", \"*.txt\")))\r\n\r\n if not path:\r\n return\r\n else:\r\n df.to_excel(path, index=False, header=True)", "def mono_sheet(self):\n xls = pandas.read_excel(str(self.source))\n xls.to_csv(str(self.dest), **self.kwargs)", "def generate_xls(self):\n self.wb = xlwt.Workbook()\n ws = self.wb.add_sheet('Sheet1')\n heading_style = xlwt.easyxf('font: bold true; alignment: horizontal center, wrap true;')\n extra_row = 0\n if self.date:\n date_style = xlwt.easyxf('font: bold true; alignment: horizontal left, wrap true;')\n ws.write_merge(0,0,0,self.table.no_of_columns()-1,'Date : '+self.date,date_style) \n extra_row = 1\n for i in range(len(self.headings)):\n ws.write_merge(i+extra_row,i+extra_row,0,self.table.no_of_columns()-1,self.headings[i],heading_style)\n ws.set_panes_frozen(True)\n ws.set_horz_split_pos(len(self.headings)+extra_row+1)\n ws.set_remove_splits(True)\n self.table.to_xls(ws,start_row=len(self.headings)+extra_row,start_col=0)\n return self.wb", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.ministerial.get_excel_sheet(rpt_date, book)\n self.ministerial_auth.get_excel_sheet(rpt_date, book)\n self.ministerial_268.get_excel_sheet(rpt_date, book)\n self.quarterly.get_excel_sheet(rpt_date, book)\n self.by_tenure.get_excel_sheet(rpt_date, book)\n self.by_cause.get_excel_sheet(rpt_date, book)\n self.region_by_tenure.get_excel_sheet(rpt_date, book)\n self.indicator.get_excel_sheet(rpt_date, book)\n self.by_cause_10YrAverage.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 1')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 
'bushfire_indicator_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'quarterly_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def exporter():\n Session = modules.db_connect.connect()\n session = Session()\n report = xlsxwriter.Workbook('perception_report.xlsx')\n top_row_format = report.add_format({'bold': True})\n top_row_format.set_border(style=1)\n top_row_format.set_bg_color('#B8B8B8')\n\n \"\"\"Black row format at the top of each host detailed info\"\"\"\n black_row_format = report.add_format()\n black_row_format.set_border(style=1)\n black_row_format.set_bg_color('#000000')\n\n \"\"\"Detailed host row format\"\"\"\n host_row_format = report.add_format()\n host_row_format.set_border(style=1)\n host_row_format.set_bg_color('#CCCCCC')\n\n \"\"\"Format for text in row with host info\"\"\"\n host_row_wrapped_format = report.add_format()\n host_row_wrapped_format.set_border(style=1)\n host_row_wrapped_format.set_bg_color('#CCCCCC')\n host_row_wrapped_format.set_text_wrap('vjustify')\n\n \"\"\"Format description row in NSE output\"\"\"\n host_nse_output_top_format = report.add_format({'bold': True})\n host_nse_output_top_format.set_border(style=1)\n host_nse_output_top_format.set_bg_color('#B8B8B8')\n\n \"\"\"Format test row in NSE output\"\"\"\n host_nse_output_format = report.add_format()\n host_nse_output_format.set_border(style=1)\n host_nse_output_format.set_bg_color('#CCCCCC')\n\n \"\"\"Build the host_overview_worksheet\"\"\"\n host_overview_worksheet = report.add_worksheet()\n\n \"\"\"Build the host_detail_worksheet\"\"\"\n host_detail_worksheet = report.add_worksheet()\n\n \"\"\"Size up the overview worksheet\"\"\"\n host_overview_worksheet.set_column('B:B', 24)\n host_overview_worksheet.set_column('C:C', 15)\n host_overview_worksheet.set_column('D:D', 15)\n host_overview_worksheet.set_column('E:E', 15)\n host_overview_worksheet.set_column('F:F', 15)\n host_overview_worksheet.set_column('G:G', 20)\n host_overview_worksheet.set_column('H:H', 15)\n\n \"\"\"Size up the detail worksheet\"\"\"\n host_detail_worksheet.set_column('B:B', 38)\n host_detail_worksheet.set_column('C:C', 16)\n host_detail_worksheet.set_column('D:D', 16)\n host_detail_worksheet.set_column('E:E', 28)\n host_detail_worksheet.set_column('F:F', 15)\n host_detail_worksheet.set_column('H:G', 20)\n host_detail_worksheet.set_column('H:H', 25)\n host_detail_worksheet.set_column('I:I', 10)\n\n \"\"\"Description row for host overview\"\"\"\n host_overview_worksheet.write('B2', 'Hostname', top_row_format)\n host_overview_worksheet.write('C2', 'IP v4 Address', top_row_format)\n host_overview_worksheet.write('D2', 'IP v6 Address', top_row_format)\n host_overview_worksheet.write('E2', 'MAC Address', top_row_format)\n host_overview_worksheet.write('F2', 'MAC Vendor', top_row_format)\n host_overview_worksheet.write('G2', 'Operating System', top_row_format)\n host_overview_worksheet.write('H2', 'Host Type', top_row_format)\n\n 
\"\"\"Query the database for the hosts\"\"\"\n inventory_hosts = session.query(InventoryHost).all()\n\n \"\"\"Build overview worksheet\"\"\"\n overview_row = 2\n overview_col = 1\n for host in inventory_hosts:\n host_overview_worksheet.write(overview_row, overview_col, host.host_name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 1, host.ipv4_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 2, host.ipv6_addr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 3, host.macaddr, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 4, host.mac_vendor.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 5, host.product.name, host_row_format)\n host_overview_worksheet.write(overview_row, overview_col + 6, host.host_type, host_row_format)\n overview_row += 1\n\n \"\"\"Build detailed worksheet\"\"\"\n detail_row = 2\n detail_col = 1\n for host in inventory_hosts:\n\n \"\"\"Add the black row to start host detail info\"\"\"\n host_detail_worksheet.set_row(detail_row, 5)\n host_detail_worksheet.write(detail_row, detail_col, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, '', black_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, '', black_row_format)\n detail_row += 1\n\n \"\"\"Add row detail info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Hostname', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'IP v4 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'IP v6 Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'MAC Address', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'MAC Vendor', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Host Type', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Operating System', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Version', top_row_format)\n detail_row += 1\n\n \"\"\"Add host info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host.host_name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, host.ipv4_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, host.ipv6_addr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, host.macaddr, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, host.mac_vendor.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, host.host_type, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, host.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, host.product.version, host_row_format)\n detail_row += 2\n\n \"\"\"If there is no host nse script, just say so.\"\"\"\n if not host.host_nse_scripts:\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n 
host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n else:\n\n \"\"\"Add the row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Host NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for host_scripts in host.host_nse_scripts:\n\n \"\"\"Count output the lines so we know what to merge\"\"\"\n lines = host_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, host_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n host_scripts.output, host_row_wrapped_format)\n detail_row += 1\n\n if not host.inventory_svcs:\n\n \"\"\"If there are no services for this host tell me\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n host_detail_worksheet.write(detail_row, detail_col, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'no services', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'no services', host_row_format)\n detail_row += 1\n\n else:\n for ports in host.inventory_svcs:\n\n \"\"\"Host services row info\"\"\"\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'Protocol', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, 'Port', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, 'Name', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, 'Svc Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, 'Extra 
Info', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 5, 'Product', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'Version', top_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'Update', top_row_format)\n detail_row += 1\n\n \"\"\"Write the service info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, ports.protocol, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 1, ports.portid, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 2, ports.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 3, ports.svc_product, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 4, ports.extra_info, host_row_format)\n try:\n\n \"\"\"There may not be product info, but try.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, ports.product.name, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, ports.product.version, host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, ports.product.product_update,\n host_row_format)\n detail_row += 1\n except AttributeError:\n\n \"\"\"Just write unknown if there is no product info\"\"\"\n host_detail_worksheet.write(detail_row, detail_col + 5, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 6, 'unknown', host_row_format)\n host_detail_worksheet.write(detail_row, detail_col + 7, 'unknown', host_row_format)\n detail_row += 1\n\n if not ports.svc_nse_scripts:\n\n \"\"\"If there is no NSE script info just say so.\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n host_detail_worksheet.write(detail_row, detail_col, 'No Script Name', host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'No Script Output', host_row_wrapped_format)\n detail_row += 2\n\n else:\n\n \"\"\"Service Script row detail\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, 'Svc NSE Script Name', top_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row, detail_col + 7,\n 'Output', top_row_format)\n detail_row += 1\n\n \"\"\"Grab all the scripts\"\"\"\n for nse_scripts in ports.svc_nse_scripts:\n\n \"\"\"Count the lines in the output for merging\"\"\"\n lines = nse_scripts.output.count('\\n')\n\n if lines > 0:\n\n \"\"\"Merge the rows and write the name and output\"\"\"\n host_detail_worksheet.merge_range(detail_row, detail_col, detail_row + lines, detail_col,\n nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines, detail_col + 7,\n nse_scripts.output, host_row_wrapped_format)\n detail_row += 1\n else:\n\n \"\"\"Single line output\"\"\"\n host_detail_worksheet.write(detail_row, detail_col, nse_scripts.name, host_row_format)\n host_detail_worksheet.merge_range(detail_row, detail_col + 1, detail_row + lines,\n detail_col + 7, nse_scripts.output,\n host_row_wrapped_format)\n detail_row += 1\n\n detail_row += 1\n report.close()\n session.close()", "def export_data(self):\r\n if len(app.entry6.get()) != 0:\r\n\r\n if app.var.get() == 'xls':\r\n\r\n wb = Workbook()\r\n sheet = wb.add_sheet('Sheet1')\r\n self.columns = ['id', 'Name', 'Section', 'Dept.', 'Gpa', 'MP1', 'MP2', 'MP3', 'MT', 'FINAL']\r\n style = 
xlwt.easyxf('font: bold 1')\r\n for col in range(10):\r\n sheet.write(0, col, self.columns[col], style)\r\n index=0\r\n for row in range(1,162):\r\n sheet.write(row, 1, open_data.sort_list[index])\r\n index += 1\r\n index1 = -1\r\n for row in range(1,162):\r\n index1 += 1\r\n index2=0\r\n for col in range(10):\r\n if col == 1 or index2 == 1:\r\n index2 += 1\r\n continue\r\n if index2 == 0:\r\n sheet.write(row, col, int(open_data.student[open_data.sort_list[index1]][index2]))\r\n index2 += 1\r\n continue\r\n sheet.write(row, col, open_data.student[open_data.sort_list[index1]][index2])\r\n index2 += 1\r\n file_name=app.entry6.get()\r\n if '.xls' not in file_name:\r\n wb.save(file_name+'.xls')\r\n else:\r\n wb.save(file_name)\r\n\r\n elif app.var.get() == 'txt':\r\n\r\n file_name = app.entry6.get()\r\n if '.txt' not in file_name:\r\n file_name = file_name + '.txt'\r\n file = open(file_name, 'w')\r\n index2 = 0\r\n for key in open_data.student:\r\n for index in range(10):\r\n if index == 0:\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n continue\r\n if index == 1:\r\n try:\r\n self.split_names = open_data.sort_list[index2].split(' ')\r\n file.write(self.split_names[0])\r\n file.write(', ')\r\n file.write(self.split_names[1])\r\n file.write(', ')\r\n index2 += 1\r\n except UnicodeEncodeError:\r\n index2 += 1\r\n pass\r\n continue\r\n if index >= 5 and index <= 9:\r\n if open_data.student[key][index] != '':\r\n file.write(str(int(open_data.student[key][index])))\r\n file.write(', ')\r\n else:\r\n file.write('\\n')\r\n break\r\n if index == 9:\r\n file.write('\\n')\r\n continue\r\n try:\r\n file.write(str(open_data.student[key][index]))\r\n file.write(', ')\r\n except UnicodeEncodeError:\r\n pass\r\n file.close()\r\n\r\n\r\n\r\n elif app.var.get() == 'csv':\r\n app.info.configure(text=\"INFO: Type not Supported\")\r\n # The program does not support saving in 'csv' type. 
If the user selects 'csv' file type, 'Info' Label\r\n # shows the message: 'INFO: Type not Supported'.\r\n\r\n else:\r\n app.info.configure(text='INFO: Type not chosen!')\r\n # Also, If the user presses on 'Export Data' button, with a file name provided, but without choosing a\r\n # file type, 'Info' Label shows the message: 'INFO: Type not chosen'.\r\n\r\n else:\r\n app.info.configure(text=\"INFO: Please provide the name of the file.\")\r\n # Also, if the user presses 'Export Data' button without giving a file name, 'Info' Label shows the message:\r\n # 'INFO: Please provide the name of the file.'\r", "def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_tenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def outputExcelReport(self):\n # ++++++++++\n # init\n # ++++++++++\n wb = openpyxl.Workbook()\n wb.fonts = openpyxl.styles.Font(\n name = 'Courier New',\n size = 12\n )\n # create and delete sheets\n _ = wb.create_sheet(title='Cover',index=0)\n _ = wb.create_sheet(title='Results',index=1)\n _ = wb.create_sheet(title='AllItems',index=2)\n _ = wb.remove(wb.worksheets[-1])\n # ++++++++++\n # Sheet 1 <Cover>\n # ++++++++++\n ws = wb['Cover']\n # --- title and date\n timeNow = datetime.datetime.now().isoformat().split('T')[0]\n ws.merge_cells('A1:B1')\n ws.merge_cells('A3:B3')\n ws['A1'] = '納入チェック ダイアグ確認結果'\n ws['A3'] = '作成日:{}'.format(timeNow)\n # --- sample info\n ws['A5'] = '<サンプル情報>'\n self._write2excel(ws, self._sample_info, 6, 1)\n for r in range(6,8):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- checker info\n ws['A9'] = '<チェッカ情報>'\n self._write2excel(ws, self._checker_info, 10, 1)\n for r in range(10,13):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- dmm info\n ws['A14'] = '<DMM情報>'\n self._write2excel(ws, self._dmm_info, 15, 1)\n for r in range(15,18):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- resistor info\n ws['A19'] = '<抵抗器情報>'\n self._write2excel(ws, self._resistor_info, 20, 1)\n for r in range(20,23):\n for c in range(1,3):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n ws[cell.coordinate].font = STYLE_FONT_PASS\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n 
ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 2 <Results>\n # ++++++++++\n ws = wb['Results']\n # --- output all scenario\n ws['A1'] = '<結果一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._result_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==6:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n ws.cell(cell.row,6).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # Sheet 3 <AllItems>\n # ++++++++++\n ws = wb['AllItems']\n # --- output all scenario\n ws['A1'] = '<出力一覧>'\n ws.merge_cells('A1:B1')\n self._write2excel(ws, self._scenario_info, 2, 1)\n for r in range(2,ws.max_row+1):\n for c in range(1,ws.max_column+1):\n ws.cell(r,c).border = BORDER\n # --- set styles\n for row in ws:\n for cell in row:\n # font color\n ws[cell.coordinate].font = STYLE_FONT_PASS\n cell.alignment = openpyxl.styles.Alignment(vertical='top')\n if cell.column==5:\n if ws[cell.coordinate].value =='FAIL':\n ws.cell(cell.row,1).font = STYLE_FONT_FAIL\n ws.cell(cell.row,2).font = STYLE_FONT_FAIL\n ws.cell(cell.row,3).font = STYLE_FONT_FAIL\n ws.cell(cell.row,4).font = STYLE_FONT_FAIL\n ws.cell(cell.row,5).font = STYLE_FONT_FAIL\n # cell color by header/even row\n if cell.row==2:\n ws[cell.coordinate].fill = STYLE_FILL_HEADER\n elif cell.row%2==0:\n ws[cell.coordinate].fill = STYLE_FILL_EVEN_ROW\n # indent in cell\n if '\\n' in str(cell.value):\n cell.alignment = openpyxl.styles.Alignment(wrapText=True)\n # --- set column width\n for col in ws.columns:\n # init\n max_length = 0\n column = openpyxl.utils.get_column_letter(col[0].column)\n # loop\n for cell in col:\n if len(str(cell.value)) > max_length:\n max_length = len(str(cell.value)) * (STYLE_FONT_PASS.size+1)/11\n # output\n adjusted_width = (max_length + 2) * 1.2\n ws.column_dimensions[column].width = adjusted_width\n # ++++++++++\n # save book\n # ++++++++++\n wb.save(self._filename)", "def export(self):\n rpt_date = datetime.now()\n filename = 'bushfire_regionbytenure_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n 
response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_auth_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def rite2xl(df, file_name):\r\n print('writing dataframe to excel',)\r\n writer = pd.ExcelWriter(file_name ,engine = 'xlsxwriter')\r\n df.to_excel(writer,file_name)\r\n writer.save()\r\n print('writing to excel sheet completed')\r\n return(df)", "def export_to_excel(self, worksheet, row_start, col_start, queryset, date_time=timezone.now()):\n\t\tif queryset:\n\t\t\t[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\tfor q in queryset:\n\t\t\t\t# object_excel_write function---date_time uyiin history objectiig excel -ruu horvuulne\n\t\t\t\t[row_write, col_write] = q.object_excel_write(worksheet, row_write, col_write, date_time=date_time)\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'bushfire_by_cause_10yr_average_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def _write2excel(self, sheet: object, data: list, start_row: int, start_col: int):\n for r in range(0,len(data)):\n for c in range(0,len(data[0])):\n sheet.cell(r+start_row,c+start_col).value=data[r][c]", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_268_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def excel_output(df):\n output = io.BytesIO()\n #time = str(date.today())\n #filename = \"output \"+time+\".xlsx\"\n writer = pd.ExcelWriter(output, engine='xlsxwriter', options={'remove_timezone': True})\n #writer.book.filename = io\n df.to_excel(writer,'Sheet1', index=False, header=True)\n writer.save()\n xlsx_data = output.getvalue()\n return xlsx_data", "def export(self):\r\n self.prices[\"returns\"] = self.returns\r\n self.prices.columns = ['prices', 'returns']\r\n self.prices = self.prices.dropna()\r\n \r\n name = QFileDialog.getSaveFileName(None, 'Save File', filter='*.xlsx')\r\n if(name[0] == ''):\r\n # if name empty\r\n pass\r\n else:\r\n self.prices.to_excel(name[0])", "def export(self):\n if len(self.records) == 0:\n exit_message = \"Exiting. 
There are no records for {} {} to export.\".format(self.args.date.strftime(\"%B\"), self.year)\n sys.exit(exit_message)\n\n total_days = (self.args.date.replace(month = self.args.date.month % 12 +1, day = 1)-timedelta(days=1)).day\n start_month = self.args.date.replace(day = 1)\n end_month = self.args.date.replace(day = total_days)\n workdays = self.netto_workdays(start_month, end_month, weekend_days=(5,6))\n template_file = os.path.join(self.config[\"templates_dir\"], \"template_timesheet_{}_days.xlsx\".format(workdays))\n\n export_file = os.path.join(self.config[\"exports_dir\"], \"timesheet_{}_{}.xlsx\".format(self.year, self.month_str))\n\n # set locale to use weekdays, months full name in german\n locale.setlocale(locale.LC_TIME, 'de_DE.UTF-8')\n wb = load_workbook(template_file)\n ws = wb.active\n ws.cell(row=7, column=4).value = self.config[\"name\"]\n month_year_str = \"{} {}\".format(self.args.date.strftime(\"%B\"), self.year)\n ws.cell(row=8, column=4).value = month_year_str\n row = 12\n for record in self.records:\n col = 2\n date = datetime.strptime(record[\"date\"], \"%d.%m.%Y\")\n ws.cell(row=row, column=col).value = date.strftime(\"%A\")\n col += 1\n ws.cell(row=row, column=col).value = date\n col += 1\n if \"special\" in record.keys() and record[\"special\"] == \"true\":\n ws.cell(row=row, column=9).value = 8.00\n col += 4\n else:\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_day\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"start_break\"], \"%H:%M\").time()\n col += 1\n ws.cell(row=row, column=col).value = datetime.strptime(record[\"end_break\"], \"%H:%M\").time()\n col += 4\n ws.cell(row=row, column=col).value = record[\"comment\"]\n row += 1\n wb.save(export_file)\n return True", "def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response", "def write_to_excel(self, fileNameNoExtension):\n self.data.to_excel(fileNameNoExtension + '.xlsx', engine='xlsxwriter')", "def write_excel(self, filename):\n writer = pd.ExcelWriter(filename)\n self.df_avg.to_excel(writer, 'Simulation')\n self.manager_df.to_excel(writer, 'FleetManagers')\n self.customer_df.to_excel(writer, 'Customers')\n self.transport_df.to_excel(writer, 'Transports')\n writer.save()", "def export(self):\n\n rpt_date = datetime.now()\n filename = 'ministerial_report_{}.xls'.format(rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n self.get_excel_sheet(rpt_date, book)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def export_vrouwen():\n voornaam, tussenvoegsel, achternaam, straat, huisnummer, postcode, woonplaats = [], [], [], [], [], [], []\n for i in range(len(df)):\n if df['Vrouw'].iloc[i]:\n voornaam.append(df['voornaam'].iloc[i])\n tussenvoegsel.append(df['tussenvoegsel'].iloc[i])\n achternaam.append(df['achternaam'].iloc[i])\n straat.append(df['straat'].iloc[i])\n huisnummer.append(df['huisnummer'].iloc[i])\n 
postcode.append(df['postcode'].iloc[i])\n woonplaats.append(df['woonplaats'].iloc[i])\n\n df_angfo = pd.DataFrame(voornaam, columns=['voornaam'])\n df_angfo['tussenvoegsel'] = pd.Series(tussenvoegsel, index=df_angfo.index)\n df_angfo['achternaam'] = pd.Series(achternaam, index=df_angfo.index)\n df_angfo['straat'] = pd.Series(straat, index=df_angfo.index)\n df_angfo['huisnummer'] = pd.Series(huisnummer, index=df_angfo.index)\n df_angfo['postcode'] = pd.Series(postcode, index=df_angfo.index)\n df_angfo['woonplaats'] = pd.Series(woonplaats, index=df_angfo.index)\n df_angfo.to_excel('output\\\\vrouwen_leden.xlsx', 'vrouwen_leden')", "def export_excel(header, data):\n tmp = NamedTemporaryFile()\n wb = Workbook()\n ws = wb.active\n\n ws.append(header)\n for row in export_data(data, header):\n ws.append(row)\n\n wb.save(tmp.name)\n tmp.seek(0)\n\n return tmp", "def ortra_export(request):\n export_fields = OrderedDict(ORTRA_EXPORT_FIELDS)\n export = OpenXMLExport('Exportation')\n export.write_line(export_fields.keys(), bold=True) # Headers\n # Data\n query_keys = [f for f in export_fields.values() if f is not None]\n query = Student.objects.filter(Q(klass__name__contains='ASAFE') |\n Q(klass__name__contains='ASEFE') |\n Q(klass__name__contains='ASSCFE'),\n archived=False).order_by('klass__name',\n 'last_name',\n 'first_name')\n\n for line in query.values(*query_keys):\n values = []\n for field in query_keys:\n if field == 'gender':\n values.append(('Madame', 'Monsieur')[line[field] == 'M'])\n else:\n values.append(line[field])\n export.write_line(values)\n\n return export.get_http_response('ortra_export')", "def export_to_excel(self, workbook, tailan_queryset):\n\t\t# workbook argumentdaa avna\n\t\tif tailan_queryset:\n\t\t\t#[row_write, col_write] = self.excel_write_header_and_format(worksheet, row_start, col_start)\n\t\t\t\n\t\t\tworksheet = workbook.add_worksheet(u'Гүний худаг')\n\t\t\tqueryset = Hudag.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Hudag.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.gunii_hudags:\n\t\t\t\t\tqueryset = tailan.gunii_hudags.hudags.all()\n\t\t\t\t\t[row_write, col_write] = Hudag.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэршүүлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsevershuuleh:\n\t\t\t\t\tqueryset = tailan.tsevershuuleh.tsevershuuleh.all()\n\t\t\t\t\t[row_write, col_write] = Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэрлэх байгууламж')\n\t\t\tqueryset = Ts_baiguulamj.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Ts_baiguulamj.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tseverleh:\n\t\t\t\t\tqueryset = tailan.tseverleh.tseverleh.all()\n\t\t\t\t\t[row_write, col_write] = 
Ts_baiguulamj.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Усан сан')\n\t\t\tqueryset = UsanSan.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsanSan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.usansan:\n\t\t\t\t\tqueryset = tailan.usansan.usan_sans.all()\n\t\t\t\t\t[row_write, col_write] = UsanSan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_nasos_stants:\n\t\t\t\t\tqueryset = tailan.tsever_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны насос станц')\n\t\t\tqueryset = NasosStants.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = NasosStants.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_nasos_stants:\n\t\t\t\t\tqueryset = tailan.bohir_nasos_stants.nasos_stantss.all()\n\t\t\t\t\t[row_write, col_write] = NasosStants.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Лаборатори')\n\t\t\tqueryset = Lab.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Lab.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.lab:\n\t\t\t\t\tqueryset = tailan.lab.labs.all()\n\t\t\t\t\t[row_write, col_write] = Lab.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.tsever_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.tsever_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны шугам')\n\t\t\tqueryset = Sh_suljee.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = Sh_suljee.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = 
col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_usnii_shugam:\n\t\t\t\t\tqueryset = tailan.bohir_usnii_shugam.sh_suljees.all()\n\t\t\t\t\t[row_write, col_write] = Sh_suljee.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'АХББ')\n\t\t\tqueryset = ABB.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = ABB.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.abb:\n\t\t\t\t\tqueryset = tailan.abb.abbs.all()\n\t\t\t\t\t[row_write, col_write] = ABB.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус, дулаан дамжуулах төв')\n\t\t\tqueryset = UsDamjuulahBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsDamjuulahBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_damjuulah_tov:\n\t\t\t\t\tqueryset = tailan.us_damjuulah_tov.usDamjuulahBair.all()\n\t\t\t\t\t[row_write, col_write] = UsDamjuulahBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ус түгээх байр')\n\t\t\tqueryset = UsTugeehBair.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = UsTugeehBair.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.us_tugeeh:\n\t\t\t\t\tqueryset = tailan.us_tugeeh.us_tugeeh_bairs.all()\n\t\t\t\t\t[row_write, col_write] = UsTugeehBair.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Цэвэр усны машин')\n\t\t\tqueryset = WaterCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = WaterCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.water_car:\n\t\t\t\t\tqueryset = tailan.water_car.water_cars.all()\n\t\t\t\t\t[row_write, col_write] = WaterCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Бохир усны машин')\n\t\t\tqueryset = BohirCar.objects.none()\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = BohirCar.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.bohir_car:\n\t\t\t\t\tqueryset = tailan.bohir_car.bohir_cars.all()\n\t\t\t\t\t[row_write, col_write] = BohirCar.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\n\t\t\tworksheet = workbook.add_worksheet(u'Ажилчдын судалгаа')\n\t\t\trow_write = 5\n\t\t\tcol_write = 1\n\t\t\t[row_write, col_write] = 
Ajiltan.excel_write_header_and_format(worksheet = worksheet, row_start = row_write, col_start = col_write)\n\t\t\tfor tailan in tailan_queryset:\n\t\t\t\tif tailan.ajiltans:\n\t\t\t\t\tqueryset = tailan.ajiltans.ajiltans.all()\n\t\t\t\t\t[row_write, col_write] = Ajiltan.export_to_excel_without_header(worksheet = worksheet, row_start=row_write, col_start=col_write, queryset = queryset, date_time = tailan.tailan_date)\n\t\t\t\t\n\t\telse:\n\t\t\tworksheet.write_string(row_start, col_start, u'Мэдээлэл байхгүй')", "def convert_sheet(filename, output):\n r2dt.write_converted_sheet(filename, output)", "def excel(df_ccl, df_arg_stocks, df_bonds, df_arg_stocks_ccl):\n if os.path.exists('CCL.xlsx'):\n wb = xw.Book('CCL.xlsx')\n # SHEET CEDEARS\n ws = wb.sheets('CCL CEDEARs')\n ws.range('A1').expand().value = df_ccl\n # SHEET MERVAL\n ws_merval = wb.sheets('Merval')\n ws_merval.range('A1').expand().value = df_arg_stocks\n # SHEET BONOS\n ws_bonds = wb.sheets('Bonos')\n ws_bonds.range('A1').expand().value = df_bonds\n # SHEET CCL MERVAL\n ws_ccl = wb.sheets('CCL ADRs')\n ws_ccl.range('A1').expand().value = df_arg_stocks_ccl\n\n tiempo = time.asctime()\n print('Carga exitosa de datos. Ultima ejecución: ',tiempo)", "def exportToCsv(self, filepath):\n table = list()\n table.append(list(self.__header))\n for a in self.__assays:\n table.append(\n [unicode(a.timestamp.isoformat()),\n unicode(a.dab_cell_count),\n unicode(a.hem_cell_count),\n unicode(a.dab_dabhemfraction),\n unicode(a.img_path)])\n # File encoding will be same as it expected by Excel on machine where\n # this file was created.\n encoding = locale.getpreferredencoding()\n with open(filepath, mode='wb') as f:\n writer = UnicodeWriter(f, encoding=encoding, delimiter=';')\n writer.writerows(table)", "def click_export_to_excel_button(self):\n self.click_element(self.export_to_excel_button_locator, True)", "def click_export_to_excel_button(self):\n self.click_element(self.export_to_excel_button_locator, True)", "def export(self,**kwargs):\n \n # import pdb;pdb.set_trace()\n \n # provide for case where recs are set extenally\n if not self.recs:\n self.select_recs(**kwargs)\n if self.recs:\n if self.export_file_name:\n filename = self.export_file_name\n else:\n filename = \"{table_name}_report_{datetime}.csv\".format(\n table_name = self.table.display_name,\n datetime = date_to_string(local_datetime_now(),'iso_datetime'),\n ).replace(' ','_').lower()\n \n if not self.export_fields:\n # include all fields by default\n self.export_fields = self._set_default_list_fields(include_all=True).copy()\n\n self.set_list_fields(self.export_fields)\n \n \n if self.export_template:\n result = render_template(self.export_template, data=self)\n else:\n # add a descriptive title row\n if self.export_title:\n result = self.export_title.strip() + '\\n'\n else:\n result = \"Export of table {} as of {}\\n\".format(self.table.table_name,excel_date_and_time_string(local_datetime_now()))\n \n result += ','.join([x['label'] for x in self.export_fields]) + '\\n'\n for rec in self.recs:\n rec_row = []\n for field in self.export_fields:\n data = rec.__getattribute__(field['name'])\n if field['type'].upper() == \"DATE\":\n data = local_date_string(data)\n elif field['type'].upper() == \"DATETIME\":\n data = excel_date_and_time_string(data)\n else:\n # just text\n data = str(data).strip()\n \n # replace double quotes with double-double quotes\n data = data.replace('\"','\"\"') #double up on double quotes\n \n if \",\" in data:\n # if any commas, wrap in quotes\n data = '\"' + 
data + '\"'\n \n #replace returns\n data = data.replace('\\r\\n',' -crnl- ')\n data = data.replace('\\n',' -nl- ')\n data = data.replace('\\r',' -rtn- ')\n\n rec_row.append(data)\n \n result += ','.join([str(x) for x in rec_row]) + '\\n'\n \n return DataStreamer(result,filename,'text/csv').send()\n \n self.result_text = \"No records selected\"\n self.success = False\n \n flash(self.result_text)\n return self.list(**kwargs)", "def to_xls(self,ws,start_row = 0,start_col = 0,width_ratio = 1): \n if self.col_width_dict: \n for c in range(self.no_of_columns()):\n ws.col(start_col+c).width = int(35*self.col_width(c)*width_ratio); \n \n boldstyle = xlwt.XFStyle()\n boldstyle.font.bold = True\n \n for r in range(self.no_of_rows()):\n for c in range(self.no_of_columns()):\n if r == 0:\n ws.write(start_row + r,start_col + c,self.cell(r,c),boldstyle)\n else:\n ws.write(start_row + r,start_col + c,self.cell(r,c))", "def generate_service_odometer_xlsx_report(self, res, next_service):\n workbook = xlwt.Workbook()\n worksheet = workbook.add_sheet(\"next_service_by_odometer\")\n worksheet.col(0).width = 5000\n worksheet.col(1).width = 12500\n worksheet.col(2).width = 10000\n worksheet.col(3).width = 6000\n worksheet.col(4).width = 7500\n worksheet.col(5).width = 7500\n worksheet.col(6).width = 7500\n worksheet.col(7).width = 7500\n worksheet.col(8).width = 10000\n\n font = xlwt.Font()\n font.bold = True\n font.name = \"Arial\"\n font.height = 200\n # pattern = xlwt.Pattern()\n border = xlwt.easyxf(\"font: bold 1; font: name 1; font: height 200\")\n format1 = xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200;\\\n pattern: pattern solid, fore_colour yellow;\"\n )\n xlwt.easyxf(\n \"font: bold 1; font: name 1; font: height 200\", num_format_str=\"DD/MM/YYYY\"\n )\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"Scheduled Maintenance By Mileage\", format1)\n row += 3\n worksheet.write(row, 7, \"Date :\", format1)\n worksheet.write(row, 8, time.strftime(\"%d-%B-%Y\"), format1)\n row += 2\n worksheet.write(row, 0, \"NO.\", format1)\n worksheet.write(row, 1, \"VEHICLE ID\", format1)\n worksheet.write(row, 2, \"VIN NO.\", format1)\n worksheet.write(row, 3, \"MAKE\", format1)\n worksheet.write(row, 4, \"MODEL\", format1)\n worksheet.write(row, 5, \"LAST SERVICE DATE\", format1)\n worksheet.write(row, 6, \"LAST MILEAGE\", format1)\n worksheet.write(row, 7, \"NEXT MILEAGE\", format1)\n worksheet.write(row, 8, \"REGISTRATION STATE\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in next_service:\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.name or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.vin_sn or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.f_brand_id and obj.f_brand_id.name or \"\", border\n )\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.model_id and obj.model_id.name or \"\", border\n )\n line_col += 1\n date = \"\"\n if obj.last_service_date:\n date = format_date(\n self.env,\n obj.last_service_date,\n self._context.get(\"lang\"),\n date_format=False,\n )\n worksheet.write(line_row, line_col, date or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.odometer or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.due_odometer or \"\", border)\n line_col += 1\n # worksheet.write(line_row, line_col,\n # obj.vechical_location_id and\n # obj.vechical_location_id.name or '', border)\n 
line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)\n fp = io.BytesIO()\n workbook.save(fp)\n fp.seek(0)\n data = fp.read()\n fp.close()\n res = base64.encodebytes(data)\n return res", "def to_xlsx(self, filename):\n # create path if it does not exist\n suffix = filename.split(\".\")[-1]\n if not suffix == \"xlsx\":\n filename = filename + \".xlsx\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n writer = pd.ExcelWriter(filename)\n for name, df in sorted(self.input_data.items()):\n df.to_excel(writer, name)\n writer.save()\n logging.info(\"Scenario saved as excel file to %s\", filename)", "def export_ho_dan_as_excel_action(fields=None, exclude=None, header=True):\n def export_as_excel(modeladmin, request, queryset):\n opts = modeladmin.model._meta\n field_names = [\"name\", \"status\", \"location\", \"tinh\",\n \"xa\", \"huyen\", \"phone\", \"cuuho\", \"update_time\", \"note\"]\n display_names = [\"Tên hộ dân\", \"Tình trạng\", \"Vị trí\", \"Tỉnh\", \"Xã\",\n \"Huyện\", \"Sdt\", \"hỗ trợ\", \"Thời gian cuối cùng cập nhật\", \"Ghi chú\"]\n file_name = \"Danh_sach_ho_dan\"\n\n output = io.BytesIO()\n\n workbook = xlsxwriter.Workbook(output, {'in_memory': True})\n worksheet = workbook.add_worksheet()\n row = 0\n if header:\n write_a_row(worksheet, row, display_names)\n row += 1\n for obj in queryset:\n arr = []\n for field in field_names:\n if field == \"status\" and obj.status:\n arr.append(obj.status.name)\n elif field == \"update_time\":\n utc_time = getattr(obj, field)\n local_datetime = utc_to_local(utc_time)\n arr.append(local_datetime.strftime(\"%d/%m/%Y %H:%M:%S\"))\n else:\n arr.append(str(getattr(obj, field) or \"\"))\n write_a_row(worksheet, row, arr)\n row += 1\n\n workbook.close()\n\n output.seek(0)\n\n response = HttpResponse(output.read(\n ), content_type=\"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet\")\n response['Content-Disposition'] = f\"attachment; filename={file_name}.xlsx\"\n\n output.close()\n\n return response\n\n export_as_excel.short_description = \"Xuất file excel\"\n return export_as_excel", "def to_excel(self, filename, keep_raw=True):\n writer = pd.ExcelWriter(filename)\n\n for group in self.core_groups:\n label = \"Core %s\" % group\n df = self.core_set(group)\n del (df[\"Core\"])\n df.to_excel(writer, sheet_name=label, index=False)\n\n if keep_raw:\n self.data.to_excel(writer, sheet_name=\"raw\", index=False)\n writer.close()", "def ListDataToExcel(listdata,filename):\n\n # file_backup=f = codecs.open(parent+info.QryPositionExchangeID+\"/\"+filename,'wb','utf-8')\n csvfile = file(filename.decode(\"utf-8\"), 'wb')\n csvfile.write(codecs.BOM_UTF8)\n writer=csv.writer(csvfile)\n writer.writerows(listdata)\n csvfile.close()\n df_new = pd.read_csv(filename, encoding='utf-8')\n writer = pd.ExcelWriter(filename.replace(\".csv\",\".xlsx\"))\n df_new.to_excel(writer, index=False)\n writer.save()\n os.remove(filename)", "def generate_excel(structure:dict, output:str):\t\n\n\tstructure_columns = identify_columns(structure)\n\n\tworkbook = xlsxwriter.Workbook(output)\n\tworksheet = workbook.add_worksheet()\n\n\tcol = 0\n\tfor column in structure_columns:\n\t\tworksheet.write(0, col, column)\n\t\tcol += 1\n\n\trow = 1\n\tfor day in structure['data']:\n\t\tfor key in day.keys():\n\t\t\tif isinstance(day[key], list):\n\t\t\t\tworksheet.write(row, structure_columns.index(key), ', '.join(day[key]))\n\t\t\telif isinstance(day[key], dict):\n\t\t\t\tworksheet.write(row, 
structure_columns.index(key), str(day[key]))\n\t\t\telse:\n\t\t\t\tworksheet.write(row, structure_columns.index(key), day[key])\n\t\trow += 1\n\t\n\tworksheet.freeze_panes(1, 1)\n\tworkbook.close()", "def writeToExcel(self, filename = \"Interfaces.xlsx\", idx = None, prec = 4,\\\n verbose = 1):\n\n if idx is None:\n idx = np.arange(self.atoms.shape[0])\n elif type(idx) is int: \n idx = np.array([idx])\n else:\n idx = np.array(idx)\n \n\n dataDict = {\"Index\": idx, \"Original Rotation\": self.ang[idx],\\\n \"Length a\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 0], prec),\\\n \"Length b\": np.round(self.getCellLengths(idx = idx, cell = 1)[:, 1], prec),\\\n \"Angle a/b\": np.round(self.getBaseAngles(cell = 1)[idx], prec),\\\n \"Atoms\": self.atoms[idx],\\\n \"Area\": self.getAreas()[idx],\\\n \"Strain 11\": np.round(self.eps_11[idx], prec),\\\n \"Strain 22\": np.round(self.eps_22[idx], prec),\\\n \"Strain 12\": np.round(self.eps_12[idx], prec),\\\n \"Strain MAS\": np.round(self.eps_mas[idx], prec),\\\n \"Base 1 ax\": np.round(self.cell_1[idx, 0, 0], prec),\\\n \"Base 1 ay\": np.round(self.cell_1[idx, 1, 0], prec),\\\n \"Base 1 bx\": np.round(self.cell_1[idx, 0, 1], prec),\\\n \"Base 1 by\": np.round(self.cell_1[idx, 1, 1], prec),\\\n \"Base 2 ax\": np.round(self.cell_2[idx, 0, 0], prec),\\\n \"Base 2 ay\": np.round(self.cell_2[idx, 1, 0], prec),\\\n \"Base 2 bx\": np.round(self.cell_2[idx, 0, 1], prec),\\\n \"Base 2 by\": np.round(self.cell_2[idx, 1, 1], prec),\\\n \"Rep 1 ax\": np.round(self.rep_1[idx, 0, 0], prec),\\\n \"Rep 1 ay\": np.round(self.rep_1[idx, 1, 0], prec),\\\n \"Rep 1 bx\": np.round(self.rep_1[idx, 0, 1], prec),\\\n \"Rep 1 by\": np.round(self.rep_1[idx, 1, 1], prec),\\\n \"Rep 2 ax\": np.round(self.rep_2[idx, 0, 0], prec),\\\n \"Rep 2 ay\": np.round(self.rep_2[idx, 1, 0], prec),\\\n \"Rep 2 bx\": np.round(self.rep_2[idx, 0, 1], prec),\\\n \"Rep 2 by\": np.round(self.rep_2[idx, 1, 1], prec)}\n\n for i in range(self.e_int_c.shape[1]):\n key = \"E_int_c_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_c[idx, i], prec)\n\n for i in range(self.w_sep_c.shape[1]):\n key = \"W_sep_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_c[idx, i], prec)\n\n for i in range(self.w_seps_c.shape[1]):\n key = \"W_seps_c_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_c[idx, i], prec)\n\n for i in range(self.e_int_d.shape[1]):\n key = \"E_int_d_T%i\" % (i)\n dataDict[key] = np.round(self.e_int_d[idx, i], prec)\n\n for i in range(self.w_sep_d.shape[1]):\n key = \"W_sep_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_sep_d[idx, i], prec)\n\n for i in range(self.w_seps_d.shape[1]):\n key = \"W_seps_d_T%i\" % (i)\n dataDict[key] = np.round(self.w_seps_d[idx, i], prec)\n\n\n data = pd.DataFrame(dataDict)\n data.to_excel(filename)\n\n if verbose > 0:\n string = \"Data written to Excel file: %s\" % filename\n ut.infoPrint(string)", "def write_to_xls_file(self,xls_filename,sheet_name):\r\n rb = xlrd.open_workbook(xls_filename,formatting_info=True)\r\n workbook = copy(rb) #a writable copy (I can't read values out of this, only write to it)\r\n\r\n ''' get all sheetnames '''\r\n list_of_sheetnames = []\r\n list_of_sheetnames = rb.sheet_names()\r\n ''' make a set of sheetnames without duplication '''\r\n sheet_names = set(list_of_sheetnames)\r\n ''' verify if a given ticker existed or not '''\r\n if (sheet_name in sheetnames) == True:\r\n flag = True\r\n else:\r\n flag = False\r\n\r\n if flag == True:\r\n print \"The data sheet named \" + ticker_name + \" existed.\"\r\n else:\r\n print 
\"No data sheet named \" + ticker_name + \", created new\"\r\n w_sheet = workbook.add_sheet(ticker_name)\r\n w_sheet.write(0,0,'Eod_C_Action')\r\n w_sheet.write(0,1,'Eod_I_Version')\r\n w_sheet.write(0,2,'UsrId')\r\n w_sheet.write(0,3,'Eod_D_Creation')\r\n w_sheet.write(0,4,'Eod_D_Quote')\r\n w_sheet.write(0,5,'InsId')\r\n w_sheet.write(0,6,'Eod_I_ProviderId')\r\n w_sheet.write(0,7,'Eod_N_Open')\r\n w_sheet.write(0,8,'Eod_N_High')\r\n w_sheet.write(0,9,'Eod_N_Low')\r\n w_sheet.write(0,10,'Eod_N_Close')\r\n w_sheet.write(0,11,'Eod_I_Volume')\r\n \r\n for row_index in range(1,len(self.close)+1):\r\n w_sheet.write(row_index,0,'A')\r\n w_sheet.write(row_index,1,0)\r\n w_sheet.write(row_index,2,8)\r\n w_sheet.write(row_index,3,datetime.datetime.now().strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,4,self.date[row_index-1].strftime('%Y-%m-%d'))\r\n w_sheet.write(row_index,5,1)\r\n w_sheet.write(row_index,6,1)\r\n w_sheet.write(row_index,7,self.open_[row_index-1])\r\n w_sheet.write(row_index,8,self.high[row_index-1])\r\n w_sheet.write(row_index,9,self.low[row_index-1])\r\n w_sheet.write(row_index,10,self.close[row_index-1])\r\n w_sheet.write(row_index,11,self.volume[row_index-1])\r\n\r\n workbook.save(xls_filename)", "def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')", "def excel_print(data1, data2, data3, data4, data5, data6):\r\n\r\n list_data = [data1, data2, data3, data4, data5, data6]\r\n name_list = ['Old elec', 'New elec', 'Old elec dup', 'New elec dup',\r\n 'Diff After Strip', 'New Elec Before Strip']\r\n zipped = zip(list_data, name_list)\r\n excel_writer = pd.ExcelWriter('elec_delta2.xlsx', engine='xlsxwriter')\r\n for data, name in zipped:\r\n data.to_excel(excel_writer, sheet_name=name,\r\n index=False, freeze_panes=(1, 0))\r\n num_cols = len(list(data))\r\n worksheet = excel_writer.sheets[name]\r\n worksheet.autofilter(0, 0, 0, num_cols-1)\r\n worksheet.set_column(0, 0, 23.56)\r\n worksheet.set_column(1, 1, 34.89)\r\n excel_writer.save()", "def test_excel_simple_input(self, extension='xls'):\n excel_support = getattr(settings, 'EXCEL_SUPPORT', django_tables2_reports.utils.get_excel_support())\n response = self.table.treatement_to_response(\n self.table.as_csv(HttpRequest()),\n report_format='xls')\n self.assertEqual(response.status_code, 200)\n open('test-file-%s.%s' % (excel_support, extension),\n 'wb').write(response.content)", "def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])", "def export_table (self,_w):\n try:\n _data = \"\"\n maxRow = _w.rowCount()\n maxColumn = _w.columnCount()\n for hc in range(0,maxColumn):\n try: _hci = str(_w.horizontalHeaderItem(hc).text())\n except:_hci=\"None\";pass\n if hc == (maxColumn-1) :_data += _hci\n elif hc < maxColumn:_data += \"%s,\" % _hci\n _data += \"\\n\"\n for r in range(0, maxRow):\n for c in range(0, maxColumn):\n _d = str(_w.item(r, c).text())\n if c == (maxColumn-1):_data += _d\n elif c < maxColumn:_data += \"%s,\" % _d\n _data += 
\"\\n\"\n options = QFileDialog.Options()\n saved_file, _ = QFileDialog.getSaveFileName(self, \"Save Table to file \", \"data\", \"Plain Text (*.txt);;CSV (*.csv);;All Files (*)\", options=options)\n _file = open(saved_file, 'w')\n _file.write(_data)\n _file.close()\n except FileNotFoundError:pass", "def export_data(self):\n with open('../data/yahoodata.csv', 'a', encoding='utf-8') as f:\n self.data.to_csv('../data/yahoodata.csv', sep='\\t', encoding='utf-8')\n # Header information\n template = \"# TSLA Stocks over time \\n\" + \\\n \"# --------------------------------------------------------------------- \\n\" + \\\n \"# Export of stock data of \\\"Tesla Inc.\\\" for current year. The dataset\\n\" + \\\n \"# consists of selected key stock exchange figures on a daily basis. \\n\" + \\\n \"# The data can be recreated at any time with the \\\"load_data.py\\\"-script.\\n\" + \\\n \"# The data record contains one record sorted per trading day. \\n\" + \\\n \"#\\n\" + \\\n \"# The data is restricted to the NASDAQ symbol \\\"TSLA\\\" which represents \\n\" + \\\n \"# the company Tesla Inc. The stock information was limited to the period \\n\" + \\\n \"# from 1st January to the current day of the year. \\n\" + \\\n \"#\\n\" + \\\n \"# Extracted via Yahoo-Finance API, https://pypi.org/project/yahoo-finance/ \\n\" + \\\n \"# December, 26, 2018, Marco Romanutti \\n\" + \\\n \"#\\n\" + \\\n \"#\\n\" + \\\n \"{}\"\"\"\n\n with open('../data/yahoodata.csv', 'w', encoding='utf-8') as fp:\n fp.write(template.format(self.data.to_csv(index=True, encoding='utf-8')))", "def generate_xlsx_report(self, workbook, data, parts_data):\n worksheet = workbook.add_worksheet(\"daily_parts_issuance_wizard\")\n worksheet.set_column(0, 0, 10)\n worksheet.set_column(1, 1, 15)\n worksheet.set_column(2, 2, 20)\n worksheet.set_column(3, 3, 15)\n worksheet.set_column(4, 4, 10)\n worksheet.set_column(5, 5, 12)\n worksheet.set_column(6, 6, 10)\n worksheet.set_column(7, 7, 10)\n worksheet.set_column(8, 8, 15)\n worksheet.set_column(9, 9, 10)\n worksheet.set_column(10, 10, 15)\n worksheet.set_column(11, 11, 10)\n worksheet.set_column(12, 12, 20)\n worksheet.set_column(13, 13, 5)\n worksheet.set_column(14, 14, 5)\n worksheet.set_column(15, 15, 5)\n\n bold = workbook.add_format(\n {\"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n tot = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n border = workbook.add_format(\n {\"border\": 2, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n merge_format = workbook.add_format({\"border\": 2, \"align\": \"center\"})\n format1 = workbook.add_format(\n {\"border\": 2, \"bold\": True, \"font_name\": \"Arial\", \"font_size\": \"10\"}\n )\n format1.set_bg_color(\"gray\")\n date = workbook.add_format({\"num_format\": \"dd/mm/yy\"})\n\n worksheet.merge_range(\"C3:F3\", \"Merged Cells\", merge_format)\n\n row = 0\n row += 1\n row += 1\n worksheet.write(row, 2, \"DAILY PARTS ISSUANCE\", tot)\n row += 1\n worksheet.write(row, 2, \"Date From:\", tot)\n worksheet.write(row, 3, data[\"form\"][\"date_from\"] or \"\", border)\n worksheet.write(row, 4, \"To:\", tot)\n worksheet.write(row, 5, data[\"form\"][\"date_to\"] or \"\", border)\n row += 2\n worksheet.write(row, 0, \"CMF\", bold)\n row = 3\n\n for objec in self.get_work_order_detail(data[\"form\"]):\n row += 3\n worksheet.write(row, 0, \"DATE ISSUED :\", bold)\n worksheet.write(row, 1, objec.get(\"date\") or \"\", date)\n row += 2\n worksheet.write(row, 0, \"NO.\", 
format1)\n worksheet.write(row, 1, \"WO NO.\", format1)\n worksheet.write(row, 2, \"VEHICLE ID\", format1)\n worksheet.write(row, 3, \"PART NO.\", format1)\n worksheet.write(row, 4, \"PART NAME\", format1)\n worksheet.write(row, 5, \"VEHICLE MAKE\", format1)\n worksheet.write(row, 6, \"USED\", format1)\n worksheet.write(row, 7, \"UNIT TYPE\", format1)\n worksheet.write(row, 8, \"OLD PART RETURND\", format1)\n worksheet.write(row, 9, \"ISSUED BY\", format1)\n worksheet.write(row, 10, \"REMARKS\", format1)\n line_row = row + 1\n line_col = 0\n counter = 1\n for obj in objec.get(\"value\"):\n worksheet.write(line_row, line_col, counter, border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"wo_name\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"vehicle_id\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_no\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"part_name\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"vehicle_make\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"qty\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"uom\") or \"\", border)\n line_col += 1\n worksheet.write(\n line_row, line_col, obj.get(\"old_part_return\") or \"\", border\n )\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"issued_by\") or \"\", border)\n line_col += 1\n worksheet.write(line_row, line_col, obj.get(\"remarks\") or \"\", border)\n line_col = 0\n line_row += 1\n counter += 1\n worksheet.write(line_row, line_col, \"********\", border)", "def dumptoexcel(source_html, output_excel):\r\n\r\n arguments = {'srcName' : source_html, 'desName' :output_excel }\r\n\r\n #Reading from HTML file.\r\n soup = BeautifulSoup(open(arguments['srcName']))\r\n table = soup.find('table')\r\n table_rows = table.find_all('tr')\r\n\r\n \r\n #Opening Excel File.\r\n desWorkBook = openpyxl.Workbook()\r\n desSheet = desWorkBook.active\r\n\r\n\r\n #Getting data ready to write.\r\n all_rows = []\r\n\r\n table_head = table.find_all('th')\r\n row = [i.text for i in table_head]\r\n all_rows.append(row)\r\n \r\n for tr in table_rows:\r\n td = tr.find_all('td')\r\n row = [i.text for i in td]\r\n if(len(row) != 0):\r\n all_rows.append(row)\r\n\r\n rowLen = len(all_rows[0])\r\n maxColWidths = [0]*rowLen\r\n \r\n for row in all_rows:\r\n for i in range(0,rowLen):\r\n temp = len(row[i])\r\n if(maxColWidths[i]<temp):\r\n maxColWidths[i] = temp\r\n\r\n \r\n #Writing to Excel File.\r\n rowNo = 1\r\n for row in all_rows:\r\n colNo = 1\r\n row_len = len(row)\r\n for i in xrange(1,row_len):\r\n\r\n desSheet.cell(row=rowNo, column=colNo).value = row[i]\r\n desSheet.column_dimensions[get_column_letter(colNo)].width = maxColWidths[i] \r\n colNo = colNo+1\r\n \r\n rowNo = rowNo+1\r\n\r\n #Saving Excel File.\r\n \r\n desWorkBook.save(arguments['desName'])", "def output_1cell(self, filename):\n\n date_concat = \"{0} to {1}\".format(self.startDate, self.endDate)\n if active_restaurant_loop:\n column_name = [\"range\", \"start_date\",\n \"end_date\", \"location_id\", \"content\"]\n data = [date_concat, self.startDate, self.endDate, str(\n self.payload[\"locationGroupID\"]), \"{0}\".format(self.content)]\n data_out = [column_name, data]\n else:\n column_name = [\"range\", \"start_date\", \"end_date\", \"content\"]\n data = [date_concat, self.startDate,\n self.endDate, \"{0}\".format(self.content)]\n 
data_out = [column_name, data]\n\n # If active restaurant loop is true\n if not os.path.isfile(filename):\n with open(filename, \"w\") as f:\n writer = csv.writer(f)\n #writer.writerow([\"range\", \"start_date\", \"end_date\", \"content\"])\n #writer.writerow([date_concat, start_date, end_date, \"{0}\".format(self.content)])\n writer.writerows(data_out)\n # f.write([\"content\"])\n # f.write([\"{0}\"].format(self.content))\n f.close()\n else:\n with open(filename, \"a\") as f:\n writer = csv.writer(f)\n writer.writerows([data])\n f.close()\n\n logging.info(\"Outputting... \")\n self.produce_manifest(filename)", "def test_export_spreadsheet(self):\r\n client = self.getClient()\r\n if client:\r\n exp = [['#SampleID', 'DOB'],\r\n ['#Example mapping file for the QIIME analysis package. '\r\n 'These 9 samples are from a study of the effects of exercise '\r\n 'and diet on mouse cardiac physiology (Crawford, et al, '\r\n 'PNAS, 2009).'], ['PC.354', '20061218'],\r\n ['PC.355', '20061218'], ['PC.356', '20061126'],\r\n ['PC.481', '20070314'], ['PC.593', '20071210'],\r\n ['PC.607', '20071112'], ['PC.634', '20080116'],\r\n ['PC.635', '20080116'], ['PC.636', '20080116']]\r\n obs = _export_spreadsheet(client, self.spreadsheet_key,\r\n self.worksheet_id, ['#SampleID', 'DOB'])\r\n self.assertEqual(obs, exp)\r\n else:\r\n raise GoogleSpreadsheetConnectionError(\"Cannot execute test \"\r\n \"without an active Internet connection.\")", "def getrailfinancial(df,outputlocation):\n #create filename with date_and_timestamp\n formatted_date = datetime.datetime.now().strftime('%Y%m%d_%H-%M')\n destinationfilename = f'rail_financial_data_{formatted_date}.xlsx'\n\n # group and sum the superfile by two cuts\n revsplitbytocticketreg = df.groupby(['Carrier TOC / Third Party Code','Product Code','Regulated_Status'],as_index=False).agg({'Adjusted Earnings Amount':['sum']})\n revsplitbytocsectorclasscatreg = df.groupby(['Carrier TOC / Third Party Code','sector','class','Category','Regulated_Status'], as_index=False).agg({'Adjusted Earnings Amount':['sum']})\n\n # rename columns of the group and summed data\n revsplitbytocticketreg.rename(columns = {'Carrier TOC / Third Party Code':'TOC','Product Code':'Ticket','Regulated_Status':'Reg/Unreg','Adjusted Earnings Amount':'Earnings'},inplace=True)\n revsplitbytocsectorclasscatreg.rename(columns = {'Carrier TOC / Third Party Code':'TOC','sector':'Sector','class':'Class','Category':'Category','Regulated_Status':'Reg/Unreg','Adjusted Earnings Amount':'Earnings'},inplace=True) \n\n #prepare excel writer object, export dataframes to two different ranges and save excel file\n writer = pd.ExcelWriter(outputlocation + destinationfilename, engine='xlsxwriter')\n revsplitbytocticketreg.to_excel(writer,sheet_name='rail_financial_data')\n revsplitbytocsectorclasscatreg.to_excel(writer,sheet_name='rail_financial_data',startcol=10 )\n writer.save()", "def D_Base_to_Exel(self):\n# for item in sorted(self.dbase.keys()): # for every key/cell add to a dataFRAME\n# self.dataFRAME[item]=self.dbase[item]\n \n self.dataFRAME = self.Dbase_to_DF()\n writer = ExcelWriter(self.path+'/ALLwells'+ self.filetype) # assign a path for the file\n self.dataFRAME.to_excel(writer, 'Sheet1') # create a file in the same path the original files came from\n writer.save()", "def saveAll(self):\r\n path = saveFile(ftype='xlsx')\r\n writer = pd.ExcelWriter(path)\r\n df = pd.DataFrame(self.saveAll)\r\n df.to_excel(writer, header=False, index=False)\r\n writer.save()\r\n \r\n #Format the excel file\r\n try:\r\n import 
openpyxl\r\n from openpyxl.styles import Alignment, Font, Border, Side\r\n #Load the workbook and worksheet\r\n wb = openpyxl.load_workbook(filename=path)\r\n ws = wb.get_sheet_by_name(\"Sheet1\")\r\n cells = ['E1','H1','K1','N1','Q1','T1','W1','Z1']\r\n ws.merge_cells('E1:G1')\r\n ws.merge_cells('H1:J1')\r\n ws.merge_cells('K1:M1')\r\n ws.merge_cells('N1:P1')\r\n ws.merge_cells('Q1:S1')\r\n ws.merge_cells('T1:V1')\r\n ws.merge_cells('W1:Y1')\r\n ws.merge_cells('Z1:AB1')\r\n #Bold and center the headers\r\n ft = Font(bold=True)\r\n for cell in cells:\r\n ws[cell].alignment = Alignment(horizontal=\"center\")\r\n ws[cell].font = ft\r\n #Add borders\r\n rows,_ = self.saveAll.shape\r\n for i in range(rows):\r\n for cell in cells:\r\n c = cell[0]+str(i+1)\r\n ws[c].border = Border(left=Side(style='thin'))\r\n\r\n \r\n \r\n wb.save(path)\r\n \r\n except ImportError:\r\n pass", "def dataframe_to_excel(df, sheet_title, project_constants_lst, \n current_date=str(date.today()), force_flag = False, freeze_column='A'):\n \n project_steps_df, max_title, _, report_requisites_sr, *_ = project_constants_lst\n report_type, export_flag, df_decription = project_steps_df.loc[sheet_title, ['report_type', 'export_to_excel', 'description']].values\n \n # check DataFrame report type to save\n if report_type == 'report':\n report_mark = report_requisites_sr['project_title'] + '_tables'\n else:\n report_mark = report_type\n \n # construct excel filename\n file_name = report_requisites_sr['customer_name'] + '_' + report_mark + '_' + current_date + '.xlsx'\n\n # information string\n info = f'Exporting {sheet_title} table to {report_mark} file'\n print(info, end =\" \")\n file_path = os.path.join(report_requisites_sr['today_report_folder'], file_name)\n \n # save DataFrame to excel file if export_to_excel trigger is ON\n # and DataFrame is not empty\n if (force_flag or export_flag) and not df.empty:\n fsop.create_folder(report_requisites_sr['today_report_folder'], max_title, display_status=False)\n file_mode = 'a' if os.path.isfile(file_path) else 'w'\n df = df.apply(pd.to_numeric, errors='ignore')\n try:\n if_sheet_exists_param = 'replace' if file_mode == 'a' else None\n content_df, item_exist = generate_table_of_contents(file_path, file_mode, sheet_title, df_decription)\n df_flat = drop_multindex(df)\n # write table of contents and data dataframe to the excel file\n with pd.ExcelWriter(file_path, mode=file_mode, if_sheet_exists=if_sheet_exists_param, engine='openpyxl') as writer:\n if file_mode == 'w' or not item_exist:\n content_df.to_excel(writer, sheet_name='Содержание', index=False)\n df_flat.to_excel(writer, sheet_name=sheet_title, startrow=2, index=False)\n # format table of contents and data worksheets\n workbook = openpyxl.load_workbook(file_path)\n format_workbook(workbook, sheet_title, df_decription, freeze_column)\n workbook.save(file_path)\n except PermissionError:\n status_info('fail', max_title, len(info))\n print('\\nPermission denied. 
Close the file.\\n')\n sys.exit()\n else:\n status_info('ok', max_title, len(info))\n return file_path \n else:\n # if save key is on but DataFrame empty\n if project_steps_df.loc[sheet_title, 'export_to_excel'] and df.empty:\n status_info('no data', max_title, len(info))\n else: \n status_info('skip', max_title, len(info))\n return None", "def download_report():\n entities = get_names()\n save_csv(entities)", "def click_vendor_price_list_detail_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_rates_grid_div_id)", "def export_helped_table(db):\r\n # Get current date.\r\n date = datetime.datetime.today().strftime('%Y-%m-%d')\r\n # Create directory and file.\r\n if not os.path.exists(backup_dir):\r\n os.makedirs(backup_dir)\r\n backup_file = backup_dir + \"backup_\" + date + \".xlsx\"\r\n # Create workbook and add worksheet.\r\n workbook = xlsxwriter.Workbook(backup_file)\r\n worksheet = workbook.add_worksheet()\r\n # Add bold format to highlight cells.\r\n bold = workbook.add_format({'bold': True})\r\n # Create data headers.\r\n worksheet.write('A1', 'Customer Number', bold)\r\n worksheet.write('B1', 'Name', bold)\r\n worksheet.write('C1', 'Username', bold)\r\n worksheet.write('D1', 'RU_ID', bold)\r\n worksheet.write('E1', 'OS_Platform', bold)\r\n worksheet.write('F1', 'Description', bold)\r\n # Get number of rows in table.\r\n c = db.cursor()\r\n c.execute(\"SELECT * FROM helped\")\r\n customers = c.fetchall()\r\n # Loop through the data and write it row by row.\r\n for row in range(0, len(customers)):\r\n for col in range(0, 6):\r\n worksheet.write((row + 1), col, customers[row][col])\r\n workbook.close()", "def write_xlsx(data):\n workbook = xlsxwriter.Workbook('MyWorkbook.xlsx')\n main_sheet = workbook.add_worksheet('MySheet')\n\n date_format = workbook.add_format(\n {'num_format': 'mm/dd/yy hh:mm:ss AM/PM'})\n length = str(len(data) + 1)\n \n main_sheet.add_table(('A1:D' + length), \n {'data': data,\n 'columns': [{'header': 'Department'}, {'header': 'Students'},\n {'header': 'Cumulative GPA'},\n {'header': 'Final Date',\n 'format': date_format}]})\n\n department_grades = workbook.add_chart({'type':'column'})\n department_grades.set_title(\n {'name':'Department and Grade distribution'})\n department_grades.add_series(\n {'categories':'=MySheet!$A$2:$A$5',\n 'values':'=MySheet!$C$2:$C$5'})\n main_sheet.insert_chart('A8', department_grades)\n workbook.close()", "def click_vendor_price_list_detail_dial_digits_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_dial_digits_grid_div_id)", "def Export_in_files(COVID_data, COVID_data_reconstructed):\r\n F_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted.csv' % (date.today().isoformat()), 'w')\r\n FR_data_file = open(Datafiles_directory + '\\\\OWID COVID data %s formatted reconstructed.csv' % (date.today().isoformat()), 'w')\r\n \r\n COVID_data_lists = [COVID_data, COVID_data_reconstructed]\r\n Data_file_list = [F_data_file, FR_data_file]\r\n Countries_list = list(COVID_data.keys())[1:]\r\n \r\n for Data_set_inc in range(2): # Each data list (raw and reconstructed) is written in its corresponding file\r\n COVID_data_temp = COVID_data_lists[Data_set_inc]\r\n Data_file_temp = Data_file_list[Data_set_inc]\r\n \r\n Data_file_temp.write('Country;Date;' + ';'.join(COVID_data_temp['_Country']['Date']) + '\\n')\r\n \r\n for Country in Countries_list:\r\n COVID_data_single_country = COVID_data_temp[Country]\r\n 
\r\n Date_list = list(COVID_data[Country].keys())\r\n for Date in Date_list:\r\n COVID_data_single_country_single_date = COVID_data_single_country[Date]\r\n Row_reformatted = ['' if Item == None else str(Item).replace('.', ',') for Item in COVID_data_single_country_single_date] # None elements are replaced by empty strings because an empty cell is better to see that there is no data in excel rather than None\r\n \r\n Data_file_temp.write('%s;%s;' % (Country, Date))\r\n Data_file_temp.write(';'.join(str(Item) for Item in Row_reformatted))\r\n Data_file_temp.write('\\n')\r\n \r\n Data_file_temp.close()", "def write_data(qids, conditions, outputs, data_path):\n data_set = pd.DataFrame(list(zip(qids, conditions, outputs)),\n columns=[\"QID\", \"CONDITION\", \"OUTPUT\"])\n data_set.to_excel(data_path)", "def save_dataframe_to_excel(df,path,filename):\n \n path_and_file_name = path.joinpath('output',filename)\n df.to_csv(path_or_buf = path_and_file_name, sep=';',index=False)", "def create_xlsx(request):\n\n date_dict = income_date_parser(request)\n\n income_history = get_incomes_funds_ids(user_id=date_dict['user_id'],\n date_start=date_dict['start_date'],\n date_end=date_dict['finish_date'],\n time_diff=date_dict['utc_difference'])\n del income_history[-1]\n\n output, worksheet, workbook, formats_dict = creating_empty_xlsx_file()\n\n if income_history:\n head_row, head_col = 1, 1\n row, col = 2, 1\n for i in income_history[0]:\n if i != 'income_history_id':\n worksheet.write(head_row, head_col, i, formats_dict['head_format'])\n head_col += 1\n\n for history_dict in income_history:\n worksheet.write(row, col, history_dict['income'], formats_dict['cell_format'])\n worksheet.write(row, col + 1, history_dict['fund'], formats_dict['cell_format'])\n date = datetime.datetime.strptime(history_dict['date'], \"%Y-%m-%d\")\n worksheet.write_datetime(row, col + 2, date, formats_dict['date_format'])\n worksheet.write_number(row, col + 3, history_dict['amount'],\n formats_dict['value_format'])\n worksheet.write(row, col + 4, history_dict['comment'], formats_dict['cell_format'])\n col, row = 1, row + 1\n\n workbook.close()\n\n response = file_streaming_response \\\n ('application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'income_history.xlsx', output)\n return response", "def convert_to_an_excel_sheet(app, trsfrm_no, sfilname, srcfil_delim_char, dest_fname, temp_fname):\n\n global progress\n\n # #delete files found in download directory\n # for dirpath, dirname, files in os.walk(app.config[\"DOWNLOAD_FOLDER\"]):\n # print(dirpath, dirname, files)\n # for filename in files:\n # try:\n # os.remove(os.path.join(dirpath, filename))\n # except Exception as e:\n # print(str(e))\n\n dest_file = os.path.join(app.config[\"DOWNLOAD_FOLDER\"], dest_fname)\n dest_wb = Workbook(dest_file, {'strings_to_numbers': True, 'constant_memory': True})\n sheet_name = f\"file1\"\n dest_ws = dest_wb.add_worksheet(name=sheet_name)\n\n src_file = os.path.join(app.config[\"UPLOAD_FOLDER\"], sfilname)\n\n with open(src_file, mode=\"r\") as filhdlr:\n for idx, _ in enumerate(filhdlr):\n pass\n\n total_rows = idx + 1\n\n percent_1 = False\n percent_5 = False\n percent_10 = False\n percent_20 = False\n percent_30 = False\n percent_40 = False\n percent_50 = False\n percent_60 = False\n percent_70 = False\n percent_80 = False\n percent_90 = False\n percent_100 = False\n\n with open(src_file, mode=\"r\") as filhdlr:\n csvReader = csv.reader(filhdlr, delimiter=srcfil_delim_char)\n for idx1, row in enumerate(csvReader):\n\n 
percent_1, percent_5, percent_10, percent_20, percent_30, percent_40, percent_50, \\\n percent_60, percent_70, percent_80, percent_90, percent_100 = determine_progress_value(idx1, total_rows,\n percent_1, percent_5,\n percent_10,\n percent_20,\n percent_30,\n percent_40,\n percent_50, \\\n percent_60,\n percent_70,\n percent_80,\n percent_90,\n percent_100)\n\n for idx2, value in enumerate(row):\n dest_ws.write(idx1, idx2, value)\n\n dest_wb.close()\n\n for idx2, inst in enumerate(progress):\n if trsfrm_no in inst.keys():\n progress[idx2][trsfrm_no][\"currVal\"] = 100\n\n # delete the uploaded file\n try:\n os.remove(os.path.join(app.config[\"UPLOAD_FOLDER\"], sfilname))\n except Exception as e:\n print(str(e))\n\n return", "def toExcel(self, outFileName):\n workbook = Workbook(outFileName, {'constant_memory': True})\n workbook.use_zip64() # allow large size Excels just in case\n\n wks = workbook.add_worksheet('Distribution Fitting')\n hdrFmt = workbook.add_format({'bold' : True,\n 'underline' : True,\n 'align' : 'center'})\n resultFormats = [workbook.add_format({'num_format' : fmtStr}) \\\n for fmtStr in ['0.000000', '0.0000%']]\n\n row = 0\n wks.set_column(0, 0, 11)\n wks.set_column(1, 1, 8, resultFormats[0])\n wks.set_column(2, 2, 10.6, resultFormats[1])\n for col, headerName in enumerate(self.getHeaderList()):\n wks.write_string(row, col, headerName, hdrFmt)\n\n for distrName, (results, params) in self.result.iteritems():\n row += 1\n col = 0\n wks.write_string(row, col, distrName)\n for col, (result, outFormat) in \\\n enumerate(itertools.izip(results, resultFormats), col+1):\n wks.write_number(row, col, result, outFormat)\n for col, paramValue in enumerate(params, col+1):\n wks.write_number(row, col, paramValue)\n\n workbook.close()", "def writeToFile(self):\n self.dto.writeToCsv()\n print(\"File written.\")", "def export_data(self):\n return self.export_all_data()", "def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)", "def export_rep(name):\r\n attendance_list = read_rep()\r\n try:\r\n with open(name + '.csv', 'w', newline='') as file:\r\n writer = csv.writer(file)\r\n # makes table in Excel by employee and attendance dates\r\n writer.writerow([\"Employee\", \"Attendance\"])\r\n for worker in attendance_list:\r\n count = 0\r\n for date in worker[1]:\r\n if not count:\r\n # first date needs to add name of worker\r\n writer.writerow([worker[0], date])\r\n count += 1\r\n # write only date\r\n else:\r\n writer.writerow(['', date])\r\n print(\"csv file made\")\r\n return attendance_list\r\n except PermissionError:\r\n print(\"file is opened, please close and try again\")\r\n return attendance_list", "def write2file(self, save_to):\n headerstyle = xlwt.easyxf(self.header_style.get_style_string())\n missing_val_style = xlwt.easyxf(\n self.missing_value_style.get_style_string())\n row_styles = [xlwt.easyxf(self.first_style.get_style_string()),\n xlwt.easyxf(self.second_style.get_style_string())]\n\n properties, sections, table = self._build_table()\n\n workbook = xlwt.Workbook()\n sheet = workbook.add_sheet(self.sheet_name)\n\n if os.path.splitext(save_to)[-1] == '':\n save_to += '.xls'\n\n max_col_len = []\n\n if (self.switch):\n\n for i, prop in 
enumerate([''] + properties):\n sheet.write(0, i, prop, headerstyle)\n max_col_len.append(len(str(prop)))\n\n for row_num, sec in enumerate(sections):\n sheet.write(row_num + 1, 0, sec, headerstyle)\n if len(str(sec)) > max_col_len[0]:\n max_col_len[0] = len(str(sec))\n\n for row_num, row in enumerate(table):\n for col_num, elem in enumerate(row):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n else:\n\n for i, sec in enumerate([''] + sections):\n sheet.write(0, i, sec, headerstyle)\n max_col_len.append(len(str(sec)))\n\n for row_num, prop in enumerate(properties):\n sheet.write(row_num + 1, 0, prop, headerstyle)\n if len(str(prop)) > max_col_len[0]:\n max_col_len[0] = len(str(prop))\n\n for col_num, col in enumerate(table):\n for row_num, elem in enumerate(col):\n\n if elem is None:\n style = missing_val_style\n cell_content = \"\"\n else:\n style = row_styles[row_num % 2]\n cell_content = elem\n\n if isinstance(cell_content, datetime.datetime):\n style.num_format_str = \"DD-MM-YYYY HH:MM:SS\"\n elif isinstance(cell_content, datetime.date):\n style.num_format_str = \"DD-MM-YYYY\"\n elif isinstance(cell_content, datetime.time):\n style.num_format_str = \"HH:MM:SS\"\n else:\n style.num_format_str = \"\"\n\n sheet.write(row_num + 1, col_num + 1, cell_content, style)\n if len(str(cell_content)) > max_col_len[col_num+1]:\n max_col_len[col_num+1] = len(str(cell_content))\n\n # adjust width of he columns\n for col_id, col_len in enumerate(max_col_len):\n sheet.col(col_id).width = (256 * (col_len+1))\n\n workbook.save(save_to)", "def career_teachers_excel(self, request):\n\n # Get the career to be processed their results.\n career_id = request.GET.get('career_id', '')\n career = EvaluationsCareer.objects.get(pk__exact=career_id)\n\n # Get the results for each esignature of the carrer en each exam.\n data = self.get_career_results(career)\n\n # Generates the CSV with the results of the career,then return as downloadable file.\n response = self.get_teacher_results_excel(data)\n return response", "def test_export(self):\n\n # create fake data\n worksheet_name = \"chicken_woot\"\n mock_stores = {\n \"field_list\": [\n \"Company Name\", \"State\", \"City\", \"Trade Area\", \"Population (000)\", \"Per Capita Income ($)\", \"Aggregate Income ($M)\", \"Households (000)\",\n \"< $15K (000)\", \"$15-25K (000)\", \"$25-35K (000)\", \"$35-50K (000)\", \"$50-75K (000)\", \"$75-100K (000)\", \"$100-150K (000)\", \"$150-200K (000)\", \"$200K+ (000)\",\n \"Store ID\", \"Street Number\", \"Street\", \"Suite\", \"Zip Code\", \"Phone Number\", \"Store Opened\", \"Store Closed\", \"Company ID\", \"Trade Area ID\"\n ],\n \"results\": [\n [\"test company 1\", \"state\", \"city\", \"10 Mile Circle\", 142695, 25644, 999999999, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 2, \"street_number\", \"street\", \"suite\", \"zip\", \"phone\", \"2012-01-01\", None, 1, 1],\n [\"test company 1\", \"state\", \"city\", \"10 Mile Circle\", 142695, 25644, 
999999999, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000, 3, \"street_number\", \"street\", \"suite\", \"zip\", \"phone\", \"2012-01-15\", \"2013-01-01\", 1, 2]\n ]\n }\n\n # create various mock objects\n mock_workbook = self.mox.CreateMockAnything()\n mock_sheet = self.mox.CreateMockAnything()\n mock_row = self.mox.CreateMockAnything()\n\n # create exporter\n exporter = ExcelStoreExporter(mock_stores, worksheet_name, self.mock_logger)\n\n # stub various methods/classes\n self.mox.StubOutWithMock(xlwt, \"Workbook\")\n self.mox.StubOutWithMock(exporter, \"_track_max_character\")\n self.mox.StubOutWithMock(exporter, \"_set_auto_widths\")\n\n # ------------- Begin Recording (long) -------------\n\n # create worksheet and workbook\n xlwt.Workbook().AndReturn(mock_workbook)\n mock_workbook.add_sheet(worksheet_name).AndReturn(mock_sheet)\n\n # add all headers (skip those that should be skipped)\n mock_sheet.write(0, 0, \"Company Name\", IsA(xlwt.XFStyle))\n exporter._track_max_character(0, \"Company Name\")\n mock_sheet.write(0, 1, \"State\", IsA(xlwt.XFStyle))\n exporter._track_max_character(1, \"State\")\n mock_sheet.write(0, 2, \"City\", IsA(xlwt.XFStyle))\n exporter._track_max_character(2, \"City\")\n mock_sheet.write(0, 3, \"Trade Area\", IsA(xlwt.XFStyle))\n exporter._track_max_character(3, \"Trade Area\")\n mock_sheet.write(0, 4, \"Population (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(4, \"Population (000)\")\n mock_sheet.write(0, 5, \"Per Capita Income ($)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(5, \"Per Capita Income ($)\")\n mock_sheet.write(0, 6, \"Aggregate Income ($M)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(6, \"Aggregate Income ($M)\")\n mock_sheet.write(0, 7, \"Households (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(7, \"Households (000)\")\n mock_sheet.write(0, 8, \"< $15K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(8, \"< $15K (000)\")\n mock_sheet.write(0, 9, \"$15-25K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(9, \"$15-25K (000)\")\n mock_sheet.write(0, 10, \"$25-35K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(10, \"$25-35K (000)\")\n mock_sheet.write(0, 11, \"$35-50K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(11, \"$35-50K (000)\")\n mock_sheet.write(0, 12, \"$50-75K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(12, \"$50-75K (000)\")\n mock_sheet.write(0, 13, \"$75-100K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(13, \"$75-100K (000)\")\n mock_sheet.write(0, 14, \"$100-150K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(14, \"$100-150K (000)\")\n mock_sheet.write(0, 15, \"$150-200K (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(15, \"$150-200K (000)\")\n mock_sheet.write(0, 16, \"$200K+ (000)\", IsA(xlwt.XFStyle))\n exporter._track_max_character(16, \"$200K+ (000)\")\n mock_sheet.write(0, 17, \"Street Number\", IsA(xlwt.XFStyle))\n exporter._track_max_character(17, \"Street Number\")\n mock_sheet.write(0, 18, \"Street\", IsA(xlwt.XFStyle))\n exporter._track_max_character(18, \"Street\")\n mock_sheet.write(0, 19, \"Suite\", IsA(xlwt.XFStyle))\n exporter._track_max_character(19, \"Suite\")\n mock_sheet.write(0, 20, \"Zip Code\", IsA(xlwt.XFStyle))\n exporter._track_max_character(20, \"Zip Code\")\n mock_sheet.write(0, 21, \"Phone Number\", IsA(xlwt.XFStyle))\n exporter._track_max_character(21, \"Phone Number\")\n mock_sheet.write(0, 22, \"Store Opened\", IsA(xlwt.XFStyle))\n 
exporter._track_max_character(22, \"Store Opened\")\n mock_sheet.write(0, 23, \"Store Closed\", IsA(xlwt.XFStyle))\n exporter._track_max_character(23, \"Store Closed\")\n\n # write down all the fields from each row (skip those fields that should be skipped)\n mock_sheet.row(1).AndReturn(mock_row)\n mock_row.set_cell_text(0, \"test company 1\")\n exporter._track_max_character(0, \"test company 1\")\n mock_row.set_cell_text(1, \"state\")\n exporter._track_max_character(1, \"state\")\n mock_row.set_cell_text(2, \"city\")\n exporter._track_max_character(2, \"city\")\n mock_row.set_cell_text(3, \"10 Mile Circle\")\n exporter._track_max_character(3, \"10 Mile Circle\")\n mock_row.set_cell_number(4, 142.695, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(4, 142695)\n mock_row.set_cell_number(5, 25644, exporter.dollar_style)\n exporter._track_max_character(5, 25644)\n mock_row.set_cell_number(6, 999.999999, exporter.dollar_style)\n exporter._track_max_character(6, 999999999)\n mock_row.set_cell_number(7, 5.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(7, 5000)\n mock_row.set_cell_number(8, 6.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(8, 6000)\n mock_row.set_cell_number(9, 7.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(9, 7000)\n mock_row.set_cell_number(10, 8.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(10, 8000)\n mock_row.set_cell_number(11, 9.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(11, 9000)\n mock_row.set_cell_number(12, 10.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(12, 10000)\n mock_row.set_cell_number(13, 11.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(13, 11000)\n mock_row.set_cell_number(14, 12.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(14, 12000)\n mock_row.set_cell_number(15, 13.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(15, 13000)\n mock_row.set_cell_number(16, 14.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(16, 14000)\n mock_row.set_cell_text(17, \"street_number\")\n exporter._track_max_character(17, \"street_number\")\n mock_row.set_cell_text(18, \"street\")\n exporter._track_max_character(18, \"street\")\n mock_row.set_cell_text(19, \"suite\")\n exporter._track_max_character(19, \"suite\")\n mock_row.set_cell_text(20, \"zip\")\n exporter._track_max_character(20, \"zip\")\n mock_row.set_cell_text(21, \"phone\")\n exporter._track_max_character(21, \"phone\")\n mock_row.set_cell_text(22, \"2012-01-01\")\n exporter._track_max_character(22, \"2012-01-01\")\n exporter._track_max_character(23, \" \")\n\n # second row\n mock_sheet.row(2).AndReturn(mock_row)\n mock_row.set_cell_text(0, \"test company 1\")\n exporter._track_max_character(0, \"test company 1\")\n mock_row.set_cell_text(1, \"state\")\n exporter._track_max_character(1, \"state\")\n mock_row.set_cell_text(2, \"city\")\n exporter._track_max_character(2, \"city\")\n mock_row.set_cell_text(3, \"10 Mile Circle\")\n exporter._track_max_character(3, \"10 Mile Circle\")\n mock_row.set_cell_number(4, 142.695, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(4, 142695)\n mock_row.set_cell_number(5, 25644, exporter.dollar_style)\n exporter._track_max_character(5, 25644)\n mock_row.set_cell_number(6, 999.999999, exporter.dollar_style)\n 
exporter._track_max_character(6, 999999999)\n mock_row.set_cell_number(7, 5.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(7, 5000)\n mock_row.set_cell_number(8, 6.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(8, 6000)\n mock_row.set_cell_number(9, 7.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(9, 7000)\n mock_row.set_cell_number(10, 8.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(10, 8000)\n mock_row.set_cell_number(11, 9.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(11, 9000)\n mock_row.set_cell_number(12, 10.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(12, 10000)\n mock_row.set_cell_number(13, 11.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(13, 11000)\n mock_row.set_cell_number(14, 12.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(14, 12000)\n mock_row.set_cell_number(15, 13.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(15, 13000)\n mock_row.set_cell_number(16, 14.000, exporter.thousands_1_decimal_place_style)\n exporter._track_max_character(16, 14000)\n mock_row.set_cell_text(17, \"street_number\")\n exporter._track_max_character(17, \"street_number\")\n mock_row.set_cell_text(18, \"street\")\n exporter._track_max_character(18, \"street\")\n mock_row.set_cell_text(19, \"suite\")\n exporter._track_max_character(19, \"suite\")\n mock_row.set_cell_text(20, \"zip\")\n exporter._track_max_character(20, \"zip\")\n mock_row.set_cell_text(21, \"phone\")\n exporter._track_max_character(21, \"phone\")\n mock_row.set_cell_text(22, \"2012-01-15\")\n exporter._track_max_character(22, \"2012-01-15\")\n mock_row.set_cell_text(23, \"2013-01-01\")\n exporter._track_max_character(23, \"2013-01-01\")\n\n # set auto widths\n exporter._set_auto_widths(mock_sheet)\n\n # ------------- End Recording (long) -------------\n\n\n # replay all\n self.mox.ReplayAll()\n\n # go!\n workbook = exporter.get_excel_workbook()\n\n # make sure workbook is the excel workbook\n self.assertEqual(workbook, mock_workbook)", "def export(self, outdir=os.getcwd(), filename='biogridpy_response'):\r\n\r\n suffix = self.output_format\r\n \r\n #json out includes headers in response\r\n if (self.output_format == 'json' or\r\n self.output_format == 'jsonExtended'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix)\r\n try:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify2(self.result), outfile)\r\n except AttributeError:\r\n with open(filepath, 'w') as outfile:\r\n json.dump(self._byteify3(self.result), outfile)\r\n #tab out need to add headers\r\n elif (self.output_format == 'tab2' or\r\n self.output_format == 'extendedTab2' or\r\n self.output_format == 'tab1'):\r\n filepath = os.path.join(outdir, filename + \".\" + suffix + \".txt\")\r\n with open(filepath, 'w') as outfile:\r\n outfile.write('#' + '\\t'.join(self.headers))\r\n outfile.write(self.result)", "def export_data_and_class_df_to_excel(data_df, class_df, excel_filename=None):\n from pandas import ExcelWriter\n print \"==========start exporting data and class dataframe to excel================\"\n if excel_filename == None:\n session = class_df.ix[0,s_info.session_col]\n sensor = class_df.ix[0, s_info.sensor_col]\n print \"session: %d, sensor: %s\" % (session, sensor)\n excel_filename = s_info.feature_dataset_folder + \"/session\" + str(session) + \"_\" 
+ sensor + \".feature.xlsx\"\n writer = ExcelWriter(excel_filename)\n data_df.to_excel(writer, sheet_name=\"data(features)\")\n class_df.to_excel(writer, sheet_name=\"class(other information)\")\n writer.save()\n print excel_filename + \" exported\"\n return excel_filename", "def create_output_file(self):\r\n self.output_file = openpyxl.Workbook()", "def export_outstanding_fires(request, region_id, queryset):\n #regions = Region.objects.filter(id=region_id) if region_id else Region.objects.all()\n regions = Region.objects.filter(id=region_id) if region_id else Region.objects.filter(dbca=True)\n region_name = regions[0].name if region_id else 'All-Regions'\n\n rpt_date = datetime.now()\n filename = 'outstanding_fires_{}_{}.xls'.format(region_name, rpt_date.strftime('%d%b%Y'))\n response = HttpResponse(content_type='application/vnd.ms-excel')\n response['Content-Disposition'] = 'attachment; filename=' + filename\n\n book = Workbook()\n for region in regions:\n outstanding_fires(book, region, queryset, rpt_date)\n\n book.add_sheet('Sheet 2')\n book.save(response)\n\n return response", "def save_xls(self,basepath=''): \n self.generate_xls()\n self.wb.save(basepath+self.filename+'.xls')", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def append_data2xls(filename, table_name, data):\n r_xls = xlrd.open_workbook(filename)\n r_sheet = r_xls.sheet_by_name(table_name)\n end_row_number = r_sheet.nrows\n w_xls = copy(r_xls)\n sheet_write = w_xls.get_sheet(0)\n\n for i, row in enumerate(data):\n for j, value in enumerate(row):\n sheet_write.write(end_row_number + i, j, value)\n w_xls.save(filename)", "def export_any_dataset(request, *fields, queryset, filename, csv_field_delimiter = \";\"):\n\n name, extension = os.path.splitext(filename)\n file_format = extension[1:]\n headers, rows = render_queryset_as_data(*fields, queryset=queryset)\n\n output = None\n if file_format == 'csv':\n content_type = 'text/csv'\n output = io.StringIO()\n writer = csv.writer(output, delimiter=csv_field_delimiter, quoting=csv.QUOTE_MINIMAL)\n\n writer.writerow(headers)\n for row in rows:\n writer.writerow(row)\n\n elif file_format == 'xlsx':\n content_type = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n #content_type = 'application/vnd.ms-excel'\n output = io.BytesIO()\n with open_xlsx_file(output) as writer:\n\n writer.write_headers_from_strings(headers)\n for row in rows:\n writer.writerow(row)\n writer.apply_autofit()\n\n assert writer.is_closed()\n else:\n raise Exception('Wrong export file format \"%s\"' % file_format)\n\n # send \"output\" object to stream with mimetype and filename\n assert output is not None\n output.seek(0)\n # response = HttpResponse(\n # output.read(),\n response = StreamingHttpResponse(\n output,\n content_type=content_type,\n )\n #response['Content-Disposition'] = 'inline; filename=\"%s\"' % filename\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n\n return response", "def CCF_toExcel(self, data_set, ccf_inputs):\n file_name = self.file_path(target_filename=\"LEICode_CCF_ModelID_EndOfObservationPeriod_versionNumber.xlsx\")\n oxl = 
openpyxl.load_workbook(file_name)\n\n # Information missing from test results:\n start_date\t = datetime.date(2007, 1, 1)\n end_date\t = datetime.date(2015, 1, 1)\n nb_customer = len(data_set.id.unique())\n grade_nb = data_set.Bin_CCF.unique()\n grade_name = []\n grade_counts = []\n avCCFE_perGrade = []\n avCCFR_perGrade = []\n minCCFR_perGrade = []\n maxCCFR_perGrade = []\n q5CCFR_perGrade = []\n q10CCFR_perGrade = []\n q25CCFR_perGrade = []\n q50CCFR_perGrade = []\n q75CCFR_perGrade = []\n q90CCFR_perGrade = []\n q95CCFR_perGrade = []\n for g in range(1, len(grade_nb) + 1):\n grade_name.append( self.grade_mapping(grade_num = g) )\n grade_counts.append( data_set[data_set.Default_Binary == 1][\"Bin_CCF\"].value_counts()[g] )\n avCCFE_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF.mean()[g] )\n avCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.mean()[g] )\n minCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.min()[g])\n maxCCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.max()[g])\n q5CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.05)[g])\n q10CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.10)[g])\n q25CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.25)[g])\n q50CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.50)[g])\n q75CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.75)[g])\n q90CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.90)[g])\n q95CCFR_perGrade.append( data_set.groupby(\"Bin_CCF\").CCF_realised.quantile(0.95)[g])\n\n bcktesting_ccf_ptf = [\"N/A\", #Name of facility grade/pool or segment\n len(data_set.id.unique()), # Number of facilities (R)\n data_set.CCF.mean(), # Average estimated CCF (CCF^E)\n data_set.CCF_realised.mean(), # Average realised CCF (CCF^R)\n 0.0, # Floor used (if applicable)\n 0.0, # Number of CCF realisations floored\n data_set.CCF_realised.min(), # Minimum CCF^R\n data_set.CCF_realised.quantile(0.05), # Quantiles\n data_set.CCF_realised.quantile(0.10), #\n data_set.CCF_realised.quantile(0.25), #\n data_set.CCF_realised.quantile(0.50), #\n data_set.CCF_realised.quantile(0.75), #\n data_set.CCF_realised.quantile(0.90), #\n data_set.CCF_realised.quantile(0.95), #\n data_set.CCF_realised.max(), # Maximum CCF^R\n 0 # Exposure-weighted average of CCF^R (to be created)\n ]\n\n # Predictive ability\n ## CCF back-testing using a t-test (§ 2.9.3.1) - sheet 3.1\n wbk31 = oxl.get_sheet_by_name(\"3.1\")\n # Grade Lvl\n self.array_toExcel(wb=wbk31, stat_array = grade_name, row_pos=10, col_pos=4, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = grade_counts, row_pos=10, col_pos=5, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFE_perGrade, row_pos=10, col_pos=6, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array = avCCFR_perGrade, row_pos=10, col_pos=7, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=8, row_wise=True) # Floor used (if applicable)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=9, row_wise=True) # Number of CCF realisations floored\n self.array_toExcel(wb=wbk31, stat_array= minCCFR_perGrade, row_pos=10, col_pos=10, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= maxCCFR_perGrade, row_pos=10, col_pos=18, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array=[0] * 7, row_pos=10, col_pos=19, row_wise=True) # Exposure-weighted average of 
CCF^R (to be created)\n self.array_toExcel(wb=wbk31, stat_array= q5CCFR_perGrade, row_pos=10, col_pos=11, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q10CCFR_perGrade, row_pos=10, col_pos=12, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q25CCFR_perGrade, row_pos=10, col_pos=13, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q50CCFR_perGrade, row_pos=10, col_pos=14, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q75CCFR_perGrade, row_pos=10, col_pos=15, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q90CCFR_perGrade, row_pos=10, col_pos=16, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= q95CCFR_perGrade, row_pos=10, col_pos=17, row_wise=True)\n self.array_toExcel(wb=wbk31, stat_array= [0] * 7, row_pos=10, col_pos=23, row_wise=True) # Number of facilities excluded due to outlier handling (set to zero)\n\n # Ptf Lvl\n self.df_toExcel(wb=wbk31, df = pd.DataFrame(ccf_inputs[\"predictive_ability\"][1]).T, row_pos=10, col_pos=20)\n self.array_toExcel(wb=wbk31, stat_array=ccf_inputs[\"predictive_ability\"][0], row_pos=8, col_pos=20, row_wise=False)\n self.array_toExcel(wb=wbk31, stat_array=bcktesting_ccf_ptf, row_pos=8, col_pos=4, row_wise=False)\n wbk31.cell(row=8, column=23).value = 0 # Number of facilities excluded due to outlier handling\n\n # Discriminatory Power\n ## Current gAUC vs gAUC at initial validation/development (§ 2.9.3.1) - sheet 4.0\n wbk40 = oxl.get_sheet_by_name(\"4.0\")\n self.array_toExcel(wb=wbk40, stat_array=ccf_inputs[\"AUC\"][:-1], row_pos=7, col_pos=4, row_wise=False)\n wbk40.cell(row= 7, column= 10).value = start_date # start date\n wbk40.cell(row=7, column=11).value = end_date # end date\n wbk40.cell(row=7, column=12).value = nb_customer # nb of customers\n wbk40.cell(row=7, column=13).value = ccf_inputs[\"AUC\"][-1] # Variance (gAUC_init)\n\n # Save file\n oxl.save(file_name)\n oxl.close()\n return \"CCF results saved to Excel.\"", "def click_vendor_price_list_detail_reference_rates_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.vendor_price_list_detail_reference_rates_grid_div_id)", "def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')", "def export_16(text_col, processed_col, input_filepath,\n output_filepath, country):\n processed_list_8 = process_text(text_col, processed_col, input_filepath)\n processed_list_16 = []\n for name in processed_list_8:\n name, _ = split_half(name)\n processed_list_16.append(name)\n processed_list_16.append(_)\n\n for i in range(len(processed_list_16)):\n processed_list_16[i].to_excel(output_filepath +\n country + '_processed_' +\n str(i+1) + '.xlsx',\n index=False)\n return True", "def export_data(fp, app_name):\n from otree.views.admin import get_display_table_rows\n colnames, rows = get_display_table_rows(\n app_name, for_export=True, subsession_pk=None)\n colnames = ['{}.{}'.format(k, v) for k, v in colnames]\n writer = csv.writer(fp)\n writer.writerows([colnames])\n writer.writerows(rows)", "def download_excel(restaurant_id):\n raw_data = get_menu_items_based_on_restaurant(restaurant_id=restaurant_id)\n csv_file_path = \"{}/file.csv\".format(settings.BASE_DIR)\n static_form = ['name', 'description', 'price', 
'category', 'sub_category']\n with open(csv_file_path, 'w', newline='') as csv_file:\n writer = csv.DictWriter(csv_file, fieldnames=static_form)\n writer.writeheader()\n writer.writerows(raw_data['itemsList'])\n csv_file.close()\n return csv_file_path", "def click_re_analysis_grid_export_to_excel_button(self):\n self.click_grid_export_to_excel_button(self.re_analysis_grid_div_id)", "def main():\r\n\r\n # runs function to create df in order for it to be writen to excel\r\n os.chdir(directory_link_2016)\r\n return_data_2016 = whole_data(file_names_list_2016)\r\n os.chdir(directory_link_2017)\r\n return_data_2017 = whole_data(file_names_list_2017)\r\n os.chdir(directory_link_2018)\r\n return_data_2018 = whole_data(file_names_list_2018)\r\n # creates excel to be writen\r\n writer = pd.ExcelWriter(results_directory)\r\n # writes excel file with df \r\n return_data_2016.to_excel(writer,'2016')\r\n return_data_2017.to_excel(writer,'2017')\r\n return_data_2018.to_excel(writer,'2018')\r\n writer.save()", "def save_new_excel_data(df, req_file_name, sheet):\r\n try:\r\n # select rows for a specific column and save a excel file\r\n dtc_table_ext = ['SW_DTC', 'Diagnosis_IDENTIFIER', 'Symptom', 'SW_Module', 'ISO_Pcode',\r\n 'Cust_Pcode', 'ScanT_Pcode', 'Description', 'Lamp_Manager', 'EPC_Lamp',\r\n 'SnapShot', 'MIL_FUEL_CONF', 'Diagnosis_Enabled', 'Diagnosis_presence',\r\n 'Severity', 'Priority', 'Diag_Call_task', 'Diag_Validation', 'Unit',\r\n 'Diag_DeValidation', 'DTC_available', 'EPC', 'MIL_FuelConf_bit1',\r\n 'MIL_FuelConf_bit0', 'Lamp_Manager_bit2', 'Lamp_Manager_bit1', 'Lamp_Manager_bit0',\r\n 'AUTOyyy', 'Prio_bit3', 'Prio_bit2', 'Prio_bit1', 'Prio_bit0',\r\n 'Snapshot_bit2', 'Snapshot_bit1', 'Snapshot_bit0', 'empty', 'ETC_highbit', 'ETC_lowbit']\r\n # Save df_all_cols extracted to a new excel file\r\n file_to_save = sheet+'_'+req_file_name\r\n with pd.ExcelWriter(file_to_save) as writer: # for writing more than 1 sheet\r\n df.to_excel(writer, sheet_name=sheet, index=False)\r\n # df.to_excel(writer, sheet_name=sheet, columns=dtc_table_ext, index=False)\r\n except PermissionError:\r\n print('DEBUG-->save_new_excel_data: exception raised: ', sys.exc_info())", "def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")", "def export_all_to_excel(input_hdf5, out_directory_path):\n data_store = pd.HDFStore(input_hdf5) # Opening the HDF5 file\n for each_key in data_store.keys():\n data_store[each_key].to_excel(out_directory_path + each_key + \".xlsx\")\n # '/' missing between folder name and\n # file name because file name already includes it.\n data_store.close()\n\n print(\"-- Dataframes written to Excel files (.xlsx) --\")", "def export_html(self, model_view='gapd'):\n '''\n <?xml version=\"1.0\" ?>\n <ROWSET>\n <ROW>\n <SURVEYID>921</SURVEYID>\n <SURVEYNAME>Goomalling, WA, 1996</SURVEYNAME>\n <STATE>WA</STATE>\n <OPERATOR>Stockdale Prospecting Ltd.</OPERATOR>\n <CONTRACTOR>Kevron Geophysics Pty Ltd</CONTRACTOR>\n <PROCESSOR>Kevron Geophysics Pty Ltd</PROCESSOR>\n <SURVEY_TYPE>Detailed</SURVEY_TYPE>\n <DATATYPES>MAG,RAL,ELE</DATATYPES>\n <VESSEL>Aero Commander</VESSEL>\n <VESSEL_TYPE>Plane</VESSEL_TYPE>\n <RELEASEDATE/>\n <ONSHORE_OFFSHORE>Onshore</ONSHORE_OFFSHORE>\n <STARTDATE>05-DEC-96</STARTDATE>\n <ENDDATE>22-DEC-96</ENDDATE>\n <WLONG>116.366662</WLONG>\n <ELONG>117.749996</ELONG>\n <SLAT>-31.483336</SLAT>\n <NLAT>-30.566668</NLAT>\n 
<LINE_KM>35665</LINE_KM>\n <TOTAL_KM/>\n <LINE_SPACING>250</LINE_SPACING>\n <LINE_DIRECTION>180</LINE_DIRECTION>\n <TIE_SPACING/>\n <SQUARE_KM/>\n <CRYSTAL_VOLUME>33.6</CRYSTAL_VOLUME>\n <UP_CRYSTAL_VOLUME>4.2</UP_CRYSTAL_VOLUME>\n <DIGITAL_DATA>MAG,RAL,ELE</DIGITAL_DATA>\n <GEODETIC_DATUM>WGS84</GEODETIC_DATUM>\n <ASL/>\n <AGL>60</AGL>\n <MAG_INSTRUMENT>Scintrex CS2</MAG_INSTRUMENT>\n <RAD_INSTRUMENT>Exploranium GR820</RAD_INSTRUMENT>\n </ROW>\n </ROWSET>\n '''\n if model_view == 'prov':\n prov_turtle = self.export_rdf('prov', 'text/turtle')\n g = Graph().parse(data=prov_turtle, format='turtle')\n\n view_html = render_template(\n 'survey_prov.html',\n visjs=self._make_vsjs(g),\n prov_turtle=prov_turtle,\n )\n else: # model_view == 'gapd':\n view_html = render_template(\n 'survey_gapd.html',\n survey_no=self.survey_no,\n survey_name=self.survey_name,\n state=self.state,\n operator=self.operator,\n contractor=self.contractor,\n processor=self.processor,\n survey_type=self.survey_type,\n data_types=self.data_types,\n vessel=self.vessel,\n vessel_type=self.vessel_type,\n release_date=self.release_date,\n onshore_offshore=self.onshore_offshore,\n start_date=self.start_date,\n end_date=self.end_date,\n line_km=self.line_km,\n total_km=self.total_km,\n line_spacing=self.line_spacing,\n line_direction=self.line_direction,\n tie_spacing=self.tie_spacing,\n area=self.square_km,\n crystal_volume=self.crystal_volume,\n up_crystal_volume=self.up_crystal_volume,\n digital_data=self.digital_data,\n geodetic_datum=self.geodetic_datum,\n asl=self.asl,\n agl=self.agl,\n mag_instrument=self.mag_instrument,\n rad_instrument=self.rad_instrument,\n wkt_polygon=self.wkt_polygon\n )\n\n return render_template(\n 'page_survey.html',\n view_html=view_html,\n survey_no=self.survey_no,\n end_date=self.end_date,\n survey_type=self.survey_type,\n date_now=datetime.now().strftime('%Y-%m-%d'),\n centroid_lat=self.centroid_lat,\n centroid_lon=self.centroid_lon,\n n_lat=self.n_lat,\n s_lat=self.s_lat,\n w_long=self.w_long,\n e_long=self.e_long,\n gm_key=config.GOOGLE_MAPS_API_KEY\n )" ]
[ "0.6996788", "0.6588683", "0.65867573", "0.6548197", "0.6542862", "0.6455031", "0.63961285", "0.6389617", "0.632453", "0.6319494", "0.628004", "0.62699443", "0.6260905", "0.6237417", "0.62198824", "0.62193406", "0.6173042", "0.6167193", "0.61626554", "0.6151517", "0.61514807", "0.6143776", "0.6138488", "0.61274874", "0.6111345", "0.6094867", "0.6091024", "0.607275", "0.60527354", "0.6052335", "0.60361594", "0.60210395", "0.6014802", "0.598301", "0.595244", "0.59330237", "0.5921268", "0.5920554", "0.5920554", "0.58850664", "0.58795714", "0.58747905", "0.58672243", "0.5866318", "0.58630514", "0.5862433", "0.5843601", "0.5842042", "0.5830154", "0.5822546", "0.5818985", "0.5816501", "0.579066", "0.57894254", "0.5783047", "0.57647365", "0.5762835", "0.57586235", "0.575769", "0.5755089", "0.57404083", "0.5736755", "0.5727207", "0.5725976", "0.569864", "0.5697568", "0.5695147", "0.5684036", "0.56803334", "0.56755024", "0.56610274", "0.56397974", "0.56323934", "0.562202", "0.5620837", "0.5610212", "0.5601505", "0.55954605", "0.5588862", "0.5577609", "0.55674446", "0.5563262", "0.5560808", "0.55605865", "0.555926", "0.55564845", "0.55545896", "0.5548444", "0.55371886", "0.55289024", "0.5526103", "0.55210197", "0.55178285", "0.5512223", "0.55054915", "0.5497597", "0.54858786", "0.5485348", "0.54807377", "0.54802626", "0.5480171" ]
0.0
-1
Get a yacs CfgNode object with default values for my_project.
def get_cfg_defaults(): # Return a clone so that the defaults will not be altered # This is for the "local variable" use pattern return _C.clone()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_default_config(project):\n return {\n \"breathe_projects\": {\n project: \"./_doxygen/xml\"\n },\n \"breathe_default_project\": project,\n \"exhale_args\": {\n # required arguments\n \"containmentFolder\": \"./api\",\n \"rootFileName\": \"{0}_root.rst\".format(project),\n \"rootFileTitle\": \"``{0}`` Test Project\".format(project),\n \"doxygenStripFromPath\": \"..\",\n # additional arguments\n \"exhaleExecutesDoxygen\": True,\n \"exhaleDoxygenStdin\": \"INPUT = ../include\"\n }\n }", "def get_cfg_defaults():\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern\n return _C.clone()", "def get_cfg_defaults():\r\n # Return a clone so that the defaults will not be altered\r\n # This is for the \"local variable\" use pattern\r\n return _C.clone()", "def get_cfg_defaults():\n # Return a clone so that the defaults will not be altered\n # This is for the \"local variable\" use pattern recommended by the YACS repo.\n # It will be subsequently overwritten with local YAML.\n return __C.clone()", "def get_project_config(project):\n if project in CFG:\n return CFG[project]\n raise RecipeError(f\"Project '{project}' not in config-developer.yml\")", "def project_default(tmp_path):\n from nitpick.constants import NITPICK_STYLE_TOML\n from tests.helpers import ProjectMock, tomlstring\n\n nitpick_style = Path(__file__).parent.parent / NITPICK_STYLE_TOML\n return ProjectMock(tmp_path).pyproject_toml(\n f\"\"\"\n [tool.nitpick]\n style = {tomlstring(nitpick_style)}\n \"\"\"\n )", "def get_project(con):\n try:\n return con.project_read(fq_name=conf.get('default_project', 'UNEXPECTED_VALUE'))\n except:\n log.debug('Unable to find project default-domain, admin:', exc_info=True)\n return None", "def node_config_defaults(self) -> Optional[pulumi.Input['NodeConfigDefaultsArgs']]:\n return pulumi.get(self, \"node_config_defaults\")", "def get_project_config(filepath):\n config_file = Path.joinpath(filepath, \".td.cfg\")\n\n if Path.home() >= filepath:\n return None\n elif Path.exists(config_file):\n return config_file\n else:\n return get_project_config(filepath.parent)", "def project_node():", "def default(cls) -> 'Config':\n parser: configparser.ConfigParser = configparser.ConfigParser()\n parser.read_dict(dict(wpwatcher=Config.DEFAULT_CONFIG))\n return cls.fromparser(parser)", "def get_default_config():\n return _config_schema_to_namespace(_CONFIG_SCHEMA)", "def _get_MindtPy_config():\n CONFIG = ConfigBlock('MindtPy')\n\n _add_common_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_bound_configs(CONFIG)\n _add_roa_configs(CONFIG)\n return CONFIG", "def _determine_default_project(project=None):\n if project is None:\n project = _get_gcd_project()\n\n if project is None:\n project = _helpers._determine_default_project(project=project)\n\n return project", "def get_cfg():\n return defcfg._assert_and_infer_cfg(defcfg._C.clone())", "def _generate_slave_config(project: PyfmuProject):\n return project.project_configuration", "def config(self) -> Optional[pulumi.Input['NodeConfigArgs']]:\n return pulumi.get(self, \"config\")", "def 
antenny_config_make_default(self):\n return self.antenny_config.save_as_default_config()", "def get_project_config():\n if get_project_config.CONFIG is None:\n import json\n with open('project.json') as fp:\n get_project_config.CONFIG = json.load(fp)\n return get_project_config.CONFIG", "def get_cfg():\n return _C.clone()", "def get_default_opts(project_name, **aux_opts):\n # Merge the default options generated by argparse\n opts = parse_args([project_name])\n # Remove inadvertent double definition of project_name\n aux_opts.pop('project', None)\n opts.update(aux_opts)\n opts.setdefault('package', utils.make_valid_identifier(opts['project']))\n opts.setdefault('author', info.username())\n opts.setdefault('email', info.email())\n opts.setdefault('release_date', date.today().strftime('%Y-%m-%d'))\n opts.setdefault('year', date.today().year)\n opts.setdefault('license', 'none')\n opts.setdefault('description', 'Add a short description here!')\n opts.setdefault('url', 'http://...')\n opts.setdefault('version', pyscaffold.__version__)\n opts.setdefault('title',\n '='*len(opts['project']) + '\\n' + opts['project'] + '\\n' +\n '='*len(opts['project']))\n classifiers = ['Development Status :: 4 - Beta',\n 'Programming Language :: Python']\n opts.setdefault('classifiers', utils.list2str(\n classifiers, indent=4, brackets=False, quotes=False, sep=''))\n opts.setdefault('url', 'http://...')\n # Initialize empty list of all requirements\n opts.setdefault('requirements', list())\n opts['namespace'] = utils.prepare_namespace(opts['namespace'])\n if opts['namespace']:\n opts['root_pkg'] = opts['namespace'][0]\n opts['namespace_pkg'] = \".\".join([opts['namespace'][-1],\n opts['package']])\n else:\n opts['root_pkg'] = opts['package']\n opts['namespace_pkg'] = opts['package']\n if opts['update']:\n if not os.path.exists(project_name):\n raise RuntimeError(\n \"Project {project} does not exist and thus cannot be \"\n \"updated!\".format(project=project_name))\n opts = info.project(opts)\n # Reset project name since the one from setup.cfg might be different\n opts['project'] = project_name\n if opts['django']:\n opts['force'] = True\n opts['package'] = opts['project'] # since this is required by Django\n opts['requirements'].append('django')\n if opts['cookiecutter_template']:\n opts['force'] = True\n return opts", "def get_default_config(self):\n return config.read(pathlib.Path(__file__).parent / \"ext.conf\")", "def get_project_config(project_path, use_cache=True):\n\n return get_local_config(project_path, use_cache=use_cache) \\\n or get_user_config(project_path, use_cache=use_cache) \\\n or get_default_config()", "def node_config(self) -> pulumi.Input['NodeConfigArgs']:\n return pulumi.get(self, \"node_config\")", "def get_cfg():\n return _assert_and_infer_cfg(_C.clone())", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def build_default_cfg():\n with open(Daemon.CONFIG_FILEPATH, 'wb') as fo:\n json.dump(Daemon.DEF_CONF, fo, skipkeys=True, ensure_ascii=True, indent=4)\n return 
Daemon.DEF_CONF", "def default(self):\n return self._configs[0] if len(self._configs) else None", "def default(self, stage=False):\n return self._build_config(state='default', stage=stage)", "def get_default_config(self, attr):\n config_val = None\n\n try:\n config_val = getattr(self.config_default, attr)\n except AttributeError:\n pass\n\n return config_val", "def get_default_config(self, attr):\n config_val = None\n\n try:\n config_val = getattr(self.config_default, attr)\n except AttributeError:\n pass\n\n return config_val", "def gclient_config(self):\n cfg = self.m.gclient.make_config()\n soln = cfg.solutions.add()\n soln.name = 'chromite'\n soln.url = self.chromite_url\n # Set the revision using 'bot_update' remote branch:revision notation.\n # Omitting the revision uses HEAD.\n soln.revision = '%s:' % (self.c.chromite_branch,)\n return cfg", "def getcfg(self, key, default=None):\n return self._config.get(key, default)", "def bootstrap_default():\n\treturn default_configuration", "def get_default_org(self):\n for org in self.list_orgs():\n org_config = self.get_org(org)\n if org_config.default:\n return org, org_config\n return None, None", "def GetNodeConfig():\n obj = ndb.Key(NodeConfig, NODE_CONFIG_ID).get()\n if not obj:\n obj = NodeConfig(id=NODE_CONFIG_ID)\n obj.put()\n return obj", "def default_configfile(self):\r\n config = None\r\n for path in self.searchpaths:\r\n if os.path.exists(path):\r\n config = path\r\n break\r\n if config is None and self.require_configfile:\r\n self.usage('No config file found at default paths (%s); '\r\n 'use the -c option to specify a config file '\r\n 'at a different path' % ', '.join(self.searchpaths))\r\n return config", "def get_default(self, parent):\n\n # TODO fix this\n Reference = load('zbx.config.Reference')\n\n return Reference(self.model, parent, self.default, self.append_host)", "def get_config_defaults(self): # pylint: disable=R0201\n return {}", "def antenny_config_load_default(self):\n return self.antenny_config.load_default_config()", "def defaults():\n global __preset_staging\n \n t = TreeDict('Default_Parameter_Tree', __defaultpresettree__ = True)\n __preset_staging[id(t)] = t\n return t", "def get_config_template(self) -> cconfig.Config:", "def get_default_configuration():\n # Pre-configured default values for various parameters:\n default_config = {\n \"name\":\"Transient\",\n \"auto\":True,\n \"ra\":0.0,\n \"dec\":0.0,\n \"radius\":10.0,\n \"resolution\":1.8,\n \"energy\":70.0,\n \"pixsize\": 16,\n \"respcode\":\"czti_Aepix.out\",\n \"txycode\":\"radec2txty.out\",\n \"resppath\":\"pixarea\",\n \"plotfile\":\"plots/localize.pdf\",\n\t \"lc_bin\":5.0,\n\t \"typ\":\"band\",\n\t \"comp_bin\":20,\t\n \"verbose\":True,\n \"do_fit\":True\n }\n required_config = {\n 'l2file':\"_level2.evt\",\n 'infile':\"file.evt\",\n 'mkffile':\"file.mkf\",\n 'trigtime':0.00,\n 'transtart':0.00,\n 'tranend':0.00,\n 'bkg1start':0.00,\n 'bkg1end':0.00,\n 'bkg2start':0.00,\n 'bkg2end':0.00,\n\t 'alpha':0.00,\n\t 'beta':0.00,\n\t 'E0':0.00,\n\t 'A':0.00\n }\n return default_config, required_config", "def get_project(arn=None):\n pass", "def configure(config_file):\n config = ConfigParser.ConfigParser()\n config.read(config_file)\n return config.get('repository', 'toplevel_processing_plots_path'), \\\n config.get('project', 'project_id')", "def load_config_with_defaults(config_path):\n config = neat.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path\n )\n return config", "def 
get_default_config():\n # Swallow STDERR if it says\n # \"No config file found, using default configuration\"\n result = subprocess.check_output(['pylint', '--generate-rcfile'],\n stderr=subprocess.PIPE)\n # On Python 3, this returns bytes (from STDOUT), so we\n # convert to a string.\n return result.decode('utf-8')", "def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default", "def get_config(cfg_file=None):\n config = _C.clone()\n if cfg_file:\n _update_config_from_file(config, cfg_file)\n return config", "def default_config(self) -> Optional['outputs.FeatureSpecFleetobservabilityLoggingConfigDefaultConfig']:\n return pulumi.get(self, \"default_config\")", "def _get_MindtPy_GOA_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n\n _add_common_configs(CONFIG)\n _add_goa_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "def _read_project(self, filename):\n parser = configparser.ConfigParser()\n parser.read(filename, \"utf8\")\n return parser", "def GetProject(args):\n return args.project or properties.VALUES.core.project.GetOrFail()", "def GetDefaultWiredNetwork(self):\n config = ConfigParser.ConfigParser()\n config.read(self.wired_conf)\n profileList = config.sections()\n for profile in profileList:\n if config.has_option(profile, \"default\"):\n if misc.to_bool(config.get(profile, \"default\")):\n return profile\n return None", "def __init__(__self__, *,\n node_config_defaults: Optional[pulumi.Input['NodeConfigDefaultsArgs']] = None):\n if node_config_defaults is not None:\n pulumi.set(__self__, \"node_config_defaults\", node_config_defaults)", "def load_default_config():\n config_file = path.abspath(\n path.join(path.dirname(__file__), 'journal.toml'))\n return toml.load(config_file)", "def getDefault():", "def get_project_value(key, default_value=None):\r\n # Load project coniguration settings\r\n try:\r\n load_project_values()\r\n except RuntimeError:\r\n sublime.set_timeout(lambda: load_project_values(), 0)\r\n\r\n # Find value in project configuration\r\n if S.CONFIG_PROJECT:\r\n if key in S.CONFIG_PROJECT:\r\n return S.CONFIG_PROJECT[key]\r\n\r\n # Otherwise use default value\r\n return default_value", "def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }", "def build_config_from_user_input() -> BuildConfig:\n project_dir: str = get_user_input_for_value(\"Cmake Project Directory: \", str)\n name: str = get_user_input_for_value(\"Project Config Name: \", 
str)\n generator: str = get_user_input_for_value(\"Generator: \", str)\n configurationType: str = get_user_input_for_value(\"Configuration Type: \", str)\n inheritEnvironments: str = get_user_input_for_value(\"Inherit Environments: \", str)\n buildRoot: str = r\"${projectDir}\\\\out\\\\build\\\\${name}\"\n installRoot: str = r\"${projectDir}\\\\out\\\\install\\\\${name}\"\n cmakeCommandArgs: str = get_user_input_for_value(\"Cmake Command Args: \", str)\n buildCommandArgs: str = get_user_input_for_value(\"Build Command Args: \", str)\n ctestCommandArgs: str = get_user_input_for_value(\"Ctest Command Args: \", str)\n variables: List[CmakeVariable] = get_cmake_vars_from_user()\n\n # Build object and return it to function user\n return BuildConfig(name=name,\n generator=generator,\n configurationType=configurationType,\n inheritEnvironments=inheritEnvironments,\n buildRoot=buildRoot,\n installRoot=installRoot,\n cmakeCommandArgs=cmakeCommandArgs,\n buildCommandArgs=buildCommandArgs,\n ctestCommandArgs=ctestCommandArgs,\n variables=variables)", "def get_local_config(project_path, use_cache=True):\n\n pyproject_path = os.path.join(project_path, 'pyproject.toml')\n\n if os.path.exists(pyproject_path):\n with open(pyproject_path, 'r', encoding=\"utf8\") as config_file:\n config = toml.load(config_file)\n\n config = config.get('tool', {}).get('tidypy', {})\n config = merge_dict(get_default_config(), config)\n config = process_extensions(config, project_path, use_cache=use_cache)\n return config\n\n return None", "def get_config() -> Optional[Config]:\n return CurrentConfig.get()", "def get_current_config():\n global SOLR_ADDRES, SOLR_PORT, SOLR_CORE\n return {'host': SOLR_ADDRESS, 'port': SOLR_PORT, 'core': SOLR_CORE}", "def _new():\n\treturn ConfigParser(\n\tdelimiters = ('=',),\n\tcomment_prefixes = ('#', ';'),\n\tdefault_section = 'default',\n\tallow_no_value = False,\n\tstrict = False,\n\tinterpolation = ExtendedInterpolation(),\n\tdefaults = {\n\t\t'debug': False,\n\t\t'datadir': path.join(path.expanduser('~'), '.local', 'rosshm'),\n\t\t'log.level': 'warn',\n\t\t'core.enable': True,\n\t\t'db.driver': 'sqlite',\n\t\t'db.name': 'rosshmdb',\n\t\t'db.config': '',\n\t\t'static.enable': True,\n\t\t'web.enable': True,\n\t},\n)", "def getDefaultSettings(self) -> ghidra.docking.settings.Settings:\n ...", "def create_default_config():\n import codecs\n config = ConfigParser.SafeConfigParser()\n config.readfp(StringIO(DEFAULT_CONFIG))\n\n # Load user settings\n filename = get_user_config_filename()\n if not os.path.exists(filename):\n from wizard import setup_wizard\n setup_wizard(config)\n else:\n try:\n fi = codecs.open(filename, 'r', encoding='utf-8')\n config.readfp(fi)\n finally:\n fi.close()\n return config", "def get(self, item, default=''):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else value", "def default_config():\n return {'grid': {'regular': {'width': 0.05,\n 'wake': {'width': 0.1, 'progression': None},\n 'layers': 50,\n 'thickness': 5,\n 'boundary_layer': { 'initial_thickness': 4.2e-5 }}}}", "def _GetDefaultConfig(self) -> str:\n try:\n region = util.GetRegionFromZone(\n FLAGS.zones[0] if FLAGS.zones else FLAGS.zone[0])\n except IndexError:\n region = _DEFAULT_REGION\n return f'regional-{region}'", "def get_default_config():\n # pylint: disable=cyclic-import\n from raylab.agents.sac import DEFAULT_CONFIG\n\n return DEFAULT_CONFIG", "def get_config(self, budget):\n\t\traise NotImplementedError('This function needs to be overwritten in 
%s.'%(self.__class__.__name__))", "def get_default_config_file() -> Path:\n return get_path_to_pyflow() / \"pyflow\" / \"conf\" / CONFIG_FILE", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def getconfig(config_nm, flags_dict):\n\n try:\n user_config = importlib.import_module('.user_config', '.'.join(str(__name__).split('.')[:-1]))\n final_config = override_config(config_nm, user_config, flags_dict)\n return final_config\n\n except ImportError:\n print('There isn\\'t a user_config.py in your model folder!\\nPlease supply one based on default_config.py')\n _print_config()", "def parse():\n rcParams = configparser.ConfigParser(defaults=defaults())\n rcParams.read([os.path.join(os.getcwd(), 'watershed_workflowrc'),\n os.path.join(os.getcwd(), '.watershed_workflowrc'),\n os.path.join(home(), '.watershed_workflowrc')])\n return rcParams", "def _get_MindtPy_FP_config():\n CONFIG = ConfigBlock('MindtPy-GOA')\n CONFIG.declare(\n 'init_strategy',\n ConfigValue(\n default='FP',\n domain=In(['FP']),\n description='Initialization strategy',\n doc='Initialization strategy used by any method. Currently the '\n 'continuous relaxation of the MINLP (rNLP), solve a maximal '\n 'covering problem (max_binary), and fix the initial value for '\n 'the integer variables (initial_binary).',\n ),\n )\n\n _add_common_configs(CONFIG)\n _add_fp_configs(CONFIG)\n _add_oa_cuts_configs(CONFIG)\n _add_subsolver_configs(CONFIG)\n _add_tolerance_configs(CONFIG)\n _add_bound_configs(CONFIG)\n return CONFIG", "def default_db_config():\n return read_json_file(db_config_file)", "def default_context(project_name: str) -> None:\n return BuilderContext(\n project_name=project_name,\n kube_name=project_name.replace(\"_\", \"-\"),\n project_description=\"Generated by pytest.\",\n ci_type=CIType.none,\n db=DatabaseType.none,\n db_info=DB_INFO[DatabaseType.none],\n enable_redis=False,\n enable_migrations=False,\n enable_kube=False,\n enable_routers=True,\n add_dummy=False,\n self_hosted_swagger=False,\n force=True,\n )", "def get_default_config(branch, gcs_path, mfest_commit,\n\t\t\tverify_consistency, version):\n config = dict(AIRFLOW_CONFIG)\n\n config['BRANCH'] = AIRFLOW_CONFIG['BRANCH'].format(branch=branch)\n config['GCS_BUILD_PATH'] = AIRFLOW_CONFIG['GCS_BUILD_PATH'].format(\n\t\tgcs_build_bucket=AIRFLOW_CONFIG['GCS_BUILD_BUCKET'], gcs_path=gcs_path)\n config['GCS_FULL_STAGING_PATH'] = AIRFLOW_CONFIG['GCS_FULL_STAGING_PATH'].format(\n\t\tgcs_staging_bucket=AIRFLOW_CONFIG['GCS_STAGING_BUCKET'], gcs_path=gcs_path)\n config['GCS_RELEASE_TOOLS_PATH'] = AIRFLOW_CONFIG['GCS_RELEASE_TOOLS_PATH'].format(\n\t\tgcs_build_bucket=AIRFLOW_CONFIG['GCS_BUILD_BUCKET'], gcs_path=gcs_path)\n config['GCS_STAGING_PATH'] = AIRFLOW_CONFIG['GCS_STAGING_PATH'].format(\n\t\tgcs_path=gcs_path)\n config['ISTIO_REPO'] = AIRFLOW_CONFIG['ISTIO_REPO'].format(\n\t\tgithub_org=AIRFLOW_CONFIG['GITHUB_ORG'],\n\t\tgithub_repo=AIRFLOW_CONFIG['GITHUB_REPO'])\n config['MFEST_COMMIT'] = AIRFLOW_CONFIG['MFEST_COMMIT'].format(\n mfest_commit=mfest_commit)\n config['VERIFY_CONSISTENCY'] = AIRFLOW_CONFIG['VERIFY_CONSISTENCY'].format(\n verify_consistency=verify_consistency)\n config['VERSION'] = AIRFLOW_CONFIG['VERSION'].format(version=version)\n\n return config", "def project(project_no_init: Project) -> Project:\n 
from pdm.cli.utils import merge_dictionary\n\n data = {\n \"project\": {\n \"name\": \"test-project\",\n \"version\": \"0.0.0\",\n \"description\": \"\",\n \"authors\": [],\n \"license\": {\"text\": \"MIT\"},\n \"dependencies\": [],\n \"requires-python\": \">=3.7\",\n },\n \"build-system\": DEFAULT_BACKEND.build_system(),\n }\n\n merge_dictionary(project_no_init.pyproject._data, data)\n project_no_init.pyproject.write()\n # Clean the cached property\n project_no_init._environment = None\n return project_no_init", "def GetDefaultScopeIfEmpty(args):\n if args.IsSpecified('scope'):\n VerifyScopeForSearch(args.scope)\n return args.scope\n else:\n return 'projects/{0}'.format(properties.VALUES.core.project.GetOrFail())", "def configure_project():\n pass", "def getnode():\n try:\n configfile = os.environ['GET-UNREPORTED-RC']\n except KeyError:\n configfile = 'puppet-reissue-certs.conf'\n config = ConfigParser.SafeConfigParser()\n config.read(configfile)\n puppetmaster_connection = config.get('main','puppetmaster')\n if '@' in puppetmaster_connection:\n puppetmaster = puppetmaster_connection.split('@')[1]\n else:\n puppetmaster = puppetmaster_connection\n return puppetmaster", "def get_project(benchmark):\n return benchmark_config.get_config(benchmark)['project']", "def get_config(config = None):\n default = get_default()\n if config != None:\n for key in config:\n if not key.endswith(\" comment\"):\n default[key] = config[key]\n default[\"player name\"] = default[\"player name\"].strip()\n return default", "def cg_config():\n return {}", "def node_config(self) -> pulumi.Output['outputs.NodeConfigResponse']:\n return pulumi.get(self, \"node_config\")", "def test_set_default_config(qibuild_action, build_worktree):\n qibuild_action(\"add-config\", \"foo\", \"--default\")\n assert build_worktree.default_config == \"foo\"", "def get_config(self,config):\n return self.parser.get(\"main\", config)", "def get_config(group):\n config = toml.load('./config.toml')\n return config[group]", "def node_config(self) -> 'outputs.NodeConfigResponse':\n return pulumi.get(self, \"node_config\")", "def get_default_arg():\n\n arg = 'cog:C_cog_space_GRP world:parts_GRP trueWorld:noXform_GRP '\n return arg", "def _CreateCfgFile():\n default_cfg = \"\"\"\nproject: \"fake_project\"\nzone: \"fake_zone\"\nstorage_bucket_name: \"fake_bucket\"\nclient_id: \"fake_client_id\"\nclient_secret: \"fake_client_secret\"\n\"\"\"\n return default_cfg", "def get_specific_config(config_file_path, project_path, use_cache=True):\n\n if os.path.exists(config_file_path):\n with open(config_file_path, 'r', encoding=\"utf8\") as config_file:\n config = toml.load(config_file)\n if 'tidypy' in config:\n # Originally unintended, but we'll support configuration files\n # where everything in scoped in a \"tidypy\" table.\n config = config['tidypy']\n\n config = merge_dict(get_default_config(), config)\n config = process_extensions(config, project_path, use_cache=use_cache)\n return config\n\n return None", "def _get_config(self, kwargs):\n return Config(config_file=kwargs.pop('config_file', None),\n env=kwargs.pop('env', None), overrides=kwargs)", "def get_default_config():\r\n config = {}\r\n\r\n config[\"kl_coeff\"] = 1.0\r\n config[\"_num_workers_tf\"] = 4\r\n config[\"use_gae\"] = True\r\n config[\"num_gpus\"] = 0\r\n\r\n config[\"_env_name_rllib\"] = \"multicomp\"\r\n config[\"_env_fcn\"] = create_env\r\n config['_policies'] = [None, \"from_scratch\", \"pretrained\"]\r\n config[\"_env\"] = {'with_video': False,\r\n 
\"SingleAgentToMultiAgent\": False,\r\n \"env_name\": \"multicomp/YouShallNotPassHumans-v0\"}\r\n config['framework'] = 'tfe'\r\n\r\n config['_train_policies'] = ['player_1']\r\n config['_call'] = {}\r\n config['_trainer'] = \"PPO\"\r\n config['_policy'] = \"PPO\"\r\n config['_call']['checkpoint_freq'] = 0\r\n config['_train_steps'] = 99999999\r\n config['_update_config'] = None\r\n config['_run_inline'] = False\r\n config['_postprocess'] = None\r\n\r\n config['num_envs_per_worker'] = 4\r\n config['_log_error'] = True\r\n config['_model_params'] = {\r\n \"use_lstm\": False,\r\n \"fcnet_hiddens\": [64, 64],\r\n # \"custom_action_dist\": \"DiagGaussian\",\r\n \"fcnet_activation\": \"tanh\",\r\n \"free_log_std\": True,\r\n }\r\n\r\n config['_select_policy'] = select_policy_default\r\n config['_get_policies'] = get_policies_default\r\n config['_do_not_train_policies'] = []\r\n config['_update_withpolicies'] = None\r\n config['callbacks'] = InfoCallbacks\r\n\r\n return config" ]
[ "0.6206752", "0.60061955", "0.59438497", "0.5857599", "0.5857599", "0.58438694", "0.5837966", "0.58032465", "0.5682998", "0.5640771", "0.55415815", "0.5538713", "0.5535621", "0.5531356", "0.5446284", "0.54307395", "0.5405538", "0.5367158", "0.535104", "0.5333277", "0.5309475", "0.5307373", "0.5302389", "0.5281072", "0.523944", "0.5214846", "0.52044696", "0.5166501", "0.5163326", "0.51608336", "0.51600116", "0.51527184", "0.5149668", "0.5149668", "0.511826", "0.5116973", "0.51054513", "0.5092751", "0.5087206", "0.50821406", "0.5070664", "0.5066043", "0.5064787", "0.5064079", "0.50626004", "0.5060664", "0.5060645", "0.5060338", "0.50540894", "0.5052593", "0.50400245", "0.50304", "0.5027179", "0.5019996", "0.49904248", "0.49879378", "0.4959999", "0.4955661", "0.49434426", "0.49380335", "0.49288595", "0.49187124", "0.49003458", "0.48977304", "0.48872876", "0.48821706", "0.48803082", "0.48750117", "0.48727718", "0.48535386", "0.48520836", "0.48495427", "0.48393387", "0.48267835", "0.48216903", "0.48196056", "0.48186228", "0.4817404", "0.4813017", "0.4800702", "0.47915173", "0.47884038", "0.47849822", "0.47842735", "0.47797987", "0.47747275", "0.47732362", "0.47699913", "0.47688255", "0.47683775", "0.47683036", "0.47540593", "0.4752669", "0.47513232", "0.47504827", "0.47455043", "0.4743335", "0.47345355", "0.4716533" ]
0.5848163
6
Convert a time.struct_time as returned by feedparser into a datetime.date.
def _convert_struct_time_to_dt(stime): return date.fromtimestamp(mktime(stime))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_struct_to_datetime(struct_time_object):\n return datetime.datetime(*struct_time_object[:6])", "def _convert_struct_time_to_dt(stime):\n\n dt = datetime.datetime.fromtimestamp(mktime(stime))\n\n return dt.date()", "def unmarshall_time(tyme):\r\n return datetime.datetime(day=tyme['day'],\r\n month=tyme['month'],\r\n year=tyme['year'],\r\n hour=tyme['hour'],\r\n minute=tyme['minute'],\r\n second=tyme['second'],\r\n microsecond=tyme['microsecond'])", "def unpack_time(s, type='I'):\n\ttry:\n\t\t(l,), s = unpack(\"!\"+type, s)\n\texcept TypeError, e:\n\t\traise TypeError(\"Problem unpacking time: %s\" % e)\n\n\tif l < 0:\n\t\treturn None\n\treturn datetime.fromtimestamp(l), s", "def parse_time(s):\n return time.gmtime(float(s))", "def __parse_time(self, time_obj):\n if time_obj:\n resp = ''\n if isinstance(time_obj, int) or isinstance(time_obj, str):\n resp = time_obj\n elif isinstance(time_obj, datetime.datetime):\n resp = calendar.timegm(time_obj.timetuple())\n else:\n raise Exception(\"Unknown __parse_time format for {0}\".format(time_obj))\n return str(resp)\n return None", "def dehydrate_time(value):\n if isinstance(value, Time):\n nanoseconds = int(value.ticks * 1000000000)\n elif isinstance(value, time):\n nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +\n 1000000000 * value.second + 1000 * value.microsecond)\n else:\n raise TypeError(\"Value must be a neotime.Time or a datetime.time\")\n if value.tzinfo:\n return Structure(ord(b\"T\"), nanoseconds, value.tzinfo.utcoffset(value).seconds)\n else:\n return Structure(ord(b\"t\"), nanoseconds)", "def parse_time_record(self, record):\n\n time_record = TIME_RECORD_MATCHER.match(record)\n if not time_record:\n time_data = None\n else:\n time_data = struct.unpack(TIME_FORMAT, \n time_record.group(0)[0:TIME_RECORD_SIZE])\n\n return time_data", "def parse_time_to_SAML(time):\n data = datetime.utcfromtimestamp(float(time))\n return data.strftime('%Y-%m-%dT%H:%M:%SZ')", "def parse_time(time_string):\n return calendar.timegm(time.strptime(time_string, \"%Y%m%dT%H%M%SZ\"))", "def _astropy_time(time):\n return time if isinstance(time, astropy.time.Time) else astropy.time.Time(parse_time(time))", "def get_time(text_time):\n # return Observer.datetime_to_astropy_time(dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M'))\n the_time = dt.datetime.strptime(text_time, '%d/%m/%Y %H:%M')\n return Time(the_time.strftime('%Y-%m-%d %H:%M'))\n #date = [int(i) for i in date.split('/')]", "def parse_time(self):\n\n # parse time\n year = int(self.start[:4])\n month = int(self.start[5:7])\n day = int(self.start[8:10])\n hours = int(self.start[11:13])\n minutes = int(self.start[14:16])\n seconds = int(self.start[17:19])\n time = datetime.datetime(year, month, day, hours, minutes, seconds)\n\n # advance time\n time = time + datetime.timedelta(minutes=self.rain_interval)\n time = time.isoformat(\" \")\n\n # timestamp\n # elevation (m)\n evolved_elevation = (\n 'elevation_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # water depth (m)\n depth = (\n 'depth_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # sediment flux (kg/ms)\n sediment_flux = (\n 'flux_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # erosion-deposition (kg/m2s)\n erosion_deposition = (\n 'erosion_deposition_'\n + time.replace(\" \", \"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n # elevation difference (m)\n difference = (\n 'difference_'\n + time.replace(\" \", 
\"_\").replace(\"-\", \"_\").replace(\":\", \"_\"))\n\n return (evolved_elevation, time, depth, sediment_flux,\n erosion_deposition, difference)", "def time(self):\n return parse_time(self['timestamp'])", "def _time_to_date(parsed_time):\n if not parsed_time:\n return parsed_time\n return datetime.fromtimestamp(calendar.timegm(parsed_time), tz=timezone.utc)", "def struct_time(self):\n _, month, day, hour, minute, second, weekday, _, _ = self.current_time\n # Bluetooth weekdays count from 1. struct_time counts from 0.\n return time.struct_time((month, day, hour, minute, second, weekday - 1, -1))", "def time_convert(timestr):\n \n try:\n # Analyse given time str to seperate elements.\n struct_time = time.strptime(timestr[:-4], \"%a, %d %b %Y %H:%M:%S\")\n # Convert given time by secend unit.\n t = time.mktime(struct_time) \n # Re-construct time to isotime format.\n isot = time.strftime(\"%Y-%m-%d\", time.gmtime(t))\n return isot\n \n except:\n return ''", "def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )", "def __init__(self, struct_time):\r\n\t\tself.struct_time = 
struct_time\r\n\t\tself.year = struct_time[0]\r\n\t\tself.mon = self.set_month(struct_time[1])\r\n\t\tself.day = struct_time[2]\r\n\t\tself.hour = struct_time[3]\r\n\t\tself.min = struct_time[4]\r\n\t\tself.wday = self.set_week_day(struct_time[6])\r\n\t\tself.day_or_night = self.set_day_state(struct_time[8])", "def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg", "def _serialize_time(val):\n return val.isoformat()", "def convert_time(t):\n return datetime.fromtimestamp(t / 1e7 - 11644473600)", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\r\n return datetime.datetime.strptime(timestr, fmt)", "def parse_gerrit_time(value):\n parts = value.split('.')\n dt = datetime.datetime.strptime(parts[0], GERRIT_TIMESTAMP_FMT)\n if len(parts) > 1:\n dt += datetime.timedelta(\n microseconds=int(float('0.%s' % parts[1]) * 1000000.0))\n return dt", "def parse_timestamp(timestamp):\n if not timestamp or timestamp == '0000-00-00T00:00:00Z':\n return struct_time((0, 0, 0, 0, 0, 0, 0, 0, 0))\n return strptime(timestamp, '%Y-%m-%dT%H:%M:%SZ')", "def _parse_timestamp(self, api_time):\n return (\n pendulum.parse(api_time)\n if api_time is not None\n else pendulum.from_timestamp(-1)\n )", "def _ParseTimeElements(self, time_elements_structure):\n try:\n year, month, day_of_month, hours, minutes, seconds = (\n time_elements_structure)\n\n # Ensure time_elements_tuple is not a pyparsing.ParseResults otherwise\n # copy.deepcopy() of the dfDateTime object will fail on Python 3.8 with:\n # \"TypeError: 'str' object is not callable\" due to pyparsing.ParseResults\n # overriding __getattr__ with a function that returns an empty string\n # when named token does not exist.\n time_elements_tuple = (year, month, day_of_month, hours, minutes, seconds)\n date_time = dfdatetime_time_elements.TimeElements(\n time_elements_tuple=time_elements_tuple)\n\n # APT History logs store date and time values in local time.\n date_time.is_local_time = True\n\n return date_time\n\n except (TypeError, ValueError) as exception:\n raise errors.ParseError(\n 'Unable to parse time elements with error: {0!s}'.format(exception))", "def rfc3339nano_to_datetime(my_time, time_dash='_'):\n my_time = my_time.replace('_', ':') # Replace undercores with colons\n my_time = re.sub(r\"\\.\\d*\", \"\", my_time) # Strip nanoseconds\n return datetime.datetime.strptime(my_time, f\"%Y-%m-%dT%H:%M:%S%z\") # Parse string to datetime", "def _datetime2et(time: datetime) -> float:\n if isinstance(time, float):\n return time\n if not isinstance(time, datetime):\n raise TypeError(\"Time must be a float or a datetime object.\")\n return spy.str2et(time.isoformat())", "def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):\n return datetime.datetime.strptime(timestr, fmt)", "def from_iso(iso, fmt=\"%Y-%m-%dT%H:%M:%S.%f\"):\n # change datetime.datetime to time, return time.struct_time type\n return datetime.strptime(iso, fmt)", "def datetime_from_string(time):\n try:\n if type(time) == datetime.datetime:\n return time\n else:\n try:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S')\n except ValueError:\n return datetime.datetime.strptime(time, '%Y-%m-%d %H:%M:%S.%f')\n except ValueError:\n return time\n except TypeError:\n return time", "def datetimeify(t):\n if type(t) in [datetime, Timestamp]:\n return t\n fmts = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d', '%Y %m %d %H %M 
%S',]\n for fmt in fmts:\n try:\n return datetime.strptime(t, fmt)\n except ValueError:\n pass\n raise ValueError(\"time data '{:s}' not a recognized format\".format(t))", "def _change_time_format(time_string):\n datetime_object = parser.isoparse(time_string)\n return datetime_object", "def time_convert(intime):\n Nt = intime.shape[0]\n outtime = []\n for t in range(Nt):\n timestr = ''.join([intime[t,:][~intime[t,:].mask].data[i].decode('utf-8') for i in range(len(intime[t,:][~intime[t,:].mask].data))])\n outtime.append(datetime.strptime(timestr, '%Y-%m-%d_%H:%M:%S'))\n return outtime", "def epoch2time(time):\n\tvalue = datetime.datetime.fromtimestamp(time)\n\tNormal = value.strftime('%Y-%m-%d %H:%M:%S')\n\tprint(normal)\n\treturn normal", "def parse_timestamp(node):\n\n if len(node) != 3:\n raise ValueError(\"Invalid timestamp object.\")\n\n return Timestamp(node[0], node[1], node[2])", "def cvt_time(dt_str):\n # Note, these timestamps don't include time zones\n return datetime.strptime(dt_str, '%Y-%m-%dT%H:%M:%S.%fZ')", "def parse_time(dt: str) -> datetime:\n return datetime.strptime(dt, \"%Y-%m-%dT%H:%M:%SZ\")", "def _ToBlogTime(self, time_tuple):\r\n return time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple)", "def decodeSpaceTime(self, result):\r\n if self.case == 1:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))),\r\n reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX2D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY2D(x[1], self.offy, self.scaley, self.roundNum), x[2]], result)\r\n elif self.case == 2:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(x[0]/self.scale))), \r\n reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: [int(x[0]/self.scale), reader.morton2coordsX3D(x[1], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY3D(x[1], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ3D(x[1], self.offz, self.scalez, self.roundNum)], result)\r\n elif self.case == 3:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst3D(x[0])/self.scale))), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum),\r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n else:\r\n return map(lambda x: [int(reader.morton2coordst3D(x[0])/self.scale), \r\n reader.morton2coordsY3D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsZ3D(x[0], self.offy, self.scaley, self.roundNum), x[1]], result)\r\n elif self.case == 4:\r\n if self.granularity == 'day':\r\n return map(lambda x: [reader.formatTime(reader.inverseDaySinceEpoch(int(reader.morton2coordst4D(x[0])/self.scale))), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)\r\n else:\r\n return map(lambda x: 
[int(reader.morton2coordst4D(x[0])/self.scale), \r\n reader.morton2coordsX4D(x[0], self.offx, self.scalex, self.roundNum), \r\n reader.morton2coordsY4D(x[0], self.offy, self.scaley, self.roundNum), \r\n reader.morton2coordsZ4D(x[0], self.offz, self.scalez, self.roundNum)], result)", "def UpdateStructTime(self, t):\n self.date.UpdateStructTime(t)\n self.time.UpdateStructTime(t)", "def decode(self, s):\r\n (tsec, tfrac, self.eventType, self.eventCode,\r\n self.eventValue) = struct.unpack(Format.Event, s)\r\n\r\n self.time = tsec + tfrac / 1000000.0", "def parse_tql_time(self, data, pid, label):\n field = self.parse_tql_field(data, pid, label)\n if field:\n hour, minute, second = [int(i) for i in field.split(':')[0:3]]\n field = datetime.time(hour, minute, second)\n return field", "def ParseDatetimeIntoSecs(dom, tag):\n el = dom.getElementsByTagName(tag)\n if not el:\n return None\n assert el[0].getAttribute('type') == 'datetime'\n data = el[0].firstChild.data\n\n # Tracker emits datetime strings in UTC or GMT.\n # The [:-4] strips the timezone indicator\n parsable_date=(\"{}\".format(data[:-4])).strip()\n when = time.strptime(parsable_date, '%Y/%m/%d %H:%M:%S')\n # calendar.timegm treats the tuple as GMT\n return calendar.timegm(when)", "def extract_time(bs_soup):\n sub_item = bs_soup.find(\"div\", class_=TIME_CLASS).text\n \n if sub_item:\n return datetime.strptime(sub_item, \"%d %b %Y, %H:%M\")\n return None", "def parse_time(time_string, time_format='', **kwargs):\n if isinstance(time_string, pandas.Timestamp):\n return time_string.to_pydatetime()\n elif isinstance(time_string, datetime) or time_format == 'datetime':\n return time_string\n elif isinstance(time_string, tuple):\n return datetime(*time_string)\n elif time_format == 'utime' or isinstance(time_string, (int, float)):\n return datetime(1979, 1, 1) + timedelta(0, time_string)\n elif isinstance(time_string, pandas.DatetimeIndex):\n return time_string._mpl_repr()\n elif isinstance(time_string, np.ndarray) and 'datetime64' in str(time_string.dtype):\n ii = [ss.astype(datetime) for ss in time_string]\n # Validate (in an agnostic way) that we are getting a datetime rather than a date\n return np.array([datetime(*(dt.timetuple()[:6])) for dt in ii])\n elif time_string is 'now':\n return datetime.utcnow()\n elif isinstance(time_string, astropy.time.Time):\n return time_string.datetime\n else:\n # remove trailing zeros and the final dot to allow any\n # number of zeros. This solves issue #289\n if '.' in time_string:\n time_string = time_string.rstrip(\"0\").rstrip(\".\")\n for time_format in TIME_FORMAT_LIST:\n try:\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_format)\n except TypeError:\n break\n if ts is None:\n continue\n return datetime.strptime(ts, time_format) + time_delta\n except ValueError:\n pass\n\n time_string_parse_format = kwargs.pop('_time_string_parse_format', None)\n if time_string_parse_format is not None:\n # Following a comment by the Lead Developer, the Try / except clause\n # is replaced. 
The Lead Developer thinks that this the try/except\n # clause is related to SunPy's database module.\n try:\n ts, time_delta = _regex_parse_time(time_string,\n time_string_parse_format)\n if ts and time_delta:\n return datetime.strptime(ts, time_string_parse_format) + time_delta\n else:\n return datetime.strptime(time_string, time_string_parse_format)\n except Exception:\n pass\n raise ValueError(\"'{tstr!s}' is not a valid time string!\".format(tstr=time_string))", "def parse_timestamp(ts):\n return DateTimeField()._to_python(ts)", "def parse(s):\n\n rise = False\n set = False\n if s[-1:] == \"R\":\n rise = True\n s = s[:-1]\n elif s[-1:] == \"T\":\n set = True\n s = s[:-1]\n \n x = s.split(\":\")\n if len(x) == 1:\n x.append(\"0\")\n if len(x) == 2:\n x.append(\"0\")\n \n return Time(int(x[0]), int(x[1]), int(x[2]), after_sunrise=rise,\n after_sunset=set)", "def parseTimestamp(gameTime):\n\n half, time = gameTime.split(' - ')\n \n min, sec = time.split(':')\n \n time_float = (float(min)*60 + float(sec)) // 1\n \n return (half, time_float)", "def _convert_time_to_frames(pars, framerate):\n pars['stimulus-frames'] = \\\n int(round(pars['stimulus-training-ms']/1000.0*framerate))\n pars['stimulus-offset-frames'] = \\\n int(round(pars['stimulus-training-offset-ms']/1000.0*framerate))\n pars['classification-frames'] = \\\n int(round(pars['classification-ms']/1000.0*framerate))\n\n # Make the exclusion time a tuple rather than an int\n if isinstance(pars['excluded-time-around-onsets-ms'], int):\n pars['excluded-time-around-onsets-ms'] = (\n pars['excluded-time-around-onsets-ms'],\n pars['excluded-time-around-onsets-ms'])\n\n # Then convert to frames\n pars['excluded-time-around-onsets-frames'] = (\n int(round(pars['excluded-time-around-onsets-ms'][0]/1000.0*framerate)),\n int(round(pars['excluded-time-around-onsets-ms'][1]/1000.0*framerate)))\n\n pars['temporal-prior-fwhm-frames'] = \\\n int(round(pars['temporal-prior-fwhm-ms']/1000.0*framerate))\n\n return pars", "def ConvertTime( self ) :\n \n # modules:\n import logging\n import datetime\n import netCDF4\n import numpy\n \n #\n # Original 'Time' units and description:\n #\n # title = \"Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC), 1 January 1970.\"\n # units = \"s\"\n #\n # Create new field 'Datetime' field with units:\n # units = \"Seconds since 1970-01-01 00:00'\n #\n # select:\n varid = self.swaths[self.component]['Geolocation Fields']['Time']\n # values:\n tvalues = varid['data']\n # extract description:\n long_name = varid['long_name'].decode('latin-1')\n # check ...\n key = 'Time at Start of Scan (s, UNIX time / POSIX time), number of seconds that have elapsed since midnight Coordinated Universal Time (UTC),'\n if long_name.startswith(key) :\n # remove leading description:\n time0 = long_name.replace(key,'').replace('.','').strip()\n # extract datetime object:\n t0 = datetime.datetime.strptime(time0,'%d %B %Y')\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n if 'mask' in dir(tvalues) :\n values1d = netCDF4.num2date( tvalues.data, var['units'] )\n else :\n values1d = netCDF4.num2date( tvalues , var['units'] )\n #endif\n # alternative:\n # \"Time at Start of Scan (s, TAI93)\"\n elif 'TAI' in long_name :\n # find start:\n i0 = long_name.index('TAI')\n # extract:\n year = int(long_name[i0+3:].replace(')',''))\n # convert to 4-digits if necessary:\n if year < 100 :\n if year > 50 
:\n year = 1900 + year\n else :\n year = 2000 + year\n #endif\n #endif\n # reference time:\n t0 = datetime.datetime(year,1,1,0,0,0)\n # convert:\n var = {}\n var['units' ] = t0.strftime('seconds since %Y-%m-%d %H:%M:%H')\n var['long_name'] = long_name\n values1d = netCDF4.num2date( tvalues, var['units'] )\n else :\n self.logger.error( 'could not convert time units \"%s\"' % long_name )\n self.logger.error( 'first value : %f' % tvalues[0] )\n raise Exception\n #endif\n \n # expand to 2D:\n var['data'] = numpy.zeros( (self.ntime,self.np), values1d.dtype )\n for ip in range(self.np) :\n var['data'][:,ip] = values1d\n #endfor\n \n # set dim names:\n var['dimnames'] = ('time','pixel')\n \n # store:\n self.swaths[self.component]['Geolocation Fields']['Datetime'] = var", "def parse_time(text):\n\n # When keyword is 'in' adds values to time\n if text[-3] == 'in':\n remind_time = time.gmtime(int(text[-2]) * int(text[-1]) + time.time())\n # Otherwise try to parse time as written\n else:\n remind_time = text[-1].replace(':', ' ') \\\n + \" \" \\\n + time.strftime(\"%m/%d/%y\", time.gmtime(time.time()))\n remind_time = time.strptime(remind_time, \"%H %M %m/%d/%y\")\n return remind_time", "def parse(s):\n\n t = AbsoluteTimer()\n t.id = s.get(\"id\", None)\n t.name = s.get(\"name\", None)\n \n if s.has_key(\"abstime\"):\n\n parts = s[\"abstime\"].split(\" \")\n\n if len(parts) != 2:\n raise RuntimeError, \"Invalid date format\"\n\n dateparts = parts[0].split(\"-\")\n timeparts = parts[1].split(\":\")\n \n if len(dateparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n if len(timeparts) != 3:\n raise RuntimeError, \"Invalid date format\"\n\n t.year = int(dateparts[0])\n t.month = int(dateparts[1])\n t.date = int(dateparts[2])\n t.hours = int(timeparts[0])\n t.minutes = int(timeparts[1])\n t.seconds = int(timeparts[2])\n\n return t", "def convertTime(string):\n try:\n d = dtparser.parse(string)\n except ValueError:\n try:\n d = datetime.fromtimestamp(float(string))\n except ValueError:\n return string\n\n d.replace(tzinfo=tz.tzlocal())\n return datetime.strftime(d, \"%Y/%m/%d %H:%M:%S\")", "def date_to_python(self, value):\r\n # this throws away fractions of a second\r\n return datetime(*strptime(value[:-5], \"%Y-%m-%dT%H:%M:%S\")[0:6])", "def time_string2dt(time_string: str)-> datetime:\n return parse(time_string, fuzzy=True)", "def parse_time(value: str) -> float:\n return float(value[:-1]) * TIME[value[-1]]", "def converttime(time, currentformat, newformat):\n\n # Define conversion dictionary\n conversions = {\n \"milliseconds\": {\n \"milliseconds\": \"time\",\n \"seconds\": \"time / 1000\",\n \"minutes\": \"time / 1000 / 60\",\n \"hours\": \"time / 1000 / 60 / 60\",\n \"days\": \"time / 1000 / 60 / 60 / 24\",\n \"weeks\": \"time / 1000 / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 1000 / 60 / 60 / 24 / 14\",\n \"years\": \"time / 1000 / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 1000 / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 1000 / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 1000 / 60 / 60 / 24 / 365 / 1000\"\n },\n \"seconds\": {\n \"milliseconds\": \"time * 1000\",\n \"seconds\": \"time\",\n \"minutes\": \"time / 60\",\n \"hours\": \"time / 60 / 60\",\n \"days\": \"time / 60 / 60 / 24\",\n \"weeks\": \"time / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 60 / 24 / 14\",\n \"years\": \"time / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 60 / 
24 / 365 / 1000\"\n },\n \"minutes\": {\n \"milliseconds\": \"time * 60 * 1000\",\n \"seconds\": \"time * 60\",\n \"minutes\": \"time\",\n \"hours\": \"time / 60\",\n \"days\": \"time / 60 / 24\",\n \"weeks\": \"time / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 24 / 14\",\n \"years\": \"time / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 24 / 365 / 1000\"\n },\n \"hours\": {\n \"milliseconds\": \"time * 60 * 60 * 1000\",\n \"seconds\": \"time * 60 * 60\",\n \"minutes\": \"time * 60\",\n \"hours\": \"time\",\n \"days\": \"time / 24\",\n \"weeks\": \"time / 24 / 7\",\n \"fortnights\": \"time / 24 / 14\",\n \"years\": \"time / 24 / 365\",\n \"decades\": \"time / 24 / 365 / 10\",\n \"centuries\": \"time / 24 / 365 / 100\",\n \"millenniums\": \"time / 24 / 365 / 1000\"\n },\n \"days\": {\n \"milliseconds\": \"time * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 24 * 60 * 60\",\n \"minutes\": \"time * 24 * 60\",\n \"hours\": \"time * 24\",\n \"days\": \"time\",\n \"weeks\": \"time / 7\",\n \"fortnights\": \"time / 14\",\n \"years\": \"time / 365\",\n \"decades\": \"time / 365 / 10\",\n \"centuries\": \"time / 365 / 100\",\n \"millenniums\": \"time / 365 / 1000\"\n },\n \"weeks\": {\n \"milliseconds\": \"time * 7 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 7 * 24 * 60 * 60\",\n \"minutes\": \"time * 7 * 24 * 60\",\n \"hours\": \"time * 7 * 24\",\n \"days\": \"time * 7\",\n \"weeks\": \"time\",\n \"fortnights\": \"time / 2\",\n \"years\": \"time / 52\",\n \"decades\": \"time / 52 / 10\",\n \"centuries\": \"time / 52 / 100\",\n \"millenniums\": \"time / 52 / 1000\"\n },\n \"fortnights\": {\n \"milliseconds\": \"time * 14 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 14 * 24 * 60 * 60\",\n \"minutes\": \"time * 14 * 24 * 60\",\n \"hours\": \"time * 14 * 24\",\n \"days\": \"time * 14\",\n \"weeks\": \"time * 2\",\n \"fortnights\": \"time\",\n \"years\": \"time / 26\",\n \"decades\": \"time / 26 / 10\",\n \"centuries\": \"time / 26 / 100\",\n \"millenniums\": \"time / 26 / 1000\"\n },\n \"years\": {\n \"milliseconds\": \"time * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 256 * 24 * 60\",\n \"hours\": \"time * 256 * 24\",\n \"days\": \"time * 256\",\n \"weeks\": \"time * 52\",\n \"fortnights\": \"time * 26\",\n \"years\": \"time\",\n \"decades\": \"time / 10\",\n \"centuries\": \"time / 100\",\n \"millenniums\": \"time / 1000\"\n },\n \"decades\": {\n \"milliseconds\": \"time * 10 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 10 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 10 * 256 * 24 * 60\",\n \"hours\": \"time * 10 * 256 * 24\",\n \"days\": \"time * 10 * 256\",\n \"weeks\": \"time * 10 * 52\",\n \"fortnights\": \"time * 10 * 26\",\n \"years\": \"time * 10\",\n \"decades\": \"time\",\n \"centuries\": \"time / 10\",\n \"millenniums\": \"time / 100\"\n },\n \"centuries\": {\n \"milliseconds\": \"time * 100 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 100 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 100 * 256 * 24 * 60\",\n \"hours\": \"time * 100 * 256 * 24\",\n \"days\": \"time * 100 * 256\",\n \"weeks\": \"time * 100 * 52\",\n \"fortnights\": \"time * 100 * 26\",\n \"years\": \"time * 100\",\n \"decades\": \"time * 10\",\n \"centuries\": \"time\",\n \"millenniums\": \"time / 10\"\n },\n \"millenniums\": {\n \"milliseconds\": \"time * 1000 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 1000 * 256 * 24 * 
60 * 60\",\n \"minutes\": \"time * 1000 * 256 * 24 * 60\",\n \"hours\": \"time * 1000 * 256 * 24\",\n \"days\": \"time * 1000 * 256\",\n \"weeks\": \"time * 1000 * 52\",\n \"fortnights\": \"time * 1000 * 26\",\n \"years\": \"time * 1000\",\n \"decades\": \"time * 100\",\n \"centuries\": \"time * 10\",\n \"millenniums\": \"time\"\n }\n }\n\n # Return evaluated value\n return eval(conversions[currentformat][newformat])", "def transform_time(t):\n if t is None:\n return None\n elif isinstance(t, basestring):\n return t\n\n dt = datetime.fromtimestamp(t, UTC())\n return dt.strftime('%Y-%m-%dT%H:%M:%S%z')", "def _time_to_datetime(value):\r\n assert isinstance(value, datetime.time)\r\n return datetime.datetime(1970, 1, 1,\r\n value.hour, value.minute, value.second,\r\n value.microsecond)", "def parse_influxdb_time(t_str):\n try:\n return datetime.datetime.strptime(t_str[:26].rstrip('Z'), '%Y-%m-%dT%H:%M:%S.%f')\n except ValueError:\n return datetime.datetime.strptime(t_str[:19], '%Y-%m-%dT%H:%M:%S')", "def _convert_timestamp(timestamp):\n extract_time = re.match('(.*?\\+\\d{2}):(.*)', timestamp)\n formated = datetime.strptime('{}{}'.format(extract_time.group(1), extract_time.group(2)),\n '%Y-%m-%dT%H:%M:%S%z').strftime('%Y-%m-%dT%H:%M:%S%z')\n return formated", "def _parse_time(time_string: str) -> datetime:\n\n # Strings with timezone (+01:00) in v2 are not easily parsed. But time\n # zones are not important here, so we just omit them.\n time_string = time_string.rsplit('+')[0]\n\n time_formats = [\n '%Y-%m-%dT%H:%M:%S.%fZ', # Default\n '%Y-%m-%dT%H:%M:%SZ', # Imported UNCCD data\n '%Y-%m-%dT%H:%M:%S.%f', # Stripped timezone format (v2)\n ]\n for t_format in time_formats:\n try:\n return datetime.strptime(time_string, t_format)\n except ValueError:\n continue", "def narrow(avp):\n if len(avp.payload)!=4:\n raise InvalidAVPLengthException(avp)\n value = struct.unpack(\"!I\",avp.payload)[0] - DiamAVP_Time.seconds_between_1900_and_1970\n a = DiamAVP_Time(avp.code, value, avp.vendor_id)\n a.flags = avp.flags\n return a", "def parse_time(text):\n try:\n if len(text) == 17:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%MZ')\n elif len(text) == 20:\n date = datetime.datetime.strptime(text, '%Y-%m-%dT%H:%M:%SZ')\n else:\n date = datetime.datetime.utcnow()\n except Exception as _:\n date = datetime.datetime.utcnow()\n return date", "def parse_time(self, gc):\n\n def match(time_str):\n if time_str == \"Half\":\n time = 0\n minute = -3\n status = 'd'\n elif time_str == \"ET\":\n time = 0\n minute = -1\n status = 'd'\n elif time_str == \"Final\":\n time = 0\n minute = 90\n status = 'f'\n elif re.match(\".*[\\d]{2}:[\\d]{2} UK\", time_str):\n time = re.search(\".*([\\d]{2}):([\\d]{2}) UK\", time_str).groups()\n time = datetime.time(int(time[0]), int(time[1]))\n minute = 0\n status = 'o'\n elif re.match(\".*[\\d]{1,3}\\'\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3})\\'\", time_str).groups()[0]\n status = 'd'\n elif re.match(\".*[\\d]{1,3} min\", time_str):\n time = 0\n minute = re.search(\"([\\d]{1,3}) min\", time_str).groups()[0]\n status = 'd'\n elif time_str == \"1st\":\n time = 0\n minute = -4\n status = 'd'\n elif time_str == \"2nd\":\n time = 0\n minute = -2\n status = 'd'\n else:\n time = 0\n minute = 0\n status = 'c'\n\n return time, minute, status\n\n # (o)pen / (s)tarted / (f)inished\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop_inGame'}).contents\n if type(t) == type([]) and len(t) > 0:\n return match(str(t[0]).strip())\n else:\n pass\n except AttributeError:\n 
pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).a.contents\n if type(t) == type([]):\n return match(str(t[0]).strip())\n else:\n pass\n\n except AttributeError:\n pass\n\n try:\n t = gc.find(name='div', attrs={'class': 'teamTop'}).contents\n if type(t) == type([]):\n if str(t[0]).strip() == \"Postp.\": # match postponed\n return 0, 0, 'p'\n else: # match cancelled or sth ;)\n return 0, 0, 'c'\n else:\n pass\n\n except AttributeError:\n pass\n\n return False, False, False", "def unparse(dt_or_rel):\n if isinstance(dt_or_rel, SMPPRelativeTime):\n return unparse_relative_time(dt_or_rel)\n return unparse_absolute_time(dt_or_rel)", "def from_knx(self, payload) -> time.struct_time:\n return self.dpt_class.from_knx(payload.value)", "def parse_publish_time(ep_info):\n\n return ep_info.find('div', {'class': 'l'}).get_text().encode('utf-8')[14:33]", "def parse_rss_timestamp(timestamp):\n import datetime as dt\n return dt.datetime(*timestamp[:7])", "def decode(self, data):\r\n if '+' in data:\r\n data = data[:data.index('+')]\r\n\r\n try:\r\n dt = datetime(year=int(data[0:4]), month=int(data[5:7]),\r\n day=int(data[8:10]), hour=int(data[11:13]),\r\n minute=int(data[14:16]), second=int(data[17:19]),\r\n microsecond=int(data[20:]))\r\n except ValueError:\r\n return Time()\r\n\r\n return Time.from_sec(time.mktime(dt.timetuple()))", "def _ParseTimeElements(self, time_elements_structure):\n try:\n if len(time_elements_structure) == 5:\n month_string, day_of_month, hours, minutes, seconds = (\n time_elements_structure)\n\n milliseconds = None\n else:\n _, month_string, day_of_month, hours, minutes, seconds, milliseconds = (\n time_elements_structure)\n\n month = self._GetMonthFromString(month_string)\n\n self._UpdateYear(month)\n\n relative_year = self._GetRelativeYear()\n\n if milliseconds is None:\n time_elements_tuple = (\n relative_year, month, day_of_month, hours, minutes, seconds)\n\n date_time = dfdatetime_time_elements.TimeElements(\n is_delta=True, time_elements_tuple=time_elements_tuple)\n\n else:\n time_elements_tuple = (\n relative_year, month, day_of_month, hours, minutes, seconds,\n milliseconds)\n\n date_time = dfdatetime_time_elements.TimeElementsInMilliseconds(\n is_delta=True, time_elements_tuple=time_elements_tuple)\n\n return date_time\n\n except (TypeError, ValueError) as exception:\n raise errors.ParseError(\n 'Unable to parse time elements with error: {0!s}'.format(exception))", "def parse_line(line):\n log_line = LogLine(line)\n dt = datetime.datetime.strptime(log_line.line[0], \"%Y-%m-%d %H:%M:%S\")\n # make a tuple with dt and the rest (splatted)\n return (dt, *log_line.line[1:])", "def convert_timestamp(ts):\n format = '%Y-%m-%d %H:%M:%S'\n return datetime.strptime(ts, format)", "def convert_timestamp_to_object(data):\n for k, value in data.items():\n value_type = value.split(\"::\", 1)[0]\n if value_type == \"datetime\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = datetime.fromtimestamp(timestamp)\n elif value_type == \"date\":\n timestamp = int(value.split(\"::\", 1)[1])\n value = date.fromtimestamp(timestamp)\n data[k] = value\n return data", "def parse_time(time_string):\n times = time_string.split(\"\\n\")\n\n user_time_str = times[-2].split(\"\\t\")[-1]\n sys_time_str = times[-1].split(\"\\t\")[-1]\n\n #print user_time_str, sys_time_str\n\n user_time = parse_m_s(user_time_str)\n sys_time = parse_m_s(sys_time_str)\n\n return user_time + sys_time", "def parseLineWTime ( tupl ):\n\n\ttry:\n\t\th1,m1 = tupl[0].split(\":\")\n\t\th2,m2 = 
tupl[2].split(\":\")\n\t\tactivity = tupl[3] \t\n\t\tnotes = tupl[4]\n\texcept IndexError:\n\t\treturn 0.0, \"unk\", []\n\n\tt1 = timedelta(hours=int(h1), minutes=int(m1))\n\tt2 = timedelta(hours=int(h2), minutes=int(m2))\n\n\tdelta = t2 - t1\n\n\treturn delta.seconds / (3600.0) , activity, notes", "def _parse_time_str(self, time_str):\n time_fmt = \"%I:%M%p\"\n time_str = re.sub(\n r\":+\",\n \":\",\n re.sub(r\"\\s+\", \"\", re.sub(r\"to|from|\\.\", \"\", time_str.lower())).replace(\n \"o\", \"0\"\n ),\n )\n if \":\" not in time_str:\n time_fmt = \"%I%p\"\n elif len(time_str) < 6:\n time_fmt = \"%I%p\"\n time_str = time_str.replace(\":\", \"\")\n return datetime.strptime(time_str, time_fmt).time()", "def _get_datetime(s):\n\n # It would be nice to be able to define a single format string \n # for use with datetime.strptime, but as seen in the examples, \n # Kindle clipping files can express datetimes in slightly \n # different ways. Therefore, we first have to normalize the components \n # of the Kindle datetime string into a consistent form. \n # The normalized form has no commas and always specifies seconds:\n # \"April 22 2018 12:33:10 PM\"\n \n # Use the DATETIME_REGEX regex which contains these groups:\n # 1: Month (en_US spelling of months)\n # 2: Day\n # 3: Year\n # 4: Hour\n # 5: Minute\n # 6: Optional seconds\n # 7: AM/PM (en_US spelling)\n \n month = None\n day = None\n year = None\n hour = None\n minute = None\n seconds = '00' # the Kindle datetime may not specify any seconds\n period = 'AM'\n \n for match in datetime_regex.finditer(s):\n month = match.group(1)\n day = match.group(2).zfill(2) #zero padded, two digits\n year = match.group(3)\n hour = match.group(4).zfill(2) #zero padded, two digits\n minute = match.group(5)\n if match.group(6) is not None:\n seconds = match.group(6)\n period = match.group(7)\n break \n\n\n normalized_string = \"%s %s %s %s:%s:%s %s\" % (month, day, year, hour, minute, seconds, period) \n dt = datetime.datetime.strptime(normalized_string, \"%B %d %Y %I:%M:%S %p\")\n return dt", "def try_parse_datetime(log_line: str) -> Optional[time.struct_time]:\n match = TIMESTAMP_RE.search(log_line)\n if not match:\n return None\n\n timestamp_str = match.group(0)\n try:\n timed, weekdayed, time_struct = OrgDate().parse_datetime(timestamp_str)\n return time_struct\n except AttributeError:\n # 'NoneType' object has no attribute 'group'\n return None", "def time_convert(time):\n try:\n time_data = str(time)\n if time_data:\n try:\n time_data = datetime.strptime(time_data, '%Y%m%d')\n except Exception:\n time_data = datetime.strptime(time_data, '%Y%m%d%H%M%S')\n time_data = time_data.strftime('%Y-%m-%d')\n return time_data\n except Exception:\n return False", "def parse_timespan(unparsed):\n pattern = '%H:%M:%S'\n return datetime.strptime(unparsed, pattern) - datetime.strptime('00:00:00', pattern)", "def timeConvert(time):\n\n FMTin = '%Y-%m-%d %H:%M:%S'\n FMTout = '%m/%d/%y'\n\n return datetime.strftime(datetime.strptime(time, FMTin), FMTout)", "def convert_to_time(value):\n if isinstance(value, datetime.time):\n return value\n elif isinstance(value, str):\n return datetime.time.fromisoformat(value)\n else:\n return datetime.time(value)", "def parseTime(string):\t\n \n if string == \"\":\n result = None\n if 'T' in string:\n string = string.replace('T', ' ')\n if 'Z' in string:\n string = string.replace('Z', '') \n\n if len(string) < 19:\n # string has some single digits\n p = \"\"\"^([0-9]{4})-([0-9]{1,2})-([0-9]{1,2}) \n 
([0-9]{1,2}):([0-9]{1,2}):([0-9]{1,2}).*$\"\"\"\n s = re.findall(p, string)\n if len(s) > 0:\n string = '{0}-{1:02d}-{2:02d} {3:02d}:{4:02d}:{5:02d}'\\\n .format(*[int(x) for x in s[0]])\n\n for date_format in DATE_FORMATS:\n try:\n result = datetime.datetime.strptime(string, date_format)\n except ValueError:\n pass\n\n return result", "def parse_timestamp(ts_str):\n dt = dateutil.parser.parse(ts_str)\n return (time.mktime(dt.timetuple()) + dt.microsecond/1000000.0)", "def convert_timestamp(data):\n try:\n return datetime.datetime.fromtimestamp(float(data))\n except ValueError:\n return datetime.datetime.fromisoformat(data.decode(\"utf-8\"))", "def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires a complete time\")\n t[3] = self.hour\n t[4] = self.minute\n t[5] = self.second\n t[8] = -1", "def fromisoformat(string):\n string = string.replace(\"T\", \" \")\n if \".\" in string:\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S.%f\")\n return datetime.strptime(string, \"%Y-%m-%d %H:%M:%S\")", "def _get_time(self): \n\t\t# need to variable-ize the version ??? \n\t\ttime = self.root.find('.//{http://www.opengis.net/kml/2.2}when').text\n\t\t## strip off last 5 chars, ie '.135Z in '2015-08-01T00:06:29.135Z'\n\t\tutc = dateutil.tz.tzutc() \n\t\tcentral = dateutil.tz.gettz('America/Chicago')\n\t\ttime = datetime.datetime.strptime(time[:-5], '%Y-%m-%dT%H:%M:%S')\n\t\ttime = time.replace(tzinfo=utc)\n\t\tself.time = time.astimezone(central)", "def _parse_datetime(self, data):\n d = data.find('./itdDate').attrib\n t = data.find('./itdTime').attrib\n\n # -1 means nope, there is no time known\n if d['weekday'] == '-1' or d['day'] == '-1' or t['minute'] == '-1':\n return None\n\n # convert time – the EFA API likes to talk about 24:00, so we have to correct that.\n result = datetime(int(d['year']), int(d['month']), int(d['day']), min(int(t['hour']), 23), int(t['minute']))\n if int(t['hour']) == 24:\n result += timedelta(hours=1)\n return result", "def _marshal_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour=0,\n tm_min=0,\n tm_sec=0,\n tm_wday=-1,\n tm_yday=-1,\n tm_isdst=-1,\n ):\n _struct_time(\n tm_year,\n tm_mon,\n tm_mday,\n tm_hour,\n tm_min,\n tm_sec,\n tm_wday,\n tm_yday,\n tm_isdst,\n )", "def UpdateStructTime(self, t):\n if not self.Complete():\n raise DateTimeError(\"UpdateStructTime requires complete date\")\n t[0] = self.century * 100 + self.year\n t[1] = self.month\n t[2] = self.day\n t[6] = self.GetWeekDay()[4] - 1\n t[7] = self.GetOrdinalDay()[2]", "def parse_time_into(option_name, default=None):\r\n return parse_amount_into(parse_time, option_name, default=default)", "def parse_time(tm):\n tm_parts = tm.split()\n # Time stamps from gitiles sometimes have a UTC offset (e.g., -0800), and\n # sometimes not. 
time.strptime() cannot parse UTC offsets, so if one is\n # present, strip it out and parse manually.\n timezone = None\n if len(tm_parts) == 6:\n tm = ' '.join(tm_parts[:-1])\n timezone = tm_parts[-1]\n dt = datetime.datetime.strptime(tm, \"%a %b %d %H:%M:%S %Y\")\n if timezone:\n m = re.match(r'([+-])(\\d\\d):?(\\d\\d)?', timezone)\n assert m, 'Could not parse time zone information from \"%s\"' % timezone\n timezone_delta = datetime.timedelta(\n hours=int(m.group(2)), minutes=int(m.group(3) or '0'))\n if m.group(1) == '-':\n dt += timezone_delta\n else:\n dt -= timezone_delta\n return dt", "def parse_time(time: Union[str, datetime]) -> datetime:\n if isinstance(time, str):\n try:\n from ciso8601 import parse_datetime # pylint: disable=wrong-import-position # noqa: F401\n return parse_datetime(time)\n except (ImportError, ValueError): # pragma: no cover\n return dateutil.parser.parse(time)\n\n return time", "def _get_time(self) -> None:\n self.data[\"time\"] = np.zeros(len(self.data[\"yyyymmdd\"]), dtype=object)\n \n for idx, (yyyymmdd, hhmmss) in enumerate(zip(self.data[\"yyyymmdd\"], self.data[\"hhmmss\"])):\n year, month, day = yyyymmdd.split(\"/\")\n hour, minute, second = hhmmss.split(\":\")\n self.data[\"time\"][idx] = datetime(int(year), int(month), int(day), int(hour), int(minute), int(second))\n \n del self.data[\"yyyymmdd\"]\n del self.data[\"hhmmss\"]", "def parse_time(s: str):\n return utils.parsers.parse_eng_unit(s, base_unit='s', default=1e-12)", "def time_pars_evt(evt):\n evtid = evt.get(_psana.EventId)\n tsec, tnsec = evtid.time()\n fid = evtid.fiducials()\n date = strftime('%Y-%m-%d', localtime(tsec))\n time = strftime('%H:%M:%S', localtime(tsec))\n return tsec, tnsec, fid, date, time" ]
[ "0.7229315", "0.67260563", "0.6565747", "0.6513471", "0.63544554", "0.6352278", "0.63428724", "0.62921166", "0.6273469", "0.6266583", "0.62201977", "0.62145513", "0.6209023", "0.61696506", "0.6159586", "0.61529684", "0.61013764", "0.608812", "0.59947324", "0.5987127", "0.5983697", "0.59629583", "0.59613794", "0.59556013", "0.59532315", "0.5933886", "0.59100986", "0.5908248", "0.58864784", "0.5873513", "0.5872735", "0.583352", "0.5832635", "0.5830998", "0.5805237", "0.5796808", "0.5788809", "0.5785493", "0.5781999", "0.5766711", "0.57619494", "0.5752757", "0.57496667", "0.57380193", "0.5736113", "0.57350343", "0.5732222", "0.56915694", "0.56913054", "0.5690769", "0.568529", "0.56815606", "0.56733", "0.5670221", "0.5659051", "0.56550974", "0.5648305", "0.5641751", "0.5638131", "0.56364423", "0.56360835", "0.56246996", "0.5621031", "0.56164503", "0.56153226", "0.56151843", "0.5612576", "0.5611273", "0.5608223", "0.5603819", "0.56024396", "0.5599563", "0.5598333", "0.5593591", "0.5587255", "0.5580896", "0.5574909", "0.5569359", "0.55685335", "0.55608785", "0.55600077", "0.55580735", "0.55562216", "0.55428946", "0.55416065", "0.55415004", "0.5535297", "0.55326474", "0.5530681", "0.55273014", "0.5524542", "0.5519265", "0.5512509", "0.5499779", "0.5499281", "0.549866", "0.549727", "0.5492248", "0.54913104", "0.5488709" ]
0.7049241
1
Use feedparser to parse PyBites RSS feed. Return a list of Entry namedtuples (date = date, drop time part)
def get_feed_entries(feed=FEED) -> list:
    f = feedparser.parse(feed)
    entry_list = []
    for entry in f.entries:
        date = _convert_struct_time_to_dt(entry["published_parsed"])
        title = entry["title"]
        link = entry["link"]
        tags = [tag["term"].lower() for tag in entry["tags"]]
        entry_list.append(Entry(date=date, title=title, link=link, tags=tags))
    return entry_list
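The function above relies on three names defined elsewhere in the exercise: FEED, Entry, and _convert_struct_time_to_dt. Below is a minimal sketch of those assumed supporting definitions so the snippet can be run end to end; the feed URL and the Entry field order are assumptions, and the helper mirrors the struct_time-to-date conversion (time part dropped) that the query asks for.

from collections import namedtuple
from datetime import datetime
from time import mktime

import feedparser

# Assumed module-level names used by get_feed_entries (not shown in the record above).
FEED = "https://pybit.es/feed/"  # assumed PyBites RSS feed URL
Entry = namedtuple("Entry", "date title link tags")


def _convert_struct_time_to_dt(stime):
    # Convert feedparser's published_parsed (a time.struct_time) to a date,
    # dropping the time part as required by the query.
    return datetime.fromtimestamp(mktime(stime)).date()


# Example usage, together with get_feed_entries defined above:
# entries = get_feed_entries()
# print(entries[0].date, entries[0].title, entries[0].tags)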
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feed_entries(feed=FEED):\n d = feedparser.parse(feed)\n entries = d.entries\n \n all_entries =[]\n for entry in entries:\n title = entry.title\n link = entry.link\n date = entry.published_parsed\n tags = entry.tags\n tags = [t.get('term').lower() for t in tags]\n\n date = _convert_struct_time_to_dt(date)\n\n\n entry = Entry(date,title,link,tags)\n all_entries.append(entry)\n\n return all_entries", "def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def rss_fetch():\n items = {}\n\n def add_item(pubDate, title, link):\n nonlocal items\n idx = float(parsedate_to_datetime(pubDate).timestamp())\n while idx in items:\n idx = idx + 0.1\n dbg(\"Adding item: %11.1f \\\"%s\\\" %s\" % (idx, title, link))\n items[idx] = {}\n items[idx]['title'] = title\n items[idx]['link'] = link\n\n state = \"\" # state parser is in (\"\", \"item\", \"title\", \"link\", \"pubDate\")\n title = \"\" # Currently parsing this title.\n link = \"\" # \" \" \" link\n pubDate = \"\" # \" \" \" pubDate (index)\n\n def start_element(name, attrs):\n nonlocal state\n nonlocal title\n nonlocal link\n nonlocal pubDate\n dbg(\"Start: %s %s %s\" %(name, str(attrs), str((state, title, link, pubDate))))\n if state == \"\":\n if name == \"item\":\n state = \"item\"\n elif state == \"item\":\n if name == \"title\":\n state = \"title\"\n if title:\n prn(\"Two titles?\")\n sys.exit(1)\n elif name == \"link\":\n state = \"link\"\n if link:\n prn(\"Two links?\")\n sys.exit(1)\n elif name == \"pubDate\":\n state = \"pubDate\"\n if pubDate:\n prn(\"Two pubDates?\")\n sys.exit(1)\n\n\n def end_element(name):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"End: %s %s\" % (name, str((state, title, link, pubDate))))\n if state == \"item\":\n if name == \"item\":\n if title == \"\":\n prn(\"No title at end item.\")\n sys.exit(1)\n if link == \"\":\n prn(\"No link at end item.\")\n sys.exit(1)\n if pubDate == \"\":\n prn(\"No pubDate at end item.\")\n sys.exit(1)\n else:\n add_item(pubDate, title, link)\n state = \"\"\n title = \"\"\n link = \"\"\n pubDate = \"\"\n elif state == \"title\":\n if name == \"title\":\n state = \"item\"\n elif state == \"link\":\n if name == \"link\":\n state = \"item\"\n elif state == \"pubDate\":\n if name == \"pubDate\":\n state = \"item\"\n\n def char_data(data):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"Data: %s %s)\" % (str(data), str((state, title, link, pubDate))))\n if state == \"title\":\n title = title + data\n elif state == \"link\":\n link = link + data\n elif state == \"pubDate\":\n pubDate = pubDate + data\n\n\n p = xml.parsers.expat.ParserCreate(\"UTF-8\")\n\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n with urllib.request.urlopen('https://news.ycombinator.com/rss') as f:\n xml_file = b\"\"\n while 
True:\n r = f.read(255)\n if r:\n xml_file = xml_file + r\n else:\n break\n\n try:\n p.Parse(xml_file.decode(\"UTF-8\"), True)\n except:\n dbg(\"Writing fetched RSS feed to file...\")\n err_f = open(parse_error_output_file, \"ab\")\n err_f.write(b\"GET URL: \")\n err_f.write(f.geturl().encode(\"UTF-8\"))\n err_f.write(b\"\\nReturn Code: \")\n err_f.write((\"%d\\n\" % (f.getcode(), )).encode(\"UTF-8\"))\n err_f.write(b\"Meta Info:\\n\")\n err_f.write(f.info().as_bytes(unixfrom=True))\n err_f.write(b\"XML output:\\n\")\n err_f.write(xml_file)\n err_f.close()\n dbg(\"Done.\")\n raise\n\n return items", "def parse_rss(database, feed, depth=1):\n # Get the updates article count, and article urls and publish dates.\n rss_a = rss_feed(feed)\n \n # Get all (article urls, publish dates) pairs\n articles = []\n pairs = rss_a[1].items()\n for url, pubdate in pairs: \n articles += crawl_url(database, url, date=pubdate, depth=depth)\n \n return articles", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def rss_feed(rss_url):\n try:\n # Use feedparser to analyze given RSS feed, if it is valid RSS.\n d = feedparser.parse(rss_url)\n except:\n return \"Sorry, invalid RSS feed. Please check and try again later.\"\n \n total = len(d['entries'])\n updates = dict()\n for index, item in enumerate(d['entries']):\n # Convert publish time from ctime format to iso-time format.\n a_time = time_convert(item.published)\n # Set article url ad dictionary key, with publish date as value. 
\n updates[str(item.link)] = a_time \n return (total, updates)", "def get_news(url):\r\n \r\n # parse RSS feed into list of dictionaries\r\n feed = feedparser.parse(url)\r\n\r\n # no RSS feed articles for url\r\n if len(feed['entries']) == 0:\r\n return []\r\n \r\n # get first ten articles from the RSS feed\r\n news = []\r\n i = 0\r\n while True:\r\n if i == len(feed['entries']) or i > 30:\r\n break\r\n \r\n try:\r\n # get link to article\r\n link = feed[\"entries\"][i][\"link\"]\r\n\r\n # get title of article\r\n title = feed[\"entries\"][i][\"title\"]\r\n \r\n try:\r\n # get raw summary of article\r\n summary_raw = feed[\"entries\"][i][\"summary\"]\r\n \r\n # format summary\r\n summary = \"\"\r\n for c in summary_raw:\r\n if c == \"<\":\r\n summary += \"...\"\r\n break\r\n summary += c\r\n except KeyError as e:\r\n logging.error(\"no summary for RSS feed article: {}\".format(link))\r\n summary = \"read more here...\"\r\n \r\n # get raw date \r\n date_raw = feed[\"entries\"][i][\"published_parsed\"]\r\n \r\n if date_raw is None:\r\n date = feed[\"entries\"][i][\"published\"]\r\n \r\n else:\r\n # format date\r\n year = str(date_raw.tm_year)\r\n months = [\"January\", \"February\", \"March\", \"April\", \"May\", \"June\", \"July\", \"August\", \"September\", \"October\", \"November\", \"December\"]\r\n month = months[date_raw.tm_mon - 1]\r\n day = str(date_raw.tm_mday)\r\n weekdays = [\"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \"Friday\", \"Saturday\", \"Sunday\"]\r\n wday = weekdays[date_raw.tm_wday]\r\n hour = str(date_raw.tm_hour)\r\n hour = \"{:2}\".format(hour).format(' ','0')\r\n min = str(date_raw.tm_min)\r\n min = \"{:2}\".format(min).replace(' ','0')\r\n date = hour + \":\" + min + \" - \" + wday + \" \" + month + \" \" + day + \", \" + year\r\n \r\n # compile entry and append to news list\r\n entry = {\"link\":link, \"title\":title, \"date\":date, \"summary\":summary}\r\n \r\n # sanitize entry\r\n for key in entry:\r\n # apostrophe\r\n entry[key] = entry[key].replace(\"&#39;\", \"'\")\r\n # right single quotation mark\r\n entry[key] = entry[key].replace(\"’\", \"&#8217;\")\r\n # left single quotation mark\r\n entry[key] = entry[key].replace('\"', \"&#8216;\")\r\n # right double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8221;\")\r\n # left double quotation mark\r\n entry[key] = entry[key].replace(\"'\", \"&#8220;\")\r\n # Weird ampersand formatting\r\n entry[key] = entry[key].replace(\"&amp;\", \"&\")\r\n \r\n # prepare entry for sqlite queries\r\n entry[key] = surround(entry[key])\r\n \r\n # add entry to news list\r\n news.append(entry)\r\n \r\n # max 10 entries\r\n if len(news) == 10:\r\n break\r\n i += 1\r\n \r\n except Exception as e:\r\n logging.error(e)\r\n i += 1\r\n pass\r\n \r\n # success\r\n return news", "def feed2fields(file):\r\n import feedparser\r\n d = feedparser.parse(file)\r\n for entry in d.entries:\r\n date = (time.strftime(\"%Y-%m-%d %H:%M\", entry.updated_parsed)\r\n if hasattr(entry, \"updated_parsed\") else None)\r\n author = entry.author if hasattr(entry, \"author\") else None\r\n tags = [e['term'] for e in entry.tags] if hasattr(entry, \"tags\") else None\r\n\r\n slug = slugify(entry.title)\r\n kind = 'article'\r\n yield (entry.title, entry.description, slug, date, author, [], tags,\r\n kind, \"html\")", "def get_posts(url):\r\n feed = feedparser.parse(url)\r\n return feed.entries", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check 
URL(is RSS?) and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items", "def get_news(rss_feed):\r\n\r\n class _CurrentData(object):\r\n \"\"\"Class holding a set of current attributes.\"\"\"\r\n item = None\r\n text = None\r\n\r\n def _start_element_handler(name, attrs):\r\n \"\"\"Handle XML start-elements.\"\"\"\r\n if name == 'item':\r\n # Allocate a new item.\r\n current.item = NewsItem()\r\n\r\n def _end_element_handler(name):\r\n \"\"\"Handle XML end-elements.\"\"\"\r\n if name == 'item':\r\n news_items.append(current.item)\r\n elif name in ('title', 'description', 'link', 'category'):\r\n try:\r\n setattr(current.item, name, current.text)\r\n except AttributeError:\r\n # The parser has run into a non-news item.\r\n pass\r\n\r\n def _char_data_handler(data):\r\n \"\"\"Handle XML element character data.\"\"\"\r\n current.text = data\r\n\r\n news_items = list()\r\n current = _CurrentData()\r\n\r\n parser = expat.ParserCreate()\r\n parser.StartElementHandler = _start_element_handler\r\n parser.EndElementHandler = _end_element_handler\r\n parser.CharacterDataHandler = _char_data_handler\r\n\r\n news_handle = urllib2.urlopen(rss_feed)\r\n xml_data = news_handle.read()\r\n \r\n parser.Parse(xml_data)\r\n\r\n return news_items", "def fetch_feeds(self):\n feed_list = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n rss_title = rss.get('title', '-')\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n feed_list.append({\n 'title':rss_title,\n 'href':rss_href,\n 'status': feed.get('status', 400),\n 'updated': feed.get('updated', None),\n 'updated_parsed': feed.get('updated_parsed', None),\n 'encoding': feed.get('encoding', None),\n 'bozo': feed.get('bozo', None),\n 'headers': feed.get('headers', {}),\n 'etag': feed.get('etag', None),\n 'version': feed.get('version', None),\n 'entries': feed.get('entries', []),\n 'namespaces': feed.get('namespaces', None)\n })\n\n return feed_list", "def get_rss_feed(feed_key):\n\n if rss_feeds[feed_key]['updated'] is None:\n # Update Cache\n entries = update_cache(feed_key)\n elif (datetime.datetime.today() - rss_feeds[feed_key]['updated']).seconds > (60 * 5):\n # Update Cache\n entries = update_cache(feed_key)\n else:\n # Read Cache\n entries = get_cache(feed_key)\n\n return entries", "def get_feed_entries(helper, name, start, stats):\n feed_url = helper.get_arg('feed_url')\n feed_creds = helper.get_arg('credentials')\n feed_headers = {}\n # If auth is specified, add it as a header.\n if feed_creds is not None:\n auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])\n auth = base64.encodestring(auth).replace('\\n', '')\n feed_headers['Authorization'] = 'Basic {0}'.format(auth)\n\n # Pull events as json.\n resp = helper.send_http_request(\n url=feed_url,\n method='GET',\n parameters={'v': 'json', 'tr': 1},\n headers=feed_headers,\n verify=VERIFY_CERTIFICATE,\n )\n\n # Raise exceptions on problems.\n resp.raise_for_status()\n feed_entries = resp.json()\n\n # Return the normalized events to be saved to the kv store.\n return normalized(name, feed_entries, start)", "def download_feed_return_objects(rss_url):\r\n try:\r\n feed_obj = rss_exists(rss_url)\r\n except:\r\n yield None\r\n return\r\n\r\n feed_obj_found = False\r\n feed_parser_results, success = get_rss(rss_url)\r\n\r\n if feed_parser_results is None:\r\n 
error_reporter.captureMessage(u'Feed Parser results is None', **dict(rss_url=rss_url))\r\n yield None\r\n return\r\n\r\n if feed_obj is None:\r\n feed_obj = create_new_feed(feed_parser_results, rss_url)\r\n else:\r\n feed_obj_found = True\r\n\r\n feed_id = feed_obj.id\r\n feed_obj.title = feed_parser_results.get(\"title\", \"\") or \"\"\r\n max_length_field(feed_obj, 'title', 100)\r\n\r\n feed_obj.status_code = feed_parser_results.get(\"status\", \"\") or 200\r\n feed_obj.status = find_feed_status_from_scode(feed_obj)\r\n\r\n feed_obj.etag = cut_clean_etag(feed_parser_results.get(\"etag\", \"\"))\r\n\r\n updated_date = feed_parser_results.get(\"updated_parsed\")\r\n feed_obj.updated = dt.fromtimestamp(mktime(updated_date)) if updated_date is not None else dt.utcnow()\r\n #\tfeed_obj.published = dt.fromtimestamp(mktime(published_date)) if published_date is not None else None\r\n feed_obj.last_check = dt.utcnow()\r\n\r\n # We could be creating a new feed, or updating the existing one.\r\n yield feed_obj\r\n rss_posts = []\r\n\r\n for feed_article in feed_parser_results.get(\"entries\", []):\r\n ptime = feed_article.get(\"published_parsed\", None)\r\n post_date = dt.fromtimestamp(mktime(ptime)) if ptime is not None else dt.utcnow()\r\n #\t\tprint \"%r\" % post\r\n p = Post(\r\n id=uuid.uuid1(),\r\n title=feed_article.get(\"title\", \"\"),\r\n author=feed_article.get(\"author\", \"\"),\r\n href=feed_article.get(\"href\", \"\"),\r\n post_id=feed_article.get(\"id\", \"\"),\r\n published_at=post_date,\r\n feed_id=feed_id\r\n )\r\n\r\n p.original_title = max_length_field(p, 'title', 200)\r\n p.original_author = max_length_field(p, 'author', 200)\r\n\r\n p.content_html = feed_article.get(\"content\", \"\") or \"\"\r\n\r\n if feed_article.has_key(\"media_content\"):\r\n media_contents = feed_article.get(\"media_content\", []) or []\r\n if media_contents is not None and (not isinstance(media_contents, basestring)) and isinstance(\r\n media_contents, collections.Iterable):\r\n p.media = [media.get(\"url\") for media in media_contents]\r\n\r\n hasHash = False\r\n\r\n if feed_article.has_key(\"feedburner_origlink\"):\r\n p.original_link = feed_article.get(\"feedburner_origlink\", \"\")\r\n if non_empty_str(p.original_link):\r\n p.link_hash = url_hash(safe_str(p.original_link))\r\n hasHash = True\r\n\r\n if feed_article.has_key(\"link\"):\r\n p.href = feed_article.get(\"link\", \"\")\r\n if not hasHash and non_empty_str(p.href):\r\n p.link_hash = url_hash(safe_str(p.href))\r\n hasHash = True\r\n\r\n if not hasHash:\r\n print \"Post don't have any hash\"\r\n\r\n p.title_hash = url_hash(safe_str(p.title)) if non_empty_str(p.title) else \"\"\r\n p.post_id_hash = url_hash(safe_str(p.post_id)) if non_empty_str(p.post_id) else \"\"\r\n\r\n if feed_article.has_key(\"tags\"):\r\n if isinstance(feed_article['tags'], collections.Iterable):\r\n p.tags = [pst.get(\"term\") for pst in feed_article['tags']]\r\n\r\n rss_posts.append(p)\r\n\r\n has_posts = len(rss_posts) > 0\r\n post_id_hashes = [p.post_id_hash for p in rss_posts]\r\n #\tpost_title_hashes = [p.title_hash for p in rss_posts]\r\n post_link_hashes = [p.link_hash for p in rss_posts]\r\n\r\n found_posts_id_hashes = []\r\n found_posts_link_hashes = []\r\n\r\n if feed_obj_found and has_posts:\r\n existing_posts = find_existing_posts(feed_id, post_id_hashes, post_link_hashes)\r\n\r\n for ex_post_id_hash, ex_link_hash in existing_posts:\r\n found_posts_id_hashes.append(ex_post_id_hash)\r\n found_posts_link_hashes.append(ex_link_hash)\r\n\r\n has_existing_posts = 
len(found_posts_id_hashes) > 0 or len(found_posts_link_hashes) > 0\r\n\r\n new_post_count = 0\r\n if has_posts:\r\n for rss_post in rss_posts:\r\n should_skip = False\r\n\r\n if has_existing_posts:\r\n if non_empty_str(rss_post.post_id_hash) and rss_post.post_id_hash in found_posts_id_hashes:\r\n should_skip = True\r\n elif rss_post.link_hash in found_posts_link_hashes:\r\n should_skip = True # \"Link Hash found in existing records\"\r\n\r\n if not should_skip:\r\n new_post_count += 1\r\n yield rss_post\r\n\r\n feed_history = FeedHistory(id=uuid.uuid1(),\r\n feed_id=feed_obj.id,\r\n timestamp=dt.utcnow(),\r\n status=feed_obj.status_code,\r\n post_count=new_post_count,\r\n etag=feed_obj.etag)\r\n yield feed_history", "def parseWebFeed(data,date_object):\n\twebResult = []\n\tif data:\n\t\tfor repo in data['entries']:\n\t\t\tentryDate = repo['time'].split('at')[0]\n\t\t\tentryDate = entryDate.lstrip(\"0\").replace(\" 0\", \" \").strip()\n\t\t\tif entryDate == date_object:\n\t\t\t\ttext = repo['text']\n\t\t\t\tsentiment = repo['sentiment']\n\t\t\t\ttime = repo['time']\n\t\t\t\ttime = dateutil.parser.parse(time).isoformat(' ').split('+')[0] \n\t\t\t\ttime = datetime.datetime.strptime( time, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\t\titem = copy.deepcopy(templateResult)\n\t\t\t\titem['message'] = text + \" , sentiment: \" + sentiment\n\t\t\t\titem['datetime'] = time\n\t\t\t\titem['source'] = 'Web EndPoint'\n\t\t\t\twebResult.append(item)\n\treturn webResult", "def _get_dates():\n remote = os.path.join(BASE_URL, RSS_FEED)\n local = os.path.join(TMP, RSS_FEED)\n u..(remote, local)\n\n with open(local) as f:\n return PUB_DATE.findall(f.read())", "def get_rss(self):\r\n rssfiles = []\r\n \r\n rssfiles.append(feedparser.parse(self.url))\r\n return rssfiles", "def parse_feed(self):\n parsed_feed = feedparser.parse(self.rss_url)\n # Check for malformed feed\n if parsed_feed['bozo']:\n raise Exception('malformed rss feed!')\n self.parsed_feed = parsed_feed", "def _parse_feed(self,feed): \n meta=[]\n for entry in feed:\n item_meta=self._parse_entry(entry)\n item_meta['video-id']='0'\n meta.append(item_meta)\n self._logger.info('%s videos were founded and parsed at Megavideo',len(meta)) \n return meta", "def process(url):\n feed = feedparser.parse(url)\n entries = feed.entries\n ret = []\n for entry in entries:\n print entry\n guid = entry.guid\n title = translate_html(entry.title)\n link = entry.link\n summary = translate_html(entry.summary)\n try:\n subject = translate_html(entry.tags[0]['term'])\n except AttributeError:\n subject = \"\"\n newsStory = NewsStory(guid, title, subject, summary, link)\n ret.append(newsStory)\n return ret", "def get_rss_infos():\n\n url_rss_lib = \"http://www.liberation.fr/rss\"\n soup = utils.recovery_flux_url_rss(url_rss_lib)\n\n rss_items = soup.find_all(\"li\")\n\n rss_list = []\n\n link_rss = []\n\n for ri in rss_items:\n if ri.get(\"class\") == ['rss-item']:\n rss_list.append(ri.a.get('href'))\n\n for rl in rss_list:\n soup = utils.recovery_flux_url_rss(rl)\n entre = soup.find_all('entry')\n for e in entre:\n link_rss.append(e.link.get('href'))\n\n return link_rss", "def list_feed(self):\n entities = []\n entities_j = self._get('strings/tags/module:inventory,feed:*')\n if entities_j and entities_j['feed']:\n for entity_j in entities_j['feed']:\n entities.append(Feed(entity_j, CanonicalPath('/f;{}'.format(entity_j))))\n return entities", "def request_rss(self, url):\n return feedparser.parse(url)", "def zhihu_rss_fetcher(ctx):\n URL = 'http://www.zhihu.com/rss'\n coll = 
ctx.get_mongo_collection()\n\n for entry in fetch_rss(URL).entries:\n try:\n coll.insert({'_id': entry.link})\n except DuplicateKeyError:\n continue\n ctx.new_item(TextOnlyItem(entry.title, entry.description), ['zhihu'],\n parse_entry_time(entry),\n {'id': entry.link})\n log_info(u'zhihu: new entry: {} {}'.format(entry.link,\n entry.title))", "def parse_shaarli_rss_export(rss_file):\n\n rss_file.seek(0)\n entries = rss_file.read().split('<entry>')[1:]\n for entry in entries:\n # example entry:\n # <entry>\n # <title>Aktuelle Trojaner-Welle: Emotet lauert in gefälschten Rechnungsmails | heise online</title>\n # <link href=\"https://www.heise.de/security/meldung/Aktuelle-Trojaner-Welle-Emotet-lauert-in-gefaelschten-Rechnungsmails-4291268.html\" />\n # <id>https://demo.shaarli.org/?cEV4vw</id>\n # <published>2019-01-30T06:06:01+00:00</published>\n # <updated>2019-01-30T06:06:01+00:00</updated>\n # <content type=\"html\" xml:lang=\"en\"><![CDATA[<div class=\"markdown\"><p>&#8212; <a href=\"https://demo.shaarli.org/?cEV4vw\">Permalink</a></p></div>]]></content>\n # </entry>\n\n trailing_removed = entry.split('</entry>', 1)[0]\n leading_removed = trailing_removed.strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r.strip() for r in rows if r.strip().startswith('<{}'.format(key))][0]\n\n title = str_between(get_row('title'), '<title>', '</title>').strip()\n url = str_between(get_row('link'), '<link href=\"', '\" />')\n ts_str = str_between(get_row('published'), '<published>', '</published>')\n time = datetime.strptime(ts_str, \"%Y-%m-%dT%H:%M:%S%z\")\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def parse_rss_export(rss_file):\n\n rss_file.seek(0)\n items = rss_file.read().split('<item>')\n items = items[1:] if items else []\n for item in items:\n # example item:\n # <item>\n # <title><![CDATA[How JavaScript works: inside the V8 engine]]></title>\n # <category>Unread</category>\n # <link>https://blog.sessionstack.com/how-javascript-works-inside</link>\n # <guid>https://blog.sessionstack.com/how-javascript-works-inside</guid>\n # <pubDate>Mon, 21 Aug 2017 14:21:58 -0500</pubDate>\n # </item>\n\n trailing_removed = item.split('</item>', 1)[0]\n leading_removed = trailing_removed.split('<item>', 1)[-1].strip()\n rows = leading_removed.split('\\n')\n\n def get_row(key):\n return [r for r in rows if r.strip().startswith('<{}>'.format(key))][0]\n\n url = str_between(get_row('link'), '<link>', '</link>')\n ts_str = str_between(get_row('pubDate'), '<pubDate>', '</pubDate>')\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %z\")\n title = str_between(get_row('title'), '<![CDATA[', ']]').strip() or None\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def getFeed(self):\n\n entries_xml = []\n\n for entry in self.middleware.entries:\n request = entry['request']\n response = entry.get('response')\n begin = time.localtime(request['begin'])\n entry_id = self._generateEntryTagURI(entry)\n entry_title = '%s %s ' % (request['method'], request['url'])\n\n short_url = request['url']\n max_url_len = 40\n if len(short_url) > max_url_len:\n prefix = short_url[:9]\n suffix = short_url[-max_url_len+9:]\n short_url = prefix + '...' 
+ suffix\n entry_title = '%s %s ' % (request['method'], short_url)\n\n # Make the <rz:cgi_variable> nodes into a string\n cgivars = \"\"\n for k,v in request['cgi_variables']:\n newv = escape(str(v))\n s = cgi_variable_fmt % (k, newv)\n cgivars = cgivars + s\n\n # Make the <rz:cgi_variable> nodes into a string\n wsgivars = \"\"\n for k,v in request['wsgi_variables']:\n newv = escape(str(v))\n s = wsgi_variable_fmt % (k, newv)\n wsgivars = wsgivars + s\n\n # Make the <rz:request> node\n rzrequest = rzrequest_fmt % {\n 'begin': request['begin'],\n 'cgi_variables': cgivars,\n 'wsgi_variables': wsgivars,\n 'method': request['method'],\n 'url': request['url'],\n 'body': escape(request['body']),\n }\n\n if response is not None:\n # Make the <rz:request> node\n headers = ''\n for k,v in response['headers']:\n newv = escape(str(v))\n s = header_fmt % (k, newv)\n headers = headers + s\n\n rzresponse = rzresponse_fmt % {\n 'begin': response['begin'],\n 'end': response['end'],\n 'content-length': response['content-length'],\n 'headers': headers,\n 'status': response['status'],\n 'body': escape(response['body']),\n }\n else:\n rzresponse = ''\n\n\n # Make the atom:entry/atom:content node\n content = contentfmt % {\n 'logentry_id': entry_id,\n 'rzrequest': rzrequest,\n 'rzresponse': rzresponse,\n }\n\n entry_xml = entryfmt % {\n 'entry_id':entry_id,\n 'entry_title':escape(entry_title),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', begin),\n 'summary':escape(pprint.pformat(entry)),\n 'content':content,\n }\n entries_xml.append(entry_xml)\n\n now = time.time()\n\n body = feedfmt % {\n 'title':'repoze.debug feed for pid %s' % self.middleware.pid,\n 'entries':'\\n'.join(entries_xml),\n 'feed_id':self._generateFeedTagURI(now, self.middleware.pid),\n 'updated':time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime(now)),\n }\n\n resp = Response(content_type='application/atom+xml', body=body)\n return resp", "def get_feed_entries_task():\n get_feed_entries()\n logger.info(\"Entries for Feed\")", "def fetchJournalEntries(date):\n\t\n\tpattern = '%d/%m/%Y'\n\tdatetime_object = datetime.datetime.strptime(date, pattern)\n\t\n\t#Getting the feeds from respective feed functions\n\tslackFeed = getFromSlack(datetime_object)\n\twebServiceFeed = getFromWebService(datetime_object)\n\tgithubFeed = getFromGitService(datetime_object)\n\tdynamoFeed = getFromDynamo(datetime_object)\n\t\n\t#Combining feeds into a single output\n\tentireFeed = reconcileFeed(slackFeed, webServiceFeed, githubFeed, dynamoFeed)\n\t\n\treturn entireFeed", "def fetch_feed_list(self, **args):\n return self.fetch(\"/feedlist\", **args)", "def parse_medium_rss_export(rss_file):\n\n rss_file.seek(0)\n root = etree.parse(rss_file).getroot()\n items = root.find(\"channel\").findall(\"item\")\n for item in items:\n url = item.find(\"link\").text\n title = item.find(\"title\").text.strip()\n ts_str = item.find(\"pubDate\").text\n time = datetime.strptime(ts_str, \"%a, %d %b %Y %H:%M:%S %Z\")\n \n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': '',\n 'sources': [rss_file.name],\n }", "def parse():\n G.go(SITE_URL)\n articles = []\n for article in G.doc.select(\"//li[@class='regularitem']\"):\n header = article.select('h4').text()\n text = article.select('div').text()\n url = article.select('h4/a/@href').text()\n dt_string = article.select('h5').text()\n # for date format \"1 Nov 2019 00:00:00\" or \"01 Nov 2019 00:00:00\"\n article_dt = re.search(r'\\d{1,2} [a-zA-Z]+ \\d{4} \\d{2}:\\d{2}:\\d{2}', dt_string)\n if 
article_dt is None:\n logging.exception('Datestring format is unknown: %s', dt_string)\n continue\n article_dt = article_dt.group(0)\n article_dt = datetime.datetime.strptime(article_dt, '%d %b %Y %H:%M:%S').strftime(\"%Y-%m-%d %H:%M:%S\")\n articles.append({'header': header, 'url': url, 'text': text, 'dt': article_dt})\n return articles", "def getFeedFromXXX(RSSlink):\n summary =\"\"\n link =\"\"\n if \"packetstormsecurity\" in RSSlink:\n link =\"link\"\n summary=\"summary_detail\"\n elif \"jetlib\" in RSSlink:\n link=\"id\"\n summary=\"summary\"\n myFeed=\"\"\n try:\n myFeed = feedparser.parse(RSSlink)\n except:\n print(\"problem with the db website.try to change the source db in option !\")\n return None\n entries = [item for item in myFeed.items() if \"entries\" in item]\n tupleInsideEntries =entries[0]\n #print len(tupleInsideEntries[1])#show the number of result founded\n for dicItem in tupleInsideEntries[1]:\n if dicItem.get(\"title\")==\"No Results Found\":\n return False #break from this loop if theres no result\n print (\"Title : \"+dicItem.get(\"title\"))#title\n if summary ==\"summary_detail\": #packetstormsecurity\n print (\"Description : \"+str(dicItem.get(summary).get(\"value\")))#description\n else:\n print (\"Description : \"+str(dicItem.get(summary)))\n print (\"Date : \"+dicItem.get(\"published\"))#date\n print (\"Link : \"+dicItem.get(link)) #link\n print (\"#################################################################################\")\n return True", "def fetch(feed):\n # Fetch the feed data.\n data = feedparser.parse(feed.ext_url)\n new_articles = []\n\n # If the `bozo` value is anything\n # but 0, there was an error parsing (or connecting) to the feed.\n if data.bozo:\n # Some errors are ok.\n if not isinstance(data.bozo_exception, feedparser.CharacterEncodingOverride) and not isinstance(data.bozo_exception, feedparser.NonXMLContentType):\n raise data.bozo_exception\n\n for entry in data.entries:\n\n # URL for this entry.\n url = entry['links'][0]['href']\n\n # Check for an existing Article.\n # If one exists, skip.\n if Article.objects(ext_url=url).first():\n continue\n\n data = extractor.extract(url, existing_data=entry)\n\n if data is None:\n continue\n\n # Secondary check for an existing Article,\n # by checking the title and source.\n existing = Article.objects(title=data['title']).first()\n if existing and existing.feed.source == feed.source:\n continue\n\n data['feed'] = feed\n\n article = Article(**data)\n article.save()\n new_articles.append(article)\n\n return new_articles", "def getRSS(self):\n return [rss for rss in self.rssCol.find()]", "def get_feed_list(parser, token):\n bits = token.split_contents()\n if len(bits) != 3:\n raise template.TemplateSyntaxError, \\\n \"'%s' tag takes two arguments\" % bits[0]\n\n if bits[1] != \"as\":\n raise template.TemplateSyntaxError, \\\n \"First argument to '%s' tag must be 'as'\" % bits[0]\n\n return FeedListNode(bits[2])", "def __init__(self, url=URL):\n self.entries = feedparser.parse(url).entries", "def _get_recent_feed(cls, target):\n response = feedparser.parse(\n target.link, modified=target.last_modified, etag=target.etag\n )\n\n # Some of the feeds offer one of these two tags and others none of them.\n modified = cls._time_to_date(response.get(\"modified_parsed\"))\n etag = response.get(\"etag\")\n\n # In case RSS feed doesn't support modified tag, we compute it artificially.\n if not modified:\n response.entries, modified = cls._entries_after_date(\n response.entries, target.last_modified\n )\n\n 
return response, modified, etag", "def latestEntriesRss():\n now = datetime.now()\n latestEntries = session.query(Pokemon).order_by(desc(Pokemon.date_entered))\\\n .limit(20)\n rss = render_template('rss.xml', lastBuildDate=now, entries=latestEntries)\n response = make_response(rss)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response", "def test_feed(app, status, warning):\n app.build()\n assert app.statuscode == 0\n\n feed_path = app.outdir / \"blog/atom.xml\"\n assert (feed_path).exists()\n\n with feed_path.open() as feed_opened:\n feed_tree = lxml.etree.parse(feed_opened)\n entries = feed_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(entries) == 2\n\n entry = entries[0]\n title = entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[0].attrib[\"term\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n assert categories[1].attrib[\"term\"] == \"FooTag\"\n content = entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo post content.\" in content.text\n update_time = entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n first_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n empty_entry = entries[1]\n title = empty_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Empty Post\"\n summary = empty_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary is None\n categories = empty_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 0\n content = empty_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert 'id=\"foo-empty-post\"' in content.text\n update_time = empty_entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n second_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n # check order of post based on their dates\n assert first_entry_date > second_entry_date\n\n social_path = app.outdir / \"blog/social.xml\"\n assert (social_path).exists()\n\n with social_path.open() as social_opened:\n social_tree = lxml.etree.parse(social_opened)\n social_entries = social_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(social_entries) == len(entries)\n\n social_entry = social_entries[0]\n title = social_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = social_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = social_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n content = social_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo Post Title\" in content.text", "def get_content(self, data):\n self.name = name = data['feed'].get('title')\n for feed in data['entries']:\n title = feed.get('title', 'Absence of title')\n link = feed.get('link', 'Absence of link')\n date = feed.get('published_parsed', 'Absence of date')\n img = get_img_container(link)\n summary_list = []\n links = []\n if feed.get('summary'):\n summary_list = [feed.get('summary')]\n if 
feed.get('links'):\n uncleaned_links = feed.get('links')\n links = string_handlers.get_links(uncleaned_links)\n img.extend(if_link_is_image(uncleaned_links))\n fields = 'name, title, link, date, img, content, links'\n item = namedtuple('item', fields)._make((name, title, link, date, img, summary_list, links))\n save_feed_into_cache(item)\n self.items.append(item)", "def get_feeds():\n feeds = {}\n for _configuration_key, _configuration in blogs.all():\n if not _configuration.use_generic_feeds:\n continue\n\n class EntryFeed(Feed):\n configuration = _configuration\n configuration_key = _configuration_key\n\n title_template = _configuration.feed_title_template_name\n description_template = \\\n _configuration.feed_description_template_name\n\n feed_type = feedgenerator.Rss201rev2Feed\n\n def get_site(self):\n if not hasattr(self, '_current_site'):\n self._current_site = Site.objects.get_current()\n return self._current_site\n\n def title(self):\n if self.configuration.feed_title is not None:\n return self.configuration.feed_title\n return self.get_site().name\n \n def link(self):\n if self.configuration.feed_link is not None:\n return self.configuration.feed_link\n return \"http://%s/\" % (self.get_site().domain)\n \n def description(self):\n if self.configuration.feed_description is not None:\n return self.configuration.feed_description\n return \"Latest entries on %s\" % self.get_site().name\n \n def items(self):\n items = self.configuration.model.live.all()\n return items[:self.configuration.feed_limit]\n \n def item_pubdate(self, obj):\n return obj.pub_date\n\n def item_link(self, obj):\n return self.configuration.get_entry_absolute_url(obj)\n\n if _configuration.feed_format == feed_formats.ATOM:\n # Alter the class to support Atom feeds instead of RSS.\n EntryFeed.feed_type = feedgenerator.Atom1Feed\n EntryFeed.subtitle = EntryFeed.description\n\n feeds[_configuration_key] = EntryFeed\n return feeds", "def main():\n lines_list = []\n with open(bookmark_file, 'r') as f:\n lines_list = f.readlines()\n entries_list = []\n for idx, line in enumerate(lines_list):\n entry = {}\n if re.match(r'^<DT>', line):\n entry['url'] = re.match(r'^.*HREF=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['add_date'] = re.match(r'^.*ADD_DATE=\\\"([^\\\"]+)\\\"', line).group(1)\n entry['private'] = re.match(r'^.*PRIVATE=\\\"([^\\\"]*)\\\"', line).group(1)\n entry['tags'] = re.match(r'^.*TAGS=\\\"([^\\\"]*)\\\"', line).group(1).split(',')\n entry['title'] = re.match(r'^.*<A [^>]+>(.*)</A>', line).group(1)\n if re.match(r'^<DD>', lines_list[idx + 1]):\n dd_tmp = []\n increment = 1\n try:\n while True:\n if re.match(r'^<DT>', lines_list[idx + increment]):\n break\n dd_tmp.append(re.match(r'^(<DD>)?(.*)$', lines_list[idx + increment]).group(2))\n increment += 1\n except:\n pass\n entry['description'] = '\\n'.join(dd_tmp)\n entries_list.append(entry)\n return entries_list", "def _get_current_rss_items(feed_path: str) -> List[str]:\n if os.path.isfile(feed_path):\n with open(feed_path) as xfd:\n feed_str = xfd.read()\n items = ['<item>{}'.format(ip) for ip in feed_str.split('<item>')[1:]]\n if len(items) > 0:\n items[-1] = items[-1].replace('</channel>', '').replace('</rss>', '')\n return items\n return []", "def parse_pinboard_rss_export(rss_file):\n\n rss_file.seek(0)\n root = etree.parse(rss_file).getroot()\n items = root.findall(\"{http://purl.org/rss/1.0/}item\")\n for item in items:\n url = item.find(\"{http://purl.org/rss/1.0/}link\").text\n tags = item.find(\"{http://purl.org/dc/elements/1.1/}subject\").text if 
item.find(\"{http://purl.org/dc/elements/1.1/}subject\") else None\n title = item.find(\"{http://purl.org/rss/1.0/}title\").text.strip() if item.find(\"{http://purl.org/rss/1.0/}title\").text.strip() else None\n ts_str = item.find(\"{http://purl.org/dc/elements/1.1/}date\").text if item.find(\"{http://purl.org/dc/elements/1.1/}date\").text else None\n \n # Pinboard includes a colon in its date stamp timezone offsets, which\n # Python can't parse. Remove it:\n if ts_str and ts_str[-3:-2] == \":\":\n ts_str = ts_str[:-3]+ts_str[-2:]\n\n if ts_str:\n time = datetime.strptime(ts_str, \"%Y-%m-%dT%H:%M:%S%z\")\n else:\n time = datetime.now()\n\n yield {\n 'url': url,\n 'timestamp': str(time.timestamp()),\n 'title': title or None,\n 'tags': tags or '',\n 'sources': [rss_file.name],\n }", "def parseDynamoFeed(data):\n\tdynamoResult = []\n\titems = data['Items']\n\tif items:\n\t\tfor item in items:\n\t\t\tdate = item['date']\n\t\t\tdate = dateutil.parser.parse(date).isoformat(' ').split('+')[0]\n\t\t\tdate = datetime.datetime.strptime( date, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\tname = item['name']\n\t\t\ttext = item['text']\n\t\t\titemResult = copy.deepcopy(templateResult)\n\t\t\titemResult['message'] = text\n\t\t\titemResult['author'] = name\n\t\t\titemResult['datetime'] = date\n\t\t\titemResult['source'] = 'DynamoDB'\n\t\t\tdynamoResult.append(itemResult)\n\treturn dynamoResult", "def produce_entries(self):\n # Grab and parse the feed\n feed = feedparser.parse(HTTPCache(self.main_feed).content())\n \n # Normalize feed meta data\n self.FEED_META = normalize_feed_meta(feed, self.date_fmt)\n self.FEED_META['feed.title'] += ' (with Amazon items)'\n\n # Normalize entries from the feed\n entries = normalize_entries(feed.entries)\n\n # Run through all the normalized entries...\n for e in entries:\n \n # Perform a search on the entry title, extract the items\n result = self.amazon_search(e['summary'])\n items = [ x for x in result.Items if 'Item' in x._name ]\n \n # Use each search result item to populate the templates.\n insert_items = [ self.INSERT_ITEM_TMPL % {\n 'title' : i.ItemAttributes.Title,\n 'url' : i.DetailPageURL,\n 'img' : i.SmallImage.URL\n } for i in items[:self.MAX_ITEMS] ]\n insert_out = self.INSERT_TMPL % '\\n'.join(insert_items)\n\n # Append the rendered search results onto the entry summary.\n e.data['summary'] += insert_out.decode('utf-8', 'ignore')\n \n return entries", "def check_feeds(self):\n lst = []\n for feed in self.feeds:\n feed.update()\n if feed.get_new_entries():\n lst.append(feed)\n return lst", "def RSS2format(inputfile):\n print \"START: FEED GENERATOR[ITEM OBJECT CREATOR]: \", time.time()\n xmldocument = parse(inputfile)\n feed_title = \"\"\n try:\n feed_title = xmldocument.getElementsByTagName('dc:title')[0].firstChild.data\n except IndexError as details:\n print \"Handling IndexError: \", details\n feed_title = \"Handling IndexError...\"\n except AttributeError as details:\n print \"Handling AttributeError: \", details\n feed_title = \"Handling AttributeError...\"\n # only get first 100 characters.. 
RSS\n feed_description = \"\"\n try:\n feed_description = xmldocument.getElementsByTagName('dc:description')[0].firstChild.data[:100]\n except IndexError as details:\n print \"Handling IndexError: \"\n feed_description = \"Handling IndexError\"\n except AttributeError as details:\n\tfeed_description = \"Handling AttributeError\"\n feed_link = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for link value\n feed_pubDate = xmldocument.getElementsByTagName('datestamp')[0].firstChild.data # get header datestamp for pubDate value\n feed_guid = xmldocument.getElementsByTagName('identifier')[0].firstChild.data # get header identifier for guid value\\\n # return a PyRSS2Gen object\n return PyRSS2Gen.RSSItem(\n title = feed_title,\n link = feed_link,\n description = feed_description,\n guid = feed_guid,\n pubDate = datetime.strptime(feed_pubDate.replace(\"T\", \" \").replace(\"Z\", \"\"), '%Y-%m-%d %H:%M:%S')\n )", "def parseGithubFeed(data):\n\tgitResult = []\n\tif data:\n\t\tfor entries in data:\n\t\t\ttext = entries['commit']['message']\n\t\t\tauthor = entries['commit']['author']['name']\n\t\t\ttime = entries['commit']['author']['date']\n\t\t\ttime = dateutil.parser.parse(time).isoformat(' ').split('+')[0] \n\t\t\ttime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S\" )\n\t\t\titem = copy.deepcopy(templateResult)\n\t\t\titem['message'] = text \n\t\t\titem['author'] = author\n\t\t\titem['datetime'] = time\n\t\t\titem['source'] = 'Github'\n\t\t\tgitResult.append(item)\n\treturn gitResult", "async def fetch_and_parse(self, timeout=10):\n\n headers = {}\n if self.username and self.password:\n creds = f'{self.username}:{self.password}'.encode('utf-8')\n headers['Authorization'] = f'Basic {base64.urlsafe_b64encode(creds)}'\n\n async with aiohttp.ClientSession(headers=headers) as session:\n rsp = await self._fetch(session, timeout)\n\n feed_entries = []\n if rsp:\n data = feedparser.parse(rsp)\n feed_entries = data.entries\n if data.bozo:\n self.log.error(f\"No valid RSS data from feed {self.url}: {data.bozo_exception}\")\n return feed_entries", "def _extract_data_from_feed(self):\n for eco in self.snyk_data:\n if eco == \"java\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Maven.\")\n self._add_default_obj_for_eco(\"maven\")\n self._parse_data(self.snyk_data[eco], \"maven\")\n elif eco == \"js\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Npm.\")\n self._add_default_obj_for_eco(\"npm\")\n self._parse_data(self.snyk_data[eco], \"npm\")\n elif eco == \"python\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Pypi.\")\n self._add_default_obj_for_eco(\"pypi\")\n self._parse_data(self.snyk_data[eco], \"pypi\")\n elif eco == \"golang\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Golang.\")\n self._add_default_obj_for_eco(\"golang\")\n self._parse_golang_data(self.snyk_data[eco], \"golang\")\n else:\n logger.info(\"Ignoring the ecosystem {} from the feed\".format(eco))", "def __add_entries(entries, feed):\n\n for entry in entries:\n try:\n # If there is entry with such title in this feed\n Entry.objects.get(title=entry.title, feed=feed)\n continue\n except Entry.DoesNotExist:\n pass\n\n # Try to find another entries with such title\n e = Entry.objects.filter(title=entry.title)\n # If found\n if len(e) != 0:\n e = e[0]\n # Copy all containing\n entry_obj = Entry(title=e.title,\n description=e.description,\n entry=e.entry, feed=feed)\n entry_obj.save()\n # Or create new 
Entry from scratch\n else:\n entry_name = entry.title + '.html'\n # If bad link or entry name\n try:\n urlretrieve(entry.link, entry_name)\n\n entry_file = open(entry_name)\n entry_file = File(entry_file)\n\n entry_obj = Entry(title=entry.title,\n description=entry.description,\n entry=entry_file, feed=feed)\n entry_obj.save()\n\n os.remove(entry_name)\n except:\n # Go to next entry\n continue", "def aggregator(feed, max_entries=5):\n import gluon.contrib.feedparser as feedparser\n lfeeds = isinstance(feeds,(str,unicode)) and feeds or feeds.split(\",\")\n content = DIV(A(d.channel.title,_href=d.channel.link,_rel=d.channel.description),\n UL(),_id='web2py_aggregator')\n for feed in lfeeds:\n d = feedparser.parse(feed)\n for entry in d.entries[:max_entried]:\n content[1] += LI(A(entry.title,' ',SPAN(entry.updated),\n _href=entry.link,_rel=entry.descstiption,\n _class=web2py_aggregator_link))\n return content", "def pull_feed(feed_url):\n app.logger.debug('Parsing content from %s.', feed_url)\n feed = feedparser.parse(feed_url)\n\n # Process html to remove unwanted mark-up and fix links\n post = ''\n if feed['entries']:\n soup = BeautifulSoup(feed['entries'][0].summary, 'html.parser')\n\n # Remove edited by paragraph\n soup.p.extract()\n\n # Remove final div in the feed\n feed_div = soup.find('div', class_='feed')\n children_divs = feed_div.findAll('div')\n children_divs[len(children_divs) - 1].extract()\n\n # Translate any in page links to use relative URL\n base = feed['entries'][0].summary_detail.base\n links = feed_div.select('a[href^=\"' + base + '\"]')\n for link in links:\n link['href'] = link['href'].replace(base, '')\n post = str(soup)\n\n elif feed.get('bozo_exception'):\n app.logger.error('Error retrieving feed for % with error %'.format(feed_url,\n str(feed.get('bozo_exception'))))\n return post", "def __update_feed(feed_obj):\n\n url = feed_obj.url\n feed = feedparser.parse(url)\n\n try:\n feed.feed.title\n except AttributeError:\n return\n\n # List of new entries in downloaded XML\n new_entries = feed.entries\n new_entries_titles = [entry.title for entry in new_entries]\n\n # List of current entries in database\n old_entries = Entry.objects.filter(feed=feed_obj)\n old_entries_titles = [entry.title for entry in old_entries]\n\n # Check what old entries arn't in new entries\n # They will be deleted\n for entry_title in old_entries_titles:\n if entry_title not in new_entries_titles:\n Entry.objects.get(title=entry_title, feed=feed_obj).delete()\n\n # Add all new entries\n __add_entries(new_entries, feed_obj)\n\n # Update time and save\n feed_obj.time = datetime.now()\n feed_obj.save()", "def atom_feed():\n from simblin.lib.rfc3339 import rfc3339\n posts = Post.query.filter_by(visible=True).order_by(Post.datetime.desc())\n updated = posts.first().datetime\n response = make_response(render_template('atom.xml', posts=posts, \n updated=updated, rfc3339=rfc3339))\n response.mimetype = \"application/atom+xml\"\n return response", "def check_for_new_links(feed):\n #read the feed\n feed_url = feed[\"feed_url\"]\n feed_data = feedparser.parse(feed_url)\n\n #parse out entries in the feed for the information we want\n entries = []\n for entry in feed_data.entries:\n parsed_entry = {}\n parsed_entry[\"title\"] = entry[\"title\"]\n parsed_entry[\"link\"] = entry[\"link\"]\n parsed_entry[\"published\"] = entry[\"published\"]\n parsed_entry[\"feed_url\"] = feed_url\n entries.append(parsed_entry)\n\n #check for new entries since the last known entry\n #chop off all entries starting at the 
last_seen_link\n if \"last_seen_link\" in feed:\n last_link = feed[\"last_seen_link\"]\n idx = -1\n for cidx in range(len(entries)):\n if entries[cidx][\"link\"] == last_link:\n idx = cidx\n break\n #else is a new link\n entries = entries[:idx]\n\n return list(reversed(entries))", "def list_feed(self):\n entities = []\n entities_j = self._get('traversal/type=f')\n if entities_j:\n for entity_j in entities_j:\n entities.append(Feed(entity_j['id'], CanonicalPath(entity_j['path'])))\n return entities", "def scanFeedList(self): \r\n data = self.feed_handler.listScanFeeds()\r\n data = data[:MAX_FEEDS_SCAN]\r\n for idx, feed in enumerate(data):\r\n print \"feeds ... / [%s/%s] (%s docs:%s passed)\" % (idx, len(data),self.feed_item_ctr, self.feed_passed)\r\n try:\r\n baseURL = feed.mainUrl\r\n self.processData(baseURL) \r\n self.createFeedItems()\r\n except Exception, ex:\r\n print(\"ERR: failed to process data and create feed item=%s\" % ex)\r\n print \"done\"", "def parse_feed(uri):\n\n if OUTPUT:\n print \"parsing \" + uri\n\n feed = urllib2.urlopen(uri)\n xml = minidom.parse(feed)\n \n # look for <enclosure> tags\n enclosures = xml.getElementsByTagName(\"enclosure\")\n\n # extract the url attribute from any <enclosure> tags found\n file_uris = []\n for enclosure in enclosures:\n file_uris.append(enclosure.attributes[\"url\"].value)\n\n download_files(file_uris)", "def extract_articles(self, parsed_xml):\n\n # Iterates over every item (article) in xml\n for item in parsed_xml.xpath(\"//item\"):\n\n article = {}\n\n\n article['title'] = self.get_text_or_attr(item, 'title')\n\n\n # The article's categories must be always a list, even if it has\n # only one element.\n categories = self.get_text_or_attr(item, 'category')\n\n if isinstance(categories, str):\n categories = [categories]\n\n article['categories'] = categories\n\n\n url = self.get_text_or_attr(item, 'feedburner:origLink')\n article['url'] = self.remove_query(url)\n\n self.article_url = article['url']\n\n\n # If article's URL is already stored, don't parse it again\n if Article.objects.filter(url=article['url']).count() > 0:\n continue\n\n\n # It is interesting to have the publication date as a `dateutil`\n # object, so we can do whatever manipulation we want.\n pub_date = self.get_text_or_attr(item, 'pubDate')\n article['date'] = self.parse_datetime_passing_errors(pub_date)\n\n\n # Get the author attribute and tries to fetch informations about\n # him/her. 
An article can have more than one author; on techcrunch's\n # feed, they are separated by a comma.\n author_names = self.get_text_or_attr(item, 'dc:creator').split(',')\n article['authors'] = []\n\n for i, name in enumerate(author_names):\n article['authors'] += [self.get_author(name, i)]\n\n\n # Tries to find the article's thumbnail url\n thumb = self.get_text_or_attr(item, 'media:thumbnail', 'url')\n if thumb and thumb[0]:\n article['thumb'] = self.remove_query(thumb[0])\n\n\n # Gets the article's description and strip all html tags from it\n content = self.clear_text(item.xpath('description'))\n content = content.strip(' Read More').strip('&nbsp;').strip()\n\n\n article['content'] = content\n\n\n yield article", "def parse_rss_timestamp(timestamp):\n import datetime as dt\n return dt.datetime(*timestamp[:7])", "def normalized(name, feed_entries, start):\n data = []\n for feed_entry in feed_entries:\n if 'indicator' not in feed_entry or 'value' not in feed_entry:\n continue\n\n # Make the entry dict.\n entry = feed_entry.copy()\n entry['splunk_source'] = name\n entry['splunk_last_seen'] = start\n\n data.append(entry)\n\n return data", "def filter(data, format):\n logging.debug('feed_diff.py filter')\n data_stream = cStringIO.StringIO(data)\n parser = xml.sax.make_parser()\n\n if format == 'atom':\n handler = AtomFeedHandler(parser)\n elif format == 'rss':\n handler = RssFeedHandler(parser)\n else:\n raise Error('Invalid feed format \"%s\"' % format)\n\n parser.setContentHandler(handler)\n parser.setEntityResolver(TrivialEntityResolver())\n # NOTE: Would like to enable these options, but expat (which is all App Engine\n # gives us) cannot report the QName of namespace prefixes. Thus, we have to\n # work around this to preserve the document's original namespacing.\n # parser.setFeature(xml.sax.handler.feature_namespaces, 1)\n # parser.setFeature(xml.sax.handler.feature_namespace_prefixes, 1)\n try:\n parser.parse(data_stream)\n except IOError, e:\n raise Error('Encountered IOError while parsing: %s' % e)\n\n for entry_id, content in handler.entries_map.iteritems():\n if format == 'atom' and not entry_id:\n raise Error('<entry> element missing <id>: %s' % content)\n elif format == 'rss' and not entry_id:\n raise Error('<item> element missing <guid> or <link>: %s' % content)\n\n return handler.header_footer, handler.entries_map\n\n #SMOB: Start code to return the entries_restrictions_map\n return handler.header_footer, handler.entries_map, handler.entries_restrictions_map\n #SMOB: End code", "def test_get_feeds_order_added(reader):\n parser = Parser()\n reader._parser = parser\n\n reader._now = lambda: naive_datetime(2010, 1, 1)\n feed1 = parser.feed(1, datetime(2010, 1, 2))\n reader.add_feed(feed1.url)\n\n reader._now = lambda: naive_datetime(2010, 1, 2)\n feed2 = parser.feed(2, datetime(2010, 1, 1))\n reader.add_feed(feed2.url)\n\n reader._now = lambda: naive_datetime(2009, 12, 31)\n feed3 = parser.feed(3, datetime(2010, 1, 3))\n reader.add_feed(feed3.url)\n\n assert list(f.url for f in reader.get_feeds(sort='added')) == '2 1 3'.split()\n\n reader.update_feeds()\n\n assert list(f.url for f in reader.get_feeds(sort='added')) == '2 1 3'.split()", "def get_item_group_from_feedparser(parser):\n items = list()\n\n logging.info('Loop for retrieving items.')\n for item in parser.entries:\n try:\n text, img_links = format_description(item.description)\n except AttributeError:\n continue\n\n if text:\n new_item = Item(\n title=html_unescape(item.title),\n date=item.published,\n link=item.link,\n 
text=text,\n img_links=img_links\n )\n\n items.append(new_item)\n\n return ItemGroup(feed=parser.feed.title, items=items)", "def getPosts(self):\n # TODO do we really need threading here or it can just do fine without\n allPosts = []\n threads = []\n feedTime = self.startTime\n for oneUrl in self.newsFeeds:\n thread = FeedparserThread(oneUrl, self.startTime, allPosts)\n threads.append(thread)\n thread.start()\n\n # Joining all threads into one\n for thread in threads:\n thread.join()\n\n return allPosts", "def feed(self, entry):\r\n pass", "def listFeeds(key):\n # read and parse config, collect each url\n filepath = confighome+\"config\"\n if fileAccessible(filepath,'r'):\n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n\n # for each url pull the last 5 most recent posts and print them\n str=\"\"\n for url in jconfig[1]['feeds']:\n f = feedparser.parse (url['url'])\n if 'title' not in f.feed:\n print (\"::title not found in url:\",url['url'])\n else:\n str += f.feed.title + \"\\n\" + url['url'] + \"\\n\"\n\n # gimi five\n count=1\n blockcount=1\n for post in f.entries:\n if count % 5 == 1:\n str += post.title +\" - \" + post.link +\"\\n\"\n\n count+=1\n\n str=str+\"\\n\"\n\n if key==0:\n print (str)\n if key==1:\n return str\n else:\n print(\"::unable to read\")\n sys.exit()", "def update(self):\n feed = feedparser.parse(self._schema % self.project)\n added = []\n for entry in feed['entries']:\n if entry['id'] not in self.entries:\n self.entries[entry['id']] = entry\n added.append(entry)\n return added", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def parse_news(news):\n default_value = '---'\n\n news_list = []\n for entry in news:\n title = entry.get('title', default_value)\n link = entry.get('link', default_value)\n published = entry.get('published', default_value)\n source = entry.get('source', default_value)\n description = entry.get('description', default_value)\n media_content = entry.get('media_content', default_value)\n\n source_title = default_value\n if source != default_value:\n source_title = source['title']\n\n image = default_value\n if media_content != image:\n image = media_content[0]['url']\n\n article = Article(title, link, published, source_title, description, image)\n news_list.append(article)\n\n return news_list", "def get_games():\r\n feed = feedparser.parse(FEED_URL)\r\n games = []\r\n for entry in feed.entries:\r\n games.append(Game(title = entry['title']\r\n , link = entry['link']\r\n ))\r\n return games", "def hfeed2atom(doc=None, url=None, atom_url=None, hfeed=None):\n\t# if hfeed object given assume it is well formatted\n\tif hfeed:\n\t\tmf = hfeed\n\telse:\n\t\t# send to hfeed_parser to parse\n\t\tmf = feed_parser.feed_parser(doc, url)\n\n\t\tif not mf:\n\t\t\treturn None, 'h-feed not found'\n\n\tfeed = {'generator': '', 'title': '', 'subtitle': '', 'link': '', 'uid': '', 'updated': '', 'author': '', 'entries': ''}\n\n\tif 'properties' in mf:\n\t\tprops = mf['properties']\n\telse:\n\t\treturn None, 'h-feed properties not found.'\n\n\t## required properties first\n\n\tuid = _get_id(mf) or url\n\n\t# id is -- required\n\tif uid:\n\t\t# construct id of feed -- required\n\t\tfeed['uid'] = templates.ID.substitute(uid = escape(uid))\n\telse:\n\t\treturn None, 'feed does not have a valid id'\n\n\t#construct title for feed -- required\n\tif 'name' in props:\n\t\tname = props['name'][0] or 
uid\n\n\tfeed['title'] = templates.TITLE.substitute(title = escape(name), t_type='title')\n\n\t# entries\n\tif 'children' in mf:\n\t\tentries = [x for x in mf['children'] if 'h-entry' in x['type']]\n\telse:\n\t\tentries = []\n\n\t# construct updated/published date of feed.\n\tupdated = _updated_or_published(mf)\n\n\tif not updated and entries:\n\t\tupdated = max([_updated_or_published(x) for x in entries])\n\n\t# updated is -- required\n\tif updated:\n\t\tfeed['updated'] = templates.DATE.substitute(date = escape(updated), dt_type = 'updated')\n\telse:\n\t\treturn None, 'updated date for feed not found, and could not be constructed from entries.'\n\n\t## optional properties\n\n\t# construct subtitle for feed\n\tif 'additional-name' in props:\n\t\tfeed['subtitle'] = templates.TITLE.substitute(title = escape(props['additional-name'][0]), t_type='subtitle')\n\n\tfeed['link'] = templates.LINK.substitute(url = escape(uid), rel='alternate')\n\tfeed['self'] = templates.LINK.substitute(url = escape(atom_url), rel='self')\n\n\t# construct author for feed\n\tif 'author' in props:\n\t\tauthor = templates.AUTHOR.substitute(name = escape(props['author'][0]['properties']['name'][0]))\n\n\t# construct entries for feed\n\tfor entry in entries:\n\t\t# construct entry template - skip entry if error\n\t\tentry_atom, message = hentry2atom(entry)\n\t\tif entry_atom:\n\t\t\tfeed['entries'] += entry_atom\n\n\tfeed['generator'] = templates.GENERATOR\n\n\treturn templates.FEED.substitute(feed), 'up and Atom!'", "def extract_news(parser):\r\n news_list = []\r\n\r\n tbl_list = parser.table.findAll('table')\r\n tr_list = tbl_list[1].findAll('tr')\r\n for i in range(0, 90, 3):\r\n new = dict()\r\n new['author'] = tr_list[i + 1].a.text\r\n new['points'] = tr_list[i + 1].span.text[:-6]\r\n comments = tr_list[i + 1].findAll('a')\r\n new['comments'] = comments[len(comments) - 1].text[:-9]\r\n if new['comments'] == '':\r\n new['comments'] = '0'\r\n new['title'] = tr_list[i].findAll('a')[1].text\r\n a_mas = List[str]\r\n a_mas = tr_list[i].findAll('a')\r\n new['url'] = a_mas[len(a_mas) - 1].text\r\n if new['url'] == new['title']:\r\n new['url'] = ''\r\n news_list.append(new)\r\n\r\n return news_list", "def fetch_parsed_feed(feed_url):\n feed = feedparser.parse(feed_url)\n parse_error = hasattr(feed, 'bozo_exception') and (\n isinstance(feed.bozo_exception, SAXException))\n if not feed.bozo or not parse_error:\n return feed", "def feed_entries(self):\n date_format = \"%Y-%m-%dT%H:%M:%SZ\"\n entries = self.mapper.list_entries(limit=10)\n if entries:\n updated = max([e.updated for e in entries]).strftime(date_format)\n else:\n updated = datetime.utcnow().strftime(date_format)\n return {\"entries\": entries, \"updated\": updated}", "def article_extractor(rss_feed_link):\n user_agent = {\"user-agent\": \"Mozilla/5.0 (Windows NT 6.2; Win64;\\\n x64; rv:16.0.1) Gecko/20121011 Firefox/16.0.1\"}\n try:\n feed = requests.get(rss_feed_link, headers=user_agent)\n except requests.exceptions.ConnectionError:\n print(\"No internet connection\")\n exit()\n\n dirty_content = BeautifulSoup(feed.text, \"xml\")\n return dirty_content", "def update_rss_feed(torrent_dir, suggested_name, url, download_url, tree_size, torrents):\n # Fetching the existing feed, if possible\n filepath = os.path.join(torrent_dir, '{}.rss'.format(suggested_name))\n try:\n with open(filepath, 'rb') as fd:\n doc = xml.dom.minidom.parse(fd)\n\n except IOError:\n # The RSS file does not exist; it is probably a first run\n doc = None\n\n # Fixing download URL, if need be, 
such that it ends with a slash\n if download_url[-1] != '/':\n download_url += '/'\n\n # Building/Verifying the XML structure\n try:\n chan = check_rss_dom_structure(doc)\n except:\n doc, chan = init_rss_dom_structure(url)\n\n for torrent_data in torrents:\n item = doc.createElement('item')\n chan.appendChild(item)\n\n title_elmt = doc.createElement('title')\n title_txt = doc.createTextNode('Package {} for tree_size {}'.format(torrent_data[2], tree_size))\n title_elmt.appendChild(title_txt)\n item.appendChild(title_elmt)\n\n desc_elmt = doc.createElement('description')\n desc_txt = doc.createTextNode(\n 'Comment: {} Creation Date: {}'.format(torrent_data[0]['comment'], torrent_data[0]['creation date'])\n )\n desc_elmt.appendChild(desc_txt)\n item.appendChild(desc_elmt)\n\n guid_elmt = doc.createElement('guid')\n fp = codecs.getencoder('hex')(torrent_data[1])[0]\n guid_txt = doc.createTextNode(fp.decode('UTF-8'))\n guid_elmt.appendChild(guid_txt)\n item.appendChild(guid_elmt)\n\n enclosure_elmt = doc.createElement('enclosure')\n enclosure_elmt.setAttribute('url', download_url + build_torrent_name(url, torrent_data[2], tree_size))\n enclosure_elmt.setAttribute('type', 'application/x-bittorrent')\n enclosure_elmt.setAttribute('len', str(torrent_data[3]))\n item.appendChild(enclosure_elmt)\n\n with open(filepath, 'wb') as fd:\n fd.write(doc.toxml('UTF-8'))", "def get_articles(self, feed_id: int) -> List[Article]:\n articles = []\n for row in self._sqlite_connection.execute('SELECT identifier, uri, title, updated, author, content, unread, flag FROM articles WHERE feed_id = ? ORDER BY updated DESC LIMIT 200', [feed_id]):\n data = ArticleData()\n data.feed_id = feed_id\n data.identifier = row['identifier']\n data.uri = row['uri']\n data.title = row['title']\n data.updated = datetime.fromtimestamp(row['updated'], timezone.utc)\n data.author = row['author']\n data.content = row['content']\n data.unread = bool(row['unread'])\n data.flag = bool(row['flag'])\n articles.append(Article(data))\n return articles", "def get_listing():\n\n result_items = []\n\n rss_data = urllib.request.urlopen(ActivityURL)\n rss_xml = xml.dom.minidom.parse(rss_data)\n\n channel = rss_xml.getElementsByTagName('channel')[0]\n items = channel.getElementsByTagName('item')\n for item in items:\n # Most of these are hackish, but a result of using the RSS\n # feed instead of something nicer like a JSON API. 
This\n # listing method is specifically isolated so we can easily\n # swap out the implementation later.\n asset_id = item.getElementsByTagName('guid')[0].childNodes[0].data.split('/')[-1]\n img_url = item.getElementsByTagName('description')[0].childNodes[0].data\n # Get part after start of img src attribute\n split_href = img_url.split('src=\"', 1)[1]\n # Get part before closing quote\n img_url = split_href.split('\"', 1)[0]\n # FIXME\n zip_url = ''\n result_items.append( Asset(asset_id, img_url, zip_url) )\n\n return result_items", "def bulk_entries_create(self, feed_id: int, parsed_entries: List) -> None:\n entries_to_create = []\n for entry in parsed_entries:\n entries_to_create.append(\n Item(\n feed_id=feed_id,\n title=entry[\"title\"],\n link=entry[\"link\"],\n description=entry[\"summary\"],\n published_at=datetime.fromtimestamp(\n mktime(entry[\"published_parsed\"])\n ),\n )\n )\n self.bulk_create(entries_to_create)", "def parse(self, response):\n item = NewsScraperItem()\n containers = response.xpath(\"//div[contains(@class,'largeTitle')]/article[contains(@class,\"\n \"'articleItem')]/div[contains(@class,'textDiv')]\")\n for info in containers:\n\n try:\n date = info.xpath(\".//div[contains(@class,'articleDetails')]/span[contains(@class,'date')]/text()\").extract_first()\n date = re.sub(r'\\xa0-\\xa0', '', date)\n # Convert 'minutes ago' to datetime\n date = datetime.now() - timedelta(minutes=int(re.sub(r'[^0-9]', '', date))) # Regex = Where not numeric\n item['date'] = date.strftime(\"%Y/%m/%d %H:%M:%S\")\n earn_id = re.search(r'[0-9]{4,}', info.xpath(\".//a/@onclick\").extract_first())\n item['id'] = earn_id.group()\n item['title'] = info.xpath(\".//a/text()\").extract_first()\n item['author'] = info.xpath(\".//div[contains(@class,'articleDetails')]/span/text()\").extract_first()\n item['text'] = info.xpath(\".//p/text()\").extract_first()\n item['link'] = info.xpath(\".//a/@href\").extract_first()\n yield item\n\n except:\n print(\"Unusual format detected\")\n logging.warning(\"Item skipped due to unusual format\")", "def get_feed(self):\n\t\turl=\"http://news.google.com/news?ned=%s&topic=%s&output=rss\"\n\t\tlinks=[{\"ned\":\"us\", \"type\":\"h\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"w\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"nz\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"sa\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"n\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"b\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"t\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"m\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"s\"},\n\t\t\t {\"ned\":\"us\", \"type\":\"e\"},\n\t\t\t ]\n\t\tfeed = links[self.get_input()]\n\t\treturn url%(feed[\"ned\"],feed[\"type\"])", "def extract_news(parser):\n news_list = []\n\n titles = parser.find_all(\"tr\", class_=\"athing\")\n subtext = parser.find_all(\"td\", class_=\"subtext\")\n\n for i in range(len(titles)):\n x = titles[i].find_all(\"td\", class_=\"title\")[1]\n title = x.a.text\n url = x.a[\"href\"]\n c = subtext[i].find_all(\"a\")[4]\n if c.text == \"discuss\":\n comments = 0\n else:\n comments = c.text\n author = subtext[i].find(\"a\", class_=\"hnuser\").get_text()\n point = subtext[i].find(\"span\", class_=\"score\").text\n points = point.split(' ')[0]\n\n news_list.append({\"author\": author, \"comments\": comments, \"points\": points, \"title\": title, \"url\": url})\n\n 
return news_list", "def test_rss_is_parseable(self):\r\n [make_bookmark() for i in range(10)]\r\n transaction.commit()\r\n\r\n res = self.app.get('/rss')\r\n\r\n self.assertEqual(\r\n res.status,\r\n \"200 OK\",\r\n msg='recent status is 200, ' + res.status)\r\n\r\n # http://packages.python.org/feedparser/\r\n # introduction.html#parsing-a-feed-from-a-string\r\n parsed = feedparser.parse(res.body)\r\n links = []\r\n for entry in parsed.entries:\r\n links.append({\r\n 'title': entry.title,\r\n 'category': entry.category,\r\n 'date': time.strftime('%d %b %Y', entry.updated_parsed),\r\n 'description': entry.description,\r\n 'link': entry.link,\r\n })\r\n\r\n self.assertTrue(links, 'The feed should have a list of links.')\r\n self.assertEqual(10, len(links), 'There are 10 links in the feed.')\r\n\r\n sample_item = links[0]\r\n self.assertTrue(sample_item['title'], 'Items have a title.')\r\n self.assertTrue(\r\n sample_item['link'],\r\n 'Items have a link to reach things.')\r\n self.assertTrue(\r\n 'description' in sample_item,\r\n 'Items have a description string.')", "def get_entries(uri):\n if not uri.endswith('/entries'):\n uri += '/entries'\n results = VGOCache(uri).results\n\n results = [ adjust_entry(x) for x in results ]\n return results", "def entry_dict(cls, feed_entry):\n return {\n 'id': feed_entry['id'],\n 'link': feed_entry['link'],\n 'published': pd.to_datetime(feed_entry['published']),\n 'title': feed_entry['title'],\n }", "def parse_entry(msg):\n values = msg.split(';')\n return {\n 'dt': datetime.strptime(\n values[0], '%Y-%m-%d %H:%M:%S.%f'),\n 'event': values[1]\n }", "def getOldEpisodes(config, rss, chan, namespaces):\n # Indicates items are to be added. Needed to know whether or not to\n # manually add namespaces. Yes, it is wonky. A side effect of the way\n # ElementTree adds namespaces.\n itemsAdded = False\n # Return value for the old episode elements which can be empty\n # if no old episodes exist\n items = None\n # Return value for the first year of publication as indicated by the\n # `pubDate` on the earliest episode. Used for generating the copyright\n # string. 
Can be empty if no old episodes exist.\n firstYear = None\n\n xmlFilepath = config['xmlFilepath']\n\n if os.path.isfile(xmlFilepath):\n # Load and strip the XML\n with open(xmlFilepath, 'r') as f:\n xmlStr = ''\n for line in f:\n # strip leading and trailing whitespace so minidom can prettify\n # without adding extraenous new lines\n xmlStr += line.lstrip().rstrip()\n\n # Parse the XML\n rssPrev = ET.ElementTree()\n\n try:\n rssPrev = ET.ElementTree(ET.fromstring(xmlStr))\n except:\n logger.fatal(\"Unable to parse \\'\" + xmlFilepath + \"\\'\")\n exit(1)\n\n # Find all the items and append them to the new tree\n items = rssPrev.getroot().findall('channel/item', namespaces)\n\n # Append found items and add appropriate namespaces\n if items:\n # Indicate items are to be added\n itemsAdded = True\n\n # Items do not carry an Atom namespace element, so add it manually\n rss.set(\"xmlns:atom\", \"http://www.w3.org/2005/Atom\")\n\n # Find the earliest `lastBuildDate` to determine copyright\n pubDates = rssPrev.getroot().findall('channel/item/pubDate',\n namespaces)\n\n for pubDate in pubDates:\n # Parse out the year\n year = re.findall(r\" \\d{4} \", pubDate.text)[0].lstrip().rstrip()\n\n # Set the year if empty or lower\n if not firstYear:\n firstYear = year\n else:\n if int(year) < int(firstYear):\n firstYear = year\n\n # No items were added, then add all namespace attributes manually.\n if not itemsAdded:\n for prefix, uri in namespaces.iteritems():\n rss.set(\"xmlns:\" + prefix, uri)\n\n return items, firstYear", "def test_feed_subclassing(self):\n moksha.feed_cache = FakeCache()\n class MyFeed(Feed):\n url = 'http://lewk.org/rss'\n feed = MyFeed()\n assert feed.url == 'http://lewk.org/rss'\n assert feed.num_entries() > 0\n for entry in feed.iterentries():\n pass\n for entry in feed.get_entries():\n pass", "def create_episodes_from_feed(self, entries):\n guids = self.podcast.episode_set.values_list(\"guid\", flat=True)\n entries = [entry for entry in entries if entry[\"id\"] not in guids]\n\n episodes = [\n episode\n for episode in [self.create_episode_from_feed(entry) for entry in entries]\n if episode\n ]\n return Episode.objects.bulk_create(episodes, ignore_conflicts=True)", "def get_rss(address, website):\n #print address\n try:\n results = pattern.web.Newsfeed().search(address, count=100,\n cached=False, timeout=30)\n logger.debug('There are {} results from {}'.format(len(results),\n website))\n \n #print \"Results found\"\n except Exception as e:\n print 'There was an error. Check the log file for more information.'\n logger.warning('Problem fetching RSS feed for {}. 
{}'.format(address,\n e))\n results = None\n\n return results", "def archive_parse_for_posts(page_html):\n # <div\\s+class=\"post.+data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\n post_info_regex = \"\"\"<div\\s+class=\"post.+?data\\-post\\-id\\=['\"](\\d+)['\"].+?<span\\s+class=['\"]post_date['\"]>([^<]+)</span>\"\"\"\n post_info = re.findall(post_info_regex, page_html, re.IGNORECASE|re.DOTALL)\n return post_info", "def get_items(xml):\r\n try:\r\n from bs4 import BeautifulSoup\r\n except ImportError:\r\n error = ('Missing dependency '\r\n '\"BeautifulSoup4\" and \"lxml\" required to import Wordpress XML files.')\r\n sys.exit(error)\r\n with open(xml, encoding='utf-8') as infile:\r\n xmlfile = infile.read()\r\n soup = BeautifulSoup(xmlfile, \"xml\")\r\n items = soup.rss.channel.findAll('item')\r\n return items", "def rss(request, blog):\n\tblog = Blog.objects.get(urlname=blog)\n\tarticles = BlogEntry.objects.filter(blog=blog).order_by('-posting_time')[:RSS_COUNT]\n\treturn render_to_response('rss/blog.html', {'blog': blog, 'articles': articles}, context_instance=RequestContext(request))" ]
[ "0.7831155", "0.729255", "0.7232448", "0.67960274", "0.674423", "0.6664003", "0.66603625", "0.6557846", "0.6554401", "0.65351653", "0.65075", "0.64848477", "0.647246", "0.6385361", "0.63827616", "0.63343084", "0.6323492", "0.6318049", "0.6304754", "0.6304091", "0.62566954", "0.6235731", "0.6212814", "0.618577", "0.6166525", "0.6085335", "0.60850614", "0.6077082", "0.60504395", "0.603122", "0.598507", "0.59791887", "0.59625494", "0.5960059", "0.59528494", "0.59507304", "0.594865", "0.5940006", "0.5923033", "0.59189576", "0.5915868", "0.5905135", "0.5904109", "0.5900455", "0.5880262", "0.5876208", "0.58586574", "0.5836209", "0.5792324", "0.579219", "0.57884616", "0.5774374", "0.5768862", "0.5764184", "0.5747", "0.57230663", "0.5722852", "0.5705939", "0.569703", "0.5696347", "0.5676597", "0.5676223", "0.5673143", "0.56708133", "0.5668982", "0.56624395", "0.56578565", "0.56555575", "0.56493306", "0.5641184", "0.5625867", "0.5594003", "0.55884606", "0.558437", "0.5581171", "0.55353326", "0.55350196", "0.5524077", "0.55108744", "0.55045795", "0.54970694", "0.5491553", "0.5477711", "0.54703873", "0.5459126", "0.54532796", "0.5440459", "0.540804", "0.53967005", "0.5396322", "0.5381602", "0.5376564", "0.53734016", "0.5359175", "0.53414124", "0.5339899", "0.53097004", "0.5297824", "0.5288371", "0.52848685" ]
0.7714306
1
Check if search matches any tags as stored in the Entry namedtuple (case insensitive, only whole, not partial string matches).
def filter_entries_by_tag(search, entry) -> bool: tags = entry.tags search_words = search.strip().translate(str.maketrans("&|", " ")).split() if "&" in search: search_type = "AND" else: search_type = "OR" for word in search_words: if word.lower() in tags: if search_type == "OR": return True elif search_type == "AND": return False if search_type == "OR": return False else: return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_entries_by_tag(search, entry):\n \n entry_tags = entry.tags\n if '&' in search:\n splits = search.split('&')\n\n return all(split.lower() in entry_tags for split in splits)\n elif '|' in search:\n splits = search.split('|')\n return any(split.lower() in entry_tags for split in splits)\n else:\n return search.lower() in entry_tags", "def match(self, name, tags):\n return name.lower() in tags", "def find(self, search):\n if type(search) == str:\n search = [search]\n\n for s in search:\n if self.text.lower().find(s.lower()) != -1:\n return True\n\n return False", "def test_search_tags(self):\n page = self.page1\n page.search_tags = \"Chutes, Ladders\"\n page.save_revision().publish()\n taglist = page.clean_search_tags\n for name in [\"Chutes\", \"Ladders\"]:\n self.assertIn(name, taglist)", "def match(self, name, tags):\n name, tags = self.get_compiled(name, tags)\n \n def index_of_letter(l):\n return ord(l) - ord('a')\n \n true_val, false_val = name\n \n if true_val:\n return index_of_letter(true_val) in tags\n else:\n return index_of_letter(false_val) not in tags", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def hasname(self, tag: str) -> bool:\n for key in self.formal_names:\n if key in tag.lower():\n return True\n\n # Exit case if key -> value not in mapping \n return False", "async def search(self, ctx: \"IceTeaContext\", *, query):\n response_list = await ctx.guild_data.search_tags(query)\n if len(response_list) > 0:\n response_message = \"\\n\".join([tag.title for tag in response_list])\n await ctx.send(f\"Found these tags:\\n{response_message}\")\n else:\n await ctx.send(\"No similar tags found\")", "def filter_search_results_entries(tag, form_type, description_text):\n desc_re = re.compile(description_text, re.I)\n form_re = re.compile(form_type, re.I)\n try:\n return (tag.parent.name == 'td' and\n tag.name == 'a' and\n tag['id'] == 'documentsbutton' and\n tag.parent.parent.find(string=form_re) and\n tag.parent.parent.find(string=desc_re))\n except:\n return False", "def test_case_insensitive(self):\r\n # Generate demo tag into the system\r\n tags = [make_tag() for i in range(5)]\r\n [DBSession.add(t) for t in tags]\r\n\r\n test_str = tags[0].name[0:4].upper()\r\n suggestions = TagMgr.complete(test_str)\r\n self.assertTrue(\r\n tags[0] in suggestions,\r\n \"The sample tag was found in the completion set\")", "def test_tag_search(self):\n url = reverse_lazy('tag-list') + '?search={}'.format('testtag')\n response = self.client.get(url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n results = response.data['results']\n self.assertEqual(len(results), 3)\n\n for tag in ('testtag1', 'testtag3'):\n result = list(filter(lambda it: it['title'] == tag, results))\n self.assertEqual(len(result), 1)\n result = result[0]\n\n self.assertEqual(len(result['posts']), 3)", "def exact_search(string, row):\n clear_screen()\n found = False\n for item in row:\n if string.lower() in item[\"Task\"].lower() \\\n or string.lower() in item[\"Notes\"].lower():\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "def match(self, name, tags):\n S, tags = self.get_compiled(name, tags)\n return bool(S & tags)", "def search(self, term):", "def match(self, name, tags):\n or_exprs, tags = self.get_compiled(name, tags)\n \n # or_exprs = [{'a'}, {'c'}, {'d', 
'a'}, {'d', 'e'}]\n return any(and_expr <= tags for and_expr in or_exprs)", "def search(self, word):", "def text_search():\n existing_fields = self.attr_name_map[object_class]\n text = \"%{}%\".format(exp[\"text\"])\n p = lambda f: f.ilike(text)\n return or_(*(\n with_key(field, p)\n for field in fields\n if field in existing_fields\n ))", "def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def match(self, filter_text):\n return filter_text.lower() in self.name.lower() or \\\n filter_text.lower() == self.isbn.lower() or \\\n filter_text.lower() in (str(tag).lower() for tag in self.tags)", "def make_query(term):\n def search(text):\n s=term.lower()\n if s in text.lower():\n return True\n return False\n return search", "def search(self, search):\n raise NotImplementedError", "def tag_dict_contains (self,\r\n tag):\r\n\r\n\r\n\r\n if self.using_database:\r\n aprint('TAGDICT CONTAINS')\r\n value_tuple = (notebookname, tag,)\r\n db_cursor.execute(\"SELECT rowid \"\r\n +\"FROM tags_to_keys\"\r\n +\" WHERE notebook=?\"\r\n +\" AND tag=?;\",\r\n value_tuple)\r\n try:\r\n return db_cursor.fetchone()[0] # MIGHT BE PROBLEMATIC\r\n except:\r\n return False\r\n\r\n return str(tag) in self.tag_dict", "def __contains__(self, query):\n if not isinstance(query, str): # Checks if the query is entered as a string.\n raise TypeError('The query must be a string')\n if query in self._words:\n return True\n elif query.lower() in self._words:\n return True\n else:\n return False", "def search(self, q):\n for x in self.strings:\n if q in x:\n return True\n \n return False\n\n\n pass", "def has_hashtag(self, tag_list, **kwargs):\n lowlist = [tag.lower() for tag in tag_list]\n alllower = ('case_sensitive' in kwargs and not kwargs['case_sensitive'])\n for ht in self.original.entities['hashtags']:\n lowht = ht['text'].lower()\n if alllower and lowht in lowlist or '#' + lowht in lowlist:\n return True\n if ht['text'] in tag_list or '#' + ht['text'] in tag_list:\n return True\n return False", "def search_all(self, word_list):\n return [k for k,v in self.data_values.iteritems() \n if all(w.lower() in v.lower() for w in word_list)]", "def search(self, find_val):\n return False", "def test_name(self):\n\n self.check_search(\n dict(name=u'flamethrower'),\n [u'Flamethrower'],\n 'searching by name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'durp'),\n [],\n 'searching for a nonexistent name',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'quICk AttACk'),\n [u'Quick Attack'],\n 'case is ignored',\n exact=True,\n )\n\n self.check_search(\n dict(name=u'thunder'),\n [ u'Thunder', u'Thunderbolt', u'Thunder Wave',\n u'ThunderShock', u'ThunderPunch', u'Thunder Fang'],\n 'no wildcards is treated as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'*under'),\n [u'Thunder'], # not ThunderShock, etc.!\n 'splat wildcard works and is not used as substring',\n exact=True,\n )\n self.check_search(\n dict(name=u'b?te'),\n [u'Bite'], # not Bug Bite!\n 'question wildcard works and is not used as substring',\n exact=True,\n )", "def search(self, tokens: List[str]) -> bool:\n item = \"\".join(tokens)\n if item in self._masked_items:\n return 
False\n\n cur = self._root\n for token in tokens:\n if token not in cur.children:\n return False\n cur = cur.children[token]\n\n return cur.is_term", "def search_any(self, word_list):\n # Same as search_all except uses the built-in any()\n return [k for k,v in self.data_values.iteritems() \n if any(w.lower() in v.lower() for w in word_list)]", "def main():\n entries = get_feed_entries()\n \n entries.sort(key=lambda x: x.date)\n while True:\n term = input(\"Search Term? \")\n if term == '':\n print('Please provide a search term')\n continue\n if term == 'q':\n print('Bye')\n break\n \n \n matches = 0\n for entry in entries:\n found = filter_entries_by_tag(term,entry)\n if found:\n print(entry.title)\n matches += 1\n\n \n if matches == 1:\n print('1 entry matched')\n else:\n print(f\"{matches} entries matched\")", "def match(self, filter_text):\n\n return filter_text.lower() in self.artist.lower() or \\\n super().match(filter_text)", "def search():\n\n # POST\n if request.method == \"POST\":\n\n # validate form submission\n if not request.form.get(\"intervention\"):\n return render_template(\"results.html\", results=entries.values())\n ''' \n elif not request.form.get(\"setting\"):\n return apology(\"missing setting\")\n elif not request.form.get(\"emrpref\"):\n return apology(\"missing emr pref\")\n elif not request.form.get(\"budget\"):\n return apology(\"missing budget\")'''\n \n results = []\n for k in entries:\n print('entries', entries[k]['Keywords'])\n print('term', request.form.get(\"intervention\"))\n if request.form.get(\"intervention\") in entries[k]['Keywords']:\n print('ya')\n results.append(entries[k])\n\n\n return render_template(\"results.html\", results=results)\n\n\n # GET\n else:\n return render_template(\"search.html\")", "def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass", "def contains(name):", "def any_term(self, row):\n return any(term in row for term in self.search_terms)", "def __search(findwhat, content, ignorecase, regexp):\n\t\tfrom re import search, IGNORECASE\n\t\tif regexp:\n\t\t\tif ignorecase:\n\t\t\t\tflag = IGNORECASE\n\t\t\telse:\n\t\t\t\tflag = 0\n\t\t\tif search(findwhat, content, flag):\n\t\t\t\treturn True\n\t\telse:\n\t\t\tif ignorecase:\n\t\t\t\tcontent = content.lower()\n\t\t\t\tfindwhat = findwhat.lower()\n\t\t\t\t\n\t\t\tif content.find(findwhat) != -1:\n\t\t\t\treturn True\n\t\treturn False", "def searchbrown_phrase(tags):\n l = len(tags)\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)-l+1):\n searchtags = [tag for _,tag in brown_tagged_words[i:i+l]]\n if tags == searchtags:\n hitwords.append(tuple([w.lower()\n for w,_ in brown_tagged_words[i:i+l]]))\n return hitwords", "def test_tags_content_search_valid_tag(self,tag_with_items):\n\n\n tag = tag_with_items\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_content([tag])\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. 
http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a content search using the tag\" \\\n + \" '%s' returned an error response\" % (tag) \\\n + \" code on the page %s\" % (po.current_url()) \\\n + \" http archive follows:\\n%s\" % (pprint.pformat(har_entry))\n\n # get pagination counts\n po = self.catalog.load_pageobject('TagsViewPage')\n (start,end,total) = po.get_pagination_counts()\n\n # check for a valid total value\n assert total >= 0, \\\n \"performing a content search using the tag\" \\\n + \" '%s' took user to a page with\" % (tag) \\\n + \" invalid pagination: %s\" % (po.current_url())", "def search(self, *args, **kwargs):", "def test_tags_tag_search_valid_tag(self,tag_with_items):\n\n tag = tag_with_items\n\n assert tag is not None, 'Could not find a tag with items'\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags(tag)\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an the tag\" \\\n + \"'%s' returned an error response code on\" % (tag) \\\n + \"the page %s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))\n\n # check for valid pagination total on tags view page\n po = self.catalog.load_pageobject('TagsViewPage')\n (start,end,total) = po.get_pagination_counts()\n\n assert total >= 0, \\\n \"performing a tag search using the tag\" \\\n + \"'%s' took user to page (%s) with invalid pagination\"\\\n % (tag,po.current_url())", "def search_in_db(query):\n return Posts.objects.filter(Q(header__icontains=query) |\n Q(post__icontains=query) |\n Q(prepost__icontains=query) |\n Q(tags__tag__icontains=query))", "def search(self, *args, **kwargs): # real signature unknown\n pass", "def search(wiki, pattern):\n wiki.search_tags(pattern)", "def search(self, string, tags=None):\n\n if isinstance(tags, string_types):\n message = \"tags should be a list or None, got tags={}\".format(tags)\n raise TypeError(message)\n\n data_collection = DataCollection()\n for item in self.iteritems():\n if string == item.name:\n if tags is None or tags == []:\n data_collection.add_data(item)\n else:\n if any([tag in item.tags for tag in tags]):\n data_collection.add_data(item)\n return data_collection", "def matches_input(self, optimized_str):\n\n if all([keyword in optimized_str for keyword in self['keywords']]):\n logger.debug('Matched template %s', self['template_name'])\n return True", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def whole_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + ' wordend'\n # whole word includes a space before\n found = self.text.get(start + '-1c', end)\n if found == ' ' + self.term:\n self.text.tag_add('found', start, end)\n start = end", "def search(self):\n\n term = self.substitute()\n ##print (\"searching:\",term)\n ##print (\"in facts\",self.facts)\n ##input()\n bindings = 
deepcopy(self.bindings)\n found = False\n for fact in self.facts:\n found = self.unify(term,fact,bindings)\n if found:\n bound_vars = list(bindings.keys())\n n_bound_vars = len(bound_vars)\n for i in range(n_bound_vars):\n for j in range(i+1,n_bound_vars):\n if bindings[bound_vars[i]] == bindings[bound_vars[j]]:\n return False\n self.facts.remove(self.substitute_with_bindings(bindings)) #THINK ABOUT THIS\n break\n return found", "def check_for_tag(tags, tagged_events):\n found_tags = set()\n tags_set = set(tags)\n for tag in tags:\n for tag_event in tagged_events:\n if tag in tag_event[1][\"tag\"][\"labels\"]:\n found_tags.add(tag)\n not_found = tags_set - found_tags\n tag_status = {}\n for tag in found_tags:\n tag_status[tag] = True\n for tag in not_found:\n tag_status[tag] = False\n return tag_status", "def search():\n query = request.args['query']\n # find instances of the entered word in title, tags or ingredients\n results = mongo.db.places.find({\n '$or': [\n {'name': {'$regex': query, '$options': 'i'}},\n {'tags': {'$regex': query, '$options': 'i'}},\n {'city': {'$regex': query, '$options': 'i'}},\n ]\n })\n return render_template('search.html', query=query, results=results)", "def __contains__(self, key):\n return super(CaseInsensitiveStringDict, self).__contains__(key.lower())", "def search_by_contains(self, tl):\n print(\"Search by string\")\n string = input(\"Please enter search string: \")\n return tl.findall_contains(string)", "def _Search(self, model, column, key, rowiter):\n row = model[rowiter]\n # False means a match was found.\n for i, title in enumerate(self._column_titles):\n if key.lower() in row[i].lower():\n return False\n return True", "def mutt_search(self, term):\n attrs = (\"email_address\", \"name\", \"otherinfo\", \"extrainfo\")\n ret = list(\n filter(lambda aitem: any(\n term in getattr(aitem, attr, \"\") for attr in attrs\n ), self.addresses)\n )\n return ret", "def find_by_exact_match(self):\n while True: \n self.task_name_search = input(\"What is the keyword/s you are looking\"\n \" for? 
Press Q to quit to the main screen: \").strip()\n if self.task_name_search.upper() in [\"Q\", \"QUIT\", \"EXIT\"]:\n x = self.dict_list\n return x\n self.find_by_exact_match_list = []\n count = 0\n for i in self.dict_list:\n for key, value in i.items():\n if re.search(self.task_name_search, value):\n self.find_by_exact_match_list.append(i)\n count+=1\n break\n if count == 0:\n print(\"There were no matches.\")\n else:\n self.display_style(self.find_by_exact_match_list)\n break\n self.del_or_edit()", "def find_exact(self, expression, tag_glob=\"*\"):\n for entry in self.entries:\n if expression in entry[\"writings\"] or expression in entry[\"readings\"]:\n entry_copy = copy.deepcopy(entry)\n entry_copy[\"senses\"] = list(\n filter(lambda sense: any(fnmatch(x, tag_glob)\n for x in sense[\"tags\"]),\n entry_copy[\"senses\"]))\n return entry_copy\n return None", "def search(collection_of_books: tuple, search_tag: str, search_keyword: str) -> list:\r\n found_books = []\r\n\r\n if search_tag == \"Shelf\" and search_keyword.isnumeric():\r\n found_books = [book for book in collection_of_books if search_keyword == book[\"Shelf\"]]\r\n\r\n else:\r\n for book in collection_of_books:\r\n if search_keyword.lower() in book[search_tag].lower():\r\n found_books.append(book)\r\n\r\n return found_books", "def search(self, word):\n curr = [self.trie]\n\n for c in word:\n next_curr = []\n if c != '.':\n for n in curr:\n if c in n.children:\n next_curr.append(n.children[c])\n else:\n for n in curr:\n next_curr.extend(n.children.values())\n curr = next_curr\n if not curr:\n return False\n\n return any([n.is_term for n in curr])", "def match(self, filter):\n return filter in self.tags or filter in self.memo", "def match(self, item, classifier):\n relevant_keys = ['keywords', 'watchwords', 'skip_keywords']\n metadata = dict({key: classifier.metadata[key] for key in relevant_keys if key in classifier.metadata})\n\n def _any_keyword_in(content, words):\n for word in words:\n if content.find(word) != -1:\n return True\n return False\n\n keywords = [kwd.lower() for kwd in metadata.get('keywords', [])]\n skip_list = [kwd.lower() for kwd in metadata.get('skip_keywords', [])]\n keyword_constraint = not keywords or _any_keyword_in(item.lower(), keywords)\n skip_list_constraint = not skip_list or not _any_keyword_in(item.lower(), skip_list)\n return keyword_constraint and skip_list_constraint", "def searchbrown_word(tag):\n brown_tagged_words = brown.tagged_words(categories='news')\n hitwords = []\n for i in range(len(brown_tagged_words)):\n if tag == brown_tagged_words[i][1]:\n hitwords.append(brown_tagged_words[i][0].lower())\n return hitwords", "def regex_entry_search(self, expression):\n return [entry for entry in self.entries \n if re.search(expression, entry.name)\n or re.search(expression, entry.note)]", "def search(self, value):\n pass", "def main():\n entries = get_feed_entries()\n\n while True:\n response = input(\"What you you like to search for? 
\")\n\n if not response:\n print(\"Please provide a search term\")\n continue\n\n if response.lower() == \"q\":\n print(\"Bye\")\n break\n\n matches = [\n entry\n for entry in list(\n filter(lambda x: filter_entries_by_tag(response.lower(), x), entries)\n )\n ]\n\n if matches:\n for entry in sorted(matches, key=lambda x: x.date):\n print(entry.title)\n\n print(f\"{len(matches)} {'entry' if len(matches) == 1 else 'entries'} matched\")", "def test_search_key_phrase(self):\n # search via key phrase.\n test = self.data.search(key_phrase='testing entries.', all_names=True)\n self.assertIn('testing entries.', test[0].notes)", "def search(query_string):", "def testFiltrerTag(self):\n\t\ttags = (\n\t\t\t \t('in,', 'in'),\n\t\t\t \t('casse-tete', 'casse-tete'),\n\t\t\t \t)\n\t\t\n\t\tf = Flickr()\n\t\tfor tag, tag_filter in tags:\n\t\t\tresult = f.filtrer_tag(tag)\n\t\t\tself.assertEqual(result, tag_filter)", "def __contains__(self, label: str) -> bool:\n return label in self.fuzzy_patterns or label in self.regex_patterns", "def match(self, sentence) -> bool:\r\n for word in self.word_list:\r\n if word.lower() in sentence.lower():\r\n return True\r\n return False", "def search(self, word) -> bool:\n return self.match(self.root, 0, word)", "def search(self, word):\n #edge case\n if word == \"\": \n return True if self._dict.children[26] != None else False\n\n cur = self._dict\n for c in word:\n ind = ord(c) - 97\n if cur.children[ind] == None:\n return False\n cur = cur.children[ind]\n\n return True if cur.isleaf == True else False", "def search():\n pass", "def searchRef(self, searchStr):\n filter = []\n attr = self.__listAttr()\n for name in attr:\n if searchStr.lower() in name.lower():\n doc = getattr(self, name)\n filter.append([name, doc]) \n # if in gloss, search for synonymes\n elif name in self.__glossIndex.keys():\n for altName in self.__glossIndex[name]['syn']:\n if searchStr in altName or altName in searchStr:\n doc = getattr(self, name)\n filter.append([name, doc])\n break\n \n return filter", "def search(self, query):", "def search(self, lookupword):\n with sqlite3.connect(self.dbpath) as conn:\n cursor = conn.cursor()\n cursor.execute('SELECT word, videofile FROM translation WHERE \\\n lower(word)=lower(?)', (lookupword,))\n find = cursor.fetchall()\n\n if find != []:\n # the word was found\n find = self.addSuffixes(find)\n return (True, find)\n\n else:\n # the word was not found\n # search the database for similar words\n altoptions = self._findAltOpts(lookupword)\n return (False, altoptions)", "def tag_exists(tag, directory=None):\n return tag in get_tags(directory)", "def test_tags(\n self, splunk_search_util, splunk_searchtime_fields_tags, record_property, caplog\n ):\n\n is_tag_enabled = splunk_searchtime_fields_tags.get(\"enabled\", True)\n tag_query = splunk_searchtime_fields_tags[\"stanza\"]\n tag = splunk_searchtime_fields_tags[\"tag\"]\n self.logger.info(f\"Testing for tag {tag} with tag_query {tag_query}\")\n\n record_property(\"Event_with\", tag_query)\n record_property(\"tag\", tag)\n record_property(\"is_tag_enabled\", is_tag_enabled)\n\n index_list = \"(index=\" + \" OR index=\".join(splunk_search_util.search_index.split(',')) + \")\"\n search = f\"search {index_list} {tag_query} AND tag={tag}\"\n search += \" | stats count by sourcetype\"\n\n self.logger.info(f\"Search: {search}\")\n\n result = splunk_search_util.checkQueryCountIsGreaterThanZero(\n search, interval=splunk_search_util.search_interval, retries=splunk_search_util.search_retry\n )\n\n 
record_property(\"search\", search)\n\n if is_tag_enabled:\n assert result, (\n f\"No events found for the enabled Tag={tag}.\"\n f\"\\nsearch={search}\"\n f\"\\ninterval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}\"\n )\n else:\n assert not result, (\n f\"Events found for the disabled Tag={tag}.\"\n f\"\\nsearch={search}\"\n f\"\\ninterval={splunk_search_util.search_interval}, retries={splunk_search_util.search_retry}\"\n )", "def test_tags_tag_search_no_tag(self):\n\n po = self.catalog.load_pageobject('TagsPage')\n po.goto_page()\n\n # perform the search\n self.browser.proxy_client.new_har(\"page\")\n po.search_for_tags('')\n har_entry = self.browser.page_load_details()\n\n # check for errors\n assert har_entry is not None, \\\n \"failed to load the uri. http archive unavailable.\"\n assert self.browser.error_loading_page(har_entry) is False, \\\n \"performing a tag search using an empty string as the tag\" \\\n + \"returned an error response code on the page\" \\\n + \"%s http archive follows:\\n%s\" \\\n % (po.current_url(),pprint.pformat(har_entry))", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def partial_word_matches(self):\n start = '1.0'\n while True:\n start = self.text.search(self.term, start, stopindex=tk.END)\n if not start:\n break\n end = start + f'+{self.chars}c'\n self.text.tag_add('found', start, end)\n start = end", "def has_tags_in_content(self):\n\t\treturn self.get_content() and re_tag.search(self.get_content())", "def __contains__(self, term):\n\t\tfieldname, text = term\n\t\tquery = dict(fieldname=fieldname, text=text)\n\t\treturn bool(self.index.collection.find(query).count())", "def handle_tag_search(self, tag_text):\n log.debug(\"Handling tag search: %s\", tag_text)\n tags = tag_text.split()\n self.filter_tags = tags\n self.current_selected = 0\n self._refresh()", "def isin(hi):\n return getme.lower() in hi.lowercase", "def findTags(user_input, tagged_text):\n result = []\n for item in tagged_text:\n for w in user_input:\n if w[WORD] == item[WORD]:\n tup = (w[WORD], item[TAG])\n result.append(tup)\n continue\n\n return result", "def find_in_keywords_and_entities(self, subject, eobject):\n for keyword in self.keywords:\n if subject.find(keyword) > -1 or eobject.find(keyword) > -1:\n return True\n\n for entity in self.entities:\n if subject.find(entity['name']) > -1 or eobject.find(entity['name']) > -1:\n return True \n\n return False", "def search_tag(input) :\n j = _jpdb()\n _input = _process_search_input(input)\n if not _input : return None\n f = j.base_format\n q = Query().select(f.tags, f.tags.id, f.tags.name)\n q.where().equal(f.tags.name, _input)\n tag_data = j.executeQuery(q)\n\n if tag_data:\n tag_id, tag_name = tag_data[0]\n examples = _create_examples(j.list_word_by_tag, tag_name)\n return SelectorResult('tag', tag_id, tag_name, *examples)", "def is_phrase_in(self, phrase, text):\n return re.search(r\"\\b{}\\b\".format(phrase), text, re.IGNORECASE) is not None", "def __contains__(self, keyword):\n return self._find(keyword) is not None", "def search(self, word: str) -> bool:\n node = 
self\n for c in word:\n node = node.d.get(c)\n if not node:\n return False\n return node.end", "def label_intersects_tags(label, tags):\n for tag in tags:\n if tag in label:\n return True\n return False", "def search(self, word):\n node = self.root\n for char in word:\n if char in node.dict:\n node = node.dict[char]\n else:\n return False\n if node.end:\n return True\n return False", "def search(self, word):\n now = self.tree\n for i in word:\n if i in now:\n now = now[i]\n else:\n return False\n return True if 'end' in now else False", "def icontains(self, other):", "def contains(self, searchstr: str):\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index < 0:\n return False\n return True", "def search(self, word: str) -> bool:\n cur = self.root\n for letter in word:\n if letter not in cur:\n return False\n cur = cur[letter]\n if \"isWord\" not in cur:\n return False\n return True", "def dunkin_query(text):\n\n return 'dunkin' in text.lower()", "def __contains__(self, item):\n return item.upper() in self.keys" ]
[ "0.7962715", "0.72779536", "0.6499833", "0.62624013", "0.62382317", "0.6214999", "0.6140391", "0.6119588", "0.6078616", "0.60098", "0.59635174", "0.5918138", "0.5911922", "0.58637714", "0.5848267", "0.5831237", "0.5806548", "0.5795138", "0.57761455", "0.57537323", "0.57031983", "0.56951916", "0.56755763", "0.5674097", "0.56596816", "0.5651486", "0.5640643", "0.5638073", "0.5636836", "0.56138223", "0.56108975", "0.5608406", "0.5608189", "0.56033033", "0.5596", "0.5586697", "0.55818886", "0.5580698", "0.55772024", "0.5566896", "0.55636525", "0.5557612", "0.5548619", "0.5548315", "0.55362517", "0.5531839", "0.55288243", "0.55288243", "0.55229396", "0.55096835", "0.54902065", "0.54895407", "0.5484957", "0.5474161", "0.5468771", "0.54639685", "0.5461019", "0.5457766", "0.54570967", "0.54548573", "0.5441938", "0.5439489", "0.54344577", "0.54307693", "0.54293567", "0.54226345", "0.54208183", "0.5419123", "0.5412017", "0.54098266", "0.5384608", "0.5369662", "0.5368509", "0.5361498", "0.535472", "0.5334037", "0.5333177", "0.5327365", "0.5326647", "0.5322545", "0.5320518", "0.5320518", "0.5320345", "0.53145885", "0.5314115", "0.5310206", "0.5303474", "0.5302858", "0.5302367", "0.530177", "0.52952284", "0.5274989", "0.5272948", "0.5266721", "0.525618", "0.5254187", "0.5250479", "0.5249082", "0.5239578", "0.5237586" ]
0.75187373
1
Entry point to the program 1. Call get_feed_entries and store them in entries 2. Initiate an infinite loop
def main(): entries = get_feed_entries() while True: response = input("What you you like to search for? ") if not response: print("Please provide a search term") continue if response.lower() == "q": print("Bye") break matches = [ entry for entry in list( filter(lambda x: filter_entries_by_tag(response.lower(), x), entries) ) ] if matches: for entry in sorted(matches, key=lambda x: x.date): print(entry.title) print(f"{len(matches)} {'entry' if len(matches) == 1 else 'entries'} matched")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_feed_entries_task():\n get_feed_entries()\n logger.info(\"Entries for Feed\")", "def main():\n feed_db, entry_db = openDBs(FEED_DB_FN, ENTRY_DB_FN)\n\n feeds = [ x.strip() for x in open(FEEDS_FN, \"r\").readlines() ]\n \n entries = getNewFeedEntries(feeds, feed_db, entry_db)\n \n if len(entries) > 0:\n out_fn = HTML_FN % time.strftime(\"%Y%m%d-%H%M%S\")\n writeAggregatorPage(entries, out_fn, DATE_HDR_TMPL, FEED_HDR_TMPL, \n ENTRY_TMPL, PAGE_TMPL)\n emailAggregatorPage(FROM_ADDR, TO_ADDR, SUBJECT, SMTP_HOST, out_fn)\n \n closeDBs(feed_db, entry_db)", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Login Activity' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'Summary of login activity on the %s server' % SITE_NAME\n \n # Call the command and capture output\n (sout, sin) = popen4(COMMAND) \n new_lines = [ x for x in sout.readlines() \n if x.find('reboot') == -1 ]\n \n # Attempt load up output from the previous run.\n old_lines = None\n old_output_fn = os.path.join(FEED_DIR, 'old_output.gz')\n if os.path.exists(old_output_fn):\n old_lines = gzip.open(old_output_fn, \"r\").readlines()\n \n # If there is previous output, check for changes...\n if old_lines:\n \n # Run a diff on the previous and current program output.\n diff_lines = [ x for x in difflib.ndiff(old_lines, new_lines) ]\n\n # Extract only the lines that have changed.\n changes_lines = [ x for x in diff_lines \n if x.startswith('-') or x.startswith('+') ]\n \n # Construct and append a new entry if there were changes\n if len(changes_lines) > 0:\n esc_changes_lines = [escape(x) for x in changes_lines]\n esc_diff_lines = [escape(x) for x in diff_lines]\n entry = FeedEntryDict({\n 'link' : '',\n 'title' : TITLE_TMPL % { \n 'changes' : len(changes_lines) \n },\n 'summary' : SUMMARY_TMPL % {\n 'changes_lines' : \"<br />\".join(esc_changes_lines),\n 'diff_lines' : \"<br />\".join(esc_diff_lines)\n }\n })\n f.append_entry(entry)\n\n # Save output from the current run for use next time.\n gzip.open(old_output_fn, \"w\").write(\"\".join(new_lines))\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())", "def main():\n try:\n init_file = open('keywords.json', 'r')\n init_file.close()\n except IOError:\n copy2('keywords.base', 'keywords.json')\n try:\n init_file = open('rsslist.json', 'r')\n init_file.close()\n except IOError:\n copy2('rsslist.base', 'rsslist.json')\n \n\n config_file = 'config.ini'\n config_section = 'dev'\n slack_token = load_config(config_file, config_section)\n slack_client = SlackClient(slack_token)\n feed_count = len(feed_db)\n feed_counter = feed_count\n while feed_counter > 0:\n url = feed_db.get(doc_id = feed_counter)['url']\n last_update_obj = feed_db.get(doc_id = feed_counter)['lastupdate']\n post_list, published_date = getfeed(url, last_update_obj)\n feed_counter = feed_counter - 1\n print(post_list)\n post_lastUpdate(url, published_date)\n post_to_slack(slack_client, post_list)", "def feed(self, entry):\r\n pass", "def do_feed(self, args):\n if self.first_feed:\n self.jay.speak(\"Remember, deletebot gets to eat the emails you delete and keepbot eats the emails you don't delete!\")\n self.jay.speak('Let me know when you want to STOP feeding!')\n self.first_feed = False\n delete = None\n while delete != 'stop' and delete != 'STOP':\n # pull email from database\n mail = 
self.email_client.get_random_email()\n self.jay.speak('Do you want to delete this email?')\n ec.preview_email(mail)\n delete = raw_input('>')\n if delete.lower().find('no') != -1:\n self.keepbot.feed(mail)\n elif delete != 'stop' and delete != 'STOP':\n self.deletebot.feed(mail)\n else:\n break\n self.jay.speak(\"Done feeding!\")", "def run(self):\n while True:\n try:\n if not self._read_new_entries(False):\n time.sleep(0.1)\n self._update_all_tasks()\n except KeyboardInterrupt:\n break", "def main():\n entries = get_feed_entries()\n \n entries.sort(key=lambda x: x.date)\n while True:\n term = input(\"Search Term? \")\n if term == '':\n print('Please provide a search term')\n continue\n if term == 'q':\n print('Bye')\n break\n \n \n matches = 0\n for entry in entries:\n found = filter_entries_by_tag(term,entry)\n if found:\n print(entry.title)\n matches += 1\n\n \n if matches == 1:\n print('1 entry matched')\n else:\n print(f\"{matches} entries matched\")", "def feed() -> None:\n ...", "def run():\n if ARGV.get(DEBUG_OPT):\n err_print(ARGV)\n\n if cache_is_valid():\n feed = CACHE['feed']\n else:\n feed = feedparser.parse(ARCH_NEWS)\n if feed.bozo:\n err_print(RED + 'ERROR: ' + CLEAR + 'failed checking feed: {}'.format(feed.bozo_exception))\n if RFP:\n pacman_msg(\"Exiting so we don't block your pacman upgrade\")\n sys.exit(0)\n sys.exit(255)\n write_cache(feed)\n\n if ARGV.get(CHECK_CMD):\n check_cmd(feed)\n elif ARGV.get(LIST_CMD):\n list_cmd(feed)\n elif ARGV.get(READ_CMD):\n read_cmd(feed)", "def main(feed=None):\n feed_processor = core.FeedProcessor()\n feed_processor(feed_type=feed)\n return feed_processor.feed_json", "def get_feed_entries(feed=FEED):\n d = feedparser.parse(feed)\n entries = d.entries\n \n all_entries =[]\n for entry in entries:\n title = entry.title\n link = entry.link\n date = entry.published_parsed\n tags = entry.tags\n tags = [t.get('term').lower() for t in tags]\n\n date = _convert_struct_time_to_dt(date)\n\n\n entry = Entry(date,title,link,tags)\n all_entries.append(entry)\n\n return all_entries", "def scanFeedList(self): \r\n data = self.feed_handler.listScanFeeds()\r\n data = data[:MAX_FEEDS_SCAN]\r\n for idx, feed in enumerate(data):\r\n print \"feeds ... 
/ [%s/%s] (%s docs:%s passed)\" % (idx, len(data),self.feed_item_ctr, self.feed_passed)\r\n try:\r\n baseURL = feed.mainUrl\r\n self.processData(baseURL) \r\n self.createFeedItems()\r\n except Exception, ex:\r\n print(\"ERR: failed to process data and create feed item=%s\" % ex)\r\n print \"done\"", "def run(self):\n while self.i < len(self.series):\n # Grab line + RSS\n s = self.series[self.i]\n rss = self.request_rss(s.feedUrl)\n\n # Compose Episodes\n ep_dicts = []\n for entry in rss['entries']:\n ep_dicts.append(Episode(s, entry).__dict__)\n\n # Build result dict\n result_dict = dict()\n result_dict['series'] = deepcopy(s.__dict__)\n result_dict['series']['genres'] = \\\n result_dict['series']['genres'].split(';')\n result_dict['series']['type'] = 'series'\n result_dict['episodes'] = ep_dicts\n\n # Store podcast\n self.storer.store(result_dict)\n\n # Move onto the next one\n self.i += 20\n print(\"Retrieved \" + str(s.id))", "def main():\n # Construct the feed generator\n f = LogBufferFeed(FEED_DIR)\n f.MAX_AGE = 24 * 60 * 60 # 1 day\n f.FEED_META['feed.title'] = '%s Referrering Links' % SITE_NAME\n f.FEED_META['feed.tagline'] = \\\n 'New referring links from Apache access.log on %s' % SITE_NAME\n \n # Load up tail of access log, parse, and filter\n new_lines = bookmark_tailgrep(ACCESS_LOG, max_initial_lines=100000)\n all_events = parse_access_log(new_lines)\n events = [ x for x in all_events if event_filter(x) ]\n \n # Scan through latest events for new referrers\n referrers_seen = shelve.open(REFER_SEEN)\n new_referrers = []\n for evt in events:\n k = '%(referrer)s -> %(path)s' % evt\n if not referrers_seen.has_key(k):\n referrers_seen[k] = 1\n new_referrers.append( (evt['referrer'], evt['path']) )\n referrers_seen.close()\n \n # If there were new referrers found, insert a new entry.\n if len(new_referrers) > 0:\n \n # Build a list of hyperlinks for referrers\n links_out = [\n LINK_TMPL % {\n 'SITE_ROOT' : SITE_ROOT,\n 'referrer' : x[0],\n 'path' : x[1],\n }\n for x in new_referrers\n ]\n \n # Build a summary for this entry.\n summary = SUMMARY_TMPL % { \n 'count' : len(new_referrers), \n 'links' : \"\\n\".join(links_out)\n }\n \n # Construct and append a new entry\n entry = FeedEntryDict({\n 'title' : '%s new referrers' % len(new_referrers),\n 'link' : '',\n 'summary' : summary\n })\n f.append_entry(entry)\n\n # Output the current feed entries as both RSS and Atom\n open(FEED_NAME_FN % 'rss', 'w').write(f.scrape_rss())\n open(FEED_NAME_FN % 'atom', 'w').write(f.scrape_atom())", "def main():\n exit_if_already_started()\n while True:\n for timeframe in ['all', 'month', 'week']:\n subreddits = load_list('subs.txt')\n while subreddits:\n # Grab all images/comments from sub, remove from list\n parse_subreddit(subreddits.pop(0), timeframe)", "def __add_entries(entries, feed):\n\n for entry in entries:\n try:\n # If there is entry with such title in this feed\n Entry.objects.get(title=entry.title, feed=feed)\n continue\n except Entry.DoesNotExist:\n pass\n\n # Try to find another entries with such title\n e = Entry.objects.filter(title=entry.title)\n # If found\n if len(e) != 0:\n e = e[0]\n # Copy all containing\n entry_obj = Entry(title=e.title,\n description=e.description,\n entry=e.entry, feed=feed)\n entry_obj.save()\n # Or create new Entry from scratch\n else:\n entry_name = entry.title + '.html'\n # If bad link or entry name\n try:\n urlretrieve(entry.link, entry_name)\n\n entry_file = open(entry_name)\n entry_file = File(entry_file)\n\n entry_obj = Entry(title=entry.title,\n 
description=entry.description,\n entry=entry_file, feed=feed)\n entry_obj.save()\n\n os.remove(entry_name)\n except:\n # Go to next entry\n continue", "def get_feed_entries(helper, name, start, stats):\n feed_url = helper.get_arg('feed_url')\n feed_creds = helper.get_arg('credentials')\n feed_headers = {}\n # If auth is specified, add it as a header.\n if feed_creds is not None:\n auth = '{0}:{1}'.format(feed_creds['username'], feed_creds['password'])\n auth = base64.encodestring(auth).replace('\\n', '')\n feed_headers['Authorization'] = 'Basic {0}'.format(auth)\n\n # Pull events as json.\n resp = helper.send_http_request(\n url=feed_url,\n method='GET',\n parameters={'v': 'json', 'tr': 1},\n headers=feed_headers,\n verify=VERIFY_CERTIFICATE,\n )\n\n # Raise exceptions on problems.\n resp.raise_for_status()\n feed_entries = resp.json()\n\n # Return the normalized events to be saved to the kv store.\n return normalized(name, feed_entries, start)", "def main(self):\n no_posts_found = 0\n while True:\n print(f\"...Searching for posts to cleanse..\")\n for post in self.reddit.subreddit(self.subreddit).stream.submissions(pause_after=1):\n if post is None:\n no_posts_found += 1\n print(f\".....Will run through {self.subreddit} one final time\")\n break\n else:\n if post.locked:\n post.mod.remove()\n else:\n post.mod.lock()\n post.mod.remove()\n print(f\"Post removed: {post.id}\")\n if no_posts_found == 2:\n print(f\"{self.subreddit} has been successfully cleansed.\")\n break\n print(f\"...Taking a small break! Be back in {self.delay} seconds\")\n time.sleep(self.delay)", "def generate_feeds():\n os.makedirs(Config.FEED_ROOT_PATH, exist_ok=True)\n use_batching = Config.DAILY_DIGEST is not None\n\n while True:\n _generate_feeds_once(use_batching=use_batching)\n interval = _interval_between_generating_feeds(Config.REFRESH_INTERVAL_SECONDS, Config.DAILY_DIGEST)\n logging.info('Sleeping %ss before attempting to generate feeds again.', interval)\n time.sleep(interval)", "def main():\r\n \r\n from TweetProcessor import TweetProcessor\r\n \r\n consumer_key = ''\r\n consumer_secret = ''\r\n tweepy_base_filter = \"Filter:links -Filter:retweets\"\r\n \r\n hashtags = [\r\n \"#covid-19\", \"#covid19\", \"#covid\", \"#coronavirus\", \"#corona\",\r\n \"#covid_19\"\r\n ]\r\n \r\n vt_keys = [\"\"]\r\n batch_size = 5000\r\n \r\n for i in range(len(hashtags)):\r\n \r\n try:\r\n tweepy_filter = hashtags[i] + \" \" + tweepy_base_filter\r\n print(\"starting pull with this filter: \" + str(tweepy_filter))\r\n \r\n tp = TweetProcessor(consumer_key, consumer_secret,\r\n tweepy_filter, vt_keys, batch_size)\r\n \r\n tp.run()\r\n\r\n except Exception as e: \r\n with open(\"tweetProcessorLog.txt\", \"a\") as file:\r\n file.write(\"\\n\" + str(datetime.now()) + \", error: \" + str(e))\r\n \r\n \r\n if e != \"Twitter error response: status code = 429\":\r\n raise e\r\n\r\n \r\n print(\"ERROR OCCURED: waiting for 15 minutes to avoid hitting tweepy request limit\")\r\n print(e)\r\n time.sleep(15 * 60)", "def run(self):\n # starting program, run hello feeds\n self.do_jobs(self.run_start)\n\n while not self.terminate:\n now = time.localtime()\n now_time = now.tm_hour * 60 + now.tm_min\n\n # next run is at most 30sec away\n next_run = 30\n\n # button hold triggered\n if self.button_hold:\n self.button_hold = False\n self.do_jobs(self.run_hold)\n\n # button tap triggered\n if self.button_tap:\n self.button_tap = False\n self.do_jobs(self.run_tap)\n\n # look for scheduled feeds to run\n when_tasks = []\n for t in self.run_when:\n if 
t['when'] <= now_time:\n if not t['ran_today']:\n t['ran_today'] = True\n when_tasks.append(t)\n else:\n t['ran_today'] = False\n self.do_jobs(when_tasks)\n\n # look for interval feeds to run\n interval_tasks = []\n for t in self.run_interval:\n if t['next'] <= time.mktime(now):\n t['next'] = time.mktime(now) + t['interval']\n interval_tasks.append(t)\n if time.mktime(now) - t['next'] < next_run:\n next_run = time.mktime(now) - t['next']\n\n self.do_jobs(interval_tasks)\n\n # wait until we have work to do\n if next_run >= 1:\n signal.alarm(next_run)\n signal.pause()\n else:\n time.sleep(0.25)\n\n # quitting program, run stop feeds\n self.do_jobs(self.run_stop)", "def main_loop(self):\n # main loop...don't ever exit\n while True:\n # collect data\n # get the time...the local clock is set with NTP regularly\n self._get_time()\n \n # get the latest metar data from the closest location\n self._get_metar()\n \n # get the latest fence station data\n self._get_fence_station()\n \n # get the lastest roof station data\n #METAR self._get_roof_station()\n \n # publish the data to our data file\n self.write_data_files()\n \n # show the user we are running\n print(\"{:s}\".format(datetime.datetime.now(pytz.UTC).strftime(\"%Y-%m-%d %H:%M:%S.%f\")), end=\"\\r\", flush=True)\n \n # wait a bit for the next loop\n time.sleep(3.0)\n \n return", "def run_rss(self):\n\n pass", "def get_feed_entries(feed=FEED) -> list:\n f = feedparser.parse(feed)\n\n entry_list = []\n\n for entry in f.entries:\n date = _convert_struct_time_to_dt(entry[\"published_parsed\"])\n title = entry[\"title\"]\n link = entry[\"link\"]\n tags = [tag[\"term\"].lower() for tag in entry[\"tags\"]]\n\n entry_list.append(Entry(date=date, title=title, link=link, tags=tags))\n\n return entry_list", "def main():\n feed_url = ( len(sys.argv) > 2 ) and sys.argv[2] or FEED_URL\n\n f = AmazonAdFeed(feed_url)\n f.STATE_FN = 'link_amazon_ads_state'\n \n if len(sys.argv) > 1 and sys.argv[1] == 'rss':\n print f.scrape_rss()\n else:\n print f.scrape_atom()", "def read_cmd(feed):\n if ARGV.get(READALL_OPT):\n for entry in feed.entries:\n mark_as_read(entry)\n else:\n if ARGV[ITEM_ARG]:\n item = ARGV[ITEM_ARG]\n if item.isdigit():\n entry = feed.entries[int(item)]\n pretty_print_item(entry)\n mark_as_read(entry)\n else:\n for entry in feed.entries:\n if item in entry.title:\n pretty_print_item(entry)\n mark_as_read(entry)\n break\n else:\n err_print('Could not find \"' + item + '\" in newsfeed')\n sys.exit(255)\n else:\n unread_entries = list()\n for entry in feed.entries:\n if not has_been_read(entry):\n unread_entries.insert(0, entry)\n for entry in unread_entries:\n pretty_print_item(entry)\n mark_as_read(entry)\n if entry is not unread_entries[-1]:\n read_next = prompt_yes_no('Read next item?', 'yes')\n if read_next in ('n', 'no'):\n break\n else:\n print('No more unread items')", "def main():\n\n while True:\n print(\"Let's explore some US bikeshare data!\")\n city, month, day = get_filters()\n df = load_data(city, month, day)\n # printing filter\n print(f\"Month: {month}, Day: {day}\")\n time_stats(df)\n station_stats(df)\n trip_duration_stats(df)\n user_stats(df)\n display_records(df)\n restart = prompts.yes_no_prompt(\"\\nWould you like to restart?\\n\").launch()\n if not restart:\n break\n system(\"clear\")", "def scrape(self):\n\n for feed in self.id_list:\n\n try:\n d = self.g.get_connections(feed, 'feed')\n except Exception as e:\n print(\"Error reading feed id %s, exception: %s\" % (feed, e))\n continue\n\n no_messages = 0\n self.no_messages = 
no_messages\n count = 1\n print(\"Scraping page %s of feed id %s\" % (count, feed))\n self.write_data(d)\n\n try:\n paging = d['paging']\n if 'next' in paging:\n next_page_url = paging['next']\n\n while next_page_url:\n\n count += 1\n print(\"Scraping page %s\" % count)\n\n try:\n # convert json into nested dicts and lists\n with urlopen(next_page_url) as url:\n read_url = url.read()\n d = simplejson.loads(read_url)\n except Exception as e:\n print(\"Error reading id %s, exception: %s\" % (feed, e))\n continue\n\n if len(d['data']) == 0:\n print(\"There aren't any other posts. Scraping of feed id %s is done! \" % feed)\n break\n\n self.write_data(d)\n\n if 'paging' in d:\n if 'next' in d['paging']:\n next_page_url = d['paging']['next']\n else:\n break\n\n except:\n if self.no_messages > 0:\n print(\"There aren't any other pages. Scraping of feed id %s is done! \" % feed)\n else:\n print(\"There is nothing to scrape. Perhaps the id you provided is a personal page.\")\n continue\n\n self.con.close()", "def run(self) -> None:\n # logging.info(\"started pull notifications thread\")\n self.set_reader_position()\n while not self.has_been_stopped.is_set():\n self.prompt_event.wait()\n self.prompt_event.clear()\n\n try:\n for notification in self.reader.read():\n if self.has_been_stopped.is_set():\n break\n domain_event = self.process_application.event_from_notification(\n notification\n )\n self.event_queue.put(\n (domain_event, notification[\"id\"], self.upstream_name)\n )\n except Exception as e:\n logging.error(traceback.format_exc(e))\n logging.error(\"Error reading notification log: %s\" % e)\n logging.error(\"Retrying...\")\n self.set_reader_position()\n sleep(1)", "def fetch_entries(self):\n entries = []\n rss_list = self.__data_adapter.fetch_rss()\n for rss in rss_list:\n rss_href = rss.get('url', None)\n if rss_href is not None:\n feed = feedparser.parse(rss_href)\n [entries.append(FeedDocument(entry.get('title', ''), entry.get('summary', ''))) for entry in feed.get('entries', [])]\n return entries", "def run():\n #LOG.debug(\"and so it begins\")\n intro()\n\n reloop = True\n while reloop is True:\n lines = []\n print(\"Awaiting your input: \")\n print('EXIT or ctrl-c to quit WPM')\n test = ''\n while test != 'END' and test != 'EXIT':\n line = input()\n if line == 'EXIT':\n exit()\n elif line != \"END\":\n lines.append(line)\n else:\n test = 'END'\n #LOG.debug(lines)\n\n parse_lines(lines, p)\n\n #LOG.debug(p)", "def test_feed_generator(self):\n moksha.feed_cache = FakeCache()\n feed = Feed(url='http://lewk.org/rss')\n iter = feed.iterentries()\n data = iter.next()\n assert iter.next()", "def feed(self):\n feed_dict = feedparser.parse(self.URL)\n return [self.entry_dict(entry) for entry in feed_dict['entries']]", "def process_loop(entries: List[StudentEntry]):\n pass", "def sync_entries():\n import time\n\n while True:\n try:\n update_pending_scripts(settings['api_handler'])\n except:\n logging.exception(\"Error occured during synchronisation\")\n time.sleep(60)", "def main():\n\n api = create_api()\n\n print('---Bot started---\\n')\n news_funcs = ['scrape_the_athletic', 'scrape_liverpool_echo']\n news_iterators = [] \n for func in news_funcs:\n news_iterators.append(globals()[func]())\n while True:\n for i, iterator in enumerate(news_iterators):\n try:\n tweet = next(iterator)\n api.update_status(tweet)\n print(tweet, end='\\n\\n')\n time.sleep(1800) \n except StopIteration:\n news_iterators[i] = globals()[newsfuncs[i]]()\n except tweepy.TweepError as e:\n print(e.reason)", "def 
cli():\n\n # XXX load the option parser and parser the command line\n\n aggregator.LOG.debug(\"Beginning feed update process.\")\n update()", "def wall():\n while True:\n if file_monitor.monitor():\n post_name = check_output('tail -1 {}'.format(posts_file), shell=True)\n if not post_name:\n continue\n\n user_name, post_name = post_name.split(separator.lstrip('\\n'))\n if post_name in posts_dict:\n continue\n\n posts_dict[post_name] = 1\n\n print(\"{user} has posted: {post}\".format(user=user_name, post=post_name))", "def produce_entries(self):\n # Grab and parse the feed\n feed = feedparser.parse(HTTPCache(self.main_feed).content())\n \n # Normalize feed meta data\n self.FEED_META = normalize_feed_meta(feed, self.date_fmt)\n self.FEED_META['feed.title'] += ' (with Amazon items)'\n\n # Normalize entries from the feed\n entries = normalize_entries(feed.entries)\n\n # Run through all the normalized entries...\n for e in entries:\n \n # Perform a search on the entry title, extract the items\n result = self.amazon_search(e['summary'])\n items = [ x for x in result.Items if 'Item' in x._name ]\n \n # Use each search result item to populate the templates.\n insert_items = [ self.INSERT_ITEM_TMPL % {\n 'title' : i.ItemAttributes.Title,\n 'url' : i.DetailPageURL,\n 'img' : i.SmallImage.URL\n } for i in items[:self.MAX_ITEMS] ]\n insert_out = self.INSERT_TMPL % '\\n'.join(insert_items)\n\n # Append the rendered search results onto the entry summary.\n e.data['summary'] += insert_out.decode('utf-8', 'ignore')\n \n return entries", "def cmd_runner():\n parser = argparse.ArgumentParser(description=\"Get news from extracted sources\")\n parser.add_argument('-s', '--source', help=\"Name of the news source. Available options are: {}\"\n .format(', '.join(\n available_rss_feed_sources_user_prompt\n )\n ))\n parser.add_argument('-t', '--top', help=\"Count of news items to show from top order. Default: 10\")\n parser.add_argument('-v', '--version', action='version', version='%(prog)s {version}'.format(version=__version__))\n\n args = parser.parse_args()\n \n rss_feed_top_count = RSS_FEED_TOP_COUNT\n if args.top:\n if not args.top.isnumeric():\n raise argparse.ArgumentTypeError('Provide a positive integer for --top argument')\n \n else:\n rss_feed_top_count = int(args.top)\n\n if args.source:\n source_name = args.source.lower()\n print('Source argument found, validating...')\n time.sleep(1)\n source_name_is_valid = source_name in available_rss_feed_sources\n \n if not source_name_is_valid:\n print(\n 'Invalid input {0} for source. 
Available sources are {1} '\n .format(source_name, ', '.join(available_rss_feed_sources_user_prompt)))\n sys.exit()\n \n \n rss_feed_details = {\n 'rss_feed_url': '',\n 'top': rss_feed_top_count,\n 'source': source_name\n }\n rss_feed_url = rss_data_main._get_rss_feed_url_by_source(source_name)\n if not rss_feed_url:\n raise ValueError('RSS url not found for source {}'.format(source_name))\n \n rss_feed_details['rss_feed_url'] = rss_feed_url\n\n \n rss_feed_details['top'] = rss_feed_top_count\n \n print('Getting news...\\n\\n')\n time.sleep(1)\n \n feed_data = rss_feed_main._get(rss_feed_details)\n feed_viewer(feed_data)\n else:\n print('--source argument not provided, getting data for all available sources...')\n rss_feed_details_list = []\n for source_name in available_rss_feed_sources:\n rss_feed_details = {\n 'rss_feed_url': '',\n 'source': source_name,\n 'top': rss_feed_top_count\n }\n rss_feed_url = rss_data_main._get_rss_feed_url_by_source(source_name)\n if not rss_feed_url:\n raise ValueError('RSS url not found for source {}'.format(source_name))\n \n rss_feed_details['rss_feed_url'] = rss_feed_url\n rss_feed_details_list.append(rss_feed_details)\n \n with Pool(AVIALABLE_CPU_COUNT) as p:\n feed_data_list = p.map(rss_feed_main._get, rss_feed_details_list) \n # put list of lists in a single list\n feed_data_flattened_list = []\n for feed_data in feed_data_list:\n feed_data_flattened_list.extend(feed_data) \n\n feed_viewer(feed_data_flattened_list)", "def command_feed(args):\n\tif not os.path.isdir(DUNGEON_PATH):\n\t\tlogger.error(\"Couldn't find dungeon. Set INQUISITOR_DUNGEON or cd to parent folder of ./dungeon\")\n\t\treturn -1\n\n\timport shutil\n\tfrom inquisitor import loader\n\tfrom inquisitor import timestamp\n\n\titems, errors = loader.load_active_items(source_names=None)\n\tif not items and not errors:\n\t\tprint(\"Feed is empty\")\n\t\treturn 0\n\n\tif errors:\n\t\titems.insert(0, {\n\t\t\t'title': '{} read errors: {}'.format(len(errors), ' '.join(errors)),\n\t\t\t'body': \"\\n\".join(errors)\n\t\t})\n\n\tsize = shutil.get_terminal_size((80, 20))\n\twidth = min(80, size.columns)\n\n\tfor item in items:\n\t\ttitle = item['title'] if 'title' in item else \"\"\n\t\ttitles = [title]\n\t\twhile len(titles[-1]) > width - 4:\n\t\t\ti = titles[-1][:width - 4].rfind(' ')\n\t\t\ttitles = titles[:-1] + [titles[-1][:i].strip(), titles[-1][i:].strip()]\n\t\tprint('+' + (width - 2) * '-' + '+')\n\t\tfor title in titles:\n\t\t\tprint(\"| {0:<{1}} |\".format(title, width - 4))\n\t\tprint(\"|{0:<{1}}|\".format(\"\", width - 2))\n\t\tinfo1 = \"\"\n\t\tif 'author' in title and item['author']:\n\t\t\tinfo1 += item['author'] + \" \"\n\t\tif 'time' in item and item['time']:\n\t\t\tinfo1 += timestamp.stamp_to_readable(item['time'])\n\t\tprint(\"| {0:<{1}} |\".format(info1, width - 4))\n\t\tcreated = timestamp.stamp_to_readable(item['created']) if 'created' in item else \"\"\n\t\tinfo2 = \"{0} {1} {2}\".format(\n\t\t\titem.get('source', ''), item.get('id', ''), created)\n\t\tprint(\"| {0:<{1}} |\".format(info2, width - 4))\n\t\tprint('+' + (width - 2) * '-' + '+')\n\t\tprint()", "def __update_feed(feed_obj):\n\n url = feed_obj.url\n feed = feedparser.parse(url)\n\n try:\n feed.feed.title\n except AttributeError:\n return\n\n # List of new entries in downloaded XML\n new_entries = feed.entries\n new_entries_titles = [entry.title for entry in new_entries]\n\n # List of current entries in database\n old_entries = Entry.objects.filter(feed=feed_obj)\n old_entries_titles = [entry.title for entry in 
old_entries]\n\n # Check what old entries arn't in new entries\n # They will be deleted\n for entry_title in old_entries_titles:\n if entry_title not in new_entries_titles:\n Entry.objects.get(title=entry_title, feed=feed_obj).delete()\n\n # Add all new entries\n __add_entries(new_entries, feed_obj)\n\n # Update time and save\n feed_obj.time = datetime.now()\n feed_obj.save()", "def PrintFeed(feed):\n for entry in feed.entry:\n PrintResource(entry)", "def parse_and_alert(self):\n self.parse_feed()\n self.alert_new_posts()", "def main():\n \"\"\"get and format the data\"\"\"\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n try:\n while 1:\n for drawable in draw_list:\n print(drawable[0])\n print(drawable[1], \"\\n\")\n time.sleep(6.5)\n weather.update()\n observation = weather.get_observation()\n forecast = weather.get_forecast()\n draw_list = build_draw_list(observation, forecast)\n\n except KeyboardInterrupt:\n print(\"\\n\\nInterrupt detected, exiting...\")", "def execute_task(self, *args):\n from flankers.scrawler import Scrawler\n\n RSS_FEEDS_CACHE = memcache.get('RSS_FEEDS_CACHE')\n if not RSS_FEEDS_CACHE or len(RSS_FEEDS_CACHE) == 0:\n RSS_FEEDS_CACHE = Scrawler.load_links()\n memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)\n\n print len(RSS_FEEDS_CACHE)\n\n l = RSS_FEEDS_CACHE.pop()\n print l\n entries = Scrawler.read_feed(l)\n if entries:\n for entry in entries:\n #\n # Store feed\n #\n store_feed(entry)\n memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)\n return None\n\n memcache.set('RSS_FEEDS_CACHE', RSS_FEEDS_CACHE)\n print \"This Feed has no entries\"\n return None", "def main():\n populate_satellites_array()\n latitude = float(os.environ['LATITUDE'])\n longitude = float(os.environ['LONGITUDE'])\n radius = int(os.environ['RADIUS'])\n timeout = 1\n previous_satellites = []\n while True:\n if (last_updated[0] + 86400) < int(time.time()):\n print('Expired data, updating from spacetrack')\n cron_refresh_spacetrack_cache()\n populate_satellites_array()\n print('Checking {}, {}'.format(latitude, longitude))\n currently_overhead = get_overhead_satellites_dicts(latitude, longitude, radius)\n for sat in currently_overhead:\n if not sat['name'] in previous_satellites:\n announce_satellite(sat)\n previous_satellites = [x['name'] for x in currently_overhead]\n time.sleep(timeout)", "def main():\n while True:\n city, month, day = get_filters()\n\n df = load_data(city, month, day)\n\n time_stats(df, city, month, day)\n station_stats(df, city)\n trip_duration_stats(df, city)\n # The city of washington does not provide user statistics\n if city != \"washington\":\n user_stats(df, city)\n\n sample = input(\n \"\\nIf you would like a sample of the raw date, enter 'yes' ===> \"\n )\n if sample.lower() == \"yes\":\n review_data(df)\n\n restart = input(\"\\nEnter 'yes' if you would like to restart ===> \")\n if restart.lower() != \"yes\":\n break", "def main(self):\n\n while True:\n print('Main Menu:')\n user_input = input('What would you like to do? (C)reate new record or (L)ookup existing? ').lower().strip()\n self.check_input(user_input)\n\n if user_input == 'c':\n print('Great! Let\\'s create a new log entry!\\n')\n self.create_entry()\n elif user_input == 'l':\n print('Awesome! 
Let\\'s look up some entries!\\n')\n self.lookup_entry()", "def main(): \n while True:\n city, month, day = get_filters()\n\n df = generate_stats(city, month, day)\n\n # step through data\n step_through = input('\\nWould you like to step through raw data? Type \"yes\" to step through: \\n')\n if step_through.lower() == 'yes':\n step_through_data(df)\n\n # restart\n restart = input('\\nWould you like to restart? Type \"yes\" to restart.\\n')\n if restart.lower() != 'yes':\n break", "def parse_feed(feed, last_update, entry, get_updated = lambda e: e.updated_parsed[:6]):\n\n entries = []\n for e in feed.entries:\n if datetime(*get_updated(e)) > last_update:\n new = entry(e)\n if new != None:\n entries.append(new)\n return entries", "def _process_feeds(self):\n if self._feeds is None:\n return\n try:\n for feed_parser in self._feed_parsers:\n # all of the nested try excepts\n try:\n for article in feed_parser.get_new_articles():\n self._downloader.queue_article(article)\n for article in self._recursive_source.get_new_articles():\n self._downloader.queue_article(article)\n except Exception as e:\n logging.exception(e)\n\n except TypeError:\n raise ValueError(\"'feeds' must be a list of RSS feed URLs to process.\")", "def iter_feed(gd_client):\n feed = gd_client.GetContactsFeed()\n while feed:\n for entry in feed.entry:\n yield entry\n # Check whether there is another page and if yes\n next_link = feed.GetNextLink()\n feed = None\n if next_link:\n feed = gd_client.GetContactsFeed(uri=next_link.href)", "def _main_loop(self):\n observer = Observer()\n observer.schedule(self.changes_event_handler, path=self.base_dir, recursive=False)\n observer.start()\n while True:\n if os.path.exists(self.todo_local_file):\n with open(self.todo_local_file, 'rb') as f:\n obj_list = pickle.load(f)\n\n today_todo_list = [i for i in obj_list if self.is_today_todo(i['time'])]\n self.solve_one_day_todo_events(todo_items_list=today_todo_list)\n else:\n time.sleep(60)\n pass", "def cli():\n fire.Fire(fetch_rss_file)", "def test_feed(app, status, warning):\n app.build()\n assert app.statuscode == 0\n\n feed_path = app.outdir / \"blog/atom.xml\"\n assert (feed_path).exists()\n\n with feed_path.open() as feed_opened:\n feed_tree = lxml.etree.parse(feed_opened)\n entries = feed_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(entries) == 2\n\n entry = entries[0]\n title = entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[0].attrib[\"term\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n assert categories[1].attrib[\"term\"] == \"FooTag\"\n content = entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo post content.\" in content.text\n update_time = entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n first_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n empty_entry = entries[1]\n title = empty_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Empty Post\"\n summary = empty_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary is None\n categories = empty_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 0\n content = 
empty_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert 'id=\"foo-empty-post\"' in content.text\n update_time = empty_entry.find(\"{http://www.w3.org/2005/Atom}updated\")\n second_entry_date = datetime.strptime(update_time.text, POST_DATETIME_FMT)\n\n # check order of post based on their dates\n assert first_entry_date > second_entry_date\n\n social_path = app.outdir / \"blog/social.xml\"\n assert (social_path).exists()\n\n with social_path.open() as social_opened:\n social_tree = lxml.etree.parse(social_opened)\n social_entries = social_tree.findall(\"{http://www.w3.org/2005/Atom}entry\")\n assert len(social_entries) == len(entries)\n\n social_entry = social_entries[0]\n title = social_entry.find(\"{http://www.w3.org/2005/Atom}title\")\n assert title.text == \"Foo Post Title\"\n summary = social_entry.find(\"{http://www.w3.org/2005/Atom}summary\")\n assert summary.text == \"Foo post description with link.\"\n categories = social_entry.findall(\"{http://www.w3.org/2005/Atom}category\")\n assert len(categories) == 2\n assert categories[0].attrib[\"label\"] == \"BarTag\"\n assert categories[1].attrib[\"label\"] == \"Foo Tag\"\n content = social_entry.find(\"{http://www.w3.org/2005/Atom}content\")\n assert \"Foo Post Title\" in content.text", "def generate_feed(results, generator):\n\n for result in results:\n content = FeedContentWrapper(result)\n\n content.add_premium_logo_to_image_url()\n feed_item = generator.add_entry(order='append')\n feed_item.id(content.id)\n feed_item.author(author=content.author)\n feed_item.link(href='%s%s' % (WELT_URL, content.web_url))\n feed_item.catalogue.availability_date(content.publication_date)\n feed_item.title(content.seo_title)\n feed_item.description(content.intro)\n feed_item.content(content.premium_paragraph)\n feed_item.catalogue.id(content.id)\n feed_item.catalogue.brand('WELT Plus')\n feed_item.catalogue.condition('new')\n feed_item.catalogue.google_product_category('Media > Magazines & Newspapers')\n feed_item.catalogue.product_type(content.category)\n feed_item.catalogue.image_link(content.add_premium_logo_to_image_url())\n feed_item.catalogue.additional_image_link(content.add_premium_logo_to_image_url(default_image=False))\n feed_item.catalogue.custom_label_0(content.topic)\n feed_item.catalogue.custom_label_1(content.headline)\n feed_item.catalogue.custom_label_2(str(content.reading_time))\n feed_item.catalogue.custom_label_3(content.age)\n feed_item.catalogue.custom_label_4(content.tags)", "def list_cmd(feed):\n if ARGV.get(REV_OPT):\n feed_list = reversed(feed.entries)\n else:\n feed_list = feed.entries\n index = 0\n for entry in feed_list:\n if not ARGV.get(UNREAD_OPT) \\\n or (ARGV.get(UNREAD_OPT) and not has_been_read(entry)):\n print(format_list_item(entry, index))\n index += 1", "def feedGraph():\n finish = False\n print('Enchanté, je suis BOBI, un agent DAVEO. Je souhaiterais vous poser des questions sur votre métier.')\n print('Tout d\\'abord, quel est votre métier ?')\n job = input()\n print('Quelles sont les tâches que vous réalisez en tant que ' + job + ' ? 
')\n # This boolean is here to allow user to quit conversation by entering two empty lines in console.\n keyboardEntry = False\n while not finish:\n action = input()\n # If line is empty, verify if user want to exit conversation\n if action == \"\":\n if keyboardEntry:\n finish = True\n else:\n keyboardEntry = True\n # If line is too long, inform user (System is more efficient with simple phrases)\n elif len(action) > 140:\n keyboardEntry = False\n print('Je suis encore jeune et aie beaucoup à apprendre, pourriez vous écrire moins de 140 caractères '\n 's\\'il vous plait ?')\n else:\n keyboardEntry = False\n insertTripleV6(graph, \"rdf/generatedRdf.ttl\", action, job)", "def check_feeds():\n\n session = oercloud.Session()\n\n # load the entry point handlers for different feed types\n handlers = aggregator.handlers.get()\n\n for feed in session.query(oercloud.Feed):\n\n if (time.time() - feed.last_import) > feed.update_interval:\n\n # this feed needs updated -- call the appropriate handler\n aggregator.LOG.info(\"Updating %s\" % feed)\n\n if feed.feed_type in handlers:\n handlers[feed.feed_type].load()(feed)\n else:\n # no handler... log a warning\n aggregator.LOG.warning(\"No handler for feed type %s\" % \n feed.feed_type)", "def __init__(self, main_feed):\n self.main_feed = main_feed", "def run():\n\n while True:\n\n # get event, blah\n event_name, event_data = revent.get_event(block=True, timeout=5)\n\n if event_name is not None:\n print 'received: %s' % event_name\n\n if event_name.endswith('_oembed_details'):\n handle_new_oembed_details(event_data)\n\n elif event_name == 'new_tweet':\n handle_new_tweet(event_data)\n\n # and we're done\n assert revent.verify_msg(event_name, event_data), \\\n \"Could not verify %s\" % event_name", "def run(self): \n #\n\n \n # forever loop\n while True: \n \n for app in self.app_list:\n self.check(app) \n #print(\"check\")\n \n gevent.sleep(SLEEP_SECONDS)", "def main():\n while True:\n click.clear()\n city, month, day = get_filters()\n df = load_data(city, month, day)\n\n while True:\n select_data = choice('\\nPlease select the information you would'\n 'like to obtain:\\n'\n '\\n'\n '[ts] Time Stats\\n'\n '[ss] Station Stats\\n'\n '[tds] Trip Duration Stats \\n'\n '[us] User Stats\\n'\n '[rd] Raw Data\\n'\n '\\n'\n '[0] Exit\\n>',\n ('ts', 'ss', 'tds', 'us', 'rd', 'r'))\n click.clear()\n if select_data == 'ts':\n time_stats(df)\n elif select_data == 'ss':\n station_stats(df)\n elif select_data == 'tds':\n trip_duration_stats(df)\n elif select_data == 'us':\n user_stats(df)\n elif select_data == 'rd':\n display_data(df)\n elif select_data == '0':\n break\n\n restart = choice('\\nWould you like to restart?'\n 'Enter yes or no.\\n').lower()\n print()\n if restart.lower() != 'y':\n break", "def rss_fetch():\n items = {}\n\n def add_item(pubDate, title, link):\n nonlocal items\n idx = float(parsedate_to_datetime(pubDate).timestamp())\n while idx in items:\n idx = idx + 0.1\n dbg(\"Adding item: %11.1f \\\"%s\\\" %s\" % (idx, title, link))\n items[idx] = {}\n items[idx]['title'] = title\n items[idx]['link'] = link\n\n state = \"\" # state parser is in (\"\", \"item\", \"title\", \"link\", \"pubDate\")\n title = \"\" # Currently parsing this title.\n link = \"\" # \" \" \" link\n pubDate = \"\" # \" \" \" pubDate (index)\n\n def start_element(name, attrs):\n nonlocal state\n nonlocal title\n nonlocal link\n nonlocal pubDate\n dbg(\"Start: %s %s %s\" %(name, str(attrs), str((state, title, link, pubDate))))\n if state == \"\":\n if name == \"item\":\n state = 
\"item\"\n elif state == \"item\":\n if name == \"title\":\n state = \"title\"\n if title:\n prn(\"Two titles?\")\n sys.exit(1)\n elif name == \"link\":\n state = \"link\"\n if link:\n prn(\"Two links?\")\n sys.exit(1)\n elif name == \"pubDate\":\n state = \"pubDate\"\n if pubDate:\n prn(\"Two pubDates?\")\n sys.exit(1)\n\n\n def end_element(name):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"End: %s %s\" % (name, str((state, title, link, pubDate))))\n if state == \"item\":\n if name == \"item\":\n if title == \"\":\n prn(\"No title at end item.\")\n sys.exit(1)\n if link == \"\":\n prn(\"No link at end item.\")\n sys.exit(1)\n if pubDate == \"\":\n prn(\"No pubDate at end item.\")\n sys.exit(1)\n else:\n add_item(pubDate, title, link)\n state = \"\"\n title = \"\"\n link = \"\"\n pubDate = \"\"\n elif state == \"title\":\n if name == \"title\":\n state = \"item\"\n elif state == \"link\":\n if name == \"link\":\n state = \"item\"\n elif state == \"pubDate\":\n if name == \"pubDate\":\n state = \"item\"\n\n def char_data(data):\n nonlocal state\n nonlocal title\n nonlocal pubDate\n nonlocal link\n dbg(\"Data: %s %s)\" % (str(data), str((state, title, link, pubDate))))\n if state == \"title\":\n title = title + data\n elif state == \"link\":\n link = link + data\n elif state == \"pubDate\":\n pubDate = pubDate + data\n\n\n p = xml.parsers.expat.ParserCreate(\"UTF-8\")\n\n p.StartElementHandler = start_element\n p.EndElementHandler = end_element\n p.CharacterDataHandler = char_data\n\n with urllib.request.urlopen('https://news.ycombinator.com/rss') as f:\n xml_file = b\"\"\n while True:\n r = f.read(255)\n if r:\n xml_file = xml_file + r\n else:\n break\n\n try:\n p.Parse(xml_file.decode(\"UTF-8\"), True)\n except:\n dbg(\"Writing fetched RSS feed to file...\")\n err_f = open(parse_error_output_file, \"ab\")\n err_f.write(b\"GET URL: \")\n err_f.write(f.geturl().encode(\"UTF-8\"))\n err_f.write(b\"\\nReturn Code: \")\n err_f.write((\"%d\\n\" % (f.getcode(), )).encode(\"UTF-8\"))\n err_f.write(b\"Meta Info:\\n\")\n err_f.write(f.info().as_bytes(unixfrom=True))\n err_f.write(b\"XML output:\\n\")\n err_f.write(xml_file)\n err_f.close()\n dbg(\"Done.\")\n raise\n\n return items", "def main():\n while True:\n city, month, day, filters = get_filters()\n dataframe = load_data(city, month, day, filters)\n\n print('\\n\\n************DISPLAYING STATISTICS*************')\n time_stats(dataframe, filters)\n station_stats(dataframe, filters)\n trip_duration_stats(dataframe, filters)\n user_stats(dataframe, filters)\n visualize_data(dataframe, filters, city)\n show_data(dataframe, filters, city)\n\n # To restart or quit program\n restart_program()", "def main():\n output_queue = Queue()\n\n out_list = list()\n\n logging.info('Retrieving news...')\n download = DownloadNewsWorker(output_queue)\n download.retrieve_news()\n\n while not output_queue.empty():\n item = output_queue.get()\n out_list.append(item)\n\n return out_list", "def check_feeds(self):\n lst = []\n for feed in self.feeds:\n feed.update()\n if feed.get_new_entries():\n lst.append(feed)\n return lst", "def process_desktop_entries(menu, dirname, filenames):\n for filename in filenames:\n path = os.path.join(dirname, filename)\n if os.path.isdir(path) or not path.endswith(\"desktop\"):\n continue\n else:\n menu.Feed(path)", "def run(self):\n most_recent = self.__most_recent\n while True:\n emails = self.__get_emails()\n\n if most_recent != emails[0]:\n print(f'{self.__source} New messsage recieved')\n\n # Dispatch 
event for new email\n self.__email_event()\n\n # Reset most recent\n most_recent = self.__get_emails()[0]\n\n else:\n time.sleep(0.3)", "def main():\n clear_screen()\n print(\"Establishing a connection with the IMDb service...\")\n session = initialize_connection()\n another = True\n while another:\n clear_screen()\n search_term = input(\"What would you like me to look up for you? \")\n if search_term:\n clear_screen()\n print(f'Please wait while I search for \"{search_term}\"...')\n shows = search_for_title(session, search_term)\n clear_screen()\n print(f'Found {len(shows)} matches.')\n if shows:\n display_shows(shows)\n another_one = input(\"Would you like to search for a different title? ([y]/n)\")\n if another_one.lower().startswith('n'):\n another = False\n else:\n break\n clear_screen()\n print('Bye!')", "def update_handler(sender, update, **kwargs):\n\n feeds = Feed.objects.filter(feed_url=sender.topic)\n\n for feed in feeds:\n for entry in update.entries:\n r = requests.get(entry['link'])\n\n kippt = feed.created_by.kippt_client()\n\n clip = kippt.clips(params={'url': r.url})\n\n if clip['meta']['total_count'] == 0:\n if feed.list_id:\n list_id = feed.list_id\n else:\n list_id = feed.created_by.list_id\n\n kippt.clips.create(\n r.url,\n list_id,\n title=entry['title'],\n notes=entry['summary']\n )", "def __scraping_loop(self, filters = None) -> None:\r\n\r\n if filters:\r\n self.update_filters(filters)\r\n\r\n while self.__running:\r\n\r\n now_time = int(datetime.now(self.__tz).timestamp())\r\n\r\n for poke in self.__get_data():\r\n\r\n enc_id = poke.get('encounter_id', '')\r\n\r\n # already in db, ignore\r\n if self.__pokes_db.get(enc_id, False):\r\n continue\r\n\r\n log.debug(f'New encounter with id {enc_id} added')\r\n self.__send_encounter(poke, now_time)\r\n\r\n time.sleep(self.__delay)", "def main():\n logging.basicConfig(\n level=logging.INFO,\n format=\"%(levelname)s:%(module)s:%(filename)s:%(lineno)s:%(message)s\",\n )\n\n logger = logging.getLogger(__name__)\n\n args = _parse_args()\n config = args.config\n\n start_time = datetime.datetime.utcnow()\n logger.info(\n json.dumps(\n {\n \"msg\": \"Starting BaseballClerk.\",\n \"subreddits\": list(config[\"subreddits\"].keys()),\n \"start_time\": start_time.isoformat(),\n }\n )\n )\n\n # Connect the datastore and create tables if not existing.\n datastore.connect(\"BaseballClerk.db\")\n EVENTS.create_if_needed()\n COMMENTS.create_if_needed()\n\n for game_thread in baseballbot.active_game_threads():\n subreddit_config = config[\"subreddits\"].get(\n game_thread[\"subreddit\"][\"name\"]\n ) # type: dict\n if not subreddit_config:\n continue\n\n logger.info(\n json.dumps(\n {\n \"msg\": \"Running game thread.\",\n \"subreddit\": game_thread[\"subreddit\"][\"name\"],\n \"game_pk\": game_thread[\"gamePk\"],\n }\n )\n )\n\n reddit = praw.Reddit(subreddit_config[\"praw_bot\"])\n\n game_pk = game_thread[\"gamePk\"]\n gamechat = reddit.submission(game_thread[\"postId\"])\n\n play_by_play(game_pk, gamechat)\n exit_velocities(game_pk, gamechat)\n due_up(game_pk, gamechat)\n\n time.sleep(2)\n\n for subreddit_config in config[\"subreddits\"].values():\n praw_bot = subreddit_config[\"praw_bot\"]\n reddit = praw.Reddit(praw_bot)\n\n logger.info(\n json.dumps(\n {\"msg\": \"Running replies.\", \"subreddit\": subreddit_config[\"name\"]}\n )\n )\n\n for item in reddit.inbox.unread():\n # Make sure it is fresh.\n created_utc = datetime.datetime.fromtimestamp(item.created_utc)\n if (datetime.datetime.utcnow() - created_utc).seconds > 600:\n 
item.mark_read()\n continue\n\n if isinstance(item, Comment) and praw_bot.lower() in item.body.lower():\n key = f\"textface-{item.id}\"\n cmnt = comment.default_mention_reply(\n item, subreddit_config[\"default_replies\"]\n )\n COMMENTS[key] = cmnt\n\n item.mark_read() # Keep the inbox clean.\n\n end_time = datetime.datetime.utcnow()\n elapsed = (end_time - start_time).total_seconds()\n logger.info(\n json.dumps(\n {\n \"msg\": \"Finished BaseballClerk.\",\n \"subreddits\": list(config[\"subreddits\"].keys()),\n \"start_time\": start_time.isoformat(),\n \"end_time\": end_time.isoformat(),\n \"elapsed\": elapsed,\n }\n )\n )", "def feed(self, amount=network.default_listen_time):\n asyncore.loop(timeout=amount, count=1)", "def main():\n database_connection = userdata.init()\n # signed_in = account.sign_in('jordan00', 'Jordan!23', database_connection)\n signed_in = account.sign_in_or_create_user(database_connection)\n timeline.timeline(database_connection, signed_in, 1)\n\n while True:\n action = timeline.prompt_for_timeline_action()\n\n if action.isdigit():\n action = int(action)\n if action:\n timeline.timeline(database_connection, signed_in, action)\n else:\n print('-' * 100)\n print('No posts on page 0. Please go to page 1.')\n print('-' * 100)\n\n elif action == 'ACCOUNT SETTINGS':\n account.account(signed_in, database_connection)\n timeline.timeline(database_connection, signed_in, 1)\n elif action == 'USERS':\n usersmodule.users_main_page(database_connection, signed_in)\n timeline.timeline(database_connection, signed_in, 1)\n elif action == 'POST':\n post = input('Type your post:\\n\\t>')\n userdata.add_post(database_connection, post, signed_in)\n timeline.timeline(database_connection, signed_in, 1)\n elif action == 'SIGN OUT':\n database_connection.close()\n print('Bye!')\n break", "def feed(self):\n if self._feed is None:\n self._feed = self._getter.get()\n if len(self._feed) == 0:\n raise ex.FeedTaskEmptyFeed()\n # Do we need to refresh the feed based on having been run?\n # If we haven't replicated WrapperTasks yet, there's no chance we're\n # out of sync - and we don't want to trigger GET/replication.\n if self._tx_by_uuid:\n # Rebuild the entire feed from the WrapperTasks' .wrappers.\n # TAG_WRAPPER_SYNC\n # Note that, if this happens while the WrapperTasks are running,\n # we may be grabbing the wrapper from a WrapperTask \"while\" it is\n # being changed as the result of an update(). 
This is threadsafe as\n # long as the assignment (by WrapperTask.execute) and the accessor\n # (WrapperTask.wrapper) remain atomic by using simple =/return.\n for wrap in self._feed:\n if hasattr(self.get_wrapper(wrap.uuid), 'etag') \\\n and self.get_wrapper(wrap.uuid).etag != wrap.etag:\n self._feed = [tx.wrapper for tx in\n self.wrapper_tasks.values()]\n break\n return self._feed", "def main():\n f = FeedHandler()\n f.add_feed(Coinbase(max_depth=10, channels=[L2_BOOK, TRADES, TICKER],\n symbols=['BTC-USD'],\n callbacks={TRADES: TradeMongo('coinbase', collection='trades'),\n L2_BOOK: BookMongo('coinbase', collection='l2_book'),\n TICKER: TickerMongo('coinbase', collection='ticker')\n }))\n\n f.run()", "def start():\n print('Running...')\n with Feed(Config.database) as feed:\n feed.refresh()", "def generate_atom_feeds(app):\n if not ablog.builder_support(app):\n return\n blog = Blog(app)\n base_url = blog.blog_baseurl\n if not base_url:\n return\n feeds = [\n (\n blog.posts,\n blog.blog_path,\n os.path.join(app.builder.outdir, blog.blog_path, feed_root + \".xml\"),\n blog.blog_title,\n os_path_join(base_url, blog.blog_path, feed_root + \".xml\"),\n feed_templates,\n )\n for feed_root, feed_templates in blog.blog_feed_templates.items()\n ]\n if blog.blog_feed_archives:\n for header, catalog in [\n (_(\"Posts by\"), blog.author),\n (_(\"Posts from\"), blog.location),\n (_(\"Posts in\"), blog.language),\n (_(\"Posts in\"), blog.category),\n (_(\"Posted in\"), blog.archive),\n (_(\"Posts tagged\"), blog.tags),\n ]:\n for coll in catalog:\n # skip collections containing only drafts\n if not len(coll):\n continue\n folder = os.path.join(app.builder.outdir, coll.path)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n for feed_root, feed_templates in blog.blog_feed_templates.items():\n feeds.append(\n (\n coll,\n coll.path,\n os.path.join(folder, feed_root + \".xml\"),\n blog.blog_title + \" - \" + header + \" \" + str(coll),\n os_path_join(base_url, coll.path, feed_root + \".xml\"),\n feed_templates,\n )\n )\n # Config options\n feed_length = blog.blog_feed_length\n feed_fulltext = blog.blog_feed_fulltext\n for feed_posts, pagename, feed_path, feed_title, feed_url, feed_templates in feeds:\n feed = FeedGenerator()\n feed.id(blog.blog_baseurl)\n feed.title(feed_title)\n feed.link(href=base_url)\n feed.subtitle(blog.blog_feed_subtitle)\n feed.link(href=feed_url, rel=\"self\")\n feed.language(app.config.language)\n feed.generator(\"ABlog\", ablog.__version__, \"https://ablog.readthedocs.io/\")\n sorted_posts_by_date = sorted(feed_posts, key=lambda post: post.date, reverse=True)\n for i, post in enumerate(sorted_posts_by_date):\n if feed_length and i == feed_length:\n break\n post_url = os_path_join(base_url, app.builder.get_target_uri(post.docname))\n if post.section:\n post_url += \"#\" + post.section\n if blog.blog_feed_titles:\n content = None\n else:\n content = post.to_html(pagename, fulltext=feed_fulltext, img_url=True)\n feed_entry = feed.add_entry(order=\"append\")\n feed_entry.id(post_url)\n feed_entry.link(href=post_url)\n feed_entry.author({\"name\": author.name for author in post.author})\n feed_entry.pubDate(post.date.astimezone())\n feed_entry.updated(post.update.astimezone())\n for tag in sorted(post.tags):\n feed_entry.category(\n dict(\n term=tag.name.strip().replace(\" \", \"\"),\n label=tag.label,\n )\n )\n # Entry values that support templates\n title = post.title\n summary = \"\".join(paragraph.astext() for paragraph in post.excerpt)\n template_values = {}\n for element in 
(\"title\", \"summary\", \"content\"):\n if element in feed_templates:\n template_values[element] = jinja2.Template(feed_templates[element]).render(**locals())\n feed_entry.title(template_values.get(\"title\", title))\n summary = template_values.get(\"summary\", summary)\n if summary:\n feed_entry.summary(summary)\n content = template_values.get(\"content\", content)\n if content:\n feed_entry.content(content=content, type=\"html\")\n parent_dir = os.path.dirname(feed_path)\n if not os.path.isdir(parent_dir):\n os.makedirs(parent_dir)\n with open(feed_path, \"w\", encoding=\"utf-8\") as out:\n feed_str = feed.atom_str(pretty=True)\n out.write(feed_str.decode())\n if 0:\n # this is to make the function a generator\n # and make work for Sphinx 'html-collect-pages'\n yield", "def update(self):\n feed = feedparser.parse(self._schema % self.project)\n added = []\n for entry in feed['entries']:\n if entry['id'] not in self.entries:\n self.entries[entry['id']] = entry\n added.append(entry)\n return added", "def main():\n print get_latest_data()", "def listFeeds(key):\n # read and parse config, collect each url\n filepath = confighome+\"config\"\n if fileAccessible(filepath,'r'):\n with open(filepath,mode='r', encoding='utf-8') as f:\n jconfig = json.load(f)\n\n # for each url pull the last 5 most recent posts and print them\n str=\"\"\n for url in jconfig[1]['feeds']:\n f = feedparser.parse (url['url'])\n if 'title' not in f.feed:\n print (\"::title not found in url:\",url['url'])\n else:\n str += f.feed.title + \"\\n\" + url['url'] + \"\\n\"\n\n # gimi five\n count=1\n blockcount=1\n for post in f.entries:\n if count % 5 == 1:\n str += post.title +\" - \" + post.link +\"\\n\"\n\n count+=1\n\n str=str+\"\\n\"\n\n if key==0:\n print (str)\n if key==1:\n return str\n else:\n print(\"::unable to read\")\n sys.exit()", "def main_loop(self) -> None:\n while True:\n # Log a message to say that Wheatley is waiting for 'Look To!'\n self.logger.info(\"Waiting for 'Look To!'...\")\n # Sit in an infinite loop whilst we're not ringing, and exit Wheatley if enough time\n # has passed\n self._last_activity_time = time.time()\n while not self._is_ringing:\n time.sleep(0.01)\n if self._server_mode and time.time() > self._last_activity_time + INACTIVITY_EXIT_TIME:\n self.logger.info(f\"Timed out - no activity for {INACTIVITY_EXIT_TIME}s. 
Exiting.\")\n return\n\n self.logger.info(f\"Starting to ring {self.row_generator.summary_string()}\")\n if self._server_mode:\n self._tower.set_is_ringing(True)\n\n while self._is_ringing:\n self.tick()\n time.sleep(0.01)\n\n self.logger.info(\"Stopping ringing!\")\n if self._server_mode:\n self._tower.set_is_ringing(False)", "def test_feed_subclassing(self):\n moksha.feed_cache = FakeCache()\n class MyFeed(Feed):\n url = 'http://lewk.org/rss'\n feed = MyFeed()\n assert feed.url == 'http://lewk.org/rss'\n assert feed.num_entries() > 0\n for entry in feed.iterentries():\n pass\n for entry in feed.get_entries():\n pass", "def feed(self) -> None:", "def generate_feeds(self, writer):\r\n\r\n if self.settings.get('FEED_ATOM'):\r\n writer.write_feed(self.articles, self.context,\r\n self.settings['FEED_ATOM'])\r\n\r\n if self.settings.get('FEED_RSS'):\r\n writer.write_feed(self.articles, self.context,\r\n self.settings['FEED_RSS'], feed_type='rss')\r\n\r\n if (self.settings.get('FEED_ALL_ATOM')\r\n or self.settings.get('FEED_ALL_RSS')):\r\n all_articles = list(self.articles)\r\n for article in self.articles:\r\n all_articles.extend(article.translations)\r\n all_articles.sort(key=attrgetter('date'), reverse=True)\r\n\r\n if self.settings.get('FEED_ALL_ATOM'):\r\n writer.write_feed(all_articles, self.context,\r\n self.settings['FEED_ALL_ATOM'])\r\n\r\n if self.settings.get('FEED_ALL_RSS'):\r\n writer.write_feed(all_articles, self.context,\r\n self.settings['FEED_ALL_RSS'],\r\n feed_type='rss')\r\n\r\n for cat, arts in self.categories:\r\n arts.sort(key=attrgetter('date'), reverse=True)\r\n if self.settings.get('CATEGORY_FEED_ATOM'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['CATEGORY_FEED_ATOM']\r\n % cat.slug)\r\n\r\n if self.settings.get('CATEGORY_FEED_RSS'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['CATEGORY_FEED_RSS']\r\n % cat.slug, feed_type='rss')\r\n\r\n for auth, arts in self.authors:\r\n arts.sort(key=attrgetter('date'), reverse=True)\r\n if self.settings.get('AUTHOR_FEED_ATOM'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['AUTHOR_FEED_ATOM']\r\n % auth.slug)\r\n\r\n if self.settings.get('AUTHOR_FEED_RSS'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['AUTHOR_FEED_RSS']\r\n % auth.slug, feed_type='rss')\r\n\r\n if (self.settings.get('TAG_FEED_ATOM')\r\n or self.settings.get('TAG_FEED_RSS')):\r\n for tag, arts in self.tags.items():\r\n arts.sort(key=attrgetter('date'), reverse=True)\r\n if self.settings.get('TAG_FEED_ATOM'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['TAG_FEED_ATOM']\r\n % tag.slug)\r\n\r\n if self.settings.get('TAG_FEED_RSS'):\r\n writer.write_feed(arts, self.context,\r\n self.settings['TAG_FEED_RSS'] % tag.slug,\r\n feed_type='rss')\r\n\r\n if (self.settings.get('TRANSLATION_FEED_ATOM')\r\n or self.settings.get('TRANSLATION_FEED_RSS')):\r\n translations_feeds = defaultdict(list)\r\n for article in chain(self.articles, self.translations):\r\n translations_feeds[article.lang].append(article)\r\n\r\n for lang, items in translations_feeds.items():\r\n items.sort(key=attrgetter('date'), reverse=True)\r\n if self.settings.get('TRANSLATION_FEED_ATOM'):\r\n writer.write_feed(\r\n items, self.context,\r\n self.settings['TRANSLATION_FEED_ATOM'] % lang)\r\n if self.settings.get('TRANSLATION_FEED_RSS'):\r\n writer.write_feed(\r\n items, self.context,\r\n self.settings['TRANSLATION_FEED_RSS'] % lang,\r\n feed_type='rss')", "def parse_rss(link, mode):\n\n one_feed = []\n news_counter = 0\n 
app.logger.info(f'Parsing feed: {link}')\n # Get file from internet, open it with xml-parser\n rss = feedparser.parse(link)\n\n for entry in rss.entries:\n\n if mode == 'latest':\n news_item_date = get_timestamp(entry.published)\n\n # Stop reading RSS if current news is already older than time\n # when user last got the news feed\n if news_item_date < last_time_user_got_news:\n return one_feed\n\n post = {'title': entry.title,\n 'published': get_timestamp(entry.published)}\n\n # Try to get link to image from one of a place where it can be\n try:\n pic = entry.enclosures[0].href\n except(IndexError, AttributeError):\n pic = get_img_source(entry.summary)\n\n post['image'] = pic if pic else url_for('static',\n filename=\"400x400.jpg\")\n\n link = entry.link\n post['link'] = link\n domain_name = re.search(r'://(.+?)/', link).group(1)\n post['domain_name'] = domain_name if domain_name else 'unknown'\n\n one_feed.append(post)\n\n if mode != 'latest':\n return one_feed\n else:\n print('There are no new news at all.')\n return []", "def loop(self):\n while not self.should_exit:\n self._run_once()\n\n self.on_exit()", "async def process(self, timeout=60):\n\n previous_date = self.previous_date()\n new_date = previous_date\n last_sent_message_date = previous_date\n now = pendulum.now('UTC')\n\n self.log.info(\"Begining processing feed %s, previous date %s\",\n self.name, previous_date)\n\n for entry in await self.fetch_and_parse(timeout):\n\n pubdate = dateutil.parser.parse(entry.published, tzinfos=rssalertbot.BOGUS_TIMEZONES)\n entry.published = pendulum.from_timestamp(pubdate.timestamp())\n # also save a prettified string format\n entry.datestring = self.format_timestamp_local(entry.published)\n\n # skip anything that's stale\n if entry.published <= previous_date:\n continue\n\n event_id = md5((entry.title + entry.description).encode()).hexdigest()\n last_sent = self.storage.load_event(self.feed, event_id)\n re_alert = self.cfg.get('re_alert', rssalertbot.RE_ALERT_DEFAULT)\n should_delete_message = False\n\n if entry.published > now:\n if last_sent and now < last_sent.add(hours=re_alert):\n continue\n self.storage.save_event(self.feed, event_id, now)\n else:\n if entry.published > new_date:\n new_date = entry.published\n should_delete_message = last_sent\n\n self.log.debug(\"Found new entry %s\", entry.published)\n\n # alert on it\n await self.alert(entry)\n if new_date > last_sent_message_date:\n self.storage.save_date(self.feed, new_date)\n last_sent_message_date = new_date\n\n if should_delete_message:\n self.log.debug(f\"Deleting stored date for message {event_id}\")\n self.storage.delete_event(self.feed, event_id)\n\n self.log.info(\"End processing feed %s, previous date %s\", self.name, new_date)", "def main_loop(self):\r\n print('Press ctrl-c to quit')\r\n while True:\r\n url = input('\\nType Question url: ')\r\n handler = AnswerHandler(self.session)\r\n res, err = handler.answer_questions(url)\r\n if res:\r\n print('No more questions for this URL')\r\n else:\r\n print(f'Unexpected exception occurred: {err}', file=sys.stderr)\r\n traceback.print_exc()", "def _extract_data_from_feed(self):\n for eco in self.snyk_data:\n if eco == \"java\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Maven.\")\n self._add_default_obj_for_eco(\"maven\")\n self._parse_data(self.snyk_data[eco], \"maven\")\n elif eco == \"js\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Npm.\")\n self._add_default_obj_for_eco(\"npm\")\n self._parse_data(self.snyk_data[eco], 
\"npm\")\n elif eco == \"python\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Pypi.\")\n self._add_default_obj_for_eco(\"pypi\")\n self._parse_data(self.snyk_data[eco], \"pypi\")\n elif eco == \"golang\" and self._parse_data_for_eco(eco):\n logger.info(\"Parsing feed for Golang.\")\n self._add_default_obj_for_eco(\"golang\")\n self._parse_golang_data(self.snyk_data[eco], \"golang\")\n else:\n logger.info(\"Ignoring the ecosystem {} from the feed\".format(eco))", "async def _main(self):\n while True:\n time.sleep(1)", "def get_feeds():\n feeds = {}\n for _configuration_key, _configuration in blogs.all():\n if not _configuration.use_generic_feeds:\n continue\n\n class EntryFeed(Feed):\n configuration = _configuration\n configuration_key = _configuration_key\n\n title_template = _configuration.feed_title_template_name\n description_template = \\\n _configuration.feed_description_template_name\n\n feed_type = feedgenerator.Rss201rev2Feed\n\n def get_site(self):\n if not hasattr(self, '_current_site'):\n self._current_site = Site.objects.get_current()\n return self._current_site\n\n def title(self):\n if self.configuration.feed_title is not None:\n return self.configuration.feed_title\n return self.get_site().name\n \n def link(self):\n if self.configuration.feed_link is not None:\n return self.configuration.feed_link\n return \"http://%s/\" % (self.get_site().domain)\n \n def description(self):\n if self.configuration.feed_description is not None:\n return self.configuration.feed_description\n return \"Latest entries on %s\" % self.get_site().name\n \n def items(self):\n items = self.configuration.model.live.all()\n return items[:self.configuration.feed_limit]\n \n def item_pubdate(self, obj):\n return obj.pub_date\n\n def item_link(self, obj):\n return self.configuration.get_entry_absolute_url(obj)\n\n if _configuration.feed_format == feed_formats.ATOM:\n # Alter the class to support Atom feeds instead of RSS.\n EntryFeed.feed_type = feedgenerator.Atom1Feed\n EntryFeed.subtitle = EntryFeed.description\n\n feeds[_configuration_key] = EntryFeed\n return feeds", "def main():\n ds = 72\n title = 'Journal'\n journal_name = 'my-journal'\n headers.dashes_line(ds)\n headers.print_header(title, ds)\n data = journal.load(journal_name)\n event_loop(journal_name, data)\n # list_entries(data)\n # add_entry(data)\n # journal.save(journal_name, data)", "def download_feed_return_objects(rss_url):\r\n try:\r\n feed_obj = rss_exists(rss_url)\r\n except:\r\n yield None\r\n return\r\n\r\n feed_obj_found = False\r\n feed_parser_results, success = get_rss(rss_url)\r\n\r\n if feed_parser_results is None:\r\n error_reporter.captureMessage(u'Feed Parser results is None', **dict(rss_url=rss_url))\r\n yield None\r\n return\r\n\r\n if feed_obj is None:\r\n feed_obj = create_new_feed(feed_parser_results, rss_url)\r\n else:\r\n feed_obj_found = True\r\n\r\n feed_id = feed_obj.id\r\n feed_obj.title = feed_parser_results.get(\"title\", \"\") or \"\"\r\n max_length_field(feed_obj, 'title', 100)\r\n\r\n feed_obj.status_code = feed_parser_results.get(\"status\", \"\") or 200\r\n feed_obj.status = find_feed_status_from_scode(feed_obj)\r\n\r\n feed_obj.etag = cut_clean_etag(feed_parser_results.get(\"etag\", \"\"))\r\n\r\n updated_date = feed_parser_results.get(\"updated_parsed\")\r\n feed_obj.updated = dt.fromtimestamp(mktime(updated_date)) if updated_date is not None else dt.utcnow()\r\n #\tfeed_obj.published = dt.fromtimestamp(mktime(published_date)) if published_date is not None else None\r\n 
feed_obj.last_check = dt.utcnow()\r\n\r\n # We could be creating a new feed, or updating the existing one.\r\n yield feed_obj\r\n rss_posts = []\r\n\r\n for feed_article in feed_parser_results.get(\"entries\", []):\r\n ptime = feed_article.get(\"published_parsed\", None)\r\n post_date = dt.fromtimestamp(mktime(ptime)) if ptime is not None else dt.utcnow()\r\n #\t\tprint \"%r\" % post\r\n p = Post(\r\n id=uuid.uuid1(),\r\n title=feed_article.get(\"title\", \"\"),\r\n author=feed_article.get(\"author\", \"\"),\r\n href=feed_article.get(\"href\", \"\"),\r\n post_id=feed_article.get(\"id\", \"\"),\r\n published_at=post_date,\r\n feed_id=feed_id\r\n )\r\n\r\n p.original_title = max_length_field(p, 'title', 200)\r\n p.original_author = max_length_field(p, 'author', 200)\r\n\r\n p.content_html = feed_article.get(\"content\", \"\") or \"\"\r\n\r\n if feed_article.has_key(\"media_content\"):\r\n media_contents = feed_article.get(\"media_content\", []) or []\r\n if media_contents is not None and (not isinstance(media_contents, basestring)) and isinstance(\r\n media_contents, collections.Iterable):\r\n p.media = [media.get(\"url\") for media in media_contents]\r\n\r\n hasHash = False\r\n\r\n if feed_article.has_key(\"feedburner_origlink\"):\r\n p.original_link = feed_article.get(\"feedburner_origlink\", \"\")\r\n if non_empty_str(p.original_link):\r\n p.link_hash = url_hash(safe_str(p.original_link))\r\n hasHash = True\r\n\r\n if feed_article.has_key(\"link\"):\r\n p.href = feed_article.get(\"link\", \"\")\r\n if not hasHash and non_empty_str(p.href):\r\n p.link_hash = url_hash(safe_str(p.href))\r\n hasHash = True\r\n\r\n if not hasHash:\r\n print \"Post don't have any hash\"\r\n\r\n p.title_hash = url_hash(safe_str(p.title)) if non_empty_str(p.title) else \"\"\r\n p.post_id_hash = url_hash(safe_str(p.post_id)) if non_empty_str(p.post_id) else \"\"\r\n\r\n if feed_article.has_key(\"tags\"):\r\n if isinstance(feed_article['tags'], collections.Iterable):\r\n p.tags = [pst.get(\"term\") for pst in feed_article['tags']]\r\n\r\n rss_posts.append(p)\r\n\r\n has_posts = len(rss_posts) > 0\r\n post_id_hashes = [p.post_id_hash for p in rss_posts]\r\n #\tpost_title_hashes = [p.title_hash for p in rss_posts]\r\n post_link_hashes = [p.link_hash for p in rss_posts]\r\n\r\n found_posts_id_hashes = []\r\n found_posts_link_hashes = []\r\n\r\n if feed_obj_found and has_posts:\r\n existing_posts = find_existing_posts(feed_id, post_id_hashes, post_link_hashes)\r\n\r\n for ex_post_id_hash, ex_link_hash in existing_posts:\r\n found_posts_id_hashes.append(ex_post_id_hash)\r\n found_posts_link_hashes.append(ex_link_hash)\r\n\r\n has_existing_posts = len(found_posts_id_hashes) > 0 or len(found_posts_link_hashes) > 0\r\n\r\n new_post_count = 0\r\n if has_posts:\r\n for rss_post in rss_posts:\r\n should_skip = False\r\n\r\n if has_existing_posts:\r\n if non_empty_str(rss_post.post_id_hash) and rss_post.post_id_hash in found_posts_id_hashes:\r\n should_skip = True\r\n elif rss_post.link_hash in found_posts_link_hashes:\r\n should_skip = True # \"Link Hash found in existing records\"\r\n\r\n if not should_skip:\r\n new_post_count += 1\r\n yield rss_post\r\n\r\n feed_history = FeedHistory(id=uuid.uuid1(),\r\n feed_id=feed_obj.id,\r\n timestamp=dt.utcnow(),\r\n status=feed_obj.status_code,\r\n post_count=new_post_count,\r\n etag=feed_obj.etag)\r\n yield feed_history", "def main():\n\t# holds all the tags and occurences\n\ttag_dict = {}\n\ttextfile = file('tag_crawler_output.txt','wt')\n\t\n\t# retrieve data from instagram every 
10 seconds\n\tfor i in xrange(10):\n\t\t# every 10 seconds, gather information regarding fashion posts\n\t\ttime.sleep(10)\n\t\tmedia_info = get_info()\n\t\tget_tags(media_info, tag_dict)\n\tfor key in tag_dict.keys():\n\t\tstrr = str(key) + \" , \" + str(tag_dict[key]) + \"\\n\"\n\t\ttextfile.write(strr)\n\n\ttextfile.close()", "def main(args):\n\n #gets urls based on sections and creates basic directories\n stack_exchange_data = get_data(args.filename)\n zip_directory, corpus_directory = args.zip_path, args.dest_path\n setup(zip_directory, corpus_directory)\n\n for (section, url) in stack_exchange_data:\n #creates directories for the current SE site\n zip_file_path, unzipped_folder, corpus_section_directory = section_setup(\n section, zip_directory, corpus_directory)\n\n done_signal_path = os.path.join(corpus_section_directory, \".done\")\n if os.path.isfile(done_signal_path):\n continue\n\n print(\"Starting \" + section)\n\n #downloads and unzips data release for a site\n load(url, zip_file_path, unzipped_folder)\n\n #gets the links data from the links table for the site\n links = get_links(unzipped_folder)\n\n #gets post data from the posts table\n posts = get_posts(unzipped_folder)\n\n #gets post history\n posthistory = get_post_history(unzipped_folder)\n\n #creates the clusters of related and duplicate posts for a site,\n #based on links data\n # clusters, related, duplicates, unique_posts = gen_clusters(links)\n clusters = iter_clusters(links, posts, posthistory)\n\n #writes cluster information to json files\n write_json_files(clusters, corpus_section_directory)\n \n # put completion marker in folder so we can skip it next time\n with open(done_signal_path, \"w\") as f:\n print(\"\", file=f)\n\n print(\"Completed \" + section)", "def get_feed(self):\n possible_endings = ('rss', 'rss/')\n if not self.url or not self.url.endswith(possible_endings):\n print('Please check URL(is RSS?) and Internet connection')\n sys.exit()\n try:\n data = feedparser.parse(self.url)\n except urllib.error.URLError:\n print('Please input correct URL')\n sys.exit()\n self.get_content(data)\n return self.items" ]
[ "0.67926604", "0.6669436", "0.66450924", "0.66018087", "0.63761574", "0.63712484", "0.63650745", "0.6307002", "0.62871724", "0.61856663", "0.6142305", "0.6107129", "0.61045074", "0.61003304", "0.60493374", "0.6042528", "0.6004268", "0.59982747", "0.59332794", "0.59283483", "0.59238905", "0.59203357", "0.59021974", "0.5884002", "0.5835004", "0.58213234", "0.5816358", "0.5815946", "0.58096886", "0.5806962", "0.5769036", "0.57545584", "0.5738963", "0.5730833", "0.5729983", "0.5724836", "0.5719943", "0.5674116", "0.56657004", "0.5662127", "0.56555194", "0.5653167", "0.5626021", "0.5622987", "0.55896914", "0.5585332", "0.5560246", "0.55501175", "0.5548218", "0.5542711", "0.5530246", "0.5524368", "0.5501515", "0.54925376", "0.54847795", "0.5470385", "0.5464068", "0.5451064", "0.54465675", "0.54433626", "0.54374474", "0.5433759", "0.54288375", "0.54251564", "0.54120165", "0.53872967", "0.5385559", "0.5382636", "0.53688973", "0.5363475", "0.53627", "0.5361156", "0.53598374", "0.53597367", "0.53590155", "0.53573275", "0.5346188", "0.5343142", "0.533963", "0.5335965", "0.5327027", "0.53235614", "0.5321508", "0.5315081", "0.5311594", "0.5307562", "0.5291045", "0.5287463", "0.52846193", "0.5281467", "0.5279676", "0.52770615", "0.52672845", "0.52588487", "0.52559", "0.5253667", "0.5252122", "0.5239264", "0.5239081", "0.5224909" ]
0.65567255
4
Gather the top 10 words by highest (descending) likelihoods for each class
def top10_likelihoods(likelihoods, vocab, classes): resultDict = {} for cls in classes: results = [] for word in vocab: results.append((word, likelihoods[cls][word])) resultDict[cls] = results # Sort and return top 10 for each class for key in resultDict: results = resultDict[key] resultDict[key] = map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10] return resultDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def top10_odds_ratio(likelihoods, vocab, classes):\r\n results = []\r\n for word in vocab:\r\n highestOddsRatio = None\r\n for c1 in classes:\r\n for c2 in classes:\r\n # Skip self TODO: Is this right?\r\n # if c1 == c2:\r\n # continue\r\n oddsRatio = odds_ratio(likelihoods, c1, c2, word)\r\n if oddsRatio > highestOddsRatio or highestOddsRatio == None:\r\n highestOddsRatio = oddsRatio\r\n results.append((word, highestOddsRatio))\r\n # Sort and return top 10\r\n return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]", "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def print_top10(vectorizer, clf, class_labels):\n feature_names = vectorizer.get_feature_names()\n for i, class_label in enumerate(class_labels):\n top10 = np.argsort(clf.coef_[i])[-15:]\n print(\"%s: %s\" % (class_label,\n \" \".join(feature_names[j] for j in top10)))", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def top10(self) -> List[Word]:\n return self._top10", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n# predictedValues = clf.predict(X_test)\n# predictedProbabilities = clf.predict_proba(X_test)\n# missClassifiedDocs = []\n# for index in range(len(predictedValues)):\n# if predictedValues[index] != test_labels[index]:\n# entry = dict()\n# entry['truth'] = test_labels[index]\n# entry['predicted'] = predictedValues[index]\n# entry['proba'] = predictedProbabilities[index][entry['predicted']]\n# entry['document'] = test_docs[index]\n# missClassifiedDocs.append(entry)\n# missClassifiedDocs = sorted(missClassifiedDocs, key=lambda x: -x['proba'])[:n]\n# for docEntry in 
missClassifiedDocs:\n# print('')\n# print('truth=' + str(docEntry['truth']) + ' predicted=' + str(docEntry['predicted']) + ' proba=' + str(docEntry['proba']))\n# print(str(docEntry['document']))\n to_predict = clf.predict(X_test)\n diff = np.where( to_predict != test_labels)[0] \n\n predict_prob = clf.predict_proba(X_test)\n wrong_predict = predict_prob[diff]\n\n keys1 = np.argsort(np.amax(wrong_predict, axis = 1))[::-1][:n] \n \n for i in range(0, n):\n doc_keys = diff[keys1[i]]\n truth=str(test_labels[doc_keys])\n predicted=str( to_predict[doc_keys])\n probab=str(np.max(predict_prob[doc_keys]))\n print('truth='+truth+' predicted='+predicted+' proba='+probab)\n print(test_docs[doc_keys]+'\\n')", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def print_top_misclassified(test_docs, test_labels, X_test, clf, n):\n ###TODO\n #print('test_labels =',test_labels) \n\n #step 1 -> find missclassified\n predicted = clf.predict(X_test)\n \n #print('predicted = ',predicted)\n #acc = accuracy_score(test_labels, predicted)\n #print('acc = ',acc )\n \n misclassified = np.where(predicted != test_labels)\n \n #print('misclassified = ',misclassified)\n #print('misclassified = ',misclassified[0])\n #print('misclassified = ',misclassified[0][0])\n\n #step 2 -> find predicted probabilities\n probab = clf.predict_proba(X_test)\n \n #print('probab = ',probab)\n \n #step 3 -> collect all misclassified docs with all required info\n misclassified_docs = []\n \n for i in misclassified[0]:\n #print(i)\n misclassified_docs.append( ( test_labels[i], predicted[i], probab[i][predicted[i]], test_docs[i] ) ) \n\t\t\n #step 4 -> sort in descending order of the predicted probability for the incorrect class \t\n sorted_docs = sorted(misclassified_docs,key=lambda x:(-x[2]))[:n]\n\n #step 5 -> print all value\n for doc in sorted_docs :\n print('\\n',\"truth=\",doc[0],\" predicted=\",doc[1],\" proba=\",doc[2])\n print(str(doc[3])) #.encode(\"utf-8\")", "def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', '.join(top_n_words))\n return top_words", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: tup[1]))[:self._top_values]", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def format_top_n(self, n=10):\n output = []\n for t, c in self._freq.most_common(n):\n files_, sents_ = self.fetch_index(t)\n word = t + ' (' + str(c) + ')'\n output.append([word, ','.join(files_), \"\\n\".join(sents_)])\n\n return output", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx 
= np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty", "def class_conditional_word_dist(self, Mprint=20):\n self.class_word_dist = np.array(np.vstack([self.data[self.labels == ci, :].sum(0)/self.data[self.labels == ci, :].sum() for ci in np.unique(self.labels)])) # num of classes x num of words\n self.labels_word = self.class_word_dist.argmax(0)\n for i in range(self.class_word_dist.shape[0]):\n print('top {} frequent words in class {}'.format(Mprint, i))\n idx = np.argsort(self.class_word_dist[i, :])[::-1][:Mprint]\n for j in range(Mprint):\n print(' {:3d}: {:10s} {:.4f}'.format(j, self.vocab[idx[j]], self.class_word_dist[i, idx[j]]))", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for 
word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()", "def print_top10(vectorizer, clf):\n feature_names = vectorizer.get_feature_names()\n indices=np.argsort(clf.coef_)[0][-10:]\n for i in range(10):\n print(feature_names[indices[i]])", "def display_topics2(model, feature_names, n_top_words=25):\n word_dict = {};\n for topic_idx, topic in enumerate(model.components_):\n word_dict[\"Topic%d\" % (topic_idx)] = [feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]\n return pd.DataFrame(word_dict).T", "def get_top_words(tfidf_dict: dict, n_words=10):\n header = ['year', 'term', 'tf-idf']\n dfs = []\n for each_year, tfidf_scores in tfidf_dict.items():\n df_list = []\n for term_score in tfidf_scores:\n df_list.append([each_year, term_score[0], float(term_score[1])])\n yr_df = pd.DataFrame(df_list, columns=header)\n yr_df = yr_df.sort_values(by=['tf-idf'], ascending=False)\n if n_words < len(tfidf_scores):\n yr_df = yr_df.iloc[:n_words].reset_index(drop=True)\n dfs.append(yr_df)\n else:\n raise ValueError('input of n_words is more than the words in data!')\n\n df_out = pd.concat(dfs)\n\n return df_out", "def top_sentences(query, sentences, idfs, n):\n tf_idfs = []\n for sentence, words in sentences.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = (1 if word in words else 0)\n tf_idf += idf * tf\n t = (sentence, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=sorter)\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]", "def get_top_keywords(entries):\n # Extract text for processing\n\n 
raw_text = [] # raw text in sentences\n for entry in entries:\n # Its a post\n if 'title' in entry:\n raw_text.append(entry['title'])\n raw_text += tokenize.sent_tokenize(entry['selftext'])\n else:\n raw_text += tokenize.sent_tokenize(entry['body'])\n \n # Tokenize\n tokens = tokenize_posts_keywords(raw_text)\n\n # 1-gram\n fdist_1 = FreqDist(tokens)\n top_keywords_1 = fdist_1.most_common(100)\n \n # 2-gram\n bigrams = ngrams(tokens, 2)\n fdist_2 = FreqDist(bigrams)\n top_keywords_2 = fdist_2.most_common(100)\n top_keywords_2 = [(f'{keywords[0]} {keywords[1]}', mentions) for keywords, mentions in top_keywords_2]\n\n # 3-gram\n trigrams = ngrams(tokens, 3)\n fdist_3 = FreqDist(trigrams)\n top_keywords_3 = fdist_3.most_common(100)\n top_keywords_3 = [(f'{keywords[0]} {keywords[1]} {keywords[2]}', mentions) for keywords, mentions in top_keywords_3]\n\n top_keywords = top_keywords_1 + top_keywords_2 + top_keywords_3\n return [{ 'keyword' : keyword, 'mentions' : mentions } for keyword, mentions in top_keywords]", "def count_words(self,top_only=True):\n if top_only:\n self.top_skill_list()\n else:\n self.all_skill_list()\n word_counts = Counter(self.skill_list)\n top_n = word_counts.most_common(len(word_counts))\n self.feature = []\n proportion = []\n for i in top_n:\n self.feature.append(i[0])\n proportion.append(i[1])\n self.coff = 1./(np.log(proportion)+1)\n return", "def test_top_n_freqs():\n ngrams = NgramFrequencies()\n top_list = [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]\n top_freq = ngrams.top_n_freq(top_list, 10)\n assert top_freq == [(\"d\", 0.4), (\"c\", 0.3), (\"b\", 0.2), (\"a\", 0.1)]", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def top_sentences(query, sentences, idfs, n):\n scored_sentences = {}\n for word in query:\n # print(f\"Searching for {word}\")\n for k, v in sentences.items():\n\n # Ignore headings\n if k.strip(\"=\") != k:\n continue\n\n if word.lower() in v:\n \n try:\n check = scored_sentences[k]\n except:\n scored_sentences[k] = 0\n\n scored_sentences[k] += idfs[word]\n\n # print(scored_sentences)\n # exit()\n\n # print(f\"Scored Sentences:\\n\\t{scored_sentences}\")\n final_result = []\n while len(final_result) < n:\n top = \"\"\n g = 0.0\n s = False\n\n for k, v in scored_sentences.items():\n\n if float(v) >= float(g):\n\n # Query term density calculation\n if float(v) == float(g):\n\n old_s_set = set(top.split(\" \"))\n new_s_set = set(k.split(\" \"))\n q_set = set(query)\n\n # similarities between words in question and our query words\n inter_new = float(len(new_s_set & q_set) / len(k))\n inter_old = float(len(old_s_set & q_set) / len(top))\n\n if inter_new < inter_old:\n continue\n\n g = v\n top = k\n\n if top:\n final_result.append(top)\n del scored_sentences[top]\n else:\n final_result.append(\"Not enough context for additional results.\")\n return final_result\n \n return final_result", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def top_coefs(clf, label, n, vocab):\n ###TODO\n \n # step 1 -> get .coef_\n coefficient = clf.coef_[0] #***** \n \n # step 2 -> check label and sort\n if label == 1: # positive class -> descending sorting\n # get indices of sorted list i.e. 
[2,3,1] -> sorting [1,2,3] -> indices[3,1,2]\n top_coef_ind = np.argsort(coefficient)[::-1][:n] # requires very less time by this methos of sorting and get sorted element's indices \n \n if label == 0: # negative class -> ascending sorting\n top_coef_ind = np.argsort(coefficient)[::1][:n]\n \n \n #step 3 -> get all top coefficient' indices\n #print('top_coef_ind = ',top_coef_ind)\n top_coef = abs(coefficient[top_coef_ind])\n #print('top_coef = ',top_coef)\n \n #step 4 -> get all top coefficient' terms i.e. tokens\n rev_Vocab = {}\n \n for term,colId in vocab.items():\n rev_Vocab.setdefault(colId,term)\n #alternatives -> check for fasted \n #vocab.__class__(map(reversed, vocab.items()))\n #rev_Vocab = lambda vocab: {v:k for k, v in vocab.items()}\n #rev_Vocab = lambda vocab: dict( zip(vocab.values(), vocab.keys()) )\n \n \n top_coef_terms = []\n \n for colId in top_coef_ind:\n top_coef_terms.append(rev_Vocab[colId])\n \n #step 5 -> get touple (top_coef_terms, top_coef) and send\n return ([x for x in zip(top_coef_terms, top_coef)])", "def textrank(self, sentences, n_top=2, stopwords=None):\n S = self.build_similarity_matrix(sentences, stopwords) \n sentence_ranks = self.pagerank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranks), key=lambda item: -item[1])]\n #print(ranked_sentence_indexes)\n selected_sentences = sorted(ranked_sentence_indexes[:n_top])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def textrank(sentences, top_n, stopwords=None):\n S = build_similarity_matrix(sentences, stopwords) \n sentence_ranking = page_rank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranking), key=lambda item: -item[1])]\n selected_sentences = sorted(ranked_sentence_indexes[:top_n])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary", "def print_top_results(test_sample_idx, preds, labels, vocab, show_max=5):\n idx_sort = preds[test_sample_idx].argsort(descending=True)\n print(f'Top {show_max} results for sample \\'{labels[test_sample_idx]}\\':')\n for rank,i in enumerate(idx_sort[:show_max]):\n print(f' [#{rank+1}] {vocab[i]} ({preds[test_sample_idx][i]:.2f})')", "def most_similar(self, words: [str], top_n=3, metric='cosine') -> [(str, float)]:\n if len(words) == 0:\n return []\n\n vec = self.mean(words)\n if numpy.count_nonzero(vec) == 0:\n return []\n\n return [w for w, sim in self.most_similar_vec(vec=vec, top_n=top_n, exclude_words=words, metric=metric)]", "def top_sentences(query, sentences, idfs, n):\n # identifies the sentences that are the best 
match for the query.\n top_sens = dict()\n for sentence, tokens in sentences.items():\n # add query rank to the idfs dictionary\n # top_sens is a dictionary of two columns, both initally empty\n query_tokens = len([word for word in tokens if word in query])\n value = query_tokens / (len(tokens))\n for word, idf_score in idfs.items():\n if word in query and word in tokens:\n # 'matching word measure'\n value += idf_score\n top_sens[sentence] = value\n # if a tie, prefer a higher 'query term density' -- /= : divide by and update value\n # defined as the proportion of words in the sentence that are also words in the query. For example, if a sentence has 10 words, 3 of which are in the query, then the sentence’s query term density is 0.3.\n # list of sentences to query ranked according to idf x[1] and if a tie, then density x[2] ; reverse=True: descending order\n # sentence list x[0] of length n ( [:n] )\n top_sens_rank = sorted(top_sens, key=top_sens.get, reverse=True)\n return top_sens_rank[0:n]", "def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)", "def keep_top_words(self, M, Mprint=20):\n freq = self.data.sum(axis=0)\n freq = np.squeeze(np.asarray(freq))\n idx = np.argsort(freq)[::-1]\n idx = idx[:M]\n self.keep_words(idx)\n print('most frequent words')\n for i in range(Mprint):\n print(' {:3d}: {:10s} {:6d} counts'.format(i, self.vocab[i], freq[idx][i]))\n return freq[idx]", "def getTopNWords(self, n=5):\n word_id = []\n for i in range(self.topic_word_matrix.shape[0]):\n word_id.append(self.topic_word_matrix[i].argsort()[:n])\n top_word_df = pd.DataFrame(index=['topic{}'.format(x) for x in range(self.K)],\n columns=['word{}'.format(x) for x in range(n)])\n for i in range(len(word_id)):\n for j in range(n):\n top_word_df.loc['topic{}'.format(i), 'word{}'.format(j)] = self.id2word[word_id[i][j]]\n return top_word_df", "def show_topn(classifier,vectorizer,categories,n):\n feature_names = np.asarray(vectorizer.get_feature_names())\n for i, category in enumerate(categories):\n topn = np.argsort(classifier.coef_[i])[-n:]\n print('{}: {}'.format(category,\", \".join(feature_names[topn])))", "def bow_top_n(corpus, n):\n bag_of_words_model_small = CountVectorizer(max_features=n)\n bag_of_word_df_small = pd.DataFrame(bag_of_words_model_small.fit_transform(corpus).todense())\n bag_of_word_df_small.columns = sorted(bag_of_words_model_small.vocabulary_)\n return bag_of_word_df_small", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def prepare_lexicons(self, topnwords = 80, distance_cutoff = 0.45):\n\n model = self.train_word2vec()\n\n\n # 10 topics\n topic_dict = {0: 'academics'\n , 1: 'career'\n , 2: 'commute'\n , 3: 'diversity'\n , 4: 'community'\n , 5: 'extracurricular'\n , 6: 'facilities'\n , 7: 'finance'\n , 8: 'housing'\n , 9: 'wellness'\n }\n\n # Some important words that should be included under each topic\n topics = [['academic', 'exam', 'study', 'learn', 'education', 'class', 'course', 'grade', 'assignment'\n , 'degree', 'research', 'elective'\n , 'professor', 'project', 'scholarship', 
'knowledge']\n , ['career', 'job', 'coop', 'employment']\n , ['commute', 'skytrain', 'transport', 'commuter']\n , ['diversity', 'diverse', 'background']\n , ['community', 'welcome', 'support', 'social', 'friend', 'fun', 'network', 'home']\n , ['extracurricular', 'club', 'sport', 'activity']\n , ['facility', 'infrastructure', 'food', 'building', 'gym']\n , ['finance', 'tuition', 'expensive']\n , ['housing', 'live', 'residence']\n , ['wellness', 'health', 'stress', 'depression', 'anxiety']]\n\n # For each topic, collect the words most similar to them in a list of lists\n topic_lexicons = []\n\n # Loop through the ten topics\n for topic in topics:\n\n temp_words = []\n\n # Loop through each word that we have given manually under each topic\n for word in topic:\n\n # Consider most similar words according to some cutoffs\n similar_words = model.wv.most_similar(positive = word, topn = topnwords)\n temp_words1 = [x for (x,y) in similar_words if y >= distance_cutoff]\n\n temp_words = temp_words + temp_words1\n\n temp_words = temp_words + topic\n\n\n # Take unique words, there might be duplicates\n topic_lexicons.append(list(set(temp_words)))\n\n # Some manual adjustments\n # Remove 'commute' from other topic\n topic_lexicons[8].remove('commute')\n\n return topic_lexicons", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')", "def predict_currword(word, top_n=10):\r\n try:\r\n return [\r\n (k, v) for k, v in model.WORDS_MODEL.most_common() if k.startswith(word)\r\n ][:top_n]\r\n except KeyError:\r\n raise Exception(\r\n \"Please load predictive models. 
Run:\\\r\n \\n\\tautocomplete.load()\"\r\n )", "def explore_topic_nouns(topic_number, topn=25, model=10):\n #\n if model==10:\n lda = LdaMulticore.load(joinp(pilot_path, 'lda_model_10'))\n topicname=topic_names_10[topic_number]\n gensimdic={0:9,1:8,2:6,3:7,4:3,5:10,6:5,7:1,8:2,9:4}\n gensimSTR=str(gensimdic[topic_number])\n \n # \n print(u'{:20} {}'.format(u'term', u'frequency') + u'\\n')\n \n dic={}\n j=0\n \n print('top 5 terms')\n for term, frequency in lda.show_topic(topic_number, topn):\n if dfff[dfff['nouns']==term].empty: ## dfff is loaded from pilot_path/bow_nouns.csv\n pass\n else:\n j=j+1\n if j<6:\n print (u'{:20} {:.3f}'.format(term, round(frequency, 3)))\n dic[term]=frequency\n dff=pd.DataFrame.from_dict(dic,orient='index')\n dff.columns=[''.join(['topic:',topicname,' (gensim topic:',gensimSTR,')'])] \n return(dff)", "def top_n(self, n):\n top = {}\n for code, feat_set in self.iteritems():\n tuples = sorted(feat_set.items(), reverse=True, key=itemgetter(1))\n best = {feat for feat, _ in tuples[:n]}\n top[code] = best\n return top", "def most_words(self, n):\n return big_tags", "def get_topics(model, nlp_model, n_top_words):\n\n words = nlp_model.get_feature_names()\n\n return [convert_to_string([words[i] for i in topic.argsort()[:-n_top_words - 1:-1]]) for topic_idx, topic in enumerate(model.components_)]", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def prepare_words(self, top_words, total_count):\r\n list_to_return = []\r\n percents = 0\r\n for num, word_tuple in enumerate(top_words.iteritems()):\r\n if num == len(top_words) - 1:\r\n percent = 100 - percents\r\n else:\r\n percent = round(100.0 * word_tuple[1] / total_count)\r\n percents += percent\r\n list_to_return.append(\r\n {\r\n 'text': word_tuple[0],\r\n 'size': word_tuple[1],\r\n 'percent': percent\r\n }\r\n )\r\n return list_to_return", "def top_keywords(urls, count=10):\n try:\n res = Counter()\n for url in urls:\n res += Counter(get_keyword_dict(url))\n return [w[0] for w in res.most_common(count)]\n except:\n print('Error finding top keywords')", "def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return 
return_list", "def get_top_tweet_ngrams(corpus, dim=2, n=None):\r\n vec = CountVectorizer(ngram_range=(dim, dim)).fit(corpus)\r\n bag_of_words = vec.transform(corpus)\r\n sum_words = bag_of_words.sum(axis=0) \r\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\r\n words_freq =sorted(words_freq, key = lambda x: x[1], reverse=True)\r\n return words_freq[:n]", "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def test_top_n_counts():\n ngrams = NgramFrequencies()\n new_dic = {\n \"a\": 1,\n \"b\": 2,\n \"c\": 3,\n \"d\": 4\n }\n top_list = ngrams.top_n_counts(new_dic)\n assert top_list == [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]", "def classify(priors, likelihoods, testData, classes):\r\n results = []\r\n for document in testData:\r\n bestClass = None\r\n bestProb = None\r\n currentProb = 0.0\r\n for cls in classes:\r\n prior = priors[cls]\r\n currentProb = log(prior)\r\n lhoods = likelihoods[cls]\r\n for (word, count) in document:\r\n if word in lhoods:\r\n currentProb += log(lhoods[word])\r\n else:\r\n currentProb += log(lhoods[None])\r\n if currentProb > bestProb or bestClass == None:\r\n bestProb = currentProb\r\n bestClass = cls\r\n results.append(bestClass)\r\n return results", "def most_common_labels(examples: List[Example], top_n: int = 1) -> List:\n top_labels = Counter([example.label for example in examples]).most_common(top_n)\n return [label[0] for label in top_labels]", "def top_sentences(query, sentences, idfs, n):\n sentence_scores = dict()\n\n for sentence, words in sentences.items():\n words_in_query = query.intersection(words)\n \n # idf value of sentence\n idf = 0\n for word in words_in_query:\n idf += idfs[word]\n \n # query term density of sentence\n num_words_in_query = sum(map(lambda x: x in words_in_query, words))\n query_term_density = num_words_in_query / len(words)\n\n # update sentence scores with idf and query term density values\n sentence_scores[sentence] = {'idf': idf, 'qtd': query_term_density}\n \n # rank sentences by idf then query term density\n ranked_sentences = sorted(sentence_scores.items(), key=lambda x: (x[1]['idf'], x[1]['qtd']), reverse=True)\n ranked_sentences = [x[0] for x in ranked_sentences]\n\n return ranked_sentences[:n]", "def _explore(f_name):\n print _top_n_words(10, f_name)", "def main():\n vocab = str.split(file(sys.argv[1]).read())\n testlambda = numpy.loadtxt(sys.argv[2])\n testlambda = topN(testlambda, int(sys.argv[3]))\n words_per_topic = 20\n\n for k in range(0, len(testlambda)):\n lambdak = list(testlambda[k, :])\n lambdak = lambdak / sum(lambdak)\n temp = zip(lambdak, range(0, len(lambdak)))\n temp = sorted(temp, key=lambda x: x[0], reverse=True)\n\n print 'topic %d:' % (k)\n # feel free to change the \"53\" here to whatever fits your screen nicely.\n for i in range(0, words_per_topic):\n print '%s:%.4f' % (vocab[temp[i][1]], temp[i][0])\n print", "def top_files(query, files, idfs, n):\n # calculate term-frequency of each words in query\n tf = dict()\n for query_word in query:\n tf[query_word] = dict()\n for file_name in files:\n tf[query_word][file_name] = files[file_name].count(query_word)\n\n # claculate tf-idfs of each document\n tf_idfs = dict()\n for file_name in files:\n tf_idfs[file_name] = 0\n for query_word in query:\n tf_idfs[file_name] += tf[query_word][file_name] * idfs[query_word]\n \n # create sorted list by tf_idfs\n sorted_tf_idfs = 
sorted(tf_idfs, key= lambda item: tf_idfs[item], reverse= True)\n\n # return list contains top n file names\n top_files_names = []\n for index in range(n):\n top_files_names.append(sorted_tf_idfs[index]) \n\n return top_files_names", "def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)", "def most_influential_words(model, vectorizer, genre_index=0, num_words=10):\n features = vectorizer.get_feature_names()\n max_coef = sorted(enumerate(model.coef_[genre_index]), key=lambda x:x[1], reverse=True)\n return [[features[x[0]], x[1] ] for x in max_coef[:num_words]]", "def top_question_words(args, examples, word_dict):\n word_count = Counter()\n for ex in examples:\n for w in ex['question']:\n w = Dictionary.normalize(w)\n if args.uncased_question:\n w = w.lower()\n if w in word_dict:\n word_count.update([w])\n return word_count.most_common(args.tune_partial)", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\r\n for n in sorted(ngrams):\r\n print('----- {} most common {}-grams -----'.format(num, n))\r\n for gram, count in ngrams[n].most_common(num):\r\n print('{0}: {1}'.format(' '.join(gram), count))\r\n print('')", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def get_top10(dataset, contrib_type):\n return dataset.order_by('-{0}'.format(contrib_type))[:10]", "def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)", "def parse_topics(self, n=10):\n assert(self.is_trained)\n raw_topics = self._lda_model.print_topics(self._lda_model.num_topics)\n topics = map(lambda x: x.split(' + '), raw_topics)\n top_words = [\n 
map(\n lambda x: x.split('*')[1], \n topic[:n]\n ) \n for topic in topics]\n self.topics = top_words\n self.has_topics = True\n return top_words", "def extract_frequent_words(df:pd.DataFrame):\n x = (pd.pivot_table(df.drop(['text', 'percent_correct'], axis=1),\n index='success_lvl',\n aggfunc=['sum', 'mean']) # Count shows ~50/50 split\n .transpose()\n .loc[:, ['high', 'low']]\n .unstack(level=0))\n\n # Rank the most frequent phrases\n x['high_rank'] = x[('high', 'sum')].rank(method='dense', ascending=False)\n x['low_rank'] = x[('low', 'sum')].rank(method='dense', ascending=False)\n print(x[x.high_rank <= 10.].sort_values('high_rank'))\n print(x[x.low_rank <= 10.].sort_values('low_rank'))", "def get_top_predictions(preds, top=5):\n results = []\n for pred in preds:\n top_indices = pred.argsort()[-top:][::-1]\n # result = [tuple(CLASS_INDEX[str(i)]) + (pred[i],) for i in top_indices]\n # result.sort(key=lambda x: x[2], reverse=True)\n # results.append(result)\n return top_indices", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def most_wordy(data_sent):\n #initialize lists\n sylls = []\n words = []\n sents = []\n fkgs = []\n\n #looping through sentences to find lengthy sentences\n for sent in data_sent:\n token = word_tokenize(sent)\n word = len(token)\n if word > 40:\n\n #appending to lists\n syll = textstat.syllable_count(sent)\n sylls.append(syll)\n words.append(word)\n sents.append(sent)\n fkgs.append(fkg(int(word), 1, int(syll)))\n\n #transfer information to dataframe\n df_wordy = pd.DataFrame({'Words' : words,\n 'Syllables' : sylls,\n 'Flesch Kincaid Grade Level': fkgs,\n 'Sentence' : sents}, columns = [\"Words\", \"Syllables\", \"Flesch Kincaid Grade Level\", \"Sentence\"])\n df_wordy.sort_values(\"Words\", ascending = False, inplace = True)\n return df_wordy", "def top10(self, top10: List[Word]):\n\n self._top10 = top10", "def filter_top_n_words(topic_words_dict, n, word_list):\n # First remove any redundant words in word_list\n words = set(word_list)\n # Now get the intersection with words, that appear as keys in the dict\n topic_words_intersect = set(topic_words_dict.keys()).intersection(words)\n # Now get the words with their scores, sort descending for the scores\n # and return the first n words:\n score_wordlist = [(x, topic_words_dict[x]) for x in topic_words_intersect]\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def top_terms_tuples(self, num):\n \n sorted_tokens = sorted(\n self.centroid_vector.items(),\n key=itemgetter(1), # (1) is value\n reverse=True)\n\n # count to index\n top_terms = []\n \n for i in xrange(0, min(num, len(sorted_tokens))):\n top_terms.append(sorted_tokens[i])\n\n return top_terms", "def return_top_tfidf_words_array(array, encoder):\n load = open('resources/' + encoder, 'rb')\n encoder = pickle.load(load)\n load.close()\n tfidf_dict = {}\n encoder = dict([[v, k] for k,v in encoder.items()])\n for idx, a in enumerate(array):\n tfidf_dict[encoder[idx]] = a\n sorted_dict = sorted(tfidf_dict.items(), key=operator.itemgetter(1))\n return sorted_dict[-30:]", "def build_thesaurus(home_dir, dir, percent):\n word_count = {}\n top_50 = {}\n word_count = word_count_dict(home_dir, dir, percent)\n file = open(home_dir + dir + 'Thesaurus.txt','w')\n file2 = open(home_dir + dir + 'Top50.txt','w')\n #Sort words based on the frequency of the word\n count = 0\n for word in sorted(word_count, key = word_count.get, 
reverse = True):\n file.write(word + ' ' + str(word_count[word]) + '\\n')\n if count < len(word_count) / 2:\n file2.write(word + ' ' + str(word_count[word]) + '\\n')\n top_50[word] = word_count[word]\n else:\n break\n count = count + 1\n file.close()\n file2.close()\n return word_count, top_50", "def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))", "def get_words(df, size = 1000):\n top_words = df.sort_values(by = 'Median_Frequency', ascending = False).head(size)\n # w1 is low tau (uniform words), w2 is high tau words\n w1 = top_words.sort_values(by = 'Tau', ascending = True).head(int(.2 * size)).word.values \n w2 = top_words.sort_values(by = 'Tau', ascending = False).head(int(.2 * size)).word.values\n return w1, w2", "def reduce_sort_counts(self, type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 50: # Controls that we get only the 50 most common keywords\n aux = aux+1\n yield type, (int(count), word)", "def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n \n score_vals = []\n feature_vals = []\n \n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n \n #create a tuples of feature,score\n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results", "def _find_top_idf_words(company_names):\n feature_as_list = remove_special_chars(company_names)\n feature_as_list = [x.lower().strip() for x in feature_as_list]\n feature_as_list = set(feature_as_list)\n features = get_top_idf_features(feature_as_list, 100, 1)\n print(features)\n return features", "def run(self, words: List[str]) -> List[Tuple[str, float]]:\n files_found_by_word = self._count_words_in_file(words)\n found_files_with_percentage = self._calculate_ranking(files_found_by_word, words)\n sorted_results = self._calculate_top(found_files_with_percentage)\n return sorted_results" ]
[ "0.7205802", "0.70088744", "0.6974292", "0.6939049", "0.68271315", "0.67730814", "0.6618862", "0.66091466", "0.64232916", "0.6413391", "0.639833", "0.63636", "0.6353295", "0.63519716", "0.6349507", "0.63440084", "0.6335887", "0.63027114", "0.6243625", "0.623767", "0.6233897", "0.62330055", "0.622652", "0.6224103", "0.62226653", "0.6205877", "0.6198221", "0.6185104", "0.61655253", "0.61471224", "0.61464125", "0.6135342", "0.61336887", "0.61321145", "0.61213773", "0.61213773", "0.6115548", "0.61134696", "0.6107726", "0.610618", "0.61030686", "0.60942614", "0.60780346", "0.60571057", "0.60478485", "0.604102", "0.6030517", "0.6029183", "0.6024256", "0.6020753", "0.60158384", "0.60093874", "0.6008203", "0.5997336", "0.59972644", "0.5994071", "0.5988568", "0.5979129", "0.5964036", "0.5958958", "0.5955722", "0.595556", "0.5948736", "0.5942756", "0.59397936", "0.59360796", "0.5924148", "0.5921216", "0.5912066", "0.5902139", "0.58952135", "0.5878664", "0.58733636", "0.5860886", "0.5860867", "0.58586067", "0.5856571", "0.58521014", "0.583662", "0.5833106", "0.5822507", "0.5822507", "0.58088255", "0.580141", "0.57993704", "0.5795987", "0.57847923", "0.5778965", "0.5774537", "0.57721156", "0.57672113", "0.57525474", "0.5743405", "0.57425654", "0.5741181", "0.57327276", "0.57303923", "0.5722862", "0.57225406", "0.57130545" ]
0.7892108
0
Gather the top 10 words by highest (descending) odds ratios
def top10_odds_ratio(likelihoods, vocab, classes):
    results = []
    for word in vocab:
        highestOddsRatio = None
        for c1 in classes:
            for c2 in classes:
                # Skip self TODO: Is this right?
                # if c1 == c2:
                #     continue
                oddsRatio = odds_ratio(likelihoods, c1, c2, word)
                if oddsRatio > highestOddsRatio or highestOddsRatio == None:
                    highestOddsRatio = oddsRatio
        results.append((word, highestOddsRatio))
    # Sort and return top 10
    return map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]
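The document field above relies on an external odds_ratio helper that is not part of the record, compares a value against None with > and slices a map object, all of which only work under Python 2. For illustration only, the following is a minimal, self-contained Python 3 sketch of the same idea; the odds_ratio definition (per-class likelihood ratio), the name top10_odds_ratio_py3, and the toy likelihoods table are assumptions introduced here and are not part of the dataset.

def odds_ratio(likelihoods, c1, c2, word):
    # Assumed definition: ratio of the word's likelihood under class c1 vs class c2.
    return likelihoods[c1][word] / likelihoods[c2][word]

def top10_odds_ratio_py3(likelihoods, vocab, classes):
    # Same idea as the record's function: for each word, keep its highest odds
    # ratio over all ordered class pairs, then return the ten best words.
    results = []
    for word in vocab:
        highest = max(odds_ratio(likelihoods, c1, c2, word)
                      for c1 in classes for c2 in classes)
        results.append((word, highest))
    results.sort(key=lambda pair: pair[1], reverse=True)
    return [word for word, _ in results[:10]]

# Toy usage with made-up per-class word likelihoods.
likelihoods = {
    "pos": {"good": 0.30, "bad": 0.05, "movie": 0.20},
    "neg": {"good": 0.06, "bad": 0.25, "movie": 0.18},
}
print(top10_odds_ratio_py3(likelihoods, ["good", "bad", "movie"], ["pos", "neg"]))
# -> ['good', 'bad', 'movie']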
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]", "def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass", "def printTopWords(self, N):\n topWords = []\n for i in range(self.MAX_RATING):\n topWords.append(dict(sorted(self.dictionary.items(), key=lambda x: x[1].tfidf[i+1], reverse=True)[:N]))\n\n outputFormat = \"{:>16} - {:<30}\"\n for i in range(len(topWords)):\n print(\"Top \" + str(N) + \" words for class rating \" + str(i + 1))\n print(\"--------------------------------------\")\n for j in topWords[i]:\n print(outputFormat.format(j, self.dictionary[j].tfidf[i + 1]))\n print()", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)", "def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty", "def top10(self) -> List[Word]:\n return self._top10", "def get_top_n_words(filename, n, to_search_word_or_not, word_to_serach, get_random):\n\n histogram = get_word_list(filename, True) #calls histogram file\n output = []\n for word,value in histogram.items(): #sorts words into new histogram that has value, word pairs to sort\n output.append((value,word))\n output.sort()\n output.reverse() #sorting from greatest to least\n final_n_output = []\n\n if get_random == True: #possibly sending getrandom funtion to get random words\n random_word = getrandom(histogram)\n else:\n random_word = None\n\n if to_search_word_or_not == True: #possibly sending getrandom funtion to get random words\n num_of_word = search_for_a_word(histogram, word_to_serach)\n else:\n num_of_word = None\n\n for i in range(n):\n final_n_output.append(output[i]) #making a final output list\n\n print(random_word)\n\n return final_n_output, num_of_word, random_word", "def top_words(name):\n row = wiki[wiki['name'] == name]\n word_count_table = row[['word_count']].stack('word_count', new_column_name=['word','count'])\n return word_count_table.sort('count', ascending = False)", "def format_top_n(self, n=10):\n output = []\n for t, c in self._freq.most_common(n):\n files_, sents_ = self.fetch_index(t)\n word = t + ' (' + str(c) + ')'\n output.append([word, ','.join(files_), \"\\n\".join(sents_)])\n\n return output", "def _calculate_top(self,\n words_percentage_hit: List[Tuple[str, float]]) -> List[Tuple[str, float]]:\n return sorted(words_percentage_hit, key=(lambda tup: 
tup[1]))[:self._top_values]", "def print_top_words(components, feature_names, n_top_words: int = 10):\n for topic_idx, topic in enumerate(components):\n message = \"Topic #%d: \" % topic_idx\n message += \" \".join(\n [feature_names[i] for i in topic.argsort()[: -n_top_words - 1 : -1]]\n )\n print(message)\n print()", "def get_top_n_words(topic_words_dict, n):\n score_wordlist = topic_words_dict.items()\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def test_get_top_n_words_incorrect_numbers(self):\n expected = []\n actual = get_top_n_words({}, -1)\n self.assertEqual(expected, actual)\n actual = get_top_n_words({'happy': 2}, 0)\n self.assertEqual(expected, actual)", "def test_get_top_n_words_more_number(self):\n expected = ['man', 'happy']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 10)\n self.assertEqual(expected, actual)", "def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]", "def get_top_n_words(topic_dict, n=5):\n top_words = []\n for num, data in topic_dict.items():\n sorted_words = {k: v for k, v in sorted(data['words'].items(),\n key=lambda x: x[1],\n reverse=True\n )}\n words = sorted_words.keys()\n top_n_words = list(words)[:n]\n top_words.append(', '.join(top_n_words))\n return top_words", "def _top_n_words(n, f_name):\n word_dict, idx_dict, word_cnt = _extract_words(f_name)\n print (\"number of words: %d\" % len(word_cnt))\n n = min(len(word_cnt), n)\n np_cnt = np.array(word_cnt)\n idx = np.argpartition(np_cnt, -n)[-n:]\n res = []\n for i in idx:\n res.append((idx_dict[i], np_cnt[i]))\n res.sort(key=lambda t: t[1], reverse=True)\n return res", "def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans", "def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked", "def wcount(lines, topn=10):\n words=lines.lower()\n words=words.replace('.', '')\n words=words.replace(',', ' ')\n words=words.replace('!', ' ')\n words=words.replace('?', ' ')\n words=words.replace(':', ' ')\n words=words.replace('_', ' ')\n words=words.replace('\"', ' ')\n words=words.replace(\"'\", ' ')\n words=words.replace('(', ' ')\n words=words.replace(')', ' ')\n words=words.replace('[', ' ')\n words=words.replace(']', ' ')\n words=words.replace('-', ' ')\n words=words.replace(';', ' ')\n words=words.replace('\"', ' ')\n words=words.replace('*', ' ')\n 
lst=words.split(' ')\n lst2=list(set(lst))\n lst2.remove('')\n dic={}\n for i in lst2:\n dic[i]=lst.count(i)\n wds=list(dic.keys())\n numbers=list(dic.values())\n numbers2=sorted(numbers, reverse=True)\n for k in range(topn):\n m=numbers.index(numbers2[k])\n print(\"%-15s%-5d\"%(wds[m],numbers2[k]))", "def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)", "def top_sentences(query, sentences, idfs, n):\n rank = []\n\n for sentence in sentences:\n sentence_values = [sentence, 0, 0]\n\n for word in query:\n if word in sentences[sentence]:\n # Compute matching word measure. Sum of IDF values.\n sentence_values[1] += idfs[word]\n # Compute query term density. Proportion of words in a sentence that are in the query.\n sentence_values[2] += sentences[sentence].count(\n word) / len(sentences[sentence])\n\n rank.append(sentence_values)\n\n rank = sorted(rank, key=lambda x: (x[1], x[2]), reverse=True)[:n]\n \n return [sentence for sentence, mwm, qtd in rank]", "def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords", "def topn_similarity(word_vecs, word, n):\n vec = word_vecs[word]\n sim = dict()\n for w in word_vecs:\n if w != '<TOP>' and w != '<BOT>':\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))\n sim[w] = 1 - spatial.distance.cosine(vec, word_vecs[w])\n # sim[w] = np.dot(vec, np.transpose(word_vecs[w]))/(mod(vec)*mod(np.transpose(word_vecs[w])))\n dd = OrderedDict(sorted(sim.items(), key=lambda x: x[1], reverse=True))\n return list(dd.items())[1:n+1]", "def prepare_words(self, top_words, total_count):\r\n list_to_return = []\r\n percents = 0\r\n for num, word_tuple in enumerate(top_words.iteritems()):\r\n if num == len(top_words) - 1:\r\n percent = 100 - percents\r\n else:\r\n percent = round(100.0 * word_tuple[1] / total_count)\r\n percents += percent\r\n list_to_return.append(\r\n {\r\n 'text': word_tuple[0],\r\n 'size': word_tuple[1],\r\n 'percent': percent\r\n }\r\n )\r\n return list_to_return", "def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]", "def test_top_n_grams():\n ngrams = NgramFrequencies()\n unigrams_dic = {\n \"COUNT\": 10,\n \"time_burton's\": 5,\n \"burton's_corpse\": 4,\n \"corpse_bride\": 1\n }\n top_n_unigrams = ngrams.top_n_grams(unigrams_dic, 2)\n assert top_n_unigrams == [\n (\"time_burton's\", 0.5),\n (\"burton's_corpse\", 0.4)\n ]", "def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)", "def top_sentences(query, sentences, idfs, n):\n\n # claculate idfs of each sentence\n sent_score = dict()\n for sentence in sentences:\n sent_score[sentence] = 0\n for query_word in query:\n if query_word in sentences[sentence]:\n sent_score[sentence] += 
idfs[query_word]\n\n # create sorted list of sentences\n sorted_sentences = sorted(sent_score, key= lambda item: sent_score[item], reverse= True)\n\n # re-order sentences with the same rank of idfs according to query term density\n loop_sentences = sorted_sentences.copy()\n for sentence1 in loop_sentences:\n for sentence2 in loop_sentences:\n if sentence1 != sentence2:\n if sent_score[sentence1] == sent_score[sentence2]:\n qtd1 = query_term_density(sentence1, query, sentences)\n qtd2 = query_term_density(sentence2, query, sentences)\n index1 = sorted_sentences.index(sentence1)\n index2 = sorted_sentences.index(sentence2)\n if qtd1 > qtd2:\n if index1 > index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n elif qtd1 < qtd2:\n if index1 < index2:\n sorted_sentences[index2], sorted_sentences[index1] = sorted_sentences[index1], sorted_sentences[index2]\n\n # get list contains top n sentences\n top_sentences = []\n for index in range(n):\n top_sentences.append(sorted_sentences[index]) \n\n return top_sentences", "def extract_frequent_words(df:pd.DataFrame):\n x = (pd.pivot_table(df.drop(['text', 'percent_correct'], axis=1),\n index='success_lvl',\n aggfunc=['sum', 'mean']) # Count shows ~50/50 split\n .transpose()\n .loc[:, ['high', 'low']]\n .unstack(level=0))\n\n # Rank the most frequent phrases\n x['high_rank'] = x[('high', 'sum')].rank(method='dense', ascending=False)\n x['low_rank'] = x[('low', 'sum')].rank(method='dense', ascending=False)\n print(x[x.high_rank <= 10.].sort_values('high_rank'))\n print(x[x.low_rank <= 10.].sort_values('low_rank'))", "def top_sentences(query, sentences, idfs, n):\n # identifies the sentences that are the best match for the query.\n top_sens = dict()\n for sentence, tokens in sentences.items():\n # add query rank to the idfs dictionary\n # top_sens is a dictionary of two columns, both initally empty\n query_tokens = len([word for word in tokens if word in query])\n value = query_tokens / (len(tokens))\n for word, idf_score in idfs.items():\n if word in query and word in tokens:\n # 'matching word measure'\n value += idf_score\n top_sens[sentence] = value\n # if a tie, prefer a higher 'query term density' -- /= : divide by and update value\n # defined as the proportion of words in the sentence that are also words in the query. 
For example, if a sentence has 10 words, 3 of which are in the query, then the sentence’s query term density is 0.3.\n # list of sentences to query ranked according to idf x[1] and if a tie, then density x[2] ; reverse=True: descending order\n # sentence list x[0] of length n ( [:n] )\n top_sens_rank = sorted(top_sens, key=top_sens.get, reverse=True)\n return top_sens_rank[0:n]", "def top_sentences(query, sentences, idfs, n):\n scored_sentences = {}\n for word in query:\n # print(f\"Searching for {word}\")\n for k, v in sentences.items():\n\n # Ignore headings\n if k.strip(\"=\") != k:\n continue\n\n if word.lower() in v:\n \n try:\n check = scored_sentences[k]\n except:\n scored_sentences[k] = 0\n\n scored_sentences[k] += idfs[word]\n\n # print(scored_sentences)\n # exit()\n\n # print(f\"Scored Sentences:\\n\\t{scored_sentences}\")\n final_result = []\n while len(final_result) < n:\n top = \"\"\n g = 0.0\n s = False\n\n for k, v in scored_sentences.items():\n\n if float(v) >= float(g):\n\n # Query term density calculation\n if float(v) == float(g):\n\n old_s_set = set(top.split(\" \"))\n new_s_set = set(k.split(\" \"))\n q_set = set(query)\n\n # similarities between words in question and our query words\n inter_new = float(len(new_s_set & q_set) / len(k))\n inter_old = float(len(old_s_set & q_set) / len(top))\n\n if inter_new < inter_old:\n continue\n\n g = v\n top = k\n\n if top:\n final_result.append(top)\n del scored_sentences[top]\n else:\n final_result.append(\"Not enough context for additional results.\")\n return final_result\n \n return final_result", "def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]", "def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)", "def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()", "def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def top_5_similar_2(list_string, my_nlp=nlp1, model_type=my_model, doc_topic=my_doc_topic):\n vec = my_nlp.transform(list_string)\n vtrans = model_type.transform(vec)\n array_5 = pairwise_distances(vtrans, doc_topic, metric='cosine').argsort()[0][0:5]\n # result_df = df_reviews[['game_link']].iloc[array_5]\n return df_reviews[['game']].iloc[array_5]\n # return(\"test\")\n return result_df", "def get_top_n_words(word_list, n):\n\tword_counts = dict()\n\tfor word in word_list:\n\t\tword_counts[word] = 1 + word_counts.get(word,0)\n\n\twords_list = word_counts\n\tsorted_list = sorted(words_list.items(), key = lambda x: x[1])\n\tfinal_list = []\n\n\ti = -1\n\twhile i > ((-1 * 
n) - 1):\n\t\tfinal_list.append(sorted_list[i])\n\t\ti -= 1\n\n\tlist_without_numbers = [x[0] for x in final_list]\n\n\treturn list_without_numbers", "def get_words(df, size = 1000):\n top_words = df.sort_values(by = 'Median_Frequency', ascending = False).head(size)\n # w1 is low tau (uniform words), w2 is high tau words\n w1 = top_words.sort_values(by = 'Tau', ascending = True).head(int(.2 * size)).word.values \n w2 = top_words.sort_values(by = 'Tau', ascending = False).head(int(.2 * size)).word.values\n return w1, w2", "def most_similar(self, words: [str], top_n=3, metric='cosine') -> [(str, float)]:\n if len(words) == 0:\n return []\n\n vec = self.mean(words)\n if numpy.count_nonzero(vec) == 0:\n return []\n\n return [w for w, sim in self.most_similar_vec(vec=vec, top_n=top_n, exclude_words=words, metric=metric)]", "def wcount(lines, topn=10):\n '''a=[]\n for line in lines:\n word = line.strip()\n a.append(word)\n def histogram(s):\n d = dict()\n for i in s:\n if i in d:\n d[i]+=1\n else:\n d[i]=1\n return d'''\n def process_line(lines,diction):\n lines = lines.replace('-',' ')\n for word in lines.split():\n word=word.strip(string.punctuation+string.whitespace)\n word.lower()\n diction[word]=diction.get(word,0)+1\n\n def process_file(lines):\n diction = {}\n process_line(lines,diction)\n return diction\n diction=process_file(lines)\n x=list(diction.values())\n x.sort()\n x.reverse()\n count = 0\n for i in range(topn):\n for key in list(diction.keys()):\n if diction[key]==x[i] and count<topn:\n print(\"%s %d\"%(key,diction[key]))\n count +=1\n del diction[key]\n pass", "def sort_words(boxes):\n mean_height = sum([y2 - y1 for _, y1, _, y2 in boxes]) / len(boxes)\n boxes.view('i8,i8,i8,i8').sort(order=['f1'], axis=0)\n current_line = boxes[0][1]\n lines = []\n tmp_line = []\n for box in boxes:\n if box[1] > current_line + mean_height:\n lines.append(tmp_line)\n tmp_line = [box]\n current_line = box[1]\n continue\n tmp_line.append(box)\n lines.append(tmp_line)\n\n for line in lines:\n line.sort(key=lambda box: box[0])\n\n return lines", "def print_most_common(self, hist, n=10):\n t = []\n for word,freq in hist.items():\n t.append((freq, word))\n t.sort(reverse=True)\n\n for word,freq in t[:n]:\n print(word, '\\t', freq)", "def print_most_common(hist, num=100):\n word_list_ordered = most_common(hist)\n top_list = word_list_ordered[0:num]\n for pair in top_list:\n print(pair[1], \":\", pair[0])", "def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]", "def print_most_common(hist, num=10):\n t = most_common(hist)\n print 'The most common words are:'\n for freq, word in t[:num]:\n print word, '\\t', freq", "def textrank(sentences, top_n, stopwords=None):\n S = build_similarity_matrix(sentences, stopwords) \n sentence_ranking = page_rank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranking), key=lambda item: -item[1])]\n selected_sentences = sorted(ranked_sentence_indexes[:top_n])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary", "def top_matches(prefs, person, n=5, similarity=sim_pearson):\n scores = [(similarity(prefs, person, other), other)\n for other in prefs if other != person]\n\n scores.sort()\n scores.reverse()\n return scores[0:n]", "def topMatches(prefs, person, n=5, similarity=sim_pearson):\n all_matches = [(similarity(prefs, person, other), other) \n for other in prefs.keys()\n if person 
!= other]\n all_matches.sort()\n all_matches.reverse()\n return all_matches[0:n]", "def report_distribution(count):\n # create a list containing tuples of count and word,\n # while summing the total number of word occurrences\n num = 0\n tup_list = []\n\n for key, value in count.items():\n num += int(value)\n tup_list.append((value, key))\n # make me use string formatting smh im gonna use lambas i don't care what we have learned\n #tup_list.sort(key = lambda t: t[0], reverse = True)\n tup_list.sort(reverse = True)\n\n s_list = []\n s_list.append(\"{:>5}\".format(num))\n max = 20\n for tup in tup_list:\n if max == 0:\n break\n else:\n max -= 1\n s_list.append(\"{:>5}\".format(tup[0]) + \" \" + tup[1])\n\n format_string = \"count word\\n\"\n for i in s_list:\n format_string = format_string + i + \"\\n\"\n\n # remove last new line im too lazy to do it right in the for-loop\n #format_string = format_string[:-1]\n # add lines with the title and total word count to the output string\n \n # sort the list from largest number to smallest,\n # add a line to the output for each word in the top 20 containing count and word\n \n # return the string containing the report\n return format_string", "def print_most_common(hist, num=10):\n t = most_common(hist, excluding_stopwords=True)\n for freq, word in t[0:num]:\n print(f\"{word:<10}\", \"*\" * int((freq)))", "def top_words_bar_chart(df, n=10):\n messages = df['message'].values\n word_counts = {}\n for message in messages:\n tokens = tokenize(message)\n for token in tokens:\n if token in word_counts:\n word_counts[token] += 1\n else:\n word_counts[token] = 1\n\n items = sorted(word_counts.items(), key=lambda x: x[1], reverse=True)\n items = items[0:n]\n words = list(map(lambda x: x[0], items))\n counts = list(map(lambda x: x[1], items))\n return {\n 'data': [\n Bar(\n x=words,\n y=counts\n )\n ],\n\n 'layout': {\n 'title': 'Most common word stems (outside stopwords)',\n 'yaxis': {\n 'title': \"Count\",\n },\n 'xaxis': {\n 'title': \"Word\"\n }\n }\n }", "def top_n_satisfy2(content, n):\n #print(n)\n sum_satisfy = 0.0\n query_num = 0.0\n for qid in content:\n label_sort = []\n score = []\n all_info = content[qid]\n num_label1 = 0\n for info in all_info:\n if info[0] > 0:\n num_label1 += 1\n label_sort.append([info[0], info[1]])\n label_sort.sort(key=take_second, reverse=True)\n satisfy = 0.0\n count = 0\n size = len(label_sort)\n for i in range(min(n, size)):\n cur_label = label_sort[i][0]\n if cur_label > 0:\n satisfy += 1\n cur_satisfy = satisfy / min(n, num_label1)\n sum_satisfy += cur_satisfy\n query_num += 1\n return sum_satisfy / query_num", "def getTopTen():\n\n if moviesRanked > 10:\n return moviesRanked[0:10]\n else: \n return moviesRanked", "def filter_top_n_words(topic_words_dict, n, word_list):\n # First remove any redundant words in word_list\n words = set(word_list)\n # Now get the intersection with words, that appear as keys in the dict\n topic_words_intersect = set(topic_words_dict.keys()).intersection(words)\n # Now get the words with their scores, sort descending for the scores\n # and return the first n words:\n score_wordlist = [(x, topic_words_dict[x]) for x in topic_words_intersect]\n score_wordlist.sort(key=lambda x: x[1], reverse=True)\n return [word for (word,score) in score_wordlist[:n]]", "def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)", "def reduce_sort_counts(self, 
type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 15: # Controls that we get only the 15 most common keywords\n aux = aux+1\n yield type, (int(count), word)", "def get_top_words(tfidf_dict: dict, n_words=10):\n header = ['year', 'term', 'tf-idf']\n dfs = []\n for each_year, tfidf_scores in tfidf_dict.items():\n df_list = []\n for term_score in tfidf_scores:\n df_list.append([each_year, term_score[0], float(term_score[1])])\n yr_df = pd.DataFrame(df_list, columns=header)\n yr_df = yr_df.sort_values(by=['tf-idf'], ascending=False)\n if n_words < len(tfidf_scores):\n yr_df = yr_df.iloc[:n_words].reset_index(drop=True)\n dfs.append(yr_df)\n else:\n raise ValueError('input of n_words is more than the words in data!')\n\n df_out = pd.concat(dfs)\n\n return df_out", "def top_sentences(query, sentences, idfs, n):\n sentence_scores = dict()\n\n for sentence, words in sentences.items():\n words_in_query = query.intersection(words)\n \n # idf value of sentence\n idf = 0\n for word in words_in_query:\n idf += idfs[word]\n \n # query term density of sentence\n num_words_in_query = sum(map(lambda x: x in words_in_query, words))\n query_term_density = num_words_in_query / len(words)\n\n # update sentence scores with idf and query term density values\n sentence_scores[sentence] = {'idf': idf, 'qtd': query_term_density}\n \n # rank sentences by idf then query term density\n ranked_sentences = sorted(sentence_scores.items(), key=lambda x: (x[1]['idf'], x[1]['qtd']), reverse=True)\n ranked_sentences = [x[0] for x in ranked_sentences]\n\n return ranked_sentences[:n]", "def top_words(source, number):\n\n keys = set()\n\n ht = HashMap(2500, hash_function_2)\n\n # This block of code will read a file one word at a time and\n # put the word in `w`\n with open(source) as f:\n for line in f:\n words = rgx.findall(line)\n for w in words:\n current_word = w.lower()\n #get a count for current word\n current_count = ht.get(current_word)\n if current_count is None:\n ht.put(current_word, 1)\n else:\n ht.put(current_word, current_count + 1)\n\n #create an empty list to store top words in\n tuple_list = []\n\n #traverse hash_map to find most used words\n for i in range(ht.capacity):\n if ht._buckets[i] is not None:\n #traverse links at each bucket\n current = ht._buckets[i].head\n while current is not None:\n tuple_list.append((current.key, current.value))\n current = current.next\n\n #create an ordered list out of items\n iter_tuple_quick_sort(tuple_list, len(tuple_list) - 1, 0)\n\n #create a new list to return with passed number arg\n return_list = []\n list_counter = 0\n while list_counter <= number - 1:\n if list_counter == len(tuple_list) - 1:\n break\n else:\n return_list.append(tuple_list[list_counter])\n list_counter += 1\n\n return return_list", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def reduce_sort_counts(self, type, word_counts):\n aux = 0\n for count, word in sorted(word_counts, reverse=True):\n if aux < 50: # Controls that we get only the 50 most common keywords\n aux = aux+1\n yield type, (int(count), word)", "def test_top_n_freqs():\n ngrams = NgramFrequencies()\n top_list = [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]\n top_freq = ngrams.top_n_freq(top_list, 10)\n assert top_freq == [(\"d\", 0.4), (\"c\", 0.3), (\"b\", 0.2), (\"a\", 0.1)]", "def 
top10_likelihoods(likelihoods, vocab, classes):\r\n resultDict = {}\r\n for cls in classes:\r\n results = []\r\n for word in vocab:\r\n results.append((word, likelihoods[cls][word]))\r\n resultDict[cls] = results\r\n # Sort and return top 10 for each class\r\n for key in resultDict:\r\n results = resultDict[key]\r\n resultDict[key] = map(lambda x: x[0], sorted(results, key=lambda x: x[1], reverse=True))[:10]\r\n return resultDict", "def print_most_frequent(ngrams, num=10):\r\n for n in sorted(ngrams):\r\n print('----- {} most common {}-grams -----'.format(num, n))\r\n for gram, count in ngrams[n].most_common(num):\r\n print('{0}: {1}'.format(' '.join(gram), count))\r\n print('')", "def run(self, words: List[str]) -> List[Tuple[str, float]]:\n files_found_by_word = self._count_words_in_file(words)\n found_files_with_percentage = self._calculate_ranking(files_found_by_word, words)\n sorted_results = self._calculate_top(found_files_with_percentage)\n return sorted_results", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def print_most_frequent(ngrams, num=10):\n for n in sorted(ngrams):\n print('----- {} most common {}-grams -----'.format(num, n))\n for gram, count in ngrams[n].most_common(num):\n print('{0}: {1}'.format(' '.join(gram), count))\n print('')", "def top_sentences(query, sentences, idfs, n):\n ranking = {}\n qtd = {}\n\n for s in sentences:\n value = 0\n # Calculate qtm for each sentence\n for w in sentences[s]:\n if w in query:\n value += 1\n qtd[s] = value/len(sentences[s])\n # calculate sum of idfs for each sentence\n value = 0\n for word in query:\n if word in sentences[s]:\n value += idfs[word]\n ranking[s] = value\n # sort the ranking according to the values\n sortedRank = sorted(ranking.items(), key=lambda x: x[1], reverse=True)\n # if they have same idfs, sort according to qtd\n change = True\n while change:\n change = False\n for i, s in enumerate(sortedRank):\n if i == len(sortedRank)-1:\n break\n if s[1] == sortedRank[i+1][1]:\n if qtd[s[0]] < qtd[sortedRank[i+1][0]]:\n sortedRank[i], sortedRank[i+1] = sortedRank[i+1], sortedRank[i]\n change = True\n break\n finalRank = []\n for j,s in enumerate(sortedRank):\n if j == n:\n break\n finalRank.append(s[0])\n return finalRank", "def textrank(self, sentences, n_top=2, stopwords=None):\n S = self.build_similarity_matrix(sentences, stopwords) \n sentence_ranks = self.pagerank(S)\n \n # Sort the sentence ranks\n ranked_sentence_indexes = [item[0] for item in sorted(enumerate(sentence_ranks), key=lambda item: -item[1])]\n #print(ranked_sentence_indexes)\n selected_sentences = sorted(ranked_sentence_indexes[:n_top])\n summary = itemgetter(*selected_sentences)(sentences)\n return summary", "def top_by_num_of_ratings(self, n):\n return top_movies", "def top_chars(phrase):\n list_string = phrase.split(\" \")\n phrase_without_spaces = \"\".join(list_string)\n\n letters_count = {}\n letters_count_list = []\n\n for letter in phrase_without_spaces:\n if letter in letters_count:\n letters_count[letter] += 1\n else:\n letters_count[letter] = 1\n\n for letter, count in letters_count.items():\n letters_count_list.append([letter, count])\n\n max_count = 0\n letters_with_highest_count = ['a']\n\n for letter_and_count in letters_count_list:\n if letter_and_count[1] > max_count:\n letters_with_highest_count[:] = letter_and_count[0]\n max_count = 
letter_and_count[1]\n elif letter_and_count[1] == max_count:\n letters_with_highest_count.append(letter_and_count[0])\n\n return sorted(letters_with_highest_count)\n\n\n \n\n\n\n \n\n\n\n return []", "def top_chars(phrase):\n phrase = phrase.split()\n letter_counts = {}\n\n # loops through phrase and adds word name to key with the length of the word. If no such key exists, it is created\n for word in phrase:\n for letter in word:\n if letter in letter_counts:\n letter_counts[letter] = letter_counts[letter] + 1\n else:\n letter_counts[letter] = 1\n\n most_used = []\n # loops through each key in the dictionary of usage counts and checks if it has the highest usage count.\n # if it does, it replaces the old elements in the list. If it is used as much as the currently most-used letter,\n # it is appended to the list.\n for key in letter_counts:\n if most_used == []:\n most_used.append(key)\n elif letter_counts[key] > letter_counts[most_used[0]]:\n most_used = [key]\n elif letter_counts[key] == letter_counts[most_used[0]]:\n most_used.append(key)\n\n return sorted(most_used)", "def wcount(lines, topn):\n word = ''\n for i in lines:\n if 65<=ord(i) and ord(i)<=90:\n word = word + i \n elif 97<=ord(i) and ord(i)<=122:\n word = word + i\n else:\n word = word + ' ' \n word = word.split()\n #提取不重复的单词\n alreadyknown = []\n for m in word:\n if m not in alreadyknown:\n alreadyknown.append(m)\n #分别数数,排序,建构字典\n empty = []\n final = {}\n final2 = {}\n for j in alreadyknown:\n number = icount(word,j)\n final[j]=number\n final2[str(number)]=j\n empty.append(number)\n empty.sort()\n empty.reverse()\n last_step = empty[:10]\n #通过数字找到对应word\n last_str = ''\n for y in last_step:\n z = final2[str(y)]\n last_str += z + \"\\t\" + str(y) + \"\\n\"\n return last_str", "def test_most_similar_topn(self):\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=5)), 5)\n self.assertEqual(len(self.vectors.most_similar('dog.n.01', topn=10)), 10)\n\n predicted = self.vectors.most_similar('dog.n.01', topn=None)\n self.assertEqual(len(predicted), len(self.vectors.vocab) - 1)\n self.assertEqual(predicted[-1][0], 'gallant_fox.n.01')", "def get_top_n_motif_scores(score_list,top_n):\r\n\treturn score_list.argsort()[-top_n:],score_list[score_list.argsort()[-top_n:]]", "def get_word_list(file_name, n):\n f = open(file_name, 'r')\n text = f.read()\n words = re.compile('\\w+').findall(text)\n return get_top_n_words(words, n)", "def nmax(num, T, nwords):\n values = []\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(((data['all_words'][n])))\n values.append(round(T[n],3))\n return nwords", "def top_sentences(query, sentences, idfs, n):\n tf_idfs = []\n for sentence, words in sentences.items():\n tf_idf = 0\n\n for word in query:\n if word not in idfs:\n continue\n idf = idfs[word]\n tf = (1 if word in words else 0)\n tf_idf += idf * tf\n t = (sentence, tf_idf)\n tf_idfs.append(t)\n\n sorted_list = sorted(tf_idfs, key=sorter)\n sorted_list.reverse()\n file_list = [item[0] for item in sorted_list]\n\n return file_list[:n]", "def wcount(lines, topn=10):\n newlines=lines.lower()\n total=newlines.split()\n adict={}\n for x in total :\n adict[x]=total.count(x)\n x=sorted(adict.items(),reverse=True,key=lambda kv:kv[1])[0:topn]\n for (k,v) in x:\n print(k,'\\t',v)", "def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the 
occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r", "def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))", "def get_most_popular_talks_by_like_ratio(videos):\n return sorted(videos, key=get_ratio, reverse=True)", "def sorted_by_count_desc_and_word(word_counts):\n\n return sorted(word_counts.items(), key=reversed_and_negated_tuple)", "def plot_most_common_words(plotting_string, method):\n top_twenty_after_stop = get_top_words(plotting_string)\n top_twenty_after_stop_dict = dict(top_twenty_after_stop)\n keys = top_twenty_after_stop_dict.keys()\n values = top_twenty_after_stop_dict.values()\n plt.bar(keys, values)\n plt.xticks(rotation=75)\n plt.xlabel(\"Most common words\")\n plt.ylabel(\"Frequency\")\n plt.title(\"Most common words in {} of posts from ErictheCarGuy\".format(method))\n plt.show()", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. 
needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def top_by_ratings(self, n, metric=average):\n return top_movies", "def most_words(self, n):\n return big_tags", "def top_controversial(self, n):\n return top_movies", "def test_top_n_counts():\n ngrams = NgramFrequencies()\n new_dic = {\n \"a\": 1,\n \"b\": 2,\n \"c\": 3,\n \"d\": 4\n }\n top_list = ngrams.top_n_counts(new_dic)\n assert top_list == [(\"d\", 4), (\"c\", 3), (\"b\", 2), (\"a\", 1)]", "def top_three_letters2(string):\n # create a dictionary with letter and frequency\n countdict = defaultdict(int) # gets a dictionary with initial value 0 for every key encountered during loop\n for c in string:\n countdict[c] += 1 # gets dictionary with letter frequency\n top_three = sorted(countdict, key = lambda k: countdict[k], reverse = True)[:3]\n # sorts the dictionary in place, mutates it; based on key, lambda k function, which is countdict[k], values in dictionary, reverses the sorted output\n # to get key-value pairs in descending order\n # uses slicing to get only top three elements from sorted list\n result = [(letter, countdict[letter]) for letter in top_three ] # to get the resullt in desired output format\n print(result)", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def nmax(num, T, nwords):\n top_n = T.argsort()[-num:][::-1]\n for n in top_n:\n nwords.append(data['all_words'][n])\n return nwords", "def most_wordy(data_sent):\n #initialize lists\n sylls = []\n words = []\n sents = []\n fkgs = []\n\n #looping through sentences to find lengthy sentences\n for sent in data_sent:\n token = word_tokenize(sent)\n word = len(token)\n if word > 40:\n\n #appending to lists\n syll = textstat.syllable_count(sent)\n sylls.append(syll)\n words.append(word)\n sents.append(sent)\n fkgs.append(fkg(int(word), 1, int(syll)))\n\n #transfer information to dataframe\n df_wordy = pd.DataFrame({'Words' : words,\n 'Syllables' : sylls,\n 'Flesch Kincaid Grade Level': fkgs,\n 'Sentence' : sents}, columns = [\"Words\", \"Syllables\", \"Flesch Kincaid Grade Level\", \"Sentence\"])\n df_wordy.sort_values(\"Words\", ascending = False, inplace = True)\n return df_wordy", "def k_most_talkative(self):\n word_counts = self.get_usercounts() # {u1: 3, u2: 4, }\n word_counts_heap = [(-count, username) for username, count in word_counts.items()] # [(-4, username), (-3, username)]\n heapify(word_counts_heap) # [(-4, u2), (-3, u1)]\n counter = 0\n while word_counts_heap or counter < k:\n _, username = heappop(word_counts_heap)\n counter += 1 # 1, 2\n yield username # u2, u1", "def display_topics2(model, feature_names, n_top_words=25):\n word_dict = {};\n for topic_idx, topic in enumerate(model.components_):\n word_dict[\"Topic%d\" % (topic_idx)] = [feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]]\n return pd.DataFrame(word_dict).T", "def get_top_k(weight_query, doc_dict, k):\n \n # find fraction of all inlinks to doc_id\n total_num_inlinks = 0\n frac_inlinks = {}\n with open(num_inlinks_file) as f:\n doc_ids_set = doc_dict.keys()\n for i, line in enumerate(f):\n total_num_inlinks += int(line.strip())\n if i in doc_ids_set:\n frac_inlinks[i] = int(line.strip())\n \n\n for doc_id, frac in frac_inlinks.items():\n frac_inlinks[doc_id] = frac / 
total_num_inlinks\n\n # calculate score\n # score = alpha * frac_inlinks + (1 - alpha) * cosine similarity\n alpha = 0.5\n score = {}\n for doc_id, weight_doc in doc_dict.items():\n cosine_score = 0\n for term, weight in weight_doc.items():\n cosine_score += weight_doc[term] * weight_query[term]\n score[doc_id] = alpha * frac_inlinks[doc_id] + (1 - alpha) * cosine_score\n \n # sort based on score, high to low\n sorted_score = OrderedDict( sorted(score.items(), key=lambda t: t[1], reverse=True) )\n \n # type(top_k) == {doc_id: [score, \"doc_text\"]}\n # note top_k is not sorted based on score!\n top_k = {}\n num_results = 0\n for doc_id, score in sorted_score.items():\n num_results += 1\n top_k[doc_id] = [score, \"\"]\n if num_results == k:\n break\n return top_k", "def extract_topn_from_vector(feature_names, sorted_items, topn=10):\n \n #use only topn items from vector\n sorted_items = sorted_items[:topn]\n \n score_vals = []\n feature_vals = []\n \n # word index and corresponding tf-idf score\n for idx, score in sorted_items:\n \n #keep track of feature name and its corresponding score\n score_vals.append(round(score, 3))\n feature_vals.append(feature_names[idx])\n \n #create a tuples of feature,score\n results= {}\n for idx in range(len(feature_vals)):\n results[feature_vals[idx]]=score_vals[idx]\n \n return results" ]
[ "0.7232739", "0.6893036", "0.67285866", "0.6680685", "0.66617835", "0.66482335", "0.6614465", "0.6595784", "0.6562109", "0.645501", "0.64387554", "0.6425363", "0.63707566", "0.6370042", "0.6349563", "0.6348198", "0.6341136", "0.6331591", "0.63161516", "0.6312066", "0.6284538", "0.6275941", "0.6267955", "0.6251291", "0.62424177", "0.62385476", "0.6232322", "0.61934334", "0.61820835", "0.6181658", "0.61652535", "0.6164928", "0.6140876", "0.6136782", "0.6135696", "0.6089707", "0.6086518", "0.6080043", "0.60663617", "0.6065763", "0.6056828", "0.6041534", "0.60408396", "0.60264647", "0.60234684", "0.6015173", "0.6008986", "0.60078144", "0.60068303", "0.599565", "0.5991856", "0.59881294", "0.59760153", "0.5970843", "0.5950229", "0.5933224", "0.592383", "0.59203494", "0.5904886", "0.58933103", "0.58816564", "0.58730847", "0.58724666", "0.5867766", "0.5867615", "0.58612174", "0.5844071", "0.5843266", "0.5843023", "0.5843023", "0.5839932", "0.58375984", "0.58370346", "0.58351696", "0.5819906", "0.58068883", "0.5791194", "0.5785558", "0.5775442", "0.5767927", "0.5762073", "0.5761041", "0.57550514", "0.5754205", "0.5739947", "0.5737814", "0.5736086", "0.57189316", "0.571809", "0.5716334", "0.57073903", "0.5706687", "0.5703481", "0.56812465", "0.56812465", "0.56794286", "0.5678455", "0.5675436", "0.5674728", "0.5671243" ]
0.7499493
0
Calculate the accuracy of our predicted vs. actual
def evaluate(predicted, actual): assert(len(predicted) == len(actual)) total = len(actual) correct = len([x for x in range(total) if predicted[x] == actual[x]]) return (float(correct) / float(total)) * 100.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def accuracy(self):\n total_predictions = self.tp + self.fp + self.tn + self.fn;\n return float(self.tp + self.tn) / total_predictions if total_predictions != 0 else 1", "def get_accuracy(self) -> float:\n self.network.load_data()\n self.network.train()\n\n n = len(self.network.y_test)\n correct = 0\n for i in range(n):\n # Predict by running forward pass through the neural network\n pred = self.network.predict(self.network.x_test[i])\n # Sanity check of the prediction\n assert 0 <= pred <= 1, \"The prediction needs to be in [0, 1] range.\"\n # Check if right class is predicted\n correct += self.network.y_test[i] == round(float(pred))\n return round(correct / n, 3)", "def accuracy(self):", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.episode.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def accuracy ( actuals, predictions ):\n return np.mean ( actuals == predictions )\n # End accuracy()", "def compute_accuracy(self):\n self.test_predictions = tf.cast(tf.argmax(self.test_logits, 1), tf.int32)\n correct = tf.equal(self.data.test_labels, self.test_predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def getAccuracy(self):\n\t\tcorrect = (self.testing[self.classLabel]==self.bestLabel).sum()\n\t\tself.accuracy = (correct/float(len(self.testing))) * 100.0", "def accuracy(actual, predicted):\n return np.sum(predicted == actual) / actual.shape[0]", "def accuracy(predictions, test_labels):\n return f1_score(test_labels, predictions, average='micro') * 100", "def compute_accuracy(self):\n if not self.is_training:\n logits = self.test_logits\n labels = self.data.test_labels\n else:\n logits = self.train_logits\n labels = self.data.labels\n\n predictions = tf.cast(tf.argmax(logits, 1), tf.int32)\n correct = tf.equal(labels, predictions)\n return tf.reduce_mean(tf.cast(correct, tf.float32))", "def accuracy(self):\n return (self.table[0, 0] + self.table[1, 1]) / self.N", "def calc_accuracy(true, predicted):\n return sum([t==p for t,p in zip(true, predicted)]) / float(len(true))", "def accuracy(preds, labels):\n correct = preds == labels\n return correct.sum().float() / correct.shape[0]", "def overall_accuracy(y_true, y_pred):\n pred_flat, true_flat = y_pred.flatten(), y_true.flatten()\n intersection = list(pred_flat == true_flat).count(True)\n sum_ = len(true_flat)\n accuracy = round(intersection/sum_, 4)\n return accuracy", "def accuracy_fn(y_true, y_pred):\n correct = torch.eq(y_true, y_pred).sum().item()\n acc = (correct / len(y_pred)) * 100\n return acc", "def accuracy(actual, pred):\n error = actual - pred\n pe = error/actual *100\n me = np.mean(error)\n mse = np.mean(np.power(error, 2))\n mae = np.mean(np.abs(error))\n mape = np.mean(np.abs(pe))\n mpe = np.mean(pe)\n rmse = np.sqrt(mse)\n out = {'Mean error' : me, 'Root mean squared error' : rmse,\n 'Mean absolute error' : mae, 'Mean percentage error' : mpe,\n 'Mean absolute percentage error' : mape}\n return out", "def train_accuracy(self):\n # Train accuarcy\n add = np.ones(len(self.X_train))\n X_add1 = np.c_[add, self.X_train]\n pred_train = np.dot(X_add1, self.w_result.T)\n pred_train[pred_train > 0] = 1\n pred_train[pred_train < 0] = 0\n print(pred_train)\n train_check_lable = np.isclose(pred_train, self.y_train)\n num_true_lable = np.sum(train_check_lable)\n num_all_lable = 
np.size(train_check_lable)\n train_accuracy = num_true_lable / num_all_lable\n print(\"train_accuracy is: %f\" %train_accuracy)\n return train_accuracy", "def get_accuracy(actual, predicted):\n predicted_correct = 0\n # for each index in the actual result\n for i in range(len(actual)):\n # if actual is the same as predicted\n if actual[i] == predicted[i]:\n predicted_correct+=1\n return predicted_correct/len(actual)", "def accuracy_v2(y_true, y_pred):\n tp = true_positive(y_true, y_pred)\n fp = false_positive(y_true, y_pred)\n fn = false_negative(y_true, y_pred)\n tn = true_negative(y_true, y_pred)\n accuracy_score = (tp + tn) / (tp + tn + fp + fn)\n return accuracy_score", "def accuracy(pred, target):\n N = pred.shape[0]\n return (pred == target).sum() * 1.0 / N", "def accuracy(self,predicted, original):\n TP=0\n TN=0\n FP=0\n FN=0\n for i in range(len(predicted)):\n if(predicted[i]==1 and original[i]==1):\n TP+=1\n elif(predicted[i]==0 and original[i]==1):\n FN+=1\n elif(predicted[i]==1 and original[i]==0):\n FP+=1\n elif(predicted[i]==0 and original[i]==0):\n TN+=1\n\n acc = (TP+TN)/(TP+TN+FP+FN)\n return acc", "def accuracy(predictions, targets):\n\n compare = predictions == targets\n # compare = (predictions.argmax(dim=1)) == (targets)\n # compare = (predictions.argmax(dim=1)) == (targets.argmax(dim=1))\n # summed = compare.sum().item()\n summed = compare.sum()\n # print(summed, compare.size())\n # print(compare.size()[0])\n return summed/compare.size", "def calculate_accuracy(targets, preds):\n intersection_foreground = targets * preds\n intersection_background = np.invert(targets) * np.invert(preds)\n\n acc_foreground = float(np.sum(intersection_foreground)) \\\n / (float(np.sum(targets)) + 1e-7)\n acc_background = float(np.sum(intersection_background)) \\\n / (float(np.sum(np.invert(targets))) + 1e-7)\n return (acc_foreground + acc_background) / 2", "def get_accuracy(self, predicted_y, actual_y, log_tests=False):\n if log_tests:\n for i in range(actual_y.shape[0]):\n print 'predicted = {0}, actual = {1}'.format(predicted_y[i], actual_y[i])\n return float(sum(predicted_y == actual_y)) / predicted_y.shape[0]", "def accuracy(y_test, y_pred):\n\treturn accuracy_score(y_test, y_pred)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n correct = 0\n for i in range(len(targets)):\n if(predictions[i] == targets[i]):\n correct += 1\n accuracy = correct/len(targets)\n #raise NotImplementedError\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def accuracy(self):\n\t\treturn self.accuracy_", "def _calculate_accuracy(self):\n same = 0\n dif = 0\n for x, y in zip(self.test_string[3:], self.prediction[3:]):\n if x == y:\n same += 1\n else:\n dif += 1\n\n accuracy = round((same / (same + dif)) * 100, 2)\n print(f'Computer guessed right {same} out of {same + dif} symbols ({accuracy} %)')\n self.capital += dif\n self.capital -= same\n\n return", "def accuracy(self, X_train, y_train):\n y_train_pred = self.predict(X_train)\n diffs = y_train_pred - y_train\n count = 0.\n for i in range(y_train.shape[0]):\n if diffs[i] != 0:\n count+=1\n return 100 - count*100/y_train.shape[0]", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n i1 = np.arange(0, len(targets), 1)\n i2 = np.argmax(predictions, axis = 1)\n accuracy = targets[i1, i2].sum()/targets.sum()\n ########################\n # END OF YOUR CODE #\n 
#######################\n\n return accuracy", "def accuracy_score(y_true, y_pred):\n\ttp, fn, fp, tn = confusion_matrix(y_true, y_pred, table_show=False)\n\n\treturn (tp+tn) / (tp+tn+fn+fp)", "def calculate_metrics(self, predictions, actual):\n\n predictions.dtype = np.bool\n actual.dtype = np.bool\n\n N = len(predictions) * len(predictions[0])\n\n TP = np.sum(np.bitwise_and(predictions, actual))\n FP = np.sum(np.bitwise_and(np.invert(predictions), np.invert(actual) ))\n FN = np.sum(np.bitwise_and(predictions, np.invert(actual)))\n TN = np.sum(np.bitwise_and(np.invert(predictions), (actual)))\n\n correct = np.sum(predictions == actual) / N\n accuracy = (TP + TN) / N\n precision = TP / (TP + FP) # positive predictive value\n sensitivity = TP / (TP + FN) # true positive rate\n specificity = TN / (TN + FP) # true negative rate\n\n return correct, accuracy, precision, sensitivity, specificity", "def compute_accuracy(self,pad_pred:torch.Tensor, pad_targets:torch.Tensor):\n pad_pred = pad_pred.argmax(2)\n mask = pad_targets != self.ignore_label\n numerator = torch.sum(\n pad_pred.masked_select(mask) == pad_targets.masked_select(mask)\n )\n denominator = torch.sum(mask)\n return float(numerator) / float(denominator)", "def accuracy(self):\n if not self.run:\n self._run()\n return self.model_acc", "def accuracy(gt, pred):\n \n return np.mean(gt == pred)", "def accuracy(predictions, targets):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n n_samples = targets.shape[0]\n _, y_pred = predictions.max(dim=1)\n accuracy = (y_pred == targets).sum().item() / n_samples\n ########################\n # END OF YOUR CODE #\n #######################\n\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(y_true, y_pred):\n accuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n return accuracy", "def accuracy_score(preds, y):\n accuracy = sum([1 for i in range (len(preds)) if preds[i] == y[i]])*1.0/len(preds) \n return accuracy", "def accuracy(y_true, y_pred):\n assert y_true.shape == y_pred.shape\n return (y_true == y_pred).mean()", "def _compute_final_accuracies(self, meval):\n valid_accuracy = self.eval_child_model(meval, self.data_loader, 'val')\n if self.hparams.eval_test:\n test_accuracy = self.eval_child_model(meval, self.data_loader, 'test')\n else:\n test_accuracy = 0\n tf.logging.info('Test Accuracy: {}'.format(test_accuracy))\n return valid_accuracy, test_accuracy", "def accuracy(labels, preds):\n\tassert labels.shape[0]==preds.shape[0]\n\treturn np.sum(preds==labels)/float(labels.shape[0])", "def calc_accuracy(self, X, y):\n accuracy = 0.0\n ###########################################################################\n # TODO: #\n # Implement this method. 
#\n ###########################################################################\n\n y_pred = self.predict(X)\n if len(y_pred) != len(y):\n raise Exception('Fatal Error in dim - please checkout your prediction code!')\n accuracy = np.sum(y_pred == y)/len(y)*100\n ###########################################################################\n # END OF YOUR CODE #\n ###########################################################################\n\n return accuracy", "def accuracy_score(truth, predicted):\n return len(np.where(truth==predicted)[0]) / len(truth)", "def accuracy_score(truth, predicted):\n return len(np.where(truth==predicted)[0]) / len(truth)", "def accuracy(predicted, ground_truth):\n predicted_labels_decoded = np.argmax(predicted, axis=1)\n ground_truth_labels_decoded = np.argmax(ground_truth, axis=1)\n correct_rate = [1 if pred == truth else 0 for (pred, truth) in\n zip(predicted_labels_decoded, ground_truth_labels_decoded)]\n accuracy = sum(correct_rate) / ground_truth_labels_decoded.size\n return accuracy * 100", "def computeAccuracy(self, targetLabels, actualLabels):\r\n self.accuracy = (0.0 + sum([1 for x in map(lambda y,z:(y,z), targetLabels, actualLabels) if x[0] == x[1]])) / len(targetLabels)\r\n return self.accuracy", "def accuracy(predictions, targets):\n return accuracy", "def compute_accuracy(Y_test, Y_pred):\n number_correct_prediction = 0\n for i in range(len(Y_pred)): # They have the same length\n id_pred = np.argmax(Y_pred[i]) # Take the argmax of the prediction\n id_test = np.where(Y_test[i] == 1.)[0][0] # Take the real position of the POS tag\n if id_test == id_pred:\n number_correct_prediction += 1\n\n percentage_correct = number_correct_prediction / len(Y_pred)\n\n return percentage_correct", "def accuracy(self, logits, labels):\r\n preds = tf.map_fn(tf.math.round, logits)\r\n diff = tf.math.abs(preds - labels)\r\n return 1 - tf.reduce_mean(diff, axis=0)", "def accuracy(y_pred, y_true):\n # Number of correct predictions\n correct = (y_pred == y_true).sum()\n # Predictions accuracy\n acc = correct / (len(y_pred[0]) * y_pred.shape[0]) * 100\n # Accuracy of non zero pixels predictions\n non_zero = (y_true > 0).sum()\n non_zero_correct = (y_pred[y_true > 0] == y_true[y_true > 0]).sum()\n if non_zero == 0:\n if non_zero_correct == 0:\n non_zero_acc = 100.0\n else:\n non_zero_acc = 0.0\n else:\n\n non_zero_acc = non_zero_correct / non_zero * 100\n return acc, non_zero_acc, non_zero_correct", "def accuracy(output1, output2):\n pred1 = output1\n pred2 = output2\n correct = torch.gt(pred1, pred2)\n return float(correct.sum())/correct.size(0)", "def accuracy_on_one(y_true, y_pred):\n sum_true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n \n sum_all_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n \n acc_on_one = sum_true_positives / (sum_all_positives + K.epsilon())\n return acc_on_one", "def calculate_accuracy(actual_list, predicted_list):\n valid_part_len = sum(i == j for i, j in zip(actual_list, predicted_list))\n return (100.0 / len(actual_list)) * valid_part_len", "def show_accuracy(self):\r\n return round(accuracy_score(self.actual, self.predicted),2)", "def calculateAccuracy(numCorrect, numWrong):\n return np.round((numCorrect)/(numCorrect+numWrong),3)", "def get_accuracy(self, gold, predicted):\n # Exercise 3: calculate accuracy\n i = 0\n j = 0\n for labels in gold:\n if labels == predicted[i]:\n j +=1\n i +=1\n return j / i * 100", "def evaluate_accuracy(self, prediction, reality):\n correct = 0\n for i in range(len(prediction)):\n if prediction[i] 
== reality[i]:\n correct = correct + 1\n\n return correct / len(prediction)", "def compute_accuracy(self, inputs, true_values):\n predicted_values = self.predict(inputs)\n number_correct = np.sum(predicted_values == true_values)\n accuracy = number_correct / len(predicted_values)\n\n return accuracy", "def accuracy(self, X_test, y_test):\n\t\tif X_test.ndim == 1:\n\t\t\tX_test = np.reshape(X_test, (X_test.shape[0],1))\n\t\ty_pred = self.predict(X_test)\n\t\treturn np.sum(np.argmax(y_pred,axis=1)==np.argmax(y_test,axis=1))/float(y_test.shape[0])", "def precision_score(y_true, y_pred):\n return ((y_true == 1) * (y_pred == 1)).sum() / (y_pred == 1).sum()", "def accuracy(y_true, y_pred):\r\n\r\n cm = confusion_matrix(y_true=y_true, y_pred=y_pred)\r\n cost_m = np.max(cm) - cm\r\n indices = linear_sum_assignment(cost_m)\r\n indices = np.asarray(indices)\r\n indexes = np.transpose(indices)\r\n total = 0\r\n for row, column in indexes:\r\n value = cm[row][column]\r\n total += value\r\n return total * 1. / np.sum(cm)", "def accuracy(cls, test_labels):\n N = len(test_labels)\n\n # Calculate total correct as precentage\n total_correct = 100*(N - np.count_nonzero(cls - test_labels))/N\n\n # Calculate precentag correct for each class\n lab = np.unique(test_labels)\n cls_correct = {}\n for label in lab:\n idx = np.where(test_labels == label)[0]\n N_cls = len(idx)\n cls_correct[label] = 100*(N_cls - np.count_nonzero(label -\n cls[idx]))/N_cls\n\n print(\"Accuracy for:\")\n print(\"All classes is %.2f%%\" % total_correct)\n for label in lab:\n print(\"Class %d is %.2f%%\" % (label, cls_correct[label]))\n return(total_correct, cls_correct)", "def accuaracy_score(y_true, y_pred):\n\taccuracy = np.sum(y_true == y_pred, axis=0) / len(y_true)\n\treturn accuracy", "def accuracy(links_true, links_pred=None, total=None):\n\n if isinstance(total, pandas.MultiIndex):\n total = len(total)\n\n if _isconfusionmatrix(links_true):\n confusion_matrix = links_true\n\n v = (confusion_matrix[0, 0] + confusion_matrix[1, 1]) / numpy.sum(\n confusion_matrix\n )\n else:\n tp = true_positives(links_true, links_pred)\n tn = true_negatives(links_true, links_pred, total)\n\n v = (tp + tn) / total\n\n return float(v)", "def calcAccuracy(measuredConc, expectedConc):\n accuracy = (numpy.mean(measuredConc) / expectedConc) * 100\n return accuracy", "def accuracy(predictions, labels):\n return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1)) / predictions.shape[0])", "def accuracy(self, X_train, X_test):\n loss, accuracy = self.estimator.evaluate(X_test, X_train)\n return accuracy", "def test_nn_predicts_accurate_results(self):\n self.nn.train_nn(self.X_train, self.y_train, 6, 10, 0.06)\n accuracy = 0\n X_test, y_test = load_data(\"../data/testdata.mat.tar.gz\")\n for i in range(len(X_test[:100])):\n out = self.nn.forward_prop(X_test[i])[0][-1]\n if np.argmax(out) == np.where(y_test[i])[0][0]:\n accuracy += 1\n else:\n print(\"Incorrect\", np.argmax(out))\n print(\"accuracy: \", accuracy)\n self.assertGreaterEqual(accuracy, 70)", "def accuracy(pred, labels):\n pred = torch.sigmoid(pred)\n predicted = (pred > 0.5).int()\n correct = (predicted == labels).sum().item()\n return correct / labels.shape[0]", "def accuracy(labels, predictions, n_classes):\n\t\tequality = tf.equal(x = predictions, y = labels) # match the type of labels\n\t\treturn tf.reduce_mean(tf.cast(equality, tf.float32))", "def accuracy(labels, predictions):\n if len(labels) != len(predictions):\n return -1\n\n correct = 0\n total = 0\n\n for i,v in 
enumerate(predictions):\n if labels[i] == str(v):\n correct += 1\n total += 1\n\n return (float(correct) / float(total)) * 100.0", "def accuracy(qtd_true_positives, qtd_true_negatives, all_documents, ref=0\t):\n\n\tfp = fn = 0\n\tfor d in all_documents:\n\t\tif d.polarity == domain.Document.POSITIVE and d.predicted_polarity < ref:\n\t\t\tfn = fn + 1\n\t\telif d.polarity == domain.Document.NEGATIVE\tand d.predicted_polarity > ref:\n\t\t\tfp = fp + 1\n\n\tfp = decimal.Decimal(fp)\n\tfn = decimal.Decimal(fn)\n\tqtp = decimal.Decimal(qtd_true_positives)\n\tqtn = decimal.Decimal(qtd_true_negatives)\n\n\treturn (qtp + qtn) /(qtp + qtn + fp + fn)", "def accuracy1(y_test, predictions):\n accuracy = 0.0\n\n for i in range(y_test.shape[0]):\n intersection = 0.0\n union = 0.0\n for j in range(y_test.shape[1]):\n if int(y_test[i,j]) == 1 or int(predictions[i,j]) == 1:\n union += 1\n if int(y_test[i,j]) == 1 and int(predictions[i,j]) == 1:\n intersection += 1\n \n if union != 0:\n accuracy = accuracy + float(intersection/union)\n\n accuracy = float(accuracy/y_test.shape[0])\n\n return accuracy", "def accuracy(classifier_output, true_labels):\n\n # TODO: finish this.\n conf_matrix = confusion_matrix(classifier_output, true_labels)\n return (conf_matrix[0][0]+conf_matrix[1][1])/(conf_matrix[0][0] + conf_matrix[0][1]\\\n + conf_matrix[1][0] + conf_matrix[1][1])", "def accuracy(unwarped_orig_tensor, predicted_tensor):\n \n m = tf.keras.metrics.RootMeanSquaredError() \n _ = m.update_state(unwarped_orig_tensor, predicted_tensor)\n return m.result().numpy()", "def test_accuracy(self):\n total_accuracy, weights = losses.weighted_accuracy(\n logits=self.logits, targets=self.targets)\n\n expected_accuracy = 2 / 3\n\n self.assertEqual(weights, 3)\n self.assertAlmostEqual(total_accuracy / weights, expected_accuracy)", "def accuracy(self):\r\n # Load tarined model using intent id.\r\n clf = joblib.load(filename=self.intention_id+'.pkl')\r\n # Compute accuracy for hole training data and return.\r\n return clf.score(X=self.training_data, y=self.target_data)", "def accuracy(predictions, targets):\n correct_count = 0\n for prediction, target in zip(predictions, targets):\n if prediction == target:\n correct_count += 1\n return correct_count / len(predictions)", "def compute_accuracy(self, X_data, y_data):\n assert isinstance(X_data, np.ndarray)\n assert isinstance(y_data, np.ndarray)\n assert X_data.shape[0] == y_data.shape[0]\n \n correct = 0\n for i in range(len(X_data)):\n outputs = self.predict(X_data[i])\n outputs = outputs > 0.5\n if outputs == y_data[i]:\n correct += 1\n acc = float(correct) / len(X_data)\n return acc", "def accuracy(targets: List[List[float]], predict: List[List[float]]):\r\n correct = 0\r\n for i in range(len(targets)):\r\n if predict[i] == targets[i]:\r\n correct += 1\r\n return correct / len(targets) * 100", "def precision(y_true, y_pred):\n true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + K.epsilon())\n return precision", "def get_accuracy(test_sets, predictions, class_index):\n actual_classes = [test_set[class_index] for test_set in test_sets]\n\n num_correct = sum(int(actual == prediction) for actual, prediction in zip(actual_classes, predictions))\n\n return float(num_correct) / len(test_sets)", "def accuracy_compute(predictions, labels):\n with tf.name_scope('test_accuracy'):\n accu = 100 * np.sum(np.argmax(predictions, 1) == labels) / predictions.shape[0]\n 
tf.summary.scalar('test_accuracy', accu)\n return accu", "def accuracy(self, X, y):\n pred_labels = self.predict(X)\n return np.sum(pred_labels == y) / pred_labels.shape[0]", "def accuracy(output, target):\n correct = target.eq(torch.round(output))\n correct = correct.float()*100.0\n correct = torch.mean(correct, 0)\n res = torch.mean(correct)\n\n return res,correct", "def calculate_accuracy(y, y_pred):\n prediction = tf.argmax(y_pred, 1)\n correct = tf.argmax(y, 1)\n equality = tf.equal(prediction, correct)\n accuracy = tf.cast(equality, tf.float32)\n accuracy = tf.math.reduce_mean(accuracy)\n return accuracy", "def predictionAccuracy(self, predicted, actual):\n\t\taccuracyCount=0\n\n\t\t###### your implementation below ######\n\t\tfor x in range(len(predicted)):\n\t\t\tif (predicted[x] == actual[x]):\n\t\t\t\taccuracyCount += 1\n\t\taccuracyCount /= len(predicted)\n\t\treturn accuracyCount;", "def accuracy(self):\n # Initialize key variables\n correct = {}\n prediction = 0\n cls_count = {}\n accuracy = {}\n\n # Analyze all the data\n for cls in self.pca_object.classes():\n # Get list of x values to test\n vectors = self.pca_object.xvalues(cls)\n\n # Process each vector\n for vector in vectors:\n # Get the prediction\n prediction = self.classifier(vector)\n\n # Only count definitive predictions\n if prediction is not None:\n # Count the number of correct predictions\n if prediction == cls:\n if cls in correct:\n correct[cls] += 1\n else:\n correct[cls] = 1\n\n # Increment the count\n if cls in cls_count:\n cls_count[cls] += 1\n else:\n cls_count[cls] = 1\n\n # Calculate per class accuracy\n correct[None] = 0\n cls_count[None] = 0\n for cls in cls_count.keys():\n if cls_count[cls] != 0:\n accuracy[cls] = correct[cls] / cls_count[cls]\n\n # Keep a tally for all successes\n correct[None] = correct[None] + correct[cls]\n cls_count[None] = cls_count[None] + cls_count[cls]\n\n # Calulate overall accuracy\n accuracy[None] = correct[None] / cls_count[None]\n\n # Return\n return accuracy", "def percent_accuracy(self, test_set, predicted_values):\r\n\r\n correct = 0\r\n for i in range(len(test_set)):\r\n if test_set[i].classification == predicted_values[i]:\r\n correct += 1\r\n return correct / len(test_set)", "def accuracy_score(y_true, y_predict):\n assert y_true.shape[0] == y_predict.shape[0], \\\n \"The size of y_true must be equal to y_predict\"\n\n return sum(y_true == y_predict) / len(y_true)", "def get_accuracy(pos_test, neg_test, pos_train, neg_train):\n pos_file = open(pos_test, \"r\")\n neg_file = open(neg_test, \"r\")\n trained_pos = train_model(pos_train)\n trained_neg = train_model(neg_train)\n pos_count = 0\n #keeps track of how many positive reviews are accurately predicted\n total_pos_reviews = 0 \n neg_count = 0\n #keeps track of how many negative reviews are accurately predicted\n total_neg_reviews = 0\n for review in pos_file:\n classification = classify(review, trained_pos, trained_neg)\n total_pos_reviews += 1\n if classification == \"positive\":\n pos_count += 1 \n positive_accuracy = pos_count/total_pos_reviews \n for review in neg_file:\n classification = classify(review, trained_pos, trained_neg)\n total_neg_reviews += 1\n if classification == \"negative\":\n neg_count += 1 \n negative_accuracy = neg_count/total_neg_reviews \n total_accuracy = average(positive_accuracy, negative_accuracy)\n print(\"Positive accuracy: \" + str(positive_accuracy))\n print(\"Negative accuracy: \" + str(negative_accuracy))\n print(\"Total accuracy: \" + str(total_accuracy))", "def 
calculate_metrics(predictions, expected):\n # type: (np.ndarray, np.ndarray) -> (float, float, float)\n clients_count = predictions.shape[0]\n products_count = predictions.shape[1]\n\n true_positive = 0.0\n true_negative = 0.0\n false_positive = 0.0\n false_negative = 0.0\n\n total = float(clients_count * products_count)\n\n for c in range(0, clients_count):\n for p in range(0, products_count):\n if predictions[c, p] == expected[c, p]:\n if predictions[c, p] == 1:\n true_positive += 1\n else:\n true_negative += 1\n else:\n if predictions[c, p] == 1:\n false_positive += 1\n else:\n false_negative += 1\n\n accuracy = float(true_positive + true_negative) / total\n if true_positive + false_positive == 0:\n precision = 0\n else:\n precision = true_positive / float(true_positive + false_positive)\n\n if true_positive + false_negative == 0:\n recall = 0\n else:\n recall = true_positive / float(true_positive + false_negative)\n\n return accuracy, precision, recall", "def accuracy(self, predictions, truth):\n return np.mean(np.argmax(predictions, axis=1) == truth) # <COGLINE>", "def accuracy(output, target): # , topk=(1,)):\n correct = 0\n batch_size = target.size(0)\n for i in range(batch_size):\n tar = target[i].data.cpu().numpy()\n pred = output[i].data.cpu().numpy()\n if (tar) == np.argmax(pred):\n correct += 1\n return float(correct/batch_size)", "def precision(y_true, y_pred):\n true_positives = bk.sum(bk.round(bk.clip(y_true * y_pred, 0, 1)))\n predicted_positives = bk.sum(bk.round(bk.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + bk.epsilon())\n return precision", "def balanced_accuracy(self):\n return 0.5 * (self.sensitivity + self.specificity)", "def accuracy(outputs, labels):\n predicted = outputs.argmax(dim=1)\n correct = (predicted == labels).sum().item()\n return correct / labels.size(0)", "def precision(y_true, y_pred):\n true_positives = backend.sum(backend.round(backend.clip(y_true * y_pred, 0, 1)))\n predicted_positives = backend.sum(backend.round(backend.clip(y_pred, 0, 1)))\n precision = true_positives / (predicted_positives + backend.epsilon())\n return precision", "def accuracy_measures(predictions, trues):\n\n tn, fp, fn, tp = confusion_matrix(trues, predictions).ravel()\n print \"\\t(tn, fp, fn, tp) =\", (tn, fp, fn, tp)\n\n # how often is classifier correct?\n print \"\\tAccuracy = {:.2%}\".format(float(tp + tn) / len(trues))\n\n # how often is it wrong?\n print \"\\tMisclassification Rate = {:.2%}\".format(float(fp + fn) / len(trues))\n\n # when actually yes, how often does it predict yes?\n print \"\\tTrue Positive Rate = {:.2%}\".format(float(tp) / trues.count(True))\n\n # when actually no, how often does it predict yes?\n print \"\\tFalse Positive Rate = {:.2%}\".format(float(fp) / trues.count(False))\n\n # when actually no, how often does it predict no?\n print \"\\tSpecificity = {:.2%}\".format(float(tn) / trues.count(False))\n\n # when it predicts yes, how often is it correct?\n print \"\\tPrecision = {:.2%}\".format(float(tp) / predictions.count(True))\n\n # how often does yes condition occur in our sample?\n print \"\\tPrevalence = {:.2%}\\n\".format(float(trues.count(True)) / len(trues))\n\n # return accuracy, precision, and recall score\n return accuracy_score(trues, predictions), precision_score(trues, predictions, average='binary'), recall_score(\n trues, predictions, average='binary')" ]
[ "0.83784646", "0.82262033", "0.822042", "0.8146395", "0.8089007", "0.8089007", "0.80558175", "0.8008387", "0.80057275", "0.79616034", "0.79590094", "0.79344165", "0.7930567", "0.78854644", "0.78593326", "0.7858755", "0.7850988", "0.7762686", "0.7759415", "0.7753433", "0.7709539", "0.76758903", "0.76606226", "0.7651398", "0.7651393", "0.7650622", "0.7649327", "0.7646539", "0.76422733", "0.7622081", "0.7612885", "0.75933623", "0.7591095", "0.7587169", "0.756304", "0.75539243", "0.7551827", "0.75252765", "0.75252765", "0.7486485", "0.7455175", "0.74354017", "0.7425438", "0.74241", "0.7419053", "0.7419053", "0.7416185", "0.7399762", "0.7391277", "0.7391261", "0.7387492", "0.7385819", "0.738475", "0.7378317", "0.73747945", "0.73721623", "0.7369725", "0.7367761", "0.73656243", "0.7354566", "0.7347361", "0.73471457", "0.73468226", "0.73435456", "0.7332609", "0.7320642", "0.73124546", "0.7306482", "0.7303461", "0.7292691", "0.72923017", "0.72891456", "0.7281064", "0.72801924", "0.725842", "0.7256731", "0.72543126", "0.7253042", "0.7244891", "0.72259045", "0.7224112", "0.7217661", "0.7200009", "0.7197385", "0.71955484", "0.7195506", "0.71909916", "0.7188285", "0.7187404", "0.71799934", "0.7178831", "0.71768886", "0.7168524", "0.71655536", "0.7158725", "0.71542704", "0.7149079", "0.7148404", "0.71354926", "0.712598", "0.7116362" ]
0.0
-1
Classify data based on a Bernoulli model
def classify(priors, likelihoods, testData, classes): results = [] for document in testData: bestClass = None bestProb = None currentProb = 0.0 for cls in classes: prior = priors[cls] currentProb = log(prior) lhoods = likelihoods[cls] for (word, count) in document: if word in lhoods: currentProb += log(lhoods[word]) else: currentProb += log(lhoods[None]) if currentProb > bestProb or bestClass == None: bestProb = currentProb bestClass = cls results.append(bestClass) return results
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Bernoulli_NB_estimators():", "def fit_and_predict_BernoulliNB(X_train, Y_train, X_test):\n\n # Import the package\n from sklearn.naive_bayes import BernoulliNB \n\n ### YOUR SOLUTION STARTS HERE### \n #referenced to sklearn documentation \n # fit the model... \n clf = BernoulliNB(binarize=0.0).fit(X_train, Y_train) #fit naive bayes to X and Y train data\n # make predictions\n predicted_bernNB = clf.predict(X_test)\n return predicted_bernNB\n ### END SOLUTION ### ", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_classifier.predict(data)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label = (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def classify( self, data):\n\n\t\t\"*** YOUR CODE HERE ***\"\n\t\tguesses = np.zeros(len(data))\n\n\t\tfor k in range(len(self.classifiers)):\n\t\t\tclassifier = self.classifiers[k]\n\t\t\tguesses += np.dot(classifier.classify(data),self.alphas[k])\n\t\t\n\t\tguesses = np.sign(guesses)\n\t\tguesses[np.where(guesses == 0)[0]] = np.repeat(np.expand_dims(np.random.choice([-1,1]),axis=0),len(np.where(guesses == 0)[0]),axis=0)\n\t\treturn guesses\n\t\t# util.raiseNotDefined()", "def __init__(self, data, class_column):\n print(\"Naive Bayes Model created!\")\n\n # create report\n self.predict_summary = {}\n self.fit_report = {}\n\n # self.data=data\n self.data = 
data\n self.class_column = class_column\n\n # get the class column and get classes\n col_data = self.data[class_column]\n self.class_list = unique_list(col_data)\n\n # get numeric columns and categorical columns\n self.num_cols, self.cat_cols = get_both_columns(self.data, class_column)\n\n # Build the pro\n self.prob_hub = {}", "def bernoulli(train_data, train_labels, test_data, test_labels, data_set1=True, combined=None):\n\n DECISION_TREE_ACCURACIES = {\n 'Accuracy_train': 0,\n 'Accuracy_test': 0\n }\n ALPHA = [0, .01, .025, .05, .075, 0.1, 0.2, 0.3, .5, .75, 1, 1.5, 2.5]\n #ALPHA = [0, 0.175, 0.190, 0.195, 0.2, 0.205, 0.21, 0.225]\n\n FIT_PRIOR = [True, False]\n\n for alpha, fit_prior in itertools.product(ALPHA, FIT_PRIOR):\n bern = BernoulliNB(alpha=alpha, fit_prior=fit_prior)\n bern.fit(train_data, train_labels)\n\n pred_test = bern.predict(test_data)\n acc = accuracy_score(test_labels, pred_test)\n print(\"Alpha: {} Fit Prior: {} Accuracy: {}\".format(alpha, fit_prior, acc))\n\n if acc > DECISION_TREE_ACCURACIES['Accuracy_test']:\n DECISION_TREE_ACCURACIES['Accuracy_test'] = acc # todo this line is new, test\n DECISION_TREE_ACCURACIES['Alpha'] = alpha\n DECISION_TREE_ACCURACIES['Fit_prior'] = fit_prior\n pred_train = bern.predict(train_data)\n acc_ = accuracy_score(train_labels, pred_train)\n DECISION_TREE_ACCURACIES['Accuracy_train'] = acc_\n\n bern = BernoulliNB(alpha=DECISION_TREE_ACCURACIES['Alpha'],\n fit_prior=DECISION_TREE_ACCURACIES['Fit_prior'])\n\n if combined is not None:\n bern.fit(combined[0], combined[1]) # both first sets given, extra data == extra training\n else:\n bern.fit(train_data, train_labels)\n\n # save the trained model\n file_name = 'ds1TEST-nb.pkl' if data_set1 else 'ds2TEST-nb.pkl'\n with open(file_name, 'wb') as file:\n pickle.dump(bern, file)\n\n return bern, DECISION_TREE_ACCURACIES", "def multiclass_toy_data(): \n #dataset = np.zeros((10,5), np.int)\n dataset = np.array([[0,0,0,0,4],\n [0,0,0,0,5],\n [1,3,0,0,0],\n [3,1,0,0,1],\n [0,0,6,2,0],\n [0,0,0,0,0],\n [0,0,1,7,2], \n [0,0,5,1,5],\n [0,0,34,0,0],\n [0,0,3,0,0]])\n Y = np.array([3,3,2,2,1,0,1,1,0,0])\n #for i in range(10):\n #for j in range(5):\n #dataset[i][j] = np.random.randint(0,10) \n dataset = np.column_stack((dataset, Y))\n return (dataset)", "def classify_data(data, **kwargs):\r\n\r\n if 'mode' not in kwargs:\r\n kwargs['mode'] = 'log_reg'\r\n if 'threshold' not in kwargs:\r\n kwargs['threshold'] = 0.5\r\n if 'encode' not in kwargs:\r\n kwargs['encode'] = True\r\n if 'impute' not in kwargs:\r\n kwargs['impute'] = True\r\n data_copy = data.copy()\r\n if kwargs['impute']:\r\n data_copy = impute_missing_values_nominal(data)\r\n if kwargs['encode']:\r\n data_copy = encode_data_numerical(data)\r\n X_train, X_test, y_train, y_test = get_train_test(data_copy)\r\n model = train_model(X_train, y_train, kwargs['mode'])\r\n y_prob, y_pred = get_y_prob_pred(X_test, model, threshold=kwargs['threshold'])\r\n return X_train, X_test, y_train, y_test, model, y_prob, y_pred", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def predict_bn(cp, prior0, prior1, data, attr):\n # gets class names for dataframe manipulation\n classes = 
attr.tail(1)['vars'].tolist()\n classlist = [classes[0][0], classes[0][1]]\n class0 = classlist[0]\n class1 = classlist[1]\n # loops through test data and calculates a posterior probability for\n # each class\n attrs = attr['attr'].drop(attr.index[-1]).tolist()\n preds = []\n correct = 0\n for index, row in data.iterrows():\n actual_class = row['class']\n pp0 = 1.0\n pp1 = 1.0\n i = 0\n for a in attrs:\n attr_val = row[a]\n sub = cp[cp['attr']==a]\n sub = sub[sub['var']==attr_val]\n pp0 = pp0 * sub.get_value(i, class0) \n pp1 = pp1 * sub.get_value(i, class1) \n i = i + 1\n pp0 = (pp0 * prior0) \n pp1 = (pp1 * prior1) \n # prediction comparison\n predict = np.log(pp0) - np.log(pp1)\n if predict > 0:\n predicted_class = class0\n post_prob = pp0 / (pp0 + pp1)\n else:\n predicted_class = class1\n post_prob = pp1 / (pp0 + pp1)\n line = [predicted_class, actual_class, \"{:.12f}\".format(post_prob)]\n preds.append(line)\n if actual_class == predicted_class:\n correct = correct + 1\n \n return preds, correct", "def predict(self):\n probabilities = self.probability_array()\n # THIS ASSUMES the classifiers are in order: 0th column of the\n # probabilities corresponds to label = 0, ..., 9th col is for 9.\n classes = np.argmax(probabilities, axis=1)\n return classes", "def classify(self, data):\n \"*** YOUR CODE HERE ***\"\n return self.sklearn_svm.predict(data)", "def classify(self):\n\n if self.classifier is None:\n raise ValueError('self.classifier is None')\n if self.df is None:\n raise ValueError('self.df is None')\n if self.features is None:\n raise ValueError('self.features is None')\n\n train_set = self.df[self.df[self.label_col] != CLASSIFIER_NAN]\n test_set = self.df[self.df[self.label_col] == CLASSIFIER_NAN]\n\n test_set_timestamps = list(test_set.index.strftime('%Y-%m-%d %H:%M:%S.%f'))\n\n self.classifier.fit(\n train_set[self.features],\n train_set[self.label_col]\n )\n\n preds = self.classifier.predict(test_set[self.features])\n probs = self.classifier.predict_proba(test_set[self.features])\n\n res = []\n\n for i in range(0, len(preds)):\n probability = max(probs[i])\n res.append([test_set_timestamps[i], preds[i], probability])\n\n return res", "def _classifier(self, classes):\n # Initialize key variables\n pseudo = np.linalg.pinv(self.data)\n result = np.dot(pseudo, classes)\n return result", "def predict_category(self):\n pass", "def transform_data(data, nb_classes):\n (X_train, y_train), (X_test, y_test) = data\n X_train = X_train.reshape(60000, 784)\n X_test = X_test.reshape(10000, 784)\n X_train = X_train.astype(\"float32\")\n X_test = X_test.astype(\"float32\")\n X_train /= 255\n X_test /= 255\n y_train = np_utils.to_categorical(y_train, nb_classes)\n y_test = np_utils.to_categorical(y_test, nb_classes)\n return X_train, X_test, y_train, y_test", "def model(self):\n filePath = self.config['data_path']['train_data']\n data = self.loadCSV(filePath)\n cleandata = self.preprocess(data)\n X, y = self.dataSplit(cleandata)\n X = self.CountVect(X, self.config['transform_path']['transform_model_path'])\n X_train, X_test, y_train, y_test = self.TrainTestSplit(X, y)\n self.MultinomialNB(X_train, X_test, y_train, y_test, self.config['nlp_path']['model_path'])", "def predict_class_binary(self):\n return round(logistic(self.input_matrix[len(self.dimens)-1][0]))", "def classify(trait_arg, alpha):\r\n x = df['essay'][1:]\r\n x = x.str.lower()\r\n y = df[trait_arg][1:]\r\n\r\n print(\"Predicting \", trait_arg, \" with alpha = \", alpha)\r\n print(\"Test set, Train Set ratio: 1:3\")\r\n\r\n # Test 
train split in 25 : 75 ratio\r\n x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=11)\r\n\r\n # TF-IDF vectorizer\r\n vectorizer = TfidfVectorizer()\r\n xx_train = vectorizer.fit_transform(x_train)\r\n xx_test = vectorizer.transform(x_test)\r\n\r\n # Multinomial Naive Bayes Classifier\r\n classifier = MultinomialNB(alpha=alpha)\r\n classifier.fit(xx_train, y_train)\r\n\r\n predictions = classifier.predict(xx_test)\r\n print(\"Confusion Matrix:\")\r\n print(classification_report(y_test, predictions))\r\n score = accuracy_score(y_test, predictions)\r\n print(\"Accuracy:\", score)", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def fit_and_predict_multinomialNB(X_train, Y_train, X_test):\n # Import the package\n from sklearn.naive_bayes import MultinomialNB \n\n #used scikit-learn tutorial on training a classifier\n # fit the model... \n clf = MultinomialNB().fit(X_train, Y_train) #naive bayes\n # make predictions\n predicted_MultinomialnNB = clf.predict(X_test) #predict\n return predicted_MultinomialnNB", "def predict(self, threshold=0.5):\n probabilities = self.probability_array()\n classes = np.zeros(self.N)\n classes[probabilities > threshold] = 1\n return classes", "def classify(self, data):\n abstract", "def naive_bayes_predict(data, model):\n d, n = data.shape\n # Convert priors and likelihoods to log-scale\n prior = np.log(model['prior'])\n likelihood = np.log(model['likelihood'])\n num_classes = likelihood.shape[1]\n prediction = np.zeros(n, dtype=float)\n\n # Each cell i,j (i < num_classes, j < n) corresponds to log(prior_i) + log(conditionals_ij)\n # In other words, each cell is the numerator of log(P(Y=i|X_j))\n probs = np.zeros((num_classes, n))\n # Find the sums of logged feature|class conditionals for each document\n probs = np.matmul(likelihood.T, data)\n # Add the appropriate logged prior to each row\n probs = np.add(prior.reshape(num_classes,1), probs)\n\n # Find index of maximum probability term for each document\n prediction = np.argmax(probs, axis=0)\n return prediction", "def classify_data(X_train, Y_train, X_test):\r\n\r\n # Use this array to make a prediction for the labels of the data in X_test\r\n predictions = []\r\n # QHACK #\r\n np.random.seed(42)\r\n\r\n dev = qml.device(\"default.qubit\", wires=3)\r\n\r\n def layer(W):\r\n qml.Rot(W[0, 0], W[0, 1], W[0, 2], wires=0)\r\n qml.Rot(W[1, 0], W[1, 1], W[1, 2], wires=1)\r\n qml.Rot(W[2, 0], W[2, 1], W[2, 2], wires=2)\r\n\r\n qml.CNOT(wires=[0, 1])\r\n qml.CNOT(wires=[1, 2])\r\n qml.CNOT(wires=[2, 0])\r\n \r\n def stateprep(x):\r\n qml.templates.embeddings.AngleEmbedding(x, wires=[0, 1, 2])\r\n \r\n @qml.qnode(dev)\r\n def circuit(weights, x):\r\n\r\n stateprep(x)\r\n\r\n for W in weights:\r\n layer(W)\r\n \r\n return qml.expval(qml.PauliZ(0))\r\n\r\n def variational_classifier(var, x):\r\n weights = var[0]\r\n bias = var[1]\r\n return circuit(weights, x) + bias\r\n\r\n def square_loss(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n loss = loss + (l - p) ** 2\r\n\r\n loss = loss / len(labels)\r\n return loss\r\n\r\n def cost(var, X, Y):\r\n predictions = [variational_classifier(var, x) for x in X]\r\n return square_loss(Y, predictions)\r\n \r\n def accuracy(labels, predictions):\r\n loss = 0\r\n for l, p in zip(labels, predictions):\r\n if abs(l - p) < 1e-5:\r\n loss = loss + 1\r\n loss = loss / len(labels)\r\n\r\n return loss\r\n\r\n num_layers = 3\r\n num_qubits = 3\r\n var_init = (np.random.randn(num_layers, num_qubits, 3), 0.0)\r\n\r\n opt = 
qml.AdamOptimizer(0.12)\r\n batch_size = 10\r\n\r\n def pred(x):\r\n if x > 0.33:\r\n return 1\r\n if x > -0.33:\r\n return 0\r\n else:\r\n return -1\r\n\r\n var = var_init\r\n for it in range(25):\r\n\r\n # Update the weights by one optimizer step\r\n batch_index = np.random.randint(0, len(X_train), (batch_size,))\r\n X_batch = X_train[batch_index]\r\n Y_batch = Y_train[batch_index]\r\n var = opt.step(lambda v: cost(v, X_batch, Y_batch), var)\r\n\r\n # Compute accuracy\r\n predictions = [pred(variational_classifier(var, x)) for x in X_train]\r\n acc = accuracy(Y_train, predictions)\r\n\r\n #print(\r\n # \"Iter: {:5d} | Cost: {:0.7f} | Accuracy: {:0.7f} \".format(\r\n # it + 1, cost(var, X_train, Y_train), acc\r\n # )\r\n #)\r\n if acc > 0.95:\r\n break\r\n predictions = [pred(variational_classifier(var, x)) for x in X_test]\r\n\r\n # QHACK #\r\n\r\n return array_to_concatenated_string(predictions)", "def determine_classes_based_on_target(dataset):\n gains = dataset[TARGET]\n dataset[GLOBAL_CLASS_COLUMN] = [POSITIVE_CLASS if i > ALPHA else NEGATIVE_CLASS for i in gains]\n return dataset", "def predict(self, testData=[]):\n result = []\n for classValue in self._classAttrs:\n #print(f'Computing Label: {classValue}, {self._classLabelMap[classValue]}')\n result.append(self._computeCondProb(testData, classValue))\n return self._classLabelMap[result.index(max(result))]", "def naive_bayes_classify(df: pd.DataFrame, vect, names):\n features = vect\n target = df.success_lvl\n\n X_train, X_test, y_train, y_test = \\\n train_test_split(features, target, test_size=0.2, random_state=42)\n\n nb_clf = MultinomialNB()\n nb_clf.fit(X_train, y_train)\n nb_predictions = nb_clf.predict(X_test)\n print('Accuracy score for Naive Bayes:', accuracy_score(y_test, nb_predictions))\n\n\n # Find Top/Bottom num of terms used to describe the classes.\n num = 10\n low_class_prob_sorted = nb_clf.feature_log_prob_[0, :].argsort()[::-1]\n hi_class_prob_sorted = nb_clf.feature_log_prob_[1, :].argsort()[::-1]\n print('\\n', f'Low score Top{num} phrases:', np.take(names, low_class_prob_sorted[:num]))\n print('\\n', f'Low score Bot{num} phrases:', np.take(names, low_class_prob_sorted[-num:]))\n print('\\n', f'High score Top{num} phrases:', np.take(names, hi_class_prob_sorted[:num]))\n print('\\n', f'High score Bot{num} phrases:', np.take(names, hi_class_prob_sorted[-num:]))", "def predict(self, X):\n res = self.predict_proba(X)\n positive_mask = res >= 0.5\n negative_mask = res < 0.5\n res[positive_mask] = self.POSITIVE_CLASS\n res[negative_mask] = self.NEGATIVE_CLASS\n return res", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def naiveBayes(x_train, x_test, y_train):\n gnb = GaussianNB()\n y_pred = gnb.fit(x_train, y_train).predict(x_test)\n return y_pred", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.6215704159296479:\r\n return 0\r\n else:\r\n return 1", "def multiclass_noisify(y, P, random_state=0):\n print(np.max(y), P.shape[0])\n assert P.shape[0] == P.shape[1]\n assert np.max(y) < P.shape[0]\n\n # row stochastic matrix\n assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))\n assert (P >= 0.0).all()\n\n n_classes = len(P)\n\n m = y.shape[0]\n print(m)\n new_y = y.copy()\n\n for idx in np.arange(m):\n i = y[idx]\n 
# draw a vector with only an 1\n new_y[idx] = np.random.choice(n_classes, 1, p=P[i, :])[0]\n\n return new_y", "def classification(self,a_train,a_test,c_train,c_test,classifier):\n le =LabelEncoder()\n le.fit(c_train)\n c_train = le.transform(c_train)\n c_test = le.transform(c_test)\n if classifier==\"GNB\": #Gaussian Naive Bayes\n gnb = GaussianNB()\n gnb.fit(a_train, c_train)\n c_pred = gnb.predict(a_test)\n elif classifier==\"DT\": #Decision Tree\n dt=DecisionTreeClassifier()\n dt.fit(a_train, c_train)\n c_pred = dt.predict(a_test)\n elif classifier==\"KNN\": #K-Next-Neighbors\n kn=KNeighborsClassifier(n_neighbors=5)\n kn.fit(a_train, c_train)\n c_pred = kn.predict(a_test)\n elif classifier==\"RF\": #Random Forest\n rf=RandomForestClassifier()\n rf.fit(a_train, c_train)\n c_pred = rf.predict(a_test)\n elif classifier==\"SVC\": # Support Vector Classifier\n \"\"\"\n SVC needs normalisation of Feature Values to scale of [-1,1] or [0,1] depending on sign of them\n \"\"\"\n if a_train.min()<0:\n mms = MinMaxScaler(feature_range=(-1,1))\n else:\n mms = MinMaxScaler()\n mms.fit(a_train)\n a_train = mms.transform(a_train)\n a_test = mms.transform(a_test)\n svc=SVC(cache_size=2000,C=1, probability=True,kernel='rbf')\n svc.fit(a_train,c_train)\n #c_pred = svc.predict(a_test) did not work, that's why it is predicted manual\n new_prob = svc.predict_proba(a_test)\n samples=new_prob.shape[0]\n c_pred= np.array\n for k in range(samples):\n c_pred=np.append(c_pred,new_prob[k].argmax())\n c_pred = c_pred[1:samples+1]\n elif classifier==\"DC\": #Dummy Classifier\n dc=DummyClassifier(strategy=\"uniform\")\n dc.fit(a_train, c_train)\n c_pred = dc.predict(a_test)\n elif classifier==\"GMM\": #Gaussian Mixture Modell\n #number of existing classes get passed to the GMM (n_classes)\n n_classes_train = len(np.unique(c_train))\n n_classes_test = len(np.unique(c_test))\n if n_classes_train>n_classes_test:\n n_classes = n_classes_train\n else:\n n_classes = n_classes_test\n #init_params='', because initial values get calculated manual\n gmm = GMM(n_components=n_classes,init_params='')\n #array of feature values of class i get extracted for further process\n gmm.means_=np.array([a_train[c_train==i,:].mean(axis=0) for i in xrange(n_classes)])\n gmm.weights_=np.array([a_train[c_train==i,:].shape[0]/float(c_train.shape[0]) for i in xrange(n_classes)])\n \n gmm_covars = np.zeros((a_train.shape[1]))\n for i in xrange(n_classes):\n valuesOfClassi = a_train[c_train==i,:]\n valuesOfClassi = np.asarray(valuesOfClassi).T\n matrixOfCov = np.cov(valuesOfClassi)+gmm.min_covar*np.eye(valuesOfClassi.shape[0])\n variance = np.array([matrixOfCov[j,j] for j in xrange(matrixOfCov.shape[0])])\n gmm_covars=np.vstack((gmm_covars,variance))\n gmm_covars=gmm_covars[1:,:] #deletes initial row with zeros\n \n gmm.covars_=gmm_covars\n c_pred = gmm.predict(a_test)\n \n c_pred=le.inverse_transform(c_pred)\n return c_pred", "def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 1", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 1", "def predict(self, 
data):\n prediction = []\n\n for x in data:\n prob_0 = self._sigmoidLikelihood(x, 0)\n prob_1 = self._sigmoidLikelihood(x, 1)\n\n if prob_0 > prob_1:\n prediction.append(0)\n else:\n prediction.append(1)\n\n return prediction", "def predict(self, x):\n # *** START CODE HERE ***\n return self.clf.predict_classes(x.reshape(x.shape[0], 28, 28, 1))\n # *** END CODE HERE ***", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.1142382568740966:\r\n return 1\r\n else:\r\n return 0", "def _generate_data(self, x_data, y_data, max_seq_len, digits, seq_len,\n n_samples, use_one_hot, class_partition,\n upsample_control):\n # modify seq_len in case we do upsampling control\n if upsample_control:\n upsample_factor = seq_len\n seq_len = 1\n if not self.two_class:\n raise NotImplementedError()\n\n # construct all possible classes\n classes = [\"\".join(seq) for seq in \\\n itertools.product(\"01\", repeat=seq_len)]\n\n # get the right number of samples per class to get a balanced data set\n # with the desired n_samples.\n num = n_samples\n div = len(classes)\n n_samples_per_class = [num // div + (1 if x < num % div else 0) \\\n for x in range (div)]\n\n # find indices of samples with the wanted digit class\n y_data = [np.argmax(y) for y in y_data]\n digit_idx = []\n digit_idx.append(np.where(np.asarray(y_data) == digits[0])[0])\n digit_idx.append(np.where(np.asarray(y_data) == digits[1])[0])\n\n # generate samples for every class\n samples = []\n labels = []\n for i,c in enumerate(classes):\n this_label = i\n digits_to_sample = [int(c[i]) for i in range(len(c))]\n for s in range(n_samples_per_class[i]):\n this_sample = None\n for d in digits_to_sample:\n rand_idx = self._rstate.randint(len(digit_idx[d]))\n sample_idx = digit_idx[d][rand_idx]\n digit_sample = x_data[sample_idx]\n if this_sample is None:\n this_sample = digit_sample\n else:\n this_sample = np.vstack((this_sample,digit_sample)) \n samples.append(this_sample)\n labels.append(this_label)\n\n # if configured sort labels into 2 classes\n labels = np.asarray(labels)\n if self.two_class and not upsample_control:\n lbl_mask = np.isin(labels, class_partition)\n labels[~lbl_mask] = 0\n labels[lbl_mask] = 1\n\n if upsample_control:\n for i,s in enumerate(samples):\n # Initial timestep is absolute start position of digit. 
To\n # translate to a higher resolution image, we can just multiply\n # the abolute position vby the scaling factor.\n upsample = s[0,:]*upsample_factor\n for t in np.arange(1,s.shape[0]):\n # don't do upsampling at end of strokes or end of digits\n if all((s[t,2] == 0, s[t,3] == 0)):\n # Repeat original stroke \"upsample_factor\" times, such\n # that the relative stroke length is identical if\n # images are normalized to same resolution.\n for k in range(upsample_factor):\n upsample = np.vstack((upsample, s[t,:]))\n else:\n upsample = np.vstack((upsample, s[t,:]))\n samples[i] = upsample\n\n # structure output data\n out_data = labels.reshape(-1, 1)\n if use_one_hot:\n n_classes = 2**seq_len\n if self.two_class:\n n_classes = 2\n\n # FIXME We shouldn't call this method if the validation set size is\n # zero.\n if out_data.size == 0:\n out_data = np.matlib.repmat(out_data, 1, n_classes)\n else:\n # FIXME use internal method `_to_one_hot` and set required class\n # attributes beforehand.\n one_hot_encoder = OneHotEncoder(categories=[range(n_classes)])\n one_hot_encoder.fit(npm.repmat(np.arange(n_classes), 1, 1).T)\n out_data = one_hot_encoder.transform(out_data).toarray()\n\n if self.target_per_timestep:\n out_data = np.matlib.repmat(np.asarray(out_data), 1, max_seq_len)\n\n # structure input data\n in_data = np.zeros((n_samples,max_seq_len,4))\n sample_lengths = np.zeros(n_samples)\n for i,s in enumerate(samples):\n in_data[i,:s.shape[0],:] = s\n sample_lengths[i] = s.shape[0]\n\n in_data = self._flatten_array(in_data)\n\n return in_data, out_data, sample_lengths", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 0\r\n elif (float(i[1])) <= 0.02728102940334218:\r\n return 1\r\n else:\r\n return 0", "def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? 
(assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.891599215656381:\r\n return 1\r\n else:\r\n return 0", "def fit(self, data):\n self.labels = np.array(data['class'])\n self.num_classes = len(data['class'].unique())\n self.all_classes = np.sort(np.unique(self.labels))\n # populating the features dataframe\n feat_df = data[['value']].copy()\n feat_df['length'] = feat_df['value'].apply(lambda val: len(val))\n feat_df['digit_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isdigit() for char in val) / len(val))\n feat_df['digit_num'] = feat_df['value'].apply(\n lambda val: sum(char.isdigit() for char in val))\n feat_df['alpha_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isalpha() for char in val) / len(val))\n feat_df['alpha_num'] = feat_df['value'].apply(\n lambda val: sum(char.isalpha() for char in val))\n feat_df['space_frac'] = feat_df['value'].apply(\n lambda val: 0 if len(val) == 0 else\n sum(char.isspace() for char in val) / len(val))\n feat_df['space_num'] = feat_df['value'].apply(\n lambda val: sum(char.isspace() for char in val))\n self.features = feat_df.ix[:, 1:].as_matrix()\n # training the classifier\n self.clf.fit(self.features, self.labels)", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.610257172808176:\r\n return 1\r\n else:\r\n return 0", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum + self.bias[l]\n guesses.append(vectors.argMax())\n return guesses", "def classify(cls, i):\r\n # lda \r\n if i[1] == None:\r\n return 1\r\n elif (float(i[1])) <= 0.01755814193254369:\r\n return 1\r\n else:\r\n return 0", "def predictionBinaryClassifier(x, beta):\n x = np.insert(x, 0, 1, axis = 1)\n probability = logisticFunction(np.dot(beta, x.T))\n func = np.vectorize(lambda x: 1 if x >=0.5 else 0)\n probability = func(probability)\n return probability", "def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))", "def classify(self,X):\n return int(self.classifier.predict(self.scaler.transform(X)))", "def naive_bn(data, attributes):\n bn = []\n attr = attributes['attr'].tolist()\n # each attribute is only dependent on the class node\n i = 0\n while (i < len(attr)-1):\n row = [attr[i], attr[-1]]\n bn.append(row)\n i= i + 1\n # frequency table \n freq = counts_table(data, attributes)\n # conditional probabilities and prior probabilities\n cond_probs, prior0, prior1 = conditional_probability(data, attributes, freq)\n\n return bn, cond_probs, prior0, prior1", "def filter_classes(X, y, num=1000): \n classes = np.unique(y)\n for i, label in enumerate(classes):\n indices = np.where(y==label)[0]\n indices = np.random.choice(indices, num, replace=False)\n if i == 0:\n X_new = X[indices]\n y_new = y[indices]\n else:\n X_new = np.vstack([X_new, X[indices]])\n y_new = np.hstack([y_new, y[indices]]) \n # Shuffle data\n indices = np.arange(0,len(y_new))\n np.random.shuffle(indices)\n return X_new[indices], y_new[indices]", "def classify_spam(sms):\n return naive_bayes_predict(spam_ratio, words, spamicity, sms) > seuil", "def train_naive_bayes(X_train_input, y_train_input):\r\n from sklearn.naive_bayes import GaussianNB\r\n nb_clf = GaussianNB()\r\n nb_clf.fit(X_train_input, y_train_input)\r\n return nb_clf", "def 
classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):\n #print vec2Classify\n # [0 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1]\n \n #print p0Vec\n \n #print p1Vec\n \"\"\"[-3.04452244 -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -3.04452244 -3.04452244 -2.35137526 -2.35137526 -3.04452244 -3.04452244\n -3.04452244 -2.35137526 -2.35137526 -2.35137526 -2.35137526 -2.35137526\n -3.04452244 -1.94591015 -3.04452244 -2.35137526 -2.35137526 -3.04452244\n -1.94591015 -3.04452244 -1.65822808 -3.04452244 -2.35137526 -3.04452244\n -3.04452244 -3.04452244]\"\"\" \n \n #print vec2Classify * p1Vec\n \"\"\"\n [-0. -3.04452244 -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -3.04452244\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -0. -0. -0. -0. -0.\n -0. -3.04452244]\n \"\"\"\n \n #print sum(vec2Classify * p1Vec)\n # -9.13356731317\n \n p1 = sum(vec2Classify * p1Vec) + log(pClass1) #element-wise mult\n p0 = sum(vec2Classify * p0Vec) + log(1.0 - pClass1)\n \n if p1 > p0:\n return 1\n else: \n return 0", "def generate_data(self,seed):\n X, y = make_classification( n_samples = 250, random_state = seed )\n # Add bias term\n X = np.concatenate( ( np.ones( ( 250, 1 ) ), X ), axis = 1 )\n self.X_train, self.X_test, self.y_train, self.y_test = train_test_split( \n X, y, test_size = 50, random_state = seed )", "def train_self(self):\n # for each numeric column, we need to record mean and std for both classes\n for col in self.num_cols:\n self.prob_hub[col] = {}\n for claz in self.class_list:\n mean, std = get_mean_std(self.data[self.data[self.class_column] == claz][col])\n self.prob_hub[col][claz] = (mean, std)\n\n # for each categorical columns, we need to record P(X=x|Y=y)\n for col in self.cat_cols:\n ulist = unique_list(self.data[col])\n self.prob_hub[col] = {}\n stat = self.data.groupby(self.class_column)[col].value_counts() / self.data.groupby(self.class_column)[col].count()\n # for each class\n for claz in self.class_list:\n self.prob_hub[col][claz] = {}\n for uni_element in ulist:\n self.prob_hub[col][claz][uni_element] = stat[claz][uni_element]\n\n self.predict(self.data, True)", "def nbc_model(params):\n if (params['random']):\n params['alpha'] = random.randrange(1, 10, step=1) * 0.1\n model = MultinomialNB(\n alpha=params['alpha']\n )\n\n return model", "def __init__(self, X, y, sample_weights):\n self.name = \"Gaussian Naive Bayes\"\n self.clf = self.train(X, y, sample_weights)", "def classify(self, nn=1):\n\t\t#err=0\n\t\tpossibilities=[]\n\t\tfor i in range(len(self.X_test)):\n\t\t\tfor lines in range(len((self.X_train))):\n\t\t\t\tdist=np.linalg.norm(self.X_test[i]-self.X_train[lines])\n\t\t\t\tpossibilities.append([dist,self.Y_train[lines]])\n\t\t\tpossibilities.sort()\n\t\t\tfinal=[]\n\t\t\tfor c in range(0,15):\n\t\t\t\tfinal.append(possibilities[c][1])\n\t\t\t\tprint possibilities[c][1]\n\t\t\tcount=np.zeros(10)\n\t\t\tfor m in final:\n\t\t\t\tcount[m]+=1\n\t\t\t\n\t\t\tans=np.any(count==count.max())\n\t\t\t\n\t\t\tprint \"actual=\",self.Y_test[i]\n\t\t\tif(ans!=self.Y_test[i]):\n\t\t\t\tglobal err\n\t\t\t\terr=err+1", "def preprocess():\n # Load the data\n random.seed(77)\n X,y = make_classification(n_samples=500, n_features=30, n_informative=8, n_redundant=2, \n n_repeated=0, n_classes=3, n_clusters_per_class=2, weights=None, \n flip_y=0.01, class_sep=1.0, hypercube=True, shift=0.0, scale=1.0, \n shuffle=True, random_state=None)\n\n x_train, x_val, y_train, y_val = train_test_split(X, y, random_state=0, test_size=0.25)\n\n # Standardize the data\n scaler = 
StandardScaler()\n X_train = scaler.fit_transform(x_train)\n X_val = scaler.transform(x_val)\n\n \n return X_train,y_train,X_val,y_val", "def predict_labels(model):\n test_datagen = ImageDataGenerator(featurewise_center=True,\n featurewise_std_normalization=True\n #rescale=1. / 255,\n #samplewise_center=True,\n #samplewise_std_normalization=True\n )\n test_datagen.fit(test_data)\n # datagen.fit(val_data)\n # create generator for train data\n test_generator = test_datagen.flow(\n test_data,\n batch_size=batch_size,\n shuffle=False)\n pred_prob=model.predict_generator(test_generator,test_data.shape[0])\n pred_prob=pred_prob[:,0]\n def pre_class(x):\n \tif x<0.5:\n return 0\n else:\n return 1\n #def true_label(id):\n #\tif 'f0' in id:\n #\t return 0\n # elif 'f1' in id: \n # return 1\n #\telse:\n #\t pass\n #pred_true=map(true_label,test_id)\n #pred_true=np.array(pred_true)\n #print roc_auc_score(val_target, pred_prob)\n #prediction=map(pre_class,pred_prob)\n #print confusion_matrix(val_target,prediction)\n with open(\"prediction.csv\", \"w\") as f: \n\tp_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n for id,label in zip(test_id,pred_prob):\n\t p_writer.writerow([id, label])\n\t\n #base_path = \"PZ/test/test/\"\n\n #with open(\"prediction.csv\", \"w\") as f:\n # p_writer = csv.writer(f, delimiter=',', lineterminator='\\n')\n # for _, _, imgs in os.walk(base_path):\n # for im in imgs:\n # pic_id = im.split(\".\")[0]\n #img = cv2.imread(base_path+im)\n #img = cv2.resize(img, (img_width, img_height), cv2.INTER_LINEAR)\n #img = img.transpose((2,0,1))\n #img = np.expand_dims(img,axis=0)\n #img = load_img(base_path + im)\n #img = imresize(img, size=(img_height, img_width))\n #test_x = img_to_array(img).reshape(3, img_height, img_width)\n #test_x = test_x.reshape((1,) + test_x.shape)\n #test_datagen.fit(img)\n #test_generator = test_datagen.flow(img,\n # batch_size=1,\n # shuffle=False)\n #prediction = model.predict_generator(test_generator, 1)\n #p_writer.writerow([pic_id, prediction])", "def y_to_classification_form(y,n_classes):\n \n return np.eye(n_classes)[y]", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def calculate_likelihoods_bernoulli(data, labels, vocab):\r\n classes = set(labels)\r\n likelihoods = {}\r\n # Calculate likelihood for each class\r\n for cls in classes:\r\n documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls]\r\n numDocsInClass = len(documentsInClass)\r\n results = {}\r\n for word in vocab:\r\n numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass))\r\n # Binary variable-- either present or not present\r\n results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)\r\n # Special laplace smoothing for words not found in training data\r\n results[None] = laplace_smooth(0, numDocsInClass, 2)\r\n likelihoods[cls] = results\r\n return likelihoods", "def predict_classes(model, x, batch_size=None, verbose=0, steps=None):\n\tproba = model.predict(x, batch_size=batch_size, verbose=verbose,\n\t\t\t\t\t\t steps=steps)\n\n\tif proba.shape[-1] > 1:\n\t\treturn proba.argmax(axis=-1)\n\telse:\n\t\treturn (proba > 0.5).astype('int32')", "def classify(trainData, testData, nNumFeatures, verbosity = False):\n path = os.path.dirname(trainData)\n trainFile = os.path.basename(trainData)\n testFile = os.path.basename(testData)\n outName = os.path.splitext(testData)[0] + '.out'\n 
callCommand = ['Timbl']\n callCommand.append('-mO:N1-%d' % nNumFeatures)\n callCommand.append('-o')\n callCommand.append(outName)\n callCommand.append('-P')\n callCommand.append(path)\n callCommand.append('-f')\n callCommand.append(trainFile)\n callCommand.append('-t')\n callCommand.append(testFile)\n if verbosity:\n call(callCommand)\n else:\n with open(os.devnull, 'w') as devnull:\n call(callCommand, stdout=devnull, stderr=devnull)\n predictV, predict = importC5(outName)\n os.remove(outName)\n return predict", "def classify(self, data):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def to_categorical(y, nb_classes):\n y = np.asarray(y, dtype='int32')\n if not nb_classes:\n nb_classes = np.max(y)+1\n Y = np.zeros((len(y), nb_classes))\n for i in range(len(y)):\n Y[i, y[i]] = 1.\n return Y", "def predict_classes(self, X, boundary=0.5):\n # Add an intercept if desired.\n X = self._add_intercept(X)\n # Predict the probabilities of belonging to class 1.\n predicted_probabilities = self.predict_probabilities(X)\n # Set predictions to 1 or 0 based on the decision boundary.\n predicted_classes = np.where(predicted_probabilities >= boundary, 1, 0)\n \n return predicted_classes", "def main():\r\n x = [\r\n [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ], [ 0,0 ], [ 0,0 ],\r\n [ 0,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 0,0 ], [ 1,0 ],\r\n [ 1,0 ], [ 0,0 ], [ 1,1 ], [ 0,0 ], [ 1,0 ], [ 0,0 ]\r\n ]\r\n\r\n # Encoding of the correct classes for the training material\r\n y = [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0]\r\n b = BinaryLogisticRegression(x, y)\r\n b.fit()\r\n b.print_result()", "def classify(self, data ):\n guesses = []\n for datum in data:\n vectors = util.Counter()\n for l in self.legalLabels:\n vectors[l] = self.weights[l] * datum\n guesses.append(vectors.argMax())\n return guesses", "def BinaryClassif(synth_sample, real_sample, label, n_cores=1):\n synth_sample = BinaryLabelCheck(synth_sample, label)\n real_sample = BinaryLabelCheck(real_sample, label)\n\n train_col = list(set(synth_sample.columns) - set([label]))\n \n X_test = real_sample[train_col]\n y_test = real_sample[label]\n \n X_train = synth_sample[train_col]\n y_train = synth_sample[label]\n \n model = XGBClassifier(n_estimators=512,\n use_label_encoder=False,\n max_depth=64,\n verbosity=0,\n objective='binary:logistic',\n eval_metric='error',\n maximize=False,\n n_jobs=n_cores,\n )\n y_pred = model.fit(X_train, y_train).predict(X_test)\n \n return matthews_corrcoef(y_test, y_pred)", "def classify(self):\r\n Classify(os.path.join(self.__path,'test.csv'),self.__rang,self.__numeric,self.__statistics,self.__k,self.__classes,self.__abs_n,self)\r\n self.view.Build_Button.configure(state=\"active\")", "def classify_data(X, model, batch_size):\n n_batch = int(np.ceil(len(X) / batch_size))\n predictions = np.vstack(\n [\n model.classifier(\n model.encoder(np.array(X[(i) * batch_size : (i + 1) * batch_size, :]))\n )\n for i in range(n_batch)\n ]\n )\n predictions = tf.nn.softmax(predictions).numpy()\n return predictions[:, 1] - predictions[:, 0]", "def predict(self, datum):\r\n probs = {}\r\n for class_ in set(self.train_classes):\r\n probs[class_] = self.distribution.class_prob[class_] * reduce(lambda x,y:x*y, [self.distribution.prob(feat_ind_feat[0],feat_ind_feat[1],class_) for feat_ind_feat in enumerate(datum)])\r\n return max(probs, key=lambda x:probs[x])", "def train_naive_bayes_soy(train_set, 
classes):\n\n print('[ INFO ]: Training soy data with Naive Bayes Classifier...')\n\n class_probabilities = {}\n class_feature_probs = {}\n\n for soy_class in classes:\n\n feature_true_probs = {}\n feature_false_probs = {}\n\n # Find the probability that each class is in the training set\n class_probabilities[soy_class] = len(train_set[(train_set[soy_class] == 1)]) / len(train_set)\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is present\n class_true = train_set[(train_set[soy_class] == 1)]\n for col in class_true.columns:\n if col not in classes:\n try:\n true_true = len(class_true[(class_true[col] == 1)]) / len(class_true)\n except:\n true_true = 0\n feature_true_probs[col] = true_true\n\n # Compute the conditional feature probabilities based on the class probabilities\n # where the class is not present\n class_false = train_set[(train_set[soy_class] == 0)]\n for col in class_false.columns:\n if col not in classes:\n try:\n false_false = len(class_false[(class_false[col] == 0)]) / len(class_false)\n except:\n false_false = 0\n feature_false_probs[col] = false_false\n\n class_feature_probs[soy_class] = [feature_true_probs, feature_false_probs]\n\n return class_probabilities, class_feature_probs", "def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect", "def predict(self, X):", "def predict(self, X):", "def massage(X, y, S, b, d):\n\n\t# Learn R, a Gaussian NB classifier which will act as a ranker\n\tR = GaussianNB()\n\tprobas = R.fit(np.asarray(X), y).predict_proba(X)\n\n\t# Create a df with training data, labels, and desired class probabilities\n\tX['class'] = y\n\tX['prob'] = [record[d] for record in probas]\n\n\t# Promotion candidates sorted by descending probability of having desired class\n\tpr = X[(X[S] == b) & (X['class'] != d)]\n\tpr = pr.sort_values(by = 'prob', ascending = False)\n\n\t# Demotion candidates sorted by ascending probability\n\tdem = X[(X[S] != b) & (X['class'] == d)]\n\tdem = dem.sort_values(by = 'prob', ascending = True)\n\n\t# Non-candidates\n\tnon = X[((X[S] == b) & (X['class'] == d)) | ((X[S] != b) & (X['class'] != d))]\n\n\t# Calculate the discrimination in the dataset\n\tdisc = discKC(X, y, S, b, d)\n\n\t# Calculate M, the number of labels which need to be modified\n\tM = (disc * len(X[X[S] == b]) * len(X[X[S] != b])) / float(len(X))\n\tM = int(M)\n\n\t# 
Flip the class label of the top M objects of each group\n\t# i.e. M pairs swap labels, where M is chosen to make discKC = 0\n\tc = pr.columns.get_loc(\"class\")\n\tpr.iloc[:M, c] = d\n\tdem.iloc[:M, c] = 1 - d\n\n\tX.drop(['class', 'prob'], axis = 1, inplace = True)\n\tX_prime = pd.concat([pr, dem, non]) \n\ty_prime = X_prime['class'].tolist()\n\tX_prime = X_prime.drop(['class', 'prob'], axis = 1)\n\n\treturn(X_prime, y_prime)", "def gen_random_labels(\n X: Union[np.ndarray, int], n_classes: int, pvec=None\n) -> np.ndarray:\n\n if isinstance(X, int):\n num = X\n else:\n num = X.shape[0]\n\n pvec = np.ones((n_classes,)) / n_classes\n\n return npr.multinomial(1, pvec, size=num)", "def generate_data(groups):\n # get path list for the intended classification problem\n input_paths = generate_input_list(groups) \n X_lst = []\n y = []\n for p in input_paths:\n dp = pd.read_csv(p, sep = '\\t') #datapoint\n # Normalization \n # norm = lambda x: (x - x.mean()) / x.std()\n # dp = dp.apply(norm)\n # Min-Max scaling \n #dp_norm = (dp - dp.min()) / (dp.max() - dp.min())\n #dp = dp_norm.values\n if dp.isnull().sum().sum()>0:\n# print(p, dp.isnull().sum().sum())\n continue\n dp = dp.drop(['time'], axis = 1) \n dp = dp.iloc[:1600:4]\n\n if dp.isnull().sum().sum()>0:\n# print('after norm',p, dp.isnull().sum().sum())\n continue\n dp = dp.values\n\n X_lst.append(dp)\n sample_y = get_target(p, text= True)\n y.append(sample_y)\n X = np.stack(X_lst, axis=0)\n \n # convert y into int 0 and 1\n encoder = LabelEncoder()\n encoder.fit(y)\n y = encoder.transform(y)\n y_dummy = y\n # convert y into one-hot encoding\n if len(groups)>2:\n y_dummy = pd.get_dummies(y)\n y_dummy = y_dummy.values\n return X, y , y_dummy", "def bl_predict(self, n_samples, data=None):\n\n if data is None:\n data = self.datas[self.train_idx]\n\n y_train = data.gen_labels()\n bl = DummyClassifier()\n bl.fit(np.random.rand(len(y_train), 1), y_train)\n\n return self._predict_proba(bl, np.random.rand(n_samples, 1))", "def test_bernoulli(self):\n with Model() as model:\n Bernoulli('x', 0.5)\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)", "def bayes_model(feature_train, help_rank_train, model_name):\n model = MultinomialNB()\n model.fit(feature_train, help_rank_train)\n modelpkl = open(model_name,'wb')\n dump(model, modelpkl, -1)\n return", "def classification(original_training_data):\n\n ''' Storing the dataframe as numpy array '''\n original_training_data_values = original_training_data.values\n\n ''' Storing the values of target attribute for finding out the counts of each recipetype'''\n target_column = original_training_data_values[:, -1]\n\n ''' Recipe_type stores the unique values of target attribute in the form of a list [Muffin Cupcake] \n cupcake_muffin_count stores the count of muffin and cupcakes in the form of a list [451 451]'''\n recipe_type, cupcake_muffin_count = np.unique(target_column, return_counts=True)\n\n ''' cupcake_muffin_count.argmax() returns the index of the highest value. In this case, it will return the index of \n muffin or cupcake count. 
'''\n majority_class = recipe_type[cupcake_muffin_count.argmax()]\n\n return majority_class", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def classify(data, labels, (train_idx, test_idx), classifier=None):\r\n\r\n assert classifier is not None, \"Why would you pass not classifier?\"\r\n\r\n # Data scaling based on training set\r\n scaler = SupervisedStdScaler() #SupervisedRobustScaler() # # \r\n scaler.fit(data[train_idx,:], labels[train_idx], label=-1)\r\n #scaler.fit(data[train_idx,:], labels[train_idx])\r\n data_train = scaler.transform(data[train_idx,:])\r\n data_test = scaler.transform(data[test_idx,:])\r\n try:\r\n classifier.fit(data_train, labels[train_idx])\r\n \r\n \r\n confMat = confusion_matrix(labels[test_idx],\r\n classifier.predict(data_test))\r\n if confMat.shape == (1,1):\r\n if all(labels[test_idx] == -1):\r\n confMat = np.array([[confMat[0], 0], [0, 0]], dtype=confMat.dtype)\r\n else:\r\n confMat = np.array([[0, 0], [0, confMat[0]]], dtype=confMat.dtype)\r\n confMatRate = confMat / np.tile(np.sum(confMat, axis=1).astype('float'), (2,1)).transpose()\r\n totalErr = (confMat[0, 1] + confMat[1, 0]) / float(confMat.sum())\r\n #if type(classifier) not in [type(None), DummyClassifier]:\r\n if hasattr(classifier,'param_grid'): \r\n #isinstance(classifier, GridSearchCV) or \\\r\n # isinstance(classifier, RandomizedSearchCV):\r\n fitted_model = classifier.best_estimator_\r\n else:\r\n fitted_model = copy.copy(classifier) \r\n return confMatRate, totalErr, fitted_model\r\n except np.linalg.linalg.LinAlgError as e:\r\n # sahil added statement to raise the error instead of returning nun values\r\n print e.message\r\n raise e\r\n # return np.array([[np.nan, np.nan], [np.nan, np.nan]]), np.nan, None\r", "def naiveBayes(self):\n acc = 0\n #for each example in the test-set\n for d in self.dev:\n pred_good = self.prob_True\n pred_bad = self.prob_False\n #calc the probability for yes and no\n for index in range(len(d[0])):\n pred_good *= self.probs_yes[(index,d[0][index])]\n pred_bad *=(self.probs_no[(index,d[0][index])])\n pred = False\n if pred_good >= pred_bad:\n pred = True\n if pred == d[1]:\n acc +=1\n return acc/len(self.dev)", "def predict(self, data):\n xdata, _ = self.array_from_cases(data)\n preds = self.model.predict(xdata)\n label_preds = [dict(zip(self.binarizer.classes_, pred)) for pred in preds]\n return label_preds", "def classify(im, model):\n\n classe = model.predict(im)\n classe = classe.argmax(axis=-1) # taking index of the maximum %\n return classe[0]", "def initialization_based(input_array):\n\n # search for the unique labels in the array\n oh_array = np.unique(input_array, return_inverse=True)[1]\n # set the predicted class on 1, and all the other classes on 0\n out = np.zeros((oh_array.shape[0], oh_array.max() + 1), dtype=int)\n out[np.arange(out.shape[0]), oh_array.ravel()] = 1\n return out", "def preprocess_labels(y):\n\n y = tf.keras.utils.to_categorical(y, nclasses)\n\n return y", "def predict(self, data):\n try:\n getattr(self, \"tree\")\n except AttributeError:\n raise RuntimeError(\"You must train classifer before predicting data!\")\n\n predicts_proba = self.predict_proba(data)\n predicts = _classify_from_probs(predicts_proba)\n return predicts", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in 
self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def classify(self, data ):\n\t\tguesses = []\n\t\tfor datum in data:\n\t\t\tvectors = util.Counter()\n\t\t\tfor l in self.legalLabels:\n\t\t\t\tvectors[l] = self.weights[l] * datum\n\t\t\tguesses.append(vectors.argMax())\n\t\treturn guesses", "def convert_output_to_class(preds,mu_stds,use_thresh=True,scale=1.0):\n preds_prob = torch.sigmoid(preds) # convert logits to probability with sigmoid\n max_class = torch.argmax(preds_prob,dim=-1).numpy().tolist() # get class with the largest probability\n max_prob = torch.max(preds_prob,dim=-1).values.detach().numpy().tolist() # get the max value of probability\n pred_class = [] # predicted class\n for i in range(len(max_prob)): # loop each output of the model\n max_class_one = max_class[i] # get class with the largest probability\n threshold = max(0.5, 1. - scale * mu_stds[max_class_one][1]) if use_thresh is True else 0.5 # find threshold for the predicted class\n # print(threshold)\n if max_prob[i] >= threshold: # if the max value of probability greater than threshold\n pred_class.append(max_class[i]) # append the max class\n else:\n pred_class.append(-1) # append unseen class\n return pred_class", "def label(d, X, ind_class0, ind_class1, N, V, binary):\n if binary == True:\n K = 1\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = 0.0\n C[ind_class1, :] = 1.0\n else:\n K = 2\n C = torch.zeros(N + V, K)\n C[ind_class0, :] = torch.tensor([1.0, 0.0])\n C[ind_class1, :] = torch.tensor([0.0, 1.0])\n\n X_train = X[:N, :]\n X_val = X[N:, :]\n C_train = C[:N, :]\n C_val = C[N:, :]\n\n return [X_train, C_train, X_val, C_val, d, K]" ]
[ "0.6858228", "0.68162215", "0.6734391", "0.6612314", "0.65618414", "0.64322984", "0.64175165", "0.64140654", "0.6396146", "0.63302946", "0.6329519", "0.62704647", "0.62355125", "0.6218283", "0.6174795", "0.6173328", "0.6168497", "0.61587894", "0.61451745", "0.61435205", "0.6125925", "0.6113199", "0.610356", "0.6087301", "0.60775065", "0.60705227", "0.6066578", "0.60550207", "0.60484266", "0.6042391", "0.60384727", "0.60376006", "0.6017807", "0.60056347", "0.6004291", "0.60023016", "0.60011166", "0.59955907", "0.5994925", "0.5987182", "0.59825", "0.59821296", "0.5979789", "0.5974897", "0.5972712", "0.59695214", "0.5963302", "0.5960309", "0.59519994", "0.5944246", "0.59360635", "0.59360635", "0.5920051", "0.5919401", "0.59185016", "0.590397", "0.58954656", "0.58953446", "0.58950746", "0.58948135", "0.58921766", "0.5891457", "0.5890258", "0.58884764", "0.5888347", "0.588789", "0.58809674", "0.5878881", "0.5872988", "0.58678347", "0.58546036", "0.5853329", "0.5850481", "0.58423316", "0.5840101", "0.5837361", "0.58256173", "0.58249617", "0.5821861", "0.58196217", "0.5819582", "0.5819582", "0.5815985", "0.58130795", "0.58104765", "0.5802786", "0.58011925", "0.57998216", "0.579933", "0.57927406", "0.5791935", "0.57914394", "0.57898074", "0.5789594", "0.5783716", "0.57813185", "0.5778369", "0.5775558", "0.5775558", "0.57736695", "0.57704794" ]
0.0
-1
Estimate the priors for a class
def calculate_priors(trainingLabels):
    sum = 0
    priors = {}
    totalSamples = len(trainingLabels)
    classes = set(trainingLabels)
    for cls in classes:
        numCls = len(filter(lambda x: x == cls, trainingLabels))
        sum += numCls
        priors[cls] = float(numCls) / float(totalSamples)
    # Sanity check: valid partitioning
    assert(sum == totalSamples)
    return priors
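# A minimal usage sketch, assuming plain string class labels (hypothetical data,
# not taken from the snippet above). Note that len(filter(...)) relies on
# Python 2 semantics; under Python 3, filter() returns an iterator and would
# need to be wrapped in list() before calling len().
if __name__ == "__main__":
    example_labels = ["spam", "ham", "ham", "spam", "ham"]
    priors = calculate_priors(example_labels)
    # Under Python 2 this yields a prior of 0.4 for 'spam' and 0.6 for 'ham'.
    print(priors)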
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def classProbs(observation, tree, classes):\n res = classify(observation, tree) #res = results\n total = sum(res.values())\n probs = []\n for c in classes:\n if c in res.keys():\n probs.append(float(res[c])/total)\n else:\n probs.append(0)\n return probs", "def priors(self):\n\n return self._priors", "def get_pred_class_probs(self, pred_mu, pred_sigma):", "def __init__(self, classes, data_size):\r\n self.classes = classes\r\n self.data_size = data_size\r\n self.conditional_prob = {class_:{} for class_ in classes} # Conditional Probability Table for storing parameters useful to compute P(feat|class_)\r\n self.class_prob = {} # Stores the priors\r", "def checkPriors(para):\n\t\n\t# extract parameters\n\tA = para[0]\n\tw = para[1]\n\tp = para[2]\n\t\n\t# check them\n\tif (A<0.01 or A>10.0): A = s.uniform.rvs(0.01,10.)\n\t\n\tif (w<0.01 or w>10.0): w = s.uniform.rvs(0.01,10.)\n\t\t\n\tif ( p<0. or p>2*np.pi): p = s.uniform.rvs(0.0,2*np.pi)\n\t\n\treturn np.array([A,w,p])", "def classify(priors, likelihoods, testData, classes):\r\n results = []\r\n for document in testData:\r\n bestClass = None\r\n bestProb = None\r\n currentProb = 0.0\r\n for cls in classes:\r\n prior = priors[cls]\r\n currentProb = log(prior)\r\n lhoods = likelihoods[cls]\r\n for (word, count) in document:\r\n if word in lhoods:\r\n currentProb += log(lhoods[word])\r\n else:\r\n currentProb += log(lhoods[None])\r\n if currentProb > bestProb or bestClass == None:\r\n bestProb = currentProb\r\n bestClass = cls\r\n results.append(bestClass)\r\n return results", "def class_probability(self, x):\n # permutation before softmax b x a x c x spatial dims --> b x c x a x spatial dims\n # as expected by PyTorch Softmax the class axis = 1 \n return self._class_prob(x.permute([0, 2, 1, 3, 4]))", "def _estimate_priors(self):\n\n # Estimate the log UMI count turning point between cells and 'empties'.\n self.priors['log_counts_crossover'] = \\\n np.mean(np.log1p([self.priors['cell_counts'],\n self.priors['empty_counts']])).item()\n\n # Estimate prior for the scale param of LogNormal for d.\n if self.model_name != \"simple\":\n self.priors['d_std'] = (np.log1p(self.priors['cell_counts'])\n - self.priors['log_counts_crossover']) / 5\n else:\n self.priors['d_std'] = 0.2 # This is a reasonable prior in log space.\n\n # Priors for models that include empty droplets:\n if self.model_name != \"simple\":\n # Estimate fraction of trimmed dataset that contains cells.\n # cell_prob = self.priors['n_cells'] / self.analyzed_barcode_inds.size\n cell_prob = (1 - self.fraction_empties) \\\n * (self.priors['n_cells'] / self.analyzed_barcode_inds.size)\n self.priors['cell_prob'] = cell_prob\n\n assert cell_prob > 0, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be > 0, \" \\\n f\"but is {cell_prob}.\"\n\n assert cell_prob <= 1, f\"Fraction of trimmed dataset \" \\\n f\"containing cells should be at most 1, \" \\\n f\"but is {cell_prob}.\"\n\n # Turn cell probability into logit.\n self.priors['cell_logit'] = np.log(cell_prob / (1 - cell_prob)).item()\n\n # Estimate the ambient gene expression profile.\n self.priors['chi_ambient'], self.priors['chi_bar'] = \\\n estimate_chi_from_dataset(self)", "def calc_priors(categories, data):\n counts = np.zeros(categories)\n for val in range(categories):\n counts[val] = np.count_nonzero(data.labels == val)\n return counts / len(data.labels)", "def classes_calculations(input):\n counts, _ = np.histogram(input, bins=int(\n input.max() + 1), range=(0, int(input.max())))\n return np.nonzero(counts)[0]", 
"def __init__(self, N=40):\n self._primes = []\n self.find_primes(N)", "def make_priors(self):\r\n if self.last_img_size != (self.target_size, self.target_size):\r\n prior_data = []\r\n\r\n for conv_w, conv_h, scale in zip(self.conv_ws, self.conv_hs, self.scales):\r\n for i in range(conv_h):\r\n for j in range(conv_w):\r\n # +0.5 because priors are in center-size notation\r\n cx = (j + 0.5) / conv_w\r\n cy = (i + 0.5) / conv_h\r\n\r\n for ar in self.aspect_ratios:\r\n ar = np.sqrt(ar)\r\n\r\n w = scale * ar / self.target_size\r\n h = scale / ar / self.target_size\r\n\r\n # This is for backward compatability with a bug where I made everything square by accident\r\n h = w\r\n\r\n prior_data += [cx, cy, w, h]\r\n\r\n self.priors = np.array(prior_data).reshape(-1, 4)\r\n self.last_img_size = (self.target_size, self.target_size)\r\n return self.priors", "def __init__(self, num_class):\n self.sum_hit_at_one = 0.0\n self.sum_perr = 0.0\n self.sum_f1score = 0.0\n self.sum_f2score = 0.0\n self.sum_loss = 0.0\n self.num_examples = 0", "def generate_probabilities(self):\n k = 1\n v= 10\n for g in self.class_probabilities:\n curr_list = self.class_probabilities[g]\n for l in range(0,28):\n for w in range(0,28):\n total = float(curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2])\n curr_list[l][w][0] = (float(curr_list[l][w][0])+k)/(total + k*v) \n curr_list[l][w][1] = (float(curr_list[l][w][1])+k)/(total + k*v)\n curr_list[l][w][2] = (float(curr_list[l][w][2])+k)/(total + k*v)\n curr_list[l][w][3] = curr_list[l][w][0] + curr_list[l][w][1] + curr_list[l][w][2]", "def probabilities(self):\n raise NotImplementedError", "def _class_distribution(y):\n unique, counts = np.unique(y, return_counts = True)\n\n percentages = counts / np.sum(counts)\n\n return unique, counts, percentages", "def p(self) -> Probability:\n ...", "def calc_priors(self, prior_U, method='inverse'):\n if self.Pchance is None:\n raise IOError(\"Set Pchance before calling this method\")\n\n # TODO -- Move this into Bayesian\n if prior_U < 0.:\n self.prior_U = np.product(self.candidates['P_c'])\n else:\n self.prior_U = prior_U\n\n # Raw priors\n self.raw_prior_Oi = bayesian.raw_prior_Oi(method, self.candidates[self.filter].values,\n Pchance=self.Pchance,\n half_light=self.candidates.half_light.values)\n\n # Normalize\n self.prior_Oi = bayesian.renorm_priors(self.raw_prior_Oi, self.prior_U)\n\n # Add to table\n self.candidates['P_O'] = self.prior_Oi", "def determineClasses(self, particles):\n\t\tapDisplay.printMsg(\"sorting refineparticledata into classes\")\n\t\tt0 = time.time()\n\t\tclasses={}\n\t\tclass_stats={}\n\t\tquality=numpy.zeros(len(particles))\n\t\tfor partnum in range(len(particles)):\n\t\t\tquality[partnum] = particles[partnum]['quality_factor']\n\t\t\tkey = (\"%.3f_%.3f\"%(particles[partnum]['euler1'], particles[partnum]['euler2']))\n\t\t\tif key not in classes.keys():\n\t\t\t\tclasses[key]={}\n\t\t\t\tclasses[key]['particles']=[]\n\t\t\t\tclasses[key]['euler1'] = particles[partnum]['euler1']\n\t\t\t\tclasses[key]['euler2'] = particles[partnum]['euler2']\n\t\t\t\t#classes have no inplane rotation\n\t\t\t\tclasses[key]['euler3'] = 0.0 #particles[partnum]['euler3']\n\t\t\tclasses[key]['particles'].append(particles[partnum])\n\t\tclass_stats['meanquality']=quality.mean()\n\t\tclass_stats['stdquality']=quality.std()\n\t\tclass_stats['max']=quality.max()\n\t\tclass_stats['min']=quality.min()\n\t\tapDisplay.printMsg(\"sorted %d particles into %d classes\"%(len(particles), len(classes)))\n\t\t### print stats\n\t\tprint 
\"-- quality factor stats --\"\n\t\tprint (\"mean/std :: \"+str(round(class_stats['meanquality'],2))+\" +/- \"\n\t\t\t+str(round(class_stats['stdquality'],2)))\n\t\tprint (\"min/max :: \"+str(round(class_stats['min'],2))+\" <> \"\n\t\t\t+str(round(class_stats['max'],2)))\n\t\tapDisplay.printMsg(\"finished sorting in \"+apDisplay.timeString(time.time()-t0))\n\t\treturn classes, class_stats", "def carbon_prime(C,p,p0):\r\n \r\n if p > p0:\r\n return C\r\n else:\r\n return .03", "def perplexity(self):\n raise NotImplementedError(\"To be implemented\")", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n # Threshold between unfair and very unfair.\n tsh_unfair = 0.4\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_unfair:\n # We are much higher than fair.\n cls = 4\n elif -1 * tsh_unfair <= dif < -1 * tsh_fair:\n # We are not that much higher than fair.\n cls = 3\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 2\n elif tsh_fair < dif <= tsh_unfair:\n # We are not that much lower than fair.\n cls = 1\n elif tsh_unfair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def detect_class_onpic(boxes, allowed_classes):\n object_class = \"all\"\n highest_prob = 0\n for box in boxes:\n box_prob = float(box[1].strip('%')) / 100.0\n if box[0] in allowed_classes and box_prob > highest_prob:\n highest_prob = box_prob\n object_class = box[0]\n return object_class, highest_prob", "def decision(self, neighbors=None):\n if not neighbors:\n return sorted(self.class_prb.items(), key=lambda n: n[1],\n reverse=True)\n\n else:\n n = len(neighbors)\n prb = {}\n for label in self.labels:\n prb[label] = 0.0\n for kdnode, dist in neighbors:\n index = self.train_data.index(kdnode.data)\n prb[self.train_label[index]] += 1\n for label in self.labels:\n prb[label] = prb[label] / n\n return sorted(prb.items(), key=lambda n: n[1], reverse=True)", "def proportion_of_primes(bound, **args):\n v = []\n k = 0.0\n for n in range(1, bound + 1):\n if is_prime(n):\n k += 1\n v.append((n, k / n))\n return plot_step_function(v, **args)", "def prob(self, tple, class_counts, feature_counts):\n feats = self.dataset.input_features\n unnorm = [prod(feature_counts[i][feat(tple)][c]\n for (i,feat) in enumerate(feats))\n /(class_counts[c]**(len(feats)-1))\n for c in range(self.num_classes)]\n thesum = sum(unnorm)\n return [un/thesum for un in unnorm]", "def ComparePriors():\n dataset = [60]\n high = 1000\n\n thinkplot.Clf()\n thinkplot.PrePlot(num=2)\n\n constructors = [Train, Train2, Train3]\n labels = ['uniform', 'power law', 'many companies']\n\n for constructor, label in zip(constructors, labels):\n suite = MakePosterior(high, dataset, constructor)\n suite.name = label\n thinkplot.Pmf(suite)\n\n thinkplot.Save(root='train4',\n xlabel='Number of trains',\n ylabel='Probability')", "def sort_priors(self):\n return", "def _get_model_priors(self):\n if self._alpha_model_priors:\n return self._alpha_model_priors\n # sample the variables from their corresponding distributions\n params = self._get_prior_params()\n self._alpha_model_priors = self._params2probs(params)\n return self._alpha_model_priors", "def propose(self):\n\n p = type(self)(self.n, alpha=self.alpha)\n\n return p, p.compute_prior() - self.compute_prior()", "def gini(rows):\n counts = class_counts(rows)\n print(counts)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / 
float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def GetNFactors(n, primes, n_pfactors, _):\n sqrtn = int(n ** 0.5) + 1\n\n for p in primes:\n if p > sqrtn:\n break\n if n % p == 0:\n n //= p\n if n % p == 0:\n return n_pfactors[n]\n else:\n return n_pfactors[n] + 1\n\n # n is primes\n primes.append(n)\n return 1", "def pointPerClass(classMap):\n rand1 = 100 * pcr.uniform(pcr.boolean(classMap)) \n rand2 = 100 * pcr.uniform(pcr.boolean(classMap))\n rand3 = 100 * pcr.uniform(pcr.boolean(classMap))\n \n randomMap = pcr.scalar(classMap) * rand1 * rand2 * rand3\n pointMap = pcr.ifthen(randomMap == pcr.areaminimum(randomMap, classMap), classMap)\n nrPointsPerClass = pcr.areatotal(pcr.scalar(pcr.boolean(pointMap)), classMap)\n assert pcr.cellvalue(pcr.mapmaximum(nrPointsPerClass), 0)[0] == 1\n return pointMap", "def filtro_probs(prediccion,p_min):\n clases = []\n for probabilidad in prediccion:\n if probabilidad[1]>=p_min:\n clases.append(probabilidad)\n else:\n clases.append(\"-\")\n return clases", "def purity(clusters, classes):\n\n d = defaultdict(list)\n\n # Get a list of class numbers of all examples in a cluster.\n for k, v in zip(clusters, classes):\n d[k].append(v)\n\n mayority = 0\n\n # Count the mayority class number and add it up over all clusters.\n for k in d:\n mayority += Counter(d[k]).most_common(1)[0][1]\n\n return float(mayority) / len(clusters)", "def computeNumClass(self):\n # Get the number of data\n n = len(self.data)\n # For IQR\n # First, compute the position of the first and third quartile\n fQPos = ( (n - 1) / 4 ) + 1\n tQPos = ( (3 * (n - 1)) / 4 ) + 1\n # Get the quartiles\n firstQ = 0.0\n thirdQ = 0.0\n if fQPos == round(fQPos):\n firstQ = self.data[int(fQPos)]\n else:\n up = round(fQPos)\n firstQ = self.data[up - 1] + ((self.data[up] - self.data[up - 1]) / 4.0)\n if tQPos == round(tQPos):\n thirdQ = self.data[int(tQPos)]\n else:\n up = round(tQPos)\n thirdQ = self.data[up - 1] + (3 * (self.data[up] - self.data[up - 1]) / 4.0)\n # Compute the IQR\n IQR = thirdQ - firstQ\n # Compute the number of classes and its length\n self.numBins = int(2 * IQR * m.pow(n, -1/3))\n self.computeBinWidth()", "def class_distribution(y): \n # ===================== PLEASE WRITE HERE =====================\n \n bin_array = np.bincount(y)\n n_class1 = bin_array[1]\n n_class2 = bin_array[2]\n n_class3 = bin_array[3]\n \n # ===================== PLEASE WRITE HERE =====================\n \n print('Number of samples in class_1:', n_class1)\n print('Number of samples in class_2:', n_class2)\n print('Number of samples in class_3:', n_class3)", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def prim_method(self):", "def prim_method(self):", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def get_pc_per_range(model, class_name):\n class_total = model.class_counts[class_name]\n if model.num_runs is not None:\n class_total = model.num_runs * class_total * .33\n\n true_positives, totals = model.range_metrics_10[class_name]\n purities = [] # Accuracy per range (true positive/total)\n comps = []\n TP_count = 0\n total_count = 0\n\n for index in reversed(range(len(true_positives))):\n cur_p = 0 # Current purity\n cur_c = 0 # Current completeness\n TP_count += true_positives[index]\n total_count += totals[index]\n if total_count != 0:\n # positive class samples / totals # with prob in range\n cur_p = TP_count / total_count\n if class_total != 0:\n cur_c = TP_count / class_total\n\n 
purities.append(cur_p)\n comps.append(cur_c)\n purities.reverse()\n comps.reverse()\n return purities, comps", "def _m(self):\n return self._k // self._n_classes", "def _computeCondProb(self, testData, classValue):\n classAttrObj = self._classAttrs[classValue]\n frequencyDict = classAttrObj.frequencyDict\n totalDocsInClass = classAttrObj.totalDocsInClass\n\n result = (totalDocsInClass/self._totalTrainDocs) # P(c)\n # Compute P(t|c) for each t in d\n for word in testData:\n result *= ((frequencyDict.get(word, 0) + 1) / (sum(frequencyDict.values()) + self._sizeOfVocabulary))\n return result", "def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n", "def _get_guide_priors(self):\n if not self._alpha_guide_prior_params:\n # create initial parameters\n params = self._get_prior_params()\n # register all parameters in pyro\n for p, v in iteritems(params):\n pyro.param(p, v)\n self._alpha_guide_prior_params = dict(\n self._param_store.named_parameters()\n )\n else:\n # register all parameters in pyro\n for p, v in iteritems(self._alpha_guide_prior_params):\n pyro.param(p, v)\n return self._params2probs(self._alpha_guide_prior_params)", "def prior_of_priors(self, tt):\n for i in xrange(self.n_params): \n try: \n p_theta *= self.param_obj.prior()[i].pdf(tt[i]) \n\n except UnboundLocalError: \n p_theta = self.param_obj.prior()[i].pdf(tt[i]) \n\n return p_theta", "def componeProbs(p,p_prime):\n return p + p_prime * (1-p)", "def __convert_prob_into_class(self, probs):\n probs = T.set_subtensor(probs[probs > 0.5], 1)\n return T.set_subtensor(probs[probs <= 0.5], 0)", "def percent_to_class(prc, fair):\n assert len(prc) == 1, \"Should be only one column.\"\n prc = prc[0]\n\n # Threshold between fair and unfair.\n tsh_fair = 0.1\n\n dif = (fair - prc) / fair\n if dif < -1 * tsh_fair:\n # We are much higher than fair.\n cls = 2\n elif -1 * tsh_fair <= dif <= tsh_fair:\n # We are fair.\n cls = 1\n elif tsh_fair < dif:\n # We are much lower than fair.\n cls = 0\n else:\n assert False, \"This should never happen.\"\n return cls", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n else:\n components = []\n multiplicities = []\n for x in self.irreducible_components():\n if components.count(x) == 0:\n components.append(x)\n multiplicities.append(1)\n else:\n y = components.index(x)\n multiplicities[y] = multiplicities[y]+1\n\n sizes = [ x.class_size() for x in components ]\n if NotImplemented in sizes:\n print(\"Size unknown\")\n return NotImplemented\n else:\n return prod( [binomial(sizes[i]+multiplicities[i]-1,\n multiplicities[i] ) for i in range (0,len(sizes))])", "def getpriPridict(cls, instcls, stage):\n if not any(pridict):\n cls.initpridict()\n try:\n pri = pridict[instcls][stage]\n except AttributeError as err:\n print(\"Err\", err)\n return 0\n return pri", "def reprime(self):\n self.__primed = 1", "def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if remainder == 1:\n factors.add(i)\n\n return factors", "def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities", "def get_priors(heuristic_lengthscales, use_priors=True, is_composite=False):\n # Determine if we use 
priors\n lengthscale_prior = None\n outputscale_prior = None\n if use_priors:\n lengthscale_prior = GammaPrior(0.01 * heuristic_lengthscales,\n 0.01 * torch.ones(heuristic_lengthscales.size()))\n print(\"LENGTHSCALE MEAN: \\n{}\".format(lengthscale_prior.mean))\n print(\"LENGTHSCALE VARIANCE: \\n{}\".format(lengthscale_prior.variance))\n\n if is_composite:\n return lengthscale_prior, lengthscale_prior, outputscale_prior\n\n else:\n return lengthscale_prior, outputscale_prior", "def __init__(self,x,dof):\n self.x=x\n self.dof=dof\n numSeg=10\n p1=self.P(numSeg)\n p2=0\n while True:\n numSeg*=2\n p2=self.P(numSeg)\n if self.checarPs(p1,p2):\n break\n else:\n p1=p2\n print(round(p2,5))", "def obtain_parametric_priors(resolution, num_actions):\n # maximum prior magnitude for any discritized state\n max_prior = 10\n \n priors = []\n \n for p in range(resolution):\n for v in range(resolution):\n for a in range(num_actions):\n priors.append(set_parametric_prior(resolution, p, v, a, max_prior))\n \n priors = np.array(priors).reshape(resolution,resolution,num_actions)\n #print(\"priors\", priors[5,5,0])\n return priors", "def optimal_instances_per_class(df, factor=1.0, draw=False):\n # `bincount` returns the number of instances we have for each website\n counts = np.bincount(df.class_label.tolist())\n hist, bin_edges = np.histogram(counts)\n if draw:\n inst_counts = get_num_instances(df)\n inst_counts.hist(cumulative=-1, bins=100)\n plt.xlabel('Num of instances')\n plt.ylabel('Num of classes with x or more insts')\n plt.show()\n\n # scale the y-axis\n dx = bin_edges[1] - bin_edges[0]\n cum_hist = np.cumsum(hist) * dx\n\n # get the inverse cumulative sum\n inv_cum_hist = max(cum_hist) - cum_hist\n\n # compute the harmonic mean of tuples (y=f(x), x)\n hms = [harmonic_mean(x, y, factor) if y > 0 and x > 0 else 0\n for x, y in zip(bin_edges[1:], inv_cum_hist)]\n\n print(hms)\n\n # find index for max harmonic mean\n i = np.argmax(hms)\n\n # this is the optimal number of instances:\n opt_num_insts = int(bin_edges[i])\n\n # which leaves us with this number of classes:\n opt_num_classes = len(counts[counts >= opt_num_insts])\n\n if draw:\n print(\"Optimal number of instances:\", opt_num_insts)\n print(\"Optimal number of classes:\", opt_num_classes)\n\n return opt_num_insts, opt_num_classes", "def fairness_discrepancy(props, n_classes, norm=0):\n # unique, freq = np.unique(data, return_counts=True)\n # props = freq / len(data) #Proportion of data that belongs to that data\n \n # #------------------Modification to correct the zero support problem------------------------------------------------\n # temp=np.zeros(n_classes)\n # temp[unique]=props\n # props=temp\n # #------------------------------------------------------------------------------\n \n # print (freq)\n truth = 1./n_classes\n\n\n # L2 and L1=================================================================================================\n #(Remove Normalisation)\n l2_fair_d = np.sqrt(((props - truth)**2).sum())\n l1_fair_d = abs(props - truth).sum()\n\n # q = props, p = truth\n # kl_fair_d = (props * (np.log(props) - np.log(truth))).sum()\n\n #Cross entropy\n p=np.ones(n_classes) \n # ce=cross_entropy(p,props,n_classes)-cross_entropy(p,p,n_classes)\n \n #information specificity=====================================================================================\n rank=np.linspace(1,n_classes-1,n_classes-1)\n rank[::-1].sort() #Descending order\n perc=np.array([i/np.sum(rank) for i in rank])\n \n \n props[::-1].sort()\n alpha=props[1:]\n 
specificity=abs(props[0]-np.sum(alpha*perc))\n info_spec=(l1_fair_d+specificity)/2\n \n #Wasstertein Distance\n wd=wasserstein_distance(props,np.ones(len(props))*truth)\n \n #Wassertein Specificity\n wds=(wd+specificity)/2\n if norm==0:\n return l2_fair_d, l1_fair_d,info_spec,specificity,wd,wds\n # return l2_fair_d, l1_fair_d,info_spec,specificity\n else:\n return l2_fair_d/metric_max(n_classes,\"L2\"), l1_fair_d/metric_max(n_classes,\"L1\"),info_spec/metric_max(n_classes,\"Is\"),specificity,wd/metric_max(n_classes,\"Wd\")\n # return l2_fair_d/metric_max(n_classes,\"l2\"), l1_fair_d/metric_max(n_classes,\"l1\"),info_spec/metric_max(n_classes,\"is\"),specificity", "def factorize(n:int,primesDict:dict = primesDict):\r\n\r\n \r\n if isPrime(n,primesDict):\r\n return {n:1}\r\n\r\n factors = {}\r\n\r\n lastPrime = getLastPrime(primesDict)\r\n print (lastPrime,\"Lastprimes\")\r\n if lastPrime < n:\r\n print (\"Creating DictS\")\r\n\r\n prma(n,lastPrime,primesDict)\r\n\r\n for i in primesDict:\r\n if n%i == 0 :\r\n count = 0\r\n while n % i**(count+1) == 0 :\r\n count+=1 \r\n factors[i]= count\r\n\r\n return factors", "def get_probs(self):\n\t\tprobArray = []\n\t\tfor combination in self.codepool:\n\t\t\tif self.feasible(combination):\n\t\t\t\tprobArray.append(self.get_probability(combination))\n\t\t\telse:\n\t\t\t\tprobArray.append(0)\n\t\tprobArray = np.array(probArray) / np.sum(probArray)\n\t\treturn probArray", "def set_priors(parnames, limits, linenames, vsyst, nssps=1):\n priors = {}\n for parname in parnames:\n name = parname.split(\"_\")[0]\n if name in limits: #all the CvD ssp parameters\n vmin, vmax = limits[name]\n# print(parname,vmin,vmax)\n delta = vmax - vmin\n priors[parname] = stats.uniform(loc=vmin, scale=delta)\n elif parname in vsyst:\n priors[parname] = stats.norm(loc=vsyst[parname], scale=500)\n elif parname == \"eta\": #what does eta do?\n priors[\"eta\"] = stats.uniform(loc=1., scale=19)#uniform distribution in range [1,19]\n elif parname == \"nu\": #what does nu do?\n priors[\"nu\"] = stats.uniform(loc=2, scale=20)#uniform distribution in range [2,20]\n elif parname == \"sigma\":\n priors[\"sigma\"] = stats.uniform(loc=50, scale=300)#obtains the uniform distribution on [loc, loc + scale]. i.e. uniform in range [50,300]\n elif parname == \"sigma_gas\":\n priors[parname] = stats.uniform(loc=50, scale=100)#uniform between [50,100]km/s\n elif name == \"w\":\n priors[parname] = stats.uniform(loc=0, scale=1)#weights uniform between 0 and 1\n elif name in linenames:\n# priors[parname] = stats.expon(loc=0, scale=0.5)#favors low values>~0; make even stronger by decreasing scale. \n priors[parname] = stats.expon(loc=0, scale=0.2)#favors low values>~0; make even stronger by decreasing scale. 
\n elif name in [\"pred\", \"pblue\"]:\n porder = int(parname.split(\"_\")[1])\n if porder == 0:\n mu, sd = 1 / nssps, 1\n a, b = (0 - mu) / sd, (np.infty - mu) / sd\n priors[parname] = stats.truncnorm(a, b, mu, sd)\n else:\n priors[parname] = stats.norm(0, 0.05)\n else:\n print(f\"parameter without prior: {parname}\")\n return priors", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def class_size(self):\n if not self.is_mutation_finite():\n return infinity\n\n # type A (finite and affine)\n if self._letter == 'A':\n # the formula is taken from Torkildsen - Counting\n # cluster-tilted algebras of type A\n if self.is_finite():\n n = self._rank\n a = binomial( 2*(n+1), n+1 ) // (n+2)\n if n % 2 == 1:\n a += binomial( n+1, (n+1)//2 )\n if n % 3 == 0:\n a += 2 * binomial( 2*n//3, n//3 )\n return a // (n+3)\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n elif self.is_affine():\n i,j = self._bi_rank\n i = ZZ(i)\n j = ZZ(j)\n n = i+j\n f = Euler_Phi()\n if i == j:\n return ( binomial( 2*i,i ) +\n sum( f(k) * binomial(2*i//k,i//k)**2\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // n ) // 4\n else:\n return sum( f(k) * binomial(2*i//k,i//k) *\n binomial(2*j//k,j//k)\n for k in [k for k in i.divisors()\n if k in j.divisors()] ) // ( 2 * n )\n\n # types B and C (finite and affine)\n elif self._letter in ['B', 'C']:\n # this formula is proven but nowhere published correctness\n # is clear enough that I don't think a warning is needed\n if self.is_finite():\n n = self._rank\n return binomial(2 * n, n) // (n + 1)\n\n elif self._letter in ['BB','CC']:\n # these two formulas are not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n if n%2==1:\n return binomial( 2*n-1, n-1 )\n else:\n return binomial( 2*n-1, n-1 ) + binomial( n-1, n//2 -1 )\n\n # type BC (affine)\n elif self._letter == 'BC':\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 1\n return binomial( 2*n, n )\n\n # types BD and CD (affine)\n elif self._letter in ['BD','CD']:\n # this formula is not yet proven\n print(Warning(\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if self.is_affine():\n if self._twist == 1:\n n = self._rank - 2\n return 2*binomial( 2*n, n )\n\n # type D (finite and affine)\n elif self._letter == 'D':\n # the formula is taken from Bastian, Prellberg, Rubey, Stump\n if self.is_finite():\n if self._rank == 4:\n return 6\n else:\n f = Euler_Phi()\n n = ZZ(self._rank)\n return sum( f( n//k ) * binomial( 2*k, k )\n for k in n.divisors() ) // (2*n)\n # this formula is not yet proven\n elif self.is_affine():\n n = self._rank - 3\n if n == 2:\n return 9\n else:\n print(Warning (\"Warning: This method uses a formula \"\n \"which has not been proved correct.\"))\n if n%2==1:\n return 2*binomial(2*n,n)\n else:\n return 2*binomial(2*n,n) + binomial(n, n//2)\n\n # the exceptional types are hard-coded\n # type E (finite, affine and elliptic)\n elif self._letter == 'E':\n 
if self.is_finite():\n if self._rank == 6:\n return 67\n elif self._rank == 7:\n return 416\n elif self._rank == 8:\n return 1574\n elif self.is_affine():\n if self._rank == 7:\n return 132\n elif self._rank == 8:\n return 1080\n elif self._rank == 9:\n return 7560\n elif self.is_elliptic():\n if self._rank == 8:\n return 49\n elif self._rank == 9:\n return 506\n elif self._rank == 10:\n return 5739\n\n # type F\n elif self._letter == 'F':\n if self.is_finite():\n return 15\n elif self.is_affine():\n return 60\n elif self.is_elliptic():\n if self._twist == [1,2]:\n return 90\n if self._twist == [1,1] or self._twist == [2,2]:\n return 35\n\n # type G\n elif self._letter == 'G':\n if self.is_finite():\n return 2\n elif self.is_affine():\n return 6\n elif self.is_elliptic():\n if self._twist == [1,3]:\n return 7\n if self._twist == [1,1] or self._twist == [3,3]:\n return 2\n\n # type X\n elif self._letter == 'X':\n if self._rank == 6:\n return 5\n elif self._rank == 7:\n return 2\n\n # otherwise the size is returned to be unknown\n else:\n print(\"Size unknown\")\n return NotImplemented", "def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n", "def non_maxima_suppression(boxes, probs, classes_num, thr=0.2):\n for i, box in enumerate(boxes):\n if probs[i] == 0:\n continue\n for j in range(i+1, len(boxes)):\n if classes_num[i] == classes_num[j] and iou(box, boxes[j]) > thr:\n probs[j] = 0.0\n\n return probs", "def factors(n, primes):\n\n for p in takewhile(lambda p: p*p < n, primes):\n exponent = 0\n\n while n % p == 0:\n exponent += 1\n n /= p\n\n if exponent > 0:\n yield p, exponent\n\n if n > 1:\n yield n, 1", "def set_priors(self,alpha):\n\n\t\tassert type(alpha) == float\n\t\tself.alpha = alpha", "def start_prime_test():", "def return_class_probas(pnode, pY):\n\n nof_objects = pY.shape[0]\n nof_classes = pY.shape[1]\n class_probas = numpy.zeros(nof_classes)\n\n for i in range(nof_objects):\n class_probas += pnode[i] * pY[i, :]\n\n # class_probas = class_probas/numpy.sum(pnode)\n class_probas = class_probas / len(pnode)\n # class_probas = pY\n\n return class_probas", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def __init__(self) :\n self.probabilities_ = None", "def get_num_classes(self):", "def get_prime(self):\n return self.prime", "def primes(n):\n primfac = {}\n primfac = defaultdict(lambda: 0, primfac)\n while (n % 2) == 0:\n primfac[2] += 1 \n n //= 2\n d = 3\n while d*d <= n:\n while (n % d) == 0:\n primfac[d] += 1 # supposing you want multiple factors repeated\n n //= d\n d += 2\n if n > 1:\n primfac[n] = 1\n return primfac", "def calculate_class_apriori_probability(self, class_name):\n unique, counts = np.unique(self.class_data, return_counts=True)\n frequency_dict = dict(zip(unique, counts))\n return float(frequency_dict[class_name])/len(self.class_data)", "def __init__(self, count):\n assert count >= 0\n self.is_proportion = count < 1.0\n self.cutoff = count", "def classes(self):\n #print \"making classes again!\"\n l = []\n for p in self.marks:\n l.append(psi_class(self,p))\n for d in range(1, self.dimension + 1):\n l.append(kappa_class(self,d))\n for i in range(1, self.genus+1):\n l.append(chern_char(self, 2*i-1))\n if True:#self.genus != 0:\n l.append(irreducible_boundary(self))\n marks = set(self.marks)\n reducible_boundaries = []\n if self.n != 0:\n first_mark_list = [marks.pop()] \n for g1 in range(0, self.genus + 1):\n for p in 
subsets(marks):\n r_marks = set(first_mark_list + p)\n if 3*g1 - 3 + len(r_marks) + 1 >= 0 and 3*(self.genus-g1) - 3 + self.n - len(r_marks) + 1 >= 0:\n reducible_boundaries.append( reducible_boundary(self, Mgn(g1, r_marks)) )\n \n reducible_boundaries.sort(key = lambda b: sorted(list(b.component1.marks)))\n reducible_boundaries.sort(key = lambda b: len(b.component1.marks))\n reducible_boundaries.sort(key = lambda b: b.component1.genus)\n \n else: #self.n == 0\n for g1 in range(1, floor(self.genus/2.0)+1):\n reducible_boundaries.append(reducible_boundary(self, Mgn(g1, []))) \n \n \n l += reducible_boundaries \n \n for i in range(1,self.genus+1):\n l.append(lambda_class(self,i))\n return l", "def setpriPridict(cls, instcls, stage, pri):\n if not any(pridict):\n cls.initpridict()\n try:\n pridict[instcls][stage] = pri\n except AttributeError as err:\n print(\"Err\", err)\n return 0\n return pridict[instcls][stage]", "def solution(resources, args):\n largest_prime_factor = 1\n number = args.number\n prime_generator = primes.get_prime_generator()\n\n while number > 1:\n prime = next(prime_generator)\n if number % prime == 0:\n number /= prime\n largest_prime_factor = prime\n\n if largest_prime_factor == 1:\n largest_prime_factor = args.number\n\n return largest_prime_factor", "def __init__(self, N, S, students, leaders):\n self.N = N\n self.S = S\n self.G = int(math.ceil(N/S))\n self.partitions = []\n self.students = students\n self.leaders = leaders", "def primal_problem(\n states: list[np.ndarray], probs: list[float] = None, dist_method=\"min-error\"\n) -> float:\n dim_x, _ = states[0].shape\n\n obj_func = []\n meas = []\n constraints = []\n\n dim = int(np.log2(dim_x))\n dim_list = [2] * int(np.log2(dim_x))\n\n sys_list = list(range(1, dim, 2))\n\n # Unambiguous consists of k + 1 operators, where the outcome of the k+1^st corresponds to the\n # inconclusive answer.\n if dist_method == \"unambiguous\":\n for i in range(len(states) + 1):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(partial_transpose(meas[i], sys_list, dim_list) >> 0)\n\n for i, _ in enumerate(states):\n for j, _ in enumerate(states):\n if i != j:\n constraints.append(probs[j] * cvxpy.trace(states[j].conj().T @ meas[i]) == 0)\n\n # Minimize error of distinguishing via PPT measurements.\n elif dist_method == \"min-error\":\n for i, _ in enumerate(states):\n meas.append(cvxpy.Variable((dim_x, dim_x), PSD=True))\n constraints.append(partial_transpose(meas[i], sys_list, dim_list) >> 0)\n\n for i, _ in enumerate(states):\n obj_func.append(probs[i] * cvxpy.trace(states[i].conj().T @ meas[i]))\n\n constraints.append(sum(meas) == np.identity(dim_x))\n\n objective = cvxpy.Maximize(sum(obj_func))\n problem = cvxpy.Problem(objective, constraints)\n sol_default = problem.solve()\n\n return sol_default", "def is_prime(self):\n pass", "def prob4():\n#raise NotImplementedError(\"Problem 4 Incomplete\")\n h = lambda x : x[0] < -1 and x[1] > 1\n f = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([0,0]),cov=np.eye(2))\n g = lambda x : stats.multivariate_normal.pdf(x,mean=np.array([-1,1]),cov=np.eye(2))\n X = np.random.multivariate_normal(mean=np.array([-1,1]),cov=np.eye(2),size=10000)\n return 1./10000*np.sum(np.apply_along_axis(h,1,X)*np.apply_along_axis(f,1,X)/np.apply_along_axis(g,1,X))", "def gini(self, rows):\n counts = self.class_counts(rows)\n impurity = 1\n for lbl in counts:\n prob_of_lbl = counts[lbl] / float(len(rows))\n impurity -= prob_of_lbl**2\n return impurity", "def 
_calc_train_class_prb(self, labels_list=None):\n if not labels_list:\n return {}\n\n n = len(labels_list)\n label_num = len(self.labels)\n prb = {}\n for l in self.labels:\n # tmp = (l, sum(1 if v == l else 0 for k, v in train_data)/n)\n prb[l] = (labels_list.count(l) + 1.0) / (n + label_num)\n return prb", "def probs(self) -> List:\n return self._probs", "def classify_proba(self, X):\n return self._expectation(X)", "def __init__(self, prim):\n self.actual = prim", "def __init__(self, n, e):\n\t\tself.known_primes = [2,3,5,7,11,13,17,19,23,29,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,\n\t\t\t103,107,109,113,127,131,137,139,149,151,157,163,167,173,179,181,191,193,197,\n\t\t\t199,211,223,227,229,233,239,241,251,257,263,269,271,277,281,283,293,307,311,\n\t\t\t313,317,331,337,347,349,353,359,367,373,379,383,389,397,401,409,419,421,431,\n\t\t\t433,439,443,449,457,461,463,467,479,487,491,499,503,509,521,523,541,547,557,\n\t\t\t563,569,571,577,587,593,599,601,607,613,617,619,631,641,643,647,653,659,661,\n\t\t\t673,677,683,691,701,709,719,727,733,739,743,751,757,761,769,773,787,797,809,\n\t\t\t811,821,823,827,829,839,853,857,859,863,877,881,883,887,907,911,919,929,937,\n\t\t\t941,947,953,967,971,977,983,991,997,1009,1013,1019,1021,1031,1033,1039,1049,\n\t\t\t1051,1061,1063,1069,1087,1091,1093,1097,1103,1109,1117,1123,1129,1151,1153,\n\t\t\t1163,1171,1181,1187,1193,1201,1213,1217,1223,1229,1231,1237,1249,1259,1277,\n\t\t\t1279,1283,1289,1291,1297,1301,1303,1307,1319,1321,1327,1361,1367,1373,1381,\n\t\t\t1399,1409,1423,1427,1429,1433,1439,1447,1451,1453,1459,1471,1481,1483,1487,\n\t\t\t1489,1493,1499,1511,1523,1531,1543,1549,1553,1559,1567,1571,1579,1583,1597,\n\t\t\t1601,1607,1609,1613,1619,1621,1627,1637,1657,1663,1667,1669,1693,1697,1699,\n\t\t\t1709,1721,1723,1733,1741,1747,1753,1759,1777,1783,1787,1789,1801,1811,1823,\n\t\t\t1831,1847,1861,1867,1871,1873,1877,1879,1889,1901,1907,1913,1931,1933,1949,\n\t\t\t1951,1973,1979,1987,1993,1997,1999,2003,2011,2017,2027,2029,2039,2053,2063]\n\t\tself.hidden_primes_product = n\n\t\tself.public_key = e\n\t\tself.private_key = None", "def classifier(x):\n return x[0] - x[1] + 4 < 0", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def getPrimeFactors(num):\n n = num\n primes = {}\n\n p = 2\n sqrt = math.sqrt(num)\n\n def checkAndUpdate(inc):\n nonlocal n\n nonlocal p\n nonlocal primes\n if n % p == 0:\n if str(p) in primes.keys():\n primes[str(p)] += 1\n else:\n primes[str(p)] = 1\n n /= p\n else:\n p += inc\n \n while p == 2 and p <= n:\n checkAndUpdate(1)\n while p <= n and p <= sqrt:\n checkAndUpdate(2)\n if len(primes.keys()) == 0:\n primes[str(num)] = 1\n elif n != 1:\n primes[str(n)] = 1\n return primes", "def classify(cls, i):\r\n sums = [0,0]\r\n sums[int(WekaClassifier_0.classify(i))] += 1.2134644010075073\r\n sums[int(WekaClassifier_1.classify(i))] += 0.57177685574344\r\n sums[int(WekaClassifier_2.classify(i))] += 0.40154496884580815\r\n sums[int(WekaClassifier_3.classify(i))] += 0.35999934750119333\r\n sums[int(WekaClassifier_4.classify(i))] += 0.36937329276984643\r\n sums[int(WekaClassifier_5.classify(i))] += 0.16351990613377496\r\n sums[int(WekaClassifier_6.classify(i))] += 0.1396078832952814\r\n sums[int(WekaClassifier_7.classify(i))] += 0.15882943193304253\r\n sums[int(WekaClassifier_8.classify(i))] += 0.1284505298097081\r\n sums[int(WekaClassifier_9.classify(i))] += 0.09903161346969916\r\n 
sums[int(WekaClassifier_10.classify(i))] += 0.19672733155497407\r\n sums[int(WekaClassifier_11.classify(i))] += 0.17672847093616786\r\n sums[int(WekaClassifier_12.classify(i))] += 0.18729151620386228\r\n sums[int(WekaClassifier_13.classify(i))] += 0.24810462685136855\r\n sums[int(WekaClassifier_14.classify(i))] += 0.23706555932983922\r\n sums[int(WekaClassifier_15.classify(i))] += 0.14276017880034322\r\n sums[int(WekaClassifier_16.classify(i))] += 0.2655207144416779\r\n sums[int(WekaClassifier_17.classify(i))] += 0.24759035974335297\r\n sums[int(WekaClassifier_18.classify(i))] += 0.14255881855351965\r\n sums[int(WekaClassifier_19.classify(i))] += 0.1181101393342422 \r\n return float(sums[0] - sums[1])", "def classify(self, instance):\n numerator = 0\n denominator = 0\n for training_instance in self.training_data:\n h_value = self._h_function(instance, training_instance[0])\n numerator = numerator + h_value*training_instance[1]\n denominator = denominator + h_value\n return numerator/denominator", "def init_probability_dict(self):\n for x in xrange(0,10):\n self.class_probabilities[x] = self.init_probability_2d()", "def factor_in_multiple_professors(self):\n professors = [professor for professor in self.course.professors if professor.lower() != \"none\"]\n number_professors = len(set(professors))\n if number_professors > 1:\n self.score = self.score + number_professors" ]
[ "0.66456854", "0.6316928", "0.6273953", "0.60905004", "0.6055876", "0.60154176", "0.5993265", "0.59645927", "0.59377426", "0.5896327", "0.58948433", "0.58720356", "0.58248436", "0.5820803", "0.58123535", "0.5798083", "0.57771003", "0.5732761", "0.5723448", "0.57202864", "0.5715706", "0.5703063", "0.56836087", "0.5680424", "0.5671575", "0.56651324", "0.5663909", "0.56608474", "0.56446904", "0.5637236", "0.5617636", "0.56107754", "0.55928665", "0.55895376", "0.5584581", "0.55804324", "0.558003", "0.5571902", "0.55660063", "0.55660063", "0.55646574", "0.5563585", "0.5558303", "0.555732", "0.55518746", "0.55453765", "0.55426174", "0.55395937", "0.5531384", "0.5530471", "0.5526897", "0.55211365", "0.55190337", "0.5517503", "0.55029136", "0.5498567", "0.54968536", "0.5494666", "0.5491102", "0.5487706", "0.5476384", "0.54709655", "0.54648423", "0.54599553", "0.54599553", "0.5453153", "0.54331565", "0.54284537", "0.54120564", "0.5408297", "0.54081017", "0.5401262", "0.54005796", "0.54005796", "0.54005796", "0.539656", "0.53965586", "0.5394005", "0.53828895", "0.53787965", "0.5373641", "0.53709143", "0.5368248", "0.5368218", "0.53597236", "0.53572786", "0.5355978", "0.5345065", "0.5343879", "0.5341475", "0.53407586", "0.53341544", "0.5326323", "0.5320414", "0.5312234", "0.5311984", "0.5311955", "0.53067887", "0.5300547", "0.5292743" ]
0.699718
0
Calculate the likelihoods for multinomial
def calculate_likelihoods_multinomial(data, labels, vocab):
    likelihoods = {}
    counts = {}
    words = {}
    classes = set(labels)
    vocabLen = len(vocab)
    for cls in classes:
        # Initialize
        counts[cls] = {}
        words[cls] = 0
    # Perform counts
    line = 0
    for doc in data:
        cls = labels[line]
        wordCounts = counts[cls]
        for (word, count) in doc:
            if word not in wordCounts:
                wordCounts[word] = 0
            wordCounts[word] += count
            words[cls] += count
        line += 1
    # Compute likelihoods
    for cls in counts:
        wordCounts = counts[cls]
        likelihoods[cls] = {}
        wordsInClass = words[cls]
        for word in wordCounts:
            likelihoods[cls][word] = laplace_smooth(wordCounts[word], wordsInClass, vocabLen)
        # Add all training words:
        for word in vocab:
            if word not in likelihoods[cls]:
                likelihoods[cls][word] = laplace_smooth(0, wordsInClass, vocabLen)
        # Special laplace smoothing for words not found in training data
        likelihoods[cls][None] = laplace_smooth(0, wordsInClass, vocabLen)
    return likelihoods
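The function above calls a laplace_smooth helper that is not included in this record. Below is a minimal sketch of that helper under the usual add-one (Laplace) smoothing definition, (count + 1) / (class total + vocabulary size), followed by a small usage example; the helper body, its parameter names, and the toy corpus are assumptions for illustration, not the original implementation.

def laplace_smooth(count, classTotal, vocabSize):
    # Assumed add-one smoothing: keeps unseen words from getting zero probability.
    return (count + 1.0) / (classTotal + vocabSize)

# Hypothetical toy corpus: each document is a list of (word, count) pairs.
data = [[("spam", 3), ("win", 1)], [("hello", 2), ("meeting", 1)]]
labels = ["spam", "ham"]
vocab = {"spam", "win", "hello", "meeting"}

likelihoods = calculate_likelihoods_multinomial(data, labels, vocab)
print(likelihoods["spam"]["win"])  # smoothed P(win | spam) = (1 + 1) / (4 + 4)
print(likelihoods["ham"][None])    # fallback probability for words unseen in class "ham"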
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def multinomial_likelihood(m_true, alpha, alpha0, m_probs):\n\n ll = tf.reduce_sum(input_tensor=m_true * (tf.math.log(alpha0) - tf.math.log(alpha)), axis=1, keepdims=True)\n ll = tf.reduce_mean(input_tensor=ll)\n return ll", "def multinomial_nll(true_counts, logits):\n counts_per_example = tf.reduce_sum(true_counts, axis=-1)\n dist = tfp.distributions.Multinomial(total_count=counts_per_example,\n logits=logits)\n return (-tf.reduce_sum(dist.log_prob(true_counts)) / \n tf.cast(tf.shape(true_counts)[0], dtype=tf.float32))", "def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()", "def calculate_log_p_multinomial(self, n_counts, nbins, batch_size):\n\n n_counts = tf.cast(n_counts, tf.float32)\n nbins = tf.cast(nbins, tf.float32)\n batch_size = tf.cast(batch_size, tf.float32)\n\n term_a = tf.lgamma(batch_size + 1)\n term_b = tf.reduce_sum(tf.lgamma(n_counts + 1))\n term_c = batch_size * tf.log(nbins)\n\n return term_a - term_b - term_c", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def likelihood(self):\n \n raise NotImplementedError()", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def regularized_multinomial_likelihood(m_true, alpha, alpha0, m_probs, global_step, annealing_step=1000, max_lambda=1.0):\n\n ll = multinomial_likelihood(m_true, alpha, alpha0, m_probs)\n kl = kullback_leibler_dirichlet(m_true, alpha)\n lamb = tf.cast(tf.minimum(max_lambda, global_step / annealing_step), dtype=tf.float32)\n loss = ll + lamb * kl\n return loss", "def likelihood(self, data, hypo):\n tagged, n, k = data\n if hypo < tagged + n - k:\n return 0\n\n p = tagged / hypo\n like = thinkbayes.eval_binomial_pmf(k, n, p)\n return like", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def multinomial_prob(counts, probs):\n return nCkarray(*counts.values) * (probs ** counts).prod()", "def GSM_log_likelihood(X, model):\n D, M = X.shape\n k = model.mix.shape[0]\n log_likelihood = 0\n for i in range(M):\n logpdf_X = 0\n for j in range(k):\n mvn = multivariate_normal(cov=model.cov[j, :])\n logpdf_X = mvn.logpdf(x=X[:, i]) * model.mix[j]\n log_likelihood += logpdf_X\n return log_likelihood", "def 
calculate_likelihoods_bernoulli(data, labels, vocab):\r\n classes = set(labels)\r\n likelihoods = {}\r\n # Calculate likelihood for each class\r\n for cls in classes:\r\n documentsInClass = [set(map(lambda y: y[0], data[x])) for x in range(len(data)) if labels[x] == cls]\r\n numDocsInClass = len(documentsInClass)\r\n results = {}\r\n for word in vocab:\r\n numDocsWithWordInClass = len(filter(lambda x: word in x, documentsInClass))\r\n # Binary variable-- either present or not present\r\n results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)\r\n # Special laplace smoothing for words not found in training data\r\n results[None] = laplace_smooth(0, numDocsInClass, 2)\r\n likelihoods[cls] = results\r\n return likelihoods", "def multinomial(rng, logits, num_samples):\n # NOTE(tycai): Currently, tf.multinomial uses CDF for non-XLA CPU only.\n # We may want to switch to the Gumbel trick as used in XLA.\n if len(logits.shape) > 2 or not logits.shape:\n raise ValueError(\"Logits must be rank-1 or rank-2.\")\n probs = jax.nn.softmax(logits)\n probs = jnp.cumsum(probs, axis=-1)\n # Special-case num_samples == 1 due to TPU padding, as in TF2XLA.\n # https://github.com/tensorflow/tensorflow/blob/b1608511d5a50d05825c4025b0c347e8689a241f/tensorflow/compiler/tf2xla/kernels/categorical_op.cc#L79\n if num_samples == 1:\n a = jax.random.uniform(rng, logits.shape[:-1] + (1,))\n out = jnp.argmin(a > probs, axis=-1)\n return out[..., None]\n else:\n a = jax.random.uniform(rng, (num_samples,) + logits.shape[:-1] + (1,))\n out = jnp.argmin(a > probs, axis=-1)\n return jnp.transpose(out)", "def log_likelihood(X, Z, variable_types):\n\tk = Z['pi_unconstrained'].shape[1]+1 # the number of mixture components\n\t## We gather the log probabilities of each indiv in batch for each mixture component into\n\t## a matrix of size (B x k), where B is the batch size.\n\tlogps = torch.zeros([len(X), k])\n\t## First insert the mixture weight contribution to the array\n\tlogps += logsoftmax(Z['pi_unconstrained'], dim=-1)\n\t## Next loop over the features and sum the contributions to logps\n\tfor i, (key, z) in enumerate(Z.items()):\n\t\tif key not in ['pi_unconstrained']:\n\t\t\tdata = torch.Tensor(X[key].values).unsqueeze(-1)\n\t\t\tdist = variable_types[key]\n\t\t\tif dist == 'Categorical':\n\t\t\t\talpha = softmax(z, dim=-1, additional=-50.)\n\t\t\t\tlogps += Categorical(probs = alpha).log_prob(data)\n\t\t\telif dist == 'Bernoulli':\n\t\t\t\ttheta = z\n\t\t\t\tlogps += Bernoulli(logits = theta).log_prob(data)\n\t\t\telif dist == 'Beta':\n\t\t\t\talpha, beta = torch.exp(z).transpose(0,1)\n\t\t\t\tlogps += Beta(alpha, beta).log_prob(data)\n\t## Compute logsumexp over the mixture components and return the sum over data elements.\n\tlogp = torch.logsumexp(logps, dim=-1)\n\treturn logp.sum()", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def get_log_likelihood(phi, pred, t, dot_product, weight, reg= 1):\n prior = -0.5* np.sum(np.multiply(weight, weight))\n likelihood = np.multiply(t, np.log(pred+TOLERANCE)) + 
np.multiply(1.0- t, np.log(1.0-pred+TOLERANCE))\n likelihood = np.sum(likelihood)\n\n return prior + likelihood", "def likelihoods(self, alleles):\n\n models = self.models_dict[len(alleles)]\n\n F = self.joint_frequencies_combo(alleles)\n\n ### BPH ###\n (((A0, A1),((B0,),)),) = models['BPH'][1].items()\n\n BPH = (A0 / A1) * F[B0]\n\n\n BPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['BPH'][2].items())\n\n if len(alleles)>2:\n BPH += sum( sum(F[B0] * sum( F[B1] * F[B2] for (B1, B2) in C[B0]) for B0 in C) * A0 / A1\n for (A0, A1), C in models['BPH'][3].items())\n\n ### SPH ###\n (((A0, A1),((B0,),)),) = models['SPH'][1].items()\n SPH = (A0 / A1) * F[B0]\n\n SPH += sum( sum(F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['SPH'][2].items())\n\n ### DIPLOIDY ###\n (((A0, A1),((B0,),)),) = models['DISOMY'][1].items()\n DISOMY = (A0 / A1) * F[B0]\n\n DISOMY += sum( sum( F[B0] * F[B1] for (B0, B1) in C) * A0 / A1\n for (A0, A1), C in models['DISOMY'][2].items())\n\n ### MONOSOMY ###\n ((B0,),) = models['MONOSOMY'][1][(1,1)]\n MONOSOMY = F[B0]\n\n result = likelihoods_tuple(MONOSOMY, DISOMY, SPH, BPH)\n return result", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... 
to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def _learn_global_mixture_weights(alpha, multinomials, val_data, num_em_iter=100, tol=0.001):\n num_comp = len(multinomials)\n if np.any(alpha <= 1):\n raise ValueError('alpha values have to be bigger than 1')\n\n for i, mult in enumerate(multinomials):\n if np.any(np.abs(np.sum(mult, axis=1) - 1) > 0.001):\n raise ValueError('component %d param is not a proper multinomial -- all rows must sum to 1' % i)\n\n if type(alpha) == float or type(alpha) == int:\n alpha = np.ones(num_comp) * alpha * 1.\n\n # Creating responsibility matrix and initializing it hard assignment on random\n log_like_tracker = [-np.inf]\n pi = np.ones(num_comp) / num_comp\n start = time.time()\n em_iter = 0\n for em_iter in xrange(1, num_em_iter + 1):\n # Evey 5 iteration we will compute the posterior log probability to see if we converged.\n if em_iter % 2 == 0:\n\n event_prob = _data_prob(pi, multinomials, val_data)\n event_prob = np.sum(event_prob, axis=0) # prob\n\n # The data likelihood was computed for each location, but it should be in the power of the number\n # of observations there, or a product in the log space.\n data_likelihood = np.log(np.array(event_prob)) * val_data[:, 2]\n\n prior_probability = dirichlet.logpdf(pi, alpha=alpha)\n log_likelihood = np.sum(data_likelihood + prior_probability) / np.sum(val_data[:, 2])\n\n if np.abs(log_likelihood - log_like_tracker[-1]) < tol:\n log.debug('[iter %d] [Reached convergence.]' % em_iter)\n break\n\n log.debug('[iter %d] [Likelihood: [%.4f -> %.4f]]' % (em_iter, log_like_tracker[-1], log_likelihood))\n log_like_tracker.append(log_likelihood)\n\n # E-Step\n\n resp = _data_prob(pi, multinomials, val_data)\n\n if np.all(resp == 0):\n raise ValueError('0 mix probability')\n\n resp = np.array(resp).T\n resp = normalize(resp, 'l1', axis=1)\n\n resp = np.multiply(resp, val_data[:, 2][:, np.newaxis])\n pi = np.sum(resp, axis=0)\n pi += alpha - 1\n pi /= np.sum(pi)\n\n total_time = time.time() - start\n log.debug('Finished EM. 
Total time = %d secs -- %.3f per iteration' % (total_time, total_time / em_iter))\n\n data_log_like = _data_prob(pi, multinomials, val_data)\n data_log_like = np.sum(data_log_like, axis=0)\n ll = np.sum(np.log(np.array(data_log_like)) * val_data[:, 2]) / np.sum(val_data[:, 2])\n return pi, ll", "def likelihood(self, w, class_words):\n return log((class_words.count(w) + 1)/(len(class_words) + self.N))", "def log_prob(self):", "def __calc_likelihood(self, *args):\n params = {}\n for i, p in enumerate(self._par_names):\n if self._par_islog[p]:\n params[p] = np.power(10., args[i])\n else:\n params[p] = args[i]\n return self.return_likelihood(params)", "def likelihood_ratio(cls, *marginals):\n cont = cls._contingency(*marginals)\n return (cls._n *\n sum(obs * _ln(float(obs) / (exp + _SMALL) + _SMALL)\n for obs, exp in zip(cont, cls._expected_values(cont))))", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.multinomial((4,4), 1, [0.1]*10), updates=random.updates())\r\n\r\n fn_val0 = fn()\r\n fn_val1 = fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(4,4))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def _LL(state, effects, observed_frequencies) -> float:\n observed_frequencies = np.array(observed_frequencies)\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n return sum(np.log10(predicted_probs) * observed_frequencies)", "def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', 
'{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll", "def _build_multinomial_weights(self) -> None:\n weights_obs = ramp_up_weights(\n len(self.obs), self.tpe.full_weight_num, self.tpe.equal_weight\n )\n counts_obs = numpy.bincount(\n self.obs, minlength=len(self.choices), weights=weights_obs\n )\n counts_obs = counts_obs + self.tpe.prior_weight\n self.weights = counts_obs / counts_obs.sum()", "def calculate_likelihood(self, number_of_topics):\n print(\"Start of calculate_likelihood...\")\n maxNumberOfWrd = 0\n for docValue in range(0,self.number_of_documents):\n for wordValue in range(0,self.vocabulary_size):\n maxTopicVal = 0\n for topicValue in range(0,number_of_topics):\n maxTopicVal = maxTopicVal + ((self.document_topic_prob[docValue][topicValue]*self.topic_word_prob[topicValue][wordValue]))\n if maxTopicVal > 0:\n maxNumberOfWrd = maxNumberOfWrd + (np.log(maxTopicVal) * self.term_doc_matrix[docValue][wordValue])\n\t\t#print(\"Value of maxNumberOfWrd >>> \"+maxNumberOfWrd)\n self.likelihoods.append(maxNumberOfWrd)\n\t\t#print(\"End of calculate_likelihood...\")\n return", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n m = Module()\r\n m.random = RandomStreams(utt.fetch_seed())\r\n m.fn = Method([], m.random.multinomial((20,20), 1, [0.1]*10))\r\n\r\n made = m.make()\r\n made.random.initialize()\r\n fn_val0 = made.fn()\r\n fn_val1 = made.fn()\r\n\r\n rng_seed = numpy.random.RandomState(utt.fetch_seed()).randint(2**30)\r\n rng = numpy.random.RandomState(int(rng_seed)) #int() is for 32bit\r\n numpy_val0 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n numpy_val1 = rng.multinomial(1, [0.1]*10, size=(20,20))\r\n\r\n assert numpy.all(fn_val0 == numpy_val0)\r\n assert numpy.all(fn_val1 == numpy_val1)", "def evaluate_GMM_log_likelihood(model, x, y):\n y_pred = model.predict(x)\n \n num_datapoints = len(x)\n output_dim = y.shape[-1]\n num_comp = int(y_pred.shape[-1] / (3*output_dim))\n\n mix_comp_logits = y_pred[:, :num_comp]\n mus = y_pred[:, num_comp:(1+output_dim)*num_comp]\n sigmas = y_pred[:, (1+output_dim)*num_comp:]\n \n # convert logits to categorical distribution - need to itterate through all points\n mix_comp = np.zeros((num_datapoints, num_comp))\n for i in range(num_datapoints):\n mix_comp[i,:] = get_mixture_dist(mix_comp_logits[i,:], num_comp)\n \n log_likelihood = 0\n for i in range(num_comp):\n for j in range(output_dim):\n mse = -0.5*np.sum(mix_comp[:,i]*np.square((y[:,j]-mus[:,(i*output_dim)+j])/sigmas[:,(i*output_dim)+j]))\n sigma_trace = -np.sum(mix_comp[:,i]*np.log(sigmas[:,(i*output_dim)+j]))\n log2pi = -np.sum(mix_comp[:,i]*0.5*output_dim*np.log(2*np.pi))\n\n log_likelihood += mse + sigma_trace + log2pi\n \n avg_log_likelihood = np.round(log_likelihood / num_datapoints, 2)\n print(f'Log likelihood: {avg_log_likelihood}')\n return avg_log_likelihood", "def compute_movie_rating_likelihood(M):\n\n # define the size to begin with\n likelihood = np.zeros((M, M))\n\n # 
-------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (c)\n #\n # Remember to normalize the likelihood, so that each column is a\n # probability distribution.\n \n for i in range(M):\n for j in range(M):\n if i == j:\n likelihood[i][j] = 2\n else:\n likelihood[i][j] = 1/abs(j-i)\n \n likelihood = likelihood / likelihood.sum(axis = 1)\n \n #\n # END OF YOUR CODE FOR PART (c)\n # -------------------------------------------------------------------------\n\n return likelihood", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def test_LM(self):\n\t\t\n\t\tprecision = 10**-8\n\t\t\t\t \n\t\tif self.n == 1:\n\t\t\t\t \n\t\t\tP_sum = sum(self.estimate_prob('', w) for w in self.vocab)\n\t\t\t\n\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one.'\n\t\t\t\t \n\t\telif self.n == 2:\n\t\t\thistories = ['the', 'in', 'at', 'blue', 'white']\n\t\t\t\t \n\t\t\tfor h in histories:\n\t\t\t\t \n\t\t\t\tP_sum = sum(self.estimate_prob(h, w) for w in self.vocab)\n\t\t\t\t\n\t\t\t\tassert abs(1.0 - P_sum) < precision, 'Probability mass does not sum up to one for history' + h\n\t\t\t\t\t \n\t\tprint('TEST SUCCESSFUL!')", "def log_likelihood(mu, sigma, y, T):\n ll = 0.\n for yi, Ti in zip(y, T):\n d = yi.size\n log_det_cov = np.linalg.slogdet(sigma[Ti])[1]\n y_minus_mean = yi - mu[Ti]\n term3 = np.dot(y_minus_mean.T.ravel(),\n np.linalg.solve(sigma[Ti], y_minus_mean.T).ravel())\n ll += (-0.5 * d * np.log(2 * np.pi) - 0.5 * log_det_cov - 0.5 * term3)\n return ll", "def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll", "def MVN_log_likelihood(X, model):\n D, M = X.shape\n X_normalized = normalize_log_likelihoods(X.copy())\n mvn = multivariate_normal(mean=model.mean, cov=model.cov)\n return mvn.logpdf(X_normalized.T).sum()\n # log_2pi = D * np.log(2 * np.pi)\n # log_det = np.log(np.linalg.det(model.cov))\n # residuals = calc_residuals(X_normalized, model.mean, \"minus\")\n # mahalanobis_distance = np.dot(np.dot(residuals.T, np.linalg.inv(model.cov)), residuals)\n # return -0.5 * (log_2pi + log_det + mahalanobis_distance).sum()", "def compute_likelihood(self, corpus: str, test_corpus: str, n: int):\n probs_per_ngram = self.compute_probabilities_per_word(corpus, n)\n test_corpus_tokens = self.tokenize(test_corpus, n)\n test_corpus_ngrams = ()\n end_i = len(test_corpus_tokens) - 1\n for i in range(end_i):\n if i - (n - 1) < 0:\n continue\n test_corpus_ngrams = test_corpus_ngrams + (self._make_ngrams(test_corpus_tokens, i, n), )\n likelihood_of_test_corpus = 1\n for test_corpus_ngram in test_corpus_ngrams:\n likelihood_of_test_corpus = likelihood_of_test_corpus * probs_per_ngram[test_corpus_ngram]\n return likelihood_of_test_corpus", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if 
all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def log_likelihood(self, data, reward_model, bias_params):", "def compute_log_likelihood(self, indicators, weights, l2):\n scores, _ = self.predict_probability(self.train_feature_x, weights)\n probs = self.predict_probability(self.train_feature_x, weights)\n lp = np.sum((indicators-1)*scores + np.log(probs)) - l2* np.sum(weights[1:]**2)\n return lp", "def log_likelihood(X, mu, sigma, phi):\n ll = None\n\n #######################################################################\n # TODO: #\n # Compute the log-likelihood of the data under the current model. #\n # This is used to check for convergnence of the algorithm. #\n #######################################################################\n\n ll = np.zeros((X.shape[0], 1))\n k = mu.shape[0]\n\n for i in range(k):\n ll += multivariate_normal(mu[i, :], sigma[i]).pdf(X)[:, np.newaxis]*phi[i]\n\n ll = sum(np.log(ll))\n\n #######################################################################\n # END OF YOUR CODE #\n #######################################################################\n\n return ll", "def get_log_likelihood(response_probability, response):\n pass", "def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))", "def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)", "def loglikelihood(self, y):\n raise NotImplementedError", "def multinomial_accuracy(distribution_or_probs: tfd.Distribution,\n data: jnp.DeviceArray) -> jnp.DeviceArray:\n return jnp.mean(\n jnp.sum(multinomial_mode(distribution_or_probs) * data, axis=1))", "def lnprob(theta, observables):\n prior = lnprior(theta)\n if not np.isfinite(prior):\n return -inf\n return prior + lnlike(theta, observables)", "def likelihood_genotype(genotype, bases_all_reads, error_rates):\n likelihood = 1\n for observed_base in bases_all_reads:\n p = 0\n for base in \"ACGT-\":\n l = prob_t_N(genotype, base) * error_rates[base][observed_base]\n p += l\n likelihood *= p\n\n return likelihood", "def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1", "def pmi(cls, *marginals):\n return (_log2(marginals[NGRAM] * marginals[TOTAL] ** (cls._n - 1)) -\n _log2(_product(marginals[UNIGRAMS])))", "def loglikelihood(self):\n raise NotImplementedError(\"To be implemented\")", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) 
+ GaussianDiag.Log2PI)", "def naivebayesPXY_mle(x,y):\n pos_denom = x[y==1].sum()\n neg_denom = x[y==-1].sum()\n posprob = x[y==1].sum(axis = 0)/pos_denom\n negprob = x[y==-1].sum(axis = 0)/neg_denom\n return posprob, negprob", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def calc_likelihood(par_num, par_rng):\n\n likelihoods = np.zeros(np.size(par_rng))\n\n trivial_prior = trivial_prior_class()\n\n pipe = pipeline(observables_generator=hammu12,\n likelihood=likelihood,\n prior=trivial_prior,\n optimizer_class=Hamiltonian_Monte_Carlo)\n\n parameters = [0]*hammu12.get_parameter_dimension()\n for par_val in par_rng:\n parameters[par_num] = par_val\n likelihoods[par_val-par_rng[0]] = pipe._calc_posterior(parameters)\n\n np.save('data%s_RM' % (par_num), likelihoods)", "def calculate_log_perplexity(self, output, flat_labels): #completed, expensive, should be compiled\n return -np.sum(np.log2(np.clip(output, a_min=1E-12, a_max=1.0))[np.arange(flat_labels.shape[0]), flat_labels[:,1]])", "def Ls(GTn:torch.tensor, Mn:torch.tensor) -> torch.tensor:\n return (-(GTn * torch.log(Mn+1e-15) + (1- GTn) * torch.log((1- Mn)+1e-15))).sum()", "def likelihood_function(X, taus, mus, sigmas):\n N = X.shape[0] # number of data points\n get_component_prob = lambda x: component_pdfs(x, mus, sigmas)\n T = np.apply_along_axis(arr=X, func1d=get_component_prob, axis=1) # gaussian component probabilities in row format (NxK)\n taus_rep = np.tile(taus, reps=(N, 1)) # repeat tau along N-axis so elementwise product can work\n\n return np.sum(T*taus_rep, axis=1)", "def log_marginal(self):\n #\n # Predictive covariance of x is sum of covariance of phi a and covariance of x|a\n x_Sigma = self.phi @ self.phi.T + np.diag(self.sigma_n**2 * np.ones(self.M))\n #\n # Predictive mean is 0 by symmetry\n # so given that x is distributed as a MVN, the exact marginal is\n lp_exact = st.multivariate_normal.logpdf(self.x, cov=x_Sigma)\n #\n return lp_exact", "def nll_logprobs(self, input, target_idx):\n raise NotImplementedError()", "def likelihood(alphas, sigmas, mus, x):\n if len(alphas.shape) == 0:\n alphas = np.expand_dims(alphas, 1)\n sigmas = np.expand_dims(sigmas, 1)\n k = alphas.shape[0]\n t_dim = int(mus.shape[0] / k)\n\n likelihood_ = 0.0\n\n for i in range(k):\n likelihood_t = gaussian_np(x, mus[i*t_dim:(i+1)*t_dim], sigmas[i])\n likelihood_ += alphas[i] * likelihood_t\n\n return likelihood_", "def eml_use_pseudowords_and_mle(xi, yi, deml):\n if xi not in deml[yi]:\n xi = pw(xi) # use pseudo-word instead\n\n return (deml[yi][xi]) / (sum(deml[yi].values()))", "def likelihood(\n self,\n observation: np.ndarray,\n state: np.ndarray,\n control_z: Optional[np.ndarray] = None\n ) -> np.matrix:\n pass", "def joint_logpdf(self, x1, x2 = None):\n dists = self.conditionalMVNs\n joint_pdfs = np.array([d.joint_pdf(x1, x2) for d in dists])\n return np.log(np.sum(self.weights * joint_pdfs))", "def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob", "def test_multinomial(self):\r\n # Check over two calls to see if the random state is correctly updated.\r\n rng_R = random_state_type()\r\n post_r, out = multinomial(rng_R, (7, 3), 6, [0.2] * 5)\r\n\r\n f = compile.function(\r\n [compile.In(rng_R,\r\n value=numpy.random.RandomState(utt.fetch_seed()),\r\n update=post_r, mutable=True)],\r\n [out], accept_inplace=True)\r\n\r\n 
numpy_rng = numpy.random.RandomState(utt.fetch_seed())\r\n val0, = f()\r\n val1, = f()\r\n numpy_val0 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\r\n numpy_val1 = numpy_rng.multinomial(6, [0.2] * 5, (7, 3))\r\n print val0\r\n print numpy_val0\r\n print val1\r\n print numpy_val1\r\n self.assertTrue(numpy.all(val0 == numpy_val0))\r\n self.assertTrue(numpy.all(val1 == numpy_val1))\r\n\r\n self.assertTrue(val0.shape == (7, 3, 5))\r\n self.assertTrue(val1.shape == (7, 3, 5))", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += 
math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def log_likelihood(self, params):\n # extract the parameters\n m1 = params['m1']\n m2 = params['m2']\n DL = params['DL']\n Tc = params['Tc']\n iota = params['iota']\n phic = params['phic']\n psi = params['psi']\n thetaS = params['thetaS']\n phiS = params['phiS']\n\n # calculate the model\n model = self._model(time, m1, m2, DL, Tc, iota, phic, psi, thetaS, phiS)\n\n# # normalisation\n# norm = -0.5*self._ndata*LN2PI - self._ndata*self._logsigma\n\n# # chi-squared\n# chisq = np.sum(((self._data - model)/(self._sigma))**2)\n\n return -np.vdot(self._data - model,self._data - model)", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def perplexity(sentences: List[Tuple[List[int], List[int]]], model: Seq2SeqAttentionModel) -> float:\n LL_Total = torch.tensor(0, dtype=torch.float)\n total_words = torch.tensor(0, dtype=torch.float)\n for i, (source_sentence, target_sentence) in enumerate(sentences):\n LL_Total += log_likelihood(source_sentence, target_sentence, model)\n total_words += len(target_sentence)\n\n return torch.exp(-LL_Total / total_words)", "def MLE(ngram, freqs):\n\tn = len(ngram)\n\tif ngram in freqs[n]:\n\t\tnumerator = freqs[n][ngram]\n\telse:\n\t\treturn 0.0\n\tif n == 1:\n\t\tdenominator = sum(freqs[1].values()) # unigram probability (unconditional): f(x) / corpus_size\n\telse:\n\t\thistory = ngram[0:n-1] # conditional ngram probability: f(x_1 .. x_n) / f(x_1 .. x_{n-1})\n\t\tif history in freqs[n-1]:\n\t\t\tdenominator = freqs[n-1][history]\n\t\telse:\n\t\t\treturn 0.0\n\treturn float(numerator)/denominator", "def get_total_log_likelihood(self, x, **kwargs):\n pass", "def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n .logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n 
@ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)", "def gmmloglik(log_emlik, weights):", "def gmmloglik(log_emlik, weights):", "def log_likelihood(self, theta=None, phi=None):\n theta = theta if theta is not None else self.theta\n phi = phi if phi is not None else self.phi\n ret = 0.\n for m, dm in enumerate(self.corpus):\n for n, w_mn in enumerate(dm):\n tp = 0.\n for k in range(self.n_components):\n tp += theta[m, k] * phi[k, w_mn]\n ret += np.log(tp)\n return ret", "def get_likelihoods(self, alleles):\n\n l = len(alleles)\n if l==2:\n result = self.likelihoods2(alleles)\n elif l==3:\n result = self.likelihoods3(alleles)\n elif l==4:\n result = self.likelihoods4(alleles)\n elif l==5:\n result = self.likelihoods5(alleles)\n else:\n result = self.likelihoods(alleles)\n return result", "def prob_m_of_n(m, n, T, l):\n PFD_one_unit = l*T\n m_of_n = binom(n, m) * (PFD_one_unit)**(n-m) * (1-PFD_one_unit)**m\n return m_of_n", "def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll", "def loglikelihood(model, data, q):\n\tph, pvh = model\n\tnPeople, nQuestions = data.shape\n\tlogL = 0\n\tfor i in range(nPeople):\n\t\tanswers = data[i,:]\n\t\tfor k in range(nQuestions):\n\t\t\tlogL += np.log(sum(pvh[:, k, int(answers[k] - 1)] * q[i,:].T))\n\treturn logL", "def lnlike(theta, dtarray, dmagarray, sigmaarray):\n gamma, A = theta\n\n aux=np.sum(np.log(like_one(theta,dtarray,dmagarray,sigmaarray)))\n\n return aux", "def compute_prob_mle(X: np.ndarray) -> float:\n\n Geometric._check_input_data(X=X)\n Geometric._check_support(X=X)\n\n prob = 1 / X.mean()\n return prob", "def log_marginal_likelihood(self) -> tf.Tensor:\n X, Y = self.data\n Y = Y[..., :-1]\n K = self.kernel(X)\n ks = self._add_noise_cov(K)\n L = tf.linalg.cholesky(ks)\n m = self.mean_function(X)\n\n # [R,] log-likelihoods for each independent dimension of Y\n log_prob = gpflow.logdensities.multivariate_normal(Y, m, L)\n return tf.reduce_sum(log_prob)", "def likelihood(x, n, P):\n if not isinstance(n, int) or (n <= 0):\n raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or (x < 0):\n raise ValueError(\n 'x must be an integer that is greater than or equal to 0')\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError('P must be a 1D numpy.ndarray')\n if not np.all((P >= 0) & (P <= 1)):\n raise ValueError('All values in P must be in the range [0, 1]')\n nume = np.math.factorial(n)\n deno = (np.math.factorial(x) * (np.math.factorial(n - x)))\n fact = nume / deno\n P_likelihood = fact * (np.power(P, x)) * (np.power((1 - P), (n - x)))\n return P_likelihood", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * 
np.log(n_samples) - 2.0 * log_likelihood", "def log_likelihood_bernoulli(mu, target):\n # init\n batch_size = mu.size(0)\n mu = mu.view(batch_size, -1)\n target = target.view(batch_size, -1)\n\n # log_likelihood_bernoulli\n log_bernoulli = torch.sum(target * torch.log(mu) + (1. - target) * torch.log(1. - mu), dim=1)\n return log_bernoulli", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n for cls in self.classes:\n class_probability = self.prior_prob[cls]\n for key, value in datum.items():\n relative_feature_values = self.likelihoods[cls][key]\n class_probability += math.log(relative_feature_values.get(datum[key], 0.01))\n\n logJoint[cls] = class_probability\n\n return logJoint", "def perplexity(self, corpus):\n sum_pro = 0.0\n total_words = 0\n for sentence in corpus:\n sen_pro = self.sentence_logprob(sentence)\n sum_pro += sen_pro\n total_words += len(sentence)\n\n \n\n l = sum_pro/total_words\n w = 0.0\n w = 2**(-l)\n\n return w", "def Likelihood(self, data, hypo):\n p_correct = hypo\n score = data\n\n k = self.exam.Reverse(score)\n n = self.exam.max_score\n like = thinkbayes2.EvalBinomialPmf(k, n, p_correct)\n return like", "def compute_prob_mle(X: np.ndarray) -> float:\n\n Bernoulli._check_input_data(X=X)\n Bernoulli._check_support(X=X)\n\n prob = X.mean()\n return prob", "def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)", "def marginal_ln_likelihood(samples, prior, data):\n n_samples = len(samples)\n n_linear = len(prior._linear_equiv_units)\n mu = np.zeros(n_linear)\n\n marg_ll = np.zeros(n_samples)\n for n, M, Lambda, ivar, *_ in get_M_Lambda_ivar(samples, prior, data):\n try:\n marg_ll[n], *_ = likelihood_worker(data.rv.value, ivar, M,\n mu, np.diag(Lambda),\n make_aA=False)\n except np.linalg.LinAlgError as e:\n raise e\n\n return marg_ll", "def log_m_probs(self):\n m = self.kernel.feature_log_prob_[self._match_class_pos()]\n return self._prob_inverse_transform(m)" ]
[ "0.7405791", "0.71270245", "0.6805791", "0.67948574", "0.6551636", "0.6476551", "0.64614797", "0.6425553", "0.6342785", "0.63420993", "0.6332708", "0.633066", "0.6330074", "0.6298897", "0.62908417", "0.62799925", "0.62471", "0.62063324", "0.61783415", "0.6177655", "0.61492676", "0.6103034", "0.60990226", "0.60570467", "0.60409313", "0.601715", "0.6015817", "0.6010898", "0.6003754", "0.59715736", "0.5967609", "0.5958885", "0.595258", "0.59231687", "0.5919504", "0.591918", "0.5914721", "0.5896283", "0.5895937", "0.5888113", "0.58879983", "0.58612007", "0.5860122", "0.58575034", "0.58549076", "0.58512044", "0.58423156", "0.58388513", "0.58324647", "0.582239", "0.5817351", "0.58041096", "0.5795078", "0.5785493", "0.57818705", "0.577473", "0.57710975", "0.57705635", "0.5764597", "0.57593507", "0.5759117", "0.5750386", "0.5747177", "0.57456315", "0.57406026", "0.5738948", "0.5736493", "0.5727552", "0.5726234", "0.572506", "0.5724687", "0.5717582", "0.5714601", "0.5713584", "0.5708932", "0.5706694", "0.56997716", "0.56929654", "0.5683291", "0.568157", "0.568157", "0.5670652", "0.56700927", "0.5665721", "0.5665466", "0.5665378", "0.566428", "0.566097", "0.5653138", "0.56483", "0.56473124", "0.5637153", "0.5629207", "0.5628895", "0.56273615", "0.562719", "0.56244504", "0.56168896", "0.56152666", "0.5609881" ]
0.7152682
1
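The record above closes with its score list, the positive document's own score, and its rank field. As a minimal illustrative sketch only (not part of the dataset's own tooling): assuming a record has been parsed into a Python dict whose keys follow the field names shown here (`negatives`, `negative_scores`, `document_score`), and assuming `negative_scores` is parallel to `negatives`, the two lists can be paired to inspect the hardest negatives, i.e. the snippets scored closest to or above the positive document.

```python
# Hypothetical helper for inspecting one parsed record of this dataset.
# Assumes: record is a dict with "negatives" (list of code strings),
# "negative_scores" (list of score strings, parallel to negatives),
# and "document_score" (score string for the positive document).

def hardest_negatives(record, top_k=5):
    doc_score = float(record["document_score"])
    pairs = zip(record["negatives"],
                (float(s) for s in record["negative_scores"]))
    ranked = sorted(pairs, key=lambda pair: pair[1], reverse=True)
    # Return a short preview of each snippet, its score, and whether it
    # outscores the positive document (a "hard" negative).
    return [(snippet[:60], score, score >= doc_score)
            for snippet, score in ranked[:top_k]]
```

For the record above, `document_score` is 0.7152682 while the highest negative score is 0.7405791, so at least one negative outscores the positive document under this pairing; the stored `document_rank` value is therefore not simply a rank by these scores, and its exact semantics are not assumed here.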