max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
src/api/controller/CommandsController.py | gleicon/RedisLive | 1 | 12760351 | <gh_stars>1-10
from BaseController import BaseController
import dateutil.parser
from datetime import datetime, timedelta
from twisted.internet import defer
class CommandsController(BaseController):

    @defer.inlineCallbacks
    def get(self):
        """Serves a GET request.

        Query arguments:
          server     -- required; the Redis server to report on.
          from, to   -- optional date strings; when either is missing the
                        window defaults to the last 120 seconds.

        Writes JSON: {'data': [[datetime-as-list, command-count], ...],
                      'timestamp': ISO timestamp of this response}.
        """
        return_data = dict(data=[], timestamp=datetime.now().isoformat())
        server = self.get_argument("server")
        from_date = self.get_argument("from", None)
        to_date = self.get_argument("to", None)

        # PEP 8: compare to None with `is`, not `==` (avoids surprises from
        # objects overriding __eq__).
        if from_date is None or to_date is None:
            end = datetime.now()
            delta = timedelta(seconds=120)
            start = end - delta
        else:
            start = dateutil.parser.parse(from_date)
            end = dateutil.parser.parse(to_date)

        difference = end - start
        # added to support python version < 2.7, otherwise timedelta has
        # total_seconds()
        difference_total_seconds = difference.days * 24 * 3600
        difference_total_seconds += difference.seconds
        difference_total_seconds += difference.microseconds / 1e6

        minutes = difference_total_seconds / 60
        hours = minutes / 60
        seconds = difference_total_seconds

        # Choose a grouping granularity that keeps the number of returned
        # data points manageable for the requested window.
        if hours > 120:
            group_by = "day"
        elif minutes > 120:
            group_by = "hour"
        elif seconds > 120:
            group_by = "minute"
        else:
            group_by = "second"

        combined_data = []
        stats = yield self.stats_provider.get_command_stats(server, start, end,
                                                            group_by)
        # Each stats row is (count, timestamp); swap to (timestamp, count).
        for data in stats:
            combined_data.append([data[1], data[0]])
        for data in combined_data:
            return_data['data'].append([self.datetime_to_list(data[0]), data[1]])
        self.write(return_data)
| 2.609375 | 3 |
utils_pkg/utils_pkg/__init__.py | felipery03/disaster-response | 1 | 12760352 | <reponame>felipery03/disaster-response
from .utils import *
from .transformers import * | 0.910156 | 1 |
networks/build_entity_embedding.py | avalanchesiqi/twitter-sampling | 1 | 12760353 | <reponame>avalanchesiqi/twitter-sampling
import sys, os
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from utils.helper import Timer
def main():
    """Build stable integer embeddings for user ids and hashtags.

    Reads the complete user and hashtag dumps for `app_name`, assigns each
    distinct id a label ('u<i>' for users, 'h<i>' for hashtags) in sorted-id
    order, and writes the label,id mappings to ../networks/.
    """
    timer = Timer()
    timer.start()

    app_name = 'cyberbullying'

    # --- users ---
    complete_user_id_set = set()
    with open('../data/{0}_out/complete_user_{0}.txt'.format(app_name), 'r') as fin:
        for line in fin:
            # Each line is "tid,root_uid,<rest>"; only the first two fields matter.
            tid, root_uid, _ = line.rstrip().split(',', 2)
            complete_user_id_set.add(root_uid)
    # sorted() accepts a set directly; the intermediate list() was redundant.
    embed_uid_dict = {'u{0}'.format(embed): uid for embed, uid in enumerate(sorted(complete_user_id_set))}
    num_user_complete = len(embed_uid_dict)
    print('{0} users appear in the complete set'.format(num_user_complete))
    with open('../networks/{0}_embed_user.txt'.format(app_name), 'w') as fout:
        for uid in sorted(embed_uid_dict.keys()):
            fout.write('{0},{1}\n'.format(uid, embed_uid_dict[uid]))
    print('>>> Finish embedding users')
    timer.stop()

    # --- hashtags ---
    complete_hashtag_id_set = set()
    with open('../data/{0}_out/complete_hashtag_{0}.txt'.format(app_name), 'r', encoding='utf-8') as fin:
        for line in fin:
            # Each line is "tid,tag1,tag2,..."; lower-case to de-duplicate tags.
            tid, *hashtags = line.rstrip().lower().split(',')
            complete_hashtag_id_set.update(hashtags)
    embed_hid_dict = {'h{0}'.format(embed): hashtag for embed, hashtag in enumerate(sorted(complete_hashtag_id_set))}
    num_hashtag_complete = len(embed_hid_dict)
    print('{0} hashtags appear in the complete set'.format(num_hashtag_complete))
    with open('../networks/{0}_embed_hashtag.txt'.format(app_name), 'w', encoding='utf-8') as fout:
        for hid in sorted(embed_hid_dict.keys()):
            fout.write('{0},{1}\n'.format(hid, embed_hid_dict[hid]))
    print('>>> Finish embedding hashtags')
    timer.stop()
# Script entry point: run the embedding build when executed directly.
if __name__ == '__main__':
    main()
| 2.15625 | 2 |
usfm/verifyManifest.py | unfoldingWord-dev/tools | 6 | 12760354 | # -*- coding: utf-8 -*-
# Script for verifying the format of a manifest.yaml file that is part of a Door43 Resource Container.
# Should check the following:
# Manifest file does not have a BOM.
# Valid YAML syntax.
# Manifest contains all the required fields.
# conformsto 'rc0.2'
# contributor is a list of at least one name, all names at least 3 characters long
# creator is a non-empty string
# identifier is a recognized value: tn, tq, ulb, etc.
# The RC spec requires identifier to be all lowercase alphanumeric and hyphens.
# The first characters must be a letter, and the last character must not be a hyphen.
# identifier equals the last part of the name of the directory in which the manifest file resides
# format corresponds to identifier
# language.direction is 'ltr' or rtl'
# language.identifier equals to first part of the name of the directory in which the manifest file resides
# language.title is a non-empty string. Prints reminder to localize language title.
# issued date is less or equal to modified date
# modified date is greater than or equal to issued date
# modified date equals today
# publisher does not contain "Unfolding" or "unfolding" unless language is English
# relation is a list of at lesat one string, all of which:
# start with the language identifer and a slash
# identifier following the slash is valid and must not equal the current project identifer
# other valid relation strings may also be predefined in this script
# rights value is 'CC BY-SA 4.0'
# source has no extraneous fields
# source.identifier matches project type identifier above
# source.language is 'en' (Warning if not)
# source.version is a string
# subject is one of the predefined strings and corresponds to project type identifier
# title is a non-empty string
# type corresponds to subject
# version is a string that starts with source.version followed by a period followed by a number
# checking has no extraneous fields
# checking.checking_entity is a list of at least one string
# checking.checking_level is '3'
# projects is a non-empty list. The number of projects in the list is reasonable for the project type.
# each subfield of each project exists
# project identifiers correspond to type of project
# project categories correspond to type of project
# project paths exist
# checks for extraneous files in the folder and subfolders.
# verifies presence of LICENSE and README files.
# verifies presence of valid toc.yaml files in tA projects.
# verifies today's date on README file.
# verifies presence of media.yaml file for OBS projects.
#
# Globals
manifestDir = r'C:\DCS\Hindi\hi_ta.STR'
nIssues = 0
projtype = ''
issuesFile = None
from datetime import datetime
from datetime import date
from datetime import timedelta
import pathlib
import sys
import os
import yaml
import io
import codecs
import numbers
import re
import usfm_verses
# Returns language identifier based on the directory name
def getLanguageId():
    # Repo folders are named "<lang>_<resource>"; the language code is
    # everything before the first underscore.
    global manifestDir
    dirname = os.path.basename(manifestDir)
    return dirname.split('_', 1)[0]
# If manifest-issues.txt file is not already open, opens it for writing.
# Returns file pointer, which is also a global.
def openIssuesFile():
    global issuesFile
    if not issuesFile:
        global manifestDir
        # UTF-8 text mode with Unix newlines regardless of platform.
        path = os.path.join(manifestDir, "manifest-issues.txt")
        issuesFile = io.open(path, "tw", encoding='utf-8', newline='\n')
    return issuesFile
# Writes error message to stderr and to manifest-issues.txt.
# Increments the global issue counter.
def reportError(msg):
    global nIssues
    try:
        sys.stderr.write(msg + '\n')
    except UnicodeEncodeError as e:
        # The console encoding cannot represent the message; point the user
        # at the issues file instead of crashing.
        sys.stderr.write("See error message in manifest-issues.txt. It contains Unicode.\n")
    # NOTE(review): the write to manifest-issues.txt is disabled below, so
    # despite the header comment only stderr currently receives messages --
    # confirm whether disabling this was intentional.
    # issues = openIssuesFile().write(msg + u'\n')
    nIssues += 1
# Returns the number of Bible-book subdirectories in the manifest directory
# (folders whose 3-letter name matches a book id in usfm_verses).
# (The original comment incorrectly said ".usfm files" -- see countUsfmFiles.)
def countBookDirs():
    n = 0
    for fname in os.listdir(manifestDir):
        path = os.path.join(manifestDir, fname)
        if os.path.isdir(path) and fname.upper() in usfm_verses.verseCounts:
            n += 1
    return n
# Returns the number of .usfm files in the manifest directory.
def countUsfmFiles():
    return sum(1 for entry in os.listdir(manifestDir) if entry.endswith(".usfm"))
# Returns True if the specified string is a recognized Bible type of project type
def isBibleType(id):
    if isAlignedBibleType(id):
        return True
    return id in {'ulb', 'udb', 'reg', 'blv'}

# Returns True if the specified string is a recognized Aligned Bible type of project type
# Preliminary implementation - list needs refinement (6/21/21)
def isAlignedBibleType(id):
    aligned_ids = {'ust', 'ult', 'iev', 'irv', 'isv', 'glt', 'gst',
                   'rlb', 'rob', 'rlob', 'rsb', 'rsob', 'stv', 'trs',
                   'rlv', 'ocb', 'gnt'}
    return id in aligned_ids
# This function validates the project entries for a tA project.
# tA projects should have four project entries, each with specific content.
# However, the project titles don't seem to matter. It's probably better if they are translated, but not required.
def verifyAcademyProject(project):
    """Validate one projects: entry of a translationAcademy manifest.

    Checks categories == ['ta'], that the identifier names one of the four
    tA manuals, that an untranslated (ASCII) title matches the English title
    for that manual, and that sort matches the manual's position.
    """
    if len(project['categories']) != 1 or project['categories'][0] != 'ta':
        reportError("Invalid project:categories: " + project['categories'][0])
    # identifier -> (expected English title, expected sort value); replaces
    # four copy-pasted elif branches with one table.
    manuals = {
        'intro':     ("Introduction to Translation Academy", 0),
        'process':   ("Process Manual", 1),
        'translate': ("Translation Manual", 2),
        'checking':  ("Checking Manual", 3),
    }
    section = project['identifier']
    projtitle = project['title']
    if section in manuals:
        expected_title, expected_sort = manuals[section]
        # Only English (all-ASCII) titles are checked; translated titles pass.
        if projtitle.isascii() and projtitle != expected_title:
            reportError("Invalid project:title: " + projtitle)
        if project['sort'] != expected_sort:
            reportError("Invalid project:sort: " + str(project['sort']))
    else:
        reportError("Invalid project:identifier: " + section)
# Verifies that all chapters exist for the given book folder,
# then verifies each chapter folder's contents.
def verifyBook(book, bookpath):
    if not book.islower():
        reportError("Upper case book folder: " + shortname(bookpath))
    nchapters = usfm_verses.verseCounts[book.upper()]['chapters']
    subdirs = os.listdir(bookpath)
    # A "front" folder does not count toward the chapter total.
    if len(subdirs) < nchapters or ("front" in subdirs and len(subdirs) <= nchapters):
        reportError("Missing chapters in: " + shortname(bookpath))
    for chapter in subdirs:
        path = os.path.join(bookpath, chapter)
        if os.path.isdir(path):
            verifyChapter( os.path.join(bookpath, chapter) )
# Verifies the folder names correspond to books of the Bible.
# Verifies the chapter folders and file names under each book.
# for tN and tQ projects only.
def verifyBooks(path):
    for book in os.listdir(path):
        # Skip the standard top-level repo files and folders.
        if book not in {".git", "LICENSE", "LICENSE.md", "README.md", "manifest.yaml", "media.yaml"}:
            bookpath = os.path.join(path, book)
            if len(book) == 3 and os.path.isdir(bookpath) and book.upper() in usfm_verses.verseCounts:
                verifyBook(book, bookpath)
            elif not book.startswith("issues"):
                # "issues*" files are generated by these verification scripts.
                reportError("Invalid(?) file or folder: " + shortname(bookpath))
# Chapter file name patterns: two-digit (01.md) and three-digit (119.md) numbers.
fname2_re = re.compile(r'[0-8][0-9]\.md$')
fname3_re = re.compile(r'[0-1][0-9][0-9]\.md$')

# Verifies that all file names in the chapter folder are legit.
# Applies to tN and tQ projects only.
def verifyChapter(path):
    skip = (projtype == 'tq' and "psa" in path and "119" in path) # Psalm 119 in tQ has some 2-digit and some 3-digit verse numbers, and some in the 90s
    fname_re = fname2_re
    # Psalms has more than 99 chapters, so 3-digit names are expected there.
    if projtype != 'tq' and "psa" in path or "PSA" in path:
        fname_re = fname3_re
    for fname in os.listdir(path):
        if not skip and not fname_re.match(fname) and fname != "intro.md":
            reportError("Invalid file name: " + fname + " in " + shortname(path))
# Verifies the checking section of the manifest:
# checking_entity is a non-empty list of names (>= 3 chars each), and
# checking_level is the string '3' (except for reg and tq projects).
def verifyChecking(checking):
    verifyKeys('checking', checking, ['checking_entity', 'checking_level'])
    # `key in dict` is the idiomatic (and equivalent) form of
    # `key in list(dict.keys())`, answering the original inline question.
    if 'checking_entity' in checking:
        if len(checking['checking_entity']) < 1:
            reportError("Missing checking_entity.")
        for c in checking['checking_entity']:
            if not isinstance(c, str) or len(c) < 3:
                reportError("Invalid checking_entity: " + str(c))
    if 'checking_level' in checking:
        if not isinstance(checking['checking_level'], str):
            reportError('checking_level must be a string')
        elif checking['checking_level'] != '3' and projtype not in {'reg','tq'}:
            reportError("Invalid value for checking_level: " + checking['checking_level'])
# Markdown files containing a run of 4+ digits are probably misnamed chapter/verse files.
badname_re = re.compile(r'.*\d\d\d\d+.*\.md$')

# Checks for extraneous files in the directory... recursive
def verifyCleanDir(dirpath):
    for fname in os.listdir(dirpath):
        path = os.path.join(dirpath, fname)
        if projtype == 'ta' and fname == 'media.yaml':
            reportError("Unwanted media.yaml file: " + shortname(path))
        if "temp" in fname or "tmp" in fname or "orig" in fname or "bak" in fname or \
           "Copy" in fname or "txt" in fname or "projects" in fname:
            # Legitimate names that happen to contain one of the suspect substrings.
            if fname not in {"translate-original", "temple.md", "tempt.md", "contempt.md", "habakkuk.md"}:
                reportError("Possible extraneous file: " + shortname(path))
        elif badname_re.match(fname):
            reportError("Likely misnamed file: " + shortname(path))
        if os.path.isdir(path) and fname != ".git":
            verifyCleanDir(path)
# Verifies the contributors list: must be non-empty, and each entry a
# string of at least 3 characters.
def verifyContributors(core):
    # `key in dict` is the idiomatic form of `key in list(dict.keys())`.
    if 'contributor' in core:
        if len(core['contributor']) < 1:
            reportError("Missing contributors!")
        for c in core['contributor']:
            if not isinstance(c, str) or len(c) < 3:
                reportError("Invalid contributor name: " + str(c))
# Checks the dublin_core of the manifest: required fields, then each
# field's value via the dedicated verify* helpers.
def verifyCore(core):
    verifyKeys("dublin_core", core, ['conformsto', 'contributor', 'creator', 'description', 'format', \
        'identifier', 'issued', 'modified', 'language', 'publisher', 'relation', 'rights', \
        'source', 'subject', 'title', 'type', 'version'])
    # Check project identifier first because it is used to validate some other fields
    verifyIdentifier(core)      # Sets the projtype global
    # `key in dict` replaces the non-idiomatic `key in list(dict.keys())`.
    if 'conformsto' in core and core['conformsto'] != 'rc0.2':
        reportError("Invalid value for conformsto: " + core['conformsto'])
    verifyContributors(core)
    verifyStringField(core, 'creator', 3)
    verifyDates(core['issued'], core['modified'])
    verifyFormat(core)
    verifyLanguage(core['language'])
    pub = core['publisher']
    # "unfoldingWord" is only a valid publisher for English resources.
    if pub.lower().find('unfolding') >= 0 and core['language']['identifier'] != 'en':
        reportError("Invalid publisher: " + pub)
    elif len(pub) > 3 and pub != "Wycliffe Associates":
        reportError("This may be the wrong publisher name: " + pub + ". Use 'BCS' as the publisher for BCS resources.")
    verifyRelations(core['relation'])
    if 'rights' in core and core['rights'] != 'CC BY-SA 4.0':
        reportError("Invalid value for rights: " + core['rights'])
    verifySource(core['source'])
    verifySubject(core['subject'])
    verifyTitle(core['title'])
    verifyType(core['type'])
    verifyVersion(core['version'], core['source'][0]['version'])
# Parses the issued and modified dates ("YYYY-MM-DD") and reports problems:
# modified must be today's date, and issued must not be later than modified.
def verifyDates(issued, modified):
    issue_day = datetime.strptime(issued, "%Y-%m-%d").date()
    mod_day = datetime.strptime(modified, "%Y-%m-%d").date()
    if mod_day != date.today():
        reportError("Modified date is not today: " + modified)
    if issue_day > mod_day:
        reportError("Dates wrong - issued: " + issued + ", modified: " + modified)
# Top-level verification of a resource directory: checks the manifest,
# companion files, extraneous files, and per-type extras (tA tocs,
# OBS media.yaml, tN/tQ book folders).
def verifyDir(dirpath):
    path = os.path.join(dirpath, "manifest.yaml")
    if os.path.isfile(path):
        verifyFile(path)    # sets the projtype global as a side effect
        verifyOtherFiles()
    else:
        reportError("No manifest.yaml file in: " + dirpath)
    verifyCleanDir(dirpath)
    if projtype == 'ta':
        for folder in ['checking', 'intro', 'process', 'translate']:
            verifyYamls(dirpath, folder)
        sys.stdout.write("Remember to check contents of 4 toc.yaml files. (Title fields must be translated.)\n")
    if projtype == 'obs':
        verifyMediaYaml(dirpath)
    if projtype in {'tn','tq'}:
        verifyBooks(dirpath)
    verifyReadme(dirpath)
# Manifest file verification
def verifyFile(path):
if has_bom(path):
reportError("manifest.yaml file has a Byte Order Mark. Remove it.")
manifestFile = io.open(path, "tr", encoding='utf-8-sig')
manifest = yaml.safe_load(manifestFile)
manifestFile.close()
verifyKeys("", manifest, ['dublin_core', 'checking', 'projects'])
verifyCore(manifest['dublin_core'])
verifyChecking(manifest['checking'])
verifyProjects(manifest['projects'])
# Verifies format field is a valid string, depending on project type.
# Done with iev, irv, isv, obs, obs-tn, obs-tq, obs-sn, obs-sq, reg, ta, tq, tn, tw, tsv, ulb, udb, ust
def verifyFormat(core):
    global projtype
    if verifyStringField(core, 'format', 8):
        format = core['format']
        if projtype in {'tn'}:
            # A tsv format reclassifies the project as tn-tsv for all
            # subsequent checks (global side effect).
            if format == 'text/tsv':
                projtype = 'tn-tsv'
                print("projtype = " + projtype)
            elif format != 'text/markdown':
                reportError("Invalid format: " + format)
        elif projtype in {'ta', 'tq', 'tw', 'obs', 'obs-tn', 'obs-tq', 'obs-sn', 'obs-sq'}:
            if format != 'text/markdown':
                reportError("Invalid format: " + format)
        elif isBibleType(projtype):
            if format not in {'text/usfm', 'text/usfm3'}:
                reportError("Invalid format: " + format)
            # These aligned-Bible types must specifically be usfm3.
            if projtype in {'ust','irv','glt','gst','rob','rlob','rsob'}:
                if format != 'text/usfm3':
                    reportError("Invalid format: " + format + ". Expected 'text/usfm3'.")
        else:
            reportError("Unable to validate format because script does not yet support project type: " + projtype)
# Validates the dublin_core:identifier field in several ways.
# Sets the global projtype variable which is used by subsequent checks.
def verifyIdentifier(core):
    global projtype
    global manifestDir
    if verifyStringField(core, 'identifier', 2):
        id = core['identifier']
        if id not in {'tn','tq','tw','ta','obs','obs-tn','obs-tq','obs-sn','obs-sq'} and not isBibleType(id):
            reportError("Invalid id: " + id)
        else:
            projtype = id
            print("projtype = " + projtype)
            parts = manifestDir.rsplit('_', 1)
            lastpart = parts[-1].lower()
            # ".str" and ".rpp" suffixes on the folder name are tolerated.
            if lastpart != id.lower() and lastpart != id.lower() + ".str" and lastpart != id.lower() + ".rpp":
                # last part of directory name should match the projtype string
                reportError("Project identifier (" + id + ") does not match last part of directory name: " + lastpart)
# Verify that the specified fields exist and no others.
def verifyKeys(group, dict, keys):
    for key in [k for k in keys if k not in dict]:
        reportError('Missing field: ' + group + ':' + key)
    for field in dict:
        if field in keys:
            continue
        # dublin_core:comment is the one optional extra field allowed.
        if group == "dublin_core" and field == "comment":
            continue
        reportError("Extra field: " + group + ":" + field)
# Validate the language field and its subfields:
# direction is 'ltr' or 'rtl'; identifier matches the directory name's
# language prefix; title is a string that should be localized.
def verifyLanguage(language):
    verifyKeys("language", language, ['direction', 'identifier', 'title'])
    # `key in dict` replaces the non-idiomatic `key in list(dict.keys())`.
    if 'direction' in language:
        if language['direction'] != 'ltr' and language['direction'] != 'rtl':
            reportError("Incorrect language direction: " + language['direction'])
    if 'identifier' in language:
        # Directory names are "<lang>_<resource>", so the code must match.
        if language['identifier'] != getLanguageId():
            reportError("Language identifier (" + language['identifier'] + ") does not match first part of directory name: " + os.path.basename(manifestDir))
    if verifyStringField(language, 'title', 3):
        # An all-ASCII title is probably still the English name.
        if language['title'].isascii():
            sys.stdout.write("Remember to localize language title: " + language['title'] + '\n')
# For OBS projects, verify that media.yaml exists and is valid.
def verifyMediaYaml(dirpath):
    yamlpath = os.path.join(dirpath, "media.yaml")
    if os.path.isfile(yamlpath):
        # Context manager ensures the file is closed even if parsing raises.
        with io.open(yamlpath, "tr", encoding='utf-8-sig') as yamlFile:
            contents = yaml.safe_load(yamlFile)
        verifyKeys("", contents, ['projects'])
        verifyProjectsOBS(contents['projects'])
    else:
        reportError("Missing file: " + shortname(yamlpath))
# Verify media entry from OBS media.yaml file
def verifyMedium(medium):
    verifyKeys("media", medium, ['identifier', 'version', 'contributor', 'url'])
    # NOTE(review): this is a plain substring test, so any url containing
    # "en" (e.g. "content") also triggers it -- confirm whether a stricter
    # "/en/" path-segment check was intended.
    if 'en' in medium['url']:
        reportError("Replace 'en' with the correct langauge code in media.yaml url's")
    version = "v" + medium['version']
    # Non-door43 urls are expected to embed the version number exactly twice.
    if medium['identifier'] != 'door43' and medium['url'].count(version) != 2:
        reportError("Correct the version numbers in media.yaml url's")
    if medium['identifier'] != 'pdf':
        sys.stdout.write("Verify manually the " + medium['identifier'] + " media entry in media.yaml.\n")
# Confirms the existence of a LICENSE file
def verifyOtherFiles():
    # Either LICENSE.md or a plain LICENSE file satisfies the requirement.
    candidates = ("LICENSE.md", "LICENSE")
    if not any(os.path.isfile(os.path.join(manifestDir, name)) for name in candidates):
        reportError("LICENSE file is missing")
# Verifies that the project contains the six required fields and no others.
# Verifies that the path exists.
# Verifies that the title corresponds to the project type.
# Verifies that the sort field is not octal.
# Validate some other field values, depending on the type of project
def verifyProject(project):
    verifyKeys("projects", project, ['title', 'versification', 'identifier', 'sort', 'path', 'categories'])
    global manifestDir
    fullpath = os.path.join(manifestDir, project['path'])
    # Shortest legal path is something like "a.usfm" (5+ chars).
    if len(project['path']) < 5 or not os.path.exists(fullpath):
        reportError("Invalid path: " + project['path'])
    if not isinstance(project['sort'], numbers.Integral):
        reportError("project:sort is the wrong type: " + str(project['sort']))
    # Remaining checks depend on the project type (projtype global).
    if projtype == 'ta':
        verifyAcademyProject(project)
    elif projtype in {'tn', 'tq'}:
        # identifier is a book id; sort must match the book's canonical order.
        bookinfo = usfm_verses.verseCounts[project['identifier'].upper()]
        if project['sort'] != bookinfo['sort']:
            reportError("Incorrect project:sort: " + str(project['sort']))
        if projtype == 'tn' and len(project['categories']) != 0:
            reportError("Categories list should be empty: project:categories")
    elif projtype == 'tn-tsv':
        bookinfo = usfm_verses.verseCounts[project['identifier'].upper()]
        if project['sort'] != bookinfo['sort']:
            reportError("Incorrect project:sort: " + str(project['sort']))
        if project['versification'] != 'ufw':
            reportError("Invalid project:versification: " + project['versification'] + ". Should be 'ufw'")
        cat = project['categories'][0]
        if len(project['categories']) != 1 or cat not in {'bible-ot', 'bible-nt'}:
            reportError("Invalid project:categories: " + cat)
    elif projtype == 'tw':
        if project['title'] not in {'translationWords','Translation Words'}:
            reportError("Invalid project:title: " + project['title'] + ". Should be translationWords or Translation Words")
    elif isBibleType(projtype):
        bookinfo = usfm_verses.verseCounts[project['identifier'].upper()]
        if int(project['sort']) != bookinfo['sort']:
            reportError("Incorrect project:sort: " + str(project['sort']))
        if project['versification'] != 'ufw':
            reportError("Invalid project:versification: " + project['versification'] + ". Should be 'ufw'")
        if len(project['identifier']) != 3:
            reportError("Invalid project:identifier: " + project['identifier'])
        cat = project['categories'][0]
        if len(project['categories']) != 1 or not (cat == 'bible-ot' or cat == 'bible-nt'):
            reportError("Invalid project:categories: " + cat)
    elif projtype == 'obs':
        if project['categories']:
            reportError("Should be blank: project:categories")
        if project['versification']:
            reportError("Should be blank: project:versification")
        if project['identifier'] != 'obs':
            reportError("Invalid project:identifier: " + project['identifier'])
        if project['title'] != 'Open Bible Stories':
            reportError("Invalid project:title: " + project['title'])
    elif projtype in {'obs-tn','obs-tq','obs-sn','obs-sq'}:
        if project['categories'] and len(project['categories']) != 0:
            reportError("Categories list should be empty: project:categories")
        if project['identifier'] != "obs": # New as of November 2021
            reportError("Invalid project:identifier: " + project['identifier'])
        if projtype == 'obs-tn':
            if not project['title'].endswith('Open Bible Stories Translation Notes') and project['title'] != 'OBS translationNotes':
                reportError("Invalid project:title: " + project['title'])
        elif projtype == 'obs-tq':
            if not project['title'].endswith('Open Bible Stories Translation Questions'):
                reportError("Invalid project:title: " + project['title'])
        elif projtype == 'obs-sn':
            if project['title'] != 'Open Bible Stories Study Notes':
                reportError("Invalid project:title: " + project['title'])
        elif projtype == 'obs-sq':
            if project['title'] != 'Open Bible Stories Study Questions':
                reportError("Invalid project:title: " + project['title'])
    else:
        sys.stdout.write("Verify each project entry manually.\n") # temp until all projtypes are supported
# For most project types, the projects:identifier is really a part identifier, like book id (ULB, tQ, etc.), or section id (tA)
# Verifies the projects list: the list length must be plausible for the
# project type, then each entry is validated individually.
def verifyProjects(projects):
    if not projects:
        reportError('Empty projects list')
    else:
        global projtype
        nprojects = len(projects)
        if nprojects < 1:
            reportError('Empty projects list')
        if isBibleType(projtype) and nprojects != countUsfmFiles():
            reportError("Number of projects listed " + str(nprojects) + " does not match number of usfm files: " + str(countUsfmFiles()))
        if projtype in {'tn', 'tq'} and nprojects != countBookDirs():
            reportError("Number of projects listed " + str(nprojects) + " does not match number of book folders: " + str(countBookDirs()))
        if projtype in ['obs','obs-tn','obs-tq','obs-sn','obs-sq', 'tw'] and nprojects != 1:
            reportError("There should be exactly 1 project listed under projects.")
        elif projtype == 'ta' and nprojects != 4:
            reportError("There should be exactly 4 projects listed under projects.")
        elif projtype in {'tn','tn-tsv','tq'} or isBibleType(projtype):
            # Expect a full NT (27), OT (39), or whole Bible (66).
            if nprojects not in (27,39,66):
                reportError("Number of projects listed: " + str(nprojects))
        for p in projects:
            verifyProject(p)
# Verify one project of an OBS media.yaml file: the identifier must be
# 'obs' and at least one media entry must be present.
def verifyProjectOBS(project):
    if project['identifier'] == 'obs':
        nmedia = len(project['media'])
        if nmedia < 1:
            reportError('No media are defined in media.yaml')
        else:
            for medium in project['media']:
                verifyMedium(medium)
    else:
        # Fixed typo in message: "Unknowns" -> "Unknown".
        reportError("Unknown identifier in media.yaml: " + project['identifier'])
# Verify the projects section of an OBS media.yaml file, which is the only section
def verifyProjectsOBS(projects):
    if projects:
        for entry in projects:
            verifyProjectOBS(entry)
    else:
        reportError('media.yaml is empty')
# Checks that a README(.md) file exists and appears to have been updated today.
def verifyReadme(dirpath):
    readmepath = os.path.join(dirpath, "README.md")
    if not os.path.isfile(readmepath):
        readmepath = os.path.join(dirpath, "README")
    if not os.path.isfile(readmepath):
        reportError("No README file is found")
    else:
        pathlibpath = pathlib.Path(readmepath)
        modtime = datetime.fromtimestamp(pathlibpath.stat().st_mtime)
        gitpath = os.path.join(dirpath, ".git/config")
        # NOTE(review): delta is computed but never used below -- looks like
        # leftover logic (perhaps meant to compare README mtime against the
        # clone time). Confirm before removing.
        if os.path.isfile(gitpath):
            pathlibpath = pathlib.Path(gitpath)
            delta = modtime - datetime.fromtimestamp(pathlibpath.stat().st_mtime)
        else:
            delta = timedelta(hours=2)
        if modtime.date() != date.today():
            reportError("Warning: README file was not updated today")
        else:
            # NOTE(review): this prints a reminder even though the README WAS
            # modified today -- confirm whether the branches are swapped.
            print("Remember to update README file.")
# Validates one relation string of the form "<language>/<resource-id>".
# NOT DONE - need to support UHG-type entries
def verifyRelation(rel):
    if not isinstance(rel, str):
        reportError("Relation element is not a string: " + str(rel))
    elif len(rel) < 5:
        reportError("Invalid value for relation element: " + rel)
    else:
        parts = rel.split('/')
        if len(parts) != 2:
            reportError("Invalid format for relation element: " + rel)
        else:
            global projtype
            # el-x-koine (Greek) and hbo (Hebrew) are original-language exceptions.
            if parts[0] != getLanguageId() and parts[0] != "el-x-koine" and parts[0] != "hbo":
                reportError("Incorrect language code for relation element: " + rel)
            if parts[1] not in {'obs','obs-tn','obs-tq','obs-sn','obs-sq','tn','tq','tw','ta','tm'} and not isBibleType(parts[1]):
                # ugnt/uhb resources may carry version suffixes, e.g. "ugnt?v=0.34".
                if parts[1][0:4] != 'ugnt' and parts[1][0:3] != 'uhb':
                    reportError("Invalid project code in relation element: " + rel)
            if parts[1] == projtype or (projtype == 'tn-tsv' and parts[1] == 'tn'):
                reportError("Project code in relation element is same as current project: " + rel)
# The relation element is a list of strings.
# Checks for duplicates, minimum length, per-element validity, and the
# tn-tsv requirement of an el-x-koine/ugnt reference.
def verifyRelations(relations):
    uniq = set(relations)
    if len(uniq) < len(relations):
        reportError("There are duplicates in the relations list")
    if len(uniq) < 2 and not isBibleType(projtype):
        reportError("The relations list seems incomplete")
    uhg = False     # set True when an el-x-koine/ugnt?v=... relation is seen
    if len(relations) < 1 and projtype != 'reg':
        reportError("No relations are listed")
    for r in relations:
        verifyRelation(r)
        if projtype == 'tn-tsv':
            parts = r.split('/')
            if len(parts) == 2 and parts[0] == 'el-x-koine' and 'ugnt?v=' in parts[1]:
                uhg = True
    if projtype == 'tn-tsv' and not uhg:
        reportError("Must reference 'el-x-koine/ugnt?v=...' in relation")
    # glt/ult should be the first relation listed for tn and tw projects.
    if projtype in {'tn-tsv','tw'} and '/glt' not in relations[0] and '/ult' not in relations[0]:
        reportError("'glt' should be first relation listed for tn and tw projects, if there is a glt")
# Validates the source field, which is an array of dictionaries.
def verifySource(source):
    if not source or len(source) < 1:
        reportError("Invalid source spec: should be an array of dictionary of three fields.")
    for dict in source:
        verifyKeys("source[x]", dict, ['language', 'identifier', 'version'])
        global projtype
        if dict['identifier'] != projtype and projtype in {'obs', 'obs-tn','obs-tq','obs-sn','obs-sq', 'tn', 'tq', 'tw'}:
            reportError("Inappropriate source:identifier (" + dict['identifier'] + ") for project type: " + projtype)
        if dict['identifier'] != 'ulb' and projtype == 'reg':
            reportError("Incorrect source:identifier for reg project: " + dict['identifier'])
        if dict['identifier'] != 'tn' and projtype == 'tn-tsv':
            reportError("Incorrect source:identifier for tn-tsv project: " + dict['identifier'])
        # NOTE(review): re.match anchors only at the start and this pattern
        # requires just two characters, so trailing invalid characters pass
        # and one-letter identifiers fail -- confirm the intended rule.
        if not re.match(r'[a-z][a-z0-9-]', dict['identifier'], re.UNICODE):
            reportError("Invalid source:identifier (need lower case ascii, no spaces): " + dict['identifier'])
        if dict['language'] == 'English':
            reportError("Use a language code in source:language, not \'" + dict['language'] + '\'')
        elif dict['language'] == getLanguageId():
            reportError("Warning: source:language matches target language")
        elif dict['language'] not in {'en','hbo','el-x-koine'}:
            reportError("Possible bad source:language: " + dict['language'])
        verifyStringField(dict, 'version', 1)
# Validates that the specified key is a string of specified minimum length.
# Returns False if there is a problem. A missing key is not treated as a
# problem here (verifyKeys reports missing fields separately).
def verifyStringField(dict, key, minlength):
    if key not in dict:
        return True
    value = dict[key]
    if not isinstance(value, str):
        reportError("Value must be a string: " + key + ": " + str(value))
        return False
    if len(value) < minlength:
        reportError("Invalid value for " + key + ": " + value)
        return False
    return True
# Validates the subject field: it must equal the expected subject string
# for the current project type (projtype global).
def verifySubject(subject):
    if isBibleType(projtype):
        if subject not in {'Bible', 'Aligned Bible'}:
            reportError("Invalid subject: " + subject + " (expected 'Bible' or 'Aligned Bible')")
        elif isAlignedBibleType(projtype) and subject != "Aligned Bible":
            reportError("Invalid subject: " + subject + " (expected 'Aligned Bible')")
        expected_subject = subject # to avoid redundant error msgs
    elif projtype == 'ta':
        expected_subject = 'Translation Academy'
    elif projtype == 'tw':
        expected_subject = 'Translation Words'
    elif projtype == 'tn':
        expected_subject = 'Translation Notes'
    elif projtype == 'tn-tsv':
        expected_subject = 'TSV Translation Notes'
    # (removed a duplicate, unreachable `elif projtype == 'tn'` branch that
    # repeated the one above)
    elif projtype == 'tq':
        expected_subject = 'Translation Questions'
    elif projtype == 'obs':
        expected_subject = 'Open Bible Stories'
    elif projtype == 'obs-tq':
        expected_subject = 'OBS Translation Questions'
    elif projtype == 'obs-tn':
        expected_subject = 'OBS Translation Notes'
    elif projtype == 'obs-sq':
        expected_subject = 'OBS Study Questions'
    elif projtype == 'obs-sn':
        expected_subject = 'OBS Study Notes'
    else:
        sys.stdout.write("Verify subject manually.\n")
        expected_subject = subject
    if subject != expected_subject:
        reportError("Invalid subject: " + subject + " (expected '" + expected_subject + "')")
# Verifies that the title is a sufficiently long string and, for Bible
# projects, does not contradict the project type (e.g. "Literal" in the
# title of a simplified/dynamic Bible).
def verifyTitle(title):
    if not isinstance(title, str):
        reportError("Incorrect type for title field: " + str(title))
    elif len(title) < 3:
        reportError("String value too short for title: " + title)
    if isBibleType(projtype):
        is_dynamic = projtype in {'iev', 'udb', 'ust', 'gst'}
        is_literal = projtype in {'irv', 'isv', 'ulb', 'ult', 'glt'}
        if is_dynamic and ("Literal" in title or "Revised" in title):
            reportError("Title contradicts project type: " + title)
        elif is_literal and ("Easy" in title or "Dynamic" in title):
            reportError("Title contradicts project type: " + title)
# Determines whether toc.yaml file exists and parses as YAML.
# Add checking for translated contents later.
def verifyTocYaml(yamlpath):
    if os.path.isfile(yamlpath):
        # Context manager closes the file even if the YAML is malformed.
        with io.open(yamlpath, "tr", encoding='utf-8-sig') as yamlFile:
            contents = yaml.safe_load(yamlFile)
    else:
        reportError("file missing: " + shortname(yamlpath))
# For tA projects, verify that each folder has a valid toc.yaml and config.yaml file.
def verifyYamls(dirpath, folder):
    """Validate the config.yaml and toc.yaml files inside dirpath/folder."""
    folderpath = os.path.join(dirpath, folder)
    configpath = os.path.join(folderpath, "config.yaml")
    if not os.path.isfile(configpath):
        reportError("file missing: " + shortname(configpath))
    else:
        with io.open(configpath, "tr", encoding='utf-8-sig') as configFile:
            yaml.safe_load(configFile)   # parse to verify the file is valid YAML
    # For toc.yaml, need to check contents as well as existence of file
    verifyTocYaml( os.path.join(folderpath, "toc.yaml") )
def verifyType(type):
    """Report an error if the manifest's type does not match the
    (module-global) projtype. Unknown project types get a manual-check
    prompt and are not flagged."""
    expected = None
    if projtype == 'ta':
        expected = 'man'
    elif projtype == 'tw':
        expected = 'dict'
    elif projtype in {'tn', 'tn-tsv', 'tq', 'obs-tn', 'obs-tq', 'obs-sn', 'obs-sq'}:
        expected = 'help'
    elif isBibleType(projtype):
        expected = 'bundle'
    elif projtype == 'obs':
        expected = 'book'
    else:
        sys.stdout.write("Verify type manually.\n")
    if expected is not None and type != expected:
        reportError("Invalid type: " + type)
def verifyVersion(version, sourceversion):
    # The rules seem to be optional, so may comment out most of this code if necessary.
    # NOTE: the actual version/source-version consistency checks below are
    # currently disabled; only the split is performed and its result is
    # unused while the checks remain commented out.
    parts = version.rsplit('.', 1)
    # if int(sourceversion) < 100 and (len(parts) < 2 or parts[0] != sourceversion or int(parts[-1]) < 1):
    # reportError("Invalid version: " + version + "; Source version is " + sourceversion)
    # if int(sourceversion) >= 100 and (len(parts) > 1 or int(parts[0]) > 99):
    # reportError("Invalid version: " + version + ". Source version is " + sourceversion)
# Returns True if the file has a BOM
def has_bom(path):
    """Return True if the file at *path* starts with any known Unicode BOM
    (UTF-8, UTF-16 LE/BE, or UTF-32 LE/BE)."""
    known_boms = (codecs.BOM_UTF8, codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE,
                  codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
    with open(path, 'rb') as f:
        head = f.read(4)    # the longest BOM (UTF-32) is 4 bytes
    return any(head.startswith(bom) for bom in known_boms)
def shortname(longpath):
    """Return longpath with the (module-global) manifestDir prefix and its
    trailing separator stripped, when manifestDir occurs in the path."""
    if manifestDir in longpath:
        return longpath[len(manifestDir) + 1:]
    return longpath
if __name__ == "__main__":
    # Entry point: take the manifest directory from the command line,
    # unless the placeholder 'hard-coded-path' is given, in which case the
    # module-level manifestDir default (defined elsewhere in this file) is used.
    if len(sys.argv) > 1 and sys.argv[1] != 'hard-coded-path':
        manifestDir = sys.argv[1]
    if os.path.isdir(manifestDir):
        verifyDir(manifestDir)
    else:
        reportError("Invalid directory: " + manifestDir + '\n')
    # issuesFile / nIssues are module-level state maintained by reportError
    # elsewhere in this file; close the log and print a summary.
    if issuesFile:
        issuesFile.close()
    if nIssues == 0:
        print("Done, no issues found.\n")
    else:
        print("Finished checking, found " + str(nIssues) + " issues.\n")
lazyboy/view.py | joestump/lazyboy | 1 | 12760355 | <gh_stars>1-10
# -*- coding: utf-8 -*-
#
# © 2009 Digg, Inc. All rights reserved.
# Author: <NAME> <<EMAIL>>
#
"""Lazyboy: Views."""
import datetime
import uuid
from cassandra.ttypes import SlicePredicate, SliceRange
from lazyboy.key import Key
from lazyboy.base import CassandraBase
from lazyboy.iterators import multigetterator, unpack
from lazyboy.record import Record
from lazyboy.connection import Client
def _iter_time(start=None, **kwargs):
"""Return a sequence which iterates time."""
day = start or datetime.datetime.today()
intv = datetime.timedelta(**kwargs)
while day.year >= 1900:
yield day.strftime('%Y%m%d')
day = day - intv
def _iter_days(start=None):
    """Yield 'YYYYMMDD' strings one day at a time, counting backwards
    (thin wrapper around _iter_time with a one-day step)."""
    return _iter_time(start, days=1)
class View(CassandraBase):
    """A regular view.

    A view is one Cassandra row whose column values hold the row keys of
    records; iterating the view loads each referenced record in column order.
    """

    def __init__(self, view_key=None, record_key=None, record_class=None):
        # view_key: Key of the row holding this view's columns.
        # record_key: template Key, cloned once per referenced record.
        # record_class: class instantiated when loading records (defaults to Record).
        assert not view_key or isinstance(view_key, Key)
        assert not record_key or isinstance(record_key, Key)
        assert not record_class or isinstance(record_class, type)
        CassandraBase.__init__(self)
        self.chunk_size = 100       # columns fetched per get_slice page
        self.key = view_key
        self.record_key = record_key
        self.record_class = record_class or Record
        self.reversed = 0           # slice direction flag passed to Cassandra

    def __repr__(self):
        return "%s: %s" % (self.__class__.__name__, self.key)

    def __len__(self):
        """Return the number of records in this view."""
        return self._get_cas().get_count(
            self.key.keyspace, self.key.key, self.key, self.consistency)

    def _keys(self, start_col=None, end_col=None):
        """Return keys in the view.

        Generator that pages through the view row chunk_size columns at a
        time and yields a cloned record_key for every column value.
        """
        client = self._get_cas()
        assert isinstance(client, Client), \
            "Incorrect client instance: %s" % client.__class__
        last_col = start_col or ""
        end_col = end_col or ""
        chunk_size = self.chunk_size
        passes = 0
        while True:
            # When you give Cassandra a start key, it's included in the
            # results. We want it in the first pass, but subsequent iterations
            # need the count adjusted and the first column dropped.
            fudge = int(passes > 0)
            cols = client.get_slice(
                self.key.keyspace, self.key.key, self.key,
                SlicePredicate(slice_range=SliceRange(
                    last_col, end_col, self.reversed, chunk_size + fudge)),
                self.consistency)

            if len(cols) == 0:
                raise StopIteration()

            for col in unpack(cols[fudge:]):
                yield self.record_key.clone(key=col.value)
                last_col = col.name

            passes += 1

            # A short page means the end of the row was reached.
            if len(cols) < self.chunk_size:
                raise StopIteration()

    def __iter__(self):
        """Iterate over all objects in this view."""
        return (self.record_class().load(key) for key in self._keys())

    def _record_key(self, record=None):
        """Return the column name for a given record.

        Falls back to a fresh time-based UUID string when no record is given.
        """
        return record.key.key if record else str(uuid.uuid1())

    def append(self, record):
        """Append a record to a view"""
        assert isinstance(record, Record), \
            "Can't append non-record type %s to view %s" % \
            (record.__class__, self.__class__)
        self._get_cas().insert(
            self.key.keyspace, self.key.key,
            self.key.get_path(column=self._record_key(record)),
            record.key.key, record.timestamp(), self.consistency)

    def remove(self, record):
        """Remove a record from a view"""
        assert isinstance(record, Record), \
            "Can't remove non-record type %s to view %s" % \
            (record.__class__, self.__class__)
        self._get_cas().remove(
            self.key.keyspace, self.key.key,
            self.key.get_path(column=self._record_key(record)),
            record.timestamp(), self.consistency)
class FaultTolerantView(View):
    """A view which ignores missing keys."""

    def __iter__(self):
        """Iterate over all objects in this view, ignoring bad keys."""
        for key in self._keys():
            try:
                yield self.record_class().load(key)
            except GeneratorExit:
                # Must propagate so the generator can be closed cleanly.
                raise
            except Exception:
                # Deliberate best-effort: any record that fails to load is
                # skipped silently (hence "fault tolerant").
                pass
class BatchLoadingView(View):
    """A view which loads records in bulk."""

    def __init__(self, view_key=None, record_key=None, record_class=None):
        """Initialize the view, setting the chunk_size to a large value."""
        View.__init__(self, view_key, record_key, record_class)
        self.chunk_size = 5000

    def __iter__(self):
        """Batch load and iterate over all objects in this view."""
        # Fetch all keys up front, then load every record with a single
        # multiget round trip instead of one fetch per record.
        keys = tuple(self._keys())
        recs = multigetterator(keys, self.consistency)
        # Nothing came back for this keyspace/column family: empty view.
        if (self.record_key.keyspace not in recs
            or self.record_key.column_family not in
            recs[self.record_key.keyspace]):
            raise StopIteration()

        data = recs[self.record_key.keyspace][self.record_key.column_family]
        for k in keys:
            # _inject builds the record instance from already-fetched data.
            yield (self.record_class()._inject(
                self.record_key.clone(key=k.key), data[k.key]))
class PartitionedView(object):
    """A Lazyboy view which is partitioned across rows.

    Subclasses override partition_keys() to enumerate the row keys; each
    key is wrapped in a view_class instance built from view_key.
    """

    def __init__(self, view_key=None, view_class=None):
        # view_key: base Key cloned for each partition row.
        # view_class: View subclass instantiated per partition.
        self.view_key = view_key
        self.view_class = view_class

    def partition_keys(self):
        """Return a sequence of row keys for the view partitions."""
        return ()

    def _get_view(self, key):
        """Return an instance of a view for a partition key."""
        return self.view_class(self.view_key.clone(key=key))

    def __iter__(self):
        """Iterate over records in the view, partition by partition."""
        for view in (self._get_view(key) for key in self.partition_keys()):
            for record in view:
                yield record

    def _append_view(self, record):
        """Return the view which this record should be appended to.

        This defaults to the first view from partition_keys, but you
        can partition by anything, e.g. first letter of some field in
        the record.
        """
        # Use the builtin next() rather than the Python-2-only iterator
        # method .next(), so this works on both Python 2.6+ and Python 3.
        key = next(iter(self.partition_keys()))
        return self._get_view(key)

    def append(self, record):
        """Append a record to the view."""
        return self._append_view(record).append(record)
| 2.359375 | 2 |
telegram_bot/keyboard/inline/__init__.py | alenworld/django_telegram_bot | 3 | 12760356 | from .utils import make_addresses_inline_keyboard, keyboard_confirm_decline_broadcasting
from .faq import make_faq_inline_keyboard
| 0.976563 | 1 |
test_proj/apps/app1/models/relations.py | andrewbird2/django-data-validation | 1 | 12760357 | from random import choice, sample, seed
from typing import List
from datavalidation import data_validator, PASS, FAIL, NA
from django.db import models
from .base import BaseModel
seed(1234)
class Relation(BaseModel):
    # Optional back-link to a RelatedFields row; populated (or left null)
    # randomly by RelatedFieldsManager.generate() below.
    fkey = models.ForeignKey(
        "RelatedFields", on_delete=models.CASCADE, blank=True, null=True
    )

    class TestData:
        ORDER = 1  # generate data on this model first
class RelatedFieldsManager(models.Manager):
    def generate(self, passing: int = 0) -> List["RelatedFields"]:
        """ generate objects with related fields

        Creates `passing` new RelatedFields rows wired up to existing
        Relation rows (o2o/fkey from opposite ends of the sorted id list,
        4 random m2m links each), then randomly points roughly half of all
        Relation rows back at the new objects.
        """
        relatives = sorted(Relation.objects.all().values_list("id", flat=True))
        existing = RelatedFields.objects.count()
        # Need one unused Relation id per new object for the o2o link.
        assert existing + passing <= len(relatives)
        # add relations: RelatedFields -> Relation
        # NOTE(review): fkey uses relatives[-ix]; for ix == 0 that is
        # relatives[0], i.e. o2o and fkey share a Relation -- confirm intended.
        objs = []
        for ix in range(existing, existing + passing):
            objs.append(RelatedFields(
                o2o_id=relatives[ix],
                fkey_id=relatives[-ix],
            ))
        objs = self.bulk_create(objs)
        # add M2M relations
        for obj in objs:
            obj.m2m.set(sample(relatives, k=4))
        # add relations: Relation -> RelatedFields
        updated = []
        for relative in Relation.objects.all():
            if choice([True, False]):
                relative.fkey = choice(objs)
                updated.append(relative)
        Relation.objects.bulk_update(updated, fields=["fkey"])
        return objs
class RelatedFields(models.Model):
    """ Models with Related Fields
    tests: related fields with select_related/prefetch_related
    """
    # Links into Relation; distinct related_names let Relation be traversed
    # back through either edge (o2o_relation / fkey_relation).
    o2o = models.OneToOneField(
        Relation, on_delete=models.CASCADE, related_name="o2o_relation"
    )
    fkey = models.ForeignKey(
        Relation, on_delete=models.CASCADE, related_name="fkey_relation"
    )
    m2m = models.ManyToManyField(Relation, through="RelatedFieldsM2M")

    objects = RelatedFieldsManager()

    class TestData:
        ORDER = 2  # generate data on this model second

    @data_validator(select_related="o2o")
    def select_related_o2o(self):
        """ tests: select_related on OneToOneField """
        if self.o2o.foobar is None:
            return NA
        return self.o2o.foobar < 10

    @data_validator(select_related=["fkey"])
    def select_related_fkey(self):
        """ tests: select_related on ForeignKey """
        # NOTE(review): None-check is on fkey.foobar but the comparison uses
        # o2o.foobar -- looks like a copy/paste slip; confirm before changing
        # since test fixtures may depend on the current behavior.
        if self.fkey.foobar is None:
            return NA
        return self.o2o.foobar < 10

    @data_validator(select_related="fkey__o2o_realtion__fkey")
    def select_related_multi(self):
        """ tests select_related with multiple levels """
        # NOTE(review): "o2o_realtion" in the decorator is a misspelling of
        # "o2o_relation" -- possibly deliberate to test bad related paths.
        foobar = self.fkey.o2o_relation.fkey.foobar
        if foobar is None:
            return NA
        return foobar < 10

    @data_validator(prefetch_related="relation_set")
    def prefetch_related_rev_fkey(self):
        """ tests: prefetch_related on a ReverseForeignKey """
        return self.relation_set.filter(foobar__gte=10).count() == 0

    @data_validator(prefetch_related="m2m")
    def prefetch_related_m2m(self):
        """ tests: prefetch_related on a ManyToManyField """
        # generate() always sets exactly 4 m2m links per object
        if self.m2m.count() == 4:
            return PASS
        return FAIL

    @data_validator(select_related="wibble", prefetch_related="wobble")
    def bad_related_names(self):
        """ tests: user has used invalid select_related and
        prefetch_related fields
        """
        return PASS

    @data_validator(select_related=["fkey"])
    @classmethod
    def useless_select_related(cls):
        """ test select_related with classmethod doesn't raise error
        (even if it does nothing)
        """
        return PASS
class RelatedFieldsM2M(models.Model):
    # Explicit through-table for RelatedFields.m2m; rows are created via
    # m2m.set() in the manager, never generated directly.
    class TestData:
        NO_GENERATE = True

    rf = models.ForeignKey(RelatedFields, blank=True, null=True, on_delete=models.SET_NULL)
    rl = models.ForeignKey(Relation, blank=True, null=True, on_delete=models.SET_NULL)
| 2.3125 | 2 |
segmatch/python/autoencoder_node.py | OpenSLAM/segmatch | 1 | 12760358 |
# coding: utf-8
# In[ ]:
from __future__ import print_function
import numpy as np
import tensorflow as tf
from autoencoder import model
import pickle
import os
# In[ ]:
DEBUG = False
PLOTTING_SUPPORT = True
RUN_AS_PY_SCRIPT = False
SET_EULER_PARAMS = False
SET_MARMOT_PARAMS = False
# Handle arguments (When executed as .py script)
import sys
argv = sys.argv[:]
if len(argv) > 1:
script_path = argv.pop(0)
if "--euler" in argv:
import sys
sys.stdout = open('stdout.txt', 'w')
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
SET_EULER_PARAMS = True
print("Parameters set for execution on euler cluster")
argv.remove("--euler")
if "--marmot" in argv:
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
SET_MARMOT_PARAMS = True
print("Parameters set for execution on marmot cluster")
argv.remove("--marmot")
if "--script" in argv:
RUN_AS_PY_SCRIPT = True
PLOTTING_SUPPORT = False
print("Running as script")
argv.remove("--script")
# In[ ]:
if not RUN_AS_PY_SCRIPT:
get_ipython().magic('load_ext autoreload')
get_ipython().magic('autoreload 2')
from IPython.display import clear_output
if PLOTTING_SUPPORT:
get_ipython().magic('matplotlib notebook')
from matplotlib import pyplot as plt
# ## Parameters
# In[ ]:
BATCH_SIZE = 10
VOXEL_SIDE = 24
MAX_STEPS = 10000
VAL_EXAMPLES = 200
N_ROTATION_ANGLES = 12
ROTATION_OFFSET = 0
VAL_EVERY_N_STEPS = 1
VAL_STEP_TOLERANCE = 3
TRAIN_TWINS = False
MP = model.ModelParams()
MP.INPUT_SHAPE = [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE, 1]
HOME_DIR = os.path.expanduser('~')
DATA_DIR = "./database/"
RUN_NAME = "kitti18"
RESTORE_MODEL = True
SAVE_DIR = HOME_DIR + "/Desktop/autoencoder/"
SAVE_FILE = "model.checkpoint"
MP_FILENAME = "model_params.pckl"
TENSORBOARD_DIR = HOME_DIR + "/tensorboard"
SAVE_UNVALIDATED = False
CREATE_VISUALS = False
DETAILED_STEP_TIMES = False
EXPORT_FEATURES = False
# In[ ]:
if SET_EULER_PARAMS:
DATA_DIR = "/cluster/home/dugasd/database/"
SAVE_DIR = "/cluster/home/dugasd/autoencoder-euler/"
TENSORBOARD_DIR = None
CREATE_VISUALS = False
MAX_STEPS = 1000000
VAL_STEP_TOLERANCE = 5
if SET_MARMOT_PARAMS:
DATA_DIR = "/home/daniel/database/"
RUN_NAME = "kitti18-20-27"
SAVE_DIR = "/home/daniel/autoencoder-marmot/"
TENSORBOARD_DIR = None
CREATE_VISUALS = False
MAX_STEPS = 1000000
VAL_STEP_TOLERANCE = 10
N_ROTATION_ANGLES = 36
if not RUN_AS_PY_SCRIPT:
#MP.CONVOLUTION_LAYERS = [{'type': 'conv3d', 'filter': [5, 5, 5, 1, 10], 'downsampling': {'type': 'max_pool3d', 'k': 2}}]
MP.CONVOLUTION_LAYERS = []
#MP.LATENT_SHAPE = [2]
N_ROTATION_ANGLES = 6
CREATE_VISUALS = True
TRAIN_TWINS = True
# In[ ]:
if RUN_AS_PY_SCRIPT:
while argv:
arg = argv.pop(0)
if arg == "-RUN_NAME":
RUN_NAME = argv.pop(0)
print("RUN_NAME set to " + RUN_NAME)
elif arg == "-SAVE_DIR":
SAVE_DIR = argv.pop(0)
print("SAVE_DIR set to " + SAVE_DIR)
elif arg == "--noconv":
MP.CONVOLUTION_LAYERS = []
print("CONVOLUTION LAYERS REMOVED")
elif arg == "--twins":
TRAIN_TWINS = True
print("Training twins.")
elif arg == "-LEARNING_RATE":
MP.LEARNING_RATE = float(argv.pop(0))
print("LEARNING_RATE set to " + str(MP.LEARNING_RATE))
elif arg == "-LATENT_SHAPE":
MP.LATENT_SHAPE = [int(argv.pop(0))]
print("LATENT_SHAPE set to " + str(MP.LATENT_SHAPE))
elif arg == "-VAL_STEP_TOLERANCE":
VAL_STEP_TOLERANCE = int(argv.pop(0))
print("VAL_STEP_TOLERANCE set to " + str(VAL_STEP_TOLERANCE))
elif arg == "-N_ROTATION_ANGLES":
N_ROTATION_ANGLES = int(argv.pop(0))
print("N_ROTATION_ANGLES set to " + str(N_ROTATION_ANGLES))
elif arg == "-ROTATION_OFFSET":
frac = list(map(float, argv.pop(0).split('/'))) + [1.0]
ROTATION_OFFSET = frac[0]/frac[1]
print("ROTATION_OFFSET set to " + str(ROTATION_OFFSET))
elif arg == "--float64":
MP.FLOAT_TYPE = tf.float64
print("MP.FLOAT_TYPE set to " + str(MP.FLOAT_TYPE))
else:
print("Unknown argument: " + arg)
raise NotImplementedError
# In[ ]:
SAVE_PATH = SAVE_DIR+SAVE_FILE
if SAVE_UNVALIDATED:
SAVE_DIR_NOVAL = SAVE_DIR+"unvalidated/"
SAVE_PATH_NOVAL = SAVE_DIR_NOVAL+SAVE_FILE
# ## Load Segments and Features
# In[ ]:
import utilities
run_names, runs = utilities.list_runs(DATA_DIR)
try:
run_names.remove(RUN_NAME)
run_names = [RUN_NAME] + run_names
except:
print(RUN_NAME + " not found in runs.")
print(run_names)
# In[ ]:
if not RUN_AS_PY_SCRIPT:
from ipywidgets import widgets
run_dropdown = widgets.Dropdown(description="Run to import : ", options=run_names)
button = widgets.Button(description="import")
# Interaction functions
def import_run_data(btn):
display.clear_output()
print("Loading segments, features, matches, classes for run")
global segments, features, fnames, matches, classes, ids, classes_set # 'output' variables
segments, features, fnames, matches, classes, ids = utilities.import_run(run_dropdown.value, folder=DATA_DIR)
classes_set = sorted(list(set(classes)))
button.on_click(import_run_data)
# Display widgets
from IPython import display
display.display(run_dropdown)
display.display(button)
import_run_data(button)
else:
segments, features, fnames, matches, classes, ids = utilities.import_run(RUN_NAME, folder=DATA_DIR)
classes_set = sorted(list(set(classes)))
# ## Create Autoencoder
# In[ ]:
if not RUN_AS_PY_SCRIPT:
try:
stored_MP = pickle.load(open(SAVE_DIR+MP_FILENAME, 'rb'))
if MP != stored_MP:
print("WARNING: Setting params for compatibility with stored model.")
print("Stored model: "); print(stored_MP); print("New model: "); print(MP)
MP = stored_MP
except FileNotFoundError:
print("No stored model found. Creating a new model.")
# In[ ]:
vae = model.Autoencoder(MP)
if TRAIN_TWINS: vae.build_twin_graph()
# In[ ]:
summary_writer = None
if TENSORBOARD_DIR != None:
summary_writer = tf.train.SummaryWriter(TENSORBOARD_DIR, vae.sess.graph)
# In[ ]:
if RESTORE_MODEL:
try:
vae.saver.restore(vae.sess, SAVE_PATH)
print("Model restored.")
print(MP.CONVOLUTION_LAYERS)
except Exception as err:
print("Could not load model: ", end="")
try:
stored_MP = pickle.load(open(SAVE_DIR+MP_FILENAME, 'rb'))
print("ERROR: mismatch between model params.")
print("Stored model: "); print(stored_MP); print("New model: "); print(MP)
raise err
except:
print("no model folder.")
# ## Create Voxelized Segment Dataset - With Rotated Copies
# In[ ]:
## Split into training and val data
split_at = min(VAL_EXAMPLES, int(0.2 * len(ids)))
val = segments[:split_at]
train = segments[split_at:]
# In[ ]:
if not TRAIN_TWINS:
print("Rotating segments")
from voxelize import create_rotations
train = create_rotations(train, N_ROTATION_ANGLES, ROTATION_OFFSET)
val = create_rotations(val, 12, ROTATION_OFFSET)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox = None
val_twins_vox = None
if train_vox[0].shape != MP.INPUT_SHAPE:
print("Reshaping")
train_vox=[np.reshape(vox, MP.INPUT_SHAPE) for vox in train_vox]
val_vox=[np.reshape(vox, MP.INPUT_SHAPE) for vox in val_vox]
del train # Save some memory
else:
from voxelize import create_twins
val, val_twins = create_twins(val)
train, train_twins = create_twins(train)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox, _ = voxelize(train_twins,VOXEL_SIDE)
val_twins_vox, _ = voxelize(val_twins ,VOXEL_SIDE)
del train_twins
# In[ ]:
import os
import psutil
process = psutil.Process(os.getpid())
print("Using " + str(process.memory_info().rss/(1024.0*1024.0)) + "mB of memory")
# ## Train Autoencoder ( Computationally Intensive )
# In[ ]:
from timeit import default_timer as timer
from autoencoder.batchmaker import Batchmaker, progress_bar
total_step_cost = None
step_cost_log = []
total_val_cost = 0
val_steps_since_last_improvement = 0
step_start = timer()
try:
val_cost_log = list(np.loadtxt(SAVE_DIR+"val_cost_log.txt"))
print("Previous cost log found.")
except:
val_cost_log = []
# single step
for step in range(MAX_STEPS):
if TRAIN_TWINS:
val, val_twins = create_twins(val)
train, train_twins = create_twins(train)
print("Voxelizing training data")
from voxelize import voxelize
train_vox, _ = voxelize(train,VOXEL_SIDE)
val_vox, _ = voxelize(val ,VOXEL_SIDE)
train_twins_vox, _ = voxelize(train_twins,VOXEL_SIDE)
val_twins_vox, _ = voxelize(val_twins ,VOXEL_SIDE)
del train_twins
# Validation
val_batchmaker = Batchmaker(val_vox, val_twins_vox, BATCH_SIZE, MP)
if np.mod(step, VAL_EVERY_N_STEPS) == 0:
total_val_cost = 0
while True:
if val_batchmaker.is_depleted():
break
else:
batch_input_values, batch_twin_values = val_batchmaker.next_batch()
cost_value = vae.cost_on_single_batch(batch_input_values, batch_twin_values)
total_val_cost += cost_value
if PLOTTING_SUPPORT:
progress_bar(val_batchmaker)
print("Validation cost: "+str(total_val_cost)+" (Training cost: "+str(total_step_cost)+")", end="")
try:
print(" Step Time: " + str(step_end-step_start))
if DETAILED_STEP_TIMES:
print(step_times)
except:
print(" ")
val_cost_log.append(total_val_cost)
# Training Monitor
if len(val_cost_log) > 1:
# Save cost log.
import os
if not os.path.exists(SAVE_DIR):
os.makedirs(SAVE_DIR)
if SAVE_UNVALIDATED: os.makedirs(SAVE_DIR_NOVAL)
print("Created directory: %s" % SAVE_DIR)
with open(SAVE_DIR+MP_FILENAME, 'wb') as file:
pickle.dump(MP, file, protocol=2)
np.savetxt(SAVE_DIR+"val_cost_log.txt", val_cost_log)
# Save if cost has improved. Otherwise increment counter.
if val_cost_log[-1] < min(val_cost_log[:-1]):
val_steps_since_last_improvement = 0
# save model to disk
print("Saving ... ", end='')
save_path = vae.saver.save(vae.sess, SAVE_PATH)
print("Model saved in file: %s" % save_path)
else:
val_steps_since_last_improvement += 1
# Stop training if val_cost hasn't improved in VAL_STEP_TOLERANCE steps
if val_steps_since_last_improvement > VAL_STEP_TOLERANCE:
if SAVE_UNVALIDATED:
print("Saving ... ", end='')
save_path = vae.saver.save(vae.sess, SAVE_PATH_NOVAL)
print("Unvalidated model saved in file: %s" % save_path)
print("Training stopped by validation monitor.")
break
# Train on batches
step_start = timer()
zero = timer() - timer()
step_times = {'batchmaking': zero, 'training': zero, 'plotting': zero}
total_step_cost = 0
training_batchmaker = Batchmaker(train_vox, train_twins_vox, BATCH_SIZE, MP)
while True:
if training_batchmaker.is_depleted():
break
else:
t_a = timer()
batch_input_values, batch_twin_values = training_batchmaker.next_batch()
t_b = timer()
# Train over 1 batch.
cost_value = vae.train_on_single_batch(batch_input_values, batch_twin_values, summary_writer=summary_writer)
total_step_cost += cost_value
t_c = timer()
if PLOTTING_SUPPORT:
progress_bar(training_batchmaker)
t_d = timer()
step_times['batchmaking'] += t_b - t_a
step_times['training'] += t_c - t_b
step_times['plotting'] += t_d - t_c
step_cost_log.append(total_step_cost)
step_end = timer()
print("Training ended.")
# ## Visualize Autoencoder Performance
# In[ ]:
if PLOTTING_SUPPORT:
    # Plot a few random validation samples: each pair of rows shows a
    # voxelized input (top) and its autoencoder reconstruction (bottom).
    import matplotlib.pyplot as plt
    get_ipython().magic('matplotlib notebook')
    plt.ion()
    n_samples = 5
    import random
    # Fix: sample n_samples items (was a hard-coded 5), so changing
    # n_samples above resizes the whole figure consistently.
    x_samples = random.sample(val_vox, n_samples)
    x_samples = [np.reshape(sample, MP.INPUT_SHAPE) for sample in x_samples]
    x_reconstruct = vae.encode_decode(x_samples)
    plt.figure(figsize=(8, 12))
    for i in range(n_samples):
        plt.subplot(n_samples*2, 1, 2*i + 1)
        plt.imshow(x_samples[i].reshape(VOXEL_SIDE, VOXEL_SIDE*VOXEL_SIDE), vmin=0, vmax=1, cmap='spectral')
        plt.title("Top: val input - Bottom: Reconstruction")
        plt.subplot(n_samples*2, 1, 2*i + 2)
        plt.imshow(x_reconstruct[i].reshape(VOXEL_SIDE, VOXEL_SIDE*VOXEL_SIDE), vmin=0, vmax=1, cmap='spectral')
    plt.tight_layout()
# In[ ]:
if PLOTTING_SUPPORT:
nx = ny = 4
nz = 1
dim1 = 0
dim2 = 1
dim3 = 0
x_values = np.linspace(-3, 3, nx)
y_values = np.linspace(-3, 3, ny)
z_values = np.linspace(-3, 3, nz)
canvas = np.empty((VOXEL_SIDE*ny, VOXEL_SIDE*nx, VOXEL_SIDE*nz))
for i, yi in enumerate(x_values):
for j, xi in enumerate(y_values):
for k, zi in enumerate(z_values):
# we can only visualize 3 dimensions, in this case the first 3
latent_sample = np.zeros([1]+MP.LATENT_SHAPE)
latent_sample.flat[dim1] = xi
latent_sample.flat[dim2] = yi
latent_sample.flat[dim3] = zi
x_mean = vae.decode(latent_sample)
canvas[(nx-i-1)*VOXEL_SIDE:(nx-i)*VOXEL_SIDE,
j*VOXEL_SIDE:(j+1)*VOXEL_SIDE,
k*VOXEL_SIDE:(k+1)*VOXEL_SIDE] \
= x_mean[0].reshape(VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE)
from mpl_toolkits.mplot3d import Axes3D
threshold = 0.7
X,Y,Z = np.where(canvas > (threshold*np.max(canvas)))
fig = plt.figure()
plt.cla()
ax = Axes3D(fig)
ax.scatter(X, Y, Z)
# ## Compute Autoencoder Features for Segments
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Voxelizing segments")
from voxelize import voxelize
segments_vox, features_voxel_scale = voxelize(segments, VOXEL_SIDE)
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Computing Eigenvalue Features")
from eigenvalues import eigenvalue_features
features_eig = eigenvalue_features(segments)
features_eig[np.where(np.isnan(features_eig))] = 0
F = features_eig
C = classes
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Computing Features for Segments")
features_nn, confusion_nn = vae.batch_encode([np.reshape(sample, MP.INPUT_SHAPE) for sample in segments_vox])
fnames_nn = ['autoencoder_feature'+str(i+1) for i, _ in enumerate(features_nn[0])]
F = features_nn
C = classes
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("Rotating segments")
from voxelize import create_rotations
rotated_segments, rotated_classes = create_rotations(segments, N_ROTATION_ANGLES, classes=classes)
if False: # walls_vs_cars
print("Removing unknowns")
rotated_segments = [segment for segment, class_ in zip(rotated_segments, rotated_classes) if class_ != "unknown"]
rotated_classes = [class_ for class_ in rotated_classes if class_ != "unknown"]
print("Voxelizing rotations")
from voxelize import voxelize
rotated_segments_vox, rotated_segments_scale = voxelize(rotated_segments, VOXEL_SIDE)
print("Computing Features for rotations")
rotated_features, _ = vae.batch_encode([np.reshape(sample, MP.INPUT_SHAPE) for sample in rotated_segments_vox])
F = rotated_features
C = rotated_classes
# ## T-SNE
# In[ ]:
if not RUN_AS_PY_SCRIPT:
print("T-SNE")
dir_ = "/tmp/online_matcher/visuals/"
import os
if not os.path.exists(dir_):
os.makedirs(dir_)
if MP.LATENT_SHAPE[0] == 2:
F2 = F
else:
from tools.tsne import tsne
F2 = tsne(F, err_threshold=1.0)
from itertools import cycle
cnames = ['dodgerblue', 'gold', 'silver', 'tomato',
'plum', 'lemonchiffon', 'grey', 'orchid', 'lime', 'palegreen']
from matplotlib import pyplot as plt
plt.figure(figsize=(12,7))
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_ in zip(F2, C) if class_ == name]
y = [values[1] for values, class_ in zip(F2, C) if class_ == name]
plt.scatter(x, y, c=c_, alpha=0.8, lw = 0)
box = plt.gca().get_position()
plt.gca().set_position([box.x0, box.y0, box.width * 0.8, box.height])
ncol = 2 if len(classes_set) > 10 else 1
plt.legend(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)
plt.title('T-SNE')
plt.xlabel('x_dim')
plt.ylabel('y_dim')
plt.show()
try:
plt.gcf().savefig(dir_+"t-sne.png")
except:
print("not saved.")
if len(matches) > 0:
print("Adding matches")
# Dim all points
plt.cla()
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_ in zip(F2, C) if class_ == name]
y = [values[1] for values, class_ in zip(F2, C) if class_ == name]
plt.scatter(x, y, c=c_, alpha=0.2, lw = 0)
plt.legend(classes_set, loc='center left', bbox_to_anchor=(1, 0.5), ncol=ncol)
plt.title('T-SNE')
plt.xlabel('x_dim')
plt.ylabel('y_dim')
# Bring out matched points
matched_ids = [id_ for match in matches for id_ in match]
for c_, name in zip(cycle(cnames), classes_set):
x = [values[0] for values, class_, id_ in zip(F2, C, ids) if class_ == name and id_ in matched_ids]
y = [values[1] for values, class_, id_ in zip(F2, C, ids) if class_ == name and id_ in matched_ids]
plt.scatter(x, y, c=c_, s=30, lw = 1)
# Show matches as lines
for match in matches:
line_x = [ F2[ids.index(match[0])][0], F2[ids.index(match[1])][0] ]
line_y = [ F2[ids.index(match[0])][1], F2[ids.index(match[1])][1] ]
plt.plot(line_x, line_y, 'black', linewidth=1)
try:
plt.gcf().savefig(dir_+"t-sne_matches.png")
except:
print("not saved.")
# ## Reconstructions
# In[ ]:
RC_CONFIDENCE = 0.2
ONEVIEW = True
# In[ ]:
# Reconstructions
if not RUN_AS_PY_SCRIPT:
N = 400
SV_ = segments_vox[:N]
S_ = segments[:N]
I_ = ids[:N]
reconstruction_vox = vae.batch_encode_decode([np.reshape(sample, MP.INPUT_SHAPE) for sample in SV_])
reconstruction_vox = [np.reshape(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE]) for vox in reconstruction_vox]
from voxelize import unvoxelize
reconstruction = [unvoxelize(vox > RC_CONFIDENCE) for vox in reconstruction_vox]
reconstruction = [segment*scale for (segment, scale) in zip(reconstruction, features_voxel_scale)]
if CREATE_VISUALS:
dir_ = "/tmp/online_matcher/visuals/reconstructions/"
from visuals import visuals_of_matches
reconstruction_ids = [id_+max(I_)+1 for id_ in I_]
one_to_one_matches = [[id1, id2] for id1, id2 in zip(I_, reconstruction_ids)]
visuals_of_matches(one_to_one_matches, S_+reconstruction, I_+reconstruction_ids, directory=dir_, oneview=ONEVIEW)
clear_output()
# In[ ]:
# Reconstructions of rotations for one object
if CREATE_VISUALS:
dir_ = "/tmp/online_matcher/visuals/rotations/"
class_name = "car"
class_ids = [np.random.choice([id_ for id_, class_ in zip(ids, classes) if class_ == class_name])]
class_indices = [ids.index(id_) for id_ in class_ids]
class_segments = np.array(segments)[class_indices]
from voxelize import create_rotations
class_rotated_segments = np.array(list(class_segments) + list(create_rotations(class_segments, N_ROTATION_ANGLES)))
from voxelize import voxelize
class_segments_vox, class_voxel_scale = voxelize(class_rotated_segments, VOXEL_SIDE)
if CREATE_VISUALS:
class_reconstruction_vox = vae.batch_encode_decode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
class_reconstruction_vox = [np.reshape(vox, [VOXEL_SIDE, VOXEL_SIDE, VOXEL_SIDE]) for vox in class_reconstruction_vox]
from voxelize import unvoxelize
class_reconstruction = [unvoxelize(vox > RC_CONFIDENCE) for vox in class_reconstruction_vox]
class_reconstruction = [segment*scale for (segment, scale) in zip(class_reconstruction, class_voxel_scale)]
from visuals import visuals_of_matches
fake_ids = list(range(len(class_reconstruction)))
fake_reconstruction_ids = [id_+max(fake_ids)+1 for id_ in fake_ids]
one_to_one_matches = [[id1, id2] for id1, id2 in zip(fake_ids, fake_reconstruction_ids)]
visuals_of_matches(one_to_one_matches,
list(class_rotated_segments)+class_reconstruction,
fake_ids+fake_reconstruction_ids,
directory=dir_, oneview=ONEVIEW)
clear_output()
class_features, confusion = vae.batch_encode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
class_features = np.array(class_features)
print(class_name)
print("Id: "+str(class_ids[0]))
from matplotlib import pyplot as plt
plt.figure()
plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
plt.plot(np.sqrt(np.exp(confusion)).T, 'r')
plt.show()
plt.gcf().savefig(dir_+"signature.png")
# In[ ]:
#Gifs
id_ = np.random.choice(ids)
print(id_)
segment = segments[ids.index(id_)]
import visuals
visuals.single_segment_as_gif(segment)
visuals.single_segment_reconstruction_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_rotations_reconstruction_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_degeneration_as_gif(segment, vae, confidence=0.3)
visuals.single_segment_confidence_as_gif(segment, vae)
# ## Class Signatures
# In[ ]:
if PLOTTING_SUPPORT:
dir_ = "/tmp/online_matcher/visuals/reconstructions/"
for class_name in classes_set:
print(class_name)
class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
class_indices = [ids.index(id_) for id_ in class_ids]
class_segments = np.array(segments)[class_indices]
class_features = np.array(features_nn)[class_indices]
class_confusion = np.array(confusion_nn)[class_indices]
from matplotlib import pyplot as plt
plt.figure()
plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
plt.plot(np.sqrt(np.exp(class_confusion)).T, 'r')
plt.show()
plt.gcf().savefig(dir_+class_name+"_signature.png")
# In[ ]:
if PLOTTING_SUPPORT:
    # Include Rotated segments
    # Same per-class signature plot as the previous cell, but the class
    # segments are augmented with N_ROTATION_ANGLES rotated copies before
    # voxelization and encoding.
    for class_name in classes_set:
        print(class_name)
        class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
        class_indices = [ids.index(id_) for id_ in class_ids]
        class_segments = np.array(segments)[class_indices]
        from voxelize import create_rotations
        class_rotated_segments = np.array(list(class_segments) + list(create_rotations(class_segments, N_ROTATION_ANGLES)))
        from voxelize import voxelize
        class_segments_vox, _ = voxelize(class_rotated_segments, VOXEL_SIDE)
        class_features, confusion = vae.batch_encode([np.reshape(vox, MP.INPUT_SHAPE) for vox in class_segments_vox])
        class_features = np.array(class_features)
        from matplotlib import pyplot as plt
        plt.figure()
        plt.step(range(len(class_features.T)), class_features.T, color='k', alpha=0.2, where='mid')
        plt.plot(np.sqrt(np.exp(confusion)).T, 'r')
        plt.show()
        plt.gcf().savefig(dir_+class_name+"_rotations_signature.png")
# In[ ]:
if PLOTTING_SUPPORT:
    from itertools import cycle
    colors = cycle(['dodgerblue', 'gold', 'silver', 'tomato'])
    # Mean |feature| per class: one colored curve per class, the dashed
    # horizontal line is that class' overall average.
    plt.figure()
    plt.title("Average absolute value of features, per class")
    plt.xlabel('feature #')
    plt.ylabel('avg(abs(feature))')
    for class_name, color_ in zip(classes_set, colors):
        class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
        class_indices = [ids.index(id_) for id_ in class_ids]
        class_features = np.array(features_nn)[class_indices]
        plt.plot(np.mean(np.abs(class_features), axis=0), marker='_', color=color_, label=class_name)
        plt.hlines(np.mean(np.abs(class_features)),0,len(class_features[0])-1, linestyle='--', color=color_)
    plt.show()
    plt.legend()
    # Mean confusion (exp of the stored values, presumably log-variances
    # -- TODO confirm) per class, same layout as above.
    plt.figure()
    plt.title("Average confusion, per class")
    plt.xlabel('feature #')
    plt.ylabel('sigma^2')
    for class_name, color_ in zip(classes_set, colors):
        class_ids = [id_ for id_, class_ in zip(ids, classes) if class_ == class_name]
        class_indices = [ids.index(id_) for id_ in class_ids]
        class_confusion = np.array(confusion_nn)[class_indices]
        plt.plot(np.mean(np.exp(class_confusion), axis=0), marker='_', color=color_, label=class_name)
        plt.hlines(np.mean(np.exp(class_confusion)),0,len(class_features[0])-1, linestyle='--', color=color_)
    plt.show()
    plt.legend()
    print("")
# ## Export Features
# In[ ]:
def remove_features(fnames_to_remove, fnames, features):
    """Remove the named feature columns, in place, from a parallel matrix.

    Args:
        fnames_to_remove: iterable of feature names to drop; names that are
            not present in `fnames` are silently ignored.
        fnames: list of feature names, one per column; mutated in place.
        features: list of 1-D numpy arrays (one per sample); each entry is
            replaced by a copy with the corresponding column removed.
    """
    for fname_to_remove in fnames_to_remove:
        if fname_to_remove in fnames:
            print(" Removing pre-existing feature " + fname_to_remove)
            # Hoist the column index lookup out of the per-sample loop.
            column = fnames.index(fname_to_remove)
            for j, values in enumerate(features):
                features[j] = np.delete(values, column)
            fnames.remove(fname_to_remove)
    # Bug fix: the original `features[0]` raised IndexError when the
    # feature matrix had no rows; only check consistency when rows exist.
    assert not features or len(fnames) == len(features[0])
def update_features(fnames_to_update, features_to_update, fnames, features):
    """Insert (or overwrite) the given feature columns in place.

    Any columns with the same names are removed first, then the new values
    are appended to every sample row and the names to `fnames`.
    """
    assert len(fnames_to_update) == len(features_to_update[0])
    # Drop pre-existing columns with the same names before appending.
    remove_features(fnames_to_update, fnames, features)
    for name in fnames_to_update:
        print(" Adding feature " + name)
    # Append the new columns to each sample row.
    for row_index, (old_row, new_columns) in enumerate(zip(features, features_to_update)):
        features[row_index] = np.concatenate([old_row, new_columns])
    fnames += fnames_to_update
# Create copies of the original features
# In[ ]:
if EXPORT_FEATURES:
    # Work on copies so the imported feature lists stay untouched.
    updated_fnames = fnames[:]
    updated_features = features[:]
    print(fnames)
    print(features[0])
# Add/overwrite autoencoder features
# In[ ]:
if EXPORT_FEATURES:
    # AE features
    fnames_nn = ['autoencoder_feature'+str(i+1) for i in range(features_nn[0].shape[0])]
    update_features(fnames_nn, features_nn, updated_fnames, updated_features)
    # Scale features
    sc_fnames = ['x_scale', 'y_scale', 'z_scale']
    update_features(sc_fnames, features_voxel_scale, updated_fnames, updated_features)
# In[ ]:
if EXPORT_FEATURES:
    # Persist the merged feature matrix to the current run's feature file.
    from load_segments import write_features
    write_features(ids, updated_features, updated_fnames, filename=runs[run_index][features_file_index])
# ## Evaluate Features
# In[ ]:
# Features
if CREATE_VISUALS:
    # Visualise each segment annotated with its autoencoder features.
    from visuals import visuals_of_segments
    visuals_of_segments(segments, ids, features=features_nn)
    clear_output()
# In[ ]:
# Matches
if CREATE_VISUALS:
    # Visualise matched segment pairs with their autoencoder features.
    from visuals import visuals_of_matches
    visuals_of_matches(matches, segments, ids, features=features_nn)
    clear_output()
# ## Save or Convert Model
# In[ ]:
CONVERT_VARIABLE_NAMES = False
# Optionally re-map checkpoint variable names onto the current model layout
# (strips two layer-scope prefixes) and restore under the translated names.
name_to_var_dict = {}
if CONVERT_VARIABLE_NAMES:
    for var in vae.variables:
        # Modify a few names
        if 'LatentLayerWeights/' in var.name:
            name = var.name.replace('LatentLayerWeights/', '')
        elif 'ReconstructionLayerWeights/' in var.name:
            name = var.name.replace('ReconstructionLayerWeights/', '')
        # Leave other names unchanged
        else:
            name = var.name
        name_to_var_dict[name] = var
    temp_saver = tf.train.Saver(name_to_var_dict)
    temp_saver.restore(vae.sess, SAVE_PATH)
# Bare expression: displays the mapping when run as a notebook cell.
name_to_var_dict
# In[ ]:
# Save model and params
# Disabled by default; flip to True to write the checkpoint and the
# pickled model parameters.
if False:
    vae.saver.save(vae.sess, SAVE_PATH)
    with open(SAVE_DIR+MP_FILENAME, 'wb') as file:
        pickle.dump(MP, file, protocol=2)
# In[ ]:
| 2.34375 | 2 |
softqlearning/softqlearning/replay_buffers/simple_replay_buffer.py | Kiwoo/HVHRL | 0 | 12760359 | import numpy as np
from rllab.core.serializable import Serializable
from .replay_buffer import ReplayBuffer
class SimpleReplayBuffer(ReplayBuffer, Serializable):
    """Fixed-capacity FIFO replay buffer backed by preallocated numpy arrays.

    Transitions are written in ring-buffer fashion: once
    ``max_replay_buffer_size`` samples have been stored, the oldest entries
    are overwritten.  The buffer is picklable via __getstate__/__setstate__.
    """

    def __init__(self, env_spec, max_replay_buffer_size):
        """
        :param env_spec: rllab environment spec; provides the flat
            observation and action dimensions.
        :param max_replay_buffer_size: buffer capacity (number of samples).
        """
        super(SimpleReplayBuffer, self).__init__()
        Serializable.quick_init(self, locals())

        max_replay_buffer_size = int(max_replay_buffer_size)

        self._env_spec = env_spec
        self._observation_dim = env_spec.observation_space.flat_dim
        self._action_dim = env_spec.action_space.flat_dim
        self._max_buffer_size = max_replay_buffer_size
        self._observations = np.zeros((max_replay_buffer_size,
                                       self._observation_dim))
        # It's a bit memory inefficient to save the observations twice,
        # but it makes the code *much* easier since you no longer have to
        # worry about termination conditions.
        self._next_obs = np.zeros((max_replay_buffer_size,
                                   self._observation_dim))
        self._actions = np.zeros((max_replay_buffer_size, self._action_dim))
        self._rewards = np.zeros(max_replay_buffer_size)
        # self._terminals[i] = a terminal was received at time i
        self._terminals = np.zeros(max_replay_buffer_size, dtype='uint8')

        self._top = 0   # index of the next slot to write
        self._size = 0  # number of valid samples currently stored

    def add_sample(self, observation, action, reward, terminal,
                   next_observation, **kwargs):
        """Store one transition, overwriting the oldest entry when full."""
        self._observations[self._top] = observation
        self._actions[self._top] = action
        self._rewards[self._top] = reward
        self._terminals[self._top] = terminal
        self._next_obs[self._top] = next_observation

        self._advance()

    def terminate_episode(self):
        # Nothing to do: episode termination is recorded per sample via
        # the `terminal` flag.
        pass

    def _advance(self):
        # Move the write pointer forward one slot, wrapping around.
        self._top = (self._top + 1) % self._max_buffer_size
        if self._size < self._max_buffer_size:
            self._size += 1

    def random_batch(self, batch_size):
        """Sample `batch_size` transitions uniformly, with replacement."""
        indices = np.random.randint(0, self._size, batch_size)
        return {
            'observations': self._observations[indices],
            'actions': self._actions[indices],
            'rewards': self._rewards[indices],
            'terminals': self._terminals[indices],
            'next_observations': self._next_obs[indices]
        }

    @property
    def size(self):
        """Number of valid samples currently stored."""
        return self._size

    def __getstate__(self):
        """Serialize the arrays as raw bytes plus the ring-buffer indices."""
        buffer_state = super(SimpleReplayBuffer, self).__getstate__()
        buffer_state.update({
            'observations': self._observations.tobytes(),
            'actions': self._actions.tobytes(),
            'rewards': self._rewards.tobytes(),
            'terminals': self._terminals.tobytes(),
            'next_observations': self._next_obs.tobytes(),
            'top': self._top,
            'size': self._size,
        })
        return buffer_state

    def __setstate__(self, buffer_state):
        """Restore the arrays from the raw bytes written by __getstate__."""
        super(SimpleReplayBuffer, self).__setstate__(buffer_state)

        # Bug fix: np.fromstring on binary data is deprecated; use
        # np.frombuffer.  frombuffer returns a *read-only* view of the
        # bytes, so copy to restore writable arrays (add_sample mutates
        # them in place).
        flat_obs = np.frombuffer(buffer_state['observations'])
        flat_next_obs = np.frombuffer(buffer_state['next_observations'])
        flat_actions = np.frombuffer(buffer_state['actions'])
        flat_reward = np.frombuffer(buffer_state['rewards'])
        flat_terminals = np.frombuffer(
            buffer_state['terminals'], dtype=np.uint8)

        self._observations = flat_obs.reshape(self._max_buffer_size, -1).copy()
        self._next_obs = flat_next_obs.reshape(self._max_buffer_size, -1).copy()
        self._actions = flat_actions.reshape(self._max_buffer_size, -1).copy()
        self._rewards = flat_reward.reshape(self._max_buffer_size).copy()
        self._terminals = flat_terminals.reshape(self._max_buffer_size).copy()
        self._top = buffer_state['top']
        self._size = buffer_state['size']
| 2.21875 | 2 |
taggregator/tagg.py | jamtartley/taggregator | 0 | 12760360 | #! /usr/bin/env python3
#! -*- coding: utf-8 -*-
from taggregator import printer
from pathlib import Path
import itertools
import os
import re
import sys
class Match:
    """A single tag occurrence found in a source file."""

    NO_PRIORITY = -1  # priority index used when no priority was matched

    def __init__(self, file_name, line_number, line, tag, priority):
        self.file_name = file_name
        # Stored as a string for uniform printing/comparison.
        self.line_number = str(line_number)
        self.line = line
        self.tag = tag
        self.priority = priority

    def __str__(self):
        return self.file_name

    def __eq__(self, other):
        # Two matches denote the same occurrence when they share the file,
        # the line number and the tag.
        return (self.file_name == other.file_name
                and self.line_number == other.line_number
                and self.tag == other.tag)

    def __hash__(self):
        # Bug fix: defining __eq__ alone makes instances unhashable in
        # Python 3; hash on the same fields the equality check uses.
        return hash((self.file_name, self.line_number, self.tag))
def get_piped_list(items):
    """Join *items* with '|' for use as a regex alternation, e.g. 'a|b'."""
    separator = "|"
    return separator.join(items)
def get_tag_regex(tag_marker, tags, priority_regex):
    """Compile the case-insensitive regex matching tag occurrences.

    The result matches e.g. ``@HACK|SPEED|FEATURE(LOW|MEDIUM)`` with two
    capture groups: the tag string and the (optional) priority.

    Note (from the original @BUG(LOW) remark): because the parentheses
    around the priority are optional, a bare priority word right after the
    tag is also captured, e.g. '@FEATURE High priority test' is marked
    high priority whether or not the user intended that.
    """
    # "|".join inlined from get_piped_list() so this helper stands alone.
    tag_alternation = "|".join(tags)
    pattern = (tag_marker + "(" + tag_alternation + r")\s*\(*"
               + priority_regex + r"\)*")
    return re.compile(pattern, re.IGNORECASE)
def get_priority_regex(priorities):
    """Return the optional, whitespace-padded priority capture group."""
    # "|".join inlined from get_piped_list() so this helper stands alone.
    alternation = "|".join(priorities)
    return r"\s*(" + alternation + r")?\s*"
def find_matches(tag_regex, tags, file_name, priority_value_map):
    """Yield a Match for every tag occurrence found in *file_name*.

    Directories and non-utf-8 files are silently skipped.  The whole file
    is checked for any tag substring first, so the (expensive) per-line
    regex only runs on files that can actually contain a match.
    """
    if os.path.isdir(file_name):
        return

    # @SPEED(HIGH) File opening/reading: profiling shows this is the main
    # bottleneck; it is IO bound, so multiprocessing did not help.
    with open(file_name) as source:
        try:
            contents = source.read()
        except UnicodeDecodeError:
            # Ignore non utf-8 files.
            return

    # Cheap whole-file substring test before the per-line regex search.
    haystack = contents.lower()
    if not any(tag.lower() in haystack for tag in tags):
        return

    # @BUG(HIGH) Throws OSError on some files if in use (Cygwin only;
    # cannot repro on *nix).
    for line_number, raw_line in enumerate(contents.split('\n'), 1):
        # @SPEED(MEDIUM) Regex search of processed line
        for tag_text, priority_text in tag_regex.findall(raw_line):
            priority_idx = priority_value_map.get(
                priority_text.upper(), Match.NO_PRIORITY)
            snippet = printer.get_truncated_text(raw_line.strip(), 100)
            yield Match(file_name, line_number, snippet,
                        tag_text.upper(), priority_idx)
def get_priority_value_map(all_priorities):
    """Map each priority name (upper-cased) to its index, low -> high.

    e.g. given ['LOW', 'MEDIUM', 'HIGH'] returns
    {'LOW': 0, 'MEDIUM': 1, 'HIGH': 2}
    """
    return {text.upper(): index
            for index, text in enumerate(all_priorities)}
def run(config_map):
    """Walk the configured root directory and print every tag occurrence.

    Only files with a configured extension ('*' means any) that do not
    live under an excluded directory are scanned.  Matches are
    de-duplicated (Match.__eq__: same file, line and tag) before printing.
    """
    tag_marker = re.escape(config_map["tag_marker"])
    extensions = config_map["extensions"]
    priorities = config_map["priorities"]
    tags = config_map["tags"]

    priority_value_map = get_priority_value_map(priorities)
    # Bug fix: the reversed `value_priority_map` was computed here but
    # never used; removed.
    priority_regex = get_priority_regex(priorities)
    tag_regex = get_tag_regex(tag_marker, tags, priority_regex)
    # Excluded directories are configured relative to the working dir.
    exclude = [os.path.join(os.getcwd(), d) for d in config_map["exclude"]]
    can_search_any_extension = "*" in extensions

    files = []
    for root, dirs, files_in_dir in os.walk(config_map["root"]):
        for file_name in files_in_dir:
            file_path = os.path.join(root, file_name)
            # We only want to search for tags in files which have one of the
            # correct extensions (or user has chosen to include every
            # extension with '*') and are not inside an excluded folder.
            if can_search_any_extension or any(
                    file_path.endswith(ext) for ext in extensions):
                if not any(file_path.startswith(e) for e in exclude):
                    files.append(file_path)

    matches = []
    for file_name in files:
        for match in find_matches(
                tag_regex,
                tags,
                file_name,
                priority_value_map):
            # Equality check is handled by the overridden __eq__ in the
            # Match class.
            if not any(match == m for m in matches):
                matches.append(match)

    printer.print_matches(matches, tag_marker, priority_value_map)
| 3.265625 | 3 |
src/icemac/addressbook/browser/entities/fields.py | icemac/icemac.addressbook | 1 | 12760361 | <filename>src/icemac/addressbook/browser/entities/fields.py<gh_stars>1-10
# -*- coding: utf-8 -*-
from icemac.addressbook.i18n import _
from icemac.addressbook.interfaces import IMayHaveCustomizedPredfinedFields
from six.moves.urllib_parse import urlsplit
import grokcore.component as grok
import icemac.addressbook.browser.breadcrumb
import icemac.addressbook.browser.interfaces
import icemac.addressbook.browser.metadata
import icemac.addressbook.entities
import icemac.addressbook.interfaces
import icemac.addressbook.utils
import z3c.form.group
import z3c.form.interfaces
import z3c.form.widget
import z3c.formui.form
import zope.app.publication.traversers
import zope.component
import zope.i18n
import zope.interface
import zope.location
import zope.proxy
import zope.publisher.interfaces
import zope.publisher.interfaces.http
import zope.schema
import zope.schema.interfaces
import zope.security.proxy
import zope.traversing.browser
class FieldBreadCrumb(
        icemac.addressbook.browser.breadcrumb.Breadcrumb):
    """Breadcrumb for a user defined Field."""

    grok.adapts(
        icemac.addressbook.interfaces.IField,
        icemac.addressbook.browser.interfaces.IAddressBookLayer)

    @property
    def parent(self):
        # The breadcrumb parent of a field is the entity it belongs to.
        return icemac.addressbook.interfaces.IEntity(self.context)
class IProxiedField(zope.interface.Interface):
    """Wrapped zope.schema field used for renaming its title."""

    # Both fields are optional: submitting an empty value resets the
    # customization back to the field's default (see descriptions).
    title = zope.schema.TextLine(
        title=_('title'),
        description=_(
            'Delete the value and submit the form to reset to the default'
            ' value.'),
        required=False)

    description = zope.schema.TextLine(
        title=_('description'),
        description=_(
            'Delete the value and submit the form to reset to the default'
            ' value.'),
        required=False)
@zope.interface.implementer(IProxiedField)
class ProxiedField(object):
    """Wrapper for a zope.schema field to allow access to the title attrib.

    This wrapper is located, it has:

    * __parent__ ... entity, the field belongs to
    * __name__ ... name of the field in the interface.
    """

    def __init__(self, field):
        # The wrapped field is deliberately not stored: it is resolved
        # lazily through __parent__/__name__ (see `_field`), which
        # zope.location.locate() sets after construction.
        pass

    @property
    def title(self):
        """Customized label of the wrapped field, translated."""
        # NOTE(review): zope.globalrequest is not imported at module top
        # -- presumably made available via another import; confirm.
        return icemac.addressbook.utils.translate(
            self._field_customization.query_value(self._field, 'label'),
            zope.globalrequest.getRequest())

    @title.setter
    def title(self, value):
        return self._field_customization.set_value(
            self._field, u'label', value)

    @property
    def description(self):
        """Customized description of the wrapped field, translated."""
        return icemac.addressbook.utils.translate(
            self._field_customization.query_value(self._field, 'description'),
            zope.globalrequest.getRequest())

    @description.setter
    def description(self, value):
        return self._field_customization.set_value(
            self._field, u'description', value)

    @property
    def _field_customization(self):
        # IFieldCustomization utility of the current address book.
        address_book = icemac.addressbook.interfaces.IAddressBook(
            self.__parent__)
        return icemac.addressbook.interfaces.IFieldCustomization(address_book)

    @property
    def _field(self):
        # The actual zope.schema field this proxy stands in for.
        return self.__parent__.interface[self.__name__]
class ProxiedFieldBreadCrumb(icemac.addressbook.browser.breadcrumb.Breadcrumb):
    """Breadcrumb for a proxied pre-defined field."""

    grok.adapts(IProxiedField,
                icemac.addressbook.browser.interfaces.IAddressBookLayer)

    @property
    def title(self):
        # Use the (possibly customized) field label as breadcrumb title.
        return self.context.title
@zope.component.adapter(
    icemac.addressbook.interfaces.IEntity,
    zope.publisher.interfaces.http.IHTTPRequest)
@zope.interface.implementer_only(zope.publisher.interfaces.IPublishTraverse)
class FieldsTraverser(
        zope.app.publication.traversers.SimpleComponentTraverser):
    """Make fields traversable."""

    def publishTraverse(self, request, name):
        """Traverse entity first, then interface fields, then default views.

        Schema fields are returned wrapped in a located ProxiedField so
        their label/description customization is addressable by URL.
        """
        entities = zope.component.queryUtility(
            icemac.addressbook.interfaces.IEntities)
        try:
            return entities[name]
        except KeyError:
            try:
                field = self.context.interface[name]
            except KeyError:
                # Neither an entity nor a field: fall back to the default
                # traversal (views etc.).
                return super(FieldsTraverser, self).publishTraverse(
                    request, name)
            else:
                proxy = ProxiedField(field)
                zope.location.locate(proxy, self.context, name)
                return proxy
def get_field_URL(entity, field, request, view=None):
    """Compute the URL of *field* on *entity*, optionally with a view."""
    entities = zope.component.getUtility(
        icemac.addressbook.interfaces.IEntities)
    base_url = zope.traversing.browser.absoluteURL(entities, request)
    parts = [base_url, entity.__name__, field.__name__]
    if view is not None:
        parts.append('@@{}'.format(view))
    return '/'.join(parts)
class MetadataForm(z3c.form.group.GroupForm, z3c.formui.form.Form):
    """Form to only render metadata."""

    # Rendered standalone (see List.metadata); it contains only the
    # metadata group.
    id = 'standalone-metadata-form'
    groups = (icemac.addressbook.browser.metadata.MetadataGroup,)
class List(icemac.addressbook.browser.base.FlashView):
    """List fields of an entity."""

    title = _('Edit fields')

    def _values(self):
        """Return the raw schema fields of the entity."""
        # zope.schema fields are no content classes, so they have no
        # permissions defined
        return [zope.security.proxy.getObject(field)
                for name, field in self.context.getRawFields()]

    def fields(self):
        """Yield one display dict per listed field.

        User defined fields get an edit and a delete link; pre-defined
        fields get an edit link only when the entity's class supports
        label customization.  Fields tagged 'omit-from-field-list' are
        skipped.
        """
        is_customized = IMayHaveCustomizedPredfinedFields.implementedBy(
            self.context.getClass())
        address_book = icemac.addressbook.interfaces.IAddressBook(None)
        customization = icemac.addressbook.interfaces.IFieldCustomization(
            address_book)
        field_types = icemac.addressbook.sources.FieldTypeSource().factory
        for field in self._values():
            omit = False
            title = field.title
            if icemac.addressbook.interfaces.IField.providedBy(field):
                # User defined field: editable and deletable.
                field_type = field_types.getTitle(field.type)
                url = get_field_URL(self.context, field, self.request)
                delete_url = get_field_URL(
                    self.context, field, self.request, 'delete.html')
            else:
                # Pre-defined (schema) field: never deletable.
                delete_url = None
                try:
                    field_type = field_types.getTitle(field.__class__.__name__)
                except KeyError:
                    field_type = ''
                omit = field.queryTaggedValue('omit-from-field-list', False)
                if is_customized:
                    url = get_field_URL(self.context, field, self.request)
                    title = customization.query_value(field, u'label')
                else:
                    url = None
            if not omit:
                yield {'title': title,
                       'type': field_type,
                       'delete-link': delete_url,
                       'edit-link': url,
                       'id': field.__name__}

    def metadata(self):
        """Render the metadata of the field sort order, or '' if unchanged."""
        # Entities are not persisitent. Because the sort order of the fields
        # is shown in the list, we show the metadata of of this sort order:
        os = zope.component.getUtility(
            icemac.addressbook.interfaces.IOrderStorage)
        try:
            context = os.byNamespace(self.context.order_storage_namespace)
        except KeyError:
            # Sort order not yet changed, so we have no metadata:
            return ''
        form = MetadataForm(context, self.request)
        # We cannot use form() here as this renders the layout template,
        # too, which is not needed here:
        form.update()
        return form.render()
class SaveSortorder(icemac.addressbook.browser.base.BaseView):
    """Save the field sort order as defined by user."""

    def __call__(self, f):
        # `f` is the ordered sequence of field names posted by the UI.
        self.context.setFieldOrder(f)
        self.send_flash(_('Saved sortorder.'))
        self.request.response.redirect(self.url(self.context))
class AddForm(icemac.addressbook.browser.base.BaseAddForm):
    """Add a new user defined field to an entity."""

    title = _(u'Add new field')
    class_ = icemac.addressbook.entities.Field
    interface = icemac.addressbook.interfaces.IField
    next_url = 'parent'

    def add(self, obj):
        # Strip the security proxy so addField() can mutate the entity;
        # the returned name is kept for the redirect machinery.
        self._name = zope.security.proxy.getObject(self.context).addField(obj)
class BaseForm(object):
    """Mix-in class redirecting back to the entity."""

    def redirect_to_next_url(self, *args):
        # redirect to the entity
        # (two path segments up: .../<entity>/<field>/<view>)
        self.request.response.redirect(self.request.getURL(2))
class EditForm(BaseForm, icemac.addressbook.browser.base.GroupEditForm):
    """Edit a user defined field on an entity."""

    title = _(u'Edit field')
    interface = icemac.addressbook.interfaces.IField
    # Show the field's metadata alongside the edit form.
    groups = (icemac.addressbook.browser.metadata.MetadataGroup,)
class DeleteForm(BaseForm, icemac.addressbook.browser.base.BaseDeleteForm):
    """Delete a user defined field from an entity."""

    title = _('Delete field')
    label = _(
        'Caution: When you delete this field, possibly data will get lost. '
        'Namely the data which was entered into this field when it was '
        'displayed in a form of an object.')
    interface = icemac.addressbook.interfaces.IField
    field_names = ('type', 'title', 'notes')

    def _do_delete(self):
        """Remove the field from its entity and return True on success."""
        # We need the name of the entity from the url here to
        # unregister the adapter.
        path = urlsplit(self.request.getURL()).path
        entity_name = path.split('/')[-3]
        entity = zope.component.getUtility(
            icemac.addressbook.interfaces.IEntity, name=entity_name)
        # XXX Without the following line removing the interface from the field
        # (``field.interface = None`` in ``removeField``) fails with a
        # ForbiddenAttribute error:
        field = zope.proxy.removeAllProxies(self.context)
        entity.removeField(field)
        # We have no need for a super call here as `removeField()` already
        # did the job.
        return True
def get_field_customization(type, name):
    """Get `z3c.form:IValue` adapter for the customization of `type`.

    type ... `label` or `description`
    name ... name for the adapter (`label` or `title` in z3c.form terms)

    Returns an adapter *factory class* (not an instance); register it as a
    named multi-adapter to make z3c.form pick up customized field texts.
    """
    @zope.component.adapter(
        zope.interface.Interface,
        zope.interface.Interface,
        zope.interface.Interface,
        zope.schema.interfaces.IField,
        zope.interface.Interface)
    @zope.interface.named(name)
    @zope.interface.implementer(z3c.form.interfaces.IValue)
    class FieldCustomization(object):
        """Get a possibly custom value for a schema field.

        There are three possible return values depending on the level of
        customization:

        1) user customized value via IFieldCustomization
        2) application customized value via WidgetAttribute with name
           "custom-label"
        3) default value set on the schema field itself

        If no truthy value can be computed `None` is returned from the factory
        aka `could not adapt`. This is necessary because ``z3c.form`` expects
        an actual value if an adapter is found. The case that the value might
        be ``None`` is not handled very well, thus we are not able to use
        a ComputedWidgetAttribute.
        """

        def __new__(cls, context, request, view, field, widget):
            # Returning None from __new__ makes the adapter lookup behave
            # as if no adapter was registered ("could not adapt").
            custom_value = cls._get_field_customization(
                context, request, field, type)
            if not custom_value:
                return None
            instance = super(FieldCustomization, cls).__new__(cls)
            instance.custom_value = custom_value
            return instance

        def __init__(self, context, request, view, field, widget):
            self.request = request

        def get(self):
            # IValue API: return the (pre-computed) customized text.
            return self.custom_value

        @staticmethod
        def _get_field_customization(context, request, field, type):
            # ``cust_context`` is either an address book or a root folder:
            cust_context = icemac.addressbook.interfaces.IAddressBook(None)
            customization = icemac.addressbook.interfaces.IFieldCustomization(
                cust_context)
            try:
                # Level 1: explicit user customization.
                return customization.get_value(field, type)
            except KeyError:
                # Level 2: application-provided "custom-<type>" adapter.
                application_default_value = zope.component.queryMultiAdapter(
                    (context, None, None, field, None),
                    name="custom-{}".format(type))
                if application_default_value is None:
                    # Level 3: default from the schema field, translated
                    # and flattened to a single line.
                    value = customization.default_value(field, type)
                    if value:
                        value = zope.i18n.translate(value, context=request)
                        value = value.replace('\r', ' ').replace('\n', ' ')
                    return value
                else:
                    return application_default_value.get()

    return FieldCustomization
# Pre-built IValue adapter factories customizing the field label and the
# field hint text ('description' is exposed under z3c.form's 'title' name).
custom_field_label = get_field_customization('label', 'label')
custom_field_hint = get_field_customization('description', 'title')
@grok.adapter(
    zope.interface.Interface,
    zope.interface.Interface,
    zope.interface.Interface,
    z3c.form.interfaces.IButton,
    zope.interface.Interface,
    name="title")
@grok.implementer(z3c.form.interfaces.IValue)
def restore_button_title(*args):
    """Do not use custom button titles.

    For buttons the `title` adapter is used for the label; we do not want
    to change the title of buttons but keep the original value.
    """
    # Returning None means "could not adapt", which switches the generic
    # 'title' customization (custom_field_hint) off for buttons.
    return None
class RenameForm(BaseForm, icemac.addressbook.browser.base.BaseEditForm):
    """Rename the title of a pre-defined field on an entity."""

    title = _(u'Rename field')
    # Edits the ProxiedField wrapper, i.e. the label/description
    # customization, not the schema field itself.
    interface = IProxiedField
vkikriging/kriging_v3.py | rdwight/vkikriging | 3 | 12760362 | <reponame>rdwight/vkikriging
"""
Universal Kriging - version 3 (`kriging_v3`)
============================================
Universal Kriging in d-dimensions. This differs from `kriging_v1` and `kriging_v2`
which implement only simple Kriging.
"""
import numpy as np
from .mylib import Timing
from .covariance import covariance_squaredexponential, covariance_squaredexponential_dxi, covariance_squaredexponential_dxidxi
def F_linear(xi):
    """Linear basis for the parameterization of the non-stationary mean.

    Args:
        xi (ndarray): Coordinates of points in parameter space, shape (n, d).

    Returns:
        ndarray: Basis matrix of shape (n, d + 1): a constant column of
        ones followed by the coordinates themselves.
    """
    n_points = xi.shape[0]
    constant_column = np.ones((n_points, 1))
    return np.hstack((constant_column, xi))
def dF_linear(xi):
    """Derivatives of the basis functions defined in F_linear().

    (Would be) needed for a non-stationary mean with GEK.

    Args:
        xi (ndarray): Coordinates of points in parameter space, shape (n, d).

    Returns:
        ndarray: Tensor of derivatives, shape (n, M, d) with M = d + 1:
        zeros for the constant basis, the identity for the linear terms.
    """
    n, d = xi.shape
    n_basis = d + 1  # must equal F_linear(xi).shape[1]
    derivatives = np.zeros((n, n_basis, d))
    # Every point shares the same Jacobian; broadcast the identity into
    # all n slots at once instead of looping.
    derivatives[:, 1:, :] = np.identity(d)
    return derivatives
def kriging(xi, x, observed, sigma_y, F_mean, sd_x, gamma):
    """Universal Kriging: simple Kriging with a regression-based mean.

    A function-basis F (e.g. `F_linear()`) represents the *variable* mean
    with unknown coefficients \\lambda.  The state is augmented,
    x_a = [x, \\lambda], with augmented observation operator H_a = [H, F],
    and the Gaussian-process prior mean is identically zero.

    Args:
        xi (ndarray): Sample locations (observations and predictions),
            shape (n, d).
        x (ndarray): Sample values (values not at observation locations
            are not used).  Shape n.
        observed (ndarray): Bool array, shape n; True marks observed points.
        sigma_y (float): Standard deviation of the observation error.
        F_mean (function): Basis function in the template of F_linear().
        sd_x (float): (Sample) standard deviation of the approximated
            function, used in the prior.
        gamma (float): Correlation coefficient in all directions.

    Returns:
        dict: Prior and posterior statistics (augmented and plain).
    """
    ### Determine problem dimensions from input.
    n, d = xi.shape
    H = np.identity(n)[observed]    # Observation operator
    y = np.dot(H, x)                # Observations
    m = y.size                      # Number of observations
    F = F_mean(xi)                  # Basis for non-stationary mean
    Fy = F[observed]                # Restricted to observation locations
    M = F.shape[1]                  # Size of basis
    Ha = np.hstack((H, Fy))         # Augmented observation operator

    ### Observation error covariance matrix (floored to keep Aa regular).
    R = np.diag(np.ones(m) * max(sigma_y, 1.e-4) ** 2)

    ### Prior mean and covariance at the sample locations. Augmented
    ### with priors of coefficients (TODO: atm normal dist with large
    ### std, should be non-informative).
    t = Timing()
    mua_prior = np.zeros(n + M)
    Pa = np.zeros((n + M, n + M))
    Pa[:n, :n] = sd_x ** 2 * covariance_squaredexponential(xi, xi, gamma)
    Pa[n:, n:] = 1.e6 * np.identity(M)  # Prior on mean coefficients
    t.monitor('Build prior covariance')

    ### The gain matrix Ka = Pa.Ha^T.Aa^{-1}.
    Aa = R + np.dot(Ha, np.dot(Pa, Ha.T))
    # Improvement: solve the linear system instead of forming the explicit
    # inverse -- numerically better conditioned and cheaper.  Since Pa and
    # Aa are symmetric, Pa.Ha^T.Aa^{-1} == solve(Aa, Ha.Pa)^T.
    Ka = np.linalg.solve(Aa, np.dot(Ha, Pa)).T
    t.monitor('Invert K')

    ### Posterior mean and covariance (prediction):
    # E(x|y) ("predictor")
    muahat = mua_prior + np.dot(Ka, y - np.dot(Ha, mua_prior))
    # Plain (non-augmented) predictor: regression mean plus process part.
    muhat = np.dot(F, muahat[n:]) + muahat[:n]
    t.monitor('Evaluate posterior mean')
    # Cov(x|y) ("mean-squared error estimator")
    covahat = np.dot(np.identity(n + M) - np.dot(Ka, Ha), Pa)
    covPhat, covFhat = covahat[:n, :n], covahat[n:, n:]
    # Only the process part is reported (coefficient uncertainty is
    # available via covahat); the full form would be:
    # covhat = np.dot(F, np.dot(covFhat, F.T)) + covPhat
    covhat = covPhat
    t.monitor('Evaluate posterior covariance')

    ### Return all this statistical information.
    return {
        'mua_prior': mua_prior,
        'cov_prior': Pa,       # Prior (augmented)
        'muahat': muahat,
        'covahat': covahat,    # Posterior (augmented)
        'muhat': muhat,
        'Sigmahat': covhat,    # Posterior
    }
| 2.6875 | 3 |
ner/train_utils.py | bestasoff/adynorm | 1 | 12760363 | <gh_stars>1-10
import logging
import pickle
from collections import defaultdict
from tqdm import tqdm
from .utils import clear_cuda_cache
import torch
import numpy as np
from .ner import get_metrics
logger = logging.getLogger(__name__)
def train(model, optimizer, loader, accum_steps, device):
    """Run one training epoch with gradient accumulation.

    Gradients are accumulated over `accum_steps` batches before every
    optimizer step; the returned loss is the mean accumulated loss per
    optimizer step.

    Returns:
        (model, optimizer, mean_loss)
    """
    model.train()
    losses_tr = [0]
    for i, batch in tqdm(enumerate(loader), total=len(loader)):
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)

        outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
        # Scale so the accumulated gradient matches a full-batch step.
        loss = outputs.loss / accum_steps
        losses_tr[-1] += loss.item()
        loss.backward()

        if (i + 1) % accum_steps == 0:
            optimizer.step()
            losses_tr.append(0)
            optimizer.zero_grad()
            clear_cuda_cache()

    # Bug fix: when the epoch ends exactly on an optimizer step, a spurious
    # trailing 0 entry remained and biased the mean loss downwards.
    if len(losses_tr) > 1 and losses_tr[-1] == 0:
        losses_tr.pop()
    # NOTE(review): a final partial accumulation (len(loader) not a
    # multiple of accum_steps) leaves unstepped gradients behind for the
    # next epoch -- confirm whether that is intended.
    return model, optimizer, np.mean(losses_tr)
def val(model, loader, dataset, tokenizer, id2label, device):
    """Evaluate the model: mean loss over *loader* plus NER metrics."""
    model.eval()
    batch_losses = []
    batch_count = len(loader)
    with torch.no_grad():
        for batch in tqdm(loader, total=batch_count):
            token_ids = batch['input_ids'].to(device)
            mask = batch['attention_mask'].to(device)
            gold_labels = batch['labels'].to(device)

            outputs = model(token_ids, attention_mask=mask, labels=gold_labels)
            batch_losses.append(outputs.loss.item())

    metrics = get_metrics(model, tokenizer, device, loader, dataset, id2label)
    return np.mean(batch_losses), metrics
def learning_loop(
        model, num_epochs, optimizer,
        scheduler, train_dataloader, val_dataloader,
        val_dataset, tokenizer, accum_steps, id2label,
        device, save_model_steps=10, save_losses: str = None, save_metrics: str = None):
    """Train for up to `num_epochs` epochs, validating and checkpointing.

    Checkpoints are written to ner/trained_ner_models/ every
    `save_model_steps` epochs and once more after the loop.  If
    `save_losses`/`save_metrics` paths are given, the recorded histories
    are pickled there.  Returns (model, optimizer, losses).

    NOTE(review): `scheduler` is currently unused -- its step call is
    commented out below; confirm whether it should be stepped.
    """
    losses = {'train': [], 'val': []}
    val_metrics = {'precision': [], 'recall': [], 'f1': []}
    logger.info(f"*** Learning loop ***")
    for epoch in range(1, num_epochs + 1):
        clear_cuda_cache()
        logger.info(f"*** Train epoch #{epoch} started ***")
        model, optimizer, loss = train(model, optimizer, train_dataloader, accum_steps, device)
        losses['train'].append(loss)
        logger.info(f"*** Train epoch #{epoch} loss *** = {loss}")

        # if scheduler:
        #     scheduler.step()

        if val_dataloader is not None:
            logger.info(f"*** Validation epoch #{epoch} started ***")
            loss, metrics = val(model, val_dataloader, val_dataset, tokenizer, id2label, device)
            for k, i in metrics.items():
                val_metrics[k].append(i)
            losses['val'].append(loss)
            logger.info(
                f"*** Validation epoch #{epoch} results ***\nloss = {loss},\nprecision = {metrics['precision']},\nrecall = {metrics['recall']},\nf1 = {metrics['f1']}")

        # Periodic checkpoint of model and optimizer state.
        if (epoch + 1) % save_model_steps == 0:
            torch.save(model.state_dict(), f'ner/trained_ner_models/model_{epoch}.ct')
            torch.save(optimizer.state_dict(), f'ner/trained_ner_models/optimizer_{epoch}.ct')

        # Early stop once the train loss plateaus.
        # NOTE(review): the `> 2` guard only compares from the third epoch
        # on -- presumably `>= 2` was intended; confirm.
        if len(losses['train']) > 2 and abs(losses['train'][-1] - losses['train'][-2]) < 1e-5:
            break

    # Final checkpoint (named after num_epochs even when stopped early).
    torch.save(model.state_dict(), f'ner/trained_ner_models/model_{num_epochs}.ct')
    torch.save(optimizer.state_dict(), f'ner/trained_ner_models/optimizer_{num_epochs}.ct')

    if save_losses is not None:
        with open(save_losses, 'wb') as file:
            pickle.dump(losses, file)

    if save_metrics is not None:
        with open(save_metrics, 'wb') as file:
            pickle.dump(val_metrics, file)

    return model, optimizer, losses
| 2.09375 | 2 |
tests/integrationtest/api/conftest.py | RasmusGodske/eo-platform-utils | 0 | 12760364 | """
conftest.py according to pytest docs:
https://docs.pytest.org/en/2.7.3/plugins.html?highlight=re#conftest-py-plugins
"""
import pytest
from datetime import datetime, timedelta, timezone
from origin.api import Application
from origin.models.auth import InternalToken
from origin.tokens import TokenEncoder
@pytest.fixture(scope='function')
def app(secret: str):
    """A fresh test Application with a health-check endpoint at /health."""
    yield Application.create(
        name='Test API',
        secret=secret,
        health_check_path='/health',
    )
@pytest.fixture(scope='function')
def client(app: Application):
    """Test client for the application under test."""
    yield app.test_client
@pytest.fixture(scope='function')
def secret():
    """
    Yield the secret string shared by the app and the token encoder fixtures.
    """
    yield 'something secret'
@pytest.fixture(scope='function')
def token_encoder(secret: str):
    """
    Yield a TokenEncoder for InternalToken using the shared test secret.
    """
    yield TokenEncoder(
        schema=InternalToken,
        secret=secret,
    )
@pytest.fixture(scope='function')
def valid_token():
    """
    Yield an InternalToken issued now (UTC) and valid for one day.
    """
    yield InternalToken(
        issued=datetime.now(tz=timezone.utc),
        expires=datetime.now(tz=timezone.utc) + timedelta(days=1),
        actor='foo',
        subject='bar',
        scope=['scope1', 'scope2'],
    )
@pytest.fixture(scope='function')
def valid_token_encoded(
        valid_token: InternalToken,
        token_encoder: TokenEncoder[InternalToken],
):
    """
    Yield the valid token encoded (signed) with the shared test secret.
    """
    yield token_encoder.encode(valid_token)
| 2.21875 | 2 |
ccal/unmount_volume.py | kberkey/ccal | 0 | 12760365 | <reponame>kberkey/ccal
from .run_command import run_command
def unmount_volume(volume_name_or_mount_directory_path):
    """Unmount a volume via ``sudo umount``.

    The argument may be either the volume name or the path of its mount
    directory, as accepted by ``umount`` itself.
    """
    command = "sudo umount {}".format(volume_name_or_mount_directory_path)
    run_command(command)
| 1.945313 | 2 |
hostha/common/utils/patches.py | openeuler-mirror/hostha | 0 | 12760366 | <reponame>openeuler-mirror/hostha<gh_stars>0
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import eventlet
# Stdlib subsystems eventlet will green-patch; keys match the keyword
# arguments of eventlet.monkey_patch() / eventlet.import_patched().
EVENTLET_MONKEY_PATCH_MODULES = dict(os=True,
                                     select=True,
                                     socket=True,
                                     thread=True,
                                     time=True)
def patch_all():
    """Apply all patches.
    List of patches:
    * eventlet's monkey patch for all cases;
    * minidom's writexml patch for py < 2.7.3 only.
    """
    # Order matters: green-patch the stdlib before anything else runs.
    eventlet_monkey_patch()
    patch_minidom_writexml()
def eventlet_monkey_patch():
    """Apply eventlet's monkey patch.
    This call should be the first call in the application. It's safe to call
    monkey_patch multiple times.
    """
    # Only the subsystems listed in EVENTLET_MONKEY_PATCH_MODULES are patched.
    eventlet.monkey_patch(**EVENTLET_MONKEY_PATCH_MODULES)
def eventlet_import_monkey_patched(module):
    """Return the given module monkey patched by eventlet.
    It's needed for some tests, for example, the context test.
    """
    # import_patched returns a copy of the module with the configured stdlib
    # pieces replaced by their green (eventlet) equivalents.
    return eventlet.import_patched(module, **EVENTLET_MONKEY_PATCH_MODULES)
def patch_minidom_writexml():
    """Patch for xml.dom.minidom toprettyxml bug with whitespaces around text
    We apply the patch to avoid excess whitespaces in generated xml
    configuration files that brakes Hadoop.
    (This patch will be applied for all Python versions < 2.7.3)
    Issue: http://bugs.python.org/issue4147
    Patch: http://hg.python.org/cpython/rev/cb6614e3438b/
    Description: http://ronrothman.com/public/leftbraned/xml-dom-minidom-\
toprettyxml-and-silly-whitespace/#best-solution
    """
    import sys
    # Modern interpreters already contain the upstream fix; do nothing.
    if sys.version_info >= (2, 7, 3):
        return
    import xml.dom.minidom as md
    # Backported Element.writexml: the key difference from the buggy stdlib
    # version is that a single TEXT_NODE child is written inline (with empty
    # indent/newl) instead of being surrounded by pretty-print whitespace.
    def element_writexml(self, writer, indent="", addindent="", newl=""):
        # indent = current indentation
        # addindent = indentation to add to higher levels
        # newl = newline string
        writer.write(indent + "<" + self.tagName)
        attrs = self._get_attributes()
        a_names = list(attrs.keys())
        a_names.sort()
        for a_name in a_names:
            writer.write(" %s=\"" % a_name)
            md._write_data(writer, attrs[a_name].value)
            writer.write("\"")
        if self.childNodes:
            writer.write(">")
            if (len(self.childNodes) == 1
                    and self.childNodes[0].nodeType == md.Node.TEXT_NODE):
                self.childNodes[0].writexml(writer, '', '', '')
            else:
                writer.write(newl)
                for node in self.childNodes:
                    node.writexml(writer, indent + addindent, addindent, newl)
                writer.write(indent)
            writer.write("</%s>%s" % (self.tagName, newl))
        else:
            writer.write("/>%s" % (newl))
    md.Element.writexml = element_writexml
    # Text nodes are written as-is between the indent and the newline.
    def text_writexml(self, writer, indent="", addindent="", newl=""):
        md._write_data(writer, "%s%s%s" % (indent, self.data, newl))
    md.Text.writexml = text_writexml
| 1.773438 | 2 |
setup.py | theelous3/sansio-multipart-parser | 4 | 12760367 | import sys
import os.path
from setuptools import setup
from sansio_multipart import __version__, __author__, __doc__
# Distribution configuration; version/author/long description come from the
# package itself so the metadata is defined in exactly one place.
setup(
    name="sansio_multipart",
    version=__version__,
    description="Parser for multipart/form-data.",
    long_description=__doc__,
    author=__author__,
    author_email="<EMAIL>",
    url="https://github.com/theelous3/sansio-multipart-parser",
    packages=["sansio_multipart"],
    license="MIT",
    platforms="any",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries",
        "Topic :: Internet :: WWW/HTTP :: HTTP Servers",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Server",
        "Programming Language :: Python :: 3",
    ],
)
| 1.234375 | 1 |
kitchen/demo.py | Renha/kitchen | 0 | 12760368 | <gh_stars>0
from __future__ import annotations
from .config import KitchenConfig
from .kitchen import Kitchen, KitchenReport
from subprocess import Popen, PIPE
from time import sleep
if __name__ == "__main__":
    # Demo parameters: where the throwaway redis-server listens and how long
    # the kitchen simulation is allowed to run before shutdown.
    redis_host = "localhost"
    redis_port = 7777
    run_time = 80
    # Quick and dirty way to launch redis-server, just enough for a simple demo:
    redis_launched = False
    redis_server = Popen(["redis-server", "--port", str(redis_port)], stdout=PIPE)
    try:
        # Block until redis reports readiness on stdout (or stdout ends).
        if not redis_server.stdout is None:
            for line in redis_server.stdout:
                if "Ready to accept connections" in line.decode(encoding="utf8"):
                    redis_launched = True
                    break
        if redis_launched:
            print(f"Redis server started, host = {redis_host}, port = {redis_port}")
            config = KitchenConfig()
            # Per-robot probability of a successful action.
            robots_reliability = {
                "sauce": 0.97,
                "cheese": 0.95,
                "slice": 0.87,
                "pack": 0.90,
                "to_oven": 0.99,
                "from_oven": 0.99,
            }
            # Nominal duration of each action in seconds.
            robots_seconds_per_action: dict[str, float] = {
                "take": 1,
                "sauce": 2,
                "cheese": 3,
                "to_oven": 1,
                "bake": 10,
                "from_oven": 1,
                "slice": 2,
                "pack": 3,
                "put": 1,
            }
            # Random multiplier range applied to each action's duration.
            robots_time_variations: tuple[float, float] = (0.9, 1.2)
            # Scripted manager behaviour: (command, value) pairs.
            manager_commands = [
                ("order", 2),
                ("sleep", 10),
                ("order", 5),
            ]
            kitchen = Kitchen(
                config,
                robots_reliability,
                robots_seconds_per_action,
                robots_time_variations,
                manager_commands,
            )
            try:
                kitchen(name="kitchen init", redis_host=redis_host, redis_port=redis_port)
                sleep(run_time)
            finally:
                # Always stop the kitchen workers, even if the run errored.
                kitchen.shutdown()
            report_builder = KitchenReport()
            report_builder("report", redis_host, redis_port)
            # Could be retrieved from report, but that would include orders manager
            # didn't have time to place:
            total_orders_amount = sum((v if c == "order" else 0 for c, v in manager_commands))
            if total_orders_amount:
                print(f"Total orders amount: {total_orders_amount}. Report:")
                print(repr(report_builder))
            else:
                print(f"No orders, nothing to report")
    finally:
        # Tear down the throwaway redis-server in every case.
        redis_server.kill()
| 2.453125 | 2 |
txsocks/__init__.py | infin8/txsocks | 0 | 12760369 | #
from socks5 import ClientFactory
| 1.070313 | 1 |
grapl_analyzerlib/schemas/ip_connection_schema.py | wittekm/grapl_analyzerlib | 3 | 12760370 | from grapl_analyzerlib.schemas.schema_builder import NodeSchema, ManyToOne
class IpConnectionSchema(NodeSchema):
    """Graph schema for an IpConnection node.

    Declares the connection's endpoint/port string properties, its
    created/terminated/last-seen timestamps, and a forward edge to the
    IpAddress the connection was made to.
    """
    def __init__(self) -> None:
        super(IpConnectionSchema, self).__init__()
        (
            self.with_str_prop("src_ip_address")
            .with_str_prop("src_port")
            .with_str_prop("dst_ip_address")
            .with_str_prop("dst_port")
            .with_int_prop("created_timestamp")
            .with_int_prop("terminated_timestamp")
            .with_int_prop("last_seen_timestamp")
            .with_forward_edge(
                "inbound_ip_connection_to",
                # Many connections may target one address; the reverse edge
                # exposed on IpAddress is named "ip_connections_from".
                ManyToOne(IpAddressSchema),
                "ip_connections_from",
            )
        )
    @staticmethod
    def self_type() -> str:
        """Return the node type name used to tag nodes of this schema."""
        return "IpConnection"
from grapl_analyzerlib.schemas.ip_address_schema import IpAddressSchema
| 2.046875 | 2 |
krop/build_rop.py | tszentendrei/henkaku | 534 | 12760371 | #!/usr/bin/env python3
from sys import argv, exit
import tempfile
import os.path
import subprocess
# Assembly prologue template; the placeholders are substituted per build so
# the same ROP source can be assembled once with dummy values and once with
# concrete payload address/size/base values.
tpl = """
.equ ENC_PAYLOAD_ADDR, {payload_addr}
.equ ENC_PAYLOAD_SIZE, {payload_size}
.equ BASE, {sysmem_base}
.equ SECOND_PAYLOAD, {second_payload}
"""
# Cross-toolchain prefix for the PS Vita ARM EABI binutils.
prefix = "arm-vita-eabi-"
def build(tmp, code):
    """Assemble ROP source *code* into a flat binary inside directory *tmp*.

    The source is written to rop.S, assembled with the Vita toolchain into
    rop.o and objcopy'd into the raw binary rop.bin.

    Args:
        tmp: working directory for intermediate files
        code: assembly source as bytes

    Returns:
        path of the produced raw binary file

    Raises:
        subprocess.CalledProcessError: if the assembler or objcopy fails
    """
    src_file = os.path.join(tmp, "rop.S")
    obj_file = os.path.join(tmp, "rop.o")
    bin_file = os.path.join(tmp, "rop.bin")
    # Context manager guarantees the source file is closed (and flushed)
    # before the assembler reads it, even if write() raises.
    with open(src_file, "wb") as fout:
        fout.write(code)
    subprocess.check_call([prefix + "as", src_file, "-o", obj_file])
    subprocess.check_call([prefix + "objcopy", "-O", "binary", obj_file, bin_file])
    return bin_file
def analyze(tmp, bin_file):
    """Run the ropfuscator analysis step over *bin_file*.

    Writes the gadget database into *tmp* and returns its path.
    """
    db_file = os.path.join(tmp, "rop.db")
    command = ["python3", "krop/ropfuscator.py", "analyze", bin_file, db_file, "DxT9HVn5"]
    subprocess.check_call(command)
    return db_file
def obfuscate(tmp, bin_file, db_file):
    """Run the ropfuscator 'generate' step and return the obfuscated chain.

    Args:
        tmp: working directory holding intermediate files
        bin_file: raw ROP binary produced by build()
        db_file: gadget database produced by analyze()

    Returns:
        the obfuscated ROP chain as bytes
    """
    obf_file = os.path.join(tmp, "rop.obf")
    subprocess.check_call(["python3", "krop/ropfuscator.py", "generate", bin_file, obf_file, db_file])
    # 'with' guarantees the handle is closed even if read() raises.
    with open(obf_file, "rb") as fin:
        data = fin.read()
    return data
def chunk(b, size):
    """Split a sliceable sequence into consecutive pieces of *size* elements.

    Args:
        b: sliceable sequence (bytes, str, list, ...) whose length is an
            exact multiple of size
        size: length of each piece

    Returns:
        list of len(b) // size consecutive slices of b

    Raises:
        RuntimeError: if len(b) is not a multiple of size
    """
    if len(b) % size != 0:
        # Fixed message: the check is on len(b), not on b itself.
        raise RuntimeError("chunk: len(b) % size != 0")
    return [b[x * size:(x + 1) * size] for x in range(len(b) // size)]
def write_rop_code(krop, relocs, addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos):
    """Emit Python source (krop.py) that rebuilds the kernel ROP chain at
    run time.

    Args:
        krop: list of little-endian 4-byte words of the obfuscated chain
        relocs: per-word flags; 1 means the word must be relocated by adding
            the runtime sysmem base
        addr_pos, size_shift_pos, size_xor_pos, size_plain_pos,
        second_payload_pos: word indices whose values are filled in at run
            time (payload address, the three size encodings, second payload)

    Returns:
        the generated Python source as a string
    """
    output = ""
    output += "from rop import Ret, Load\n"
    output += "def krop(rop):\n"
    output += " c = rop.caller\n"
    output += " d = rop.data\n"
    # NOTE(review): the generated function body appears to use single-space
    # indentation (valid Python, just unusual) -- confirm against the
    # original file before reformatting.
    tpl = " c.store({}, d.krop + 0x{:x})\n"
    for x, (addr, reloc) in enumerate(zip(krop, relocs)):
        addr = int.from_bytes(addr, "little")
        if reloc == 0:
            # Plain word: store the literal value at its offset.
            s = "0x{:x}".format(addr)
        else:
            # Relocated word: add the runtime sysmem base first, then store
            # the result (Ret) into the chain.
            output += " c.add(Load(d.sysmem_base), 0x{:x})\n".format(addr)
            s = "Ret"
        output += tpl.format(s, x * 4)
    output += " c.store(Load(d.kx_loader_addr), d.krop + 0x{:x})\n".format(addr_pos * 4)
    # I've hardcoded payload size to be 0x200, deal with it
    payload_size = 0x200
    output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format((payload_size >> 2) + 0x10, size_shift_pos * 4)
    output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format(payload_size ^ 0x40, size_xor_pos * 4)
    output += " c.store(0x{:x}, d.krop + 0x{:x})\n".format(payload_size, size_plain_pos * 4)
    output += " c.store(d.second_payload, d.krop + 0x{:x})\n".format(second_payload_pos * 4)
    return output
def main():
    """Build the obfuscated kernel ROP chain twice (zeroed vs. tagged
    placeholder values), diff the two chains to locate relocations and the
    runtime-filled slots, and write the resulting krop.py generator into the
    output directory.

    Returns:
        None on success (exit status 0), -1 on usage error, -2 on failure.
    """
    if len(argv) != 3:
        print("Usage: build_rop.py rop.S output-directory/")
        return -1
    fin = open(argv[1], "rb")
    code = fin.read()
    fin.close()
    # Magic marker values; chosen so they are recognizable in the diff below.
    tags = {
        "payload_addr": 0xF0F0F0F0,
        "payload_size": 0x0A0A0A00,
        "sysmem_base": 0xB0B00000,
        "second_payload": 0xC0C0C0C0,
    }
    with tempfile.TemporaryDirectory() as tmp:
        first_bin = build(tmp, tpl.format(payload_addr=0, payload_size=0, sysmem_base=0, second_payload=0).encode("ascii") + code)
        db_file = analyze(tmp, first_bin)
        first = obfuscate(tmp, first_bin, db_file)
        # Reuse the same gadget database so both chains line up word-by-word.
        second_bin = build(tmp, tpl.format(**tags).encode("ascii") + code)
        second = obfuscate(tmp, second_bin, db_file)
        if len(first) != len(second):
            print("wtf? got different krop lengths")
            return -2
        # Find differences in krops, a difference indicates either that this address depends on sysmem base or it's
        # payload addr/size
        krop = first = chunk(first, 4)
        second = chunk(second, 4)
        relocs = [0] * len(first)
        addr_pos = size_shift_pos = size_xor_pos = size_plain_pos = second_payload_pos = -1
        for i, (first_word, second_word) in enumerate(zip(first, second)):
            if first_word != second_word:
                # NOTE(review): 'second' (the word list) is rebound to an int
                # here; this is safe only because zip() above already captured
                # the list's iterator.
                second = int.from_bytes(second_word, "little")
                if second == tags["payload_addr"]:
                    addr_pos = i
                elif second == (tags["payload_size"] >> 2) + 0x10:
                    size_shift_pos = i
                elif second == tags["payload_size"] ^ 0x40:
                    size_xor_pos = i
                elif second == tags["payload_size"]:
                    size_plain_pos = i
                elif second == tags["second_payload"]:
                    second_payload_pos = i
                else:
                    # Any other changed word depends on the sysmem base.
                    relocs[i] = 1
        if -1 in [addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos]:
            print("unable to resolve positions: addr={}, size_shift={}, size_xor={}, size_plain={}, second_payload={}".format(
                addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos))
            return -2
        print("Kernel rop size: 0x{:x} bytes".format(len(krop) * 4))
        with open(os.path.join(argv[2], "krop.py"), "w") as fout:
            fout.write(write_rop_code(krop, relocs, addr_pos, size_shift_pos, size_xor_pos, size_plain_pos, second_payload_pos))
if __name__ == "__main__":
    # exit() turns main()'s return value into the process exit status.
    exit(main())
| 2.296875 | 2 |
4_src/3_other/1_surasura-python/q5-4/q5-4.py | hirobel/todoapp | 0 | 12760372 | <gh_stars>0
# Map letters to their numeric codes.
alpha_num_dict = dict(a=1, b=2, c=3)
# Assigning to an existing key overwrites its value.
alpha_num_dict['a'] = 10
print(alpha_num_dict['a'])
CodingInterview2/13_RobotMove/robot_move.py | hscspring/TheAlgorithms-Python | 10 | 12760373 | """
面试题 13:机器人的运动范围
题目:地上有一个 m 行 n 列的方格。一个机器人从坐标 (0, 0) 的格子开始移动,它
每一次可以向左、右、上、下移动一格,但不能进入行坐标和列坐标的数位之和
大于 k 的格子。例如,当 k 为 18 时,机器人能够进入方格 (35, 37),因为 3+5+3+7=18。
但它不能进入方格 (35, 38),因为 3+5+3+8=19。请问该机器人能够到达多少个格子?
"""
def moving_count(rows: int, cols: int, threshold: int) -> int:
    """
    Count how many cells of a rows x cols grid the robot can reach.

    The robot starts at (0, 0) and moves up/down/left/right, but may only
    enter cells where the digit sums of the row and column indices add up
    to at most `threshold`.

    Parameters
    -----------
    rows: int
        Matrix rows
    cols: int
        Matrix cols
    threshold: int
        The given condition

    Returns
    ---------
    out: int
        How many cells can be reached.

    Notes
    ------
    We could treat (row, col) as the center of a cell.
    """
    # Degenerate grids or a negative threshold admit no reachable cells.
    if rows <= 0 or cols <= 0 or threshold < 0:
        return 0
    # Idiomatic construction of the visited matrix; [False] * cols is safe
    # to share per row template because bools are immutable.
    visited = [[False] * cols for _ in range(rows)]
    return moving_count_core(rows, cols, 0, 0, threshold, visited)
def moving_count_core(rows, cols, row, col, threshold, visited):
    """
    Depth-first search counting reachable cells starting from (row, col).

    A cell contributes 1 and expands to its four neighbours only when it is
    inside the grid, not yet visited, and satisfies the digit-sum constraint.
    There is no need to backtrack: visited cells stay counted.
    """
    # Guard clauses: outside the grid, already counted, or constraint broken.
    if row < 0 or row >= rows or col < 0 or col >= cols:
        return 0
    if visited[row][col]:
        return 0
    if get_digit_num(row) + get_digit_num(col) > threshold:
        return 0
    # Mark before recursing so each cell is counted exactly once.
    visited[row][col] = True
    neighbours = (
        moving_count_core(rows, cols, row + 1, col, threshold, visited)
        + moving_count_core(rows, cols, row - 1, col, threshold, visited)
        + moving_count_core(rows, cols, row, col + 1, threshold, visited)
        + moving_count_core(rows, cols, row, col - 1, threshold, visited)
    )
    return 1 + neighbours
def get_digit_num(num: int) -> int:
    """
    Return the sum of the decimal digits of a non-negative integer.
    """
    total = 0
    while num:
        # divmod peels off the last digit and shrinks num in one step.
        num, digit = divmod(num, 10)
        total += digit
    return total
if __name__ == '__main__':
    # Demo: on a 1 x 10 grid with threshold 9 every cell is reachable (10).
    res = moving_count(1, 10, 9)
    print(res)
| 3.890625 | 4 |
nrekit/framework.py | ljm0/myOpenNRE | 3 | 12760374 | import tensorflow as tf
import os
import sklearn.metrics
import numpy as np
import sys
import time
def average_gradients(tower_grads):
    """Calculate the average gradient for each shared variable across all towers.
    Note that this function provides a synchronization point across all towers.
    Args:
        tower_grads: List of lists of (gradient, variable) tuples. The outer list
            is over individual gradients. The inner list is over the gradient
            calculation for each tower.
    Returns:
        List of pairs of (gradient, variable) where the gradient has been averaged
        across all towers.
    """
    average_grads = []
    # zip(*tower_grads) transposes the structure so we iterate per-variable
    # across towers rather than per-tower across variables.
    for grad_and_vars in zip(*tower_grads):
        # Note that each grad_and_vars looks like the following:
        #   ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
        grads = []
        for g, _ in grad_and_vars:
            # Add 0 dimension to the gradients to represent the tower.
            expanded_g = tf.expand_dims(g, 0)
            # Append on a 'tower' dimension which we will average over below.
            grads.append(expanded_g)
        # Average over the 'tower' dimension.
        grad = tf.concat(axis=0, values=grads)
        grad = tf.reduce_mean(grad, 0)
        # Keep in mind that the Variables are redundant because they are shared
        # across towers. So .. we will just return the first tower's pointer to
        # the Variable.
        v = grad_and_vars[0][1]
        grad_and_var = (grad, v)
        average_grads.append(grad_and_var)
    return average_grads
class re_model:
    """Basic model class, which contains data input and tensorflow graphs, should be inherited"""
    def __init__(self, train_data_loader, batch_size, max_length=120):
        """
        Class construction function; model initialization.
        Args:
            train_data_loader: a `file_data_loader` object, which could be `npy_data_loader`
                or `json_file_data_loader`
            batch_size: how many scopes/instances are included in one batch
            max_length: max sentence length, divide sentences into the same length (working
                part should be finished in `data_loader`)
        Returns:
            None
        """
        # Input placeholders for one batch; all are fed by re_framework.
        self.word = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='word')
        # pos1/pos2 are presumably per-token position features w.r.t. the two
        # entities -- TODO confirm against the data loader.
        self.pos1 = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='pos1')
        self.pos2 = tf.placeholder(dtype=tf.int32, shape=[None, max_length], name='pos2')
        # Bag-level labels (one per scope) vs instance-level labels.
        self.label = tf.placeholder(dtype=tf.int32, shape=[batch_size], name='label')
        self.ins_label = tf.placeholder(dtype=tf.int32, shape=[None], name='ins_label')
        self.length = tf.placeholder(dtype=tf.int32, shape=[None], name='length')
        # Each scope row is a [begin, end) instance range belonging to one bag.
        self.scope = tf.placeholder(dtype=tf.int32, shape=[batch_size, 2], name='scope')
        self.train_data_loader = train_data_loader
        self.rel_tot = train_data_loader.rel_tot
        self.word_vec_mat = train_data_loader.word_vec_mat
    def loss(self):
        """training loss, should be overridden in the subclasses"""
        raise NotImplementedError
    def train_logit(self):
        """training logit, should be overridden in the subclasses"""
        raise NotImplementedError
    def test_logit(self):
        """test logit, should be overridden in the subclasses"""
        raise NotImplementedError
class re_framework:
    """The basic training framework; does all the training and test work."""
    MODE_BAG = 0 # Train and test the model at bag level.
    MODE_INS = 1 # Train and test the model at instance level
    def __init__(self, train_data_loader, test_data_loader, max_length=120, batch_size=160):
        """
        Class construction function; framework initialization.
        Args:
            train_data_loader: a `file_data_loader` object, which could be `npy_data_loader`
                or `json_file_data_loader`
            test_data_loader: similar as the `train_data_loader`
            max_length: max sentence length, divide sentences into the same length (working
                part should be finished in `data_loader`)
            batch_size: how many scopes/instances are included in one batch
        Returns:
            None
        """
        self.train_data_loader = train_data_loader
        self.test_data_loader = test_data_loader
        self.sess = None # default graph session
    def one_step_multi_models(self, sess, models, batch_data_gen, run_array, return_label=True):
        """
        Run models and multiple running tasks via session.
        Args:
            sess: tf.Session() that is going to run
            models: a list. this function supports multi-model training
            batch_data_gen: `data_loader` to generate batch data
            run_array: a list, contains all the running models or arrays
            return_label: boolean argument. if it is `True`, then the training label
                will be appended to the returned result
        Returns:
            result: a tuple/list contains the result
        """
        feed_dict = {}
        batch_label = []
        # Each model (one per GPU tower) gets an equal share of the batch.
        for model in models:
            batch_data = batch_data_gen.next_batch(batch_data_gen.batch_size // len(models))
            feed_dict.update({
                model.word: batch_data['word'],
                model.pos1: batch_data['pos1'],
                model.pos2: batch_data['pos2'],
                model.label: batch_data['rel'],
                model.ins_label: batch_data['ins_rel'],
                model.scope: batch_data['scope'],
                model.length: batch_data['length'],
            })
            if 'mask' in batch_data and hasattr(model, "mask"): # mask data is used in PCNN models
                feed_dict.update({model.mask: batch_data['mask']})
            batch_label.append(batch_data['rel'])
        result = sess.run(run_array, feed_dict)
        # Concatenate labels from all towers back into one batch-sized array.
        batch_label = np.concatenate(batch_label)
        if return_label:
            result += [batch_label]
        return result
    def one_step(self, sess, model, batch_data, run_array):
        """
        Run one model and multiple running tasks via session; usually used in
        the test operation.
        Args:
            sess: tf.Session() that is going to run
            model: one model, inherited from `re_model`
            batch_data: a dict contains the batch data
            run_array: a list, contains all the running models or arrays
        Returns:
            result: a tuple/list contains the result
        """
        feed_dict = {
            model.word: batch_data['word'],
            model.pos1: batch_data['pos1'],
            model.pos2: batch_data['pos2'],
            model.label: batch_data['rel'],
            model.ins_label: batch_data['ins_rel'],
            model.scope: batch_data['scope'],
            model.length: batch_data['length'],
        }
        if 'mask' in batch_data and hasattr(model, "mask"):
            feed_dict.update({model.mask: batch_data['mask']})
        result = sess.run(run_array, feed_dict)
        return result
    def train(self, model, model_name, ckpt_dir='./checkpoint', summary_dir='./summary',
              test_result_dir='./test_result', learning_rate=0.5, max_epoch=60,
              pretrain_model=None, test_epoch=1, optimizer=tf.train.GradientDescentOptimizer,
              gpu_nums=1, not_best_stop=20):
        """
        Training function.
        Args:
            model: `re_model` class (not an instance) that is going to be trained
            model_name: a string, to identify models, affecting checkpoint saving
            ckpt_dir: checkpoint saving directory
            summary_dir: for tensorboard use, to save summary files
            test_result_dir: directory to store the final results
            learning_rate: learning rate of optimizer
            max_epoch: how many epochs you want to train
            pretrain_model: a string, containing the checkpoint model path and model name
                e.g. ./checkpoint/nyt_pcnn_one
            test_epoch: when do you want to test the model. default is `1`, which means
                test the result after every training epoch
            optimizer: training optimizer, default is `tf.train.GradientDescentOptimizer`
            gpu_nums: how many gpus you want to use when training
            not_best_stop: if there are `not_best_stop` test rounds that do not improve
                on the best result, the training will be stopped
        Returns:
            None
        """
        assert(self.train_data_loader.batch_size % gpu_nums == 0)
        print("Start training...")
        # Init
        config = tf.ConfigProto(allow_soft_placement=True) # allow cpu computing if there is no gpu available
        self.sess = tf.Session(config=config)
        # Rebind: 'optimizer' now holds an instance, not the class.
        optimizer = optimizer(learning_rate)
        # Multi GPUs: one tower (model replica) per GPU; per-tower gradients
        # are averaged below by average_gradients().
        tower_grads = []
        tower_models = []
        for gpu_id in range(gpu_nums):
            with tf.device("/gpu:%d" % gpu_id):
                with tf.name_scope("gpu_%d" % gpu_id):
                    cur_model = model(self.train_data_loader, self.train_data_loader.batch_size // gpu_nums, self.train_data_loader.max_length)
                    tower_grads.append(optimizer.compute_gradients(cur_model.loss()))
                    tower_models.append(cur_model)
                    tf.add_to_collection("loss", cur_model.loss())
                    tf.add_to_collection("train_logit", cur_model.train_logit())
        loss_collection = tf.get_collection("loss")
        loss = tf.add_n(loss_collection) / len(loss_collection)
        train_logit_collection = tf.get_collection("train_logit")
        train_logit = tf.concat(train_logit_collection, 0)
        grads = average_gradients(tower_grads)
        train_op = optimizer.apply_gradients(grads)
        summary_writer = tf.summary.FileWriter(summary_dir, self.sess.graph)
        """supporting check the scalars on tensorboard"""
        _output = tf.cast(tf.argmax(train_logit, -1), tf.int32) # predicted output
        _tot_acc = tf.reduce_mean(tf.cast(tf.equal(_output, tower_models[0].label), tf.float32)) # accuracy including N/A relations
        _not_na_acc = tf.reduce_mean(tf.cast(tf.logical_and(tf.equal(_output, tower_models[0].label), tf.not_equal(tower_models[0].label, 0)), tf.float32)) # accuracy not including N/A relations
        tf.summary.scalar('tot_acc', _tot_acc)
        tf.summary.scalar('not_na_acc', _not_na_acc)
        # Saver
        saver = tf.train.Saver(max_to_keep=None)
        if pretrain_model is None:
            self.sess.run(tf.global_variables_initializer())
        else:
            saver.restore(self.sess, pretrain_model)
        # Training
        merged_summary = tf.summary.merge_all() # merge all scalars and histograms
        best_metric = 0
        best_prec = None
        best_recall = None
        not_best_count = 0 # Stop training after several epochs without improvement.
        global_cnt = 0 # for record summary steps
        for epoch in range(max_epoch):
            print('###### Epoch ' + str(epoch) + ' ######')
            tot_correct = 0
            tot_not_na_correct = 0
            tot = 0
            tot_not_na = 0
            i = 0
            time_sum = 0
            # One epoch: iterate until the data loader raises StopIteration.
            while True:
                time_start = time.time()
                try:
                    summa, iter_loss, iter_logit, _train_op, iter_label = self.one_step_multi_models(self.sess, tower_models, self.train_data_loader, [merged_summary, loss, train_logit, train_op])
                except StopIteration:
                    break
                summary_writer.add_summary(summa, global_cnt)
                time_end = time.time()
                t = time_end - time_start
                time_sum += t
                iter_output = iter_logit.argmax(-1)
                iter_correct = (iter_output == iter_label).sum()
                iter_not_na_correct = np.logical_and(iter_output == iter_label, iter_label != 0).sum()
                tot_correct += iter_correct
                tot_not_na_correct += iter_not_na_correct
                tot += iter_label.shape[0]
                tot_not_na += (iter_label != 0).sum()
                if tot_not_na > 0:
                    sys.stdout.write("epoch %d step %d time %.2f | loss: %f, not NA accuracy: %f, accuracy: %f\r" % (epoch, i, t, iter_loss, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                    sys.stdout.flush()
                i += 1
            print("\nAverage iteration time: %f" % (time_sum / i))
            if (epoch + 1) % test_epoch == 0:
                metric = self.test(model)
                if metric > best_metric:
                    best_metric = metric
                    best_prec = self.cur_prec
                    best_recall = self.cur_recall
                    print("Best model, storing...")
                    if not os.path.isdir(ckpt_dir):
                        os.mkdir(ckpt_dir)
                    # NOTE(review): 'path' is assigned but never used.
                    path = saver.save(self.sess, os.path.join(ckpt_dir, model_name))
                    print("Finish storing")
                    not_best_count = 0
                else:
                    not_best_count += 1
                if not_best_count >= not_best_stop:
                    break
            global_cnt += 1
        print("######")
        print("Finish training " + model_name)
        print("Best epoch auc = %f" % (best_metric))
        # Persist the best precision/recall curve for later plotting.
        if (not best_prec is None) and (not best_recall is None):
            if not os.path.isdir(test_result_dir):
                os.mkdir(test_result_dir)
            np.save(os.path.join(test_result_dir, model_name + "_x.npy"), best_recall)
            np.save(os.path.join(test_result_dir, model_name + "_y.npy"), best_prec)
    def test(self, model, ckpt=None, return_result=False, mode=MODE_BAG):
        """
        Test function, to evaluate a model.
        Args:
            model: a `re_model` class (not an instance)
            ckpt: optional path of a pretrained checkpoint to restore
            return_result: if True, the predicted result is returned as well
            mode: basically it is at the bag level (only MODE_BAG is implemented)
        Returns:
            auc: if return_result is True, return AUC and predicted labels,
                else return AUC only
        """
        if mode == re_framework.MODE_BAG:
            return self.__test_bag__(model, ckpt=ckpt, return_result=return_result)
        elif mode == re_framework.MODE_INS:
            raise NotImplementedError
        else:
            raise NotImplementedError
    def __test_bag__(self, model, ckpt=None, return_result=False):
        """
        Test function at bag level.
        Args:
            model: a `re_model` class (not an instance)
            ckpt: optional path of a pretrained checkpoint to restore
            return_result: if True, the predicted result is returned as well
        Returns:
            auc: if return_result is True, return AUC and predicted labels,
                else return AUC only
        """
        print("Testing...")
        if self.sess == None:
            self.sess = tf.Session()
        # Rebind: instantiate the model class against the test loader.
        model = model(self.test_data_loader, self.test_data_loader.batch_size, self.test_data_loader.max_length)
        if not ckpt is None:
            saver = tf.train.Saver()
            saver.restore(self.sess, ckpt)
        tot_correct = 0
        tot_not_na_correct = 0
        tot = 0
        tot_not_na = 0
        entpair_tot = 0
        test_result = []
        pred_result = []
        for i, batch_data in enumerate(self.test_data_loader):
            iter_logit = self.one_step(self.sess, model, batch_data, [model.test_logit()])[0]
            iter_output = iter_logit.argmax(-1)
            iter_correct = (iter_output == batch_data['rel']).sum()
            iter_not_na_correct = np.logical_and(iter_output == batch_data['rel'], batch_data['rel'] != 0).sum()
            tot_correct += iter_correct
            tot_not_na_correct += iter_not_na_correct
            tot += batch_data['rel'].shape[0]
            tot_not_na += (batch_data['rel'] != 0).sum()
            if tot_not_na > 0:
                sys.stdout.write("[TEST] step %d | not NA accuracy: %f, accuracy: %f\r" % (i, float(tot_not_na_correct) / tot_not_na, float(tot_correct) / tot))
                sys.stdout.flush()
            # Collect one (score, flag) pair per bag per non-NA relation for
            # the precision/recall curve below.
            for idx in range(len(iter_logit)):
                for rel in range(1, self.test_data_loader.rel_tot):
                    test_result.append({'score': iter_logit[idx][rel], 'flag': batch_data['multi_rel'][idx][rel]})
                    if batch_data['entpair'][idx] != "None#None":
                        pred_result.append({'score': float(iter_logit[idx][rel]), 'entpair': batch_data['entpair'][idx].encode('utf-8'), 'relation': rel})
                        entpair_tot += 1
        sorted_test_result = sorted(test_result, key=lambda x: x['score'])
        prec = []
        recall = []
        correct = 0
        # Walk predictions from highest to lowest score, accumulating the
        # precision/recall curve.
        for i, item in enumerate(sorted_test_result[::-1]):
            correct += item['flag']
            prec.append(float(correct) / (i + 1))
            recall.append(float(correct) / self.test_data_loader.relfact_tot)
        auc = sklearn.metrics.auc(x=recall, y=prec)
        print("\n[TEST] auc: {}".format(auc))
        print("Finish testing")
        self.cur_prec = prec
        self.cur_recall = recall
        if not return_result:
            return auc
        else:
            return (auc, pred_result)
| 2.9375 | 3 |
sorts/insertion_sort.py | vrshah90/interview | 0 | 12760375 | """
Time:
Best: O(n)
Average: O(n^2)
Worst: O(n^2)
Space:
O(1)
Stable:
Yes
Worst Case Scenario:
Reverse Sorted Array
Algorithm Overview:
This algorithm works like how a person would normally sort a hand of cards from left to right.
You take the second card and compare with all the cards to the left, shifting over each card which is larger than
the card you are trying to sort.
The algorithm starts at the second element, and persists the element's index in a pointer. It then iterates over
all the items to the left by moving the pointer back until either the index pointer is at 0 or it finds an element
on the left hand side which is less than the current item. It then persists the element before that item.
"""
def insertion_sort(arr):
    """Sort *arr* in place (ascending, stable) using insertion sort.

    Everything left of index ``i`` is kept sorted. Each pass takes ``arr[i]``,
    shifts the larger sorted elements one slot to the right, and drops the
    taken value into the resulting gap.
    """
    for i in range(1, len(arr)):
        key = arr[i]
        j = i - 1
        # Shift sorted elements strictly greater than key one slot right.
        # Using '>' (not '>=') keeps equal elements in order: stability.
        while j >= 0 and arr[j] > key:
            arr[j + 1] = arr[j]
            j -= 1
        # j now sits on the first element <= key (or -1); insert after it.
        arr[j + 1] = key
if __name__ == '__main__':
    # Quick demo of the in-place sort.
    arr = [8, 3, 1, 2]
    insertion_sort(arr)
    print(arr)
| 4.5 | 4 |
grafana_backup/save.py | jartigag/grafana-backupper | 1 | 12760376 | from grafana_backup.api_checks import main as api_checks
from grafana_backup.save_dashboards import main as save_dashboards
from grafana_backup.save_datasources import main as save_datasources
from grafana_backup.save_folders import main as save_folders
from grafana_backup.save_alert_channels import main as save_alert_channels
from grafana_backup.archive import main as archive
from grafana_backup.save_orgs import main as save_orgs
from grafana_backup.save_users import main as save_users
import sys
import os.path
from os import path
import shutil
def main(args, settings):
    """Run a Grafana backup: save the selected components and optionally
    archive the result.

    Args:
        args: docopt-style argument dict (keys like '--components',
            '--no-archive').
        settings: settings object exposing get()/update().
    """
    arg_components = args.get('--components', False)
    arg_no_archive = args.get('--no-archive', False) or (not settings.get('ARCHIVE_OUTPUT'))
    # Maps each component name (as accepted by --components) to its saver.
    backup_functions = {'dashboards': save_dashboards,
                        'datasources': save_datasources,
                        'folders': save_folders,
                        'alert-channels': save_alert_channels,
                        'organizations': save_orgs,
                        'users': save_users}
    (status, json_resp, uid_support, paging_support) = api_checks(settings)
    # Do not continue if API is unavailable or token is not valid
    if status != 200:
        print("server status is not ok: {0}".format(json_resp))
        sys.exit(1)
    settings.update({'UID_SUPPORT': uid_support})
    settings.update({'PAGING_SUPPORT': paging_support})
    # Deliberately '== False': an unset value (None) must not trigger deletion.
    if settings.get('TIMESTAMP_OUTPUT') == False:
        # If we are not timestamping outputs, we should delete any existing
        # output files before generating new ones. However we don't want to
        # just delete the whole directory since there may be, e.g. a .git
        # directory in there.
        backup_dir = settings.get('BACKUP_DIR')
        print("{0} exists - deleting contents before creating new non-timestamped backup.".format(backup_dir))
        # Special-case /alert_channels vs alert-channels in the backup_functions list
        if path.exists(backup_dir + '/alert_channels'):
            shutil.rmtree(backup_dir + '/alert_channels')
        for subdir in backup_functions:
            if path.exists(backup_dir + '/' + subdir):
                shutil.rmtree(backup_dir + '/' + subdir)
    if arg_components:
        # Backup only the components that were provided via the argument.
        for backup_function in arg_components.split(','):
            backup_functions[backup_function](args, settings)
    else:
        # Backup every component.
        for backup_function in backup_functions:
            backup_functions[backup_function](args, settings)
    if not arg_no_archive:
        archive(args, settings)
| 2.140625 | 2 |
utils/coco/merge_annotations.py | raunilillemets/cvat | 2 | 12760377 | import argparse
import cv2
import glog
import json
import numpy as np
import os
from tqdm import tqdm
from pycocotools import coco as coco_loader
def parse_args():
    """Build the CLI parser and return the parsed arguments."""
    cli = argparse.ArgumentParser(
        description='Merge annotations in COCO representation into one'
    )
    cli.add_argument(
        '--input-dir', required=True,
        help='directory with input annotations in *.json format'
    )
    cli.add_argument(
        '--output', required=True,
        help='output annotation file'
    )
    cli.add_argument(
        '--images-map', required=True,
        help='file with map of datasets and its images path (json format)'
    )
    cli.add_argument(
        '--draw', default=None,
        help='directory to save images with its segments. By default is disabled'
    )
    return cli.parse_args()
def draw_bboxes_and_masks(img, annotations, input_dir):
    """ Draw bounding boxes and contours of masks on image and save it.
    :param img: image record from the COCO annotation (its 'file_name'
        field locates the image under input_dir)
    :param annotations: list of bonding boxes and segments on the image
    :param input_dir: base directory to save images
    """
    input_file = os.path.join(input_dir, img['file_name'])
    # Rendered copies go into a 'draw' subfolder next to the source image
    save_path = os.path.join(os.path.dirname(input_file), 'draw')
    if not os.path.exists(save_path):
        os.makedirs(save_path)
    output_file = os.path.join(save_path, os.path.basename(input_file))
    # NOTE: 'img' is rebound here from the annotation record to the pixel data
    img = cv2.imread(input_file)
    yellow = (0, 255, 255)  # mask contour color (BGR)
    red = (0, 0, 255)       # bounding-box / label color (BGR)
    for ann in annotations:
        cat_id = str(ann['category_id'])
        # COCO bbox is [x, y, width, height]; convert to corner coordinates
        bbox = [int(ann['bbox'][0]), int(ann['bbox'][1]),
                int(ann['bbox'][0] + ann['bbox'][2]), int(ann['bbox'][1] + ann['bbox'][3])]
        masks = ann['segmentation']
        for mask in masks:
            # Segmentation is a flat [x0, y0, x1, y1, ...] polygon; regroup
            # it into (x, y) pairs before drawing the contour
            i = 0
            points = []
            while i < len(mask):
                x = int(mask[i])
                y = int(mask[i + 1])
                points.append([x, y])
                i += 2
            img = cv2.polylines(img, np.int32([points]), True, yellow, 1)
        img = cv2.rectangle(img, (bbox[0], bbox[1]), (bbox[2], bbox[3]), red, 1)
        # Place the category id label inside the box (quarter across, halfway down)
        x = bbox[0] + (bbox[2] - bbox[0]) // 4
        y = bbox[1] + (bbox[3] - bbox[1]) // 2
        cv2.putText(img, cat_id, (x, y), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, red, 1)
    cv2.imwrite(output_file, img)
def is_json_file(filename):
    """ Check if file has a *.json type (just check an extension)
    :param filename: name of file
    :return: True if file has a *.json type
    """
    # endswith already returns a bool; the redundant `True if ... else False`
    # ternary from the original was removed.
    return filename.lower().endswith('.json')
def get_anno_list(directory):
    """ Get list of files in directory
    :param directory: directory to parse
    :return: list of files in the directory in format [name1.ext, name2.ext, ...]
    """
    # Keep only entries whose extension marks them as JSON annotation files
    # (the extension check from is_json_file is inlined here).
    return [entry for entry in os.listdir(directory)
            if entry.lower().endswith('.json')]
def pretty_string(name_list):
    """ Make a string from list of some names
    :param name_list: list of names [name#0, name#1, ...]
    :return: string in format:
        -name#0
        -name#1
    """
    # str.join avoids the quadratic cost of repeated string concatenation
    return ''.join('\n -' + name for name in name_list)
def common_path_images(images_map):
    """ Define which part of paths to images is common for all of them
    :param images_map: dictionary of matched datasets and its images paths. Format:
           {
               'dataset1.json': '/path/to/images/for/dataset1',
               'dataset2.json': '/path/to/images/for/dataset2',
               ...
           }
    :return: string with a common part of the images paths
    """
    # The keys (dataset names) are irrelevant here; only the paths matter.
    return os.path.commonpath(list(images_map.values()))
def merge_annotations(directory, anno_list, images_map):
    """ Merge several annotations in COCO representation into one
    :param directory: base directory where is saved all datasets which is needed to merge
    :param anno_list: list of annotations to merge. [dataset1.json, dataset2.json, ...]
    :param images_map: dictionary of matched datasets and its images paths
    :return: merged annotation, list of used annotations and list of skipped annotations
    """
    merged_anno = None
    first_step = True
    # Categories of the first accepted dataset; later datasets must match
    reference_classes = None
    common_path = common_path_images(images_map)
    valid_annos = []
    skipped_annos = []
    for anno_file in tqdm(anno_list, 'Parsing annotations...'):
        # Skip datasets without a known images path
        if anno_file not in images_map:
            glog.warning('Dataset <{}> is absent in \'images-map\' file and will be ignored!'.format(anno_file))
            skipped_annos.append(anno_file)
            continue
        # Image paths are rewritten relative to the common root of all datasets
        img_prefix = images_map[anno_file].replace(common_path, '')
        if img_prefix[0] == '/':
            img_prefix = img_prefix.replace('/', '', 1)
        with open(os.path.join(directory, anno_file)) as f:
            data = json.load(f)
            for img in data['images']:
                img['file_name'] = os.path.join(img_prefix, img['file_name'])
            if first_step:
                # The first accepted dataset seeds the merged annotation
                merged_anno = data
                reference_classes = data['categories']
                first_step = False
            else:
                classes = data['categories']
                if classes != reference_classes:
                    glog.warning('Categories field in dataset <{}> has another classes and will be ignored!'
                                 .format(anno_file))
                    skipped_annos.append(anno_file)
                    continue
                # Offset image and annotation ids so they stay unique
                # after concatenation with the already-merged data
                add_img_id = len(merged_anno['images'])
                add_obj_id = len(merged_anno['annotations'])
                for img in data['images']:
                    img['id'] += add_img_id
                for ann in data['annotations']:
                    ann['id'] += add_obj_id
                    ann['image_id'] += add_img_id
                merged_anno['images'].extend(data['images'])
                merged_anno['annotations'].extend(data['annotations'])
        valid_annos.append(anno_file)
    return merged_anno, valid_annos, skipped_annos
def main():
    """Entry point: merge COCO annotations, validate the result via
    cocoapi, and optionally render the merged boxes/masks onto images."""
    args = parse_args()
    anno_list = get_anno_list(args.input_dir)
    with open(args.images_map) as f:
        images_map = json.load(f)
    result_annotation, valid_annos, skipped_annos = merge_annotations(args.input_dir, anno_list, images_map)
    assert len(valid_annos) > 0, 'The result annotation is empty! Please check parameters and your \'images_map\' file.'

    # Save created annotation
    glog.info('Saving annotation...')
    with open(args.output, 'w') as outfile:
        json.dump(result_annotation, outfile)
    glog.info('Annotation was saved in <{}> successfully'.format(args.output))

    # Try to load created annotation via cocoapi. The original wrapped this
    # in `try/except: raise`, which is a no-op; any failure simply propagates.
    glog.info('Trying to load annotation <{}> via cocoapi...'.format(args.output))
    coco_loader.COCO(args.output)
    glog.info('Annotation in COCO representation <{}> successfully created from: {}'
              .format(args.output, pretty_string(valid_annos)))

    if len(skipped_annos) > 0:
        glog.info('The next annotations were skipped: {}'.format(pretty_string(skipped_annos)))

    if args.draw:
        # Group annotations by image id once (O(n + m)) instead of scanning
        # the full annotation list for every image (O(n * m)).
        anns_by_image = {}
        for ann in result_annotation['annotations']:
            anns_by_image.setdefault(ann['image_id'], []).append(ann)
        for img in tqdm(result_annotation['images'], 'Drawing and saving images...'):
            draw_bboxes_and_masks(img, anns_by_image.get(img['id'], []), args.draw)
# Run as a script: merge annotations according to the CLI arguments.
if __name__ == "__main__":
    main()
| 2.59375 | 3 |
src/python/MergeSort/MergeSort-1.py | sheilapaiva/data-structures | 19 | 12760378 | <filename>src/python/MergeSort/MergeSort-1.py<gh_stars>10-100
import random
# DIVIDE A LISTA EM DUAS PARA O MERGE SORT
def separador(lista):
    """Split the list into two halves for merge sort.

    Returns a tuple (first_half, second_half); for odd lengths the
    second half gets the extra element, matching the original loops.
    """
    # Floor division keeps the midpoint an int on both Python 2 and 3
    # (the original `len(lista)/2` breaks under Python 3's true division).
    meio = len(lista) // 2
    return lista[:meio], lista[meio:]
# MERGE JUNTA LISTA
def juntalista(lista1, lista2):
    """Merge two sorted lists into one sorted list.

    Unlike the original version this does not mutate (empty) its
    arguments and runs in O(n) instead of O(n^2), because it avoids
    list.pop(0), which shifts every remaining element.
    """
    i = j = 0
    novalista = []
    while i < len(lista1) and j < len(lista2):
        # `>=` takes the element from lista2 on ties, matching the original.
        if lista1[i] >= lista2[j]:
            novalista.append(lista2[j])
            j += 1
        else:
            novalista.append(lista1[i])
            i += 1
    # At most one of these extends with anything.
    novalista.extend(lista1[i:])
    novalista.extend(lista2[j:])
    return novalista
# SORT
def merge(lista):
    """Recursively merge-sort ``lista`` and return the sorted result."""
    # Base case: lists with 0 or 1 elements are already sorted.
    if len(lista) < 2:
        return lista
    # Recursive case: sort each half, then merge them back together.
    metade_esquerda, metade_direita = separador(lista)
    return juntalista(merge(metade_esquerda), merge(metade_direita))
# Teste: check merge() against Python's sorted() on random permutations.
# The original used `while True` with Python-2 print syntax, which both
# fails on Python 3 and never terminates when merge() is correct; a
# bounded number of trials keeps the script terminating.
for _ in range(100):
    h = random.sample(range(0, 100), 100)
    if sorted(h) != merge(h):
        print(h)
        break
| 3.859375 | 4 |
Examples/Infovis/Python/graph_tree_ring.py | txwhhny/vtk | 3 | 12760379 | <reponame>txwhhny/vtk<filename>Examples/Infovis/Python/graph_tree_ring.py<gh_stars>1-10
#!/usr/bin/env python
from vtk import *
# Programmable filter that adds a zero-padded string "label" attribute
# to every vertex of the incoming graph.
addStringLabel = vtkProgrammableFilter()

def computeLabel():
    """Execute callback for addStringLabel: copy input and attach labels."""
    # NOTE(review): `input` shadows the builtin; left unchanged here.
    input = addStringLabel.GetInput()
    output = addStringLabel.GetOutput()
    output.ShallowCopy(input)
    # Create output array
    vertexArray = vtkStringArray()
    vertexArray.SetName("label")
    vertexArray.SetNumberOfTuples(output.GetNumberOfVertices())
    # Loop through all the vertices setting the degree for the new attribute array
    for i in range(output.GetNumberOfVertices()):
        # Label is the vertex index formatted as two digits, e.g. "07"
        label = '%02d' % (i)
        vertexArray.SetValue(i, label)
    # Add the new attribute array to the output graph
    output.GetVertexData().AddArray(vertexArray)

addStringLabel.SetExecuteMethod(computeLabel)
# Build a small random graph with edge weights as the pipeline source.
source = vtkRandomGraphSource()
source.SetNumberOfVertices(15)
source.SetIncludeEdgeWeights(True)
addStringLabel.SetInputConnection(source.GetOutputPort())
# Annotate the graph with connected and biconnected component ids.
conn_comp = vtkBoostConnectedComponents()
bi_conn_comp = vtkBoostBiconnectedComponents()
conn_comp.SetInputConnection(addStringLabel.GetOutputPort())
bi_conn_comp.SetInputConnection(conn_comp.GetOutputPort())
# Cleave off part of the graph
vertexDataTable = vtkDataObjectToTable()
vertexDataTable.SetInputConnection(bi_conn_comp.GetOutputPort())
vertexDataTable.SetFieldType(3) # Vertex data
# Make a tree out of connected/biconnected components
toTree = vtkTableToTreeFilter()
toTree.AddInputConnection(vertexDataTable.GetOutputPort())
# First grouping level: connected component id
tree1 = vtkGroupLeafVertices()
tree1.AddInputConnection(toTree.GetOutputPort())
tree1.SetInputArrayToProcess(0,0, 0, 4, "component")
tree1.SetInputArrayToProcess(1,0, 0, 4, "label")
# Second grouping level: biconnected component id
tree2 = vtkGroupLeafVertices()
tree2.AddInputConnection(tree1.GetOutputPort())
tree2.SetInputArrayToProcess(0,0, 0, 4, "biconnected component")
tree2.SetInputArrayToProcess(1,0, 0, 4, "label")
# Create a tree ring view on connected/biconnected components
view1 = vtkTreeRingView()
view1.SetTreeFromInputConnection(tree2.GetOutputPort())
view1.SetGraphFromInputConnection(bi_conn_comp.GetOutputPort())
view1.SetLabelPriorityArrayName("GraphVertexDegree")
view1.SetAreaColorArrayName("VertexDegree")
view1.SetAreaLabelArrayName("label")
view1.SetAreaHoverArrayName("label")
view1.SetAreaLabelVisibility(True)
view1.SetBundlingStrength(.5)
view1.SetLayerThickness(.5)
view1.Update()
view1.SetColorEdges(True)
view1.SetEdgeColorArrayName("edge weight")
# Second view: plain 2D graph layout of the same component-annotated graph
view2 = vtkGraphLayoutView()
view2.AddRepresentationFromInputConnection(bi_conn_comp.GetOutputPort())
view2.SetVertexLabelArrayName("label")
view2.SetVertexLabelVisibility(True)
view2.SetVertexColorArrayName("label")
view2.SetColorVertices(True)
view2.SetLayoutStrategyToSimple2D()
# Apply a theme to the views
theme = vtkViewTheme.CreateOceanTheme()
view1.ApplyViewTheme(theme)
view2.ApplyViewTheme(theme)
theme.FastDelete()
view1.GetRenderWindow().SetSize(600, 600)
view1.ResetCamera()
view1.Render()
view2.GetRenderWindow().SetSize(600, 600)
view2.ResetCamera()
view2.Render()
# Start the interactor; blocks until the windows are closed.
view1.GetInteractor().Start()
python/tvm/topi/x86/roi_align.py | zhenlohuang/tvm | 2 | 12760380 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name, no-member, too-many-locals, too-many-arguments, undefined-variable, too-many-nested-blocks, too-many-branches, too-many-statements
"""Non-maximum suppression operator for intel cpu"""
import math
import tvm
from tvm.te import hybrid
from ..tensor import full
from ..utils import get_const_tuple
@hybrid.script
def roi_align_nchw_ir(data, rois, num_rois, w_pc, pos_pc, pooled_size, spatial_scale, sample_ratio):
    """Hybrid routine for ROI align operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor or numpy NDArray
        4-D with shape [batch, channel, height, width]

    rois : tvm.te.Tensor or numpy NDArray
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]

    num_rois : tvm.tir.IntImm or tvm.tir.Var
        Number of roi. We need to pass it in since hybrid script doesn't support
        binding variable to symbolic dim.

    w_pc : tvm.te.Tensor or numpy NDArray
        3-D weight pre-calculation buffer

    pos_pc : tvm.te.Tensor or numpy NDArray
        3-D position pre-calculation buffer

    pooled_size : tvm ConsExpr
        [out_height, out_width]

    spatial_scale : tvm.tir.const
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]

    sample_ratio : tvm.tir.const
        Sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor or numpy NDArray
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    channels = data.shape[1]
    height = data.shape[2]
    width = data.shape[3]
    pooled_size_h = pooled_size[0]
    pooled_size_w = pooled_size[1]
    output = output_tensor((num_rois, channels, pooled_size_h, pooled_size_w), data.dtype)

    # Each ROI is processed independently, so parallelize over them.
    for n in parallel(num_rois):
        roi_batch_index = int32(rois[n, 0])
        # Scale ROI coordinates from image space to feature-map space.
        roi_start_w = rois[n, 1] * spatial_scale
        roi_start_h = rois[n, 2] * spatial_scale
        roi_end_w = rois[n, 3] * spatial_scale
        roi_end_h = rois[n, 4] * spatial_scale

        # Clamp ROI extent to at least 1 pixel to avoid degenerate bins.
        roi_h = max(roi_end_h - roi_start_h, 1.0)
        roi_w = max(roi_end_w - roi_start_w, 1.0)

        bin_h = roi_h / pooled_size_h
        bin_w = roi_w / pooled_size_w

        roi_bin_grid_h = sample_ratio
        roi_bin_grid_w = roi_bin_grid_h
        rounded_bin_h = int32(bin_h) * 1.0
        rounded_bin_w = int32(bin_w) * 1.0
        if sample_ratio <= 0:
            # Cannot use ceil function since hybrid script
            # doesn't support Call as indexing
            roi_bin_grid_h = int32(bin_h)
            roi_bin_grid_w = int32(bin_w)
            if rounded_bin_h < bin_h:
                roi_bin_grid_h += 1
            if rounded_bin_w < bin_w:
                roi_bin_grid_w += 1

        count = roi_bin_grid_h * roi_bin_grid_w

        # Pre-calculate indices and weights shared by all channels.
        # This is the key point of optimization.
        pre_calc_index = 0
        iy_upper = roi_bin_grid_h
        ix_upper = roi_bin_grid_w
        for ph in range(pooled_size_h):
            for pw in range(pooled_size_w):
                for iy in range(iy_upper):
                    # Sample point centered inside the sub-bin.
                    yy = roi_start_h + ph * bin_h + (iy + 0.5) * bin_h / roi_bin_grid_h
                    for ix in range(ix_upper):
                        xx = roi_start_w + pw * bin_w + (ix + 0.5) * bin_w / roi_bin_grid_w
                        x = xx
                        y = yy
                        if y < -1.0 or y > height or x < -1.0 or x > width:
                            # Sample falls outside the feature map: zero
                            # weights so it contributes nothing.
                            for i in range(4):
                                w_pc[n, pre_calc_index, i] = 0.0
                                pos_pc[n, pre_calc_index, i] = 0
                        else:
                            if y < 0.0:
                                y = 0.0
                            if x < 0.0:
                                x = 0.0

                            # Integer corner coordinates for bilinear interpolation.
                            y_low = int32(y)
                            x_low = int32(x)
                            x_high = x_low + 1
                            y_high = y_low + 1

                            # Clamp corners at the right/bottom feature-map edge.
                            if y_low >= height - 1:
                                y_high = height - 1
                                y_low = y_high
                                y = float32(y_low)
                            if x_low >= width - 1:
                                x_high = width - 1
                                x_low = x_high
                                x = float32(x_low)

                            # Bilinear interpolation weights for the 4 corners.
                            ly = y - y_low
                            lx = x - x_low
                            hy = 1.0 - ly
                            hx = 1.0 - lx
                            w1 = hy * hx
                            w2 = hy * lx
                            w3 = ly * hx
                            w4 = ly * lx

                            pos_pc[n, pre_calc_index, 0] = x_low
                            pos_pc[n, pre_calc_index, 1] = x_high
                            pos_pc[n, pre_calc_index, 2] = y_low
                            pos_pc[n, pre_calc_index, 3] = y_high
                            w_pc[n, pre_calc_index, 0] = w1
                            w_pc[n, pre_calc_index, 1] = w2
                            w_pc[n, pre_calc_index, 2] = w3
                            w_pc[n, pre_calc_index, 3] = w4

                        pre_calc_index += 1

        # Second pass: apply the precomputed weights/positions to every
        # channel and average the samples within each output bin.
        for c in range(channels):
            pre_calc_index = 0
            for ph in range(pooled_size_h):
                for pw in range(pooled_size_w):
                    output_val = 0.0
                    for iy in range(roi_bin_grid_h):
                        for ix in range(roi_bin_grid_w):
                            output_val += (
                                w_pc[n, pre_calc_index, 0]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 2],
                                    pos_pc[n, pre_calc_index, 0],
                                ]
                                + w_pc[n, pre_calc_index, 1]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 2],
                                    pos_pc[n, pre_calc_index, 1],
                                ]
                                + w_pc[n, pre_calc_index, 2]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 3],
                                    pos_pc[n, pre_calc_index, 0],
                                ]
                                + w_pc[n, pre_calc_index, 3]
                                * data[
                                    roi_batch_index,
                                    c,
                                    pos_pc[n, pre_calc_index, 3],
                                    pos_pc[n, pre_calc_index, 1],
                                ]
                            )
                            pre_calc_index += 1
                    output_val /= count
                    output[n, c, ph, pw] = output_val

    return output
def roi_align_nchw(data, rois, pooled_size, spatial_scale, sample_ratio=-1):
    """ROI align operator in NCHW layout.

    Parameters
    ----------
    data : tvm.te.Tensor
        4-D with shape [batch, channel, height, width]

    rois : tvm.te.Tensor
        2-D with shape [num_roi, 5]. The last dimension should be in format of
        [batch_index, w_start, h_start, w_end, h_end]

    pooled_size : int or list/tuple of two ints
        output size, or [out_height, out_width]

    spatial_scale : float
        Ratio of input feature map height (or w) to raw image height (or w). Equals the reciprocal
        of total stride in convolutional layers, which should be in range (0.0, 1.0]

    sample_ratio : int
        Optional sampling ratio of ROI align, using adaptive size by default.

    Returns
    -------
    output : tvm.te.Tensor
        4-D with shape [num_roi, channel, pooled_size, pooled_size]
    """
    # Normalize a scalar pooled_size into an (out_h, out_w) pair.
    if not isinstance(pooled_size, (tuple, list)):
        pooled_size = (pooled_size, pooled_size)

    # Worst-case sampling grid per bin, used to size the pre-calculation
    # buffers that the IR kernel fills.
    if sample_ratio > 0:
        grid_h = grid_w = sample_ratio
    else:
        _, _, feat_h, feat_w = get_const_tuple(data.shape)
        grid_h = math.ceil(feat_h / pooled_size[0])
        grid_w = math.ceil(feat_w / pooled_size[1])

    num_rois = rois.shape[0]
    pc_shape = (
        rois.shape[0],
        grid_h * grid_w * pooled_size[0] * pooled_size[1],
        4,
    )
    weight_buffer = full(pc_shape, data.dtype, 0)
    position_buffer = full(pc_shape, "int32", 0)

    return roi_align_nchw_ir(
        data,
        rois,
        num_rois,
        weight_buffer,
        position_buffer,
        tvm.runtime.convert(pooled_size),
        tvm.tir.const(spatial_scale, "float32"),
        tvm.tir.const(sample_ratio, "int32"),
    )
| 1.484375 | 1 |
amos/image_labelling_service/labelling/test/test_new_city_handler.py | amosproj/2020ws02-computer-vision-for-sights | 2 | 12760381 | <filename>amos/image_labelling_service/labelling/test/test_new_city_handler.py
from _pytest.monkeypatch import MonkeyPatch
from labelling.new_city_handler import persist_google_vision_labels, _get_image_resolution, \
_get_merged_bounding_box_string, _label_image, _parse_landmark_to_bounding_box_str, _read_image_ids_for_labelling, \
LOG_FILE_NAME, log_incident
from mock import patch
from typing import Dict, Union, List
import os
import re
MODULE_PATH = 'labelling.new_city_handler'
def test_persist_google_vision_labels_images_available() -> None:
    """When unlabelled image ids exist, the id retriever must be consulted."""
    with patch(f'{MODULE_PATH}._read_image_ids_for_labelling', return_value=[1, 2, 3]) as id_retriever, \
            patch(f'{MODULE_PATH}._label_image') as labeller, \
            patch(f'{MODULE_PATH}.log_incident'):  # omit logging for tests
        persist_google_vision_labels('berlin')
        assert id_retriever.called
def test_persist_google_vision_labels_no_images_available() -> None:
    """With no ids to label, a 'No images' incident must be logged."""
    with patch(f'{MODULE_PATH}._read_image_ids_for_labelling', return_value=None) as id_retriever, \
            patch(f'{MODULE_PATH}.log_incident') as logger:  # omit logging for tests
        persist_google_vision_labels('berlin')
        assert id_retriever.called
        # First positional argument of the logged incident is the message
        assert 'No images' in logger.call_args[0][0]
def test_get_image_resolution(image_mock: bytes) -> None:
    """The mock image fixture decodes to its known 1337x1338 resolution."""
    width, height = _get_image_resolution(image_mock)
    assert width == 1337
    assert height == 1338
def test_get_merged_bounding_box_string() -> None:
    """Individual box strings are joined with commas and wrapped in braces."""
    loose_box_strings = [
        "(1,2,3,4,Test1)",
        "(2,3,4,5,Test2)",
        "(3,4,5,6,Test3)",
    ]
    actual_result = _get_merged_bounding_box_string(loose_box_strings)
    expected_result = '{' + f'{loose_box_strings[0]},{loose_box_strings[1]},{loose_box_strings[2]}' + '}'
    assert actual_result == expected_result
def test_label_image(vision_response_mock: List[Dict[str, Union[str, float, dict, list]]], image_mock: bytes) -> None:
    """Labelling one image persists a DML query containing its url and labels."""
    mock_url = 'https://xd.com/awesome.png'
    with patch(f'{MODULE_PATH}.exec_dql_query',
               return_value=[[image_mock, mock_url]]), \
            patch(f'{MODULE_PATH}._get_landmarks_from_vision', return_value=vision_response_mock), \
            patch(f'{MODULE_PATH}.exec_dml_query') as persistor:
        _label_image(42)
        # no error occurring
        assert persistor.called
        called_query = persistor.call_args_list[0][0][0]
        # url extraction correct
        assert mock_url in called_query
        # both labels saved
        assert re.escape(vision_response_mock[0]['description']) in called_query
        assert re.escape(vision_response_mock[1]['description']) in called_query
def test_log_incident() -> None:
    """log_incident creates the log file on disk when it does not exist."""
    # Start from a clean slate in case a previous test left the file behind
    if os.path.exists(LOG_FILE_NAME):
        os.remove(LOG_FILE_NAME)
    assert os.path.exists(LOG_FILE_NAME) is False
    log_incident('Test :)')
    assert os.path.exists(LOG_FILE_NAME) is True
    # Clean up so later tests see a fresh state
    os.remove(LOG_FILE_NAME)
def test_parsing_landmark_to_str(vision_response_mock: List[Dict[str, Union[str, float, dict, list]]],
                                 image_mock: bytes) -> None:
    """A parsed landmark yields relative coordinates in (0, 1) plus its name."""
    width, height = _get_image_resolution(image_mock)
    test_landmark = vision_response_mock[1]
    # Strip the surrounding braces/parentheses, then split the fields
    bounding_box_infos = _parse_landmark_to_bounding_box_str(test_landmark, width, height)[2:-2].split(',')
    # All positional fields must be strictly relative (between 0 and 1)
    assert all(
        map(
            lambda relative_position: 0. < float(relative_position) < 1.,
            bounding_box_infos[:-1]
        )
    )
    # Last field is the (regex-escaped) landmark description
    assert bounding_box_infos[-1] == re.escape(test_landmark['description'])
def test_read_image_ids_for_labelling() -> None:
    """Image ids returned by the DQL query are unpacked into a flat list.

    The original version declared an unused ``monkeypatch`` fixture
    parameter; it has been removed.
    """
    with patch(f'{MODULE_PATH}.exec_dql_query', return_value=[(1,), (2,), (3,)]):
        image_ids_to_label = _read_image_ids_for_labelling('test city')
        assert image_ids_to_label is not None
        assert image_ids_to_label == [1, 2, 3]
| 2.09375 | 2 |
tests/unit/enrichers/test_gene_enricher.py | bmeg/bmeg-etl | 1 | 12760382 | import bmeg.enrichers.gene_enricher as gene_enricher
def test_simple():
    """A unique, unambiguous symbol resolves to its full gene record."""
    tp53 = gene_enricher.get_gene('TP53')
    assert(tp53), 'Should exist'
    # The record carries the entrez, ensembl and HGNC identifiers
    assert tp53 == {'symbol': u'TP53', 'entrez_id': u'7157',
                    'ensembl_gene_id': u'ENSG00000141510',
                    'hgnc_id': 'HGNC:11998'}
def test_ambiguous():
    """ "ABC1" can point to both "ABCA1" and "HEATR6", so lookup must fail. """
    try:
        gene_enricher.get_gene('ABC1')
    except ValueError:
        pass
    else:
        # The original used `assert 'Should have raised value error'`, a
        # truthy string assert that could never fail; raise explicitly
        # when the expected ValueError is not seen.
        raise AssertionError('Should have raised value error')
def test_ZUFSP():
    """ "ZUFSP" is a previous synonym and should still resolve. """
    zufsp = gene_enricher.get_gene('ZUFSP')
    assert(zufsp), 'Should exist'
def test_FOOBAR():
    """An unknown gene symbol must raise ValueError."""
    try:
        gene_enricher.get_gene('FOOBAR')
    except ValueError:
        pass
    else:
        # The original used a truthy bare-string assert that could never
        # fail; raise explicitly when no ValueError is observed.
        raise AssertionError('Should have raised value error')
| 2.671875 | 3 |
dataHora.py | renatabezerratech/Exercicio_Data_Hora_Python | 0 | 12760383 | <reponame>renatabezerratech/Exercicio_Data_Hora_Python
# Working with dates requires importing the datetime module.
import datetime
# To use it, assign the value to a variable and then ask for what you want:
x = datetime.datetime.now()
print(x) # Show the current date and time
x = datetime.datetime.now()
print(x.strftime("%A")) # Show the day of the week
# The strftime() method formats the date as a string
# later
from datetime import date # Import the date class from the datetime module
from datetime import time # Import the time class from the datetime module
from datetime import datetime # Import the datetime class from the datetime module
def trabalhaData(): # Demo function (no parameters) showing date/time usage
    """Print today's date in several formats plus the current time."""
    hoje = date.today() # 'hoje' holds today's date from date.today()
    print("Hoje: ",hoje) # Print it as-is
    print("Data dividida: ",hoje.day, hoje.month, hoje.year) # Print day/month/year separately
    # weekday() returns 0 for Monday, so this lookup list starts on Monday
    diaSemana = ["segunda", "terça", "quarta", "quinta", "sexta", "sábado", "domingo"]
    print("Hoje é ",diaSemana[hoje.weekday()]) # Look up the weekday name by index
    data = datetime.now() # Current date and time
    print("Data e hora atual: ",data)
    hora = datetime.time(data) # Use 'data' to extract just the time component
    print("Hora atual: ",hora)
trabalhaData() # Call the function
| 3.875 | 4 |
model.py | ChuanHuoGe/Attention-CNN-relation-extraction | 9 | 12760384 | <filename>model.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# @Version : Python 3.6
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class Attention_CNN(nn.Module):
    """CNN with entity-aware attention for relation extraction.

    Each token is embedded as word + two relative-position channels +
    POS tag (total dimension D). A 1D convolution over the sentence is
    max-pooled into a sentence feature; two attention-pooled context
    vectors (one per entity) are concatenated with it before the final
    linear classifier over relation classes.
    """
    def __init__(self, word_vec, class_num, pos_num, config):
        """
        :param word_vec: pretrained word-embedding matrix (tensor)
        :param class_num: number of relation classes
        :param pos_num: size of the POS-tag vocabulary
        :param config: hyper-parameter holder (max_len, word_dim, ...)
        """
        super().__init__()
        self.word_vec = word_vec
        self.class_num = class_num
        self.pos_num = pos_num
        # hyper parameters and others
        self.max_len = config.max_len
        self.word_dim = config.word_dim
        self.pos_dim = config.pos_dim
        self.pos_dis = config.pos_dis
        self.tag_dim = config.tag_dim
        # NOTE(review): dropout_value is stored but never used below —
        # presumably a dropout layer was planned; confirm before relying on it.
        self.dropout_value = config.dropout
        self.filter_num = config.filter_num
        self.window = config.window
        # Per-token feature dimension D
        self.dim = self.word_dim + 2 * self.pos_dim + self.tag_dim
        # net structures and operations
        self.word_embedding = nn.Embedding.from_pretrained(
            embeddings=self.word_vec,
            freeze=False,
        )
        # Relative-position embeddings for distances to entity 1 and entity 2
        self.pos1_embedding = nn.Embedding(
            num_embeddings=2 * self.pos_dis + 3,
            embedding_dim=self.pos_dim
        )
        self.pos2_embedding = nn.Embedding(
            num_embeddings=2 * self.pos_dis + 3,
            embedding_dim=self.pos_dim
        )
        self.tag_embedding = nn.Embedding(
            num_embeddings=self.pos_num,
            embedding_dim=self.tag_dim
        )
        # Convolution over the full embedding width -> one value per position
        self.conv = nn.Conv2d(
            in_channels=1,
            out_channels=self.filter_num,
            kernel_size=(self.window, self.dim),
            stride=(1, 1),
            bias=True,
            padding=(1, 0),  # same padding
            padding_mode='zeros'
        )
        self.maxpool = nn.MaxPool2d((self.max_len, 1))
        self.tanh = nn.Tanh()
        # Attention parameters: transform [token; entity] then score it
        self.we = nn.Linear(
            in_features=self.dim * 2,
            out_features=self.dim * 2,
            bias=True
        )
        self.wa = nn.Linear(
            in_features=self.dim*2,
            out_features=1,
            bias=True
        )
        # Classifier over [max-pooled conv ; e1 context ; e2 context]
        self.dense = nn.Linear(
            in_features=self.filter_num + 2 * self.dim,
            out_features=self.class_num,
            bias=True
        )

        # initialize weight
        init.uniform_(self.pos1_embedding.weight, a=-0.1, b=0.1)
        init.uniform_(self.pos2_embedding.weight, a=-0.1, b=0.1)
        init.uniform_(self.tag_embedding.weight, a=-0.1, b=0.1)
        init.uniform_(self.conv.weight, a=-0.1, b=0.1)
        init.constant_(self.conv.bias, 0.)
        init.uniform_(self.we.weight, a=-0.1, b=0.1)
        init.constant_(self.we.bias, 0.)
        init.uniform_(self.wa.weight, a=-0.1, b=0.1)
        init.constant_(self.wa.bias, 0.)
        init.uniform_(self.dense.weight, a=-0.1, b=0.1)
        init.constant_(self.dense.bias, 0.)

    def encoder_layer(self, token, pos1, pos2, tags):
        """Embed tokens and concatenate all per-token feature channels."""
        word_emb = self.word_embedding(token)  # B*L*word_dim
        pos1_emb = self.pos1_embedding(pos1)  # B*L*pos_dim
        pos2_emb = self.pos2_embedding(pos2)  # B*L*pos_dim
        tag_emb = self.tag_embedding(tags)  # B*L*tag_dim
        emb = torch.cat(tensors=[word_emb, pos1_emb, pos2_emb, tag_emb], dim=-1)
        return emb  # B*L*D, D=word_dim+2*pos_dim+tag_dim

    def conv_layer(self, emb, mask):
        """Convolve the embeddings and mask out padded positions."""
        emb = emb.unsqueeze(dim=1)  # B*1*L*D
        conv = self.conv(emb)  # B*C*L*1

        # mask, remove the effect of 'PAD'
        conv = conv.view(-1, self.filter_num, self.max_len)  # B*C*L
        mask = mask.unsqueeze(dim=1)  # B*1*L
        mask = mask.expand(-1, self.filter_num, -1)  # B*C*L
        # -inf makes padded positions lose in the subsequent max-pool
        conv = conv.masked_fill_(mask.eq(0), float('-inf'))  # B*C*L
        conv = conv.unsqueeze(dim=-1)  # B*C*L*1
        return conv

    def single_maxpool_layer(self, conv):
        """Max-pool over the sequence axis to one value per filter."""
        pool = self.maxpool(conv)  # B*C*1*1
        pool = pool.view(-1, self.filter_num)  # B*C
        return pool

    def entity_average(self, emb, e_mask):
        """Average the embeddings of the positions marked by e_mask."""
        lengths = torch.sum(e_mask.eq(1), dim=-1).view(-1, 1)  # B*1
        mask = e_mask.unsqueeze(dim=1).float()  # B*1*L
        sum_emb = torch.bmm(mask, emb).squeeze(dim=1)  # B*D
        avg_emb = sum_emb / lengths  # B*D, broadcasting
        return avg_emb

    def attention_layer(self, emb, entity, mask):
        """Attention-pool token embeddings conditioned on one entity vector."""
        entity = entity.unsqueeze(dim=1).expand(-1, self.max_len, -1)  # B*L*D
        h = torch.cat(tensors=[emb, entity], dim=-1)  # B*L*2D
        h_flat = h.view(-1, 2*self.dim)  # B·L*2D
        output = self.tanh(self.we(h_flat))  # B·L*2D
        u_flat = self.wa(output)  # B·L*1
        u = u_flat.view(-1, self.max_len)  # B*L

        # remove the effect of <PAD>
        att_score = u.masked_fill(mask.eq(0), float('-inf'))  # B*L

        att_weight = F.softmax(att_score, dim=-1).unsqueeze(dim=-1)  # B*L*1
        reps = torch.bmm(emb.transpose(1, 2), att_weight).squeeze(dim=-1)  # B*D*L * B*L*1 -> B*D*1 -> B*D
        return reps

    def forward(self, data):
        """data packs 7 per-token int channels along dim 1; returns class logits."""
        token = data[:, 0, :].view(-1, self.max_len)
        pos1 = data[:, 1, :].view(-1, self.max_len)
        pos2 = data[:, 2, :].view(-1, self.max_len)
        mask = data[:, 3, :].view(-1, self.max_len)
        tags = data[:, 4, :].view(-1, self.max_len)
        e1_mask = data[:, 5, :].view(-1, self.max_len)
        e2_mask = data[:, 6, :].view(-1, self.max_len)
        emb = self.encoder_layer(token, pos1, pos2, tags)
        conv = self.conv_layer(emb, mask)
        conv = self.tanh(conv)
        pool = self.single_maxpool_layer(conv)
        # Entity-conditioned attention contexts for both entities
        e1_emb = self.entity_average(emb, e1_mask)
        e2_emb = self.entity_average(emb, e2_mask)
        e1_context = self.attention_layer(emb, e1_emb, mask)
        e2_context = self.attention_layer(emb, e2_emb, mask)
        e1_context = self.tanh(e1_context)
        e2_context = self.tanh(e2_context)
        feature = torch.cat(tensors=[pool, e1_context, e2_context], dim=-1)  # B* C+2D
        logits = self.dense(feature)
        return logits
| 2.734375 | 3 |
Problems/Study Plans/Algorithm/Algorithm II/44_minimum_size_subarray_sum.py | andor2718/LeetCode | 1 | 12760385 | <reponame>andor2718/LeetCode
# https://leetcode.com/problems/minimum-size-subarray-sum/
class Solution:
    def minSubArrayLen(self, target: int, nums: list[int]) -> int:
        """Length of the shortest contiguous subarray with sum >= target.

        Classic sliding-window formulation: grow the window on the right,
        shrink it from the left while the sum would stay large enough.
        Returns 0 when no qualifying subarray exists.
        """
        best = float('inf')
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            # A single element reaching the target is optimal immediately.
            if value >= target:
                return 1
            window_sum += value
            # Shrink while dropping the leftmost element keeps sum >= target.
            while left < right and window_sum - nums[left] >= target:
                window_sum -= nums[left]
                left += 1
            if window_sum >= target:
                best = min(best, right - left + 1)
        return 0 if best == float('inf') else best
| 3.453125 | 3 |
mirage/utils/backgrounds.py | mtakahiro/mirage | 37 | 12760386 | <filename>mirage/utils/backgrounds.py
#! /usr/bin env python
"""This module contains functions for calculating background signals using
jwst_backgrounds
"""
import astropy.units as u
import datetime
import logging
import numpy as np
import os
from jwst_backgrounds import jbt
from mirage.logging import logging_functions
from mirage.utils.constants import PRIMARY_MIRROR_AREA, PLANCK, LOG_CONFIG_FILENAME, STANDARD_LOGFILE_NAME
from mirage.utils.file_io import read_filter_throughput
from mirage.utils.flux_cal import fluxcal_info
# Percentiles corresponding to "low", "medium", and "high" as used in
# the ETC (JWST Exposure Time Calculator)
LOW = 0.1
MEDIUM = 0.5
HIGH = 0.9
# Configure module-level logging from the package's standard config file,
# located relative to this module's parent directory
classdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../'))
log_config_file = os.path.join(classdir, 'logging', LOG_CONFIG_FILENAME)
logging_functions.create_logger(log_config_file, STANDARD_LOGFILE_NAME)
def calculate_background(ra, dec, filter_file, use_dateobs, gain_value,
                         siaf_instance, back_wave=None, back_sig=None, level='medium'):
    """Use the JWST background calculator to come up with an appropriate background
    level for the observation.

    Parameters
    ----------
    ra : float
        RA in degrees

    dec : float
        Dec in degrees

    filter_file : str
        Name of ascii file containing filter throughput curve

    use_dateobs : bool
        Use the observation date to find the background value

    gain_value : float
        Detector gain (e-/ADU), used to convert the background signal
        from e-/sec/pixel to ADU/sec/pixel

    siaf_instance : pysiaf.Siaf
        Siaf instance for the instrument/aperture to be used

    back_wave : numpy.ndarray
        1D array of wavelength values for the background spectrum.
        These are only used in the case where ``use_dateobs`` is True

    back_sig : numpy.ndarray
        1D array of signal values for the background spectrum.
        These are only used in the case where ``use_dateobs`` is True

    level : str
        'low', 'medium', or 'high'

    Returns
    -------
    bval : float
        Background value in units of ADU/sec/pixel
    """
    # NOTE: the original re-imported jwst_backgrounds and astropy.units here
    # (plus unused `si, cgs`); those duplicated/dead function-local imports
    # were removed — both modules are already imported at module level.

    # Read in filter throughput file
    filt_wav, filt_thru = read_filter_throughput(filter_file)

    # If the user wants a background signal from a particular day,
    # then extract that array here
    if use_dateobs:
        # Interpolate background to match filter wavelength grid
        bkgd_interp = np.interp(filt_wav, back_wave, back_sig)

        # Combine with the filter throughput
        filt_bkgd = bkgd_interp * filt_thru

        # Convert surface brightness (MJy/sr) to a photon rate per pixel,
        # then integrate over wavelength
        pixelarea = siaf_instance.XSciScale * u.arcsec * siaf_instance.YSciScale * u.arcsec
        photon_total = PRIMARY_MIRROR_AREA * (filt_bkgd * u.MJy / u.sr) * (1. / PLANCK) * 1.e-20 * pixelarea.to(u.sr) / (filt_wav * u.micron)
        bval = np.trapz(photon_total, x=filt_wav)
        bval = bval.value
    else:
        # If the user has requested background in terms of low/medium/high,
        # then we need to examine all the background arrays.
        # Loop over each day (in the background)
        # info, convolve the background curve with the filter
        # throughput curve, and then integrate. THEN, we can
        # calculate the low/medium/high values.
        bval = low_medium_high_background_value(ra, dec, level, filt_wav, filt_thru, siaf_instance)

    # Convert the background signal from e-/sec/pixel to ADU/sec/pixel
    bval /= gain_value
    return bval
def day_of_year_background_spectrum(ra, dec, observation_date):
    """Call jwst_backgrounds in order to produce an estimate of background
    versus wavelength for a particular pointing on a particular day of
    year.

    Parameters
    ----------
    ra : float
        Right Ascension of pointing, in degrees
    dec : float
        Declination of pointing, in degrees
    observation_date : str
        Requested date of the observation in ISO format, i.e. '2021-10-31'

    Returns
    -------
    background_waves : numpy.ndarray
        1D array with the wavelength values in microns associated with
        ``background_signals``
    background_signals : numpy.ndarray
        1D array containing background values in MJy/str

    Raises
    ------
    ValueError
        If the pointing is not observable on the requested day of year.
    """
    # Generate background spectra for all days the target is observable.
    # The value 4. is forwarded as jbt.background's third positional
    # argument (see the jwst_backgrounds documentation for its meaning).
    background = jbt.background(ra, dec, 4.)
    # Convert the requested date to a day-of-year number so it can be
    # compared against the visibility calendar.
    obsdate = datetime.datetime.strptime(observation_date, '%Y-%m-%d')
    obs_dayofyear = obsdate.timetuple().tm_yday
    if obs_dayofyear not in background.bkg_data['calendar']:
        raise ValueError(("ERROR: The requested RA, Dec is not observable on {}. Either "
                          "specify a different day, or set simSignals:use_dateobs_for_background "
                          "to False.".format(observation_date)))
    # Extract the spectrum for the requested day (boolean mask selects
    # exactly one row of the per-day background table).
    match = obs_dayofyear == background.bkg_data['calendar']
    background_waves = background.bkg_data['wave_array']
    background_signals = background.bkg_data['total_bg'][match, :][0]
    return background_waves, background_signals
def find_low_med_high(array):
    """Compute the background values corresponding to the "low",
    "medium" and "high" percentile levels of ``array``.

    Parameters
    ----------
    array : numpy.ndarray
        1D array of values

    Returns
    -------
    levels : list
        [low, medium, high] percentile values of array, using the
        module-level LOW / MEDIUM / HIGH percentile fractions.
    """
    return [find_percentile(array, fraction) for fraction in (LOW, MEDIUM, HIGH)]
def find_percentile(array, percentile):
    """Return the value of ``array`` at the given cumulative fraction.

    The array is sorted and each element i (1-based) is assigned the
    cumulative probability i / len(array); the requested percentile is
    then linearly interpolated on that grid.

    Parameters
    ----------
    array : numpy.ndarray
        Array of values to be searched
    percentile : float
        Percentile to search for. For example, to find the 50th percentile
        value, use 0.5

    Returns
    -------
    float
        Interpolated value of ``array`` at ``percentile``.
    """
    ordered = np.sort(array)
    count = len(ordered)
    cumulative = np.arange(1, count + 1) / count
    return np.interp(percentile, cumulative, ordered)
def low_med_high_background_spectrum(param_dict, detector, module):
    """Call jwst_backgrounds in order to produce an estimate of background
    versus wavelength for a particular pointing that corresponds to one of
    "low", "medium", or "high", as with the ETC.

    Parameters
    ----------
    param_dict : dict
        Dictionary of observation information from an input yaml file
    detector : str
        Name of detector, e.g. "NRCA1"
    module : str
        Name of module, e.g. "A"

    Returns
    -------
    background_waves : numpy.ndarray
        1D array with the wavelength values in microns associated with
        ``background_spec``
    background_spec : numpy.ndarray
        1D array containing background values in MJy/str

    Raises
    ------
    ValueError
        If ``simSignals:bkgdrate`` is not one of low/medium/high.
    """
    # Generate background spectra for all days the target is observable
    background = jbt.background(param_dict['Telescope']['ra'], param_dict['Telescope']['dec'], 4.)

    # Get basic flux calibration information (only the pivot wavelength
    # of the requested filter is used below)
    vegazp, photflam, photfnu, pivot_wavelength = fluxcal_info(param_dict['Reffiles']['flux_cal'],
                                                               param_dict['Inst']['instrument'],
                                                               param_dict['Readout']['filter'],
                                                               param_dict['Readout']['pupil'], detector, module)

    # Extract the background value across all days at the tabulated
    # wavelength closest to the pivot wavelength
    wave_diff = np.abs(background.bkg_data['wave_array'] - pivot_wavelength)
    bkgd_wave = np.where(wave_diff == np.min(wave_diff))[0][0]
    bkgd_at_pivot = background.bkg_data['total_bg'][:, bkgd_wave]

    # Determine the values corresponding to the low/medium/high levels
    low, medium, high = find_low_med_high(bkgd_at_pivot)
    levels = {'low': low, 'medium': medium, 'high': high}

    # Look up the value matching the level requested in the yaml file.
    # (Fixed typo in the error message: "mediumn" -> "medium".)
    background_level = param_dict['simSignals']['bkgdrate'].lower()
    try:
        level_value = levels[background_level]
    except KeyError:
        raise ValueError(("ERROR: Unrecognized background value: {}. Must be low, medium, or high"
                          .format(param_dict['simSignals']['bkgdrate'])))

    # Return the spectrum of the day whose background at the pivot
    # wavelength is closest to the requested level
    diff = np.abs(bkgd_at_pivot - level_value)
    mindiff = np.where(diff == np.min(diff))[0][0]
    background_spec = background.bkg_data['total_bg'][mindiff, :]
    return background.bkg_data['wave_array'], background_spec
def low_medium_high_background_value(ra, dec, background_level, filter_waves, filter_throughput, siaf_info):
    """Calculate the integrated background flux density for a given filter,
    using the filter's throughput curve and the user-input background level
    (e.g. "medium")

    Parameters
    ----------
    ra : float
        Right ascension of the pointing. Units are degrees
    dec : float
        Declination of the pointing. Units are degrees
    background_level : str
        "low", "medium", or "high", just as with the ETC
    filter_waves : numpy.ndarray
        1d array of wavelengths in microns to be used along with
        ``filter_throughput``
    filter_throughput : numpy.ndarray
        1d array of filter throughput values to convolve with the background
        spectrum. Normalized units. 1.0 = 100% transmission.
    siaf_info : pysiaf.Siaf
        Siaf information for the detector/aperture in use

    Returns
    -------
    value : float
        Background value corresponding to ``background_level``, integrated
        over the filter bandpass. Background units are e-/sec/pixel.

    Raises
    ------
    ValueError
        If ``background_level`` is not one of low/medium/high.
    """
    # Background spectra for every day the target is observable
    bg = jbt.background(ra, dec, 4.)
    back_wave = bg.bkg_data['wave_array']
    num_days = len(bg.bkg_data['total_bg'][:, 0])
    bsigs = np.zeros(num_days)

    # Pixel solid angle is independent of the day; compute it once
    # outside the loop (hoisted loop-invariant).
    pixelarea = siaf_info.XSciScale * u.arcsec * siaf_info.YSciScale * u.arcsec

    for i in range(num_days):
        back_sig = bg.bkg_data['total_bg'][i, :]
        # Interpolate the day's background to match the filter wavelength grid
        bkgd_interp = np.interp(filter_waves, back_wave, back_sig)
        # Convolve with the filter throughput
        filt_bkgd = bkgd_interp * filter_throughput
        # Convert from MJy/sr to e-/sec/pixel and integrate over wavelength
        photon_total = PRIMARY_MIRROR_AREA * (filt_bkgd * u.MJy / u.sr) * (1. / PLANCK) * 1.e-20 * pixelarea.to(u.sr) / (filter_waves * u.micron)
        bsigs[i] = np.trapz(photon_total.value, x=filter_waves)

    # Determine the low/medium/high percentile levels over all days
    low, medium, high = find_low_med_high(bsigs)
    levels = {'low': low, 'medium': medium, 'high': high}

    # Look up the requested level. (Fixed typo in the error message:
    # "mediumn" -> "medium".)
    try:
        value = levels[background_level.lower()]
    except KeyError:
        raise ValueError(("ERROR: Unrecognized background value: {}. Must be low, medium, or high"
                          .format(background_level)))
    return value
def get_1d_background_spectrum(parameters, detector, module):
    """Generate a background spectrum by calling jwst_backgrounds and
    returning wavelengths and flux density values based on observation
    date or low/medium/high

    Parameters
    ----------
    parameters : dict
        Nested dictionary containing parameters pertaining to background
        generation. Designed around dictionary from reading in a Mirage
        input yaml file
    detector : str
        Detector name (e.g. 'NRCA1')
    module : str
        Module name (e.g. 'A')

    Returns
    -------
    waves : numpy.ndarray
        1D array of wavelength values (in microns)
    fluxes : numpy.ndarray
        1D array of flux density values

    Raises
    ------
    ValueError
        If the background rate string is not low/medium/high, or the rate
        is non-string while ``use_dateobs_for_background`` is False.
    """
    logger = logging.getLogger('mirage.utils.backgrounds.nircam_background_spectrum')
    if parameters['simSignals']['use_dateobs_for_background']:
        # Date-driven: use the spectrum of the specific observation day
        logger.info("Generating background spectrum for observation date: {}"
                    .format(parameters['Output']['date_obs']))
        waves, fluxes = day_of_year_background_spectrum(parameters['Telescope']['ra'],
                                                        parameters['Telescope']['dec'],
                                                        parameters['Output']['date_obs'])
    else:
        # Level-driven: bkgdrate must be the string low/medium/high
        if isinstance(parameters['simSignals']['bkgdrate'], str):
            if parameters['simSignals']['bkgdrate'].lower() in ['low', 'medium', 'high']:
                logger.info("Generating background spectrum based on requested level of: {}"
                            .format(parameters['simSignals']['bkgdrate']))
                waves, fluxes = low_med_high_background_spectrum(parameters, detector, module)
            else:
                raise ValueError("ERROR: Unrecognized background rate. Must be one of 'low', 'medium', 'high'")
        else:
            # Numeric background rates are not supported here
            raise ValueError(("ERROR: WFSS background rates must be one of 'low', 'medium', 'high', "
                              "or use_dateobs_for_background must be True "))
    return waves, fluxes
def niriss_background_scaling(param_dict, detector, module):
    """Determine the background value at the filter's pivot wavelength
    that corresponds to the requested level ("low", "medium", or "high",
    as with the ETC). Callers can ratio this value against the one used
    to create the pre-existing NIRISS WFSS background images in order to
    scale them.

    Parameters
    ----------
    param_dict : dict
        Dictionary of observation information from an input yaml file
    detector : str
        Name of detector, e.g. "NRCA1"
    module : str
        Name of module, e.g. "A"

    Returns
    -------
    level_value : float
        Background value at the pivot wavelength corresponding to the
        requested level.

    Raises
    ------
    ValueError
        If ``simSignals:bkgdrate`` is not one of low/medium/high.
    """
    # Generate background spectra for all days
    background = jbt.background(param_dict['Telescope']['ra'], param_dict['Telescope']['dec'], 4.)

    # Get basic flux calibration information
    vegazp, photflam, photfnu, pivot_wavelength = fluxcal_info(param_dict['Reffiles']['flux_cal'], 'niriss',
                                                               param_dict['Readout']['filter'],
                                                               param_dict['Readout']['pupil'], detector, module)

    # Extract the spectrum value across all days at the pivot wavelength
    wave_diff = np.abs(background.bkg_data['wave_array'] - pivot_wavelength)
    bkgd_wave = np.where(wave_diff == np.min(wave_diff))[0][0]
    bkgd_at_pivot = background.bkg_data['total_bg'][:, bkgd_wave]

    # Now sort and determine the low/medium/high levels
    low, medium, high = find_low_med_high(bkgd_at_pivot)
    levels = {'low': low, 'medium': medium, 'high': high}

    # Find the value based on the level in the yaml file.
    # Fixed: the error path previously referenced the undefined name
    # ``params_dict``, which raised NameError instead of the intended
    # ValueError.
    background_level = param_dict['simSignals']['bkgdrate'].lower()
    try:
        level_value = levels[background_level]
    except KeyError:
        raise ValueError(("ERROR: Unrecognized background value: {}. Must be low, medium, or high"
                          .format(param_dict['simSignals']['bkgdrate'])))
    return level_value
| 2.4375 | 2 |
moogle/search/snooper/facebook.py | nimiq/moogle-project | 4 | 12760387 | from ..snooper import BaseSolrSnooper
from tokens.models import Provider
class FacebookSnooper(BaseSolrSnooper):
    """Solr-backed snooper scoped to a user's Facebook data.

    Stores the user and the Facebook provider name so the
    BaseSolrSnooper machinery can restrict searches to this provider.
    """
    def __init__(self, user):
        # User whose Facebook documents will be searched.
        self.user = user
        # Provider identifier used to filter documents by source.
        self.provider_name = Provider.NAME_FACEBOOK
        # NOTE(review): BaseSolrSnooper.__init__ is not invoked here —
        # confirm the base class requires no initialization of its own.
src/DoingMathInPython/ch_07/_scratch.py | bmoretz/Python-Playground | 0 | 12760388 | import math
from sympy import sin, solve, symbols, Symbol, Limit
# Solve for T
u, t, g, theta = symbols( 'u, t, g, theta' )
solve( u * sin( theta ) -g*t, t )
x = Symbol( 'x', positive = True )
if( x + 5 ) > 0:
print( '+' )
else:
print( '-' )
# Indeterminant Form
Limit( sin(x) / x, x, 0 ).doit() | 3.125 | 3 |
pymap/parsing/specials/mailbox.py | chenliangomc/pymap | 0 | 12760389 |
from typing import Tuple, Optional
from . import AString
from .. import Params, Parseable
from ..modutf7 import modutf7_encode, modutf7_decode
__all__ = ['Mailbox']
class Mailbox(Parseable[str]):
"""Represents a mailbox data object from an IMAP stream.
Args:
mailbox: The mailbox name.
"""
def __init__(self, mailbox: str) -> None:
super().__init__()
if mailbox.upper() == 'INBOX':
self.mailbox = 'INBOX'
self._raw: Optional[bytes] = b'INBOX'
else:
self.mailbox = mailbox
self._raw = None
@property
def value(self) -> str:
"""The mailbox name."""
return self.mailbox
@classmethod
def parse(cls, buf: memoryview, params: Params) \
-> Tuple['Mailbox', memoryview]:
atom, buf = AString.parse(buf, params)
mailbox = atom.value
if mailbox.upper() == b'INBOX':
return cls('INBOX'), buf
return cls(modutf7_decode(mailbox)), buf
def __bytes__(self) -> bytes:
if self._raw is not None:
return self._raw
self._raw = raw = bytes(AString(modutf7_encode(self.value)))
return raw
def __str__(self) -> str:
return self.value
| 2.90625 | 3 |
hammers/scripts/orphan_resource_providers.py | ChameleonCloud/bag-o-hammers | 0 | 12760390 | <reponame>ChameleonCloud/bag-o-hammers<gh_stars>0
# coding: utf-8
'''
.. code-block:: bash
orphan-resource-providers {info, update}
Occasionally, compute nodes are recreated in the Nova database with new UUIDs,
but resource providers in the Placement API database are not updated and still
refer to the old UUIDs. This causes failures to post allocations and results in
errors when launching instances. This detects the issue (``info``) and fixes it
(``update``) by updating the ``uuid`` field of resource providers.
'''
import sys
from hammers import MySqlArgs, osapi, query
from hammers.slack import Slackbot
from hammers.util import base_parser
def resource_providers_fixer(db, describe=False, quiet=False):
    """Report or repair orphaned resource providers.

    Parameters
    ----------
    db :
        Open database connection wrapper (as returned by
        ``MySqlArgs.connect``).
    describe : bool
        When True, only count and print the orphaned resource providers;
        when False, update them and commit the transaction.
    quiet : bool
        NOTE(review): currently unused inside this function — output is
        printed unconditionally. Confirm whether it should suppress the
        "Found ..." / "Updated ..." messages.

    Returns
    -------
    int
        Number of orphaned resource providers found or updated.
    """
    if describe:
        for row in query.count_orphan_resource_providers(db):
            count = row['COUNT(*)']
            print('Found %d orphaned resource providers' % count)
        # NOTE(review): if the query yields no rows, ``count`` is unbound
        # here and a NameError is raised.
        return count
    else:
        count = query.update_orphan_resource_providers(db)
        db.db.commit()
        print('Updated %d orphaned resource providers' % count)
        return count
def main(argv=None):
    """Command-line entry point.

    Parses arguments, connects to the database, then either reports
    (``info``) or repairs (``update``) orphaned resource providers,
    optionally notifying Slack.

    Parameters
    ----------
    argv : list of str, optional
        Argument vector; defaults to ``sys.argv``.
    """
    if argv is None:
        argv = sys.argv

    parser = base_parser('Fixes issues with orphaned resource providers.')
    mysqlargs = MySqlArgs({
        'user': 'root',
        'password': '',
        'host': 'localhost',
        'port': 3306,
    })
    mysqlargs.inject(parser)
    parser.add_argument('-q', '--quiet', action='store_true',
        help='Quiet mode. No output if there was nothing to do.')
    parser.add_argument('action', choices=['info', 'update'],
        help='Just display info or actually update them?')

    args = parser.parse_args(argv[1:])
    mysqlargs.extract(args)

    # Slack notifications are optional; ``slack`` is None when --slack
    # was not supplied.
    slack = Slackbot(args.slack, script_name='orphan-resource-providers') if args.slack else None

    try:
        db = mysqlargs.connect()
        update_count = resource_providers_fixer(db=db, describe=args.action == 'info', quiet=args.quiet)
        if args.action == 'update':
            if update_count > 0:
                message = (
                    'Commanded update of *{} resource providers*'
                    .format(update_count)
                )
                print(message)
                # Fixed: only notify Slack when a Slackbot was configured.
                # Previously this raised AttributeError whenever --slack
                # was absent and providers were updated.
                if slack:
                    slack.message(message)
            elif not args.quiet:
                print('No resource providers to delete')
    except BaseException:
        # Equivalent to the former bare ``except:`` — report the failure
        # to Slack (if configured) and re-raise so the process still
        # exits with a traceback / non-zero status.
        if slack:
            slack.exception()
        raise
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 1.742188 | 2 |
src/om_aiv_navigation/setup.py | zach-goh/Omron_AMR_ROS2 | 0 | 12760391 | <reponame>zach-goh/Omron_AMR_ROS2
from setuptools import setup

# ROS 2 (ament) Python package metadata for the Omron AMR navigation nodes.
package_name = 'om_aiv_navigation'

setup(
    name=package_name,
    version='0.0.0',
    packages=[package_name],
    # Register the package with the ament resource index and install the
    # package manifest alongside it.
    data_files=[
        ('share/ament_index/resource_index/packages',
            ['resource/' + package_name]),
        ('share/' + package_name, ['package.xml']),
    ],
    install_requires=['setuptools'],
    zip_safe=True,
    maintainer='omron',
    maintainer_email='<EMAIL>',
    description='TODO: Package description',
    license='TODO: License declaration',
    tests_require=['pytest'],
    # Executables exposed as `ros2 run om_aiv_navigation <name>`.
    entry_points={
        'console_scripts': [
            'action_server = om_aiv_navigation.action_server:main',
            'goto_goal = om_aiv_navigation.goto_goal:main',
            'goto_goal_demo = om_aiv_navigation.goto_goal_demo:main',
            'dock = om_aiv_navigation.dock:main',
            'goto_point = om_aiv_navigation.goto_point:main',
            'localize_at_point = om_aiv_navigation.localize_at_point:main'
        ],
    },
)
feat/mesh.py | basic-ph/feat | 2 | 12760392 | <gh_stars>1-10
import logging
import math
import sys
import numpy as np
import pygmsh
logger = logging.getLogger(__name__)
def center_in_box(x, y, vertex, side):
    """Return True when the point (x, y) lies strictly inside the
    axis-aligned square whose lower-left corner is ``vertex`` and whose
    edge length is ``side``.
    """
    x_min, y_min = vertex[0], vertex[1]
    inside_x = x_min < x < x_min + side
    inside_y = y_min < y < y_min + side
    return inside_x and inside_y
def circle_insersect_side(x, y, radius, x1, y1, x2, y2):
    """Return True when the circle centred at (x, y) with the given radius
    intersects the segment from (x1, y1) to (x2, y2).

    Based on Weisstein, Eric W. "Circle-Line Intersection."
    From MathWorld--A Wolfram Web Resource.
    https://mathworld.wolfram.com/Circle-LineIntersection.html
    """
    # Shift coordinates so the circle is centred on the origin.
    ax, ay = x1 - x, y1 - y
    bx, by = x2 - x, y2 - y
    dx, dy = bx - ax, by - ay
    dr = math.sqrt(dx**2 + dy**2)
    cross = ax * by - bx * ay
    delta = radius**2 * dr**2 - cross**2
    if delta <= 0:
        # Tangency (delta == 0) and no intersection both count as False.
        return False
    root = math.sqrt(delta)
    signed_dx = math.copysign(dx, dy)
    # The two intersection points of the infinite line with the circle.
    candidates = (
        ((cross * dy + signed_dx * root) / (dr**2),
         (-cross * dx + abs(dy) * root) / (dr**2)),
        ((cross * dy - signed_dx * root) / (dr**2),
         (-cross * dx - abs(dy) * root) / (dr**2)),
    )
    # Accept a point only if it falls within the segment's bounding box,
    # with a small tolerance for floating-point round-off.
    tol = 1e-9
    for px, py in candidates:
        on_x = min(ax, bx) - tol <= px <= max(ax, bx) + tol
        on_y = min(ay, by) - tol <= py <= max(ay, by) + tol
        if on_x and on_y:
            return True
    return False
def circle_intersect_box(x, y, radius, vertex, side):
    """Return True when the circle centred at (x, y) with the given radius
    overlaps the square whose lower-left corner is ``vertex`` and whose
    edge length is ``side``.

    Overlap means either the centre is inside the square, or the circle
    crosses at least one of the square's four edges.
    """
    # Quick accept: circle centre inside the square.
    if center_in_box(x, y, vertex, side):
        return True
    # Otherwise the circle overlaps the square only if it intersects one
    # of the four edges; walk the corners counter-clockwise.
    x0, y0 = vertex[0], vertex[1]
    corners = [
        (x0, y0),
        (x0 + side, y0),
        (x0 + side, y0 + side),
        (x0, y0 + side),
    ]
    for idx in range(4):
        ax, ay = corners[idx]
        bx, by = corners[(idx + 1) % 4]
        if circle_insersect_side(x, y, radius, ax, ay, bx, by):
            return True
    return False
def get_fiber_centers(rand_gen, number, side, min_distance, offset, max_iter, centers):
    """Randomly place fiber centres inside a square domain.

    Candidate (x, y) positions are drawn uniformly in
    [offset, side - offset] x [offset, side - offset]; a candidate is
    kept only when it is strictly farther than ``min_distance`` from
    every centre already in ``centers``. Accepted centres are appended
    in place to ``centers`` as [x, y, 0.0] and the same list is
    returned. When ``max_iter`` attempts are not enough to place
    ``number`` new centres, a warning is logged and the interpreter
    exits.

    Parameters
    ----------
    rand_gen :
        Random generator exposing ``random()`` (e.g. ``random.Random``).
    number : int
        Number of new centres to place.
    side, offset : float
        Domain edge length and margin kept free along each edge.
    min_distance : float
        Minimum allowed distance between any two centres.
    max_iter : int
        Maximum number of candidate draws.
    centers : list
        Existing centres; mutated in place.
    """
    span = side - 2 * offset
    placed = 0
    for _ in range(max_iter):
        # Draw x then y on every attempt so the RNG stream is consumed
        # in a fixed, reproducible order.
        cand_x = offset + span * rand_gen.random()
        cand_y = offset + span * rand_gen.random()
        # Reject the candidate if it sits too close to any existing
        # centre (short-circuits on the first violation).
        acceptable = all(
            math.sqrt((cand_x - existing[0])**2 + (cand_y - existing[1])**2) > min_distance
            for existing in centers
        )
        if acceptable:
            centers.append([cand_x, cand_y, 0.0])
            placed += 1
            if placed == number:
                break
    if placed < number:
        logger.warning("Fiber centers not found!!! exit...")
        sys.exit()
    return centers
def filter_centers(centers, radius, vertex, side):
    """Return the subset of ``centers`` whose circle of the given radius
    overlaps the square (lower-left corner ``vertex``, edge ``side``).

    Input order is preserved and each centre appears at most once.
    """
    return [
        center for center in centers
        if circle_intersect_box(center[0], center[1], radius, vertex, side)
    ]
def create_mesh(geo_path, msh_path, radius, vertex, side, centers, coarse_cl, fine_cl):
    """Build a 2D fiber/matrix mesh with gmsh (via pygmsh).

    Disks of the given ``radius`` are placed at ``centers``, clipped to the
    square domain (lower-left corner ``vertex``, edge ``side``), and
    subtracted from the matrix rectangle so fibers and matrix become
    separate, conforming surfaces. A distance-based size field refines the
    mesh near fiber boundaries from ``coarse_cl`` down to ``fine_cl``.

    Parameters
    ----------
    geo_path, msh_path : path-like
        Output paths for the .geo and .msh files (only used when the
        commented ``generate_mesh`` keywords below are re-enabled).
    radius : float
        Fiber radius.
    vertex : sequence of 3 floats
        Lower-left corner of the square domain.
    side : float
        Edge length of the square domain.
    centers : list of [x, y, z]
        Fiber centre coordinates.
    coarse_cl, fine_cl : float
        Characteristic element lengths far from / near fiber boundaries.

    Returns
    -------
    mesh
        The mesh object produced by ``pygmsh.generate_mesh``.
    """
    geom = pygmsh.opencascade.Geometry()
    # One OpenCASCADE disk per fiber centre.
    disks = []
    for i in range(len(centers)):
        fiber = geom.add_disk(centers[i], radius)
        disks.append(fiber)
    disk_tags = ", ".join([d.id for d in disks])
    rectangle = geom.add_rectangle(vertex, side, side)
    # Clip the fibers to the domain rectangle...
    geom.add_raw_code(
        f"BooleanIntersection{{ Surface{{{disk_tags}}}; Delete; }} "
        f"{{ Surface{{{rectangle.id}}}; }}"
    )
    # ...then subtract them from the rectangle to obtain the matrix.
    geom.add_raw_code(
        f"t[] = BooleanDifference{{ Surface{{{rectangle.id}}}; Delete; }} "  # saving t[] list for fixing fragment problem
        f"{{ Surface{{{disk_tags}}}; }};"
    )
    # Collect geometric entities (via bounding boxes with tolerance e)
    # for the physical groups defined at the bottom.
    e = 0.01
    geom.add_raw_code(
        f"p() = Point In BoundingBox"
        f"{{{vertex[0]-e}, {vertex[1]-e}, {vertex[2]-e}, {vertex[0]+e}, {vertex[1]+e}, {vertex[2]+e}}};"  # bottom left corner
    )
    geom.add_raw_code(
        f"q() = Curve In BoundingBox"
        f"{{{vertex[0]-e}, {vertex[1]-e}, {vertex[2]-e}, {vertex[0]+e}, {vertex[1]+side+e}, {vertex[2]+e}}};"  # left side
    )
    geom.add_raw_code(
        f"r() = Curve In BoundingBox"
        f"{{{vertex[0]+side-e}, {vertex[1]-e}, {vertex[2]-e}, {vertex[0]+side+e}, {vertex[1]+side+e}, {vertex[2]+e}}};"  # right side
    )
    geom.add_raw_code(
        f"boundary[] = Boundary{{ Surface{{{disk_tags}}}; }};"  # identify boundaries of fibers for mesh refinement
    )
    # Distance field from the fiber boundaries...
    geom.add_raw_code("Field[1] = Distance;")
    geom.add_raw_code("Field[1].NNodesByEdge = 100;")
    geom.add_raw_code(f"Field[1].EdgesList = {{boundary[]}};")
    # ...thresholded so element size grows from fine_cl at the boundary
    # to coarse_cl beyond DistMax.
    geom.add_raw_code(
        "Field[2] = Threshold;\n"
        "Field[2].IField = 1;\n"
        f"Field[2].LcMin = {fine_cl};\n"
        f"Field[2].LcMax = {coarse_cl};\n"
        "Field[2].DistMin = 0.01;\n"
        f"Field[2].DistMax = {radius};\n"
        "Background Field = 2;"
    )
    # Let the background field alone control element sizes.
    geom.add_raw_code(
        "Mesh.CharacteristicLengthExtendFromBoundary = 0;\n"
        "Mesh.CharacteristicLengthFromPoints = 0;\n"
        "Mesh.CharacteristicLengthFromCurvature = 0;"
    )
    # Physical groups consumed by the FE assembly: matrix/fiber surfaces
    # plus the corner point and the left/right boundary curves.
    geom.add_raw_code(
        f"Physical Surface(\"matrix\") = {{t[]}};\n"
        f"Physical Surface(\"fiber\") = {{{disk_tags}}};\n"
        f"Physical Point(\"bottom left corner\") = {{p()}};\n"
        f"Physical Curve(\"left side\") = {{q()}};\n"
        f"Physical Curve(\"right side\") = {{r()}};\n"
    )
    mesh = pygmsh.generate_mesh(
        geom,
        # geo_filename=str(geo_path), # uncomment this for saving geo and msh
        # msh_filename=str(msh_path),
        verbose=False,
        dim=2,
    )
    return mesh
| 3.25 | 3 |
FeatureProject/cut_td_idf.py | liruifeng-01/nlp_xiaojiang | 1,379 | 12760393 | # -*- coding: UTF-8 -*-
# !/usr/bin/python
# @time :2019/4/1 10:35
# @author :Mo
# @function :cut sentences
from conf.path_config import chicken_and_gossip_path, td_idf_cut_path, td_idf_cut_pinyin
from utils.text_tools import txtWrite, txtRead, get_syboml, strQ2B
from conf.path_config import projectdir
from gensim import corpora, models
import xpinyin
import pickle
import jieba
def cut_td_idf(sources_path, target_path):
    """Tokenize a Chinese corpus with jieba and write the space-separated
    result to ``target_path``.

    Each input line is stripped of spaces, converted from full-width to
    half-width characters (strQ2B), cleaned of symbols (get_syboml), then
    segmented with jieba; tokens are joined with single spaces, one
    document per output line.

    Parameters
    ----------
    sources_path : str
        Path of the raw corpus, one document per line.
    target_path : str
        Path of the tokenized output file.
    """
    print("cut_td_idf start! ")
    corpus = txtRead(sources_path)
    governments = []
    for corpus_one in corpus:
        # Remove spaces, then normalize character width and symbols.
        corpus_one_clear = corpus_one.replace(' ', '').strip()
        ques_q2b = strQ2B(corpus_one_clear.strip())
        ques_q2b_syboml = get_syboml(ques_q2b)
        governments.append(ques_q2b_syboml.strip())
    # Segment each cleaned document with jieba; tokens joined by spaces.
    government_ques = list(map(lambda x: ' '.join(jieba.lcut(x)), governments))
    topic_ques_all = []
    for topic_ques_one in government_ques:
        # NOTE(review): as rendered, these replace() calls map ' ' -> ' '
        # (no-ops); the originals may have targeted full-width or doubled
        # spaces — verify against the repository source.
        top_ques_aqlq = topic_ques_one.replace(' ', ' ').replace(' ', ' ').strip() + '\n'
        topic_ques_all.append(top_ques_aqlq)
    txtWrite(topic_ques_all, target_path)
    print("cut_td_idf ok! " + sources_path)
def cut_td_idf_pinyin(sources_path, target_path):  # 获取拼音
    """Convert a Chinese corpus to pinyin and write it to ``target_path``.

    Each line is normalized (full- to half-width characters, symbols
    cleaned) and then transliterated to space-separated pinyin, one
    document per output line.

    Parameters
    ----------
    sources_path : str
        Path of the raw corpus, one document per line.
    target_path : str
        Path of the pinyin output file.
    """
    pin = xpinyin.Pinyin()
    corpus = txtRead(sources_path)
    topic_ques_all = []
    corpus_count = 0
    for corpus_one in corpus:
        corpus_count += 1
        # time1 = time.time()
        corpus_one_clear = corpus_one.replace(' ', '').strip()
        ques_q2b = strQ2B(corpus_one_clear.strip())
        ques_q2b_syboml = get_syboml(ques_q2b)
        # Transliterate to pinyin, separating syllables with spaces.
        ques_q2b_syboml_pinying = pin.get_pinyin(ques_q2b_syboml.replace(' ', '').replace(' ', '').strip(), ' ')
        topic_ques_all.append(ques_q2b_syboml_pinying + '\n')
        # time2 = time.time()
        # print(str(corpus_count) + 'time:' + str(time2 - time1))
    txtWrite(topic_ques_all, target_path)
    print("cut_td_idf_pinyin ok! " + sources_path)
def init_tfidf_chinese_or_pinyin(sources_path):
    """Build a gensim TF-IDF model from the documents in ``sources_path``
    and persist the dictionary and model beside the source file.

    Parameters
    ----------
    sources_path : str
        Path to a ``.csv`` corpus file, one document per line. The
        pickled ``[dictionary, tfidf_model]`` pair is written to the same
        path with ``.csv`` replaced by ``_dictionary_model.pkl``.
    """
    questions = txtRead(sources_path)
    # Tokenize every document with jieba.
    corpora_documents = []
    for item_text in questions:
        item_seg = list(jieba.cut(str(item_text).strip()))
        corpora_documents.append(item_seg)
    dictionary = corpora.Dictionary(corpora_documents)
    corpus = [dictionary.doc2bow(text) for text in corpora_documents]
    tfidf_model = models.TfidfModel(corpus)
    print("init_tfidf_chinese_or_pinyin ok! " + sources_path)
    # Fixed: the pickle file was opened without ever being closed; use a
    # context manager so the handle is released even if dump() raises.
    with open(sources_path.replace(".csv", "_dictionary_model.pkl"), 'wb') as file:
        pickle.dump([dictionary, tfidf_model], file)
if __name__ == '__main__':
    # Historical one-off preprocessing of the raw chicken_gossip corpus,
    # kept for reference:
    # path_text = projectdir + '/Data/chicken_gossip.txt'
    # sentences = txtRead(path_text)
    # sentences_q = []
    # for sentences_one in sentences:
    #     sentences_one_replace = sentences_one.replace(" ", "").replace("\t", "")
    #     sentences_one_replace_split = sentences_one_replace.split("|")
    #     sentence_new = sentences_one_replace_split[0] + "\t" + "".join(sentences_one_replace_split[1:])
    #     sentences_q.append(sentence_new)
    # sentences = txtWrite(sentences_q, projectdir + '/Data/chicken_and_gossip.txt')

    # Build the tokenized corpus, its pinyin counterpart, and a TF-IDF
    # model (dictionary + model pickle) for each.
    cut_td_idf(chicken_and_gossip_path, td_idf_cut_path)
    cut_td_idf_pinyin(chicken_and_gossip_path, td_idf_cut_pinyin)
    init_tfidf_chinese_or_pinyin(td_idf_cut_path)
    init_tfidf_chinese_or_pinyin(td_idf_cut_pinyin)
    print("corpus ok!")
| 2.515625 | 3 |
mocodo/dynamic.py | JeanHenri79/mocodo | 158 | 12760394 | #!/usr/bin/env python
# encoding: utf-8
class Dynamic(str):
    """Marker subclass of ``str`` for values that must be dynamically
    interpreted by the generated Python files (callers can distinguish
    them from plain strings via ``isinstance``)."""
    pass
src/view.py | hilyafadhilah/my-convex-hull | 0 | 12760395 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""View module for Find Convex Hull program
Notes
-----
Comments are omitted because code is self-explanatory.
"""
colors = [
'aquamarine',
'azure',
'blue',
'brown',
'chartreuse',
'chocolate',
'coral',
'crimson',
'cyan',
'darkblue',
'darkgreen',
'fuchsia',
'gold',
'green',
'grey',
'indigo',
'khaki',
'lavender',
'lightblue',
'lightgreen',
'lime',
'magenta',
'maroon',
'navy',
'olive',
'orange',
'orangered',
'orchid',
'pink',
'plum',
'purple',
'red',
'salmon',
'sienna',
'tan',
'teal',
'tomato',
'turquoise',
'violet',
'wheat',
'yellow',
'yellowgreen',
]
colors = list(map(lambda x: f"xkcd:{x}", colors))
def displayHeader(title: str) -> None:
    """Print *title* centred in a 60-column banner of '=' characters.

    Titles too long for the banner are printed bare (indented by one
    space, framed by blank lines).
    """
    pad = (60 - len(title) - 4) // 2
    if pad > 0:
        rule = '=' * pad
        banner = f"\n {rule} {title} {rule}\n"
    else:
        banner = f"\n {title}\n"
    print(banner)
def displayList(lst: list, key=None) -> None:
    """Print a right-aligned, 1-based numbered listing of *lst*.

    When *key* is given (and truthy), each item is indexed with it
    (``item[key]``) before display. A blank line terminates the listing.
    """
    width = len(str(len(lst)))
    for index, item in enumerate(lst, start=1):
        value = item[key] if key else item
        indent = ' ' * (width - len(str(index)) + 4)
        print(f"{indent}{index}. {value}")
    print('')
def toTitle(label: str) -> str:
    """Convert a snake_case label into a space-separated Title Case string."""
    words = label.split('_')
    return ' '.join(words).title()
def inputInt(prompt: str, minval: int = None, maxval: int = None, exclude: list = None) -> int:
    """Repeatedly prompt the user until a valid integer is entered.

    Parameters
    ----------
    prompt : str
        Text shown before reading input (a trailing space is appended).
    minval, maxval : int, optional
        Inclusive bounds; values outside the range are rejected.
    exclude : list, optional
        Specific values to reject.

    Returns
    -------
    int
        The first accepted value.
    """
    while True:
        try:
            inp = int(input(prompt + " "))
            if (minval is not None and inp < minval) or (maxval is not None and inp > maxval):
                print(f"Number must be between {minval} and {maxval}.")
                raise Exception
            if exclude is not None and inp in exclude:
                print(f"Number cannot be {inp}.")
                raise Exception
            return inp
        except ValueError:
            # Non-numeric input rejected by int().
            print("Invalid number.")
        except Exception:
            # Range/exclusion rejection raised above; loop and re-prompt.
            pass
| 3.71875 | 4 |
stubs.min/System/Windows/Controls/__init___parts/DataGridSelectionUnit.py | ricardyn/ironpython-stubs | 1 | 12760396 | <filename>stubs.min/System/Windows/Controls/__init___parts/DataGridSelectionUnit.py
# Auto-generated IronPython stub for the .NET WPF enum
# System.Windows.Controls.DataGridSelectionUnit. Method bodies are
# placeholders (``pass``); only the signatures and docstrings carry
# information for IDE/type tooling.
class DataGridSelectionUnit(Enum,IComparable,IFormattable,IConvertible):
    """
    Defines constants that specify whether cells,rows,or both,are used for selection in a System.Windows.Controls.DataGrid control.

    enum DataGridSelectionUnit,values: Cell (0),CellOrRowHeader (2),FullRow (1)
    """
    def __eq__(self,*args):
        """ x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
        pass
    def __format__(self,*args):
        """ __format__(formattable: IFormattable,format: str) -> str """
        pass
    def __ge__(self,*args):
        pass
    def __gt__(self,*args):
        pass
    def __init__(self,*args):
        """ x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
        pass
    def __le__(self,*args):
        pass
    def __lt__(self,*args):
        pass
    def __ne__(self,*args):
        pass
    def __reduce_ex__(self,*args):
        pass
    def __str__(self,*args):
        pass
    # Enum members; populated with the real values by the runtime.
    Cell=None
    CellOrRowHeader=None
    FullRow=None
    value__=None
wealthbot/ria/forms/inviteProspect.py | jliev/wealthbot_chatterbot | 1 | 12760397 | from django import forms
from user.models import User
# Skip implementing InviteProspectForm at this moment
class InviteProspectForm(forms.ModelForm):
    """Model form for a RIA inviting a prospect (User).

    Adds a free-text ``type`` field on top of the User model fields and
    captures the inviting RIA from the constructor kwargs.
    """
    # Extra non-model field describing the prospect type.
    type = forms.CharField(max_length=255, required=True,
        widget=forms.TextInput(
            attrs={
                'placeholder': '<NAME>',
                'class': 'width-150 form-control',
            }
        )
    )

    class Meta:
        model = User
        fields = (
            'first_name',
            'last_name',
            'email',
            'groups',
            'type',
        )

    # The RIA user issuing the invitation; set in __init__.
    ria = None

    def __init__(self, *args, **kwargs):
        # 'ria' is required and must be removed before ModelForm sees it.
        self.ria = kwargs.pop('ria')
        super(InviteProspectForm, self).__init__(*args, **kwargs)
        # Get the corresponding RIA
        # NOTE(review): ``riaId`` is assigned but never used — confirm
        # whether it was meant to filter a queryset or can be removed.
        riaId = self.ria.pk
| 2.234375 | 2 |
Leetcode/0905. Sort Array By Parity/0905.py | Next-Gen-UI/Code-Dynamics | 0 | 12760398 | class Solution:
def sortArrayByParity(self, A: List[int]) -> List[int]:
l = 0
r = len(A) - 1
while l < r:
if A[l] % 2 == 1 and A[r] % 2 == 0:
A[l], A[r] = A[r], A[l]
if A[l] % 2 == 0:
l += 1
if A[r] % 2 == 1:
r -= 1
return A
| 3.3125 | 3 |
expresso/expression.py | TheLartians/Symbols | 1 | 12760399 | import _expresso as core
from _expresso import associative,left_associative,right_associative,non_associative,commutative,non_commutative,postfix,prefix
class Expression(core.Expression):
    """Python-friendly wrapper around ``core.Expression``.

    Carries the expression constructor ``S`` alongside the underlying
    core expression so that arguments, head functions and substitution
    results can be re-wrapped on the way out.
    """

    def __init__(self,expr,S):
        # Wrap an existing (core or wrapped) expression directly;
        # anything else is first converted through S.
        if isinstance(expr,(core.Expression,Expression)):
            super(Expression,self).__init__(expr)
        else:
            super(Expression,self).__init__(S(expr))
        self.S = S

    @property
    def function(self):
        # Head function of this expression, wrapped, or None for atoms.
        # NOTE(review): the hasattr guard looks like an attempted cache,
        # but ``self._function`` is never assigned, so the value is
        # recomputed on every access — and if the attribute ever existed,
        # ``F`` would be unbound here (UnboundLocalError).
        if not hasattr(self,'_function'):
            f = super(Expression,self).function()
            if f is not None:
                F = Function(f,S = self.S)
            else:
                F = None
        return F

    @property
    def is_function(self):
        return super(Expression,self).is_function()

    @property
    def is_symbol(self):
        return super(Expression,self).is_symbol()

    @property
    def is_atomic(self):
        return super(Expression,self).is_atomic()

    @property
    def is_wildcard_symbol(self):
        return super(Expression,self).is_wildcard_symbol()

    @property
    def is_wildcard_function(self):
        return super(Expression,self).is_wildcard_function()

    @property
    def is_wildcard(self):
        # Function expressions are wildcards via their head; atoms via
        # the symbol itself.
        if self.is_function:
            return self.is_wildcard_function
        return self.is_wildcard_symbol

    @property
    def value(self):
        return self.get_value()

    @property
    def args(self):
        # Arguments of this expression, each wrapped through S; empty
        # list for atomic expressions.
        args = self.get_arguments()
        if args == None:
            return []
        return [self.S(arg) for arg in args]

    @property
    def name(self):
        # Function expressions delegate to the head function's name;
        # otherwise fall back to the core string representation.
        if self.is_function:
            return self.function.name
        return super(Expression,self).__repr__()

    def get(self,type):
        """Return ``self.value`` if it is an instance of ``type``, else None."""
        v = self.value
        if isinstance(v,type):
            return v
        return None

    def replace(self,*args):
        """Apply a substitution and return the rewritten expression.

        Accepts a ReplacementMap, a mapping or list of pairs, or a single
        (pattern, replacement) pair as two positional arguments.
        """
        if len(args) == 1:
            if isinstance(args[0],ReplacementMap):
                rep = args[0]._replacement_map
            else:
                rep = ReplacementMap(args[0],self.S)._replacement_map
        elif isinstance(args[0],(list,tuple)):
            # NOTE(review): this branch only runs when len(args) != 1,
            # so the ``len(args) == 1`` sub-case below is unreachable.
            if len(args) == 1 and isinstance(args[0][0],(list,tuple)):
                rep = ReplacementMap(args[0],self.S)._replacement_map
            else:
                rep = ReplacementMap(args,self.S)._replacement_map
        elif len(args) == 2:
            rep = core.replacement_map()
            rep[self.S(args[0])] = self.S(args[1])
        else:
            raise ValueError('invalid substitution arguments')
        #from evaluator import ReplaceEvaluator
        #return ReplaceEvaluator(rep,S=self.S)(self)
        return self.S(core.replace(self,rep))

    def match(self,search):
        """Try to match ``search`` (over its commutative permutations)
        against this expression; return a ReplacementMap of bindings, or
        None if no permutation matches."""
        for expr in core.commutative_permutations(search):
            res = core.match(self,expr)
            if res:
                return ReplacementMap(core.match(self,expr),self.S)
        return None
def WrappedType(T, **parameters):
    """Return a subclass of ``T`` that always forwards ``parameters`` as
    keyword arguments to ``T.__init__``.

    The subclass is named ``"Wrapped" + T.__name__`` and exposes the
    bound parameters through the static method
    ``_get_wrapped_parameter(name)`` (None for unknown names). Keyword
    arguments supplied by the caller are overridden by ``parameters``.
    """
    class Wrapped(T):
        @staticmethod
        def _get_wrapped_parameter(name):
            # Look up one of the captured construction parameters.
            return parameters.get(name)

        def __init__(self, *args, **kwargs):
            # The captured parameters win over caller-supplied keywords.
            kwargs.update(parameters)
            super(Wrapped, self).__init__(*args, **kwargs)

    Wrapped.__name__ = "Wrapped" + T.__name__
    return Wrapped
WrappedExpression = lambda S:WrappedType(Expression,S=S)
class Function(object):
    """Wrapper around a core function/operator that builds wrapped call
    expressions through the expression constructor ``S``.

    Parameters
    ----------
    F : core function or operator
        The underlying core callable symbol.
    argc : int or list of int, optional
        Allowed argument counts; calls with a different arity raise
        ValueError. When omitted, any arity is accepted.
    S : callable
        Expression constructor (required; must be passed by keyword or
        as the third positional argument).
    """

    def __init__(self,F,argc = None,S = None):
        if S == None:
            raise ValueError('no parameter S provided')
        self.S = S
        if argc is not None:
            # Normalize a single int to a one-element list of arities.
            if isinstance(argc,int):
                argc = [argc]
            self.argc = argc
        self._function = F

    def __call__(self,*args):
        # Enforce the declared arity, if any was given at construction.
        if hasattr(self,'argc') and len(args) not in self.argc:
            raise ValueError("%s takes %s arguments" % (str(self),' or '.join([str(v) for v in self.argc])))
        return self.S(core.create_call(self._function,[self.S(arg) for arg in args]))

    def __repr__(self):
        return self.name

    @property
    def name(self):
        return self._function.get_name()

    @property
    def symbol(self):
        return self._function.get_symbol()

    @property
    def is_commutative(self):
        return self._function.is_commutative()

    @property
    def is_associative(self):
        return self._function.is_associative()

    @property
    def is_prefix(self):
        return self._function.is_prefix()

    @property
    def is_postfix(self):
        return self._function.is_postfix()

    @property
    def is_operator(self):
        return isinstance(self._function,core.Operator)

    @property
    def is_unary_operator(self):
        return isinstance(self._function,core.UnaryOperator)

    @property
    def is_binary_operator(self):
        return isinstance(self._function,core.BinaryOperator)

    @property
    def precedence(self):
        return self._function.get_precedence()

    def __hash__(self):
        # Functions hash and compare by name only.
        return self.name.__hash__()

    def __eq__(self,other):
        if not isinstance(other,Function):
            return False
        return self.name == other.name

    def __ne__(self,other):
        return not self == other
def WrappedFunction(F, S, argc=None):
    """Build a `Function` subclass whose core function is constructed by *F*
    and which is bound to the expression factory *S*.

    When *argc* is given it fixes the accepted arity for every instance;
    otherwise each instance may supply its own via an ``argc`` keyword.
    The two near-identical conditional class definitions of the original
    are collapsed into one.
    """
    class WrappedFunction(Function):
        def __init__(self, *args, **kwargs):
            # Per-instance arity is only consulted when no fixed arity exists.
            instance_argc = kwargs.pop('argc', None) if argc is None else argc
            super(WrappedFunction, self).__init__(F(*args, **kwargs), instance_argc, S)
    WrappedFunction.__name__ = "Wrapped" + F.__name__
    return WrappedFunction
class ReplacementMap(object):
    """Mapping wrapper around ``core.replacement_map`` whose keys and values
    are wrapped with the expression factory ``S`` on access.

    ``rep`` may be an existing ``core.replacement_map`` (adopted as-is), any
    mapping (copied, wrapping keys and values with ``S``), or ``None`` for
    an empty map.
    """

    def __init__(self, rep=None, S=None):
        if isinstance(rep, core.replacement_map):
            self._replacement_map = rep
        elif rep is not None:  # identity test: `rep` may define a custom __eq__
            self._replacement_map = core.replacement_map()
            # dict.items() works on both Python 2 and 3 (iteritems() is 2-only).
            for key, value in dict(rep).items():
                self._replacement_map[S(key)] = S(value)
        else:
            self._replacement_map = core.replacement_map()
        self.S = S

    def __str__(self):
        return str(dict(self))

    def __iter__(self):
        def generator():
            for v in self._replacement_map:
                yield (self.S(v.key()), self.S(v.data()))
        return generator()

    def __getitem__(self, key):
        return self.S(self._replacement_map[self.S(key)])

    def __setitem__(self, key, value):
        self._replacement_map[self.S(key)] = self.S(value)
WrappedReplacementMap = lambda S:WrappedType(ReplacementMap,S=S)
class Group(object):
    """Wrapper around ``core.Group``: a binary operation, its inverse and
    the neutral element, re-wrapped through the expression factory ``S``."""

    def __init__(self, operation, inverse=None, neutral=None, S=None):
        if isinstance(operation, core.Group):
            # Adopt an existing core group as-is.
            self._group = operation
        else:
            self._group = core.Group(operation._function, inverse._function, S(neutral))
        self.S = S

    @property
    def operation(self):
        # S must be passed by keyword: Function's second positional
        # parameter is `argc`, so the old positional call bound self.S to
        # argc and left Function without a factory (raising at construction).
        return Function(self._group.get_operation(), S=self.S)

    @property
    def inverse(self):
        return Function(self._group.get_inverse(), S=self.S)

    @property
    def neutral(self):
        return self.S(self._group.neutral)

    def __repr__(self):
        return "Group" + str((self.operation, self.inverse, self.neutral))
WrappedGroup = lambda S:WrappedType(Group,S=S)
class Field(object):
    """Wrapper around ``core.Field``: the additive and multiplicative groups
    of a field, exposed as wrapped ``Group`` objects."""

    def __init__(self, additive_group, multiplicative_group, S):
        self._field = core.Field(additive_group._group, multiplicative_group._group)
        self.S = S

    @property
    def additive_group(self):
        # S by keyword: Group's second positional parameter is `inverse`,
        # so the old positional call left the wrapped Group with S=None
        # (its `neutral` property would then call None(...)).
        return Group(self._field.additive_group, S=self.S)

    @property
    def multiplicative_group(self):
        return Group(self._field.multiplicative_group, S=self.S)
WrappedField = lambda S:WrappedType(Field,S=S)
class MulplicityList(object):
    """Wrapper around ``core.MulplicityList``: a sequence of
    (value, multiplicity) pairs over a group, supporting set-like
    intersection/difference/sum and exponentiation."""

    def __init__(self, arg, operation_group=None, mulplicity_function=None, field=None, S=None):
        if S is None:  # identity test (was `== None`), avoids custom __eq__
            raise ValueError("Parameter S undefined")
        if arg is None:
            # No seed expression: create an empty list over the given algebra.
            self._mlist = core.MulplicityList(operation_group._group, mulplicity_function._function, field._field)
        elif isinstance(arg, core.MulplicityList):
            self._mlist = arg
        else:
            self._mlist = core.MulplicityList(S(arg), operation_group._group, mulplicity_function._function, field._field)
        self.S = S

    def __len__(self):
        return len(self._mlist)

    def __iter__(self):
        def generator():
            for v in self._mlist:
                yield (self.S(v.value), self.S(v.mulplicity))
        return generator()

    def __getitem__(self, index):
        # NOTE(review): raising ValueError rather than IndexError breaks the
        # implicit iteration protocol; kept because callers may catch it.
        if index >= len(self._mlist):
            raise ValueError("index out of range")
        v = self._mlist[index]
        return (self.S(v.value), self.S(v.mulplicity))

    def __repr__(self):
        return str(list(self))

    def as_expression(self):
        return self.S(self._mlist.as_expression())

    def _wrap(self, mlist):
        return MulplicityList(mlist, S=self.S)

    def intersection(self, other, get_inner=None):
        if get_inner:
            return self._wrap(self._mlist.intersection(other._mlist, lambda a, b: get_inner(self.S(a), self.S(b))))
        return self._wrap(self._mlist.intersection(other._mlist))

    def sub(self, other):
        return self._wrap(self._mlist.difference(other._mlist))

    def sum(self, other):
        return self._wrap(self._mlist.sum(other._mlist))

    def pow(self, expr):
        return self._wrap(self._mlist.power(self.S(expr)))

    def __sub__(self, other):
        return self.sub(other)

    def __add__(self, other):
        return self.sum(other)

    def __pow__(self, other):
        return self.pow(other)
WrappedMulplicityList = lambda S:WrappedType(MulplicityList,S=S)
def wrapped_postorder_traversal(S):
    """Return a postorder-traversal generator whose yielded nodes are wrapped with *S*."""
    def postorder_traversal(root):
        for node in core.postorder_traversal(root):
            yield S(node)
    return postorder_traversal
def wrapped_preorder_traversal(S):
    """Return a preorder-traversal generator whose yielded nodes are wrapped with *S*."""
    def preorder_traversal(root):
        for node in core.preorder_traversal(root):
            yield S(node)
    return preorder_traversal
def wrapped_commutative_permutations(S):
    """Return a generator over the commutative permutations of an expression, wrapped with *S*."""
    def commutative_permutations(root):
        for permutation in core.commutative_permutations(root):
            yield S(permutation)
    return commutative_permutations
| 2.984375 | 3 |
interactions/ext/enhanced/subcommands.py | Toricane/interactions-better-components | 0 | 12760400 | <reponame>Toricane/interactions-better-components
"""
subcommands
Content:
* subcommand_base: base for subcommands
* ext_subcommand_base: base for subcommands in extensions
GitHub: https://github.com/interactions-py/enhanced/blob/main/interactions/ext/enhanced/subcommands.py
(c) 2022 interactions-py.
"""
from inspect import getdoc, signature
from typing import Any, Callable, Coroutine, Dict, List, Optional, Union
from interactions.client.decor import command
from typing_extensions import _AnnotatedAlias
from interactions import (
MISSING,
ApplicationCommand,
ApplicationCommandType,
Client,
Extension,
Guild,
InteractionException,
Option,
OptionType,
)
from ._logging import get_logger
from .command_models import EnhancedOption, parameters_to_options
log = get_logger("subcommand")
class Subcommand:
    """
    One registered subcommand: its metadata plus the coroutine to invoke.

    DO NOT INITIALIZE THIS CLASS DIRECTLY.

    Parameters:

    * `name: str`: The name of the subcommand.
    * `description: str`: The description of the subcommand.
    * `coro: Coroutine`: The coroutine to run when the subcommand is called.
    * `options: dict`: The options of the subcommand.

    Attributes other than above:

    * `_options: Option`: The subcommand rendered as an `Option`.
    """

    def __init__(
        self,
        name: str,
        description: str,
        coro: Coroutine,
        options: List[Option] = MISSING,
    ):
        log.debug(f"Subcommand.__init__: {name=}")
        self.name: str = name
        self.description: str = description
        self.coro: Coroutine = coro
        self.options: List[Option] = options

        # Build the Option representation once; only pass `options=` when given.
        option_kwargs = dict(
            type=OptionType.SUB_COMMAND,
            name=name,
            description=description,
        )
        if options is not MISSING:
            option_kwargs["options"] = options
        self._options: Option = Option(**option_kwargs)
class Group:
    """
    One subcommand group and the subcommands registered under it.

    DO NOT INITIALIZE THIS CLASS DIRECTLY.

    Parameters:

    * `group: str`: The name of the subcommand group.
    * `description: str`: The description of the subcommand group.
    * `subcommand: Subcommand`: The initial subcommand in the group.

    Properties:

    * `_options: Option`: The subcommand group rendered as an `Option`.
    """

    def __init__(self, group: str, description: str, subcommand: Subcommand):
        log.debug(f"Group.__init__: {group=}, {subcommand=}")
        self.group: str = group
        self.description: str = description
        self.subcommands: List[Subcommand] = [subcommand]

    @property
    def _options(self) -> Option:
        """Render the group as a SUB_COMMAND_GROUP option wrapping its subcommands."""
        nested = [sub._options for sub in self.subcommands]
        return Option(
            type=OptionType.SUB_COMMAND_GROUP,
            name=self.group,
            description=self.description,
            options=nested,
        )
class GroupSetup:
    """
    Shortcut for registering subcommands under one group of a `SubcommandSetup`.

    ```py
    base_var: SubcommandSetup = client.subcommand_base("base_name", ...)
    group_var: GroupSetup = base_var.group("group_name")

    @group_var.subcommand(...)
    async def group_subcommand(ctx, ...):
        ...
    ```

    Parameters:

    * `group: str`: The name of the subcommand group.
    * `subcommand_setup: SubcommandSetup`: The `SubcommandSetup` to add the group subcommand to.
    """

    def __init__(self, group: str, subcommand_setup: "SubcommandSetup"):
        log.debug(f"GroupSetup.__init__: {group=}, {subcommand_setup=}")
        self.group: str = group
        self.subcommand_setup: "SubcommandSetup" = subcommand_setup

    def subcommand(
        self,
        _coro: Optional[Coroutine] = MISSING,
        *,
        name: Optional[str] = MISSING,
        description: Optional[str] = MISSING,
        options: Optional[List[Option]] = MISSING,
    ) -> Callable[..., Any]:
        """
        Creates a subcommand in this group with the given parameters.

        ```py
        @group_var.subcommand(...)
        async def group_subcommand(ctx, ...):
            ...
        ```

        Parameters:

        * `?name: str`: The name of the subcommand.
        * `?description: str`: The description of the subcommand.
        * `?options: List[Option]`: The options of the subcommand.
        """

        def decorator(coro):
            # Delegate to the parent setup, pinning this group's name.
            self.subcommand_setup.subcommand(
                group=self.group,
                name=name,
                description=description,
                options=options,
            )(coro)
            return coro

        # Supports both `@group.subcommand` and `@group.subcommand(...)`.
        return decorator if _coro is MISSING else decorator(_coro)
class SubcommandSetup:
    """
    A class you get when using `base_var = client.subcommand_base("base_name", ...)`

    Use this class to create subcommands by using the `@base_name.subcommand(...)` decorator.

    Parameters:

    * `(?)client: Client`: The client that the subcommand belongs to. *Not required if you load the extension.*
    * `base: str`: The base name of the subcommand.
    * `?description: str`: The description of the subcommand. Defaults to `"No description"`.
    * `?scope: int | Guild | list[int] | list[Guild]`: The scope of the subcommand.
    * `?debug_scope: bool`: Whether to use debug_scope for this command. Defaults to `True`.
    """

    def __init__(
        self,
        client: Client,
        base: str,
        description: Optional[str] = "No description",
        scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
        debug_scope: Optional[bool] = True,
    ):
        log.debug(f"SubcommandSetup.__init__: {base=}")
        self.client: Client = client
        self.base: str = base
        self.description: str = description
        # Fall back to the client's debug scope when no explicit scope is given.
        self.scope: Union[int, Guild, List[int], List[Guild]] = (
            getattr(client, "__debug_scope")
            if scope is MISSING and hasattr(client, "__debug_scope") and debug_scope
            else scope
        )

        self.groups: Dict[str, Group] = {}
        self.subcommands: Dict[str, Subcommand] = {}
        self.commands: List[ApplicationCommand] = MISSING

    def group(self, group: str) -> GroupSetup:
        """
        Function to get a `GroupSetup` object, a shortcut to creating group subcommands.

        This is also in `ExternalSubcommandSetup`.

        ```py
        base_var: SubcommandSetup = client.subcommand_base("base_name", ...)
        group_var: GroupSetup = base_var.group("group_name")
        ```

        Parameters:

        * `group: str`: The name of the group.

        Returns:

        `GroupSetup`
        """
        return GroupSetup(group=group, subcommand_setup=self)

    def subcommand(
        self,
        _coro: Optional[Coroutine] = MISSING,
        *,
        group: Optional[str] = MISSING,
        name: Optional[str] = MISSING,
        description: Optional[str] = MISSING,
        options: Optional[List[Option]] = MISSING,
    ) -> Callable[..., Any]:
        """
        Decorator that creates a subcommand for the corresponding base.

        `name` is required.

        ```py
        @base_var.subcommand(
            group="group_name",
            name="subcommand_name",
            description="subcommand_description",
            options=[...]
        )
        ```

        Parameters:

        * `?group: str`: The group of the subcommand.
        * `name: str`: The name of the subcommand.
        * `?description: str`: The description of the subcommand.
        * `?options: list[Option]`: The options of the subcommand.
        """
        log.debug(f"SubcommandSetup.subcommand: {self.base=}, {group=}, {name=}")

        def decorator(coro: Coroutine) -> Coroutine:
            _name = coro.__name__ if name is MISSING else name
            # The first docstring line (or the explicit description) is used.
            _description = (
                (getdoc(coro) or "No description") if description is MISSING else description
            ).split("\n")[0]
            if len(_description) > 100:
                raise ValueError("Description must be less than 100 characters.")

            params = signature(coro).parameters
            # Options come from the @option decorator cache, the annotated
            # parameters, or the explicit `options=` argument, in that order.
            _options = (
                getattr(coro, "__decor_options")
                if hasattr(coro, "__decor_options")
                else parameters_to_options(params)
                if options is MISSING and len(params) > 1
                else options
            )

            if not params:
                raise InteractionException(
                    11,
                    message="Your command needs at least one argument to return context.",
                )

            if group is MISSING:
                self.subcommands[_name] = Subcommand(_name, _description, coro, _options)
            elif group not in self.groups:
                self.groups[group] = Group(
                    group,
                    _description,
                    subcommand=Subcommand(_name, _description, coro, _options),
                )
            else:
                self.groups[group].subcommands.append(
                    Subcommand(_name, _description, coro, _options)
                )
            return coro

        if _coro is not MISSING:
            return decorator(_coro)
        return decorator

    def finish(self) -> Callable[..., Any]:
        """
        Function that finishes the setup of the base command.

        Use this when you are done creating subcommands for a specified base.

        ```py
        base_var.finish()
        ```
        """
        log.debug(f"SubcommandSetup.finish: {self.base=}")
        group_options = [group._options for group in self.groups.values()] if self.groups else []
        subcommand_options = (
            [subcommand._options for subcommand in self.subcommands.values()]
            if self.subcommands
            else []
        )
        options = (group_options + subcommand_options) or None
        self.commands: List[ApplicationCommand] = command(
            type=ApplicationCommandType.CHAT_INPUT,
            name=self.base,
            description=self.description,
            scope=self.scope,
            options=options,
        )

        async def inner(ctx, *args, sub_command_group=None, sub_command=None, **kwargs) -> None:
            # Route the interaction to the registered subcommand coroutine.
            if sub_command_group:
                group = self.groups[sub_command_group]
                subcommand = next(
                    (sub for sub in group.subcommands if sub.name == sub_command), None
                )
            else:
                subcommand = self.subcommands[sub_command]
            return await subcommand.coro(ctx, *args, **kwargs)

        inner._command_data = self.commands
        self.client._Client__command_coroutines.append(inner)

        # (Removed: large commented-out legacy `_automate_sync` block.)
        if self.scope is not MISSING:
            if isinstance(self.scope, list):
                [self.client._scopes.add(_ if isinstance(_, int) else _.id) for _ in self.scope]
            else:
                self.client._scopes.add(
                    self.scope if isinstance(self.scope, int) else self.scope.id
                )
        return self.client.event(inner, name=f"command_{self.base}")

    def autocomplete(self, option: str) -> Callable[..., Any]:
        """
        Decorator for building autocomplete for options in the current base.

        **IMPORTANT**: You must `base_var.finish()` before using this decorator.

        Example:

        ```py
        base = client.subcommand_base("base_name", ...)

        @base.subcommand()
        @option("auto", autocomplete=True)
        async def subcommand(ctx, auto: str):
            ...

        ...
        base.finish()

        @base.autocomplete("auto")
        async def auto_complete(ctx, user_input: str = ""):
            await ctx.populate([
                interactions.Choice(...),
                interactions.Choice(...),
                ...
            ])
        ```

        Parameters:

        * `option: str`: The option to build autocomplete for.
        """

        def decorator(coro: Coroutine) -> Callable[..., Any]:
            if self.commands is MISSING:
                raise RuntimeError(
                    "You must `base_var.finish()` the setup of the subcommands before providing autocomplete."
                )
            command: str = self.base
            self.client._Client__name_autocomplete[command] = {"coro": coro, "name": option}
            return coro
            # (Removed: large commented-out legacy command-lookup block.)

        return decorator
class ExternalSubcommandSetup(SubcommandSetup):
    """
    A class you get when using `base_var = extension_base("base_name", ...)`

    Use this class to create subcommands by using the `@base_name.subcommand(...)` decorator.

    Parameters:

    * `base: str`: The base name of the subcommand.
    * `?description: str`: The description of the subcommand.
    * `?scope: int | Guild | list[int] | list[Guild]`: The scope of the subcommand.
    """

    def __init__(
        self,
        base: str,
        description: Optional[str] = "No description",
        scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
    ):
        log.debug(f"ExternalSubcommandSetup.__init__: {base=}")
        # The client is attached later (in `_super_autocomplete`), not here.
        super().__init__(
            client=None,
            base=base,
            description=description,
            scope=scope,
        )
        self.raw_commands = None
        self.full_command = None
        self.__self = None

        self._autocomplete_options: Dict[str, Callable] = {}

    def subcommand(
        self,
        _coro: Optional[Coroutine] = MISSING,
        *,
        group: Optional[str] = MISSING,
        name: Optional[str] = MISSING,
        description: Optional[str] = MISSING,
        options: Optional[List[Option]] = MISSING,
    ) -> Callable[..., Any]:
        """
        Decorator that creates a subcommand for the corresponding base.

        `name` is required.

        ```py
        @base_var.subcommand(
            group="group_name",
            name="subcommand_name",
            description="subcommand_description",
            options=[...]
        )
        ```

        Parameters:

        * `?group: str`: The group of the subcommand.
        * `name: str`: The name of the subcommand.
        * `?description: str`: The description of the subcommand.
        * `?options: list[Option]`: The options of the subcommand.
        """
        log.debug(f"ExternalSubcommandSetup.subcommand: {self.base=}, {group=}, {name=}")

        def decorator(coro: Coroutine) -> Coroutine:
            # Markers used by the extension loader to find subcommand setups.
            coro.__subcommand__ = True
            coro.__base__ = self.base
            coro.__data__ = self

            _name = coro.__name__ if name is MISSING else name
            _description = (
                (getdoc(coro) or "No description") if description is MISSING else description
            ).split("\n")[0]
            if len(_description) > 100:
                raise ValueError("Description must be less than 100 characters.")

            params = signature(coro).parameters
            _options = (
                getattr(coro, "__decor_options")
                if hasattr(coro, "__decor_options")
                else parameters_to_options(params)
                if options is MISSING
                and len(params) > 1
                and any(
                    isinstance(param.annotation, (EnhancedOption, _AnnotatedAlias))
                    for _, param in params.items()
                )
                else options
            )

            if not params:
                raise InteractionException(
                    11,
                    message="Your command needs at least one argument to return context.",
                )

            if group is MISSING:
                self.subcommands[_name] = Subcommand(_name, _description, coro, _options)
            elif group not in self.groups:
                self.groups[group] = Group(
                    group,
                    # Use the computed description (the raw parameter may be
                    # MISSING), matching SubcommandSetup.subcommand.
                    _description,
                    subcommand=Subcommand(_name, _description, coro, _options),
                )
            else:
                self.groups[group].subcommands.append(
                    Subcommand(_name, _description, coro, _options)
                )
            return coro

        if _coro is not MISSING:
            return decorator(_coro)
        return decorator

    def finish(self) -> Callable[..., Any]:
        """
        Function that finishes the setup of the base command.

        Use this when you are done creating subcommands for a specified base.

        ```py
        base_var.finish()
        ```
        """
        log.debug(f"ExternalSubcommandSetup.finish: {self.base=}")
        group_options = [group._options for group in self.groups.values()] if self.groups else []
        subcommand_options = (
            [subcommand._options for subcommand in self.subcommands.values()]
            if self.subcommands
            else []
        )
        options = (group_options + subcommand_options) or MISSING
        self.commands: List[ApplicationCommand] = command(
            type=ApplicationCommandType.CHAT_INPUT,
            name=self.base,
            description=self.description,
            scope=self.scope,
            options=options,
        )
        self.raw_commands = self.commands

    def autocomplete(self, option: str) -> Callable[..., Any]:
        """
        Decorator for building autocomplete for options in the current base.

        **IMPORTANT**: You must `base_var.finish()` before using this decorator.

        Example:

        ```py
        base = client.subcommand_base("base_name", ...)

        @base.subcommand()
        @option("auto", autocomplete=True)
        async def subcommand(ctx, auto: str):
            ...

        ...
        base.finish()

        @base.autocomplete("auto")
        async def auto_complete(ctx, user_input: str = ""):
            await ctx.populate([
                interactions.Choice(...),
                interactions.Choice(...),
                ...
            ])
        ```

        Parameters:

        * `option: str`: The option to build autocomplete for.
        """

        def decorator(coro: Coroutine) -> Callable[..., Any]:
            if self.commands is MISSING:
                raise RuntimeError(
                    "You must `base_var.finish()` the setup of the subcommands before providing autocomplete."
                )
            # Deferred: actually registered with the client in _super_autocomplete.
            self._autocomplete_options[option] = coro
            return coro

        return decorator

    def _super_autocomplete(self, client: Client):
        self.client = client
        if not self._autocomplete_options:
            return
        for option, coro in self._autocomplete_options.items():

            async def new_coro(*args, coro=coro, **kwargs):
                # `coro=coro` freezes the current loop value; a plain closure
                # would late-bind and leave every wrapper calling the *last*
                # registered coroutine.
                return await coro(self.__self, *args, **kwargs)

            super().autocomplete(option)(new_coro)

    async def inner(self, ctx, *args, sub_command_group=None, sub_command=None, **kwargs) -> None:
        # Route the interaction to the registered subcommand coroutine,
        # prepending the owning Extension instance.
        if sub_command_group:
            group = self.groups[sub_command_group]
            subcommand = next((sub for sub in group.subcommands if sub.name == sub_command), None)
        else:
            subcommand = self.subcommands[sub_command]
        return await subcommand.coro(self.__self, ctx, *args, **kwargs)

    def set_self(self, __self: Extension) -> None:
        """
        Allows ability to access Extension attributes

        :param Extension __self: The extension
        """
        self.__self = __self
def subcommand_base(
    self: Client,
    base: str,
    *,
    description: Optional[str] = "No description",
    scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
    debug_scope: Optional[bool] = True,
) -> SubcommandSetup:
    """
    Use this function to initialize a base for future subcommands.

    Kwargs are optional.

    To use this function without loading the extension, pass in the client as the first argument.

    ```py
    base_name = client.subcommand_base(
        "base_name",
        description="Description of the base",
        scope=123456789,
    )
    # or
    from interactions.ext.enhanced import subcommand_base
    base_name = subcommand_base(
        client,
        "base_name",
        description="Description of the base",
        scope=123456789,
    )
    ```

    Parameters:

    * `(?)self: Client`: The client that the base belongs to. *Not needed if you load the extension and use `client.base(...)`.*
    * `base: str`: The base name of the base.
    * `?description: str`: The description of the base.
    * `?scope: int | Guild | list[int] | list[Guild]`: The scope of the base.
    * `?debug_scope: bool`: Whether to use debug_scope for this command. Defaults to `True`.

    Returns:

    `SubcommandSetup`
    """
    log.debug(f"base: {base=}")
    return SubcommandSetup(self, base, description, scope, debug_scope)
def ext_subcommand_base(
    base: str,
    *,
    description: Optional[str] = "No description",
    scope: Optional[Union[int, Guild, List[int], List[Guild]]] = MISSING,
) -> ExternalSubcommandSetup:
    """
    Use this function to initialize a base for future subcommands inside extensions.

    Kwargs are optional.

    ```py
    base_name = ext_subcommand_base(
        "base_name",
        description="Description of the base",
        scope=123456789,
    )
    ```

    Parameters:

    * `base: str`: The base name of the base.
    * `?description: str`: The description of the base.
    * `?scope: int | Guild | list[int] | list[Guild]`: The scope of the base.

    Returns:

    `ExternalSubcommandSetup`
    """
    log.debug(f"extension_base: {base=}")
    return ExternalSubcommandSetup(base, description, scope)
| 2.09375 | 2 |
src/the_tale/the_tale/game/cards/tests/test_keeper_goods.py | Alacrate/the-tale | 85 | 12760401 | <reponame>Alacrate/the-tale<filename>src/the_tale/the_tale/game/cards/tests/test_keeper_goods.py<gh_stars>10-100
import smart_imports
smart_imports.all()
class KeepersGoodsMixin(helpers.CardsTestMixin):
    """Shared scenario for the keeper's-goods card family: using the card on
    a place must raise that place's production by the card's modificator;
    using it with an unknown place id must fail without side effects."""

    # Concrete rarity subclasses override this with the card type under test.
    CARD = None

    def setUp(self):
        super(KeepersGoodsMixin, self).setUp()

        # Reset place effects between tests so production deltas are isolated.
        places_tt_services.effects.cmd_debug_clear_service()

        self.place_1, self.place_2, self.place_3 = game_logic.create_test_map()

        self.account_1 = self.accounts_factory.create_account()
        self.account_2 = self.accounts_factory.create_account(is_fast=True)

        self.storage = game_logic_storage.LogicStorage()
        self.storage.load_account_data(self.account_1.id)
        self.storage.load_account_data(self.account_2.id)

        self.hero = self.storage.accounts_to_heroes[self.account_1.id]

        self.card = self.CARD.effect.create_card(type=self.CARD, available_for_auction=True)

    def test_use(self):
        self.place_1.attrs.size = 10
        self.place_1.refresh_attributes()

        # Production must change by exactly the card's modificator.
        with self.check_almost_delta(lambda: round(self.place_1.attrs.production, 2), self.CARD.effect.modificator):
            result, step, postsave_actions = self.CARD.effect.use(**self.use_attributes(hero=self.hero,
                                                                                        card=self.card,
                                                                                        value=self.place_1.id))

        self.assertEqual((result, step, postsave_actions), (game_postponed_tasks.ComplexChangeTask.RESULT.SUCCESSED,
                                                            game_postponed_tasks.ComplexChangeTask.STEP.SUCCESS,
                                                            ()))

    def test_use_for_wrong_place_id(self):
        # 666 is not a valid place id; production must stay untouched.
        with self.check_not_changed(lambda: self.place_1.attrs.production):
            self.assertEqual(self.CARD.effect.use(**self.use_attributes(hero=self.hero, value=666, storage=self.storage)),
                             (game_postponed_tasks.ComplexChangeTask.RESULT.FAILED,
                              game_postponed_tasks.ComplexChangeTask.STEP.ERROR,
                              ()))
# Common-rarity variant of the shared keeper's-goods scenario.
class KeepersGoodsCommonTests(KeepersGoodsMixin, utils_testcase.TestCase):
    CARD = types.CARD.KEEPERS_GOODS_COMMON
# Uncommon-rarity variant of the shared keeper's-goods scenario.
class KeepersGoodsUncommonTests(KeepersGoodsMixin, utils_testcase.TestCase):
    CARD = types.CARD.KEEPERS_GOODS_UNCOMMON
# Rare-rarity variant of the shared keeper's-goods scenario.
class KeepersGoodsRareTests(KeepersGoodsMixin, utils_testcase.TestCase):
    CARD = types.CARD.KEEPERS_GOODS_RARE
# Epic-rarity variant of the shared keeper's-goods scenario.
class KeepersGoodsEpicTests(KeepersGoodsMixin, utils_testcase.TestCase):
    CARD = types.CARD.KEEPERS_GOODS_EPIC
# Legendary-rarity variant of the shared keeper's-goods scenario.
class KeepersGoodsLegendaryTests(KeepersGoodsMixin, utils_testcase.TestCase):
    CARD = types.CARD.KEEPERS_GOODS_LEGENDARY
| 2.03125 | 2 |
tests/test_repr.py | dolfinus/pexpect | 2,132 | 12760402 | """ Test __str__ methods. """
import pexpect
from . import PexpectTestCase
class TestCaseMisc(PexpectTestCase.PexpectTestCase):
    """Smoke-tests that the __str__ implementations of the spawn classes
    (and of TIMEOUT exceptions) return a str without raising."""

    def test_str_spawnu(self):
        """ Exercise spawnu.__str__() """
        # given,
        p = pexpect.spawnu('cat')

        # exercise,
        value = str(p)

        # verify
        assert isinstance(value, str)

    def test_str_spawn(self):
        """ Exercise spawn.__str__() """
        # given,
        p = pexpect.spawn('cat')

        # exercise,
        value = str(p)

        # verify
        assert isinstance(value, str)

    def test_str_before_spawn(self):
        """ Exercise derived spawn.__str__() """
        # given: a spawn object that never actually spawned a command,
        child = pexpect.spawn(None, None)
        # and a read that always reports no data, forcing expect() to time out.
        child.read_nonblocking = lambda size, timeout: b''
        try:
            child.expect('alpha', timeout=0.1)
        except pexpect.TIMEOUT as e:
            str(e)  # Smoketest: TIMEOUT.__str__ must not raise.
        else:
            assert False, 'TIMEOUT exception expected.  No exception raised.'
| 2.71875 | 3 |
function.py | enverbisevac/python-function-overloading | 0 | 12760403 | <reponame>enverbisevac/python-function-overloading
from inspect import getfullargspec
class Function(object):
    """Callable wrapper that dispatches through a virtual namespace,
    enabling overloading of a plain Python function by argument count."""

    def __init__(self, fn, namespace):
        self.fn = fn
        self.namespace = namespace

    def __call__(self, *args, **kwargs):
        """Resolve the concrete overload registered for *args* and invoke it."""
        registry = self.namespace.get_instance()
        target = registry.get(self.fn, *args)
        if not target:
            raise Exception("no matching function found.")
        # Invoke the resolved overload and return its value.
        return target(*args, **kwargs)

    def key(self, args=None):
        """Return a tuple uniquely identifying this (possibly overloaded)
        function: (module, class, name, arity)."""
        # When no explicit args are given, read them off the definition.
        arg_names = getfullargspec(self.fn).args if args is None else args
        return (
            self.fn.__module__,
            self.fn.__class__,
            self.fn.__name__,
            len(arg_names or []),
        )
yaraChk.py | noelpat/YARA-rule-checking-script | 0 | 12760404 | <reponame>noelpat/YARA-rule-checking-script<gh_stars>0
# This is a short python script meant for testing YARA rules against a specified directory.
# You can get a dataset of malware to test YARA rules with from a website such as virusshare.com.
import subprocess
import os
import stat
import errno
import sys
from io import StringIO
import yara
def Yara_check(directory):
    """Scan every executable file in *directory* against the Hajime YARA rule.

    Prints one line per file (match / undetected / not executable) and a
    final summary of matched vs. total executable files scanned.
    """
    matchCnt = 0
    totalCnt = 0
    print("Scanning files in directory:", directory)
    rules = yara.compile(filepath='Hajime.yara')
    # Iterate through the target directory
    for file in os.listdir(directory):
        filename = os.fsdecode(file)
        # os.path.join handles separators; string concatenation did not.
        realLocation = os.path.join(directory, filename)
        # The `file` CLI tool reports the file type; stderr folds into stdout.
        # subprocess.run waits and collects output (simpler than Popen/communicate).
        result = subprocess.run(['file', realLocation],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        if b'executable' in result.stdout:  # if file is executable
            totalCnt += 1
            # Check file/match with YARA
            matches = rules.match(realLocation, timeout=60)
            if not matches:
                print("Undetected sample found:", filename)
            else:
                matchCnt += 1
                print("File matches YARA rule:", filename)
        else:
            # (message grammar fixed: "a" -> "an")
            print("Not an executable file:", filename)

    print("Matches found:", matchCnt)
    print("Total files checked:", totalCnt)
def main():
    """Entry point: scan the hard-coded malware-sample directory with the YARA rule."""
    # Set target directory for testing the yara rule
    directory = "/malwareSamples/"
    Yara_check(directory) # check malware files with Yara

if __name__ == "__main__":
    main()
| 2.953125 | 3 |
tests/test_models/test_jobs.py | PhilippeGalvan/jinete | 0 | 12760405 | <filename>tests/test_models/test_jobs.py
import unittest
from copy import deepcopy
import jinete as jit
from tests.utils import (
generate_trips,
generate_one_job,
)
class TestJob(unittest.TestCase):
    """Unit tests for jit.Job construction and deep-copy semantics."""

    def test_construction(self):
        # A Job wraps a set of trips together with the objective class.
        trips = generate_trips(3)
        objective_cls = jit.HashCodeObjective
        job = jit.Job(trips, objective_cls)
        self.assertIsInstance(job, jit.Job)
        self.assertEqual(job.trips, trips)
        self.assertEqual(job.objective_cls, objective_cls)

    def test_deepcopy(self):
        # A deep copy must compare equal to the original job.
        job = generate_one_job(True)
        copied_job = deepcopy(job)
        self.assertEqual(job, copied_job)


if __name__ == '__main__':
    unittest.main()
| 2.625 | 3 |
DSL/DSL_1_A.py | yu8ikmnbgt6y/MyAOJ | 1 | 12760406 | <gh_stars>1-10
import sys
import io
import time
import pprint
input_txt = """
8 18
0 0 1
0 2 3
0 4 5
0 7 6
0 6 5
1 1 2
0 5 4
1 3 6
0 7 4
0 7 4
1 6 7
0 0 1
0 1 0
1 7 3
0 2 6
1 3 4
0 0 4
1 1 2
"""
#sys.stdin = io.StringIO(input_txt); tmp = input()
sys.stdin = open("DSL_1_A_in32.txt")
#sys.stdout = open("out.dat","w")
start = time.time()
# copy the below part and paste to the submission form.
# ---------function------------
import sys
from typing import Dict, List
def find_group(groups: Dict[str, List[str]], tgt: str) -> str:
    """Return the key of the group whose member list contains *tgt*,
    or "-1" when *tgt* belongs to no group.

    (Annotation fixed: the function returns a single str, not (bool, str).
    Linear scan over all groups and members.)
    """
    for key, members in groups.items():
        if tgt in members:
            return key
    return "-1"
def main():
    """Disjoint-set queries: '0 x y' unites the sets of x and y,
    '1 x y' prints 1 if x and y are in the same set, else 0.

    groups maps representative -> member list; groups_memo maps every
    member -> its current representative, so both lookups are O(1)
    (the original re-scanned every group per unite via find_group).
    """
    n, q = map(int, input().split())
    query_lines = sys.stdin.readlines()
    queries = [query_lines[x].split() for x in range(q)]
    answers = []
    groups = {str(i): [str(i)] for i in range(n)}
    groups_memo = {str(i): str(i) for i in range(n)}
    for op, a, b in queries:
        if op == '0':  # unite
            g1 = groups_memo[a]
            g2 = groups_memo[b]
            if g1 == g2:
                continue
            # Union by size: relabel the smaller group so total relabeling
            # work stays O(n log n) over all unions.
            if len(groups[g1]) < len(groups[g2]):
                g1, g2 = g2, g1
            for item in groups[g2]:
                groups_memo[item] = g1
            groups[g1].extend(groups[g2])
            del groups[g2]
        else:  # same
            answers.append(1 if groups_memo[a] == groups_memo[b] else 0)
    for ans in answers:
        print(ans)
    return

main()
# -----------------------------
print("elapsed:", time.time()-start)
sys.stdin = sys.__stdin__ | 2.59375 | 3 |
corehq/motech/dhis2/utils.py | dimagilg/commcare-hq | 1 | 12760407 | from datetime import date, timedelta
from dateutil.relativedelta import relativedelta
from dimagi.utils.dates import DateSpan
from corehq.apps.userreports.models import ReportConfiguration
from corehq.apps.userreports.reports.data_source import (
ConfigurableReportDataSource,
)
from corehq.util.couch import get_document_or_not_found
def get_report_config(domain_name, ucr_id):
    """Fetch the ReportConfiguration document *ucr_id* for *domain_name*
    (raises if the document is missing)."""
    return get_document_or_not_found(ReportConfiguration, domain_name, ucr_id)
def get_date_filter(report_config):
    """
    Returns the first date filter of the report, or None.

    Assumes the first date filter is the one to use.

    .. NOTE: The user might not want to filter by date for DHIS2
             integration. They can use a "period" column to return
             rows for multiple periods, or set a period for the report
             if it is always for the same period.
    """
    for report_filter in report_config.filters:
        if report_filter['type'] == 'date':
            return report_filter
    return None
def get_previous_month(send_date):
    """Return a DateSpan covering the full calendar month before *send_date*."""
    # Last day of the previous month: one day before the 1st of this month.
    last_day = date(year=send_date.year, month=send_date.month, day=1) - timedelta(days=1)
    first_day = date(year=last_day.year, month=last_day.month, day=1)
    return DateSpan(first_day, last_day)
def get_previous_quarter(send_date):
    """Return a DateSpan covering the calendar quarter before *send_date*'s."""
    # First month of the quarter containing send_date (1, 4, 7 or 10).
    current_quarter_start = (((send_date.month - 1) // 3) * 3) + 1
    # Start of the previous quarter: three months before this quarter starts.
    startdate = date(year=send_date.year, month=current_quarter_start, day=1) - relativedelta(months=3)
    # End of the previous quarter: the day before this quarter starts
    # (computed as start + 4 months - 1 day - 3 months).
    enddate = date(year=send_date.year, month=current_quarter_start, day=1) + relativedelta(months=4) - \
        timedelta(days=1) - relativedelta(months=3)
    return DateSpan(startdate, enddate)
def get_date_params(slug, date_span):
    """
    Mimics date filter request parameters.
    """
    start = date_span.startdate.strftime('%Y-%m-%d')
    end = date_span.enddate.strftime('%Y-%m-%d')
    params = {slug: start + "+to+" + end}
    params[slug + '-start'] = start
    params[slug + '-end'] = end
    return params
def get_ucr_data(report_config, date_filter, date_span):
    """Run the UCR report data source over *date_span* and return its rows."""
    # Local import — presumably avoids a circular import with the reports
    # view module; confirm before hoisting to the top of the file.
    from corehq.apps.userreports.reports.view import get_filter_values
    data_source = ConfigurableReportDataSource.from_spec(report_config, include_prefilters=True)
    # Only apply date parameters when the report actually has a date filter.
    filter_params = get_date_params(date_filter['slug'], date_span) if date_filter else {}
    filter_values = get_filter_values(report_config.ui_filters, filter_params)
    data_source.set_filter_values(filter_values)
    return data_source.get_data()
| 2.234375 | 2 |
# BMI calculator: read weight (kg) and height (cm), print the BMI class.
peso = float(input('Peso em Kilogramas: '))
altura = float(input('Altura em Centimetros: '))
imc = peso / ((altura / 100) ** 2)
print('O IMC esta em {:.2f}'.format(imc))
faixa = None
if imc < 18.5:
    faixa = 'Abaixo do Peso!'
elif imc <= 25:
    faixa = 'Peso Ideal'
elif imc <= 30:
    faixa = 'Sobrepeso'
elif imc <= 40:
    faixa = 'Obesidade'
elif imc > 40:
    faixa = 'Obesidade Morbida'
if faixa is not None:
    print(faixa)
| 3.8125 | 4 |
import sqlite3

# One connection/cursor pair for the whole script.  (The original opened two
# connections and left a stray line of raw SQL outside any execute() call,
# which was a syntax error.)
connection = sqlite3.connect('study_part1.sqlite3')
curs = connection.cursor()

# Schema: one row per student with study status, grade, age and sex.
table_1 = '''
CREATE TABLE table_1(
  student VARCHAR(20),
  studied VARCHAR(20),
  grade INT,
  age INT,
  sex VARCHAR(20)
);
'''
curs.execute(table_1)

insert_1 = '''
INSERT INTO table_1
(student, studied, grade, age, sex)
VALUES
('Lion-O', 'True', 85, 24, 'Male'),
('Cheetara', 'True', 95, 22, 'Female'),
('Mumm-Ra', 'False', 65, 153, 'Male'),
('Snarf', 'False', 70, 15, 'Male'),
('Panthro', 'True', 80, 30, 'Male');
'''
curs.execute(insert_1)
connection.commit()

query_age = '''SELECT AVG(age)
FROM table_1;'''
# Fixed: the original called the non-existent curs.excute().
print('avarage age', curs.execute(query_age).fetchone())

query_name = '''SELECT student
FROM table_1
where sex = 'Female';'''
print("name of female:", curs.execute(query_name).fetchall())

# Fixed: the original filtered on the misspelled literal 'Ture', which could
# never match the stored 'True' values and always counted zero rows.
query_studied = '''SELECT count(studied)
FROM table_1
where studied = 'True';'''
print("There are :", curs.execute(query_studied).fetchone())

query_order = '''SELECT *
FROM table_1
order by student;'''
# Fixed: unbalanced parentheses and fetchone() called on the SQL string
# instead of on the cursor.
print(curs.execute(query_order).fetchone())
| 3.59375 | 4 |
jobboard/views.py | yaelerdr/SJMaster | 0 | 12760410 | <filename>jobboard/views.py
from django.shortcuts import render
def board(request):
    """Render the static job-board landing page template."""
    return render(request, 'jobboard/board.html')
| 1.585938 | 2 |
usig_normalizador_amba/Callejero.py | hogasa/normalizador-amba | 0 | 12760411 | # coding: UTF-8
'''
Created on Apr 21, 2014
@author: hernan
'''
from __future__ import absolute_import
import urllib2
import re
import json
from bisect import bisect_left
from usig_normalizador_amba.Calle import Calle
from usig_normalizador_amba.settings import default_settings
from usig_normalizador_amba.commons import normalizarTexto
class Callejero:
    '''
    Street directory ("callejero") for one district, loaded from a remote
    JSON service and queried by street code or fuzzy name match.

    @cvar server: URL of the street-data server. Has a default value.
    @type server: String
    @cvar data: The street database [street_id, street_name, keywords, height_ranges_array, crossings_array]
    @type data: Array
    @cvar osm_ids: Array of street osm ids used for binary search [street_id]
    @type osm_ids: Array
    @ivar partido: District of the address
    @type partido: Partido
    '''
    server = ''
    data = []
    osm_ids = []
    partido = None

    # Mini-cache [calle, opts]
    # calle: the street string passed to buscarCalle
    # opts: the result it produced (seeded with an impossible key)
    minicache = ['gslgimigawakaranaigslg', []]

    def __init__(self, partido, config={}):
        '''
        Loads the street directory for *partido*.
        '''
        # default config
        self.config = default_settings.copy()
        # custom config
        self.config.update(config)

        self.partido = partido
        try:
            self.cargarCallejero()
        except Exception, e:
            raise e

    def cargarCallejero(self):
        '''Download and index the street data for this district.'''
        try:
            # CABA uses a dedicated server/encoding; the rest go through AMBA.
            if self.partido.codigo == 'caba':
                server = '{0}?full=1'.format(self.config['callejero_caba_server'])
                encoding = 'latin-1'
            else:
                server = '{0}callejero/?partido={1}'.format(self.config['callejero_amba_server'], self.partido.codigo)
                encoding = 'utf8'
            data = urllib2.urlopen(server).read()
            self.data = json.loads(data, encoding)

            # Append the normalized word set of each street name (used for
            # the permuted/included matching in buscarCalle).
            for d in self.data:
                if self.partido.codigo == 'caba':
                    d.append('CABA')
                d.append(set(normalizarTexto(d[1], separador=' ', lower=False).split(' ')))

            self.data.sort()  # Sort by id
            self.osm_ids = [k[0] for k in self.data]  # Build the osm_ids list
        except urllib2.HTTPError, e:
            e.detalle = 'Se produjo un error al intentar cargar la información de calles.'
            raise e
        except Exception, e:
            raise e

    def buscarCodigo(self, codigo):
        '''
        Searches streets by code (binary search over osm_ids) and returns
        the raw data rows that share that code.
        @param codigo: Street code
        @type codigo: Int
        @return: matching data rows
        @rtype: Array
        '''
        pos = bisect_left(self.osm_ids, codigo)
        if pos < len(self.data) and self.data[pos][0] == codigo:
            retval = [self.data[pos]]
            # Duplicated codes can sit on either side of the found position.
            # NOTE(review): the left-neighbour guard is `pos - 1 > 0`, so a
            # duplicate at index 0 would be missed — confirm intent.
            if pos - 1 > 0 and self.data[pos - 1][0] == codigo:
                retval.append(self.data[pos - 1])
            if pos + 1 < len(self.data) and self.data[pos + 1][0] == codigo:
                retval.append(self.data[pos + 1])
            return retval
        else:
            return []

    def buscarCalle(self, calle, limit=0):
        '''
        Searches streets whose name matches *calle* and returns an array
        with every Calle instance found, best matches first.
        @param calle: String to match
        @type calle: String
        @param limit: Maximum number of results to return. Zero means no limit.
        @type limit: Integer
        @return: Array of Calle instances that matched *calle*
        @rtype: Array of Calle
        '''
        if self.minicache[0] == calle:
            return self.minicache[1] if limit == 0 else self.minicache[1][:limit]

        # Four result buckets, ordered by match quality:
        # [exact name, same word set, word subset, keyword regex].
        res = [[], [], [], []]
        calleNorm1 = normalizarTexto(calle, separador=' ', lower=False)
        words1 = list(set(calleNorm1.split(' ')))
        words1.sort(key=len, reverse=True)
        regexps1 = map(lambda x: re.compile(ur'^{0}| {1}'.format(re.escape(x), re.escape(x))), words1)
        words1 = set(words1)

        # commons.matcheaTexto is intentionally not used here for speed.
        # The search is not pruned at `limit` so the best matches win.
        for data in self.data:
            if calle == data[1]:  # Exact match on the name
                res[0].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
            else:  # Permuted match on the name
                if (words1 == data[6]):
                    res[1].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
                elif (words1 == words1 & data[6]):  # Included match on the name
                    res[2].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))
                else:  # Match against the street's keywords
                    match = True
                    for regexp in regexps1:
                        if regexp.search(data[2]) is None:
                            match = False
                            break
                    if match:
                        res[3].append(Calle(data[0], data[1], data[3], data[4], self.partido, data[5]))

        res = res[0] + res[1] + res[2] + res[3]
        self.minicache = [calle, res]

        return res if limit == 0 else res[:limit]
| 1.96875 | 2 |
parlai/agents/ryan/ryan.py | roholazandie/ParlAI | 0 | 12760412 | import os, re
from datetime import datetime
import xml.etree.ElementTree as ET
from typing import Optional
import yaml
# import logging.config
from parlai.agents.programr.aiml_manager import AIMLManager
from parlai.agents.programr.config.programrconfig import ProgramrConfiguration
from parlai.agents.programr.dialog.dialogmanager import Dialog
from parlai.agents.programr.mappings.user_preferences import UserPreferencesMapping
from parlai.agents.programr.robot.facial_expression_recognition import FacialExpressionRecognition
from parlai.agents.programr.robot.sentimentdata import SentimentData
# from parlai.agents.programr.utils.logging.ylogger import YLogger
from parlai.agents.transformer.transformer import TransformerGeneratorAgent
from parlai.core.agents import Agent
import parlai.utils.logging as logging
from parlai.core.message import Message
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import History
class RyanAgent(TransformerGeneratorAgent):
    """Hybrid agent: answers via program-r (AIML) first, then hands the
    conversation to the Blender transformer after `--num-steps` turns."""

    P1_TOKEN = '__<PASSWORD>'
    # NOTE(review): the next line looks corrupted (missing opening quote) —
    # confirm the intended literal before shipping.
    P2_TOKEN = <PASSWORD>'

    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register program-r specific command-line options."""
        parser.add_argument_group("programr arguments")
        parser.add_argument("-pc", "--programr-config", help="the config file of programr")
        parser.add_argument("-cf", "--config-format", help="the format of config, could be yaml or json")
        parser.add_argument("-lp", "--logging-file", help="the logging file of programr")
        parser.add_argument("-tt", "--text-truncate", type=int, help="")
        parser.add_argument("-hs", "--history-size", type=int, help="the size of the history (in tokens) to pass to the model")
        parser.add_argument("-ns", "--num-steps", type=int, help="specifies the number of steps to switch from program-r to blender")
        return parser

    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        self.opt = opt
        self.configuration = self.load_configuration()
        self.dialog = Dialog(self.configuration)
        self.user_preferences = UserPreferencesMapping() # These are the "variables" from aiml set and get tags.
        self.message_number = 1
        self.sentiment_data = SentimentData()
        self.facial_expression_recognition = FacialExpressionRecognition()
        # Only the first (non-shared) copy loads the AIML brain; clones reuse it.
        if not shared:
            self.aiml_manager = AIMLManager.get_instance()
            self.aiml_manager.load_configuration(self.configuration.bot.brain) # todo: is it safe to load configs once?
            logging.info('ryan model loaded')
        else:
            self.aiml_manager = shared["aiml_manager"]
        self.chatbot = "programr"
        self._question_depth = 0
        self._question_start_time = None
        self._conversation_storage = None
        self._num_steps = self.opt.get("num_steps", 10)

    def share(self):
        """Share state with cloned agents (batching/validation workers)."""
        shared = super().share()
        # share program-r too
        shared["aiml_manager"] = self.aiml_manager
        return shared

    def load_configuration(self):
        """Load the program-r configuration file given on the command line."""
        programr_config = self.opt.get("programr_config", None)
        config_format = self.opt.get("config_format")
        if programr_config is not None:
            self.bot_root = os.path.dirname(programr_config)
            configuration = ProgramrConfiguration.from_file(programr_config, config_format)
        else:
            self.bot_root = "."
            raise Exception("No config file is provided")
        # setattr(self._configuration, 'brain_in_memory', os.getenv("BRAIN_IN_MEMORY", 'True').lower() in ('true', '1', 'yes', 'y', 't'))
        return configuration

    def strip_oob(self, response):
        """Split an AIML response into (visible text, <oob> payload or None)."""
        match = re.compile(r"(.*)(<\s*oob\s*>.*<\/\s*oob\s*>)(.*)")
        groupings = match.match(response)
        if groupings is not None:
            front = groupings.group(1).strip()
            back = groupings.group(3).strip()
            response = ""
            if front != "":
                response = front + " "
            response += back
            oob = groupings.group(2)
            return response, oob
        return response, None

    def process_oob(self, oob_command):
        """Dispatch an <oob> XML command to its registered handler class."""
        oob_content = ET.fromstring(oob_command)
        if oob_content.tag == 'oob':
            for child in oob_content.findall('./'):
                if child.tag in self.aiml_manager._oob:
                    oob_class = self.aiml_manager._oob[child.tag]
                    return oob_class.process_out_of_bounds(child)
                return self.aiml_manager._default_oob.process_out_of_bounds(child)
        return ""

    def resolve_matched_template_with_options(self, match_context):
        """Resolve a matched AIML template; extract any <oob> options."""
        template_node = match_context.template_node()
        # YLogger.debug(self, "AIML Parser evaluating template [%s]", template_node.to_string())
        response = template_node.template.resolve(self)
        # todo the following can go inside template nodes
        options = {}
        if "<oob>" in response:
            response, oob = self.strip_oob(response)
            if oob is not None:
                options = self.process_oob(oob)
        return response, options

    def set_variable(self, key, value):
        """Store an AIML <set> variable in the user preferences mapping."""
        self.user_preferences.add_variable(key, value)

    def get_variable(self, name):
        """Return an AIML <get> variable, or None when it was never set."""
        if name in self.user_preferences.mapping:
            return self.user_preferences.mapping[name]
        return None

    def process_sentence(self, sentence):
        """Match one sentence against the AIML brain and build the answer."""
        try:
            topic_pattern = self.user_preferences.variables("topic")
            logging.debug(f"Got Topic: {topic_pattern}")
        except Exception as e:
            # No topic stored yet: fall back to the wildcard topic.
            self.user_preferences.add_variable("topic", "*")
            topic_pattern = "*"
            logging.debug(f"Couldn't find topic from user preferences because - {e}")
            logging.debug("setting to default \"*\"")
        that_pattern = self.dialog.conversation.get_that_pattern()
        match_context = self.aiml_manager._aiml_parser.match_sentence(self.aiml_manager.nlp,
                                                                      sentence,
                                                                      topic_pattern=topic_pattern,
                                                                      that_pattern=that_pattern)
        if match_context is not None:
            response, options = self.resolve_matched_template_with_options(match_context)
            post_processed_response = self.aiml_manager.postprocessors.process(response)
            return self.dialog.handle_response(post_processed_response, options)
        else:
            options = {"robot": None}
            return self.dialog.handle_none_response(options)

    def mark_question_start(self, question):
        """Track question nesting depth; timestamp only the outermost one."""
        # YLogger.debug(self, "Question (%s): %s", self.id, question)
        if self._question_depth == 0:
            self._question_start_time = datetime.now()
        self._question_depth += 1

    def pre_process_text(self, text, srai):
        """Uppercase and (for non-srai calls) run the AIML preprocessors."""
        text = text.upper()
        if srai is False:
            pre_processed = self.aiml_manager.preprocessors.process(text)
            # YLogger.debug(self, "Pre Processed (%s): %s", self.id, pre_processed)
        else:
            pre_processed = text
        if pre_processed is None or pre_processed == "":
            pre_processed = self.configuration.empty_string
        return pre_processed

    def ask_question(self, text, srai=False):
        """Run one user utterance through program-r; return (answer, options)."""
        try:
            self.mark_question_start(text)
            pre_processed = self.pre_process_text(text, srai)
            question = self.dialog.get_question(pre_processed, srai)
            self.dialog.record_question(question) # this is partial recording, as we go we add more info like response
            answer_sentence, option = self.process_sentence(question.sentences[0])
            self.dialog.add_response_to_question(question, answer_sentence)
            self.dialog.record_answer(answer_sentence, option, srai)
            if srai:
                # Nested srai calls should not remain in conversation history.
                self.dialog.conversation.pop_dialog()
            answer_sentence_text = " ".join(answer_sentence.words)
            return answer_sentence_text, option
        except Exception as e:
            print(e)
            # YLogger.error(self, "Exception caught in ask_question of bot.")
            # YLogger.error(self, e)

    def act(self):
        """Produce one reply: Blender after `_num_steps` turns, program-r before."""
        reply = Message(
            {
                'id': self.getID(),
                'label_candidates': [],
                'episode_done': False,
            }
        )
        # history_strings counts both sides of the dialogue, i.e. turns.
        if len(self.history.history_strings) > self._num_steps: # should be divided by two because it's a round
            logging.info("using blender")
            response = self.batch_act([self.observation])[0]
            reply["text"] = response['text']
        else:
            logging.info("using progrmr")
            sentence_text, options = self.ask_question(self.observation['text'])
            reply['text'] = sentence_text
        self.self_observe(reply) # self_observe called twice: one in super().act() and once here
        return reply

    # (Removed two superseded, fully commented-out drafts of act()/observe()
    # that switched between chatbots via the options payload.)

    def self_observe(self, self_message: Message) -> None:
        """
        Observe one's own utterance.

        This is used so that the agent can incorporate its own response into
        the dialogue history after a batch_act. Failure to implement this will
        result in an agent that cannot hear itself speak.

        :param self_message:
            The message corresponding to the output from batch_act.
        """
        use_reply = self.opt.get('use_reply', 'label')
        assert self.observation is not None
        if self.observation['episode_done']:
            # oh this was the last example in the episode. reset the history
            self.history.reset()
            # additionally mark the last observation as invalid
            self.observation = None
            # and clear the safety check
            self.__expecting_clear_history = False
            return

        # We did reply! Safety check is good next round.
        self.__expecting_to_reply = False

        # actually ingest the label
        if use_reply == 'none':
            # we're not including our own responses anyway.
            return
        elif use_reply == 'label':
            # first look for the true label
            label_key = (
                'labels'
                if 'labels' in self.observation
                else 'eval_labels'
                if 'eval_labels' in self.observation
                else None
            )
            if label_key is not None:
                lbls = self.observation[label_key]
                last_reply = lbls[0] if len(lbls) == 1 else self.random.choice(lbls)
                self.history.add_reply(last_reply)
                return
            # you might expect a hard failure here, but in interactive mode we'll
            # never get a label

        # otherwise, we use the last output the model generated
        if self_message is not None:
            last_reply = self_message['text']
            self.history.add_reply(last_reply)
            return

        raise RuntimeError("Unexpected case in self_observe.")
| 1.992188 | 2 |
backend_wagtail_api/users/graphql/createUser.py | Audiotuete/backend_wagtail_api | 0 | 12760413 | <reponame>Audiotuete/backend_wagtail_api
import graphene
from django.apps import apps as django_apps
from django.utils.crypto import get_random_string
from django.conf import settings
#Types
from .__types import UserType
#Models
from ..models import User
# Creates an anonymous User with a random username, bound to a Challenge.
# (Plain `#` comments are used instead of docstrings because graphene exposes
# class docstrings as GraphQL schema descriptions.)
class CreateUserMutation(graphene.Mutation):
    user = graphene.Field(UserType)

    class Arguments:
        # username = graphene.String(required=True)
        # email = graphene.String(required=True)
        # password = graphene.String(required=True)
        challengeId = graphene.ID(required=True)
        browserInfo = graphene.String(required=True)
        osInfo = graphene.String(required=True)

    def mutate(self, info, challengeId, browserInfo, osInfo):
        # if len(username) < 3:
        # raise Exception('Username must have at least 3 characters!')
        # if len(password) < 8:
        # raise Exception('The password must be at least 8 characters long!')
        # if User.objects.filter(username = username_lowercase):
        # raise Exception('Username already exists!')

        # Draw random 16-char usernames until one is unused.
        generated_username = get_random_string(length=16).lower()
        while User.objects.filter(username = generated_username):
            generated_username = get_random_string(length=16).lower()

        # Resolved via the app registry to avoid a circular model import.
        Challenge = django_apps.get_model('app_challenges', 'Challenge')
        match_challenge = Challenge.objects.get(id = challengeId)

        user = User(
            username = generated_username,
            # email = email,
            current_challenge = match_challenge,
            browser_info = browserInfo,
            os_info = osInfo,
        )
        # All anonymous users share the configured password.
        user.set_password(settings.USER_PASSWORD)
        user.save()
        return CreateUserMutation(user=user)
# Mutation container exposing ``create_user`` on the GraphQL schema.
class CreateUser(graphene.ObjectType):
    create_user = CreateUserMutation.Field()
cogs/owner.py | MisileLab/Crayonbot | 0 | 12760414 | <gh_stars>0
import discord
from discord.embeds import Embed
from discord.ext import commands, tasks
import asyncio
import random
import os
from discord.ext.menus import Button
from discord_components import component
from discord_components import Button, ButtonStyle, SelectOption, Select
import pytz
import aiosqlite
import discordSuperUtils
import datetime
from PycordPaginator import Paginator
class Owner(commands.Cog):
    """Owner-only administration commands: guild listing, error logs,
    announcements, and a SQLite-backed user blacklist."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(
        name = "서버리스트",
        aliases = ['serverlist']
    )
    @commands.is_owner()
    async def owner_serverlist(self, ctx):
        """DM the invoker a text dump of every guild the bot is in."""
        with open("guilds.txt", 'w', -1, "utf-8") as a: # create 'guilds.txt' and bind it as 'a'
            a.write(str(self.bot.guilds)) # write the list of guilds the bot has joined
        file1 = discord.File("guilds.txt") # wrap the dump as a Discord attachment
        await ctx.author.send(file=file1) # DM the file to the command author
        os.remove("guilds.txt")
        await ctx.reply(f"DM으로 서버 리스트 발송을 완료했습니다!")

    @commands.command(
        name="Check-Error",
        aliases=["elog"],
        usage="elog [code]",
        help=" 코인의 에러 로그를 확인할수 있습니다.",
        hidden=True,
    )
    @commands.is_owner()
    async def owner_elog(self, ctx, code):
        """Show the stored error log for *code*, falling back to a file upload."""
        try:
            f = open(f"data/error_logs/{code}", "r", encoding="utf8")
            data = f.read()
            await ctx.send(f"```py\n{data}\n```")
            f.close()
        except:
            # NOTE(review): if open()/read() itself failed, `data` is unbound
            # here and this raises NameError — confirm the intended fallback.
            await ctx.send(
                content=code, file=discord.File(fp=data, filename=f"{code}.txt")
            )

    @commands.command(name="공지")
    @commands.is_owner()
    async def broadcasting(self, ctx, *, value):
        """Broadcast an announcement embed to one channel per guild."""
        em = discord.Embed(
            title="짱구 봇 공지사항",
            description=value,
            colour=discord.Colour.random()
        )
        em.set_thumbnail(url=self.bot.user.avatar_url)
        em.set_image(
            url="https://media.discordapp.net/attachments/921555509935480853/921555519578189834/c265877614d80026.png?width=400&height=144")
        em.set_footer(text="특정 채널에 받고싶다면 '짱구야 설정'으로 설정하세요! 권한 확인 필수!")
        msg = await ctx.reply("발송중...")
        guilds = self.bot.guilds
        ok = []
        ok_guild = []
        success = 0
        failed = 0
        # Pass 1: guilds that opted in via a channel topic containing "-HOnNt".
        # The two hard-coded guild ids are always skipped.
        for guild in guilds:
            channels = guild.text_channels
            for channel in channels:
                if guild.id in [653083797763522580, 786470326732587008]:
                    break
                if (
                    channel.topic is not None
                    and str(channel.topic).find("-HOnNt") != -1
                ):
                    ok.append(channel.id)
                    ok_guild.append(guild.id)
                    break
        # Pass 2: remaining guilds get a randomly chosen text channel.
        for guild in guilds:
            channels = guild.text_channels
            for _channel in channels:
                if guild.id in ok_guild:
                    break
                if guild.id in [653083797763522580, 786470326732587008]:
                    break
                random_channel = random.choices(channels)
                ok.append(random_channel[0].id)
                break
        for i in ok:
            channel = self.bot.get_channel(i)
            try:
                await channel.send(embed=em)
                success += 1
            except discord.Forbidden:
                failed += 1
        await msg.edit("발송완료!\n성공: `{ok}`\n실패: `{no}`".format(ok=success, no=failed))

    @commands.group(name="블랙",invoke_without_command=True)
    async def blacklist(self,ctx:commands.Context):
        """Show the invoker's own blacklist entry, if any."""
        database = await aiosqlite.connect("db/db.sqlite")
        cur = await database.execute("SELECT * FROM black WHERE user = ?", (ctx.author.id,))
        if await cur.fetchone() == None:
            return await ctx.reply(f"{ctx.author}님은 블랙리스트에 등록되어있지 않아요.")
        # NOTE(review): this second fetchone() reads the *next* row of the
        # cursor, not the one just checked — confirm this is intended.
        data = await cur.fetchone()
        await ctx.reply(f"블랙사유: {data[1]}")

    @blacklist.command(name= '추가', aliases=['black','블랙','blackadd'])
    @commands.is_owner()
    async def mod_black(self, ctx, user_id:int,*,reason):
        """Add *user_id* to the blacklist with *reason* and DM them."""
        user = await self.bot.fetch_user(user_id)
        db = await aiosqlite.connect("db/db.sqlite")
        cur = await db.execute("SELECT * FROM black WHERE user = ?", (user_id,))
        datas = await cur.fetchone()
        # NOTE(review): when the user is already listed this only warns —
        # the INSERT below still runs, creating a duplicate row; confirm.
        if datas != None:
            embed = discord.Embed(
                title = f"블랙",
                description = f"{user}님은 블랙리스트에 등록되어있어요. \n사유: {datas[1]}",
                colour = discord.Colour.random(),
                timestamp = ctx.message.created_at
            )
            await ctx.send(embed=embed)
        await db.execute("INSERT INTO black(user,reason,username) VALUES (?,?,?)", (user_id, reason, user.name))
        await db.commit()
        embed2=discord.Embed(
            title="블랙",
            description = f"__봇관리자로 부터 블랙 등록되었음을 알려드립니다__ \n\n 관리자가 아래의 사유로 블랙을 등록하셨어요.\n\n 사유 : {reason}",
            colour=discord.Colour.random() )
        try:
            await user.send(embed=embed2)
        except:
            # The user may have DMs disabled; the blacklist entry still stands.
            pass
        await ctx.reply("등록완료!")

    @blacklist.command(name= '삭제', aliases=['blackdel','제거'])
    @commands.is_owner()
    async def mod_black_del(self, ctx, user_id:int):
        """Remove *user_id* from the blacklist and DM them."""
        user = await self.bot.fetch_user(user_id)
        db = await aiosqlite.connect("db/db.sqlite")
        cur = await db.execute("SELECT * FROM black WHERE user = ?", (user_id,))
        datas = await cur.fetchone()
        embed=discord.Embed(title="블랙", description=f"{user}님은 블랙리스트에 등록되어있지않아요.",colour=discord.Colour.random())
        if datas == None:
            return await ctx.send(embed=embed)
        await db.execute("DELETE FROM black WHERE user = ?", (user_id,))
        await db.commit()
        embed2=discord.Embed(title="블랙", description="__봇 관리자로부터 블랙해제됨.__\n\n 봇관리자가 블랙해제하셨어요.",colour=discord.Colour.random())
        try:
            await user.send(embed=embed2)
        except:
            # NOTE(review): bare `print` (no call) — likely meant `pass`.
            print
        await ctx.reply("해제완료")

    @blacklist.command(name= '목록')
    @commands.is_owner()
    async def mod_black_jo(self, ctx):
        """Paginate every blacklist entry (id, reason, name)."""
        database = await aiosqlite.connect("db/db.sqlite")
        cur = await database.execute("SELECT * FROM black")
        datas = await cur.fetchall()
        black_list = []
        for i in datas:
            black_list.append(f"```유저아이디|{i[0]} \n사유|{i[1]} \n이름|{i[2]}```")
        e = Paginator(
            client=self.bot.components_manager,
            embeds=discordSuperUtils.generate_embeds(
                black_list,
                title=f"블랙목록에 유저들이 등록되어있어요.",
                fields=10,
                description="```블랙해제를 하실거면 \n짱구야 블랙 제거 [유저아이디]를 해주시면 됩니다!```",
            ),
            channel=ctx.channel,
            only=ctx.author,
            ctx=ctx,
            use_select=False)
        await e.start()
        #await ctx.send(templates[1])

    @blacklist.command(name= '초기화', aliases=["reset"])
    @commands.is_owner()
    async def black_rest(self, ctx):
        """Wipe the entire blacklist table."""
        db = await aiosqlite.connect("db/db.sqlite")
        await db.execute("DELETE FROM black")
        await db.commit()
        cur = await db.execute("SELECT * FROM black")
        datas = await cur.fetchall()
        # fetchall() returns a (possibly empty) list, so this always replies.
        if datas != None:
            await ctx.reply("초기화 완료")

    # (Removed a fully commented-out legacy broadcast loop that walked
    # guild channels by topic/name keywords.)
def setup(bot):
    """discord.py extension entry point: register the Owner cog."""
    bot.add_cog(Owner(bot))
ats_log_parser/profit_data_daily/parse_others.py | hong142101/ats_log_parser | 1 | 12760415 | <filename>ats_log_parser/profit_data_daily/parse_others.py
# -*- coding: utf-8 -*-
import re
import os
import datetime as dt
import xml.etree.ElementTree as ElementTree
########################################################################################################################
class ParseSymbolTree:
    """Parses trading-session rules per contract from symbol_tree.xml."""

    def __init__(self):
        # Prefer the packaged module copy; fall back to the local file.
        if os.path.exists(r'.\module\symbol_tree.xml'):
            self.file = r'.\module\symbol_tree.xml'
        else:
            self.file = 'symbol_tree.xml'
        self.tree = ElementTree.parse(self.file)
        self.root = self.tree.getroot()

    # ----------------------------------------------------------------------------------------------------------------------
    def trade_time_from_to(self):
        # Flatten every rule in the symbol tree into
        # [area, exchange, derivative, contract, rule_from, rule_to, stage_from, stage_to].
        whole_time_from_to = []
        for area in self.root:
            for exchange in area:
                for derivative in exchange:
                    for contract in derivative:
                        for time_rule in contract:
                            for stage in time_rule:
                                whole_time_from_to.append(
                                    [str(area.tag),
                                     str(exchange.tag),
                                     str(derivative.tag),
                                     str(contract.tag),
                                     int(re.findall(r'(^[0-9]+)T.*', time_rule.get('from'))[0]),
                                     int(re.findall(r'(^[0-9]+)T.*', time_rule.get('to'))[0]),
                                     int(stage.get('from')),
                                     int(stage.get('to'))])
        return whole_time_from_to

    # ----------------------------------------------------------------------------------------------------------------------
    @staticmethod
    def get_time_from_to(whole_time_from_to, futures, date):
        # Trading-time rules applying to one product on one date
        # (rule is valid for rule_from <= date < rule_to).
        time_from_to = []
        for i in range(len(whole_time_from_to)):
            if (futures == whole_time_from_to[i][3]) and \
                    ((date >= whole_time_from_to[i][4]) and (date < whole_time_from_to[i][5])):
                time_from_to.append(whole_time_from_to[i])
        return time_from_to
########################################################################################################################
def get_new_profit_data(md, ci, pst, account, new_trades_position_data):
    """
    Iterate over the days of trades and positions that need updating for a
    single account, and compute that account's closed-trade and holding
    profits per settlement day.
    """
    new_profit_data = []
    for daily_position in new_trades_position_data:
        settle_date = daily_position['settle_date']
        closed_trades = daily_position['closed_trades']
        holding_positions = daily_position['holding_positions']

        # Profit realised by trades closed on the day
        closed_trades_profit = round(sum(closed_trades['net_profit']), 2)

        # Daily settlement of open positions, marked at the close price
        holding_positions_profit = round(parse_every_day_holding(md, ci, pst, settle_date, holding_positions), 2)

        whole_profit = round(closed_trades_profit + holding_positions_profit, 2)

        new_profit_data.append(
            [settle_date,
             whole_profit,
             closed_trades_profit,
             holding_positions_profit]
        )
        print(account, settle_date)
    return new_profit_data
# ----------------------------------------------------------------------------------------------------------------------
def parse_every_day_holding(md, ci, pst, settle_date, holding_positions):
    """Mark one account's open positions to market for a single day,
    using each contract's last trade price at the session close."""
    # futures_info = pd.read_csv(r'.\futures.csv')
    date = int(dt.datetime.strptime(settle_date, "%Y-%m-%d").strftime("%Y%m%d"))
    futures_holding_profit = list()
    whole_time_from_to = pst.trade_time_from_to()
    for j in holding_positions.index:
        # Pull the identifying fields out of the holding record
        futures = holding_positions['symbol'][j]
        contract = holding_positions['contract'][j]
        if futures is None:
            # Derive the product code from the contract id, e.g. "rb2101" -> "rb".
            futures = re.findall(r'(^[a-zA-Z]+)[0-9]+$', contract)[0]
        mul = int(ci.get_contract_info(contract, str(date))['VolumeMultiple'])
        time_from_to = pst.get_time_from_to(whole_time_from_to, futures, date)
        # Latest daytime session end (<= 20:00:00) is taken as the close.
        close_time = 0
        for piece in time_from_to:
            if piece[7] <= 200000:
                if piece[7] > close_time:
                    close_time = piece[7]
        close_datetime = ' '.join((settle_date, str(close_time)))
        close_datetime = dt.datetime.strptime(close_datetime, "%Y-%m-%d %H%M%S").strftime("%Y-%m-%d %H:%M:%S")
        # Fetch last_price from wind / live market data
        close_price = md.get_close_price(contract, close_datetime)
        if not type(close_price) is str:
            profit = (close_price - holding_positions['open_deal_price'][j]) * \
                     holding_positions['dir'][j] * \
                     holding_positions['quantity'][j] * mul
        else:
            # A string result signals a data error; count the day as flat.
            print("ERROR", futures, settle_date, close_price)
            profit = 0
        futures_holding_profit.append(profit)
    return sum(futures_holding_profit)
def clean_profit_data(profit_data):
    """Drop settlement-day rows whose total, closed and holding profits are
    all zero. Filters *profit_data* in place and returns the same list."""
    profit_data[:] = [
        row for row in profit_data
        if not (row[1] == 0 and row[2] == 0 and row[3] == 0)
    ]
    return profit_data
if __name__ == "__main__":
    # Ad-hoc smoke check: dump every trading-time rule parsed from the XML.
    print(ParseSymbolTree().trade_time_from_to())
| 2.578125 | 3 |
setup.py | tianer2820/cmdWrapper | 1 | 12760416 | from setuptools import setup
# Packaging metadata for the cmdWrapper distribution.
# NOTE(review): 'requires' is largely ignored by modern packaging tools —
# 'install_requires' is the field pip honours; confirm intent.
setup(
    name='cmdWrapper',
    version='0.1',
    packages=['cmdWrapper'],
    url='https://github.com/tianer2820/cmdWrapper',
    license='MIT',
    author='Toby',
    author_email='<EMAIL>',
    description='a vary simple gui lib based on wxpython',
    requires=['wxPython']
)
| 1.164063 | 1 |
virtual/lib/python3.6/site-packages/auth_token/migrations/0010_auto_20190723_1410.py | AudreyCherrie/ihood | 6 | 12760417 | # Generated by Django 2.0 on 2019-07-23 12:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django 2.0: adjusts verbose_name metadata on the
# DeviceKey model's fields (no schema-level data change).
class Migration(migrations.Migration):
    dependencies = [
        ('auth_token', '0009_auto_20190622_1920'),
    ]

    operations = [
        migrations.AlterField(
            model_name='devicekey',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='created at'),
        ),
        migrations.AlterField(
            model_name='devicekey',
            name='is_active',
            field=models.BooleanField(default=True, verbose_name='is active'),
        ),
        migrations.AlterField(
            model_name='devicekey',
            name='last_login',
            field=models.DateTimeField(blank=True, null=True, verbose_name='last login'),
        ),
        migrations.AlterField(
            model_name='devicekey',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL,
                                    verbose_name='user'),
        ),
        migrations.AlterField(
            model_name='devicekey',
            name='uuid',
            field=models.CharField(max_length=32, verbose_name='UUID'),
        ),
    ]
| 1.742188 | 2 |
ad_api/sp_brands/brands.py | 854350999/python-amazon-advertising-api | 0 | 12760418 |
from ..client import Client
class Brands(Client):
    """Brands endpoint wrapper built on the shared API Client."""

    def get_brands(self, brand_type_filter):
        """
        Prepare and execute a GET /brands request.

        :param brand_type_filter: value sent as the ``brandTypeFilter``
            query parameter
        :return: whatever ``Client.execute()`` returns for this request
        """
        self.uri_path = "/brands"
        self.method = "get"
        self.params = {
            "brandTypeFilter": brand_type_filter
        }
        return self.execute()
| 2.34375 | 2 |
data_control.py | DudkinON/catalog | 0 | 12760419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from validate_email import validate_email
from random import choice
from string import ascii_uppercase as uppercase, digits
from settings import *
def allowed_file(filename, extensions):
    """
    Check whether *filename* carries an extension listed in *extensions*.

    :param filename: string file name
    :param extensions: iterable of lowercase extensions (without dots)
    :return bool:
    """
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension.lower() in extensions
def email_is_valid(email):
    """
    Check email is valid.

    Thin wrapper around the third-party ``validate_email`` package;
    kept so the rest of the codebase is insulated from that dependency.

    :param email: email address string
    :return bool:
    """
    return validate_email(email)
def get_unique_str(amount):
    """
    Return a random string of *amount* uppercase letters and digits.

    Note: uses ``random.choice``, which is not cryptographically secure;
    fine for generating file names, not for secrets.

    :param amount: number of characters to generate
    :return: random string of length *amount*
    """
    # ``range`` instead of the Python-2-only ``xrange`` so this also runs
    # under Python 3 (behavior is identical on Python 2).
    return ''.join(choice(uppercase + digits) for _ in range(amount))
def get_path(filename, folder):
    """
    Generate a unique path to an image like folder/xx/xx/xxxxxxxxxxxxxx.jpg

    Side effect: creates the two-level sharded directory under BASE_DIR.
    Only the extension of *filename* is kept; the rest of the name is
    replaced by a random 18-character token.

    :param filename: original file name (used only for its extension)
    :param folder: relative folder prefix (presumably begins with a path
        separator so the plain BASE_DIR concatenation below is well-formed
        -- TODO confirm against settings)
    :return string:
    """
    ext = filename.split('.')[-1]
    u_name = get_unique_str(18).lower()
    # first 2 + next 2 chars of the token shard files across subdirectories
    path = os.path.join(folder, u_name[:2], u_name[2:4])
    abs_path = ''.join([BASE_DIR, path])  # NOTE(review): string concat, not os.path.join
    try:
        os.makedirs(abs_path)
    except OSError:
        # directory already exists (or cannot be created); ignored either way
        pass
    full_path = os.path.join(folder, u_name[:2], u_name[2:4], u_name[4:])
    return '.'.join([full_path, ext])
| 3.453125 | 3 |
bw_recipe_2016/categories/land.py | lvandepaer/bw_recipe_2016 | 0 | 12760420 | <gh_stars>0
from ..base import ReCiPe2016
from ..strategies import (
match_multiple,
generic_reformat,
final_method_name,
check_duplicate_cfs,
)
from ..strategies.land import (
add_missing_flows,
complete_method_name,
reset_categories,
set_unit,
)
from functools import partial
class LandTransformation(ReCiPe2016):
    """ReCiPe 2016 land-transformation method: configures the strategy
    pipeline (reformat, unit setting, name completion, biosphere matching,
    duplicate-CF check) applied to the imported CF data."""

    # v1.13 method tuple this importer supersedes
    previous_reference = (
        "ReCiPe Midpoint (E) V1.13",
        "natural land transformation",
        "NLTP",
    )

    def __init__(self, data, biosphere, version=2):
        super().__init__(data, biosphere, version)
        # order matters: names are completed before flows are matched
        self.strategies = [
            partial(generic_reformat, config=self.config),
            set_unit,
            partial(
                complete_method_name, name="Land transformation", config=self.config
            ),
            partial(match_multiple, other=self.biosphere,),
            final_method_name,
            partial(check_duplicate_cfs, biosphere=biosphere),
        ]
class LandOccupation(ReCiPe2016):
    """ReCiPe 2016 land-occupation method: like LandTransformation but with
    extra strategies that add missing flows and reset their categories."""

    # v1.13 method tuple this importer supersedes
    previous_reference = (
        "ReCiPe Midpoint (E) V1.13",
        "agricultural land occupation",
        "ALOP",
    )

    def __init__(self, data, biosphere, version=2):
        super().__init__(data, biosphere, version)
        self.strategies = [
            partial(generic_reformat, config=self.config),
            set_unit,
            add_missing_flows,
            reset_categories,
            partial(complete_method_name, name="Land occupation", config=self.config),
            partial(match_multiple, other=self.biosphere,),
            final_method_name,
            partial(check_duplicate_cfs, biosphere=biosphere),
        ]
| 1.8125 | 2 |
Petstagram/pets/migrations/0002_auto_20201027_1528.py | Irinkaaa/Petstagram_Django_Project | 0 | 12760421 | # Generated by Django 3.1.2 on 2020-10-27 13:28
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Like.field and tightens the
    Like.pet foreign key and Pet.type choices."""

    dependencies = [
        ('pets', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='like',
            name='field',
            field=models.CharField(default='aa', max_length=2),
        ),
        migrations.AlterField(
            model_name='like',
            name='pet',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='pets.pet'),
        ),
        migrations.AlterField(
            model_name='pet',
            name='type',
            field=models.CharField(choices=[('cat', 'Cat'), ('dog', 'Dog'), ('parrot', 'Parrot')], max_length=6),
        ),
    ]
| 1.773438 | 2 |
googleSentimentAnalysis/googleAnalyze.py | yanboyang713/crossLanguageSentimentAnalysis | 0 | 12760422 | import openpyxl
import sys
from googleSentiment import Google
def main():
    """Read sentences from column 24 of Ranking30.xlsx, run Google sentiment
    analysis on each, and write score/magnitude back to columns 25/26,
    saving the workbook at the end."""
    fileName = 'Ranking30.xlsx'
    # Select which lines of the input sentences you wish to use
    input_selection = [1, 18976]
    google = Google()
    try:
        file = openpyxl.load_workbook(fileName)
    except:
        # fall back to a fresh workbook when the file is missing/unreadable
        file = openpyxl.Workbook()
    sheet = file['Sheet1']
    for line_counter in range(input_selection[0], input_selection[1]):
        try:
            sentence = sheet.cell(row=line_counter, column=24).value
            sentence = sentence.encode()
            # Language code passed to the API. NOTE(review): "zh" is Chinese
            # but "es" is Spanish (not English as the original comment said)
            # -- confirm which language was actually intended.
            languageIn = "es"
            google.analyze(sentence, languageIn)
            sheet.cell(row = line_counter, column = 25).value = google.getScore()
            sheet.cell(row = line_counter, column = 26).value = google.getMagnitude()
        except Exception as exception:
            # skip rows with empty cells / API failures and keep going
            print (exception)
            continue
    # Save the file and notify the user
    file.save(fileName)
    print("google analyze finish")
if __name__ == "__main__":
main()
| 3.421875 | 3 |
q2/a2/lcss.py | WilhelmStein/HackeMining2 | 0 | 12760423 | <gh_stars>0
import numpy as np
from harversine import harvesine
def lcss(u, v, match_error_margin):
    """Longest common subsequence of two GPS trajectories.

    Two points match when their haversine distance is at most
    ``match_error_margin``. Returns the matched subsequence as a list of
    points taken from ``v``.

    :param u: first trajectory (sequence of points accepted by harvesine)
    :param v: second trajectory
    :param match_error_margin: maximum distance for two points to match
    """
    def bt(t, u, v, match_error_margin, i, j):  # Backtrack through matrix t in order to find the lcss
        if i == 0 or j == 0:
            return []
        if harvesine(u[i - 1], v[j - 1]) <= match_error_margin:
            return bt(t, u, v, match_error_margin, i - 1, j - 1) + [v[j - 1]]
        elif t[i][j - 1] > t[i - 1][j]:
            return bt(t, u, v, match_error_margin, i, j - 1)
        else:
            return bt(t, u, v, match_error_margin, i - 1, j)

    t = np.zeros((len(u) + 1, len(v) + 1))  # LCSS dynamic-programming table
    # Fix: ``range`` instead of the Python-2-only ``xrange`` so this runs
    # under Python 3 as well.
    for i in range(1, len(u) + 1):
        for j in range(1, len(v) + 1):
            if harvesine(u[i - 1], v[j - 1]) <= match_error_margin:
                t[i][j] = t[i - 1][j - 1] + 1
            else:
                t[i][j] = max([t[i][j - 1], t[i - 1][j]])

    return bt(t, u, v, match_error_margin, len(u), len(v))
| 2.28125 | 2 |
others/KUPC/2020/d.py | fumiyanll23/AtCoder | 0 | 12760424 | <filename>others/KUPC/2020/d.py<gh_stars>0
from math import sqrt
N = int(input())
n = int(sqrt(N))
stick = [2*(i+1)-1 for i in range(N)]
if N%2 == 0:
print(N // 2)
for i in range(N//2):
print(2, stick[i], stick[-(i+1)])
elif n**2 == N:
idx = [(i%n-i//n)%n for i in range(n)]
print(n)
for i in range(n):
else:
print('impossible')
| 3.40625 | 3 |
src/sentry/migrations/0029_discover_query_upgrade.py | pierredup/sentry | 0 | 12760425 | # -*- coding: utf-8 -*-
# Generated by Hand
from __future__ import unicode_literals, print_function
from sentry.utils.query import RangeQuerySetWrapperWithProgressBar
from django.db import migrations
# SearchVisitor.numeric_keys + SearchVisitor.date_keys
OPERATOR_KEYS = set(
[
"project_id",
"project.id",
"issue.id",
"device.battery_level",
"device.charging",
"device.online",
"device.simulator",
"error.handled",
"stack.colno",
"stack.in_app",
"stack.lineno",
"stack.stack_level",
"transaction.duration",
"apdex",
"impact",
"p75",
"p95",
"p99",
"error_rate",
"start",
"end",
"first_seen",
"last_seen",
"time",
"timestamp",
"transaction.start_time",
"transaction.end_time",
]
)
# Aggregates are now fields
def convert_field(fieldname, unique, reverse):
    """Translate a v1 aggregate/orderby field name into its v2 form.

    ``count`` becomes ``count()``, unique aggregates become
    ``count_unique(<field>)``, and a descending sort is prefixed with ``-``.
    """
    if fieldname == "count":
        fieldname = u"count()"
    elif unique:
        fieldname = u"count_unique({})".format(fieldname)
    if reverse:
        fieldname = u"-{}".format(fieldname)
    return fieldname
def prepare_value(value):
    """Convert a v1 condition value into v2 search syntax: SQL-style ``%``
    wildcards become ``*``, and values containing spaces are wrapped in
    double quotes unless they already start with one."""
    converted = value.replace("%", "*")
    needs_quoting = " " in converted and not converted.startswith('"')
    return u'"{}"'.format(converted) if needs_quoting else converted
def convert(DiscoverSavedQuery, DiscoverSavedQueryProject, saved_query, name_extra=" (migrated from legacy discover)"):
    """Create a v2 Discover saved query from a v1 one.

    Builds the v2 ``query`` payload (fields, orderby, time range, search
    string) from the v1 structure, persists it as a new DiscoverSavedQuery
    row with ``name_extra`` appended to the name, and copies the project
    associations over. Returns the input unchanged if it is already v2,
    otherwise returns the newly created v2 query.

    The model classes are passed in because this runs inside a migration
    (historical models from ``apps.get_model``).
    """
    if saved_query.version == 2:
        # nothing to do! Already v2 :)
        return saved_query
    updated_query = {
        u"environment": [],
        u"fields": saved_query.query.get('fields', []),
        u"orderby": u"",
        u"query": [],  # Will become a string later via join
    }

    # Time window: explicit range wins, then start/end pair, else 14d default.
    if "range" in saved_query.query:
        updated_query["range"] = saved_query.query["range"]
    elif "start" in saved_query.query and "end" in saved_query.query:
        updated_query["start"] = saved_query.query["start"]
        updated_query["end"] = saved_query.query["end"]
    else:
        updated_query["range"] = "14d"

    # Aggregates are now fields in v2.
    for aggregate in saved_query.query.get("aggregations", []):
        if aggregate[0] == "uniq":
            field = convert_field(aggregate[1], True, False)
        else:
            field = convert_field(aggregate[0], False, False)
        if field:
            updated_query["fields"].append(field)

    # Order by
    orderby = saved_query.query.get('orderby', "")
    unique = reverse = False
    if orderby.startswith('-'):
        reverse = True
        orderby = orderby[1:]
    if orderby.startswith('uniq_'):
        unique = True
        orderby = orderby[5:].replace('_', '.')

    field = convert_field(orderby, unique, reverse)
    if field:
        updated_query['orderby'] = field
        if reverse:
            field = field[1:]
        # the ordered-by column must also be selected in v2
        if field not in updated_query["fields"]:
            updated_query["fields"].append(field)

    # Conditions become a query now
    for condition in saved_query.query.get("conditions", []):
        column, operator, value = condition
        if column in ['contexts.key']:
            column = "tags[contexts.key]"

        if column == "environment" and operator == "=":
            updated_query['environment'].append(value.strip('"'))
        elif operator == 'IS NOT NULL':
            updated_query["query"].append(u"has:{}".format(column))
        elif operator == 'IS NULL':
            updated_query["query"].append(u"!has:{}".format(column))
        elif column in OPERATOR_KEYS:
            # numeric/date keys keep their comparison operator ('=' is implied)
            updated_query["query"].append(u"{}:{}{}".format(
                column,
                operator if operator != '=' else '',
                value
            ))
        elif operator in ['LIKE', '=']:
            updated_query["query"].append(u"{}:{}".format(column, prepare_value(value)))
        elif operator in ['NOT LIKE', '!=']:
            updated_query["query"].append(u"!{}:{}".format(column, prepare_value(value)))

    updated_query["query"] = ' '.join(updated_query["query"])

    # Create the version 2 query
    new_query = DiscoverSavedQuery.objects.create(
        organization=saved_query.organization,
        name=saved_query.name + name_extra,
        query=updated_query,
        version=2,
    )

    # Set project_ids
    saved_query_project_ids = DiscoverSavedQueryProject.objects.filter(
        discover_saved_query=saved_query
    ).values_list("project", flat=True)

    # This is DiscoverSavedQueryProject.set_projects
    DiscoverSavedQueryProject.objects.filter(discover_saved_query=new_query).exclude(
        project__in=saved_query_project_ids
    ).delete()
    existing_project_ids = DiscoverSavedQueryProject.objects.filter(
        discover_saved_query=new_query
    ).values_list("project", flat=True)
    new_project_ids = list(set(saved_query_project_ids) - set(existing_project_ids))
    DiscoverSavedQueryProject.objects.bulk_create(
        [
            DiscoverSavedQueryProject(project_id=project_id, discover_saved_query=new_query)
            for project_id in new_project_ids
        ]
    )
    return new_query
def migrate_v1_queries(apps, schema_editor):
    """
    Creates v2 versions of existing v1 queries.

    Uses historical models (``apps.get_model``) and a progress-bar query
    wrapper so ops can monitor the data migration while it runs.
    """
    DiscoverSavedQuery = apps.get_model("sentry", "DiscoverSavedQuery")
    DiscoverSavedQueryProject = apps.get_model("sentry", "DiscoverSavedQueryProject")

    """ Seq Scan on sentry_discoversavedquery
      (cost=0.00..102.86 rows=1601 width=284)
      (actual time=0.027..1.158 rows=1275 loops=1)
      Filter: (version = 1)
      Rows Removed by Filter: 69
    Planning time: 0.929 ms
    Execution time: 1.296 ms
    """
    queryset = DiscoverSavedQuery.objects.filter(version=1)

    for query in RangeQuerySetWrapperWithProgressBar(queryset):
        convert(DiscoverSavedQuery, DiscoverSavedQueryProject, query)
class Migration(migrations.Migration):
    """Data migration that upgrades legacy (v1) Discover saved queries to
    v2; reversible as a no-op since v1 rows are left untouched."""

    # This flag is used to mark that a migration shouldn't be automatically run in
    # production. We set this to True for operations that we think are risky and want
    # someone from ops to run manually and monitor.
    # General advice is that if in doubt, mark your migration as `is_dangerous`.
    # Some things you should always mark as dangerous:
    # - Adding indexes to large tables. These indexes should be created concurrently,
    #   unfortunately we can't run migrations outside of a transaction until Django
    #   1.10. So until then these should be run manually.
    # - Large data migrations. Typically we want these to be run manually by ops so that
    #   they can be monitored. Since data migrations will now hold a transaction open
    #   this is even more important.
    # - Adding columns to highly active tables, even ones that are NULL.
    is_dangerous = False

    atomic = False

    dependencies = [
        ("sentry", "0028_user_reports"),
    ]

    operations = [
        migrations.RunPython(migrate_v1_queries, reverse_code=migrations.RunPython.noop),
    ]
| 1.953125 | 2 |
time_management/main.py | artorias111/time-management | 0 | 12760426 | <gh_stars>0
import interface_mode
import facade_tasks
import kronos
import os
import getpass
import ddl
import sqlitedb
import speech_cadence_emulator
def main():
    """Program entry point: delegate straight to the startup routine."""
    on_startup()
def on_startup():
    """Initialize the SQLite database, print a spoken-style greeting with
    the overdue-task count, and enter the MODE menu loop (blocks)."""
    # Initialize database
    db_v1 = sqlitedb.SQLiteDatabase(os.path.join(os.path.dirname(__file__), "TM_v1.db"))

    # Scan for and create tables
    data_def = ddl.DataDefinitionLanguage(db_v1)
    data_def.create_all_tables()

    # Create launch message
    tasks_facade = facade_tasks.TasksFacade(db_v1)
    time_of_day = kronos.get_time_of_day()
    user = getpass.getuser()
    number_of_overdue_items = len(tasks_facade.get_overdue_tasks())
    welcome_statement = f"\nGood {time_of_day} {user}. You have {number_of_overdue_items} overdue items.\n"
    speech_cadence_emulator.emulate_speech_cadence(welcome_statement)

    # Launch MODE interface
    mode = interface_mode.InterfaceMode(db_v1)
    mode.run_menu_loop_mode()
if __name__ == "__main__":
main()
| 2.59375 | 3 |
TBOT/tbot/tbot_single_site_status.py | omikabir/omEngin | 0 | 12760427 | import pandas as pd
import cx_Oracle
def query(code):
    """Look up site-down alarms from the last 7 days for sites matching
    *code* and return a per-technology (2G/3G/4G) status string.

    NOTE(review): database credentials are hard-coded below -- move them to
    configuration/secret storage. Also, *code* is concatenated directly
    into the SQL LIKE clause (injection risk); parameterize if callers pass
    untrusted input.
    """
    conn = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
    print(conn)
    qry1 = """Select * from (select distinct Summary AlarmText,(Case when Summary like '%2G%' then '2G' when
Summary like '%3G%' then '3G' else '4G' end) as Technology,CUSTOMATTR15 as SITECODE,FIRSTOCCURRENCE StartTime,
ROUND((Sysdate-FIRSTOCCURRENCE)*24*60,2) DurationMIn,CLEARTIMESTAMP EndTime,CUSTOMATTR26 CRNumber,TTRequestTime, TTSequence, CUSTOMATTR23 as CI from alerts_status
where FirstOccurrence between TO_DATE(TO_CHAR(SYSDATE - 7, 'YYYYMMDD') || '0000', 'YYYYMMDDHH24MI') and TO_DATE(TO_CHAR(SYSDATE, 'YYYYMMDD') || '2359', 'YYYYMMDDHH24MI')
and X733EventType = 100 and agent != 'Total Site Down'--and CUSTOMATTR15 != 'UNKNOWN'
and Severity!= 0 and CustomAttr27 in (0,1) and Manager <> 'TSD Automation')t where t.Technology IN ('2G','3G','4G') and SITECODE like '%"""
    qry2 = qry1 + code + "%'"
    try:
        df = pd.read_sql(qry2, con=conn)
        print('try success')
        conn.close()
    except:
        # one retry on a fresh connection if the first read fails
        connx = cx_Oracle.connect('SOC_READ', 'soc_read', 'ossam-cluster-scan.robi.com.bd:1721/RBPB.robi.com.bd')
        df = pd.read_sql(qry2, con=connx)
        print('Except trigger')
        connx.close()
    print(df)
    rows = df.shape[0]
    heap = code + ":"
    if rows != 0:
        # one "Down" line per technology found among the alarms
        for i in range(0, len(df)):
            tech = df.iloc[i]['TECHNOLOGY']
            tm = df.iloc[i]['STARTTIME']
            if '2G' in tech:
                heap = heap + '\n' + "2G: Down, " + "Downtime: " + str(tm)
            if '3G' in tech:
                heap = heap + '\n' + "3G: Down, " + "Downtime: " + str(tm)
            if '4G' in tech:
                heap = heap + '\n' + "4G: Down, " + "Downtime: " + str(tm)
        # print(heap)
    else:
        return heap + '\nAll Tech are up'
    return heap
controlcenter/__init__.py | EnriqueSoria/django-controlcenter | 980 | 12760428 | from .dashboards import Dashboard # NOQA
| 1.03125 | 1 |
emr/concatenatepv3.py | TuomoKareoja/phone-sentiment-analysis | 0 | 12760429 | #!/usr/bin/python
"""
To use, please type in:
python concatenate.py
The script will only look at files that are within
folders that are one level below the directory supplied
to the script. For example, if no argument is given to
the script, script will parse all folders within the
current working directory as such:
./output_folder1/output_file1
./output_folder1/output_file2
./output_folder2/output_file1
./output_folder2/output_file2
./output_folder_n/output_file_n
"""
import pandas as pd
import sys
import re
import os
raw_path = os.path.join("data", "raw")
def main():
    """Concatenate all reducer output files under data/raw, label the
    columns, and split the result into website-list and factor CSVs."""
    # combine all files in all subdirectories
    combineFiles(raw_path)

    # add headers, indices, remove tuple parentheses
    df = pd.read_csv(
        os.path.join(raw_path, "combinedFile.csv"), delimiter=",", quotechar='"'
    )

    # column order must match the fields emitted by the EMR reducers
    headerLabels = [
        "url",
        "iphone",
        "samsunggalaxy",
        "sonyxperia",
        "nokialumina",
        "htcphone",
        "ios",
        "googleandroid",
        "iphonecampos",
        "samsungcampos",
        "sonycampos",
        "nokiacampos",
        "htccampos",
        "iphonecamneg",
        "samsungcamneg",
        "sonycamneg",
        "nokiacamneg",
        "htccamneg",
        "iphonecamunc",
        "samsungcamunc",
        "sonycamunc",
        "nokiacamunc",
        "htccamunc",
        "iphonedispos",
        "samsungdispos",
        "sonydispos",
        "nokiadispos",
        "htcdispos",
        "iphonedisneg",
        "samsungdisneg",
        "sonydisneg",
        "nokiadisneg",
        "htcdisneg",
        "iphonedisunc",
        "samsungdisunc",
        "sonydisunc",
        "nokiadisunc",
        "htcdisunc",
        "iphoneperpos",
        "samsungperpos",
        "sonyperpos",
        "nokiaperpos",
        "htcperpos",
        "iphoneperneg",
        "samsungperneg",
        "sonyperneg",
        "nokiaperneg",
        "htcperneg",
        "iphoneperunc",
        "samsungperunc",
        "sonyperunc",
        "nokiaperunc",
        "htcperunc",
        "iosperpos",
        "googleperpos",
        "iosperneg",
        "googleperneg",
        "iosperunc",
        "googleperunc",
    ]

    df.columns = headerLabels
    df.index.name = "id"

    # output factor and url files (first column = url, rest = factors)
    df.to_csv(
        os.path.join(raw_path, "concatenated_websites.csv"),
        columns=headerLabels[:1],
        quotechar='"',
        sep=",",
        header=True,
    )
    df.to_csv(
        os.path.join(raw_path, "concatenated_factors.csv"),
        columns=headerLabels[1:],
        quotechar='"',
        sep=",",
        header=True,
    )

    # cleanup
    os.remove(os.path.join(raw_path, "combinedFile.csv"))
    print("Successfully processed " + str(fileCount) + " files")
    sys.exit()
def combineFiles(file):
    """Concatenate every file found one directory level below *file* into
    raw_path/combinedFile.csv, keeping only lines matched by ``httpRe``.

    Side effects: writes combinedFile.csv and sets the module-global
    ``fileCount`` to the number of files processed.
    """
    global fileCount
    fileCount = 0
    # NOTE(review): ``[http]`` is a character class (any of h/t/p), so this
    # pattern matches almost any line; the intent was probably r"http".
    # Left unchanged because "fixing" it alters which lines are kept.
    httpRe = re.compile(r".*?[http]")
    # ``with`` guarantees the output handle is flushed and closed even on
    # error (the original left it open).
    with open(os.path.join(raw_path, "combinedFile.csv"), "w+") as outfile:
        for dirname, dirnames, filenames in os.walk(file):
            # For each sub folder
            for subdirname in dirnames:
                subdirpath = os.path.join(dirname, subdirname)
                for fileName in os.listdir(subdirpath):
                    fileCount += 1
                    print("Processing " + fileName + "...")
                    with open(subdirpath + "/" + fileName) as infile:
                        for line in infile:
                            # make sure we're reading reducer output files
                            if httpRe.search(line):
                                outfile.write(line)
    return None
if __name__ == "__main__":
main()
| 3.421875 | 3 |
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/server/handlers/graphql/__init__.py | Maximilien-R/cookiecutter-tartiflette-aiohttp | 3 | 12760430 | from .handler import handle_graphql
__all__ = ("handle_graphql",)
| 1.070313 | 1 |
libp2p/routing/kademlia/kademlia_content_router.py | ChihChengLiang/py-libp2p | 0 | 12760431 | <reponame>ChihChengLiang/py-libp2p
from libp2p.routing.interfaces import IContentRouting
class KadmeliaContentRouter(IContentRouting):
    """Kademlia-DHT-backed content router. Both methods are currently
    unimplemented stubs (bodies are docstrings only)."""

    def provide(self, cid, announce=True):
        """
        Provide adds the given cid to the content routing system. If announce is True,
        it also announces it, otherwise it is just kept in the local
        accounting of which objects are being provided.
        """
        # the DHT finds the closest peers to `key` using the `FIND_NODE` RPC
        # then sends a `ADD_PROVIDER` RPC with its own `PeerInfo` to each of these peers.

    def find_provider_iter(self, cid, count):
        """
        Search for peers who are able to provide a given key.
        Returns an iterator of peer.PeerInfo.
        """
| 2.90625 | 3 |
src/sisl_games/pursuit/test.py | weepingwillowben/pymarl | 0 | 12760432 | <reponame>weepingwillowben/pymarl
import numpy as np
from .pursuit import env as _env
import time
# from .utils import two_d_maps
# Environment configuration: 5x5 grid, one evader, two pursuers.
xs = 5
ys = 5
obs_range = 3
n_evaders = 1
n_pursuers = 2
# obs_range should be odd 3, 5, 7, etc
# Test with 2 pursuers, 1 evader
env = _env(n_pursuers = n_pursuers, n_evaders = n_evaders, xs = xs, ys = ys, obs_range = obs_range)
# Use the default env
# env = _env()

done = False

# NOTE(review): ``global`` at module scope is a no-op; these single-element
# arrays are mutated in place from the on_key callback below.
global _quit_loop, _actions, _agent_id
_quit_loop = np.array([0])
_actions = np.array([4]*env.num_agents)  # 4 == "stay" for every agent
_agent_id = np.array([0])

env.reset()

# controlling only the pursuers
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
def on_key(event):
    """Matplotlib key-press callback: mutates the module-level arrays
    (_quit_loop, _agent_id, _actions) to drive the interactive loop.

    escape = quit, backspace = reset, 'j' = cycle controlled agent,
    arrow keys = set the controlled agent's next action (0=left, 1=right,
    2=down, 3=up).
    """
    # print('you pressed', event.key)
    if event.key == "escape":
        print("escape")
        _quit_loop[0] = 1
        # break
    if event.key == "backspace":
        env.reset()
    if event.key == "j":
        # pressing 'j' moves the focus of control to the next agent
        # control rolls over to the first agent
        _agent_id[0] = (_agent_id[0] + 1) % env.num_agents
    if event.key == "left":
        # p1: left
        _actions[_agent_id[0]] = 0
    if event.key == "right":
        # p1: right
        _actions[_agent_id[0]] = 1
    if event.key == "up":
        # p1: up
        _actions[_agent_id[0]] = 3
    if event.key == "down":
        # p1: down
        _actions[_agent_id[0]] = 2
# hook keyboard input into the render window
cid = fig.canvas.mpl_connect('key_press_event', on_key)

done = False
num_frames = 0
total_reward = 0

# interactive episode loop: render, read queued actions, step the env
# start = time.time()
# for _ in range(100):
while not done:
    env.render()
    if _quit_loop[0]:
        break
    # actions should be a dict of numpy arrays
    action_dict = dict(zip(env.agent_ids, _actions))
    observation, rewards, done_dict, info = env.step(action_dict)
    # episode ends as soon as any agent reports done
    done = any(list(done_dict.values()))
    total_reward += sum(rewards.values())
    print("step reward = ", sum(rewards.values()))
    if done:
        print("Total reward", total_reward, done)
    # reset all queued actions back to "stay" after each step
    _actions = np.array([4]*env.num_agents)
# end = time.time()
# print("FPS = ", 100/(end-start))

env.render()
time.sleep(2)
env.close()
| 2.71875 | 3 |
samtranslator/__init__.py | armaciej/serverless-application-model | 24 | 12760433 | <reponame>armaciej/serverless-application-model<filename>samtranslator/__init__.py
__version__ = "1.31.0"
| 0.941406 | 1 |
exercises/pt/test_04_04.py | tuanducdesign/spacy-course | 0 | 12760434 | <reponame>tuanducdesign/spacy-course<gh_stars>0
def test():
    """Grade the learner's solution (spaCy course exercise checker).

    ``__solution__`` (the submitted source) and ``__msg__`` (the feedback
    channel) are injected into the globals by the course runtime -- they
    are not defined in this file.
    """
    assert (
        'spacy.blank("en")' in __solution__
    ), "Você inicializou um fluxo de processamento em Inglês vazio?"
    assert "DocBin(docs=docs)" in __solution__, "Você criou o DocBin corretamente?"
    assert "doc_bin.to_disk(" in __solution__, "Você utilizou o método to_disk?"
    assert "train.spacy" in __solution__, "Você criou um arquivo com o nome correto?"
    __msg__.good("Muito bem! Tudo certo por aqui.")
| 2.3125 | 2 |
python/pumapy/io/export_texgen_weave.py | EricAtORS/puma | 14 | 12760435 | from TexGen.Core import *
import numpy as np
from os import path
def export_weave_vtu(filename, weave, domain, max_dim_nvox, round_vox_up=True, export_orientation=True):
    """ Exporting weave to vtu, to be read by pumapy

        The domain is snapped so it divides into an integer number of cubic
        voxels: the largest dimension gets exactly max_dim_nvox voxels and the
        shorter dimensions are rounded up (growing the domain) or down
        (shrinking it) according to round_vox_up.

        :param filename: filepath and name
        :type filename: string
        :param weave: weave object, as defined in TexGen
        :type weave: CTextile or child class of CTextile
        :param domain: domain size object, as defined in TexGen
        :type domain: CDomainPlanes
        :param max_dim_nvox: number of voxels to add in the largest domain dimension
        :type max_dim_nvox: int
        :param round_vox_up: for the shorter dimensions, round number of voxels up (for +/-1 vox)
        :type round_vox_up: bool
        :param export_orientation: specify whether to export orientation
        :type export_orientation: bool
        :return: filename of weave exported (input filename + dimensions)
        :rtype: string
    """
    if not isinstance(domain, CDomainPlanes):
        raise Exception("Domain needs to be of CDomainPlanes type.")
    if not isinstance(filename, str):
        raise Exception("Filename has to be a string.")
    if not path.exists(path.split(filename)[0]):
        raise Exception("Directory " + path.split(filename)[0] + " not found.")

    min_bounds = XYZ()
    max_bounds = XYZ()
    domain.GetBoxLimits(min_bounds, max_bounds)
    weave.AssignDomain(CDomainPlanes(min_bounds, max_bounds))

    # cubic voxel edge set by the largest domain dimension
    lengths = np.array([max_bounds.x - min_bounds.x, max_bounds.y - min_bounds.y, max_bounds.z - min_bounds.z])
    max_len = np.max(lengths)
    mask = np.zeros(3, dtype=bool)
    mask[lengths == max_len] = True
    voxel_length = max_len / float(max_dim_nvox)

    nvox = np.zeros(3, dtype=int)
    nvox[mask] = max_dim_nvox
    nvox[~mask] = (lengths[~mask] / voxel_length).astype(int)  # truncates

    # leftover length in the short dimensions that doesn't fill a whole voxel
    rem = np.zeros(3, dtype=float)
    rem[~mask] = lengths[~mask] - voxel_length * nvox[~mask]
    if round_vox_up:
        rem[~mask] = voxel_length - rem[~mask]
        max_bounds = XYZ(max_bounds.x + rem[0],
                         max_bounds.y + rem[1],
                         max_bounds.z + rem[2])
        nvox[~mask] += 1
    else:
        max_bounds = XYZ(max_bounds.x - rem[0], max_bounds.y - rem[1], max_bounds.z - rem[2])
    weave.AssignDomain(CDomainPlanes(min_bounds, max_bounds))

    mesh = CRectangularVoxelMesh()
    print("Exporting " + filename + ".vtu ... ", end='')
    filename += "_" + str(nvox[0]) + "_" + str(nvox[1]) + "_" + str(nvox[2])
    mesh.SaveVoxelMesh(weave, filename, int(nvox[0]), int(nvox[1]), int(nvox[2]), False, export_orientation,
                       MATERIAL_CONTINUUM, 0, VTU_EXPORT)
    print("Done")
    return filename
| 2.546875 | 3 |
recipes/able_recipe/__init__.py | internetoftobi/Eja-mobile-app | 24 | 12760436 | <gh_stars>10-100
"""
Android Bluetooth Low Energy
"""
from pythonforandroid.recipe import PythonRecipe
from pythonforandroid.toolchain import current_directory, info, shprint
import sh
from os.path import join
class AbleRecipe(PythonRecipe):
    """python-for-android build recipe for ABLE (Android Bluetooth Low
    Energy): stages the sources into the build dir and copies the Java
    classes into the APK's class directory after each arch build."""
    name = 'able_recipe'
    depends = ['python3', 'setuptools', 'android']
    call_hostpython_via_targetpython = False
    install_in_hostpython = True

    def prepare_build_dir(self, arch):
        """Recreate the build dir and copy the able package plus setup.py in."""
        build_dir = self.get_build_dir(arch)
        assert build_dir.endswith(self.name)
        shprint(sh.rm, '-rf', build_dir)
        shprint(sh.mkdir, build_dir)
        for filename in ('../../able', 'setup.py'):
            shprint(sh.cp, '-a', join(self.get_recipe_dir(), filename),
                    build_dir)

    def postbuild_arch(self, arch):
        """After the arch build, copy the bundled Java sources to javaclass_dir."""
        super(AbleRecipe, self).postbuild_arch(arch)
        info('Copying able java class to classes build dir')
        with current_directory(self.get_build_dir(arch.arch)):
            shprint(sh.cp, '-a', join('able', 'src', 'org'),
                    self.ctx.javaclass_dir)


# module-level instance picked up by the p4a recipe loader
recipe = AbleRecipe()
| 2.046875 | 2 |
bin/remove_numbers.py | olesmith/SmtC | 0 | 12760437 | #!/usr/bin/python
import os,re,glob
# Python 2 script: strip leading digits/underscores from the names of all
# regular files in the current directory and replace awkward characters
# (&, parens, spaces) before renaming them via /bin/mv.
entries=glob.glob("*")
entries=sorted(entries)

files={}
for filename in entries:
    if (os.path.isfile(filename)):
        files[ filename ]=True

files=files.keys()
files.sort()

for filename in files:
    rfilename=filename

    # target name: drop leading digits, then leading spaces/_-.& runs
    regex='^\d+'
    rfilename=re.sub(regex,'',rfilename)

    regex='^\s+'
    rfilename=re.sub(regex,'_',rfilename)

    regex='^[_\-\.\&]+'
    rfilename=re.sub(regex,'',rfilename)

    # shell-escape the source name while sanitizing the target name
    regex='[\&]'
    filename=re.sub(regex,'\\)',filename)

    regex='[\(]'
    filename=re.sub(regex,'\\(',filename)
    rfilename=re.sub(regex,'_',rfilename)

    regex='[\)]'
    filename=re.sub(regex,'\\)',filename)
    rfilename=re.sub(regex,'_',rfilename)

    regex='[\']'
    filename=re.sub(regex,'\\\'',filename)
    rfilename=re.sub(regex,'\\\'',rfilename)

    regex='_[_]+'
    rfilename=re.sub(regex,'_',rfilename)

    regex='\s+'
    filename=re.sub(regex,'\\ ',filename)
    rfilename=re.sub(regex,'_',rfilename)

    if (filename!=rfilename):
        # NOTE(review): builds a shell command from file names -- fragile for
        # names with metacharacters; subprocess with a list would be safer.
        commands=[
            #"echo",
            "/bin/mv",
            "-f",
            filename,
            rfilename
            ]
        print " ".join(commands)

        try:
            output=os.system( " ".join(commands) )
        except:
            print "Unable to move: "+filename+" --> "+rfilename
            #exit()
| 2.78125 | 3 |
ppqm/utils/files.py | hagenmuenkler/ppqm | 0 | 12760438 | <gh_stars>0
import tempfile
import weakref as _weakref
from pathlib import Path
class WorkDir(tempfile.TemporaryDirectory):
    """ Like TemporaryDirectory, with the possibility of keeping the files for debug.

    When ``keep=True`` the directory is never removed automatically (neither
    on context-manager exit nor on garbage collection).

    NOTE(review): ``TemporaryDirectory.__init__`` is deliberately not called
    (it would create a second directory); its body is replicated here using
    the private ``_cleanup`` classmethod, so this depends on tempfile
    internals. Also, with ``keep=True`` no ``_finalizer`` is set, so calling
    the inherited ``cleanup()`` directly would raise AttributeError --
    confirm callers only use the context-manager protocol.
    """

    def __init__(self, suffix=None, prefix=None, dir=None, keep=False):
        # super().__init__(suffix=suffix, dir=dir, prefix=prefix)
        self.keep_directory = keep
        self.name = tempfile.mkdtemp(suffix, prefix, dir)
        if not keep:
            # same finalizer TemporaryDirectory.__init__ would install:
            # removes the tree when the object is garbage collected
            self._finalizer = _weakref.finalize(
                self,
                super()._cleanup,
                self.name,
                warn_message="Implicitly cleaning up {!r}".format(self),
            )

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        # only delete the tree on exit when the directory is not being kept
        if not self.keep_directory:
            super().__exit__(exc_type, exc_val, exc_tb)

    def get_path(self):
        """Return the directory as a pathlib.Path."""
        return Path(self.name)
| 2.390625 | 2 |
Source Code/client/RaspberryPi_backup/tcp_sender_multi_connection.py | PerryLai/AutoTest-Platform | 0 | 12760439 | <reponame>PerryLai/AutoTest-Platform
#!/usr/bin/env python3
import socket
import time
def main():
    """Repeatedly open an IPv6 TCP connection to a fixed peer, send a 1 KiB
    payload, and close -- one fresh socket every 2 seconds until interrupted.

    NOTE(review): ``counter`` is passed as the flowinfo field of the
    destination address tuple and incremented per connection -- presumably
    intentional for the test platform; confirm.
    """
    payload = [0x0b] * 1024
    counter = 1
    try:
        while True:
            s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, 0)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            # option 25 is SO_BINDTODEVICE: pin the socket to interface net0.2
            s.setsockopt(socket.SOL_SOCKET, 25, str("net0.2" + '\0').encode('utf-8'))
            s.bind(("fd53:fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b", 13400))
            s.settimeout(2)
            s.connect(("fd53:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b", counter, 0, 0))
            s.send(bytes(payload))
            counter = counter + 1
            s.shutdown(socket.SHUT_RDWR)
            s.close()
            time.sleep(2)
    except socket.timeout:
        print("except - socket.timeout")
        pass
    except KeyboardInterrupt:
        print("except - KeyboardInterrupt")
        pass

    print("counter = %d" % counter)
if __name__ == '__main__':
main()
| 2.453125 | 2 |
sdks/python/apache_beam/examples/complete/estimate_pi_test.py | aaltay/incubator-beam | 9 | 12760440 | <gh_stars>1-10
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the estimate_pi example."""
# pytype: skip-file
import logging
import unittest
import pytest
from apache_beam.examples.complete import estimate_pi
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import BeamAssertException
from apache_beam.testing.util import assert_that
def in_between(lower, upper):
  """Build a Beam matcher asserting the single (..., ..., estimate) result
  has its estimate within [lower, upper]."""
  def _in_between(actual):
    _, _, estimate = actual[0]
    if estimate < lower or estimate > upper:
      message = 'Failed assert: %f not in [%f, %f]' % (estimate, lower, upper)
      raise BeamAssertException(message)

  return _in_between
@pytest.mark.examples_postcommit
class EstimatePiTest(unittest.TestCase):
  """Post-commit example test: runs the Monte-Carlo pi pipeline and checks
  the estimate lands near pi."""

  def test_basics(self):
    with TestPipeline() as p:
      result = p | 'Estimate' >> estimate_pi.EstimatePiTransform(5000)

      # Note: Probabilistically speaking this test can fail with a probability
      # that is very small (VERY) given that we run at least 500 thousand
      # trials.
      assert_that(result, in_between(3.125, 3.155))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 2.09375 | 2 |
storydb/core/models/timeinterval.py | brettkromkamp/story-db | 10 | 12760441 | <reponame>brettkromkamp/story-db
"""
Place class. Part of the StoryTechnologies project.
August 17, 2019
<NAME> (<EMAIL>)
"""
class TimeInterval:
    """A span of story time bounded by two time points."""

    def __init__(self, from_time_point: str, to_time_point: str) -> None:
        """Store the start and end time points of the interval."""
        self.from_time_point, self.to_time_point = from_time_point, to_time_point
| 2.59375 | 3 |
Python/OneLang/One/Transforms/LambdaCaptureCollector.py | onelang/OneLang-CrossCompiled | 2 | 12760442 | <reponame>onelang/OneLang-CrossCompiled
from onelang_core import *
import OneLang.One.AstTransformer as astTrans
import OneLang.One.Ast.AstTypes as astTypes
import OneLang.One.Ast.Types as types
import OneLang.One.Ast.Interfaces as ints
import OneLang.One.Ast.References as refs
class LambdaCaptureCollector(astTrans.AstTransformer):
    """AST pass that records, for every lambda, which outer variables it
    captures (referenced but not declared in its own scope); results are
    stored on ``lambda_.captures``."""

    def __init__(self):
        # stack of scope dicts for nested lambdas (dicts are used as
        # insertion-ordered sets: values are always None)
        self.scope_var_stack = []
        self.scope_vars = None
        self.captured_vars = None
        super().__init__("LambdaCaptureCollector")

    def visit_lambda(self, lambda_):
        """Open a fresh scope for this lambda, visit it, then attach the
        collected captures and restore the enclosing scope."""
        if self.scope_vars != None:
            self.scope_var_stack.append(self.scope_vars)
        self.scope_vars = dict()
        self.captured_vars = dict()
        super().visit_lambda(lambda_)
        lambda_.captures = []
        for capture in self.captured_vars.keys():
            lambda_.captures.append(capture)
        self.scope_vars = self.scope_var_stack.pop() if len(self.scope_var_stack) > 0 else None
        return lambda_

    def visit_variable(self, variable):
        """A declaration: register the variable in the current lambda scope."""
        if self.scope_vars != None:
            self.scope_vars[variable] = None
        return variable

    def visit_variable_reference(self, var_ref):
        """A use: field/property references never capture; a local variable
        not declared in the current scope is recorded as a capture."""
        if isinstance(var_ref, refs.StaticFieldReference) or isinstance(var_ref, refs.InstanceFieldReference) or isinstance(var_ref, refs.StaticPropertyReference) or isinstance(var_ref, refs.InstancePropertyReference) or self.scope_vars == None:
            return var_ref
        vari = var_ref.get_variable()
        if not vari in self.scope_vars:
            self.captured_vars[vari] = None
        return var_ref
toolcli/__init__.py | sslivkoff/toolcli | 0 | 12760443 | """toolcli makes it easy to create structured hierarchical cli tools"""
from .command_utils import *
from .capture_utils import *
from .file_edit_utils import *
from .file_validate_utils import *
from .input_utils import *
from .style_utils import *
from .terminal_utils import *
from .spec import *
__version__ = '0.5.3'
| 0.980469 | 1 |
nexus/library/gamess_input.py | bwvdg/qmcpack | 0 | 12760444 | <filename>nexus/library/gamess_input.py
##################################################################
## (c) Copyright 2015- by <NAME> ##
##################################################################
#====================================================================#
# gamess_input.py #
# Support for GAMESS input file I/O #
# #
# Content summary: #
# GamessInput #
# Input class for the GAMESS code. #
# Capable of reading/writing arbitrary GAMESS input files. #
# #
# generate_gamess_input #
# User function to create arbitrary GAMESS input. #
# #
# KeywordGroup #
# Represents an arbitrary keyword group in the input file. #
# #
# KeywordSpecGroup #
# Base class for specialized keyword groups. #
# Derived classes enforce the keyword specification. #
# See ContrlGroup, SystemGroup, GuessGroup, ScfGroup, #
# McscfGroup, DftGroup, GugdiaGroup, DrtGroup, CidrtGroup, #
# and DetGroup #
# #
# FormattedGroup #
# Represents strict machine-formatted input groups. #
# #
#====================================================================#
import os
from numpy import array,ndarray,abs
from generic import obj
from periodic_table import pt
from developer import DevBase
from nexus_base import nexus_noncore
from simulation import SimulationInput
from debug import *
class GIbase(DevBase):
    """Common base for all GAMESS input classes; provides shared messaging."""
    def message(self,msg,**kwargs):
        # Route messages through DevBase.error so they share its formatting
        # and abort behavior (NOTE(review): assumes DevBase.error is the
        # intended sink for informational messages -- confirm).
        self.error(msg,**kwargs)
    #end def message
#end class GIbase
class GIarray(GIbase):
    """Integer-indexed array of values for GAMESS 'keyword(i)=...' inputs.

    Maps integer indices to 1D numpy arrays.  Scalars are promoted to
    single-element arrays; tuples/lists/arrays are converted in place with
    the dtype inferred from their first element.
    """
    def __init__(self,d):
        # d: dict mapping integer index -> scalar or sequence of values
        for n,v in d.iteritems():
            if not isinstance(n,int):
                self.error("keys must be integers\nattempted to initialize array from input provided: {0}\nnote that dict's are used only for arrays".format(d))
            #end if
            if isinstance(v,(tuple,list,ndarray)):
                # infer the element dtype from the first entry
                nv = array(v,type(v[0]))
            else:
                # promote a scalar to a one-element array
                # (bugfix: was 'type[v]', which subscripts the builtin
                #  'type' and raises TypeError at runtime)
                nv = array([v],type(v))
            #end if
            self[n]=nv
        #end for
    #end def __init__
#end class GIarray
class Group(GIbase):
    """Abstract base for one named input group ($GROUP ... $END).

    Construction optionally parses group text via read(), then assigns any
    keyword attributes.  Subclasses must implement read() and write().
    """
    def __init__(self,text=None,**kwargs):
        if text is not None:
            self.read(text)
        #end if
        self.set(**kwargs)
    #end def __init__

    def read(self,text):
        # abstract: parse the group contents from text
        self.not_implemented()
    #end def read

    def write(self,text):
        # abstract: render the group contents as text
        self.not_implemented()
    #end def write
#end class Group
class KeywordGroup(Group):
    """A keyword-style input group: free-format 'var=value' assignments.

    Values are parsed into Python bools/ints/floats/arrays and written back
    in GAMESS conventions (.true./.false., 'd' float exponents).  Indexed
    keywords such as var(3)=... are gathered into GIarray instances.
    """
    # maximum output line width before wrapping
    linewrap = 70
    lineindent = ' '
    # GAMESS logical literals <-> Python bools
    booldict = {'.true.':True,'.TRUE.':True,'.t.':True,'.T.':True,
                '.false.':False,'.FALSE.':False,'.f.':False,'.F.':False}

    def readval(self,val):
        """Convert a raw token string into bool, int, float, array, or str."""
        fail = False
        if val in self.booldict:
            v = self.booldict[val]
        else:
            try:
                v = int(val)
            except:
                try:
                    # Fortran floats use 'd' where Python expects 'e'
                    v = float(val.replace('d','e'))
                except:
                    #val = val.replace(',',' ')
                    if ' ' in val:
                        # whitespace-separated list: try int, float, then str array
                        val = val.split()
                        try:
                            v = array(val,dtype=int)
                        except:
                            try:
                                v = array(val,dtype=float)
                            except:
                                try:
                                    v = array(val,dtype=str)
                                except:
                                    fail = True
                                #end try
                            #end try
                        #end try
                    else:
                        v = val
                    #end if
                #end try
            #end try
        #end if
        if fail:
            self.error('failed to read value: "{0}"'.format(val))
        #end if
        return v
    #end def readval

    def read(self,text):
        """Parse the body of a keyword group into attributes of self."""
        tokens = text.replace(',',' ').split()
        for token in tokens:
            if '=' in token:
                var,val = token.split('=')
                var = var.lower()
                val = val.lower()
                self[var]=val
            else:
                # a bare token continues the value of the previous keyword
                # NOTE(review): a leading bare token would hit an undefined
                # 'var' here -- assumed not to occur in valid input
                self[var]+=' '+token.lower()
            #end if
        #end for
        vars = list(self.keys())
        for var in vars:
            val = self.readval(self[var])
            if not '(' in var:
                self[var] = val
            else:
                # indexed keyword, e.g. wstate(1)=...; collect into a GIarray
                del self[var]
                var,index = var.replace('(',' ').replace(')','').split()
                index = int(index)
                if not var in self:
                    arr = GIarray({index:val})
                    self[var] = arr
                else:
                    self[var][index]=val
                #end if
            #end if
        #end for
    #end def read

    def writeval(self,val):
        """Render a Python value in GAMESS input syntax."""
        if isinstance(val,bool):
            if val:
                sval = '.true.'
            else:
                sval = '.false.'
            #end if
        elif isinstance(val,str):
            sval = val
        elif isinstance(val,int):
            sval = str(val)
        elif isinstance(val,float):
            # Fortran-style exponent marker
            sval = str(val).replace('e','d')
        elif isinstance(val,(ndarray,list)):
            # comma-separated list, wrapped at linewrap characters
            sval = ''
            for v in val:
                vs = str(v)+','
                if len(sval)+len(vs)<self.linewrap:
                    sval+=vs
                else:
                    sval+='\n'+self.lineindent+vs
                #end if
            #end for
            # drop the trailing comma
            sval = sval[0:-1]
        else:
            self.error('unknown type encountered on write: {0}'.format(val))
        #end if
        return sval
    #end def writeval

    def write(self,name):
        """Write the group as ' $name  var=val ... $end' with line wrapping."""
        text = ''
        line = ' ${0:<6} '.format(name)
        for var in sorted(self.keys()):
            val = self[var]
            if not isinstance(val,GIarray):
                vtext='{0}={1} '.format(var,self.writeval(val))
                if len(line)+len(vtext) < self.linewrap:
                    line+=vtext
                else:
                    text+=line+'\n'
                    line = self.lineindent+vtext
                #end if
            else:
                # indexed keyword: write each element as var(n)=...
                for n in sorted(val.keys()):
                    vtext = '{0}({1})={2} '.format(var,n,self.writeval(val[n]))
                    if len(line)+len(vtext) < self.linewrap:
                        line+=vtext
                    else:
                        text+=line+'\n'
                        line = self.lineindent+vtext
                    #end if
                #end for
            #end if
        #end for
        text += line+' $end\n'
        return text
    #end def write
#end class KeywordGroup
class CardGroup(Group):
    """A card-style group: ordered lines of whitespace-separated typed tokens.

    Used for groups whose data is positional rather than keyword=value.
    """
    #input spec page numbers
    # ecp 287
    # data 37
    def readval(self,val):
        # token -> int, float (Fortran 'd' exponent), or plain string
        try:
            v = int(val)
        except:
            try:
                v = float(val.replace('d','e'))
            except:
                v = val
            #end try
        #end try
        return v
    #end def readval

    def read_tokens(self,line):
        """Split one card line into a list of typed tokens."""
        tokens = []
        for token in line.split():
            tokens.append(self.readval(token))
        #end for
        return tokens
    #end def read_tokens

    def read_line_tokens(self,text):
        """Tokenize every line of the card text."""
        line_tokens = []
        for line in text.splitlines():
            line_tokens.append(self.read_tokens(line))
        #end for
        return line_tokens
    #end def read_line_tokens

    def append_text(self,text):
        # parse raw text and append each line's tokens as one card
        for tokens in self.read_line_tokens(text):
            self.append(tokens)
        #end for
    #end def append_text

    def append_list(self,lst):
        # append pre-tokenized cards
        for tokens in lst:
            self.append(tokens)
        #end for
    #end def append_list

    def read(self,inp):
        """Load card contents from raw text or from a list of token lists."""
        self.clear()
        if isinstance(inp,str):
            self.append_text(inp)
        elif isinstance(inp,list):
            self.append_list(inp)
        #end if
    #end def read

    def writeval(self,val):
        # floats use Fortran 'd' exponents; long/large values get fixed format
        if isinstance(val,float):
            sval = str(val).replace('e','d')
            if len(sval)>8 and abs(val)>=10.0:
                sval = '{0:16.8e}'.format(val).replace('e','d')
            #end if
        else:
            sval = str(val)
        #end if
        return sval
    #end def writeval

    def write(self,name):
        """Write the group as ' $name' followed by one line per card."""
        text = ' ${0}\n'.format(name)
        contents = ''
        for n in range(len(self)):
            for token in self[n]:
                contents += self.writeval(token)+' '
            #end for
            contents += '\n'
        #end for
        text+= contents.lstrip()
        text+=' $end\n'
        return text
    #end def write

    def list(self):
        """Return the cards as a plain list of token lists."""
        lst = []
        for n in range(len(self)):
            lst.append(self[n])
        #end for
        return lst
    #end def list
#end class CardGroup
class FormattedGroup(Group):
    """Verbatim (machine-formatted) input group; text is stored unmodified."""
    def read(self,text):
        # keep the raw contents exactly as given
        self.text = str(text)
    #end def read

    def write(self,name):
        """Render as ' $NAME' header, the stored text untouched, then ' $END'."""
        header = ' $' + name.upper() + '\n'
        return header + self.text + ' $END\n'
    #end def write
#end class FormattedGroup
# detailed keyword specification groups to check names and types of keyword inputs
class KeywordSpecGroup(KeywordGroup):
    """Keyword group with a declared specification.

    Subclasses enumerate the legal keywords, their types, and (optionally)
    their allowed values, enabling validation of user-provided input.
    """
    keywords = set()
    integers = set()
    reals    = set()
    bools    = set()
    strings  = set()
    arrays   = set()

    allowed_values = obj()

    def is_consistent(self):
        # every assigned keyword must belong to the declared keyword set
        return set(self.keys()) <= self.keywords
    #end def is_consistent

    def is_valid(self):
        """True if all keyword names are recognized and restricted values are allowed."""
        valid = self.is_consistent()
        for name,val in self.iteritems():
            if name not in self.allowed_values:
                continue
            #end if
            if isinstance(val,str):
                # allowed value sets are stored lowercase
                val = val.lower()
            #end if
            valid &= val in self.allowed_values[name]
        #end for
        return valid
    #end def is_valid
#end class KeywordSpecGroup
class ContrlGroup(KeywordSpecGroup):
    """Specification of the $CONTRL group (overall job control)."""
    keywords = set([
        'scftyp','dfttyp','tddft' ,'vbtyp' ,'mplevl','cityp' ,'cctyp' ,
        'cimtyp','relwfn','runtyp','numgrd','exetyp','icharg','mult'  ,
        'coord' ,'units' ,'nzvar' ,'pp'    ,'local' ,'ispher','qmttol',
        'maxit' ,'molplt','pltorb','aimpac','friend','nfflvl','nprint',
        'nosym' ,'etollz','inttyp','grdtyp','normf' ,'normp' ,'itol'  ,
        'icut'  ,'iskprp','irest' ,'geom'  ,'ecp'   ,'casino'
        ])

    integers = set([
        'mplevl','icharg','mult' ,'nzvar' ,'ispher','maxit' ,'nfflvl',
        'nprint','nosym' ,'normf','normp' ,'itol'  ,'icut'  ,'iskprp',
        'irest'
        ])
    reals    = set(['qmttol' ,'etollz'])
    bools    = set(['numgrd' ,'molplt','pltorb','aimpac','casino'])
    strings  = set([
        'scftyp','dfttyp','tddft' ,'vbtyp' ,'cityp' ,'cctyp' ,'cimtyp',
        'relwfn','runtyp','exetyp','coord' ,'units' ,'pp'    ,'local' ,
        'friend','inttyp','grdtyp','geom'  ,'ecp'
        ])

    # restricted value sets (lowercase) for validated keywords
    allowed_values = obj(
        scftyp = set(['rhf','uhf','rohf','gvb','mcscf','none']),
        dfttyp = set(['none','slater','becke','gill','optx','pw91x','pbex',
                      'vwn','vwn3','vwn1rpa','pz81','p86','lyp','pw91c','pbec',
                      'op','svwn','wvwn1rpa','blyp','bop','bp86','gvwn','gpw91',
                      'pbevwn','pbeop','olyp','pw91','pbe','edf1','pbe','revpbe',
                      'rpbe','pbesol','hcth93','hcth120','hcth147','hcth407',
                      'sogga','mohlyp','b97-d','sogga11','bhhlyp','b3pw91',
                      'b3lyp','b3lypv1r','b3lypv3','b3p86','b3p86v1r','b3p86v5',
                      'b97','b97-1','b97-2','b97-3','b97-k','b98','pbe0','x3lyp',
                      'sogga11x','camb3lyp','wb97','wb97x','wb97x-d','b2plyp',
                      'wb97x-2','wb97x-2l','vs98','pkzb','thcth','thcthhyb','bmk',
                      'tpss','tpssh','tpssm','revtpss','dldf','m05','m05-2x',
                      'm06','m06-l','m06-2x','m06-hf','m08-hx','m08-s0','m11','m11-l',
                      'none','xalpha','slater','becke','depristo','cama','half',
                      'vwn','pwloc','lyp','bvwn','blyp','bpwloc','b3lyp','camb',
                      'xvwn','xpwloc','svwn','spwloc','wigner','ws','wigexp']),
        tddft  = set(['none','excite','spnflp']),
        vbtyp  = set(['none','vb2000']),
        mplevl = set([0,2]),
        cityp  = set(['none','cis','sfcis','aldet','ormas','fsoci','genci','guga']),
        cctyp  = set(['none','lccd','ccd','ccsd','ccsd(t)','r-cc','cr-cc','cr-ccl',
                      'ccsd(tq)','cr-cc(q)','eom-ccsd','cr-eom','cr-eoml','ip-eom2',
                      'ip-eom2','ip-eom3a','ea-eom2','ea-eom3a']),
        cimtyp = set(['none','secim','decim','gsecim']),
        relwfn = set(['none','iotc','dk','resc','nesc']),
        runtyp = set(['energy','gradient','hessian','gamma','optimize','trudge',
                      'sadpoint','mex','conical','irc','vscf','drc','md','globop',
                      'optfmo','gradextr','surface','comp','g3mp2','prop','raman',
                      'nacme','nmr','eda','qmefpea','transitn','ffield','tdhf',
                      'tdhfx','makefp','fmo0']),
        exetyp = set(['run','check']),
        coord  = set(['unique','hint','prinaxis','zmt','zmtmpc','fragonly']),
        units  = set(['angs','bohr']),
        pp     = set(['none','read','sbkjc','hw','mcp']),
        local  = set(['none','boys','ruednbrg','pop','svd']),
        ispher = set([-1,0,1]),
        friend = set(['hondo','meldf','gamessuk','gaussian','all']),
        nfflvl = set([2,3]),
        nprint = set([-7,-6,-5,-4,-3,-2,1,2,3,4,5,6,7,8,9]),
        nosym  = set([0,1]),
        inttyp = set(['best','rotaxis','eric','rysquad']),
        grdtyp = set(['best rsyquad']),
        normf  = set([0,1]),
        normp  = set([0,1]),
        iskprp = set([0,1]),
        irest  = set([-1,0,1,2,3,4]),
        geom   = set(['input','daf']),
        )
#end class ContrlGroup
class SystemGroup(KeywordSpecGroup):
    """Specification of the $SYSTEM group (memory, time limit, parallelism)."""
    keywords = set(['mwords','memddi','timlim','parall','kdiag','corefl',
                    'baltyp','mxseq2','mxseq3','nodext','iosmp','modio' ,
                    'memory'])

    integers = set(['mwords','memddi','kdiag','mxseq2','mxseq3','modio','memory'])
    reals    = set(['timlim'])
    bools    = set(['parall','corefl'])
    strings  = set(['baltyp'])
    arrays   = set(['nodext','iosmp'])

    allowed_values = obj(
        kdiag  = set([0,1,2,3]),
        baltyp = set(['slb','dlb','loop','nxtval']),
        modio  = set([1,2,4,8,15]),
        )
#end class SystemGroup
class GuessGroup(KeywordSpecGroup):
    """Specification of the $GUESS group (initial orbital guess)."""
    keywords = set(['guess' ,'prtmo' ,'punmo' ,'mix'   ,'norb','norder','iorder',
                    'jorder','insorb','purify','tolz','tole','symden'])

    integers = set(['norb','norder','insorb'])
    reals    = set(['tolz','tole'])
    bools    = set(['prtmo','punmo','mix','purify','symden'])
    strings  = set(['guess'])
    arrays   = set(['iorder','jorder'])

    allowed_values = obj(
        guess  = set(['huckel','hcore','moread','rdmini','mosaved','skip','fmo','hucsub','dmread']),
        norder = set([0,1]),
        )
#end class GuessGroup
class ScfGroup(KeywordSpecGroup):
    """Specification of the $SCF group (SCF convergence and options).

    Note: this group declares no purely string-valued keywords.
    """
    keywords = set([
        'dirscf','fdiff' ,'noconv','diis'  ,'soscf' ,'extrap','damp'  ,
        'shift' ,'rstrct','dem'   ,'cuhf'  ,'conv'  ,'sogtol','ethrsh',
        'maxdii','swdiis','locopt','demcut','dmpcut','uhfnos','vvos'  ,
        'mvoq'  ,'acavo' ,'pacavo','uhfchk','nhomo' ,'nlumo' ,'mom'   ,
        'kproj' ,'nco'   ,'nseto' ,'no'    ,'npair' ,'cicoef','couple',
        'f'     ,'alpha' ,'beta'  ,'npunch','npreo' ,'vtscal','scalf' ,
        'maxvt' ,'vtconv'
        ])

    integers = set([
        'maxdii','mvoq'  ,'nhomo' ,'nlumo' ,'kproj','nco','nseto',
        'npair' ,'npunch','maxvt'
        ])
    reals    = set([
        'conv'  ,'sogtol','ethrsh' ,'swdiis','demcut','dmpcut',
        'scalf' ,'vtconv'
        ])
    bools    = set([
        'dirscf','fdiff' ,'noconv' ,'diis'  ,'soscf' ,'extrap',
        'damp'  ,'shift' ,'rstrct' ,'dem'   ,'cuhf'  ,'locopt',
        'uhfnos','vvos'  ,'acavo'  ,'uhfchk','mom'   ,'couple',
        'vtscal'
        ])
    arrays   = set([
        'pacavo','no'    ,'cicoef','f'     ,'alpha' ,'beta'  ,
        'npreo'
        ])

    allowed_values = obj(
        kproj = set([0,1,2]),
        )
#end class ScfGroup
class McscfGroup(KeywordSpecGroup):
    """Specification of the $MCSCF group (multi-configuration SCF)."""
    keywords = set([
        'cistep','focas' ,'soscf' ,'fullnr','quad'  ,'jacobi','acurcy',
        'engtol','maxit' ,'micit' ,'nword' ,'fors'  ,'canonc','finci' ,
        'diabat','ekt'   ,'npunch','npflg' ,'nofo'  ,'mcfmo' ,'casdii',
        'cashft','nrmcas','qudthr','damp'  ,'method','linser','fcore' ,
        'mofrz' ,'norb'  ,'norot' ,'dropc'
        ])

    integers = set(['maxit','micit','nword','npunch','nofo','mcfmo','nrmcas','norb'])
    reals    = set(['acurcy','engtol','casdii','cashft','qudthr','damp'])
    bools    = set(['focas','soscf','fullnr','quad','jacobi','fors','canonc',
                    'diabat','ekt','linser','fcore','dropc'])
    strings  = set(['cistep','finci','method'])
    arrays   = set(['npflg','mofrz','norot'])

    allowed_values = obj(
        cistep = set(['aldet','ormas','guga','genci','gmcci']),
        finci  = set(['none','mos','nos']),
        nrmcas = set([0,1]),
        method = set(['dm2','tei']),
        )
#end class McscfGroup
class DftGroup(KeywordSpecGroup):
    """Specification of the $DFT group (density functional options)."""
    # (note: 'nrad' appears twice in the literal below; sets deduplicate)
    keywords = set([
        'method','dc'    ,'idcver','dcchg' ,'dcabc' ,'dcalp' ,'dcsr'  ,
        'dcs6'  ,'dcs8'  ,'lrdflg','mltint','lambda','kappa' ,'rzero' ,
        'prpol' ,'prcoef','prpair','lc'    ,'mu'    ,'chf'   ,'cmp2'  ,
        'nrad'  ,'nleb'  ,'sg1'   ,'jans'  ,'nrad'  ,'nthe'  ,'nphi'  ,
        'swoff' ,'switch','nrad0' ,'nleb0' ,'nthe0' ,'nphi0' ,'thresh',
        'gthre' ,'auxfun','three'
        ])

    integers = set(['idcver','prcoef','prpair','nrad','nleb','jans','nthe',
                    'nphi','nrad0','nleb0','nthe0','nphi0','gthre'])
    reals    = set(['dcalp','dcsr','dcs6','dcs8','lambda','kappa','rzero',
                    'mu','chf','cmp2','swoff','switch','thresh'])
    bools    = set(['dc','dcchg','dcabc','lrdflg','mltint','prpol','lc','sg1',
                    'three'])
    strings  = set(['method','auxfun'])

    allowed_values = obj(
        method = set(['grid','gridfree']),
        idcver = set([1,2,3]),
        jans   = set([1,2]),
        auxfun = set(['aux0','aux3']),
        )
#end class DftGroup
class GugdiaGroup(KeywordSpecGroup):
    """Specification of the $GUGDIA group (GUGA CI Hamiltonian diagonalization)."""
    keywords = set([
        'nstate','prttol','mxxpan','itermx','cvgtol' ,'nword' ,'maxham',
        'maxdia','nimprv','nselct','selthr','nextra','kprint','nref','eref'
        ])

    integers = set(['nstate','mxxpan','itermx','nword','maxham','maxdia',
                    'nimprv','nselct','nextra','nref'])
    reals    = set(['prttol','cvgtol','selthr','eref'])
    arrays   = set(['kprint'])
#end class GugdiaGroup
class DrtGroup(KeywordSpecGroup):
    """Specification of the $DRT group (distinct row table for MCSCF/GUGA)."""
    keywords = set([
        'group','fors'  ,'foci'  ,'soci','iexcit','intact','nmcc',
        'ndoc' ,'naos'  ,'nbos'  ,'nalp','nval'  ,'next'  ,'nfzv','stsym',
        'noirr','mxnint','mxneme','nprt'
        ])

    integers = set(['iexcit','nmcc','ndoc','naos','nbos','nalp','nval',
                    'next','nfzv','noirr','mxnint','mxneme','nprt'])
    bools    = set(['fors','foci','soci','intact'])
    strings  = set(['group','stsym'])

    allowed_values = obj(
        group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
        stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
                     'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
                     'au','b1u','b2u','b3u']),
        nprt  = set([0,1,2,3]),
        )
#end class DrtGroup
class CidrtGroup(KeywordSpecGroup):
    """Specification of the $CIDRT group (distinct row table for GUGA CI).

    Identical to $DRT except frozen cores are given via 'nfzc' rather
    than 'nmcc'.
    """
    keywords = set([
        'group','fors'  ,'foci'  ,'soci','iexcit','intact','nfzc' ,
        'ndoc' ,'naos'  ,'nbos'  ,'nalp','nval'  ,'next'  ,'nfzv' ,'stsym',
        'noirr','mxnint','mxneme','nprt'
        ])

    integers = set(['iexcit','nfzc','ndoc','naos','nbos','nalp','nval',
                    'next','nfzv','noirr','mxnint','mxneme','nprt'])
    bools    = set(['fors','foci','soci','intact'])
    strings  = set(['group','stsym'])

    allowed_values = obj(
        group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
        stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
                     'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
                     'au','b1u','b2u','b3u']),
        nprt  = set([0,1,2,3]),
        )
#end class CidrtGroup
class DetGroup(KeywordSpecGroup):
    """Specification of the $DET group (determinant-based full CI)."""
    keywords = set([
        'ncore' ,'nact'  ,'nels'  ,'sz'    ,'group' ,'stsym' ,'irreps',
        'nstate','prttol','analys','itermx','cvgtol','nhgss' ,'nstgss',
        'mxxpan','clobbr','pures' ,'iroot' ,'nflgdm','saflg' ,'wstate',
        'idwref','dwparm'
        ])

    integers = set(['ncore','nact','nels','nstate','itermx','nhgss','nstgss',
                    'mxxpan','iroot','idwref'])
    reals    = set(['sz','prttol','cvgtol','dwparm'])
    bools    = set(['analys','clobbr','pures','saflg'])
    strings  = set(['group','stsym'])
    arrays   = set(['irreps','nflgdm','wstate'])

    allowed_values = obj(
        group = set(['c1','c2','ci','cs','c2v','c2h','d2','d2h','c4v','d4','d4h']),
        stsym = set(['a','ag','au','ap','app','a','b','a1','a2','b1','b2','ag',
                     'bu','bg','au','a','b1','b2','b3','ag','b1g','b2g','b3g',
                     'au','b1u','b2u','b3u']),
        )
#end class DetGroup
class BasisGroup(KeywordSpecGroup):
    """Specification of the $BASIS group (Gaussian basis set selection).

    Consistency fixes vs. the prior revision (these mismatches would be
    reported by check_keyspec_groups()):
      - 'nffunc' was typed as an integer but missing from keywords
      - 'npfunc' was listed as a keyword but left untyped
      - arrays contained 'basname', a misspelling of the keyword 'basnam'
    """
    keywords = set([
        'gbasis','ngauss','ndfunc','nffunc','npfunc','diffsp','diffs',
        'polar' ,'split2','split3','basnam','extfil'
        ])

    integers = set(['ngauss','ndfunc','nffunc','npfunc'])
    bools    = set(['diffsp','diffs','extfil'])
    strings  = set(['gbasis','polar'])
    arrays   = set(['split2','split3','basnam'])

    allowed_values = obj(
        #gbasis = set(['sto','n21','n31','n311','g3l','g3lx','mini','midi','dzv',
        #              'dh','tzv','mc']) # many others
        ndfunc = set([0,1,2,3]),
        nffunc = set([0,1]),
        polar  = set(['common','popn31','popn311','dunning','huzinaga','hondo7']),
        )
#end class BasisGroup
#class XGroup(KeywordSpecGroup):
# keywords = set([''])
# integers = set([''])
# reals = set([''])
# bools = set([''])
# strings = set([''])
# arrays = set([''])
# allowed_values = obj(
# = set([]),
# )
##end class XGroup
class GamessInput(SimulationInput,GIbase):
    """Represents a complete GAMESS input file.

    Each recognized $GROUP becomes an attribute holding a Group subclass
    instance (KeywordGroup, CardGroup, FormattedGroup, or a specialized
    KeywordSpecGroup).  Arbitrary input files can be read and written.
    """
    # canonical ordering of all recognized input groups on output
    group_order = '''
        contrl system basis ecp data zmat libe
        scf scfmi dft tddft cis cisvec mp2
        rimp2 auxbas ccinp eominp mopac guess vec
        mofrz statpt trudge trurst force cphf cpmchf
        mass hess grad dipdr vib vib2 vscf
        vibscf gamma eqgeom hlowt glowt irc drc
        mex conicl md rdf globop gradex surf
        local truncn elmom elpot eldens elfldg points
        grid pdc mgc radial molgrf stone raman
        alpdr comp nmr morokm lmoeda qmefp ffcalc
        tdhf tdhfx efrag fragname frgrpl ewald makefp
        prtefp damp dampgs pcm pcmgrd mcpcav tescav
        newcav iefpcm pcmitr disbs disrep svp svpirf
        cosgms scrf mcp relwfn efield intgrl fmm
        trans fmo fmoprp fmoxyz optfmo fmohyb fmobnd
        fmoenm fmoend optrst gddi elg dandc dccorr
        subscf subcor mp2res ccres ciminp cimatm cimfrg
        ffdata ffpdb ciinp det cidet gen cigen
        ormas ceeis cedata gcilst gmcpt pdet adddet
        remdet sodet drt cidrt mcscf mrmp detpt
        mcqdpt excorr casci ivoorb cisort gugem gugdia
        gugdm gugdm2 lagran trfdm2 diabat transt
        drt1 drt2 vec1 vec2 det1 det2 hess2
        '''.split()

    all_groups = set(group_order)

    # groups parsed as free-format keyword groups
    key_groups = set(['contrl','system','guess','scf','mcscf','dft',
                      'gugdia','drt','cidrt','det','basis'])
    # groups parsed as positional card groups (currently empty; cards are
    # instead detected heuristically in read_text)
    card_groups = set()
    #card_groups = set(['ecp','data','mcp','gcilst','points','stone','efrag',
    #                   'fragname','frgrpl','dampgs'])#,'fmoxyz'])
    formatted_groups = set()

    # detailed specifications for certain groups
    keyspec_groups = obj(
        contrl = ContrlGroup,
        system = SystemGroup,
        guess  = GuessGroup,
        scf    = ScfGroup,
        mcscf  = McscfGroup,
        dft    = DftGroup,
        gugdia = GugdiaGroup,
        drt    = DrtGroup,
        cidrt  = CidrtGroup,
        det    = DetGroup,
        basis  = BasisGroup
        )

    # spec'd groups in canonical order (class body executes these loops once)
    keyspec_group_order = []
    for gname in group_order:
        if gname in keyspec_groups:
            keyspec_group_order.append(gname)
        #end if
    #end for

    # union of keywords over all spec'd groups
    # (iterating the obj yields the group classes)
    all_keywords = set()
    for g in keyspec_groups:
        all_keywords |= g.keywords
    #end for

    # names that are both a group name and a keyword (a feature, see checker)
    group_keyword_overlap = all_groups & all_keywords

    all_names = all_groups | all_keywords

    #cardspec_groups = obj()

    # aliases for generate_gamess_input
    group_aliases = obj()
    for gname in group_order:
        group_aliases['gamess_'+gname]=gname
    #end for
    all_group_aliases = all_groups | set(group_aliases.keys())
    all_name_aliases  = all_group_aliases | all_keywords

    # gamess file I/O unit numbers
    file_units = obj(
        #MCPPATH = -5,BASPATH = -4,EXTCAB = -3,
        #MAKEFP  =  1, ERICFMT =  2, EXTBAS  =  3,
        TRAJECT =  4, INPUT   =  5,
        OUTPUT  =  6, PUNCH   =  7, AOINTS  =  8, MOINTS  =  9, DICTNRY = 10,
        DRTFILE = 11, CIVECTR = 12, CASINTS = 13, CIINTS  = 14, WORK15  = 15,
        WORK16  = 16, CSFSAVE = 17, FOCKDER = 18, WORK19  = 19, DASORT  = 20,
        DFTINTS = 21, DFTGRID = 22, JKFILE  = 23, ORDINT  = 24, EFPIND  = 25,
        PCMDATA = 26, PCMINTS = 27, MLTPL   = 28, MLTPLT  = 29, DAFL30  = 30,
        RESTART = 35, HESSIAN = 38, SOCCDAT = 40, AABB41  = 41, BBAA42  = 42,
        BBBB43  = 43, REMD    = 44, MCQD50  = 50, MCQD51  = 51, MCQD52  = 52,
        MCQD53  = 53, MCQD54  = 54, MCQD55  = 55, MCQD56  = 56, MCQD57  = 57,
        MCQD58  = 58, MCQD59  = 59, MCQD60  = 60, MCQD61  = 61, MCQD62  = 62,
        MCQD63  = 63, MCQD64  = 64, DCPHFH2 = 67, NMRINT1 = 61, CCREST  = 70,
        CCDIIS  = 71, CCINTS  = 72, CCT1AMP = 73, CCT2AMP = 74, CCT3AMP = 75,
        CCVM    = 76, CCVE    = 77, CCQUADS = 78, QUADSVO = 79, EOMSTAR = 80,
        EOMVEC1 = 81, EOMVEC2 = 82, EOMHC1  = 83, EOMHC2  = 84, EOMHHHH = 85,
        EOMPPPP = 86, EOMRAMP = 87, EOMRTMP = 88, EOMDG12 = 89, MMPP    = 90,
        MMHPP   = 91, MMCIVEC = 92, MMCIVC1 = 93, MMCIITR = 94, EOMVL1  = 95,
        EOMVL2  = 96, EOMLVEC = 97, EOMHL1  = 98, EOMHL2  = 99, EFMOI   =102,
        EFMOF   =103
        )

    def __init__(self,filepath=None):
        # optionally read an existing input file on construction
        if filepath!=None:
            self.read(filepath)
        #end if
    #end def __init__

    def read_text(self,contents,filepath=None):
        """Parse raw input file text into Group objects, one per $GROUP.

        A line-by-line state machine tracks whether we are inside a group
        (ingroup) or inside a known card group (incard).  In parallel,
        gname/gtext accumulate the raw text of the current group so that
        unknown groups without '=' signs can be kept verbatim.
        """
        #print 8*'\n'
        #print contents
        groups = obj()
        lines = contents.splitlines()
        ingroup = False
        incard  = False
        group_name = None
        group_text = ''
        gname = ''
        gtext = ''
        n=0
        for line in lines:
            ended = False
            ls = line.strip()
            # specialized parsing for unknown card groups
            if ingroup and ls!='$END' and ls!='$end':
                gtext+=line+'\n'
            #end if
            if incard:
                # inside a known card group: copy lines verbatim until $END
                ended = ls=='$END' or ls=='$end'
                ingroup = not ended
                incard  = not ended
                if ended:
                    groups[group_name] = group_text
                    group_name = None
                    group_text = ''
                else:
                    group_text+=line+'\n'
                #end if
            elif len(line)>0 and line[0]==' ' and ls!='':
                if len(line)>1 and line[1]=='$' and not ingroup:
                    # start of a new group header, e.g. ' $CONTRL ...'
                    if not ' ' in ls:
                        group_name = ls.replace('$','').lower()
                        gname = group_name
                        ingroup = True
                    else:
                        # header line also carries group contents
                        group_name,ls = ls.split(' ',1)
                        group_name = group_name.replace('$','').lower()
                        gname = group_name
                        text,ended = self.process_line(ls)
                        group_text += text
                        ingroup = not ended
                        if ended:
                            groups[group_name] = group_text
                            group_name = None
                            group_text = ''
                        #end if
                    #end if
                    incard = group_name in self.card_groups
                elif ingroup:
                    # continuation line of the current group
                    text,ended = self.process_line(ls)
                    group_text += text
                    ingroup = not ended
                    if ended:
                        groups[group_name] = group_text
                        group_name = None
                        group_text = ''
                    #end if
                elif not ingroup:
                    None
                else:
                    self.error('invalid text encountered during read of line number {0}:\n{1}'.format(n,line))
                #end if
            elif ls=='' or line[0]!=' ' or not ingroup:
                # blank lines and column-1 text outside groups are ignored
                None
            else:
                self.error('invalid text encountered during read of line number {0}:\n{1}'.format(n,line))
            #end if
            # specialized parsing for unknown card groups:
            # if the group contained no '=' it is likely card data, so keep
            # the verbatim text instead of the processed keyword text
            if ended:
                if not '=' in groups[gname]:
                    groups[gname]=gtext
                #end if
                gtext = ''
                gname = ''
            #end if
        #end for
        #print groups

        # instantiate a Group object of the appropriate kind per group
        for group_name,group_text in groups.iteritems():
            failed = False
            if group_name in self.keyspec_groups:
                self[group_name] = self.keyspec_groups[group_name](group_text)
            #elif group_name in self.cardspec_groups:
            #    self[group_name] = self.cardspec_groups[group_name](group_text)
            elif group_name in self.key_groups:
                self[group_name] = KeywordGroup(group_text)
            elif group_name in self.card_groups:
                self[group_name] = CardGroup(group_text)
            elif '=' in group_text:
                # unknown group: best-effort keyword parse, fall back to verbatim
                try:
                    self[group_name] = KeywordGroup(group_text)
                except:
                    try:
                        self[group_name] = FormattedGroup(group_text)
                    except:
                        failed = True
                    #end try
                #end try
            else:
                try:
                    self[group_name] = FormattedGroup(group_text)
                except:
                    failed = True
                #end try
            #end if
            if failed:
                # deliberate best-effort: warn rather than abort the read
                self.message('Read failure: group "{0}" does not appear to be a keyword group\nand a generic read of card data failed\ndata for this group will not be available'.format(group_name))
            #end if
        #end for
        #print self
        #exit()
    #end def read_text

    def process_line(self,ls):
        """Strip $END markers and '!' comments from a group content line.

        Returns (text, ended) where ended is True if the line closed the group.
        """
        ended = True
        if ls.endswith('$END'):
            text = ls.replace('$END','')
        elif ls.endswith('$end'):
            text = ls.replace('$end','')
        else:
            text = ls
            ended = False
        #end if
        cloc = text.find('!')
        if cloc!=-1:
            text = text[0:cloc]
        #end if
        text +='\n'
        return text,ended
    #end def process_line

    def write_text(self,filepath=None):
        """Render all groups as input file text, keyword groups first."""
        contents = ''
        extra_groups = set(self.keys())-set(self.group_order)
        if len(extra_groups)>0:
            self.error('write failed\nthe following groups are unknown: {0}'.format(sorted(extra_groups)))
        #end if
        for group in self.group_order:
            if group in self and isinstance(self[group],KeywordGroup):
                contents += self[group].write(group)
            #end if
        #end for
        for group in self.group_order:
            if group in self and isinstance(self[group],(CardGroup,FormattedGroup)):
                contents += self[group].write(group)
            #end if
        #end for
        return contents
    #end def write_text

    def incorporate_system(self,system):
        # not supported; use generate_gamess_input's 'system' keyword instead
        self.not_implemented()
    #end def incorporate_system
#end class GamessInput
def generate_gamess_input(**kwargs):
    """User-facing factory for GamessInput objects.

    Dispatches on the optional 'input_type' keyword (default 'general');
    all remaining keywords are forwarded to the selected generator.
    """
    input_type = kwargs.pop('input_type','general')
    if input_type=='general':
        gi = generate_any_gamess_input(**kwargs)
    else:
        GamessInput.class_error('input_type {0} is unrecognized\nvalid options are: general'.format(input_type))
    #end if
    return gi
#end def generate_gamess_input
# nexus-specific (non-GAMESS) keywords accepted by generate_gamess_input
ps_inputs = set('descriptor symmetry system pseudos pseudo_bases bases'.split())
# defaults for the nexus-specific keywords
ps_defaults = obj()
for var in ps_inputs:
    ps_defaults[var]=None
#end for
ps_defaults.set(
    descriptor = 'A molecule.',
    symmetry = 'C1'
    )
# defaults (all None) for every recognized GAMESS keyword
kw_defaults = obj()
for var in GamessInput.all_keywords:
    kw_defaults[var]=None
#end for
def generate_any_gamess_input(**kwargs):
    """Build a GamessInput from arbitrary group/keyword arguments.

    Keywords fall into three categories:
      - nexus-specific inputs (ps_inputs: system, pseudos, bases, ...)
      - whole groups given as obj/Group (optionally via 'gamess_' aliases)
      - bare GAMESS keywords, routed to their spec'd group by group order
    Plain dict values are interpreted as indexed arrays (GIarray).
    """
    kwset = set(kwargs.keys())
    # separate out the nexus-specific inputs
    pskw = ps_defaults.copy()
    ps_overlap = ps_inputs & kwset
    if len(ps_overlap)>0:
        pskw.move_from(kwargs,ps_overlap)
        kwset = set(kwargs.keys())
    #end if
    # dict values denote indexed keywords, e.g. wstate={1:...}
    for name in kwargs.keys():
        val = kwargs[name]
        if isinstance(val,dict):
            kwargs[name] = GIarray(val)
        #end if
    #end for
    kw = kw_defaults.copy()
    kw.set(**kwargs)
    # kwrem tracks keywords not yet consumed by a group
    kwrem = obj(**kwargs)
    invalid_names = kwset-GamessInput.all_name_aliases
    if len(invalid_names)>0:
        GamessInput.class_error('invalid group names or keywords encountered\ninvalid names/keywords provided: {0}\nplease check if these group names or keywords are actually valid GAMESS inputs\nif so, unsupported groups can be generated by providing the keywords as a single argument:\ngenerate_gamess_input(\n ...,\n group_name = obj(assign keywords),\n ...,\n )'.format(sorted(invalid_names)),'generate_gamess_input')
    #end if
    gi = GamessInput()

    # handle groups provided directly by the user
    # use aliases to guard against namespace collisions w/ nexus (e.g. system)
    group_names = kwset & GamessInput.all_group_aliases
    for name in group_names:
        group_info = kw[name]
        vname = name
        if name in GamessInput.group_aliases:
            name = GamessInput.group_aliases[name]
        #end if
        if isinstance(group_info,obj):
            # promote dict-valued entries to indexed arrays
            for n in group_info.keys():
                v = group_info[n]
                if isinstance(v,dict):
                    group_info[n] = GIarray(v)
                #end if
            #end for
            # NOTE(review): this Group check is nested inside the obj branch,
            # so a Group that is not also an obj falls through to the error
            # below despite the error text -- confirm intended hierarchy
            if isinstance(group_info,Group):
                gi[name] = group_info
            elif name in GamessInput.keyspec_groups:
                gi[name] = GamessInput.keyspec_groups[name](**group_info)
            #elif name in GamessInput.cardspec_groups:
            #    gi[name] = GamessInput.cardspec_groups[name](**group_info)
            elif name in GamessInput.key_groups:
                gi[name] = KeywordGroup(**group_info)
            elif name in GamessInput.card_groups:
                GamessInput.class_error('card group {0} cannot be generated from a keyword list\nkeyword list provided:\n{1}'.format(name,group_info),'generate_gamess_input')
            elif name in GamessInput.formatted_groups:
                GamessInput.class_error('formatted group {0} cannot be generated from a keyword list\nkeyword list provided:\n{1}'.format(name,group_info),'generate_gamess_input')
            else:
                gi[name] = KeywordGroup(**group_info) # assume keyword group
            #end if
            del kw[vname]
            del kwrem[vname]
        elif name in GamessInput.group_keyword_overlap:
            # the name is actually a keyword; the loop below will place it
            None
        else:
            GamessInput.class_error('invalid information provided to initialize group {0}\nyou must provide a dict, obj, or Group\nyou provided {1}'.format(vname,group_info),'generate_gamess_input')
        #end if
    #end for

    # load keywords into groups by group order
    # this may not be correct for overlapping keywords between groups!
    # user will have to supply explicit keyword subsets by group in obj's as above
    for name in GamessInput.keyspec_group_order:
        group_type = GamessInput.keyspec_groups[name]
        keywords = group_type.keywords & set(kwrem.keys())
        if len(keywords)>0:
            group_info = obj()
            group_info.move_from(kwrem,keywords)
            gi[name] = group_type(**group_info)
        #end if
    #end for
    if len(kwrem)>0:
        GamessInput.class_error('encountered unrecognized keywords\nunrecognized keywords: {0}\nthese keywords may belong to groups not fully implemented here\nfully supported groups: {1}\nunsupported groups can be generated by providing the keywords as a single argument: group_name = obj(assign keywords)'.format(sorted(kwrem),GamessInput.keyspec_group_order))
    #end if

    # handle nexus specific input generation keywords
    #   ecp  287
    #   data  37
    if pskw.system!=None and not 'data' in gi:
        # build the $DATA (and optionally $ECP) groups from the physical system
        system = pskw.system
        if not 'contrl' in gi:
            gi.contrl = ContrlGroup()
        #end if
        # allow user override of charge and multiplicity from physical system
        gi.contrl.set_optional(
            icharg = system.net_charge,
            mult   = system.net_spin+1,
            )
        s = system.structure
        if s.has_folded():
            sf = s.folded_structure
        else:
            sf = s
        #end if
        elem_ecp = s.elem
        elem = sf.elem
        pos  = sf.pos
        pskw.symmetry = pskw.symmetry.strip()
        data = '{0}\n{1}\n'.format(pskw.descriptor,pskw.symmetry)
        if pskw.symmetry!='C1':
            # non-C1 symmetry requires an extra blank line in $DATA
            data+='\n'
        #end if
        if pskw.pseudos is None:
            # all-electron: atoms with atomic numbers, optional basis text
            if pskw.bases!=None:
                bss = nexus_noncore.basissets.bases_by_atom(*pskw.bases)
            else:
                bss = obj()
                if 'coord' not in gi.contrl:
                    gi.contrl.coord = 'unique'
                #end if
            #end if
            for i in range(len(elem)):
                a = elem[i]
                Z = pt[a].atomic_number
                data+='{0} {1:3.2f} {2:16.8f} {3:16.8f} {4:16.8f}\n'.format(a,Z,*pos[i])
                if a in bss:
                    data+=bss[a].text+'\n\n'
                #end if
            #end for
        else:
            # pseudopotential run: basis text per atom plus an $ECP group
            gi.contrl.set(
                coord = 'unique',
                ecp   = 'read'
                )
            pps = nexus_noncore.pseudopotentials.pseudos_by_atom(*pskw.pseudos)
            for i,a in enumerate(elem):
                Z = pt[a].atomic_number
                data+='{0} {1} {2:16.8f} {3:16.8f} {4:16.8f}\n'.format(a,Z,*pos[i])
                if a in pps:
                    data += pps[a].basis_text+'\n\n'
                #end if
            #end for
            # full ECP text on first occurrence of each species, name-only after
            ecp = ''
            atoms = set()
            for i,a in enumerate(elem_ecp):
                if a in pps:
                    pp = pps[a]
                    if a in atoms:
                        ecp += pp.pp_name+'\n'
                    else:
                        ecp += pp.pp_text+'\n'
                    #end if
                #end if
                atoms.add(a)
            #end for
            gi.ecp = FormattedGroup(ecp)
        #end if
        gi.data = FormattedGroup(data)
    #end if
    return gi
#end def generate_any_gamess_input
def check_keyspec_groups():
    """Self-consistency check of all KeywordSpecGroup definitions.

    Verifies that type sets (integers/reals/bools/strings/arrays) are
    disjoint subsets of keywords, that allowed_values keys are keywords
    with correctly-typed members, and reports (as informational notes)
    keywords shared between groups.  Developer tool: exits the process.
    """
    print 'checking GamessInput KeywordSpecGroups'
    groups = GamessInput.keyspec_groups
    group_order = GamessInput.group_order
    # spec'd groups in canonical order
    glist = []
    for group_name in group_order:
        if group_name in groups:
            glist.append(group_name)
        #end if
    #end for
    failed = False
    #check for unrecognized groups
    extra_groups = set(groups.keys())-set(group_order)
    if len(extra_groups)>0:
        failed = True
        print '  encountered unrecognized keyspec groups: {0}'.format(sorted(extra_groups))
    #end if
    #check that integers, reals, bools, strings, and arrays are non-overlapping subsets of keywords
    #check that allowed_values are a subset of keywords and values specified are of the correct type
    for group_name in glist:
        g = groups[group_name]
        go = obj(
            integers = g.integers,
            reals    = g.reals,
            bools    = g.bools,
            strings  = g.strings,
            arrays   = g.arrays
            )
        # pairwise overlaps between the type sets
        overlaps = obj()
        for tname1,tset1 in go.iteritems():
            for tname2,tset2 in go.iteritems():
                if tname1!=tname2:
                    overlap = tset1 & tset2
                    if len(overlap)>0:
                        overlaps[tname1,tname2] = sorted(overlap)
                    #end if
                #end if
            #end for
        #end for
        if len(overlaps)>0:
            failed = True
            msg = '  keyspec group {0} has overlapping keywords'.format(g.__name__)
            for tname1,tname2 in sorted(overlaps.keys()):
                msg += '  \n    {0} {1} overlap: {2}'.format(tname1,tname2,overlaps[tname1,tname2])
            #end for
            print msg
        #end if
        # each type set must be a subset of the declared keywords
        for tname in sorted(go.keys()):
            extra_keys = go[tname]-g.keywords
            if len(extra_keys)>0:
                failed = True
                print '  keyspec group {0} has unrecognized {1} keywords:\n    {2}'.format(g.__name__,tname,sorted(extra_keys))
            #end if
        #end for
        extra_keys = set(g.allowed_values.keys())-g.keywords
        if len(extra_keys)>0:
            failed = True
            print '  keyspec group {0} has unrecognized allowed_value keywords:\n    {1}'.format(g.__name__,sorted(extra_keys))
        #end if
        # every keyword should have a type assignment
        # (iterating the obj yields its value sets)
        type_keys = set()
        for keys in go:
            type_keys |= keys
        #end for
        undefined = g.keywords-type_keys
        if len(undefined)>0:
            print '  keyspec group {0} has keywords w/o type assignment:\n    {1}'.format(g.__name__,sorted(undefined))
        #end if
        #check that allowed values for each keyword have the right type
        to = obj(
            integers = int,
            reals    = float,
            bools    = bool,
            strings  = str,
            arrays   = ndarray
            )
        for tname in sorted(go.keys()):
            type = to[tname]
            for kw in sorted(go[tname]):
                if kw in g.allowed_values:
                    for val in g.allowed_values[kw]:
                        if not isinstance(val,type):
                            failed = True
                            print '  allowed values of {0} keyword {1} are not all {2}: {3}'.format(g.__name__,kw,tname,sorted(g.allowed_values[kw]))
                            break
                        #end if
                    #end for
                #end if
            #end for
        #end for
    #end for
    #note any overlapping keywords between groups (this is a feature, not an error)
    overlaps = obj()
    for gname1 in glist:
        kw1 = groups[gname1].keywords
        for gname2 in glist:
            kw2 = groups[gname2].keywords
            if gname1!=gname2:
                overlap = kw1 & kw2
                if len(overlap)>0:
                    tup = tuple(sorted((gname1,gname2)))
                    overlaps[tup] = sorted(overlap)
                #end if
            #end if
        #end for
    #end for
    if len(overlaps)>0:
        print '\n  Note: some groups have overlapping keywords'
        for gname1,gname2 in sorted(overlaps.keys()):
            print '    groups {0} and {1} have overlapping keywords:\n      {2}'.format(gname1,gname2,overlaps[gname1,gname2])
        #end for
    #end if
    #note any overlapping keyword and group names (also a feature)
    overlap = GamessInput.all_keywords & set(GamessInput.group_order)
    if len(overlap)>0:
        print '\n  Note: some group names overlap with keywords:\n    {0}'.format(sorted(overlap))
    #end if
    if failed:
        print '\ncheck failed, see messages above and fix implementation'
    else:
        print '\ncheck passed'
    #end if
    exit()
#end def check_keyspec_groups
#check_keyspec_groups() # uncomment this to check keyword spec group self-consistency
| 1.828125 | 2 |
ecommerce/core/migrations/0006_auto_20200911_0447.py | berrondo/ecommerce | 1 | 12760445 | # Generated by Django 3.1.1 on 2020-09-11 04:47
from django.conf import settings
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the misspelled 'Costumer' model to 'Customer' and rename
    the related 'Cart.user' field to 'Cart.customer' to match."""

    # NOTE(review): presumably disabled because RenameModel cannot run
    # inside a single transaction on this project's backend — confirm.
    atomic = False

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0012_alter_user_first_name_max_length'),
        ('admin', '0003_logentry_add_action_flag_choices'),
        ('core', '0005_auto_20200911_0435'),
    ]

    operations = [
        migrations.RenameModel(
            old_name='Costumer',
            new_name='Customer',
        ),
        migrations.RenameField(
            model_name='cart',
            old_name='user',
            new_name='customer',
        ),
    ]
| 1.796875 | 2 |
train.py | xinlongye/classification-tf2 | 1 | 12760446 | <reponame>xinlongye/classification-tf2
from functools import partial
import numpy as np
import tensorflow as tf
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
from tensorflow.keras.optimizers import Adam
from nets import freeze_layers, get_model_from_name
from utils.callbacks import (ExponentDecayScheduler, LossHistory,
ModelCheckpoint)
from utils.dataloader import ClsDatasets
from utils.utils import get_classes
from utils.utils_fit import fit_one_epoch
# Enable memory growth so TensorFlow does not grab all GPU memory at once.
gpus = tf.config.experimental.list_physical_devices(device_type='GPU')
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)

# ----------------------------------------#
#   Main entry point
# ----------------------------------------#
if __name__ == "__main__":
    # ----------------------------------------------------#
    #   Whether to train in eager mode
    # ----------------------------------------------------#
    eager = False
    # ------------------------------------------------------#
    #   When training on your own dataset be sure to change
    #   classes_path to the txt file that lists the classes
    #   of your dataset.
    # ------------------------------------------------------#
    classes_path = 'model_data/cls_classes.txt'
    # ------------------------------------------------------#
    #   Input image size
    # ------------------------------------------------------#
    input_shape = [224, 224]
    # ------------------------------------------------------#
    #   Backbone to use:
    #   mobilenet, resnet50 and vgg16 are common classification networks
    # ------------------------------------------------------#
    backbone = "mobilenet"
    # ------------------------------------------------------#
    #   Width multiplier (alpha) for mobilenet.
    #   Only effective when backbone='mobilenet'.
    # ------------------------------------------------------#
    alpha = 0.25
    # ----------------------------------------------------------------------------------------------------------------------------#
    #   See the README for how to download the weight files (also available via network drive). Pretrained weights are
    #   generic across datasets because the extracted features are generic. The most important part of the pretrained
    #   weights is the backbone feature-extraction part, used for feature extraction.
    #   Pretrained weights are needed in 99% of cases; without them the backbone weights are too random, feature
    #   extraction is poor, and training results suffer.
    #
    #   If training was interrupted, set model_path to a checkpoint file in the logs folder to reload the partially
    #   trained weights, and adjust the freeze/unfreeze stage parameters below to keep the epoch numbering continuous.
    #
    #   When model_path = '' no whole-model weights are loaded.
    #
    #   Here the weights of the whole model are used, so they are loaded in train.py.
    #   To start from backbone-only pretrained weights, set model_path to the backbone weights; only the backbone is then loaded.
    #   To train from scratch, set model_path = '' and Freeze_Train = False; training then starts from scratch with no frozen stage.
    # ----------------------------------------------------------------------------------------------------------------------------#
    model_path = "model_data/mobilenet_2_5_224_tf_no_top.h5"
    # ------------------------------------------------------#
    #   Whether to use freeze training: by default the backbone
    #   is frozen first and unfrozen later.
    # ------------------------------------------------------#
    Freeze_Train = True
    # ------------------------------------------------------#
    #   Annotation file with image paths and labels
    # ------------------------------------------------------#
    annotation_path = "cls_train.txt"
    # ------------------------------------------------------#
    #   Train/validation split; by default 10% of the data
    #   is used for validation.
    # ------------------------------------------------------#
    val_split = 0.1
    # ------------------------------------------------------#
    #   Whether to use multi-threaded data loading; 1 disables it.
    #   Enabling it speeds up data reading but uses more memory.
    #   Only enable when I/O is the bottleneck, i.e. the GPU is
    #   much faster than image loading.
    # ------------------------------------------------------#
    num_workers = 1

    # ------------------------------------------------------#
    #   Read the class names
    # ------------------------------------------------------#
    class_names, num_classes = get_classes(classes_path)

    assert backbone in ["mobilenet", "resnet50", "vgg16"]

    # ------------------------------------------------------#
    #   Build the classification model
    # ------------------------------------------------------#
    if backbone == "mobilenet":
        model = get_model_from_name[backbone](input_shape=[input_shape[0], input_shape[1], 3], classes=num_classes,
                                              alpha=alpha)
    else:
        model = get_model_from_name[backbone](input_shape=[input_shape[0], input_shape[1], 3], classes=num_classes)

    if model_path != "":
        # ------------------------------------------------------#
        #   Load pretrained weights
        # ------------------------------------------------------#
        print('Load weights {}.'.format(model_path))
        model.load_weights(model_path, by_name=True, skip_mismatch=True)

    # -------------------------------------------------------------------------------#
    #   Training callbacks:
    #   logging is the TensorBoard log directory
    #   checkpoint controls weight saving; period sets how many epochs between saves
    #   reduce_lr sets the learning-rate decay schedule
    #   early_stopping stops training automatically when val_loss stops improving,
    #   i.e. the model has essentially converged
    # -------------------------------------------------------------------------------#
    logging = TensorBoard(log_dir='logs/')
    checkpoint = ModelCheckpoint('logs/ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
                                 monitor='val_loss', save_weights_only=True, save_best_only=False, period=1)
    reduce_lr = ExponentDecayScheduler(decay_rate=0.94, verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=10, verbose=1)
    loss_history = LossHistory('logs/')

    # ----------------------------------------------------#
    #   The validation split is performed here in train.py
    # ----------------------------------------------------#
    with open(annotation_path, "r") as f:
        lines = f.readlines()
    # Fixed seed so the train/val split is reproducible across runs.
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    if Freeze_Train:
        for i in range(freeze_layers[backbone]):
            model.layers[i].trainable = False

    # ------------------------------------------------------#
    #   Backbone features are generic; freezing it speeds up
    #   training and protects the weights early in training.
    #   Init_Epoch is the starting epoch
    #   Freeze_Epoch is the number of frozen-training epochs
    #   Epoch is the total number of training epochs
    #   If you hit OOM / insufficient GPU memory, reduce batch_size
    # ------------------------------------------------------#
    if True:
        # --------------------------------------------#
        #   Do not make batch_size too small, or
        #   training results will be poor
        # --------------------------------------------#
        batch_size = 32
        Lr = 1e-3
        Init_Epoch = 0
        Freeze_Epoch = 50

        epoch_step = num_train // batch_size
        epoch_step_val = num_val // batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError('数据集过小,无法进行训练,请扩充数据集。')

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))

        train_dataloader = ClsDatasets(lines[:num_train], input_shape, batch_size, num_classes, train=True)
        val_dataloader = ClsDatasets(lines[num_train:], input_shape, batch_size, num_classes, train=False)

        if eager:
            gen = tf.data.Dataset.from_generator(partial(train_dataloader.generate), (tf.float32, tf.float32))
            gen_val = tf.data.Dataset.from_generator(partial(val_dataloader.generate), (tf.float32, tf.float32))

            gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
            gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)

            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=Lr, decay_steps=epoch_step, decay_rate=0.95, staircase=True
            )
            optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

            for epoch in range(Init_Epoch, Freeze_Epoch):
                fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val,
                              Freeze_Epoch)
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=Adam(lr=Lr),
                          metrics=['categorical_accuracy'])

            model.fit_generator(
                generator=train_dataloader,
                steps_per_epoch=epoch_step,
                validation_data=val_dataloader,
                validation_steps=epoch_step_val,
                epochs=Freeze_Epoch,
                initial_epoch=Init_Epoch,
                use_multiprocessing=True if num_workers > 1 else False,
                workers=num_workers,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping, loss_history]
            )

    # Unfreeze the backbone for the second training stage.
    for i in range(freeze_layers[backbone]):
        model.layers[i].trainable = True

    if True:
        # --------------------------------------------#
        #   Do not make batch_size too small, or
        #   training results will be poor
        # --------------------------------------------#
        batch_size = 32
        Lr = 1e-4
        Freeze_Epoch = 50
        Epoch = 100

        epoch_step = num_train // batch_size
        epoch_step_val = num_val // batch_size

        if epoch_step == 0 or epoch_step_val == 0:
            raise ValueError("数据集过小,无法进行训练,请扩充数据集。")

        print('Train on {} samples, val on {} samples, with batch size {}.'.format(num_train, num_val, batch_size))

        train_dataloader = ClsDatasets(lines[:num_train], input_shape, batch_size, num_classes, train=True)
        val_dataloader = ClsDatasets(lines[num_train:], input_shape, batch_size, num_classes, train=False)

        if eager:
            gen = tf.data.Dataset.from_generator(partial(train_dataloader.generate), (tf.float32, tf.float32))
            gen_val = tf.data.Dataset.from_generator(partial(val_dataloader.generate), (tf.float32, tf.float32))

            gen = gen.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)
            gen_val = gen_val.shuffle(buffer_size=batch_size).prefetch(buffer_size=batch_size)

            lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                initial_learning_rate=Lr, decay_steps=epoch_step, decay_rate=0.95, staircase=True
            )
            optimizer = tf.keras.optimizers.Adam(learning_rate=lr_schedule)

            for epoch in range(Freeze_Epoch, Epoch):
                fit_one_epoch(model, loss_history, optimizer, epoch, epoch_step, epoch_step_val, gen, gen_val, Epoch)
        else:
            model.compile(loss='categorical_crossentropy',
                          optimizer=Adam(lr=Lr),
                          metrics=['categorical_accuracy'])

            model.fit_generator(
                generator=train_dataloader,
                steps_per_epoch=epoch_step,
                validation_data=val_dataloader,
                validation_steps=epoch_step_val,
                epochs=Epoch,
                initial_epoch=Freeze_Epoch,
                use_multiprocessing=True if num_workers > 1 else False,
                workers=num_workers,
                callbacks=[logging, checkpoint, reduce_lr, early_stopping, loss_history]
            )
| 2.1875 | 2 |
src/finmag/util/mesh_templates_test.py | davidcortesortuno/finmag | 10 | 12760447 | #!/usr/bin/env python
import pytest
import os
import numpy as np
import dolfin as df
from math import pi
from meshes import mesh_volume
from mesh_templates import *
import logging
# Relative tolerances for the mesh-volume checks used throughout the tests.
# loose tolerance for bad approximations (e.g. for a spherical mesh)
TOL1 = 1e-2
TOL2 = 1e-7  # intermediate tolerance (used e.g. for the sum of two meshes;
             # the strict tolerance won't work here because Netgen seems to
             # mesh combined meshes slightly differently than their components)
# strict tolerance where we expect almost exact values (e.g. for a box mesh)
TOL3 = 1e-14

logger = logging.getLogger("finmag")
def check_mesh_volume(mesh, vol_expected, rtol, atol=0.0):
    """Assert that the volume of `mesh` equals `vol_expected`.

    The comparison uses `np.allclose` with the given relative tolerance
    `rtol` and absolute tolerance `atol`. On mismatch the expected and
    computed volumes are printed before the assertion fails.
    """
    vol_mesh = mesh_volume(mesh)
    logger.debug("Checking mesh volume. Expected: {}, got: {} (relative error: {})".format(
        vol_expected, vol_mesh, abs((vol_expected - vol_mesh) / vol_expected)))
    if not np.allclose(vol_mesh, vol_expected, atol=atol, rtol=rtol):
        # Fix: the original used Python 2 print statements here, which are
        # a SyntaxError under Python 3 (the rest of the file is py3-style).
        print("[DDD] Expected volume: {}".format(vol_expected))
        print("[DDD] Computed volume: {}".format(vol_mesh))
    assert np.allclose(vol_mesh, vol_expected, atol=atol, rtol=rtol)
def test_mesh_templates(tmpdir):
    """The abstract MeshTemplate base class must not be usable directly."""
    os.chdir(str(tmpdir))
    template = MeshTemplate()
    with pytest.raises(NotImplementedError):
        template.create_mesh('generic_mesh.xml.gz')
def test_disallowed_names(tmpdir):
    """
    A ValueError must be raised when a mesh template is given a name
    that coincides with one of the Netgen primitives.
    """
    for forbidden_name in netgen_primitives:
        with pytest.raises(ValueError):
            Sphere(r=10, name=forbidden_name)
def test_hash():
    """The template hash depends only on the effective maxh value."""
    sphere = Sphere(r=10, name='MySphere')
    hash_generic = sphere.hash(maxh=3.0)
    hash_specific = sphere.hash(maxh_MySphere=3.0)
    hash_coarser = sphere.hash(maxh=4.0)

    assert hash_generic == '50f3b55770e40ba7a5f8e62d7ff7d327'
    assert hash_specific == hash_generic
    assert hash_coarser == '1ee55186811cfc21f22e17fbad35bfed'
def test_sphere(tmpdir):
    """Meshes of a sphere are saved under the expected filenames and
    have (approximately) the analytical sphere volume."""
    os.chdir(str(tmpdir))
    radius = 20.0
    sphere = Sphere(radius, center=(2, 3, -4))

    sphere.create_mesh(maxh=8.0, save_result=True, directory='foo')
    sphere.create_mesh(maxh=10.0, save_result=True, filename='bar/sphere.xml.gz')
    assert os.path.exists('foo/sphere__center_2_0_3_0_-4_0__r_20_0__maxh_8_0.xml.gz')
    assert os.path.exists('bar/sphere.xml.gz')

    mesh = sphere.create_mesh(maxh=2.5, save_result=False)
    check_mesh_volume(mesh, 4. / 3 * pi * radius ** 3, TOL1)
def test_elliptical_nanodisk(tmpdir):
    """Check valign handling and the mesh volume of EllipticalNanodisk."""
    os.chdir(str(tmpdir))
    d1, d2, h = 30.0, 20.0, 5.0

    # Each legal vertical alignment is accepted and stored.
    disk_bottom = EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='bottom')
    assert disk_bottom.valign == 'bottom'
    disk_center = EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='center')
    assert disk_center.valign == 'center'
    disk_top = EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='top')
    assert disk_top.valign == 'top'

    # 'valign' must be one of 'top', 'bottom', 'center'.
    with pytest.raises(ValueError):
        EllipticalNanodisk(d1, d2, h, center=(2, 3, -4), valign='foo')

    mesh = disk_bottom.create_mesh(maxh=2.5)
    assert os.path.exists(
        'elliptical_nanodisk__d1_30_0__d2_20_0__h_5_0__center_2_0_3_0_-4_0__valign_bottom__maxh_2_5.xml.gz')
    check_mesh_volume(mesh, pi * (0.5 * d1) * (0.5 * d2) * h, TOL1)
def test_nanodisk(tmpdir):
    """Check valign handling and the mesh volume of Nanodisk."""
    os.chdir(str(tmpdir))
    d, h = 20.0, 5.0

    # Each legal vertical alignment is accepted and stored.
    disk_bottom = Nanodisk(d, h, center=(2, 3, -4), valign='bottom')
    assert disk_bottom.valign == 'bottom'
    disk_center = Nanodisk(d, h, center=(2, 3, -4), valign='center')
    assert disk_center.valign == 'center'
    disk_top = Nanodisk(d, h, center=(2, 3, -4), valign='top')
    assert disk_top.valign == 'top'

    # Any other alignment value is rejected.
    with pytest.raises(ValueError):
        Nanodisk(d, h, center=(2, 3, -4), valign='foo')

    mesh = disk_bottom.create_mesh(maxh=2.5)
    assert os.path.exists(
        'nanodisk__d_20_0__h_5_0__center_2_0_3_0_-4_0__valign_bottom__maxh_2_5.xml.gz')
    check_mesh_volume(mesh, pi * (0.5 * d) ** 2 * h, TOL1)
def test_mesh_sum(tmpdir):
    """
    Combining templates with '+' requires distinct names; the combined
    mesh is saved under a hash-based filename and its volume matches
    both the analytical value and the sum of the component volumes.
    """
    os.chdir(str(tmpdir))
    r1 = 10.0
    r2 = 18.0
    r3 = 12.0
    maxh = 2.0

    # This should raise an error because the two spheres have the same
    # name (which is given automatically).
    sphere1 = Sphere(r1, center=(-30, 0, 0))
    sphere2 = Sphere(r2, center=(+30, 0, 0))
    with pytest.raises(ValueError):
        _ = sphere1 + sphere2

    # Same again, but with different names.
    sphere1 = Sphere(r1, center=(-30, 0, 0), name='sphere_1')
    sphere2 = Sphere(r2, center=(+30, 0, 0), name='sphere_2')
    sphere3 = Sphere(r3, center=(0, 10, 0), name='sphere_3')
    three_spheres = sphere1 + sphere2 + sphere3
    mesh = three_spheres.create_mesh(
        maxh=maxh, save_result=True, directory=str(tmpdir))
    # Fix: removed the leftover debug glob/print block that was explicitly
    # marked "for debugging only; will be removed again soon".
    meshfilename = "mesh_sum__3c528b79a337a0ffa711746e7d346c81.xml.gz"
    assert os.path.exists(os.path.join(str(tmpdir), meshfilename))

    vol1 = mesh_volume(sphere1.create_mesh(maxh=maxh))
    vol2 = mesh_volume(sphere2.create_mesh(maxh=maxh))
    vol3 = mesh_volume(sphere3.create_mesh(maxh=maxh))
    vol_exact = sum([4. / 3 * pi * r ** 3 for r in [r1, r2, r3]])
    check_mesh_volume(mesh, vol_exact, TOL1)
    check_mesh_volume(mesh, vol1 + vol2 + vol3, TOL2)
def test_mesh_difference(tmpdir):
    """
    Create two boxes with some overlap, subtract the second from the
    first, and verify the volume of the remaining part.
    """
    os.chdir(str(tmpdir))
    # Top-right-rear corner of box1 and bottom-left-front corner of box2.
    x1, y1, z1 = 50.0, 30.0, 20.0
    x2, y2, z2 = 30.0, 20.0, 15.0

    # Build the two overlapping boxes and their difference.
    box1 = Box(0, 0, 0, x1, y1, z1, name='box1')
    box2 = Box(x2, y2, z2, x1 + 10, y1 + 10, z1 + 10, name='box2')
    difference = box1 - box2

    mesh = difference.create_mesh(
        maxh=10.0, save_result=True, directory=str(tmpdir))
    meshfilename = "mesh_difference__dd77171c4364ace36c40e5f5fe94951f.xml.gz"
    assert os.path.exists(os.path.join(str(tmpdir), meshfilename))

    vol_box1 = x1 * y1 * z1
    vol_overlap = (x1 - x2) * (y1 - y2) * (z1 - z2)
    check_mesh_volume(mesh, vol_box1 - vol_overlap, TOL3)
def test_maxh_with_mesh_primitive(tmpdir):
    """Check maxh resolution for MeshPrimitive (specific beats generic)."""
    os.chdir(str(tmpdir))

    primitive = MeshPrimitive(name='foo')
    assert primitive._get_maxh(maxh=2.0, maxh_foo=5.0) == 5.0
    assert primitive._get_maxh(maxh=2.0, maxh_bar=5.0) == 2.0
    with pytest.raises(ValueError):
        primitive._get_maxh(random_arg=42)

    # We don't use full CSG strings here because we only want to test
    # the maxh functionality.
    primitive = MeshPrimitive(name='foo', csg_string='-maxh = {maxh_foo}')
    assert primitive.csg_stub(maxh=2.0) == '-maxh = 2.0'
    assert primitive.csg_stub(maxh_foo=3.0) == '-maxh = 3.0'
    # 'personal' value of maxh should take precedence over generic one
    assert primitive.csg_stub(maxh=2.0, maxh_foo=3.0) == '-maxh = 3.0'
    with pytest.raises(ValueError):
        primitive.csg_stub(maxh_bar=4.0)

    sphere = Sphere(r=10.0)
    sphere.csg_stub(maxh=2.0)
    sphere = Sphere(r=5.0, name='my_sphere')
    sphere.csg_stub(maxh_my_sphere=3.0)
def test_mesh_specific_maxh(tmpdir):
    """
    A mesh-specific maxh can be passed via a keyword of the form
    'maxh_NAME', where NAME is the template's name; an unknown name
    must raise a ValueError.
    """
    os.chdir(str(tmpdir))
    sphere = Sphere(r=10.0, name='foobar')
    sphere.create_mesh(maxh=5.0)
    sphere.create_mesh(maxh_foobar=5.0)
    with pytest.raises(ValueError):
        sphere.create_mesh(maxh_quux=5.0)
def test_global_maxh_can_be_omitted_if_specific_maxh_is_provided(tmpdir):
    """The global maxh may be omitted whenever every template involved
    has its own specific maxh value."""
    os.chdir(str(tmpdir))

    # A single sphere: global maxh and sphere-specific maxh both work.
    sphere = Sphere(r=10.0, name='foobar')
    sphere.create_mesh(maxh=3.0)
    sphere.create_mesh(maxh_foobar=3.0)

    # A combined mesh: if all specific maxh values are given,
    # the global one can be omitted.
    sphere1 = Sphere(r=10, name='sphere1')
    sphere2 = Sphere(r=10, center=(20, 0, 0), name='sphere2')
    combined = sphere1 + sphere2
    combined.create_mesh(maxh_sphere1=4.0, maxh_sphere2=5.0)
def test_different_mesh_discretisations_for_combined_meshes(tmpdir):
    """
    A combined mesh of two spheres can use a generic maxh plus a
    specific value for the second sphere.

    It is hard to verify the discretisation directly, so the mesh is
    built twice — once with a finer and once with a coarser value for
    the second sphere — and the finer mesh must have more vertices.
    """
    os.chdir(str(tmpdir))
    radius_a = 10.0
    radius_b = 20.0
    sphere_a = Sphere(radius_a, center=(-30, 0, 0), name='sphere1')
    sphere_b = Sphere(radius_b, center=(+30, 0, 0), name='sphere2')
    combined = sphere_a + sphere_b

    mesh_fine = combined.create_mesh(
        maxh=5.0, maxh_sphere2=8.0, save_result=True, directory=str(tmpdir))
    mesh_coarse = combined.create_mesh(
        maxh=5.0, maxh_sphere2=10.0, save_result=True, directory=str(tmpdir))
    assert mesh_fine.num_vertices() > mesh_coarse.num_vertices()
def test_box(tmpdir):
    """Box meshes are saved under the expected filenames and have the
    exact analytical volume."""
    os.chdir(str(tmpdir))
    x0, y0, z0 = 0, 0, 0
    x1, y1, z1 = 10, 20, 30
    box = Box(x0, y0, z0, x1, y1, z1)

    box.create_mesh(maxh=8.0, save_result=True, directory='foo')
    box.create_mesh(maxh=10.0, save_result=True, filename='bar/box.xml.gz')
    assert os.path.exists('foo/box__0_0__0_0__0_0__10_0__20_0__30_0__maxh_8_0.xml.gz')
    assert os.path.exists('bar/box.xml.gz')

    mesh = df.Mesh('bar/box.xml.gz')
    check_mesh_volume(mesh, (x1 - x0) * (y1 - y0) * (z1 - z0), TOL3)
| 2.390625 | 2 |
uiautomator2/_init.py | snakx/uiautomator2 | 1 | 12760448 | import requests
import _main
import _pk as p
import _x86 as j
import os
import logging
class _Service():
    """Downloads the uiautomator2 server artifacts (apks, the x86 jar and
    the Windows launcher scripts) from GitHub into the current working
    directory. Each public method returns True on success, False on failure.
    """

    # Base URL of the repository the artifacts are fetched from.
    _REPO = 'https://github.com/snakx/x86-uiautomator2-server/raw/main'

    def _download(self, url, filename, description):
        """Fetch `url` and write the response body to `filename`.

        Returns True on success, False on any download or write error.
        Fixes over the original per-artifact copies: the exception itself is
        logged (the original logged `e.__context__`, which is usually None),
        the file handle is closed via `with`, and the logged filename always
        matches the file actually downloaded.
        """
        try:
            logging.debug("Download {} {}".format(description, filename))
            logging.debug(url)
            r = requests.get(url, allow_redirects=True)
        except Exception as e:
            logging.error(e)
            return False
        try:
            with open(filename, 'wb') as f:
                f.write(r.content)
            logging.debug('Download {} successfully completed'.format(description))
            return True
        except Exception as e:
            logging.error(e)
            return False

    # Instrumentation
    def _apk_cache_i(self):
        """Download the instrumentation apk."""
        return self._download(
            '{}/bin/{}'.format(self._REPO, p._apk()),
            p._apk(), 'instrumentation apk')

    # Release
    def _apk_cache_r(self):
        """Download the release apk."""
        return self._download(
            '{}/bin/{}'.format(self._REPO, p._apk2()),
            p._apk2(), 'release apk')

    # Jar
    def _jar_cache(self):
        """Download the x86 server jar."""
        return self._download(
            '{}/out/artifacts/x86_uiautomator2_server_jar/{}'.format(
                self._REPO, j._jar()),
            j._jar(), 'x86 jar')

    # vbs
    def _vbs_cache(self):
        """Download the vbs launcher script."""
        return self._download(
            '{}/bin/uiautomator2.vbs'.format(self._REPO),
            'uiautomator2.vbs', 'vbs script')

    # bat
    def _bat_cache(self):
        """Download the batch launcher script."""
        return self._download(
            '{}/bin/uiautomator2.bat'.format(self._REPO),
            'uiautomator2.bat', 'shell script')
Asteroid_Shooter_v2/powerups/bulletspam.py | fifiman/Asteroid-Shooter-v2 | 0 | 12760449 | <gh_stars>0
from ..assets import bulletSpamImage
from ..utils.timer import Timer
from ..config import bulletSpamTimeActive, bulletSpamTimeAlive
class BulletSpamPowerup():
    """Pickup that, once collected, switches the bullet controller into
    'bullet spam' mode for a limited duration.

    Lifecycle: the powerup starts alive (drawn on screen) with a timer that
    kills it if it is not collected within `bulletSpamTimeAlive`. When
    activated, that timer is replaced by one that deactivates the effect
    after `bulletSpamTimeActive`.
    """

    def __init__(self, screen, pos, bulletController):
        """Create the powerup at `pos`, drawing on `screen` and toggling
        the spam mode through `bulletController`."""
        self.screen = screen
        self.image = bulletSpamImage
        self.rect = None
        self.pos = pos

        self.bulletController = bulletController

        # alive: powerup still exists (drawn / collectible or effect active).
        # active: effect currently applied to the bullet controller.
        self.alive = 1
        self.active = 0

        # Timers for powerup.
        self.timers = []

        # Add timer for how long powerup is alive.
        self.timers.append(Timer(bulletSpamTimeAlive, self.killPowerup,
                                 callLimit=1))

        # Update initial bounding box.
        self.updateRect()

    def activatePowerup(self):
        # Mark powerup as active.
        self.active = 1

        # Activate bullet spam through bullet controller.
        self.bulletController.activateBulletSpam()

        # Replace the 'alive' timer with the active-duration timer.
        self.timers[:] = []
        self.timers.append(Timer(bulletSpamTimeActive, self.deactivatePowerup,
                                 callLimit=1))

    def deactivatePowerup(self):
        # Deactivate bullet spam through bullet controller.
        self.bulletController.deactivateBulletSpam()

        self.active = 0
        self.alive = 0

    def killPowerup(self):
        # Called when the alive-timer expires; ends the effect if it is
        # currently running, then removes the powerup.
        if self.active:
            self.deactivatePowerup()

        self.alive = 0

    def update(self, timePassed):
        # Advance all pending timers by the elapsed time.
        for timer in self.timers:
            timer.update(timePassed)

    def updateRect(self):
        # Center the image's bounding box on the powerup position.
        imageWidth, imageHeight = self.image.get_size()

        self.rect = self.image.get_rect().move(
            self.pos.x - imageWidth / 2,
            self.pos.y - imageHeight / 2)

    def blitMe(self):
        # Draw only if alive and not activated yet.
        if self.alive and not self.active:
            # First update bounding box.
            self.updateRect()

            # Draw asteroid to screen.
            self.screen.blit(self.image, self.rect)
| 2.53125 | 3 |
pro_upload/img_db/models.py | yongfang117/pro_useful_code | 0 | 12760450 | from django.db import models
class IMG(models.Model):
    """Simple image record: an uploaded picture plus a display name."""

    img = models.ImageField(upload_to='img')  # upload_to names the storage sub-directory; it is created automatically on first upload
    name = models.CharField(max_length=100)
| 2.21875 | 2 |