Dataset schema (one row per Python source file; ranges are the minimum and maximum shown in the original header, and "nullable" marks columns flagged with ⌀):

| column | type | range / notes |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 distinct values |
| lang | string | 1 distinct value |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |

In the rows below, the repo path, repo name, head hexsha and licenses are identical across the stars/issues/forks column groups unless noted otherwise, so they are listed once per row; "null" marks missing counts and event dates.
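For orientation, here is a minimal sketch (not part of the dataset) of how rows with this schema might be inspected once loaded into pandas; the two sample rows reuse values from records shown later in this dump, and the 0.3 threshold is purely illustrative.

import pandas as pd

# Two sample rows built from records listed below (most columns omitted).
rows = [
    {"hexsha": "a2cef5581d6639f72a0f834dc67419807bab8ec4", "size": 759,
     "max_stars_repo_name": "robert-w-gries/dear-petition", "max_stars_count": 4,
     "score_documentation": 0.246377},
    {"hexsha": "a2cf483b7a318378a4b51126b7de177267f4c55e", "size": 23,
     "max_stars_repo_name": "amlanbanerjee/auto_ml", "max_stars_count": 1671,
     "score_documentation": 0.347826},
]
df = pd.DataFrame(rows)

# Keep files whose documentation score clears an (illustrative) threshold.
well_documented = df[df["score_documentation"] >= 0.3]
print(well_documented[["max_stars_repo_name", "size", "score_documentation"]])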
Row 1: hexsha a2cef5581d6639f72a0f834dc67419807bab8ec4 | size 759 | ext py | lang Python
path: dear_petition/petition/migrations/0008_auto_20200208_0222.py
repo: robert-w-gries/dear-petition @ 35244afc8e967b41ae5265ae31fd13b26e4e835a | licenses: ["MIT"]
stars: 4 (2020-04-01T14:42:45.000Z to 2021-12-12T21:11:11.000Z) | issues: 142 (2019-08-12T19:08:34.000Z to 2022-03-29T23:05:35.000Z) | forks: 8 (2020-02-04T20:37:00.000Z to 2021-03-28T13:28:32.000Z)
content:
# Generated by Django 2.2.4 on 2020-02-08 02:22
from django.db import migrations
def move_batch_fks(apps, schema_editor):
Batch = apps.get_model("petition", "Batch")
CIPRSRecord = apps.get_model("petition", "CIPRSRecord")
for batch in Batch.objects.all():
print(f"Adding batch {batch.pk} to {batch.records.count()} records")
batch.records.update(batch=batch)
first_batch = Batch.objects.order_by("pk").first()
for record in CIPRSRecord.objects.all():
if not record.batch:
record.batch = first_batch
record.save()
class Migration(migrations.Migration):
dependencies = [
("petition", "0007_auto_20200208_0221"),
]
operations = [migrations.RunPython(move_batch_fks)]
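As a side note, migrations.RunPython also accepts a reverse callable; the following is a minimal sketch (not part of the original migration) of how the operation above could be declared with Django's built-in no-op reverse, so the migration can be unapplied without trying to undo the data move.

# Hypothetical variant of the operations list above.
operations = [
    migrations.RunPython(move_batch_fks, migrations.RunPython.noop),
]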
Row 1 stats: avg_line_length 29.192308 | max_line_length 76 | alphanum_fraction 0.671937 | classes 172 (score 0.226614) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 187 (score 0.246377)

Row 2: hexsha a2cf483b7a318378a4b51126b7de177267f4c55e | size 23 | ext py | lang Python
path: auto_ml/_version.py
repo: amlanbanerjee/auto_ml @ db8e1d2cfa93f13a21e55739acfc8d99837e91b0 | licenses: ["MIT"]
stars: 1,671 (2016-08-09T04:44:48.000Z to 2022-03-27T01:29:23.000Z) | issues: 428 (2016-08-08T00:13:04.000Z to 2022-01-19T10:09:05.000Z) | forks: 334 (2016-08-29T12:34:18.000Z to 2022-01-31T09:14:30.000Z)
content:
__version__ = "2.9.10"
Row 2 stats: avg_line_length 11.5 | max_line_length 22 | alphanum_fraction 0.652174 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 8 (score 0.347826)

Row 3: hexsha a2d07750f771787adbd733681780afac8dc73bc5 | size 3,442 | ext py | lang Python
path: maya/libs/sceneutils.py
repo: bhsingleton/dcc @ 9ad59f1cb8282df938062e15c020688dd268a722 | licenses: ["MIT"]
stars: 1 (2021-08-06T16:04:24.000Z to 2021-08-06T16:04:24.000Z) | issues: null | forks: 1 (2021-08-06T16:04:31.000Z to 2021-08-06T16:04:31.000Z)
content:
import maya.cmds as mc
import os
import logging
logging.basicConfig()
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
def isNewScene():
"""
Method used to check if this is an untitled scene file.
:rtype: bool
"""
return len(mc.file(query=True, sceneName=True)) == 0
def isSaveRequired():
"""
Method used to check if the open scene file has changes that need to be saved.
:rtype: bool
"""
return mc.file(query=True, modified=True)
def currentFilePath():
"""
Convenience method used to retrieve the path of the open scene file.
:rtype: str
"""
if not isNewScene():
return os.path.normpath(mc.file(query=True, sceneName=True))
else:
return ''
def currentFilename():
"""
Convenience method used to retrieve the name of the open scene file.
:rtype: str
"""
return os.path.split(currentFilePath())[1]
def currentDirectory():
"""
Convenience method used to retrieve the directory of the open scene file.
:rtype: str
"""
return os.path.split(currentFilePath())[0]
def removeUserAttributes():
"""
    Convenience method used to remove any user attributes that have carried over using fbx.
:rtype: None
"""
# Iterate through selection
#
nodeNames = mc.ls(sl=True)
for nodeName in nodeNames:
# Check if node has any user attributes
#
attrNames = mc.listAttr(nodeName, userDefined=True)
if attrNames is None:
continue
for attrName in attrNames:
log.info('Removing "%s.%s" attribute.' % (nodeName, attrName))
mc.deleteAttr('%s.%s' % (nodeName, attrName))
def unloadTurtlePlugin():
"""
Convenience method used to unload the turtle plugin from the open scene file.
:rtype: None
"""
# Check if turtle is loaded
#
isLoaded = mc.pluginInfo('Turtle', query=True, loaded=True)
if not isLoaded:
log.info('Could not locate "Turtle" in the open scene file.')
return
# Remove all node types associated with turtle
#
nodeTypes = mc.pluginInfo('Turtle', query=True, dependNode=True)
for nodeType in nodeTypes:
# List all nodes by type
#
nodeNames = mc.ls(type=nodeType)
numNodeNames = len(nodeNames)
if numNodeNames == 0:
continue
# Unlock and remove nodes
#
mc.lockNode(nodeNames, lock=False)
mc.delete(nodeNames)
# Flush undo queue
#
mc.flushUndo()
# Remove shelf from tab bar
#
if mc.shelfLayout('TURTLE', query=True, exists=True):
log.info('Removing "TURTLE" from the shelf tab!')
mc.deleteUI('TURTLE', layout=True)
# Unlock plugin
#
mc.unloadPlugin('Turtle')
def resetWindowPositions():
"""
Method used to move all of the active maya windows to the top left corner.
:rtype: None
"""
# Collect all windows
#
windowNames = mc.lsUI(windows=True)
for windowName in windowNames:
log.info('Resetting "%s" window...' % windowName)
mc.window(windowName, edit=True, topLeftCorner=[0, 0])
def resetStartupCameras():
"""
Method used to fix the startup cameras when they're thrown out of wack.
:rtype: None
"""
mc.viewSet('top', home=True)
mc.viewSet('front', home=True)
mc.viewSet('side', home=True)
Row 3 stats: avg_line_length 20.011628 | max_line_length 92 | alphanum_fraction 0.621732 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 1,446 (score 0.420105)

Row 4: hexsha a2d10542879056ad7800cdebe98204d350251551 | size 346 | ext py | lang Python
path: diffir/__init__.py
repo: capreolus-ir/diffir @ 90906ce4b7d5f23d6190eea26020f9e4096cb0cd | licenses: ["Apache-2.0"]
stars: 12 (2021-03-10T17:04:05.000Z to 2022-01-13T15:44:34.000Z) | issues: 7 (2021-05-19T21:28:52.000Z to 2021-12-16T16:01:40.000Z) | forks: null
content:
__version__ = "0.2.0"
from diffir.weight import Weight
from diffir.weight.custom import CustomWeight
from diffir.weight.unsupervised import ExactMatchWeight
from diffir.measure import Measure
from diffir.measure.qrels import QrelMeasure
from diffir.measure.unsupervised import TopkMeasure
from diffir.weight.weights_builder import WeightBuilder
Row 4 stats: avg_line_length 34.6 | max_line_length 55 | alphanum_fraction 0.858382 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 7 (score 0.020231)

Row 5: hexsha a2d1763a00e0070a7178e1445d0a7e1fdef3a6a9 | size 34,160 | ext py | lang Python
path: tool/pylib/generator/output/PartBuilder.py
repo: mever/qooxdoo @ 2bb08cb6c4ddfaf2425e6efff07deb17e960a050 | licenses: ["MIT"]
stars: 1 (2021-02-05T23:00:25.000Z to 2021-02-05T23:00:25.000Z) | issues: 3 (2019-02-18T04:22:52.000Z to 2021-02-21T15:02:54.000Z) | forks: 1 (2021-06-03T23:08:44.000Z to 2021-06-03T23:08:44.000Z)
content:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
################################################################################
#
# qooxdoo - the new era of web development
#
# http://qooxdoo.org
#
# Copyright:
# 2006-2010 1&1 Internet AG, Germany, http://www.1und1.de
#
# License:
# MIT: https://opensource.org/licenses/MIT
# See the LICENSE file in the project's top-level directory for details.
#
# Authors:
# * Sebastian Werner (wpbasti)
# * Thomas Herchenroeder (thron7)
# * Richard Sternagel (rsternagel)
#
################################################################################
##
# PartBuilder -- create packages and associates parts to packages, from parts configuration and class list
#
# Interface:
# - PartBuilder.getPackages()
##
from misc.Collections import OrderedDict
from misc.Collections import DefaultOrderedDict
from generator.output.Part import Part
from generator.output.Package import Package
from generator.code.Class import CompileOptions
from generator.config.Config import ConfigurationError
class PartBuilder(object):
def __init__(self, console, depLoader):
self._console = console
self._depLoader = depLoader
##
# interface method
def getPackages(self, partIncludes, smartExclude, jobContext, script):
# Get config settings
jobConfig = jobContext["jobconf"]
self._jobconf = jobConfig
minPackageSize = jobConfig.get("packages/sizes/min-package", 0)
minPackageSizeForUnshared = jobConfig.get("packages/sizes/min-package-unshared", None)
partsCfg = jobConfig.get("packages/parts", {})
collapseCfg = jobConfig.get("packages/collapse", [])
boot = jobConfig.get("packages/init", "boot")
script.boot = boot
# Automatically add boot part to collapse list
if boot in partsCfg and not boot in collapseCfg:
collapseCfg.append(boot)
# Preprocess part data
script.parts = {} # map of Parts
script.parts = self._getParts(partIncludes, partsCfg, script)
self._checkPartsConfig(script.parts)
script.parts = self._getPartDeps(script, smartExclude)
# Compute packages
script.packages = [] # array of Packages
script.packages = self._createPackages(script)
self._checkPackagesAgainstClassList(script)
script.sortParts()
self._printPartStats(script)
# Collapse parts by collapse order
self._console.info("Collapsing parts ", feed=False)
self.collapsePartsByOrder(script)
# Collapse parts by package size
self.collapsePartsBySize(script, minPackageSize, minPackageSizeForUnshared)
# Size collapsing might introduce new dependencies to the boot part
# try to assure a single package
if len(script.parts[script.boot].packages) > 1:
quickCollapseConfig = { 0 : set((script.parts[script.boot],))}
self.collapsePartsByOrder(script, quickCollapseConfig)
assert len(script.parts[script.boot].packages) == 1
self._printPartStats(script)
# Post process results
resultParts = self._getFinalPartData(script)
# re-sort part packages, to clean up ordering issues from merging
# - (the issue here is that part packages are only re-sorted during merges
# when actually new packages are added to the part, but *not* when existing
# packages receive a merge package whos package dependencies are already
# fullfilled in the part; still package dependencies among the existing
# packages might change so a re-sorting is necessary to support proper
# load order)
script.sortParts()
script = self._getFinalClassList(script)
#resultClasses = util.dictToList(resultClasses) # turn map into list, easier for upstream methods
self._console.dotclear()
if True: #self._console.getLevel() < self._console._levels["info"]: # - not working!
self.verifyParts(script.parts, script)
# Return
# {Map} resultParts[partId] = [packageId1, packageId2]
# {Array} resultClasses[packageId] = [class1, class2]
#return boot, resultParts, resultClasses
return resultParts, script
##
# Check head lists (part.initial_deps) are non-overlapping
# @param {Map} { partId : generator.code.Part }
def _checkPartsConfig(self, parts):
headclasses = dict((x.name,set(x.initial_deps)) for x in parts.values())
for partid, parthead in headclasses.items():
for partid1, parthead1 in ((x,y) for x,y in headclasses.items() if x!=partid):
#print "Checking %s - %s" % (partid, partid1)
overlap = parthead.intersection(parthead1)
if overlap:
raise ConfigurationError("Part '%s' and '%s' have overlapping includes: %r" % (partid, partid1, list(overlap)))
##
# Check all classes from the global class list are contained in
# *some* package.
def _checkPackagesAgainstClassList(self, script):
allpackageclasses = set([])
for package in script.packages:
allpackageclasses.update(package.classes)
missingclasses = set(script.classesObj).difference(allpackageclasses)
if missingclasses:
raise ValueError("These necessary classes are not covered by parts: %r" % list(missingclasses))
def verifyParts(self, partsMap, script):
def handleError(msg):
if bomb_on_error:
raise RuntimeError(msg)
else:
self._console.warn("! "+msg)
self._console.info("Verifying parts ", feed=False)
self._console.indent()
bomb_on_error = self._jobconf.get("packages/verifier-bombs-on-error", True)
allpartsclasses = []
# 5) Check consistency between package.part_mask and part.packages
self._console.debug("Verifying packages-to-parts relations...")
self._console.indent()
for package in script.packages:
for part in partsMap.values():
if package.part_mask & part.bit_mask:
if package not in part.packages:
handleError("Package '%d' supposed to be in part '%s', but isn't" % (package.id, part.name))
self._console.outdent()
self._console.debug("Verifying individual parts...")
#self._console.indent()
for part in partsMap.values():
if part.is_ignored: # skip ignored parts
continue
self._console.debug("Part: %s" % part.name)
self._console.dot()
self._console.indent()
# get set of current classes in this part
classList = []
classPackage = []
for packageIdx, package in enumerate(part.packages): # TODO: not sure this is sorted
for pos,classId in enumerate(x.id for x in package.classes):
classList.append(classId)
classPackage.append((package.id,pos))
allpartsclasses.extend(classList)
# 1) Check the initial part defining classes are included (trivial sanity)
for classId in part.initial_deps:
if classId not in classList:
handleError("Defining class not included in part: '%s'" % (classId,))
# 2) Check individual class deps are fullfilled in part
# 3) Check classes are in load-order
# alternative: check part.deps against classSet
classIdx = -1
for packageIdx, package in enumerate(part.packages):
for clazz in package.classes:
classIdx += 1
classDeps, _ = clazz.getCombinedDeps(script.classesAll, script.variants, script.jobconfig)
loadDeps = set(x.name for x in classDeps['load'])
ignoreDeps = set(x.name for x in classDeps['ignore'])
# we cannot enforce runDeps here, as e.g. the 'boot'
# part necessarily lacks classes from subsequent parts
# (that's the whole point of parts)
for depsId in loadDeps.difference(ignoreDeps):
try:
depsIdx = classList.index(depsId)
except ValueError:
handleError("Unfullfilled dependency of class '%s'[%d,%d]: '%s'" %
(clazz.id, package.id, classIdx, depsId))
continue
if depsId in loadDeps and classIdx < depsIdx:
handleError("Load-dep loaded after using class ('%s'[%d,%d]): '%s'[%d,%d]" %
(clazz.id, package.id, classIdx,
depsId, classPackage[depsIdx][0], classPackage[depsIdx][1]))
#if missingDeps: # there is a load dep not in the part
# self._console.warn("Unfullfilled load dependencies of class '%s': %r" % (classId, tuple(missingDeps)))
self._console.outdent()
#self._console.outdent()
# 4) Check all classes from the global class list are contained in
# *some* part
missingclasses = set(x.id for x in script.classesObj).difference(allpartsclasses)
if missingclasses:
handleError("These necessary classes are not covered by parts: %r" % list(missingclasses))
self._console.dotclear()
self._console.outdent()
return
##
# create the set of parts, each part with a unique single-bit bit mask
# @returns {Map} parts = { partName : Part() }
def _getParts(self, partIncludes, partsCfg, script):
self._console.debug("Creating part structures...")
self._console.indent()
parts = {}
for partPos, partId in enumerate(partIncludes):
npart = Part(partId) # create new Part object
npart.bit_mask = script.getPartBitMask() # add unique bit
initial_deps = list(set(partIncludes[partId]).difference(script.excludes)) # defining classes from config minus expanded excludes
npart.initial_deps = initial_deps # for later cross-part checking
npart.deps = initial_deps[:] # own copy, as this will get expanded
if 'expected-load-order' in partsCfg[partId]:
npart.collapse_index = partsCfg[partId]['expected-load-order']
if 'no-merge-private-package' in partsCfg[partId]:
npart.no_merge_private_package = partsCfg[partId]['no-merge-private-package']
parts[partId] = npart
self._console.debug("Part #%s => %s" % (partId, npart.bit_mask))
self._console.outdent()
return parts
##
# create the complete list of class dependencies for each part
def _getPartDeps(self, script, smartExclude):
parts = script.parts
variants = script.variants
globalClassList = [x.id for x in script.classesObj]
self._console.debug("")
self._console.info("Assembling parts")
self._console.indent()
for part in parts.values():
self._console.info("part %s " % part.name, feed=False)
# Exclude initial classes of other parts
partExcludes = []
for otherPartId in parts:
if otherPartId != part.name:
partExcludes.extend(parts[otherPartId].initial_deps)
# Extend with smart excludes
partExcludes.extend(smartExclude)
# Remove unknown classes before checking dependencies
for classId in part.deps[:]:
if not classId in globalClassList :
part.deps.remove(classId)
# Checking we have something to include
if len(part.deps) == 0:
self._console.info("Part #%s is ignored in current configuration" % part.name)
part.is_ignored = True
continue
# Finally resolve the dependencies
# do not allow blocked loaddeps, as this would make the part unloadable
partClasses = self._depLoader.classlistFromInclude(part.deps, partExcludes, variants, script=script, allowBlockLoaddeps=False)
# Remove all unknown classes -- TODO: Can this ever happen here?!
for classId in partClasses[:]: # need to work on a copy because of changes in the loop
#if not classId in globalClassList:
if not classId in script.classes: # check against application class list
self._console.warn("Removing unknown class dependency '%s' from config of part #%s" % (classId, part.name))
partClasses.remove(classId)
# Store
self._console.debug("Part #%s depends on %s classes" % (part.name, len(partClasses)))
part.deps = partClasses
self._console.outdent()
return parts
##
# Cut an initial set of packages out of the set of classes needed by the parts
# @returns {Array} [ Package ]
def _createPackages(self, script):
##
# Collect classes from parts, recording which class is used in which part
# @returns {Map} { classId : parts_bit_mask }
def getClassesFromParts(partObjs):
allClasses = DefaultOrderedDict(lambda: 0)
for part in partObjs:
for classId in part.deps:
allClasses[classId] |= part.bit_mask # a class used by multiple parts gets multiple bits
return allClasses
##
# Create packages from classes
# @returns {Array} [ Package ]
def getPackagesFromClasses(allClasses):
packages = {}
for classId in allClasses:
pkgId = allClasses[classId]
# create a Package if necessary
if pkgId not in packages:
package = Package(pkgId)
packages[pkgId] = package
# store classId with this package
#packages[pkgId].classes.append(classId)
packages[pkgId].classes.append(classesObj[classId])
return packages.values()
# ---------------------------------------------------------------
self._console.indent()
parts = script.parts.values()
classesObj = OrderedDict((cls.id, cls) for cls in script.classesObj)
# generate list of all classes from the part dependencies
allClasses = getClassesFromParts(parts)
# Create a package for each set of classes which
# are used by the same parts
packages = getPackagesFromClasses(allClasses)
# Register packages with using parts
for package in packages:
for part in parts:
if package.id & part.bit_mask:
part.packages.append(package)
# Register dependencies between packages
for package in packages:
# get all direct (load)deps of this package
allDeps = set(())
for clazz in package.classes:
classDeps, _ = clazz.getCombinedDeps(script.classesAll, script.variants, script.jobconfig)
loadDeps = set(x.name for x in classDeps['load'])
allDeps.update(loadDeps)
# record the other packages in which these classes are contained
for classId in allDeps:
for otherpackage in packages:
if otherpackage != package and classId in (x.id for x in otherpackage.classes):
package.packageDeps.add(otherpackage)
self._console.outdent()
return packages
def _computePackageSize(self, package, variants, script):
packageSize = 0
compOptions = CompileOptions()
compOptions.optimize = script.optimize
compOptions.format = True
compOptions.variantset = variants
self._console.indent()
for clazz in package.classes:
packageSize += clazz.getCompiledSize(compOptions, featuremap=script._featureMap)
self._console.outdent()
return packageSize
##
# Support for merging small packages.
#
# Small (as specified in the config) packages are detected, starting with
# those that are used by the fewest parts, and are merged into packages that
# are used by the same and more parts.
def collapsePartsBySize(self, script, minPackageSize, minPackageSizeForUnshared):
if minPackageSize == None or minPackageSize == 0:
return
variants = script.variants
self._console.debug("")
self._console.debug("Collapsing parts by package sizes...")
self._console.indent()
self._console.debug("Minimum size: %sKB" % minPackageSize)
self._console.indent()
if minPackageSizeForUnshared == None:
minPackageSizeForUnshared = minPackageSize
# Start at the end with the sorted list
# e.g. merge 4->7 etc.
allPackages = script.packagesSorted()
allPackages.reverse()
# make a dict {part.bit_mask: part}
allPartBitMasks = {}
[allPartBitMasks.setdefault(x.bit_mask, x) for x in script.parts.values()]
oldpackages = set([])
while oldpackages != set(script.packages):
oldpackages = set(script.packages)
allPackages = script.packagesSorted()
allPackages.reverse()
# Test and optimize 'fromId'
for fromPackage in allPackages:
self._console.dot()
# possibly protect part-private package from merging
if fromPackage.id in allPartBitMasks.keys(): # fromPackage.id == a part's bit mask
if allPartBitMasks[fromPackage.id].no_merge_private_package:
self._console.debug("Skipping private package #%s" % (fromPackage.id,))
continue
packageSize = self._computePackageSize(fromPackage, variants, script) / 1024
self._console.debug("Package #%s: %sKB" % (fromPackage.id, packageSize))
# check selectablility
if (fromPackage.part_count == 1) and (packageSize >= minPackageSizeForUnshared):
continue
if (fromPackage.part_count > 1) and (packageSize >= minPackageSize):
continue
# assert: the package is shared and smaller than minPackageSize
# or: the package is unshared and smaller than minPackageSizeForUnshared
self._console.indent()
mergedPackage, targetPackage = self._mergePackage(fromPackage, script, script.packages)
if mergedPackage: # mergedPackage == fromPackage on success
script.packages.remove(fromPackage)
self._console.outdent()
self._console.dotclear()
self._console.outdent()
self._console.outdent()
##
# get the "smallest" package (in the sense of _sortPackages()) that is
# in all parts mergePackage is in, and is earlier in the corresponding
# packages lists
def _findMergeTarget(self, mergePackage, packages):
##
# if another package id has the same bits turned on, it is available
# in the same parts.
def areInSameParts(mergePackage, package):
return (mergePackage.id & package.id) == mergePackage.id
##
# check if any of the deps of mergePackage depend on targetPackage -
# if merging mergePackage into targetPackage, this would be creating
# circular dependencies
def noCircularDeps(mergePackage, targetPackage):
for package in mergePackage.packageDeps:
if targetPackage in package.packageDeps:
return False
return True
##
# check that the targetPackage is loaded in those parts
# where mergePackage's deps are loaded
def depsAvailWhereTarget (mergePackage, targetPackage):
for depsPackage in mergePackage.packageDeps:
if not areInSameParts(targetPackage, depsPackage):
return False
return True
# ----------------------------------------------------------------------
allPackages = reversed(Package.sort(packages))
# sorting and reversing assures we try "smaller" package id's first
addtl_merge_constraints = self._jobconf.get("packages/additional-merge-constraints", True)
for targetPackage in allPackages:
if mergePackage.id == targetPackage.id: # no self-merging ;)
continue
if not areInSameParts(mergePackage, targetPackage):
self._console.debug("Problematic #%d (different parts using)" % targetPackage.id)
continue
if not noCircularDeps(mergePackage, targetPackage):
self._console.debug("Problematic #%d (circular dependencies)" % targetPackage.id)
if addtl_merge_constraints:
continue
# why accept this by default?
if not depsAvailWhereTarget(mergePackage, targetPackage):
self._console.debug("Problematic #%d (dependencies not always available)" % targetPackage.id)
if addtl_merge_constraints:
continue
# why accept this by default?
yield targetPackage
yield None
##
# Support for collapsing parts along their expected load order
#
# Packages are merged in parts that define an expected load order, starting
# with the boot part and continuing with groups of parts that have the same
# load index, in increasing order. Within a group, packages are merged from
# least used to more often used, and with packages unique to one of the parts
# in the group to packages that are common to all parts.
# Target packages for one group are blocked for the merge process of the next,
# to avoid merging all packages into one "monster" package that all parts
# share eventually.
def collapsePartsByOrder(self, script, collapse_groups=None):
def getCollapseGroupsOrdered(parts, ):
# returns dict of parts grouped by collapse index
# { 0 : set('boot'), 1 : set(part1, part2), 2 : ... }
collapseGroups = {}
# pre-define boot part with collapse index 0
boot = self._jobconf.get("packages/init", "boot")
collapseGroups[0] = set((parts[boot],))
# get configured load groups
for partname in parts:
part = parts[partname]
collidx = getattr(part, 'collapse_index', None)
if collidx:
if collidx < 1 : # not allowed
raise RuntimeError, "Collapse index must be 1 or greater (Part: %s)" % partname
else:
if collidx not in collapseGroups:
collapseGroups[collidx] = set(())
collapseGroups[collidx].add(part)
return collapseGroups
def isUnique(package, collapse_group):
return sum([int(package in part.packages) for part in collapse_group]) == 1
def isCommon(package, collapse_group):
return sum([int(package in part.packages) for part in collapse_group]) == len(collapse_group)
def getUniquePackages(part, collapse_group):
uniques = {}
for package in part.packages:
if isUnique(package, collapse_group):
if (package.id == part.bit_mask and # possibly protect "private" package
part.no_merge_private_package):
pass
else:
uniques[package.id] = package
return uniques
getUniquePackages.key = 'unique'
def getCommonPackages(part, collapse_group):
commons = {}
for package in part.packages:
if isCommon(package, collapse_group):
commons[package.id] = package
return commons
getCommonPackages.key = 'common'
def mergeGroupPackages(selectFunc, collapse_group, script, seen_targets):
self._console.debug("collapsing %s packages..." % selectFunc.key)
self._console.indent()
curr_targets = set(())
for part in collapse_group:
oldpackages = []
while oldpackages != part.packages:
oldpackages = part.packages[:]
for package in reversed(part.packagesSorted): # start with "smallest" package
selected_packages = selectFunc(part, collapse_group) # re-calculate b.o. modified part.packages
if package.id in selected_packages:
(mergedPackage,
targetPackage) = self._mergePackage(package, script,
selected_packages.values(), # TODO: How should areInSameParts() ever succeed with uniquePackages?!
seen_targets)
if mergedPackage: # on success: mergedPackage == package
script.packages.remove(mergedPackage)
curr_targets.add(targetPackage)
seen_targets.update(curr_targets)
self._console.outdent()
return script.parts, script.packages
# ---------------------------------------------------------------------
self._console.debug("")
self._console.debug("Collapsing parts by collapse order...")
self._console.indent()
if collapse_groups == None:
collapse_groups = getCollapseGroupsOrdered(script.parts, )
seen_targets = set(())
for collidx in sorted(collapse_groups.keys()): # go through groups in load order
self._console.dot()
collgrp = collapse_groups[collidx]
self._console.debug("Collapse group %d %r" % (collidx, [x.name for x in collgrp]))
self._console.indent()
script.parts, script.packages = mergeGroupPackages(getUniquePackages, collgrp, script, seen_targets)
script.parts, script.packages = mergeGroupPackages(getCommonPackages, collgrp, script, seen_targets)
self._console.outdent()
self._console.dotclear()
self._console.outdent()
return
##
# Try to find a target package for <fromPackage> within <packages> and, if
# found, merge <fromPackage> into the target package. <seen_targets>, if
# given, is a skip list for pot. targets.
#
# On merge, maintains package.dependencies and part.packages, but leaves it
# to the caller to pot. remove <fromPackage> from script.packages.
# @return (<fromPackage>,<toPackage>) on success, else (None,None)
def _mergePackage(self, fromPackage, script, packages, seen_targets=None):
def updatePartDependencies(part, packageDeps):
for package in packageDeps:
if package not in part.packages:
# add package
part.packages.append(package)
# update package's part bit mask
package.part_mask |= part.bit_mask
# make sure the new package's dependencies are also included
updatePartDependencies(part, package.packageDeps)
return
def mergeContAndDeps(fromPackage, toPackage):
# Merging package content
toPackage.classes.extend(fromPackage.classes)
# Merging package dependencies
depsDelta = fromPackage.packageDeps.difference(set((toPackage,))) # make sure toPackage is not included
self._console.debug("Adding packages dependencies to target package: %s" % (map(str, sorted([x.id for x in depsDelta])),))
toPackage.packageDeps.update(depsDelta)
toPackage.packageDeps.difference_update(set((fromPackage,))) # remove potential dependency to fromPackage
self._console.debug("Target package #%s now depends on: %s" % (toPackage.id, map(str, sorted([x.id for x in toPackage.packageDeps]))))
return toPackage
# ----------------------------------------------------------------------
self._console.debug("Search a target package for package #%s" % (fromPackage.id,))
self._console.indent()
# find toPackage
toPackage = None
for toPackage in self._findMergeTarget(fromPackage, packages):
if toPackage == None:
break
elif seen_targets != None:
if toPackage not in seen_targets:
break
else:
break
if not toPackage:
self._console.outdent()
return None, None
self._console.debug("Merge package #%s into #%s" % (fromPackage.id, toPackage.id))
self._console.indent()
# Merge package content and dependencies
toPackage = mergeContAndDeps(fromPackage, toPackage)
# Update package dependencies:
# all packages that depended on fromPackage depend now on toPackage
for package in script.packages:
if fromPackage in package.packageDeps:
# replace fromPackage with toPackage
package.packageDeps.difference_update(set((fromPackage,)))
package.packageDeps.update(set((toPackage,)))
# Update part information:
# remove the fromPackage from all parts using it, and add new dependencies to parts
# using toPackage
for part in script.parts.values():
# remove the merged package
if fromPackage in part.packages:
# we can simply remove the package, as we know the target package is also there
part.packages.remove(fromPackage)
# check additional dependencies for all parts
if toPackage in part.packages:
# this could be a part method
# if the toPackage is in part, we might need to add additional packages that toPackage now depends on
updatePartDependencies(part, fromPackage.packageDeps)
# remove of fromPackage from global packages list is easier handled in the caller
self._console.outdent()
self._console.outdent()
return fromPackage, toPackage # to allow caller check for merging and further clean-up fromPackage
def _getFinalPartData(self, script):
parts = script.parts
packageIds = [x.id for x in script.packagesSorted()]
resultParts = {}
for toId, fromId in enumerate(packageIds):
for partId in parts:
if fromId in parts[partId].packages:
if not partId in resultParts:
resultParts[partId] = [toId]
else:
resultParts[partId].append(toId)
return resultParts
def _getFinalClassList(self, script):
packages = script.packagesSorted()
for package in packages:
# TODO: temp. kludge, to pass classIds to sortClasses()
# sortClasses() should take Class() objects directly
classMap = OrderedDict((cls.id, cls) for cls in package.classes)
classIds = self._depLoader.sortClasses(classMap.keys(), script.variants, script.buildType)
package.classes = [classMap[x] for x in classIds]
return script
##
# <currently not used>
def _sortPackagesTopological(self, packages): # packages : [Package]
import graph
# create graph object
gr = graph.digraph()
# add classes as nodes
gr.add_nodes(packages)
# for each load dependency add a directed edge
for package in packages:
for dep in package.packageDeps:
gr.add_edge(package, dep)
# cycle check?
cycle_nodes = gr.find_cycle()
if cycle_nodes:
raise RuntimeError("Detected circular dependencies between packages: %r" % cycle_nodes)
packageList = gr.topological_sorting()
return packageList
def _printPartStats(self, script):
packages = dict([(x.id,x) for x in script.packages])
parts = script.parts
packageIds = packages.keys()
packageIds.sort()
packageIds.reverse()
self._console.debug("")
self._console.debug("Package summary : %d packages" % len(packageIds))
self._console.indent()
for packageId in packageIds:
self._console.debug("Package #%s contains %s classes" % (packageId, len(packages[packageId].classes)))
self._console.debug("%r" % packages[packageId].classes)
self._console.debug("Package #%s depends on these packages: %s" % (packageId, map(str, sorted([x.id for x in packages[packageId].packageDeps]))))
self._console.outdent()
self._console.debug("")
self._console.debug("Part summary : %d parts" % len(parts))
self._console.indent()
packages_used_in_parts = 0
for part in parts.values():
packages_used_in_parts += len(part.packages)
self._console.debug("Part #%s packages(%d): %s" % (part.name, len(part.packages), ", ".join('#'+str(x.id) for x in part.packages)))
self._console.debug("")
self._console.debug("Total of packages used in parts: %d" % packages_used_in_parts)
self._console.outdent()
self._console.debug("")
Row 5 stats: avg_line_length 42.593516 | max_line_length 157 | alphanum_fraction 0.600907 | classes 33,084 (score 0.968501) | generators 2,378 (score 0.069614) | decorators 0 (0) | async_functions 0 (0) | documentation 10,618 (score 0.310831)

Row 6: hexsha a2d2d2628caff1c2156c6ad988dc74d14a5fd8cd | size 6,486 | ext py | lang Python
path: factorizer/datasets/wmh.py
repo: pashtari/factorizer @ 730f295b403a90c1c691f99b529d5d32b635d0c6 | licenses: ["Apache-2.0"]
stars: 7 (2022-03-05T00:43:29.000Z to 2022-03-07T01:23:08.000Z) | issues: null | forks: 1 (2022-03-21T05:28:23.000Z to 2022-03-21T05:28:23.000Z)
content:
import sys
import numpy as np
import torch
from monai import transforms, data
from ..data import DataModule, ReadImaged, Renamed, Inferer
###################################
# Transform
###################################
def wmh_train_transform(
spacing=(1.0, 1.0, 1.0), spatial_size=(128, 128, 128), num_patches=1
):
train_transform = [
ReadImaged(["image", "label"]),
transforms.Lambdad("label", lambda x: (x == 1).astype(np.float32)),
transforms.AddChanneld("label"),
transforms.CropForegroundd(["image", "label"], source_key="image"),
transforms.NormalizeIntensityd("image", channel_wise=True),
transforms.Spacingd(
["image", "label"], pixdim=spacing, mode=("bilinear", "bilinear"),
),
transforms.SpatialPadd(["image", "label"], spatial_size=spatial_size),
transforms.RandCropByPosNegLabeld(
["image", "label"],
label_key="label",
spatial_size=spatial_size,
pos=1,
neg=1,
num_samples=num_patches,
image_key="image",
image_threshold=0,
),
transforms.RandAffined(
["image", "label"],
prob=0.15,
spatial_size=spatial_size,
rotate_range=[30 * np.pi / 180] * 3,
scale_range=[0.3] * 3,
mode=("bilinear", "bilinear"),
as_tensor_output=False,
),
transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=0),
transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=1),
transforms.RandFlipd(["image", "label"], prob=0.5, spatial_axis=2),
transforms.RandGaussianNoised("image", prob=0.15, std=0.1),
transforms.RandGaussianSmoothd(
"image",
prob=0.15,
sigma_x=(0.5, 1.5),
sigma_y=(0.5, 1.5),
sigma_z=(0.5, 1.5),
),
transforms.RandScaleIntensityd("image", prob=0.15, factors=0.3),
transforms.RandShiftIntensityd("image", prob=0.15, offsets=0.1),
transforms.RandAdjustContrastd("image", prob=0.15, gamma=(0.7, 1.5)),
transforms.AsDiscreted("label", threshold=0.5),
transforms.ToTensord(["image", "label"]),
Renamed(),
]
train_transform = transforms.Compose(train_transform)
return train_transform
def wmh_val_transform():
val_transform = [
ReadImaged(["image", "label"], allow_missing_keys=True),
transforms.Lambdad(
"label",
lambda x: (x == 1).astype(np.float32),
allow_missing_keys=True,
),
transforms.AddChanneld("label", allow_missing_keys=True),
transforms.NormalizeIntensityd(
"image", nonzero=True, channel_wise=True
),
transforms.ToTensord(["image", "label"], allow_missing_keys=True),
Renamed(),
]
val_transform = transforms.Compose(val_transform)
return val_transform
def wmh_test_transform():
return wmh_val_transform()
def wmh_vis_transform(spacing=(1.0, 1.0, 1.0)):
vis_transform = [
ReadImaged(["image", "label"], allow_missing_keys=True),
transforms.Lambdad(
"label",
lambda x: (x == 1).astype(np.float32),
allow_missing_keys=True,
),
transforms.AddChanneld("label", allow_missing_keys=True),
transforms.NormalizeIntensityd("image", channel_wise=True),
transforms.Spacingd(
keys=["image", "label"],
pixdim=spacing,
mode=("bilinear", "nearest"),
),
transforms.ToTensord(["image", "label"], allow_missing_keys=True),
Renamed(),
]
vis_transform = transforms.Compose(vis_transform)
return vis_transform
###################################
# Data module
###################################
class WMHDataModule(DataModule):
def __init__(
self,
data_properties,
spacing=(1.0, 1.0, 1.0),
spatial_size=(128, 128, 128),
num_patches=1,
num_splits=5,
split=0,
batch_size=2,
num_workers=None,
cache_num=sys.maxsize,
cache_rate=1.0,
progress=True,
copy_cache=True,
seed=42,
):
dataset_cls_params = {
"cache_num": cache_num,
"cache_rate": cache_rate,
"num_workers": num_workers,
"progress": progress,
"copy_cache": copy_cache,
}
dataset_cls = (data.CacheDataset, dataset_cls_params)
train_transform = wmh_train_transform(
spacing, spatial_size, num_patches
)
val_transform = wmh_val_transform()
test_transform = wmh_test_transform()
vis_transform = wmh_vis_transform(spacing)
super().__init__(
data_properties,
train_dataset_cls=dataset_cls,
val_dataset_cls=dataset_cls,
test_dataset_cls=dataset_cls,
train_transform=train_transform,
val_transform=val_transform,
test_transform=test_transform,
vis_transform=vis_transform,
num_splits=num_splits,
split=split,
batch_size=batch_size,
num_workers=num_workers,
seed=seed,
)
# alias
WMH = WMHDataModule
###################################
# Inference
###################################
class WMHInferer(Inferer):
def __init__(
self,
spacing=(1.0, 1.0, 1.0),
spatial_size=(128, 128, 128),
post=None,
write_dir=None,
output_dtype=None,
**kwargs,
) -> None:
# postprocessing transforms
if post == "logit":
post = transforms.Lambdad("input", lambda x: x)
output_dtype = np.float32 if output_dtype is None else output_dtype
elif post == "prob":
post = transforms.Lambdad("input", torch.sigmoid)
output_dtype = np.float32 if output_dtype is None else output_dtype
elif post == "class":
post = transforms.Lambdad("input", lambda x: x >= 0)
output_dtype = np.uint8 if output_dtype is None else output_dtype
else:
post = post
super().__init__(
spacing=spacing,
spatial_size=spatial_size,
post=post,
write_dir=write_dir,
output_dtype=output_dtype,
**kwargs,
)
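A small usage sketch (not from the original file), assuming the module above is importable as factorizer.datasets.wmh; the spatial size and patch count here are arbitrary examples, and the returned object is the monai transforms.Compose built by wmh_train_transform.

# Hypothetical usage of the training transform defined above.
from factorizer.datasets.wmh import wmh_train_transform

train_tf = wmh_train_transform(
    spacing=(1.0, 1.0, 1.0),
    spatial_size=(96, 96, 96),  # illustrative patch size, smaller than the default
    num_patches=2,
)
# train_tf expects dicts carrying "image" and "label" entries, as the keys above suggest.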
Row 6 stats: avg_line_length 31.033493 | max_line_length 79 | alphanum_fraction 0.561671 | classes 2,489 (score 0.38375) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 773 (score 0.11918)

Row 7: hexsha a2d721ef72b39de52022137d721dac292cbddcad | size 890 | ext py | lang Python
path: Python/Topics/Sending-Email/05-pdf-attachment.py
repo: shihab4t/Software-Development @ 0843881f2ba04d9fca34e44443b5f12f509f671e | licenses: ["Unlicense"]
stars: null | issues: null | forks: null
content:
import imghdr
import smtplib
import os
from email.message import EmailMessage
EMAIL_ADDRESS = os.environ.get("GMAIL_ADDRESS")
EMAIL_PASSWORD = os.environ.get("GMAIL_APP_PASS")
pdfs = ["/home/shihab4t/Downloads/Profile.pdf"]
with smtplib.SMTP_SSL("smtp.gmail.com", 465) as smtp:
smtp.login(EMAIL_ADDRESS, EMAIL_PASSWORD)
reciver = "shihab4tdev@gmail.com"
msg = EmailMessage()
msg["Subject"] = "Grab dinner this weekend? 2"
msg["From"] = EMAIL_ADDRESS
msg["To"] = reciver
msg.set_content("How about dinner at 6pm this Saturday")
for pdf in pdfs:
with open(pdf, "rb") as pdf:
pdf_data = pdf.read()
pdf_name = pdf.name
msg.add_attachment(pdf_data, maintype="application",
subtype="octet-stream", filename=pdf_name)
smtp.send_message(msg)
print(f"Email was sented to {reciver}")
Row 7 stats: avg_line_length 26.969697 | max_line_length 69 | alphanum_fraction 0.665169 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 258 (score 0.289888)

Row 8: hexsha a2d7927bd74ff2bc70037658a7110cb4dffa918c | size 43 | ext py | lang Python
path: rcds/project/__init__.py
repo: jordanbertasso/rcds @ d3d655a59a350042d65476793db84e761de04829 | licenses: ["BSD-3-Clause"]
stars: 5 (2020-07-13T12:40:02.000Z to 2021-08-21T11:18:28.000Z) | issues: 144 (2020-07-06T11:26:49.000Z to 2022-02-01T14:33:28.000Z) | forks: 7 (2020-07-22T12:38:32.000Z to 2021-12-21T14:27:54.000Z)
content:
from .project import Project # noqa: F401
Row 8 stats: avg_line_length 21.5 | max_line_length 42 | alphanum_fraction 0.744186 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 12 (score 0.27907)

Row 9: hexsha a2d972366674ffee05dbeed1f54a9dc88de6bb40 | size 163 | ext py | lang Python
path: MyEircode.py
repo: MrBrianMonaghan/mapping @ 1b525eaaad3b22709a53167b46c901ece365ecab | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: null
content:
import selenium
from selenium import webdriver
try:
browser = webdriver.Firefox()
    browser.get('https://mikekus.com')
except KeyboardInterrupt:
browser.quit()
Row 9 stats: avg_line_length 18.111111 | max_line_length 33 | alphanum_fraction 0.742331 | classes 0 (0) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 13 (score 0.079755)

Row 10: hexsha a2d986e45466635f24a005d6cc044f9cdfb62b88 | size 740 | ext py | lang Python
path: tests/test_rotor/rotor_test.py
repo (stars/issues columns): axevalley/enigma @ fdfa5a85dbd4675f195e00e4b7e22d976a3d9015 | licenses: ["MIT"]
repo (forks columns): lukeshiner/enigma @ 917066c8f33f67b43f092800ba46220d107f622b | licenses: ["MIT"]
stars: null | issues: 28 (2019-07-30T16:15:52.000Z to 2022-03-14T19:14:25.000Z) | forks: null
content:
"""Base class for rotor tests."""
import unittest
from enigma.rotor.reflector import Reflector
from enigma.rotor.rotor import Rotor
class RotorTest(unittest.TestCase):
"""Provides tools testing rotors."""
def get_rotor(
self,
wiring="EKMFLGDQVZNTOWYHXUSPAIBRCJ",
ring_setting=1,
position="A",
turnover_positions=["R"],
):
"""Return Rotor object."""
return Rotor(
wiring=wiring,
ring_setting=ring_setting,
position=position,
turnover_positions=turnover_positions,
)
def get_reflector(self, wiring="YRUHQSLDPXNGOKMIEBFZCWVJAT"):
"""Return Reflector object."""
return Reflector(wiring=wiring)
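A hypothetical concrete test case (not in the original file) showing how the helpers above might be reused; it relies only on the get_rotor and get_reflector signatures defined in RotorTest and on the Rotor and Reflector classes imported at the top of the module.

class RotorCreationTest(RotorTest):
    """Illustrative subclass reusing the RotorTest helpers."""

    def test_helpers_build_objects(self):
        # Only exercises the helper defaults plus one overridden argument.
        rotor = self.get_rotor(position="B")
        reflector = self.get_reflector()
        self.assertIsInstance(rotor, Rotor)
        self.assertIsInstance(reflector, Reflector)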
Row 10 stats: avg_line_length 24.666667 | max_line_length 65 | alphanum_fraction 0.631081 | classes 603 (score 0.814865) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 187 (score 0.252703)

Row 11: hexsha a2deabeee99e67fa9e9a47d417ca86a406f16c31 | size 2,186 | ext py | lang Python
path: kyu_8/check_the_exam/test_check_exam.py
repo: pedrocodacyorg2/codewars @ ba3ea81125b6082d867f0ae34c6c9be15e153966 | licenses: ["Unlicense"]
stars: 1 (2022-02-12T05:56:04.000Z to 2022-02-12T05:56:04.000Z) | issues: 182 (2020-04-30T00:51:36.000Z to 2021-09-07T04:15:05.000Z) | forks: 4 (2020-04-29T22:04:20.000Z to 2021-07-13T20:04:14.000Z)
content:
# Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ARRAYS NUMBERS BASIC LANGUAGE FEATURES
import unittest
import allure
from utils.log_func import print_log
from kyu_8.check_the_exam.check_exam import check_exam
@allure.epic('8 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Data Structures")
@allure.sub_suite("Unit Tests")
@allure.feature("Lists")
@allure.story('Check the exam')
@allure.tag('FUNDAMENTALS', 'ARRAYS', 'NUMBERS', 'BASIC LANGUAGE FEATURES')
@allure.link(url='https://www.codewars.com/kata/5a3dd29055519e23ec000074/train/python',
name='Source/Kata')
class CheckExamTestCase(unittest.TestCase):
"""
Testing check_exam function
"""
def test_check_exam(self):
"""
Testing check_exam function
The function should return the score
for this array of answers, giving +4
for each correct answer, -1 for each
incorrect answer, and +0 for each blank
answer(empty string).
:return:
"""
allure.dynamic.title("Testing check_exam function")
allure.dynamic.severity(allure.severity_level.NORMAL)
allure.dynamic.description_html('<h3>Codewars badge:</h3>'
'<img src="https://www.codewars.com/users/myFirstCode'
'/badges/large">'
'<h3>Test Description:</h3>'
"<p></p>")
with allure.step("Enter arr1 and arr2 and verify the output"):
data = [
(["a", "a", "b", "b"], ["a", "c", "b", "d"], 6),
(["a", "a", "c", "b"], ["a", "a", "b", ""], 7),
(["a", "a", "b", "c"], ["a", "a", "b", "c"], 16),
(["b", "c", "b", "a"], ["", "a", "a", "c"], 0),
]
for arr1, arr2, expected in data:
print_log(arr1=arr1,
arr2=arr2,
expected=expected)
self.assertEqual(expected,
check_exam(arr1, arr2))
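The check_exam implementation under test is not part of this record; below is a minimal sketch consistent with the scoring rule in the docstring (+4 for a correct answer, -1 for an incorrect one, 0 for a blank) and with the fourth test case, which implies negative totals are floored at zero.

def check_exam(arr1, arr2):
    # arr1: correct answers, arr2: submitted answers ("" means blank).
    score = sum(0 if a2 == "" else (4 if a1 == a2 else -1)
                for a1, a2 in zip(arr1, arr2))
    return max(score, 0)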
Row 11 stats: avg_line_length 34.15625 | max_line_length 94 | alphanum_fraction 0.531565 | classes 1,512 (score 0.691674) | generators 0 (0) | decorators 1,886 (score 0.862763) | async_functions 0 (0) | documentation 1,001 (score 0.457914)

Row 12: hexsha a2dec2415ed78800e66aae16391df2b37d8f56eb | size 1,193 | ext py | lang Python
path: pysoup/venv/__init__.py
repo: illBeRoy/pysoup @ 742fd6630e1be27c275cb8dc6ee94412472cb20b | licenses: ["MIT"]
stars: 4 (2016-02-21T12:40:44.000Z to 2019-06-13T13:23:19.000Z) | issues: null | forks: 1 (2020-07-16T12:22:12.000Z to 2020-07-16T12:22:12.000Z)
content:
import os.path
from twisted.internet import defer
import pysoup.utils
class Virtualenv(object):
def __init__(self, display_pip, path):
self._display_pipe = display_pip
self._path = path
@property
def path(self):
return self._path
@property
def venv_path(self):
return os.path.join(self._path, 'venv')
@property
def source_path(self):
return os.path.join(self.venv_path, 'bin/activate')
@defer.inlineCallbacks
def create(self):
self._display_pipe.log('Ensuring virtualenv environment at {0}'.format(self._path))
code = yield pysoup.utils.execute_shell_command('mkdir -p {0} && virtualenv --no-site-packages -q {0}'.format(self.venv_path))
if code != 0:
self._display_pipe.error('Failed to setup virtualenv at target! ({0})'.format(self._path))
raise Exception('Could not create virtualenv')
self._display_pipe.notify('Virtualenv is ready')
@defer.inlineCallbacks
def execute_in_venv(self, command):
code = yield pysoup.utils.execute_shell_command('source {0} && {1}'.format(self.source_path, command))
defer.returnValue(code)
Row 12 stats: avg_line_length 29.097561 | max_line_length 134 | alphanum_fraction 0.668064 | classes 1,118 (score 0.937133) | generators 665 (score 0.557418) | decorators 952 (score 0.797988) | async_functions 0 (0) | documentation 228 (score 0.191115)

Row 13: hexsha a2df2293ad90461c1622171c3d5669f2f6f7fd84 | size 2,791 | ext py | lang Python
path: yggdrasil/metaschema/datatypes/InstanceMetaschemaType.py
repo: astro-friedel/yggdrasil @ 5ecbfd083240965c20c502b4795b6dc93d94b020 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
content:
from yggdrasil.metaschema.datatypes import MetaschemaTypeError
from yggdrasil.metaschema.datatypes.MetaschemaType import MetaschemaType
from yggdrasil.metaschema.datatypes.JSONObjectMetaschemaType import (
JSONObjectMetaschemaType)
from yggdrasil.metaschema.properties.ArgsMetaschemaProperty import (
ArgsMetaschemaProperty)
class InstanceMetaschemaType(MetaschemaType):
r"""Type for evaluating instances of Python classes."""
name = 'instance'
description = 'Type for Python class instances.'
properties = ['class', 'args']
definition_properties = ['class']
metadata_properties = ['class', 'args']
extract_properties = ['class', 'args']
python_types = (object, )
cross_language_support = False
@classmethod
def validate(cls, obj, raise_errors=False):
r"""Validate an object to check if it could be of this type.
Args:
obj (object): Object to validate.
raise_errors (bool, optional): If True, errors will be raised when
the object fails to be validated. Defaults to False.
Returns:
bool: True if the object could be of this type, False otherwise.
"""
# Base not called because every python object should pass validation
# against the object class
try:
ArgsMetaschemaProperty.instance2args(obj)
return True
except MetaschemaTypeError:
if raise_errors:
raise ValueError("Class dosn't have an input_args attribute.")
return False
@classmethod
def encode_data(cls, obj, typedef):
r"""Encode an object's data.
Args:
obj (object): Object to encode.
typedef (dict): Type definition that should be used to encode the
object.
Returns:
string: Encoded object.
"""
args = ArgsMetaschemaProperty.instance2args(obj)
if isinstance(typedef, dict) and ('args' in typedef):
typedef_args = {'properties': typedef['args']}
else:
typedef_args = None
return JSONObjectMetaschemaType.encode_data(args, typedef_args)
@classmethod
def decode_data(cls, obj, typedef):
r"""Decode an object.
Args:
obj (string): Encoded object to decode.
typedef (dict): Type definition that should be used to decode the
object.
Returns:
object: Decoded object.
"""
# TODO: Normalization can be removed if metadata is normalized
typedef = cls.normalize_definition(typedef)
args = JSONObjectMetaschemaType.decode_data(
obj, {'properties': typedef.get('args', {})})
return typedef['class'](**args)
Row 13 stats: avg_line_length 34.036585 | max_line_length 78 | alphanum_fraction 0.636331 | classes 2,455 (score 0.879613) | generators 0 (0) | decorators 2,023 (score 0.72483) | async_functions 0 (0) | documentation 1,284 (score 0.46005)

Row 14: hexsha a2df9c5cd443a1cdbe81e54c4e448271480f6781 | size 368 | ext py | lang Python
path: battleships/migrations/0004_auto_20181202_1852.py
repo: ArturAdamczyk/Battleships @ 748e4fa87ed0c17c57abbdf5a0a2bca3c91dff24 | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
# Generated by Django 2.1.3 on 2018-12-02 17:52
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('battleships', '0003_auto_20181202_1832'),
]
operations = [
migrations.RenameField(
model_name='coordinate',
old_name='ship',
new_name='ship1',
),
]
Row 14 stats: avg_line_length 19.368421 | max_line_length 51 | alphanum_fraction 0.589674 | classes 283 (score 0.769022) | generators 0 (0) | decorators 0 (0) | async_functions 0 (0) | documentation 110 (score 0.298913)

Row 15: hexsha a2e147bc50d8522b84f76610398b1cf2e73d60bb | size 11,168 | ext py | lang Python
path: jme/stagecache/text_metadata.py
repo: jmeppley/stagecache @ a44f93b7936e1c6ea40dec0a31ad9c19d2415f3a | licenses: ["MIT"]
stars: null | issues: null | forks: null
content:
"""
Functions for storing and retrieving cache metadata from text files.
Each Cache asset is a path: /path/filename
There are four metadata files in the cache for each:
/path/.stagecache.filename/size The size of the asset in bytes
/path/.stagecache.filename/cache_lock The requested end time of the cache
/path/.stagecache.filename/log A record of past requests
/path/.stagecache.filename/write_lock
Exists if cache being updated
There are also global metadata files in cache_root:
.stagecache.global/asset_list list of assets in this cache
.stagecache.global/write_lock
Usage:
Initialize TargetMetadata() class with cache_root and target paths.
Initialize CacheMetadata() class with cache_root path
TargetMetadata Functions:
get_cached_target_size(): returns size and date from file
set_cached_target_size(size): writes size to file
get_last_lock_date(): returns the most recent lock end date
set_cache_lock_date(date): writes new date to lock file
get_write_lock():
mark file as in progress (wait for existing lock)
release_write_lock(): remove in_progress mark
CacheMetadata Functions:
get_write_lock()
iter_cached_files(locked=None):
return list of assets with sizes and lock dates
remove_cached_file(path): remove record of asset
add_cached_file(path): add record of asset
All functions take cache=cache_root as a kwarg
All get_ functions throw FileNotFound exception if asset not yet in cache
"""
import logging
import os
import time
import stat
from contextlib import contextmanager
LOGGER = logging.getLogger(name='metadata')
def get_cached_target(cache_root, target_path):
return os.path.abspath(cache_root + target_path)
def makedirs(path, mode=509):
if not os.path.exists(path):
makedirs(os.path.dirname(path), mode)
try:
os.mkdir(path)
os.chmod(path, mode=mode)
except FileExistsError as f_e_e:
# competing threads?
pass
class Lockable():
def __init__(self, cache):
self.umask = cache.config['cache_umask']
self.umask_dir = self.umask + 0o111
@contextmanager
def lock(self, sleep_interval=3, force=False, dry_run=False):
"""
Aquire and relase lock as a context manager.
EG:
with target.lock():
...
see get_write_lock for arguments
"""
try:
self.get_write_lock(sleep_interval, force, dry_run)
yield None
LOGGER.debug('Done with lock...')
finally:
# only release lock if it was NOT a dry run
if not dry_run:
self.release_write_lock()
def get_write_lock(self, sleep_interval=3, force=False, dry_run=False):
""" mark file as in progress (wait for existing lock) """
LOGGER.debug('Creating lock...')
if os.path.exists(self.write_lock):
if force:
os.remove(self.write_lock)
if dry_run:
return
LOGGER.info('Waiting for lock...')
LOGGER.debug("force is "+ str(force))
while os.path.exists(self.write_lock):
time.sleep(sleep_interval)
if dry_run:
return
with open(self.write_lock, 'wt') as LOCK:
LOCK.write('locked')
os.chmod(self.write_lock, self.umask)
def release_write_lock(self):
""" remove in_progress mark """
LOGGER.debug('Releasing lock (%s)...', self.write_lock)
try:
os.remove(self.write_lock)
except:
pass
class TargetMetadata(Lockable):
def __init__(self, cache, target_path, atype):
super().__init__(cache)
self.cache_root = os.path.abspath(cache.cache_root)
self.target_path = target_path
self.atype = atype
self.cached_target = get_cached_target(self.cache_root,
self.target_path,
)
cache_dir, cache_name = os.path.split(self.cached_target)
self.md_dir = os.path.join(cache_dir, '.stagecache.' + cache_name)
if not os.path.exists(self.md_dir):
makedirs(self.md_dir, mode=self.umask_dir)
self.write_lock = os.path.join(self.md_dir, 'write_lock')
LOGGER.debug("""created TargetMetadata:
cache_root=%s
target_path=%s
cached_target=%s
cache_dir=%s
md_dir=%s
write_lock=%s""",
self.cache_root, self.target_path, self.cached_target,
cache_dir, self.md_dir, self.write_lock)
def get_md_value(self, md_type, delete=False):
""" returns mtime of md file and int value from file """
md_file = os.path.join(self.md_dir, md_type)
if not os.path.exists(md_file):
# file not in cache!
return (0, None)
mtime = os.path.getmtime(md_file)
with open(md_file, 'rt') as md_handle:
value = int(md_handle.readlines()[0].strip())
if delete:
os.remove(md_file)
return value, mtime
def set_md_value(self, md_type, value):
""" writes value to md file """
md_file = os.path.join(self.md_dir, md_type)
if os.path.exists(md_file):
self.catalog(md_type)
with open(md_file, 'wt') as SIZE:
SIZE.write(str(int(value)))
os.chmod(md_file, self.umask)
def catalog(self, md_type):
""" archives old md and returns value """
log_file = os.path.join(self.md_dir, 'log')
value, mtime = self.get_md_value(md_type, delete=True)
with open(log_file, 'at') as LOG:
LOG.write("\t".join((
md_type,
str(mtime),
time.ctime(mtime),
str(value),
)) + "\n")
os.chmod(log_file, self.umask)
return value
def get_cached_target_size(self):
""" returns size and date """
return self.get_md_value('size')
def set_cached_target_size(self, size):
""" writes size to file """
self.set_md_value('size', size)
def get_last_lock_date(self):
""" returns the most recent lock end date """
lock_date = self.get_md_value('cache_lock')[0]
return lock_date
def set_cache_lock_date(self, date):
""" writes new expiration date to file """
self.set_md_value('cache_lock', date)
def is_lock_valid(self):
""" checks if lock date has passed """
lock_date = self.get_last_lock_date()
return lock_date > time.time()
def remove_target(self):
""" archive metadata for this asset """
self.catalog('cache_lock')
return self.catalog('size')
class CacheMetadata(Lockable):
def __init__(self, cache):
super().__init__(cache)
self.cache = cache
self.md_dir = os.path.abspath(
os.path.join(self.cache.cache_root, '.stagecache.global')
)
self.write_lock = os.path.join(self.md_dir, "write_lock")
self.asset_list = os.path.join(self.md_dir, "asset_list")
if not os.path.exists(self.md_dir):
makedirs(self.md_dir, self.umask_dir)
LOGGER.debug("""created CacheMetadata:
cache_root=%s
md_dir=%s
write_lock=%s""",
self.cache.cache_root, self.md_dir, self.write_lock)
def iter_cached_files(self, locked=None):
""" return list of assets with sizes and lock dates """
LOGGER.debug("Checking asset list: %s", self.asset_list)
for target_path, atype in self.list_assets():
target_metadata = TargetMetadata(self.cache,
target_path,
atype)
if locked is None or target_metadata.is_lock_valid() == locked:
yield target_metadata
def list_assets(self):
""" return list of path, type tuples in cache """
LOGGER.debug("Fetching asset list: %s", self.asset_list)
if os.path.exists(self.asset_list):
asset_list = list()
with open(self.asset_list) as assets:
for asset_line in assets:
asset_line = asset_line.strip()
if len(asset_line) == 0:
continue
asset = tuple(a.strip() for a in asset_line.split('\t'))
if len(asset) != 2:
raise Exception("Asset tuple is NOT length 2!\n%r" % (asset,))
asset_list.append(asset)
LOGGER.debug("Found %d assets in %s",
len(asset_list),
self.asset_list,
)
return asset_list
else:
return []
def remove_cached_file(self, target_metadata):
""" remove record of cached file, return size """
count = 0
# read asset list
asset_list = self.list_assets()
# write new (edited) asset list
with open(self.asset_list, 'wt') as assets:
for target_path, atype in asset_list:
if target_path != target_metadata.target_path:
assets.write(target_path + "\t" + atype + "\n")
else:
count += 1
os.chmod(self.asset_list, self.umask)
if count == 0:
LOGGER.error("No match for " + target_metadata.target_path)
raise Exception("Error recording assets")
if count > 1:
LOGGER.warning("Found {} listings for {}".format(count,
target_metadata.target_path))
return target_metadata.remove_target()
def add_cached_file(self, target_metadata, target_size, lock_end_date):
""" add record of asset """
# add to global md
paths_in_cache = set(a[0] for a in self.list_assets())
if target_metadata.target_path not in paths_in_cache:
LOGGER.debug("%s not in %s, adding...",
target_metadata.target_path,
paths_in_cache)
# add to list if not there yet
with open(self.asset_list, 'at') as assets:
assets.write(target_metadata.target_path + "\t" \
+ target_metadata.atype + "\n")
os.chmod(self.asset_list, self.umask)
added_to_list = True
else:
LOGGER.debug("%s alread in asset list",
target_metadata.target_path)
added_to_list = False
# add file specific md
target_metadata.set_cached_target_size(target_size)
target_metadata.set_cache_lock_date(lock_end_date)
return added_to_list
| 36.980132
| 86
| 0.576916
| 9,065
| 0.811694
| 1,039
| 0.093034
| 553
| 0.049516
| 0
| 0
| 3,480
| 0.311605
|
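A minimal usage sketch for the stagecache metadata classes in the file above, run in the same namespace as that module. The FakeCache object and the /tmp paths are stand-ins invented for illustration; the real Cache class lives elsewhere in jme/stagecache and only needs to expose the two attributes read here.

import time

# Hypothetical stand-in for stagecache's Cache object: only the two
# attributes read by TargetMetadata/CacheMetadata are provided.
class FakeCache:
    cache_root = '/tmp/stagecache_demo'
    config = {'cache_umask': 0o664}

cache = FakeCache()
target = TargetMetadata(cache, '/data/reference.fasta', atype='file')
with target.lock():
    target.set_cached_target_size(1024)
    target.set_cache_lock_date(time.time() + 3600)   # keep cached for an hour

global_md = CacheMetadata(cache)
with global_md.lock():
    global_md.add_cached_file(target, 1024, time.time() + 3600)
for md in global_md.iter_cached_files(locked=True):
    print(md.target_path, md.get_cached_target_size())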
a2e1fed84d2ed3d71ec400a1f6a513cfa6d50f07
| 3,858
|
py
|
Python
|
lib/roi_data/minibatch.py
|
BarneyQiao/pcl.pytorch
|
4e0280e5e1470f705e620eda26f881d627c5016c
|
[
"MIT"
] | 233
|
2019-05-10T07:17:42.000Z
|
2022-03-30T09:24:16.000Z
|
lib/roi_data/minibatch.py
|
Michael-Steven/Crack_Image_WSOD
|
4e8591a7c0768cee9eb7240bb9debd54824f5b33
|
[
"MIT"
] | 78
|
2019-05-10T21:10:47.000Z
|
2022-03-29T13:57:32.000Z
|
lib/roi_data/minibatch.py
|
Michael-Steven/Crack_Image_WSOD
|
4e8591a7c0768cee9eb7240bb9debd54824f5b33
|
[
"MIT"
] | 57
|
2019-05-10T07:17:37.000Z
|
2022-03-24T04:43:24.000Z
|
import numpy as np
import numpy.random as npr
import cv2
from core.config import cfg
import utils.blob as blob_utils
def get_minibatch_blob_names(is_training=True):
"""Return blob names in the order in which they are read by the data loader.
"""
# data blob: holds a batch of N images, each with 3 channels
blob_names = ['data', 'rois', 'labels']
return blob_names
def get_minibatch(roidb, num_classes):
"""Given a roidb, construct a minibatch sampled from it."""
# We collect blobs from each image onto a list and then concat them into a
# single tensor, hence we initialize each blob to an empty list
blobs = {k: [] for k in get_minibatch_blob_names()}
# Get the input image blob
im_blob, im_scales = _get_image_blob(roidb)
assert len(im_scales) == 1, "Single batch only"
assert len(roidb) == 1, "Single batch only"
blobs['data'] = im_blob
rois_blob = np.zeros((0, 5), dtype=np.float32)
labels_blob = np.zeros((0, num_classes), dtype=np.float32)
num_images = len(roidb)
for im_i in range(num_images):
labels, im_rois = _sample_rois(roidb[im_i], num_classes)
# Add to RoIs blob
rois = _project_im_rois(im_rois, im_scales[im_i])
batch_ind = im_i * np.ones((rois.shape[0], 1))
rois_blob_this_image = np.hstack((batch_ind, rois))
if cfg.DEDUP_BOXES > 0:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(rois_blob_this_image * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(hashes, return_index=True,
return_inverse=True)
rois_blob_this_image = rois_blob_this_image[index, :]
rois_blob = np.vstack((rois_blob, rois_blob_this_image))
# Add to labels blob
labels_blob = np.vstack((labels_blob, labels))
blobs['rois'] = rois_blob
blobs['labels'] = labels_blob
return blobs, True
def _sample_rois(roidb, num_classes):
"""Generate a random sample of RoIs"""
labels = roidb['gt_classes']
rois = roidb['boxes']
if cfg.TRAIN.BATCH_SIZE_PER_IM > 0:
batch_size = cfg.TRAIN.BATCH_SIZE_PER_IM
else:
batch_size = np.inf
if batch_size < rois.shape[0]:
rois_inds = npr.permutation(rois.shape[0])[:batch_size]
rois = rois[rois_inds, :]
return labels.reshape(1, -1), rois
def _get_image_blob(roidb):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
# Sample random scales to use for each image in this batch
scale_inds = np.random.randint(
0, high=len(cfg.TRAIN.SCALES), size=num_images)
processed_ims = []
im_scales = []
for i in range(num_images):
im = cv2.imread(roidb[i]['image'])
assert im is not None, \
'Failed to read image \'{}\''.format(roidb[i]['image'])
# If NOT using opencv to read in images, uncomment following lines
# if len(im.shape) == 2:
# im = im[:, :, np.newaxis]
# im = np.concatenate((im, im, im), axis=2)
# # flip the channel, since the original one using cv2
# # rgb -> bgr
# im = im[:, :, ::-1]
if roidb[i]['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = blob_utils.prep_im_for_blob(
im, cfg.PIXEL_MEANS, [target_size], cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale[0])
processed_ims.append(im[0])
# Create a blob to hold the input images [n, c, h, w]
blob = blob_utils.im_list_to_blob(processed_ims)
return blob, im_scales
def _project_im_rois(im_rois, im_scale_factor):
"""Project image RoIs into the rescaled training image."""
rois = im_rois * im_scale_factor
return rois
| 33.842105
| 80
| 0.630897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,106
| 0.286677
|
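The DEDUP_BOXES branch in get_minibatch above drops RoIs that collide after quantization. A standalone numpy sketch of that hashing trick, with toy boxes and a hypothetical dedup scale standing in for cfg.DEDUP_BOXES:

import numpy as np

rois = np.array([[0, 10.01, 20.02, 50.0, 60.0],
                 [0, 10.03, 20.00, 50.1, 60.1],    # nearly identical to the first box
                 [0, 80.00, 90.00, 120.0, 130.0]], dtype=np.float32)
dedup_scale = 0.0625                                # plays the role of cfg.DEDUP_BOXES
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(rois * dedup_scale).dot(v)        # one scalar hash per quantized box
_, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
unique_rois = rois[index, :]                        # the first two boxes collapse to one
print(unique_rois.shape)                            # (2, 5)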
a2e200b1e2fac4ccc713c3e1526076efebc09cea
| 1,288
|
py
|
Python
|
src/PrimaryInputs.py
|
elastacloud/input-output-tables
|
82f932c8627071bc245e178f5b47a7c1104c4e4c
|
[
"Apache-2.0"
] | null | null | null |
src/PrimaryInputs.py
|
elastacloud/input-output-tables
|
82f932c8627071bc245e178f5b47a7c1104c4e4c
|
[
"Apache-2.0"
] | null | null | null |
src/PrimaryInputs.py
|
elastacloud/input-output-tables
|
82f932c8627071bc245e178f5b47a7c1104c4e4c
|
[
"Apache-2.0"
] | null | null | null |
import abc
import os
import pandas as pd
import numpy as np
from EoraReader import EoraReader
class PrimaryInputs(EoraReader):
def __init__(self, file_path):
super().__init__(file_path)
self.df = None
def get_dataset(self, extended = False):
"""
        Returns a pandas dataframe of the primary inputs (value-added coefficients) from the input-output table
"""
value_add_coefficients = []
primary_inputs = []
industry_count = self.industry_header.count("Industries")
primary_inputs_pos = 0
line = self.file.readline().strip().split('\t')
while line[2] != "Primary Inputs":
line = self.file.readline().strip().split('\t')
while line[2] == "Primary Inputs":
primary_inputs.append(line[3])
value_add_coefficients.append(line[4:(4 + industry_count)])
line = self.file.readline().strip().split('\t')
numpy_data = np.array(value_add_coefficients)
df = pd.DataFrame(data = numpy_data, index = primary_inputs)
df.columns = self.industries[0:industry_count]
if extended:
df.loc[:, 'year'] = self.year
df.loc[:, 'country'] = self.country
self.df = df
self.extended = extended
return df
| 35.777778
| 96
| 0.615683
| 1,192
| 0.925466
| 0
| 0
| 0
| 0
| 0
| 0
| 183
| 0.142081
|
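A hedged usage sketch for the PrimaryInputs reader above. The file path is a placeholder, and it assumes the EoraReader base class (not shown) has already parsed the year, country and industry headers; the parser itself expects tab-separated rows whose third field reads "Primary Inputs".

# Hypothetical Eora basic-price text export; the real filename may differ.
reader = PrimaryInputs("data/Eora_AUS_2015_BP.txt")
value_added = reader.get_dataset(extended=True)   # extended adds year/country columns
print(value_added.index)                          # names of the primary-input rows
print(value_added.shape)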
a2e589c4ee6ca6ac8b468da944e0f2d14d31872f
| 695
|
py
|
Python
|
toto/methods/client_error.py
|
VNUELIVE/Toto
|
6940b4114fc6b680e0d40ae248b7d2599c954f81
|
[
"MIT"
] | null | null | null |
toto/methods/client_error.py
|
VNUELIVE/Toto
|
6940b4114fc6b680e0d40ae248b7d2599c954f81
|
[
"MIT"
] | null | null | null |
toto/methods/client_error.py
|
VNUELIVE/Toto
|
6940b4114fc6b680e0d40ae248b7d2599c954f81
|
[
"MIT"
] | null | null | null |
import logging
from toto.invocation import *
@requires('client_error', 'client_type')
def invoke(handler, parameters):
    '''A convenience method for writing browser errors
to Toto's server log. It works with the ``registerErrorHandler()`` method in ``toto.js``.
The "client_error" parameter should be set to the string to be written to Toto's log.
Currently, the "client_type" parameter must be set to "browser_js" for an event
to be written. Otherwise, this method has no effect.
Requires: ``client_error``, ``client_type``
'''
if parameters['client_type'] != 'browser_js':
return {'logged': False}
logging.error(str(parameters['client_error']))
return {'logged': True}
| 36.578947
| 91
| 0.723741
| 0
| 0
| 0
| 0
| 648
| 0.932374
| 0
| 0
| 502
| 0.722302
|
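A quick smoke-test sketch for the method above. The handler argument is unused in the body, so a placeholder is passed; this assumes Toto's requires decorator only validates that the named parameters are present.

# Hypothetical direct call; in a running Toto server this is routed for you.
result = invoke(None, {'client_type': 'browser_js',
                       'client_error': 'TypeError: undefined is not a function'})
print(result)   # {'logged': True}, and the error string lands in the server log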
a2e5b6c37644bb0cda6e0ffc3d078b3332260604
| 1,945
|
py
|
Python
|
parallelformers/policies/gptj.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 454
|
2021-07-18T02:51:23.000Z
|
2022-03-31T04:00:53.000Z
|
parallelformers/policies/gptj.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 16
|
2021-07-18T10:47:21.000Z
|
2022-03-22T18:49:57.000Z
|
parallelformers/policies/gptj.py
|
Oaklight/parallelformers
|
57fc36f81734c29aaf814e092ce13681d3c28ede
|
[
"Apache-2.0"
] | 33
|
2021-07-18T04:48:28.000Z
|
2022-03-14T22:16:36.000Z
|
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.models.gptj.modeling_gptj import GPTJBlock
from parallelformers.policies.base import Layer, Policy
from parallelformers.utils import AllReduceLinear
class GPTJPolicy(Policy):
@staticmethod
def replace_arguments(config, world_size):
return {
# 1. reduce hidden size
"attn.embed_dim": config.hidden_size // world_size,
# 2. reduce number of heads
"attn.num_attention_heads": config.n_head // world_size,
}
@staticmethod
def attn_qkv():
return [
Layer(weight="attn.q_proj.weight"),
Layer(weight="attn.k_proj.weight"),
Layer(weight="attn.v_proj.weight"),
]
@staticmethod
def attn_out():
return [
Layer(
weight="attn.out_proj.weight",
replace=AllReduceLinear,
),
]
@staticmethod
def mlp_in():
return [
Layer(
weight="mlp.fc_in.weight",
bias="mlp.fc_in.bias",
),
]
@staticmethod
def mlp_out():
return [
Layer(
weight="mlp.fc_out.weight",
bias="mlp.fc_out.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def original_layer_class():
return GPTJBlock
| 28.188406
| 74
| 0.607712
| 1,204
| 0.619023
| 0
| 0
| 1,144
| 0.588175
| 0
| 0
| 801
| 0.411825
|
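A small sketch of how the tensor-parallel arguments above work out, assuming parallelformers and transformers are importable. The config object here is a stand-in exposing only the two fields that replace_arguments reads, not a real transformers GPTJConfig.

from types import SimpleNamespace

# Stand-in for transformers.GPTJConfig (assumption for illustration only).
fake_config = SimpleNamespace(hidden_size=4096, n_head=16)
print(GPTJPolicy.replace_arguments(fake_config, world_size=4))
# -> {'attn.embed_dim': 1024, 'attn.num_attention_heads': 4}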
a2e61afbf4f6a03e376d0464c7acf87dc5bb080e
| 503
|
py
|
Python
|
app/modules/checkerbox.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
app/modules/checkerbox.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
app/modules/checkerbox.py
|
hboueix/PyCheckers
|
c1339a004f30f76a33461b52f9633bbbd1204bb0
|
[
"MIT"
] | null | null | null |
import pygame
class Checkerbox(pygame.sprite.Sprite):
def __init__(self, size, color, coords):
super().__init__()
self.rect = pygame.Rect(coords[0], coords[1], size, size)
self.color = color
self.hovered = False
def draw(self, screen):
pygame.draw.rect(screen, self.color, self.rect)
def set_hovered(self):
if self.rect.collidepoint(pygame.mouse.get_pos()):
self.hovered = True
else:
self.hovered = False
| 23.952381
| 65
| 0.606362
| 485
| 0.964215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
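A minimal sketch of wiring Checkerbox into a pygame board; the 8x8 layout, the colours and the window size are illustrative assumptions, and a display is required for set_mode and mouse polling.

import pygame

pygame.init()
screen = pygame.display.set_mode((400, 400))
size = 50
colors = [(240, 217, 181), (181, 136, 99)]    # light and dark squares
board = [Checkerbox(size, colors[(row + col) % 2], (col * size, row * size))
         for row in range(8) for col in range(8)]
for box in board:
    box.set_hovered()      # flags the square currently under the mouse cursor
    box.draw(screen)
pygame.display.flip()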
a2e6d1a1d562ff46afccc16626cb0e1d9bd964d4
| 1,319
|
py
|
Python
|
tests/python/test_talos_walk_sl1m_topt.py
|
daeunSong/multicontact-locomotion-planning
|
0aeabe6a7a8d49e54d6996a6126740cc90aa0050
|
[
"BSD-2-Clause"
] | 31
|
2019-11-08T14:46:03.000Z
|
2022-03-25T08:09:16.000Z
|
tests/python/test_talos_walk_sl1m_topt.py
|
pFernbach/multicontact-locomotion-planning
|
86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2
|
[
"BSD-2-Clause"
] | 21
|
2019-04-12T13:13:31.000Z
|
2021-04-02T14:28:15.000Z
|
tests/python/test_talos_walk_sl1m_topt.py
|
pFernbach/multicontact-locomotion-planning
|
86c3e64fd0ee57b1e4061351a16e43e6ba0e15c2
|
[
"BSD-2-Clause"
] | 11
|
2019-04-12T13:03:55.000Z
|
2021-11-22T08:19:06.000Z
|
# Copyright (c) 2020, CNRS
# Authors: Pierre Fernbach <pfernbac@laas.fr>
import unittest
import subprocess
import time
from mlp import LocoPlanner, Config
from utils import check_motion
from hpp.corbaserver.rbprm.utils import ServerManager
class TestTalosWalkSl1mTopt(unittest.TestCase):
def test_talos_walk_sl1m_topt(self):
cfg = Config()
cfg.load_scenario_config("talos_flatGround")
cfg.contact_generation_method = "sl1m"
cfg.centroidal_method = "momentumopt"
cfg.IK_store_centroidal = True
cfg.IK_store_zmp = True
cfg.IK_store_effector = True
cfg.IK_store_contact_forces = True
cfg.IK_store_joints_derivatives = True
cfg.IK_store_joints_torque = True
cfg.ITER_DYNAMIC_FILTER = 0
        cfg.TIMEOPT_CONFIG_FILE = "cfg_softConstraints_timeopt_talos.yaml"
with ServerManager('hpp-rbprm-server'):
loco_planner = LocoPlanner(cfg)
loco_planner.run()
check_motion(self, loco_planner)
self.assertNotEqual(loco_planner.cs.contactPhases[-1].timeFinal, loco_planner.cs_com.contactPhases[-1].timeFinal)
self.assertEqual(loco_planner.cs_com.contactPhases[-1].timeFinal, loco_planner.cs_wb.contactPhases[-1].timeFinal)
if __name__ == '__main__':
unittest.main()
| 36.638889
| 125
| 0.718726
| 1,028
| 0.779378
| 0
| 0
| 0
| 0
| 0
| 0
| 176
| 0.133434
|
a2e7779c3e2b321cf059e7d364c94dc2593aa13c
| 212
|
py
|
Python
|
definitions.py
|
elpeix/kaa
|
b840613cb5eba876d937faf32031651332e5b5f6
|
[
"MIT"
] | null | null | null |
definitions.py
|
elpeix/kaa
|
b840613cb5eba876d937faf32031651332e5b5f6
|
[
"MIT"
] | null | null | null |
definitions.py
|
elpeix/kaa
|
b840613cb5eba876d937faf32031651332e5b5f6
|
[
"MIT"
] | null | null | null |
import os
import logging
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DEBUG = True
LOG = logging.getLogger()
NAME = 'Sample Server'
VERSION = 'v1.0'
SERVER = 'example.SampleServer'
ENABLE_CORS = True
| 16.307692
| 53
| 0.740566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 43
| 0.20283
|
a2e9b6f6bd695b4f20c44aff1b1aeaa6c236f680
| 9,567
|
py
|
Python
|
uncertainty_baselines/datasets/smcalflow.py
|
y0ast/uncertainty-baselines
|
8d32c77ba0803ed715c1406378adf10ebd61ab74
|
[
"Apache-2.0"
] | null | null | null |
uncertainty_baselines/datasets/smcalflow.py
|
y0ast/uncertainty-baselines
|
8d32c77ba0803ed715c1406378adf10ebd61ab74
|
[
"Apache-2.0"
] | null | null | null |
uncertainty_baselines/datasets/smcalflow.py
|
y0ast/uncertainty-baselines
|
8d32c77ba0803ed715c1406378adf10ebd61ab74
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""SMCalflow dataset builder.
The SMCalFlow dataset is from the following paper:
Task-Oriented Dialogue as Dataflow Synthesis (Andreas et al., 2020)
The MultiWoz 2.1 dataset is the released version from the following paper:
Task-Oriented Dialogue as Dataflow Synthesis (Andreas et al., 2020)
The dataset is originally published at:
MultiWOZ 2.1: A Consolidated Multi-Domain Dialogue Dataset with State
Corrections and State Tracking Baselines (Eric et al., 2019)
The released version is processed by:
Transferable Multi-Domain State Generator for Task-Oriented Dialogue Systems
(Wu et al., 2019)
Processed following the directions in:
https://github.com/microsoft/task_oriented_dialogue_as_dataflow_synthesis
"""
import os.path
from typing import Any, Dict, Optional, Type
import seqio
import t5.data
import tensorflow as tf
import tensorflow_datasets as tfds
from uncertainty_baselines.datasets import base
_NUM_TRAIN_SMCALFLOW = 121200
_NUM_VAL_SMCALFLOW = 13499
_NUM_TRAIN_MULTIWOZ = 56668
_NUM_VAL_MULTIWOZ = 7374
_NUM_TEST_MULTIWOZ = 7368
_FEATURES = [
'encoder_input_tokens', 'decoder_target_tokens', 'decoder_input_tokens',
'encoder_segment_ids', 'decoder_segment_ids'
]
def _get_num_examples(name: str) -> Dict[str, int]:
"""Retrieves the number of examples and filenames according to task name."""
if name == 'smcalflow':
num_examples = {
tfds.Split.TRAIN: _NUM_TRAIN_SMCALFLOW,
tfds.Split.VALIDATION: _NUM_VAL_SMCALFLOW,
}
elif name == 'multiwoz':
num_examples = {
tfds.Split.TRAIN: _NUM_TRAIN_MULTIWOZ,
tfds.Split.VALIDATION: _NUM_VAL_MULTIWOZ,
tfds.Split.TEST: _NUM_TEST_MULTIWOZ,
}
else:
raise ValueError('"name" can only be one of "smcalflow" or "multiwoz". '
'Got "{}".'.format(name))
return num_examples
def _has_test_split(name: str) -> bool:
return name == 'multiwoz'
class _SMCalflowDatasetBuilder(tfds.core.DatasetBuilder):
"""Minimal TFDS DatasetBuilder for SMCalflow and MultiWoZ, does not support downloading."""
VERSION = tfds.core.Version('0.0.0')
def __init__(self, name: str, data_dir: str, max_seq_length: int,
vocabulary: seqio.Vocabulary,
feature_converter_cls: Type[seqio.FeatureConverter],
**unused_kwargs: Dict[str, Any]):
self._max_seq_length = max_seq_length
self._task = self._build_task(name, data_dir, vocabulary)
self._feature_converter = feature_converter_cls()
super().__init__(
data_dir=data_dir, **unused_kwargs)
# We have to override self._data_dir to prevent the parent class from
# appending the class name and version.
self._data_dir = data_dir
def _download_and_prepare(self, dl_manager, download_config=None):
"""Downloads and prepares dataset for reading."""
raise NotImplementedError
def _build_task(self, task_name: str, data_dir: str,
vocabulary: seqio.Vocabulary) -> seqio.Task:
split_to_filepattern = {
tfds.Split.TRAIN: os.path.join(data_dir, 'train.tfr*'),
tfds.Split.VALIDATION: os.path.join(data_dir, 'valid.tfr*')
}
if _has_test_split(task_name):
split_to_filepattern[tfds.Split.TEST] = os.path.join(
data_dir, 'test.tfr*')
source_features = {
'inputs': tf.io.FixedLenFeature([], tf.string, ''),
'targets': tf.io.FixedLenFeature([], tf.string, '')
}
data_source = seqio.TFExampleDataSource(
split_to_filepattern=split_to_filepattern,
feature_description=source_features,
num_input_examples=_get_num_examples(task_name))
output_features = {
'inputs':
seqio.Feature(vocabulary=vocabulary, add_eos=True, required=False),
'targets':
seqio.Feature(vocabulary=vocabulary, add_eos=True)
}
task = seqio.Task(
name=task_name,
source=data_source,
output_features=output_features,
preprocessors=[
seqio.preprocessors.tokenize, seqio.preprocessors.append_eos
],
shuffle_buffer_size=None # disable shuffling.
)
return task
def _as_dataset(self,
split: tfds.Split,
decoders=None,
read_config=None,
shuffle_files=False) -> tf.data.Dataset:
"""Constructs a `tf.data.Dataset`."""
del decoders
del read_config
del shuffle_files
task_feature_lengths = {
'inputs': self._max_seq_length,
'targets': self._max_seq_length
}
dataset = self._task.get_dataset(
sequence_length=task_feature_lengths, split=split, shuffle=False)
return self._feature_converter(dataset, task_feature_lengths)
def _info(self) -> tfds.core.DatasetInfo:
"""Returns the `tfds.core.DatasetInfo` object."""
features = {
feature_name:
tfds.features.Tensor(shape=[self._max_seq_length], dtype=tf.int32)
for feature_name in _FEATURES
}
info = tfds.core.DatasetInfo(
builder=self,
description=self._task.name,
features=tfds.features.FeaturesDict(features),
metadata=None)
# Instead of having a single element shard_lengths, we should really have a
# list of the number of elements in each file shard in each split.
splits = [tfds.Split.TRAIN, tfds.Split.VALIDATION]
if _has_test_split(self._task.name):
splits.append(tfds.Split.TEST)
split_infos = []
for split in splits:
split_infos.append(
tfds.core.SplitInfo(
name=split,
shard_lengths=[self._task.num_input_examples(split)],
num_bytes=0,
))
split_dict = tfds.core.SplitDict(split_infos, dataset_name=self.name)
info.set_splits(split_dict)
return info
class _SMCalflowDataset(base.BaseDataset):
"""SMCalflow dataset builder class."""
def __init__(
self,
name: str,
split: str,
data_dir: Optional[str] = None,
max_seq_length: int = 512,
vocabulary: Optional[seqio.Vocabulary] = t5.data.get_default_vocabulary(),
feature_converter_cls: Optional[Type[
seqio.FeatureConverter]] = seqio.EncDecFeatureConverter,
is_training: Optional[bool] = None,
num_parallel_parser_calls: int = 64,
shuffle_buffer_size: Optional[int] = None):
"""Create a SMCalflow tf.data.Dataset builder.
Args:
name: the name of this dataset.
split: a dataset split, either a custom tfds.Split or one of the
        tfds.Split enums [TRAIN, VALIDATION, TEST] or their lowercase string
names.
      data_dir: Path to a directory containing the preprocessed SMCalflow or
        MultiWoZ TFRecords, with filenames 'train.tfr*', 'valid.tfr*' and
        'test.tfr*'.
max_seq_length: the maximum sequence length for the input and target of an
example.
vocabulary: the vocabulary used for tokenization. Must be a subclass of
seqio.Vocabulary.
feature_converter_cls: the type of the feature converter converting
examples of {'input', 'target'} into model specific outputs. Must be a
subclass of seqio.FeatureConverter.
is_training: Whether or not the given `split` is the training split. Only
required when the passed split is not one of ['train', 'validation',
'test', tfds.Split.TRAIN, tfds.Split.VALIDATION, tfds.Split.TEST].
num_parallel_parser_calls: the number of parallel threads to use while
preprocessing in tf.data.Dataset.map().
shuffle_buffer_size: the number of example to use in the shuffle buffer
for tf.data.Dataset.shuffle().
"""
super().__init__(
name=name,
dataset_builder=_SMCalflowDatasetBuilder(
name=name,
data_dir=data_dir,
max_seq_length=max_seq_length,
vocabulary=vocabulary,
feature_converter_cls=feature_converter_cls),
split=split,
is_training=is_training,
shuffle_buffer_size=shuffle_buffer_size,
num_parallel_parser_calls=num_parallel_parser_calls)
def _create_process_example_fn(self) -> base.PreProcessFn:
"""Create a pre-process function to return labels and sentence tokens."""
def _example_parser(example: Dict[str, tf.Tensor]) -> Dict[str, Any]:
"""Parse sentences and labels from a serialized tf.train.Example."""
return {feature: example[feature] for feature in _FEATURES}
return _example_parser
class SMCalflowDataset(_SMCalflowDataset):
"""SMCalflow dataset builder class."""
def __init__(self, data_dir: Optional[str] = None, **kwargs: Dict[str, Any]):
super().__init__(
name='smcalflow', data_dir=data_dir, **kwargs)
class MultiWoZDataset(_SMCalflowDataset):
"""MultiWoZ dataset builder class."""
def __init__(self, data_dir: Optional[str] = None, **kwargs: Dict[str, Any]):
super().__init__(
name='multiwoz', data_dir=data_dir, **kwargs)
| 35.831461
| 93
| 0.691126
| 7,010
| 0.732727
| 0
| 0
| 0
| 0
| 0
| 0
| 3,764
| 0.393436
|
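A hedged construction sketch for the builders above. The data_dir is a placeholder and must already contain the preprocessed train/valid TFRecords described in the module docstring.

# Placeholder path: the TFRecords come from the Microsoft
# task_oriented_dialogue_as_dataflow_synthesis preprocessing step.
builder = SMCalflowDataset(
    data_dir='/path/to/smcalflow_tfrecords',
    split='train',
    is_training=True,
    max_seq_length=512,
)
# Assumption: base.BaseDataset exposes a load() method returning a tf.data.Dataset.
train_ds = builder.load(batch_size=32)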
a2eb8907fa9fa5c982005554035cbb22b3ce7287
| 1,098
|
py
|
Python
|
1-FrequencyDivisionMultiplexing.py
|
mahnooranjum/Demo_CommunicationSystems
|
6c3be46f9ad4a38bfe553b9a01855156713e49d9
|
[
"MIT"
] | null | null | null |
1-FrequencyDivisionMultiplexing.py
|
mahnooranjum/Demo_CommunicationSystems
|
6c3be46f9ad4a38bfe553b9a01855156713e49d9
|
[
"MIT"
] | null | null | null |
1-FrequencyDivisionMultiplexing.py
|
mahnooranjum/Demo_CommunicationSystems
|
6c3be46f9ad4a38bfe553b9a01855156713e49d9
|
[
"MIT"
] | null | null | null |
'''
==============================================================================
Author:
Mahnoor Anjum
Description:
Digital Multiplexing Techniques:
1- Frequency Division Multiplexing
Contact:
manomaq@gmail.com
==============================================================================
'''
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0, 10, 0.1);
m1 = np.sin(x)*1000
m2 = np.array(x*x)*10
m3 = np.array(80*x)
plt.plot(x, m1)
plt.plot(x, m2)
plt.plot(x, m3)
plt.title('Sine wave')
plt.xlabel('Time')
plt.ylabel('Messages')
plt.axhline(y=0, color='k')
plt.show()
'''
In FDM, all of the signals are sent through the channel at the same time, each
occupying its own frequency band.
Here the received streams rcv1, rcv2 and rcv3 simply stand in for those separate
bands; an explicit carrier-modulation sketch follows this file.
'''
rcv1 = []
rcv2 = []
rcv3 = []
for i in range(x.size):
rcv1.append(m1[i])
rcv2.append(m2[i])
rcv3.append(m3[i])
plt.plot(x, rcv1)
plt.plot(x, rcv2)
plt.plot(x, rcv3)
plt.title('FDM')
plt.xlabel('Time')
plt.ylabel('Received')
plt.axhline(y=0, color='k')
plt.show()
| 21.115385
| 78
| 0.547359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 577
| 0.525501
|
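The loop above copies the messages sample-by-sample but never shifts them in frequency. A short sketch of what frequency-division multiplexing actually does, with carrier frequencies chosen arbitrarily for illustration:

import numpy as np
import matplotlib.pyplot as plt

t = np.arange(0, 1, 1e-4)                       # 10 kHz sampling over 1 s
m1 = np.sin(2 * np.pi * 5 * t)                  # three baseband messages
m2 = 0.5 * np.sign(np.sin(2 * np.pi * 3 * t))
m3 = t
carriers = [100, 300, 500]                      # Hz; assumed non-overlapping bands
fdm = sum(m * np.cos(2 * np.pi * fc * t) for m, fc in zip((m1, m2, m3), carriers))
plt.plot(t, fdm)
plt.title('FDM composite signal')
plt.xlabel('Time [s]')
plt.show()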
a2ebe5b887b32f0561c68f37282697177b6753ec
| 3,880
|
py
|
Python
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 210
|
2016-04-09T14:26:00.000Z
|
2022-03-25T18:36:19.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 72
|
2016-09-04T09:30:19.000Z
|
2022-03-27T17:06:53.000Z
|
deep-rl/lib/python2.7/site-packages/OpenGL/GL/ATI/text_fragment_shader.py
|
ShujaKhalid/deep-rl
|
99c6ba6c3095d1bfdab81bd01395ced96bddd611
|
[
"MIT"
] | 64
|
2016-04-09T14:26:49.000Z
|
2022-03-21T11:19:47.000Z
|
'''OpenGL extension ATI.text_fragment_shader
This module customises the behaviour of the
OpenGL.raw.GL.ATI.text_fragment_shader to provide a more
Python-friendly API
Overview (from the spec)
The ATI_fragment_shader extension exposes a powerful fragment
processing model that provides a very general means of expressing
fragment color blending and dependent texture address modification.
The processing is termed a fragment shader or fragment program and
is specified using a register-based model in which there are fixed
numbers of instructions, texture lookups, read/write registers, and
constants.
ATI_fragment_shader provides a unified instruction set
for operating on address or color data and eliminates the
distinction between the two. That extension provides all the
interfaces necessary to fully expose this programmable fragment
processor in GL.
ATI_text_fragment_shader is a redefinition of the
ATI_fragment_shader functionality, using a slightly different
interface. The intent of creating ATI_text_fragment_shader is to
take a step towards treating fragment programs similar to other
programmable parts of the GL rendering pipeline, specifically
vertex programs. This new interface is intended to appear
similar to the ARB_vertex_program API, within the limits of the
feature set exposed by the original ATI_fragment_shader extension.
The most significant differences between the two extensions are:
(1) ATI_fragment_shader provides a procedural function call
interface to specify the fragment program, whereas
ATI_text_fragment_shader uses a textual string to specify
the program. The fundamental syntax and constructs of the
program "language" remain the same.
    (2) The program object management portions of the interface,
namely the routines used to create, bind, and delete program
objects and set program constants are managed
using the framework defined by ARB_vertex_program.
(3) ATI_fragment_shader refers to the description of the
programmable fragment processing as a "fragment shader".
In keeping with the desire to treat all programmable parts
of the pipeline consistently, ATI_text_fragment_shader refers
to these as "fragment programs". The name of the extension is
left as ATI_text_fragment_shader instead of
ATI_text_fragment_program in order to indicate the underlying
similarity between the API's of the two extensions, and to
differentiate it from any other potential extensions that
may be able to move even further in the direction of treating
fragment programs as just another programmable area of the
GL pipeline.
Although ATI_fragment_shader was originally conceived as a
device-independent extension that would expose the capabilities of
future generations of hardware, changing trends in programmable
hardware have affected the lifespan of this extension. For this
reason you will now find a fixed set of features and resources
exposed, and the queries to determine this set have been deprecated
in ATI_fragment_shader. Further, in ATI_text_fragment_shader,
most of these resource limits are fixed by the text grammar and
the queries have been removed altogether.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ATI/text_fragment_shader.txt
'''
from OpenGL import platform, constant, arrays
from OpenGL import extensions, wrapper
import ctypes
from OpenGL.raw.GL import _types, _glgets
from OpenGL.raw.GL.ATI.text_fragment_shader import *
from OpenGL.raw.GL.ATI.text_fragment_shader import _EXTENSION_NAME
def glInitTextFragmentShaderATI():
'''Return boolean indicating whether this extension is available'''
from OpenGL import extensions
return extensions.hasGLExtension( _EXTENSION_NAME )
### END AUTOGENERATED SECTION
| 46.190476
| 71
| 0.802835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 3,485
| 0.898196
|
a2ed46d6b33e4e8573f56ac8afc0ade0ec58667b
| 7,311
|
py
|
Python
|
vhog3d.py
|
parthsuresh/3dvhog
|
9a439687a0ce30b86b7730a61733b3f3845d27c5
|
[
"MIT"
] | 3
|
2021-05-18T07:48:39.000Z
|
2021-12-23T06:35:41.000Z
|
vhog3d.py
|
parthsuresh/3dvhog
|
9a439687a0ce30b86b7730a61733b3f3845d27c5
|
[
"MIT"
] | null | null | null |
vhog3d.py
|
parthsuresh/3dvhog
|
9a439687a0ce30b86b7730a61733b3f3845d27c5
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
from scipy.ndimage import convolve
from tqdm import tqdm
def hog3d(vox_volume, cell_size, block_size, theta_histogram_bins, phi_histogram_bins, step_size=None):
"""
Inputs
vox_volume : a [x x y x z] numpy array defining voxels with values in the range 0-1
cell_size : size of a 3d cell (int)
block_size : size of a 3d block defined in cells
theta_histogram_bins : number of bins to break the angles in the xy plane - 180 degrees
phi_histogram_bins : number of bins to break the angles in the xz plane - 360 degrees
    step_size : OPTIONAL integer intended to define the block overlap in cells (currently stored but not used by the block layout below).
"""
if step_size is None:
step_size = block_size
c = cell_size
b = block_size
sx, sy, sz = vox_volume.shape
num_x_cells = math.floor(sx / cell_size)
num_y_cells = math.floor(sy / cell_size)
num_z_cells = math.floor(sz / cell_size)
# Get cell positions
x_cell_positions = np.array(list(range(0, (num_x_cells * cell_size), cell_size)))
y_cell_positions = np.array(list(range(0, (num_y_cells * cell_size), cell_size)))
z_cell_positions = np.array(list(range(0, (num_z_cells * cell_size), cell_size)))
# Get block positions
x_block_positions = (x_cell_positions[0: num_x_cells: block_size])
y_block_positions = (y_cell_positions[0: num_y_cells: block_size])
z_block_positions = (z_cell_positions[0: num_z_cells: block_size])
# Check if last block in each dimension has enough voxels to be a full block. If not, discard it.
if x_block_positions[-1] > ((sx + 1) - (cell_size * block_size)):
x_block_positions = x_block_positions[:-2]
if y_block_positions[-1] > ((sy + 1) - (cell_size * block_size)):
y_block_positions = y_block_positions[:-2]
if z_block_positions[-1] > ((sz + 1) - (cell_size * block_size)):
z_block_positions = z_block_positions[:-2]
# Number of blocks
num_x_blocks = len(x_block_positions)
num_y_blocks = len(y_block_positions)
num_z_blocks = len(z_block_positions)
# Create 3D gradient vectors
# X filter and vector
x_filter = np.zeros((3, 3, 3))
x_filter[0, 1, 1], x_filter[2, 1, 1] = 1, -1
x_vector = convolve(vox_volume, x_filter, mode='constant', cval=0)
# Y filter and vector
y_filter = np.zeros((3, 3, 3))
y_filter[1, 0, 0], y_filter[1, 2, 0] = 1, -1
y_vector = convolve(vox_volume, y_filter, mode='constant', cval=0)
# Z filter and vector
z_filter = np.zeros((3, 3, 3))
z_filter[1, 1, 0], z_filter[1, 1, 2] = 1, -1
z_vector = convolve(vox_volume, z_filter, mode='constant', cval=0)
magnitudes = np.zeros([sx, sy, sz])
for i in range(sx):
for j in range(sy):
for k in range(sz):
magnitudes[i, j, k] = (x_vector[i, j, k] ** 2 + y_vector[i, j, k] ** 2 + z_vector[i, j, k] ** 2) ** (
0.5)
# Voxel Weights
kernel_size = 3
voxel_filter = np.full((kernel_size, kernel_size, kernel_size), 1 / (kernel_size * kernel_size * kernel_size))
weights = convolve(vox_volume, voxel_filter, mode='constant', cval=0)
weights = weights + 1
# Gradient vector
grad_vector = np.zeros((sx, sy, sz, 3))
for i in range(sx):
for j in range(sy):
for k in range(sz):
grad_vector[i, j, k, 0] = x_vector[i, j, k]
grad_vector[i, j, k, 1] = y_vector[i, j, k]
grad_vector[i, j, k, 2] = z_vector[i, j, k]
theta = np.zeros((sx, sy, sz))
phi = np.zeros((sx, sy, sz))
for i in range(sx):
for j in range(sy):
for k in range(sz):
theta[i, j, k] = math.acos(grad_vector[i, j, k, 2])
phi[i, j, k] = math.atan2(grad_vector[i, j, k, 1], grad_vector[i, j, k, 0])
phi[i, j, k] += math.pi
# Binning
b_size_voxels = int(c * b)
t_hist_bins = math.pi / theta_histogram_bins
p_hist_bins = (2 * math.pi) / phi_histogram_bins
block_inds = np.zeros((num_x_blocks * num_y_blocks * num_z_blocks, 3))
i = 0
for z_block in range(num_z_blocks):
for y_block in range(num_y_blocks):
for x_block in range(num_x_blocks):
block_inds[i] = np.array(
[x_block_positions[x_block], y_block_positions[y_block], z_block_positions[z_block]])
i += 1
num_blocks = len(block_inds)
error_count = 0
features = []
for i in range(num_blocks):
full_empty = vox_volume[int(block_inds[i, 0]):int(block_inds[i, 0] + b_size_voxels),
int(block_inds[i, 1]):int(block_inds[i, 1] + b_size_voxels),
int(block_inds[i, 2]):int(block_inds[i, 2] + b_size_voxels)]
if np.sum(full_empty) != 0 and np.sum(full_empty) != full_empty.size:
feature = np.zeros((b, b, b, theta_histogram_bins, phi_histogram_bins))
t_weights = weights[int(block_inds[i, 0]):int(block_inds[i, 0] + b_size_voxels),
int(block_inds[i, 1]):int(block_inds[i, 1] + b_size_voxels),
int(block_inds[i, 2]):int(block_inds[i, 2] + b_size_voxels)]
t_magnitudes = magnitudes[int(block_inds[i, 0]):int(block_inds[i, 0] + b_size_voxels),
int(block_inds[i, 1]):int(block_inds[i, 1] + b_size_voxels),
int(block_inds[i, 2]):int(block_inds[i, 2] + b_size_voxels)]
t_theta = theta[int(block_inds[i, 0]):int(block_inds[i, 0] + b_size_voxels),
int(block_inds[i, 1]):int(block_inds[i, 1] + b_size_voxels),
int(block_inds[i, 2]):int(block_inds[i, 2] + b_size_voxels)]
t_phi = phi[int(block_inds[i, 0]):int(block_inds[i, 0] + b_size_voxels),
int(block_inds[i, 1]):int(block_inds[i, 1] + b_size_voxels),
int(block_inds[i, 2]):int(block_inds[i, 2] + b_size_voxels)]
for l in range(b_size_voxels):
for m in range(b_size_voxels):
for n in range(b_size_voxels):
cell_pos_x = math.ceil(l / c) - 1
cell_pos_y = math.ceil(m / c) - 1
cell_pos_z = math.ceil(n / c) - 1
hist_pos_theta = math.ceil(t_theta[l, m, n] / t_hist_bins) - 1
hist_pos_phi = math.ceil(t_phi[l, m, n] / p_hist_bins) - 1
if phi_histogram_bins >= hist_pos_phi > 0 and theta_histogram_bins >= hist_pos_theta > 0:
feature[cell_pos_x, cell_pos_y, cell_pos_z, hist_pos_theta, hist_pos_phi] += (
t_magnitudes[l, m, n] * t_weights[l, m, n])
else:
error_count += 1
feature = np.reshape(feature, ((b * b * b), theta_histogram_bins, phi_histogram_bins))
l2 = np.linalg.norm(feature)
if l2 != 0:
norm_feature = feature / l2
else:
norm_feature = feature
norm_feature = np.reshape(norm_feature, ((b * b * b), (theta_histogram_bins * phi_histogram_bins)))
features.append(norm_feature)
features = np.array(features)
return features
| 43.778443
| 117
| 0.591164
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 804
| 0.109971
|
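A quick smoke-test sketch for hog3d above on a random binary volume. The grid size and the cell/block/bin choices are arbitrary illustrative values.

import numpy as np

rng = np.random.default_rng(0)
vox = (rng.random((16, 16, 16)) > 0.5).astype(float)   # toy 16^3 occupancy grid
features = hog3d(vox, cell_size=4, block_size=2,
                 theta_histogram_bins=4, phi_histogram_bins=8)
# e.g. (8, 8, 32): kept blocks x cells per block x (theta_bins * phi_bins)
print(features.shape)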
a2ee6d19098aed822e580f589bbcc0c4df0bf0c1
| 320
|
py
|
Python
|
tests/urls.py
|
skioo/django-datatrans
|
c2159b08935cd0c70355ca6e8ff92bbe86d372cd
|
[
"MIT"
] | 9
|
2017-09-12T12:45:30.000Z
|
2022-03-30T13:53:57.000Z
|
tests/urls.py
|
skioo/django-datatrans
|
c2159b08935cd0c70355ca6e8ff92bbe86d372cd
|
[
"MIT"
] | null | null | null |
tests/urls.py
|
skioo/django-datatrans
|
c2159b08935cd0c70355ca6e8ff92bbe86d372cd
|
[
"MIT"
] | 1
|
2021-11-08T10:21:01.000Z
|
2021-11-08T10:21:01.000Z
|
from django.urls import include, re_path
from datatrans.views import example
urlpatterns = [
    re_path(r'^datatrans/', include('datatrans.urls')),
    re_path(r'^example/register-credit-card$', example.register_credit_card, name='example_register_credit_card'),
    re_path(r'^example/pay$', example.pay, name='example_pay'),
]
| 32
| 111
| 0.7375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 122
| 0.38125
|
a2f252e2b9ab4a63f342c14ab8d8666d4956f841
| 11,160
|
py
|
Python
|
gibbs/minimization.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 28
|
2019-05-25T14:50:00.000Z
|
2022-01-18T00:54:22.000Z
|
gibbs/minimization.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 10
|
2019-06-15T06:07:14.000Z
|
2021-09-01T04:32:50.000Z
|
gibbs/minimization.py
|
volpatto/gibbs
|
776acff6166dd4fd3039d55074542d995ac91754
|
[
"MIT"
] | 5
|
2019-08-04T05:37:34.000Z
|
2022-01-18T10:10:40.000Z
|
import attr
import types
from typing import Union
from enum import Enum
import numpy as np
from scipy.optimize import differential_evolution
import pygmo as pg
class OptimizationMethod(Enum):
"""
Available optimization solvers.
"""
SCIPY_DE = 1
PYGMO_DE1220 = 2
@attr.s(auto_attribs=True)
class ScipyDifferentialEvolutionSettings:
"""
Optional arguments to pass for SciPy's differential evolution caller.
Members
----------------
:ivar str strategy:
The differential evolution strategy to use. Should be one of: - 'best1bin' - 'best1exp' - 'rand1exp' -
'randtobest1exp' - 'currenttobest1exp' - 'best2exp' - 'rand2exp' - 'randtobest1bin' - 'currenttobest1bin' -
'best2bin' - 'rand2bin' - 'rand1bin' The default is 'best1bin'.
:ivar float recombination:
The recombination constant, should be in the range [0, 1]. In the literature this is also known as the crossover
probability, being denoted by CR. Increasing this value allows a larger number of mutants to progress into the
next generation, but at the risk of population stability.
:ivar float mutation:
The mutation constant. In the literature this is also known as differential weight, being denoted by F. If
specified as a float it should be in the range [0, 2].
:ivar float tol:
        Relative tolerance for convergence; the solver stops when `np.std(pop) <= atol + tol * np.abs(np.mean(population_energies))`,
        where `atol` and `tol` are the absolute and relative tolerances respectively.
:ivar int|numpy.random.RandomState seed:
If `seed` is not specified the `np.RandomState` singleton is used. If `seed` is an int, a new
`np.random.RandomState` instance is used, seeded with seed. If `seed` is already a `np.random.RandomState instance`,
then that `np.random.RandomState` instance is used. Specify `seed` for repeatable minimizations.
:ivar int workers:
If `workers` is an int the population is subdivided into `workers` sections and evaluated in parallel
(uses `multiprocessing.Pool`). Supply -1 to use all available CPU cores. Alternatively supply a map-like
callable, such as `multiprocessing.Pool.map` for evaluating the population in parallel. This evaluation is
carried out as `workers(func, iterable)`.
:ivar bool disp:
Display status messages during optimization iterations.
:ivar polish:
If True (default), then `scipy.optimize.minimize` with the `L-BFGS-B` method is used to polish the best
population member at the end, which can improve the minimization slightly.
"""
number_of_decision_variables: int
strategy: str = 'best1bin'
recombination: float = 0.3
mutation: float = 0.6
tol: float = 1e-5
seed: Union[np.random.RandomState, int] = np.random.RandomState()
workers: int = 1
disp: bool = False
polish: bool = True
popsize: int = None
population_size_for_each_variable: int = 15
total_population_size_limit: int = 100
def __attrs_post_init__(self):
if self.popsize is None:
self.popsize = self._estimate_population_size()
elif self.popsize <= 0:
raise ValueError('Number of individuals must be greater than 0.')
if type(self.popsize) != int:
raise TypeError('Population size must be an integer number.')
if not 0 < self.recombination <= 1:
raise ValueError('Recombination must be a value between 0 and 1.')
if type(self.mutation) == tuple:
mutation_dithering_array = np.array(self.mutation)
if len(self.mutation) > 2:
raise ValueError('Mutation can be a tuple with two numbers, not more.')
if mutation_dithering_array.min() < 0 or mutation_dithering_array.max() > 2:
raise ValueError('Mutation must be floats between 0 and 2.')
elif mutation_dithering_array.min() == mutation_dithering_array.max():
raise ValueError("Values for mutation dithering can't be equal.")
else:
if type(self.mutation) != int and type(self.mutation) != float:
raise TypeError('When mutation is provided as a single number, it must be a float or an int.')
if not 0 < self.mutation < 2:
raise ValueError('Mutation must be a number between 0 and 2.')
if self.tol < 0:
raise ValueError('Tolerance must be a positive float.')
def _estimate_population_size(self):
population_size = self.population_size_for_each_variable * self.number_of_decision_variables
if population_size > self.total_population_size_limit:
population_size = self.total_population_size_limit
return population_size
@attr.s(auto_attribs=True)
class PygmoSelfAdaptiveDESettings:
# TODO: docs and validations
gen: int
popsize: int
allowed_variants: list = [2, 6, 7]
variant_adptv: int = 2
ftol: float = 1e-6
xtol: float = 1e-6
memory: bool = True
seed: int = int(np.random.randint(0, 2000))
polish: bool = True
polish_method: str = 'tnewton_precond_restart'
parallel_execution: bool = False
number_of_islands: int = 2
archipelago_gen: int = 50
@attr.s(auto_attribs=True)
class PygmoOptimizationProblemWrapper:
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
args: list = []
def fitness(self, x):
return [self.objective_function(x, *self.args)]
def get_bounds(self):
return self._transform_bounds_to_pygmo_standard
def gradient(self, x):
return pg.estimate_gradient_h(lambda x: self.fitness(x), x)
@property
def _transform_bounds_to_pygmo_standard(self):
bounds_numpy = np.array(self.bounds, dtype=np.float64)
lower_bounds = list(bounds_numpy[:, 0])
upper_bounds = list(bounds_numpy[:, 1])
return lower_bounds, upper_bounds
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperSerial:
# TODO: docs and validations
solution: pg.core.population
@property
def fun(self):
return self.solution.champion_f
@property
def x(self):
return self.solution.champion_x
@attr.s(auto_attribs=True)
class PygmoSolutionWrapperParallel:
# TODO: docs and validations
champion_x: np.ndarray
champion_f: Union[float, np.float64, np.ndarray]
@property
def fun(self):
return self.champion_f
@property
def x(self):
return self.champion_x
@attr.s(auto_attribs=True)
class OptimizationProblem:
"""
This class stores and solve optimization problems with the available solvers.
"""
# TODO: docs and validations
objective_function: types.FunctionType
bounds: list
optimization_method: OptimizationMethod
solver_args: Union[ScipyDifferentialEvolutionSettings, PygmoSelfAdaptiveDESettings]
args: list = []
def __attrs_post_init__(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE and self.solver_args is None:
self.solver_args = ScipyDifferentialEvolutionSettings(self._number_of_decision_variables)
@property
def _number_of_decision_variables(self):
return len(self.bounds)
def solve_minimization(self):
if self.optimization_method == OptimizationMethod.SCIPY_DE:
result = differential_evolution(
self.objective_function,
bounds=self.bounds,
args=self.args,
strategy=self.solver_args.strategy,
popsize=self.solver_args.popsize,
recombination=self.solver_args.recombination,
mutation=self.solver_args.mutation,
tol=self.solver_args.tol,
disp=self.solver_args.disp,
polish=self.solver_args.polish,
seed=self.solver_args.seed,
workers=self.solver_args.workers
)
return result
elif self.optimization_method == OptimizationMethod.PYGMO_DE1220:
problem_wrapper = PygmoOptimizationProblemWrapper(
objective_function=self.objective_function,
bounds=self.bounds,
args=self.args
)
pygmo_algorithm = pg.algorithm(
pg.de1220(
gen=self.solver_args.gen,
allowed_variants=self.solver_args.allowed_variants,
variant_adptv=self.solver_args.variant_adptv,
ftol=self.solver_args.ftol,
xtol=self.solver_args.xtol,
memory=self.solver_args.memory,
seed=self.solver_args.seed
)
)
pygmo_problem = pg.problem(problem_wrapper)
if self.solver_args.parallel_execution:
solution_wrapper = self._run_pygmo_parallel(
pygmo_algorithm,
pygmo_problem,
number_of_islands=self.solver_args.number_of_islands,
archipelago_gen=self.solver_args.archipelago_gen
)
else:
pygmo_solution = self._run_pygmo_serial(pygmo_algorithm, pygmo_problem)
if self.solver_args.polish:
pygmo_solution = self._polish_pygmo_population(pygmo_solution)
solution_wrapper = PygmoSolutionWrapperSerial(pygmo_solution)
return solution_wrapper
else:
raise NotImplementedError('Unavailable optimization method.')
@staticmethod
def _select_best_pygmo_archipelago_solution(champions_x, champions_f):
best_index = np.argmin(champions_f)
return champions_x[best_index], champions_f[best_index]
def _run_pygmo_parallel(self, algorithm, problem, number_of_islands=2, archipelago_gen=50):
pygmo_archipelago = pg.archipelago(
n=number_of_islands,
algo=algorithm,
prob=problem,
pop_size=self.solver_args.popsize,
seed=self.solver_args.seed
)
pygmo_archipelago.evolve(n=archipelago_gen)
pygmo_archipelago.wait()
champions_x = pygmo_archipelago.get_champions_x()
champions_f = pygmo_archipelago.get_champions_f()
champion_x, champion_f = self._select_best_pygmo_archipelago_solution(champions_x, champions_f)
return PygmoSolutionWrapperParallel(champion_x=champion_x, champion_f=champion_f)
def _run_pygmo_serial(self, algorithm, problem):
population = pg.population(
prob=problem,
size=self.solver_args.popsize,
seed=self.solver_args.seed
)
solution = algorithm.evolve(population)
return solution
def _polish_pygmo_population(self, population):
pygmo_nlopt_wrapper = pg.nlopt(self.solver_args.polish_method)
nlopt_algorithm = pg.algorithm(pygmo_nlopt_wrapper)
solution_wrapper = nlopt_algorithm.evolve(population)
return solution_wrapper
| 38.088737
| 133
| 0.664606
| 10,817
| 0.969265
| 0
| 0
| 10,858
| 0.972939
| 0
| 0
| 3,109
| 0.278584
|
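A usage sketch for OptimizationProblem above with the SciPy backend. The sphere objective, bounds and offset are illustrative choices; note the module also imports pygmo at the top, so that dependency must be importable even for the SciPy path.

import numpy as np

def sphere(x, offset):
    # toy objective: minimum at x = offset in every coordinate
    return float(np.sum((np.asarray(x) - offset) ** 2))

problem = OptimizationProblem(
    objective_function=sphere,
    bounds=[(-5.0, 5.0)] * 3,
    optimization_method=OptimizationMethod.SCIPY_DE,
    solver_args=None,            # falls back to ScipyDifferentialEvolutionSettings defaults
    args=[1.5],
)
result = problem.solve_minimization()
print(result.x, result.fun)      # should approach [1.5, 1.5, 1.5] and 0.0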
a2f4994690266aa4a640429912d46124db104724
| 1,461
|
py
|
Python
|
tests/unittests/types/test_array.py
|
TrigonDev/apgorm
|
5b593bfb5a200708869e079248c25786608055d6
|
[
"MIT"
] | 8
|
2022-01-21T23:07:29.000Z
|
2022-03-26T12:03:57.000Z
|
tests/unittests/types/test_array.py
|
TrigonDev/apgorm
|
5b593bfb5a200708869e079248c25786608055d6
|
[
"MIT"
] | 22
|
2021-12-23T00:43:41.000Z
|
2022-03-23T13:17:32.000Z
|
tests/unittests/types/test_array.py
|
TrigonDev/apgorm
|
5b593bfb5a200708869e079248c25786608055d6
|
[
"MIT"
] | 3
|
2022-01-15T20:58:33.000Z
|
2022-01-26T21:36:13.000Z
|
# MIT License
#
# Copyright (c) 2021 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import pytest
from apgorm.types import Array, Int # for subtypes
@pytest.mark.parametrize("subtype", [Int(), Array(Int()), Array(Array(Int()))])
def test_array_init(subtype):
a = Array(subtype)
assert a.subtype is subtype
def test_array_sql():
assert Array(Int())._sql == "INTEGER[]"
assert Array(Array(Int()))._sql == "INTEGER[][]"
| 38.447368
| 79
| 0.750171
| 0
| 0
| 0
| 0
| 165
| 0.112936
| 0
| 0
| 1,130
| 0.773443
|
a2f56add77b1581d6619a3c899c2460cc7dc3102
| 137
|
py
|
Python
|
cisco_support/__version__.py
|
rothdennis/cisco_support
|
c20b955794400eb565fa5c178749c2ee6ef7dc0f
|
[
"MIT"
] | 4
|
2021-09-09T07:24:13.000Z
|
2022-03-04T19:51:01.000Z
|
cisco_support/__version__.py
|
rothdennis/cisco_support
|
c20b955794400eb565fa5c178749c2ee6ef7dc0f
|
[
"MIT"
] | null | null | null |
cisco_support/__version__.py
|
rothdennis/cisco_support
|
c20b955794400eb565fa5c178749c2ee6ef7dc0f
|
[
"MIT"
] | null | null | null |
__title__ = 'cisco_support'
__description__ = 'Cisco Support APIs'
__version__ = '0.1.0'
__author__ = 'Dennis Roth'
__license__ = 'MIT'
| 19.571429
| 38
| 0.737226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 60
| 0.437956
|
a2f8a7986f7bf085148eeaed0a44176810f81182
| 747
|
py
|
Python
|
code/searchers.py
|
trunc8/mespp
|
8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb
|
[
"MIT"
] | 2
|
2021-07-07T17:01:17.000Z
|
2022-03-30T05:28:44.000Z
|
code/searchers.py
|
trunc8/mespp
|
8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb
|
[
"MIT"
] | null | null | null |
code/searchers.py
|
trunc8/mespp
|
8348bdd0ba8f584ef7196c0064b8e5bafa38a0fb
|
[
"MIT"
] | 1
|
2021-07-07T17:00:54.000Z
|
2021-07-07T17:00:54.000Z
|
#!/usr/bin/env python3
# trunc8 did this
import numpy as np
class Searchers:
def __init__(self, g,
N=100,
M=2,
initial_positions=np.array([90,58]),
target_initial_position=45):
'''
g: Graph of environment
N: Number of vertices
M: Number of searchers
initial_positions: Starting positions of searchers
'''
self.N = N
self.M = M
self.initial_positions = initial_positions
self.positions = self.initial_positions.copy()
self.initial_belief = np.zeros(N+1)
capture_offset = 1 # For less confusion while indexing
vertex = target_initial_position+capture_offset
self.initial_belief[vertex] = 1
def updatePositions(self):
pass
| 25.758621
| 58
| 0.649264
| 685
| 0.917001
| 0
| 0
| 0
| 0
| 0
| 0
| 221
| 0.29585
|
a2fa1506f35030e5726f14dab7372d11ea530f9d
| 1,015
|
py
|
Python
|
vogue/api/api_v1/api.py
|
mayabrandi/vogue
|
463e6417a9168eadb0d11dea2d0f97919494bcd3
|
[
"MIT"
] | 1
|
2021-12-16T19:29:17.000Z
|
2021-12-16T19:29:17.000Z
|
vogue/api/api_v1/api.py
|
mayabrandi/vogue
|
463e6417a9168eadb0d11dea2d0f97919494bcd3
|
[
"MIT"
] | 188
|
2018-10-25T06:13:17.000Z
|
2022-02-25T19:47:06.000Z
|
vogue/api/api_v1/api.py
|
mayabrandi/vogue
|
463e6417a9168eadb0d11dea2d0f97919494bcd3
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from vogue.api.api_v1.endpoints import (
insert_documents,
home,
common_trends,
sequencing,
genootype,
reagent_labels,
prepps,
bioinfo_covid,
bioinfo_micro,
bioinfo_mip,
update,
)
from vogue.settings import static_files
app = FastAPI()
app.mount(
"/static",
static_files,
name="static",
)
app.include_router(home.router, tags=["home"])
app.include_router(common_trends.router, tags=["common_trends"])
app.include_router(sequencing.router, tags=["sequencing"])
app.include_router(genootype.router, tags=["genotype"])
app.include_router(reagent_labels.router, tags=["index"])
app.include_router(prepps.router, tags=["preps"])
app.include_router(bioinfo_micro.router, tags=["bioinfo_micro"])
app.include_router(bioinfo_covid.router, tags=["bioinfo_covid"])
app.include_router(bioinfo_mip.router, tags=["bioinfo_mip"])
app.include_router(update.router, tags=["update"])
app.include_router(insert_documents.router, tags=["sample"])
| 27.432432
| 64
| 0.747783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 133
| 0.131034
|
a2fa916053116744cb58a54f835b741f35144a4f
| 1,090
|
py
|
Python
|
models/dgcnn.py
|
veronicatozzo/SimpleView
|
70dbde727b25db8fdd9dc486ac1f74ff31a89821
|
[
"BSD-3-Clause"
] | 95
|
2021-06-09T09:44:14.000Z
|
2022-03-13T12:10:50.000Z
|
models/dgcnn.py
|
veronicatozzo/SimpleView
|
70dbde727b25db8fdd9dc486ac1f74ff31a89821
|
[
"BSD-3-Clause"
] | 7
|
2021-06-23T04:44:25.000Z
|
2022-01-14T15:45:27.000Z
|
models/dgcnn.py
|
veronicatozzo/SimpleView
|
70dbde727b25db8fdd9dc486ac1f74ff31a89821
|
[
"BSD-3-Clause"
] | 13
|
2021-07-01T23:55:15.000Z
|
2022-01-04T12:29:02.000Z
|
import torch.nn as nn
import torch.nn.functional as F
from dgcnn.pytorch.model import DGCNN as DGCNN_original
from all_utils import DATASET_NUM_CLASS
class DGCNN(nn.Module):
def __init__(self, task, dataset):
super().__init__()
self.task = task
self.dataset = dataset
if task == "cls":
num_classes = DATASET_NUM_CLASS[dataset]
# default arguments
class Args:
def __init__(self):
self.k = 20
self.emb_dims = 1024
self.dropout = 0.5
self.leaky_relu = 1
args = Args()
self.model = DGCNN_original(args, output_channels=num_classes)
else:
assert False
def forward(self, pc, cls=None):
pc = pc.to(next(self.parameters()).device)
pc = pc.permute(0, 2, 1).contiguous()
if self.task == 'cls':
assert cls is None
logit = self.model(pc)
out = {'logit': logit}
else:
assert False
return out
| 27.25
| 74
| 0.534862
| 937
| 0.859633
| 0
| 0
| 0
| 0
| 0
| 0
| 36
| 0.033028
|
a2fcc2dcdf1e51df954863eb81bc306011453b3d
| 283
|
py
|
Python
|
atcoder/arc/a036.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
atcoder/arc/a036.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
atcoder/arc/a036.py
|
tomato-300yen/coding
|
db6f440a96d8c83f486005c650461a69f27e3926
|
[
"MIT"
] | null | null | null |
from collections import deque
N, K = map(int, input().split())
T = [int(input()) for _ in range(N)]
# Sliding window over the last three readings, zero-padded for the first two days.
ans_dq = deque([0, 0, 0])
for i, t in enumerate(T):
ans_dq.append(t)
ans_dq.popleft()
if sum(ans_dq) < K and i > 1:
print(i + 1)
break
else:
print(-1)
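# Illustrative example (hypothetical input, not from the judge): with
#   N=5, K=10 and T = [5, 5, 0, 0, 0]
# the three-day sums from day 3 onward are 10, 5, 0; the first one below K occurs
# on day 4, so the script prints 4.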
| 21.769231
| 36
| 0.568905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
a2fcecf1decf4817a91d5d880a0ea9320b043380
| 238
|
py
|
Python
|
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
Python/Curos_Python_curemvid/Exercicios_dos_videos/Ex029.py
|
Jhonattan-rocha/Meus-primeiros-programas
|
f5971b66c0afd049b5d0493e8b7a116b391d058e
|
[
"MIT"
] | null | null | null |
velocidade = float(input("Enter your speed in km/h: "))
if velocidade > 80:
    amais = velocidade - 80
    amais = amais*7
    print("You were fined and must pay: R${:.2f}".format(amais))
print("END, don't get yourself killed")
| 34
| 80
| 0.663866
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 110
| 0.458333
|
a2fdf1816d77bc5926536585a5ffc8b6a4ac1f23
| 3,746
|
py
|
Python
|
research/radar-communication/dqn_agent.py
|
hieunq95/keras-rl
|
d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5
|
[
"MIT"
] | null | null | null |
research/radar-communication/dqn_agent.py
|
hieunq95/keras-rl
|
d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5
|
[
"MIT"
] | null | null | null |
research/radar-communication/dqn_agent.py
|
hieunq95/keras-rl
|
d965ea951220b5ede5ea1e11fab7d7eb45a8c2c5
|
[
"MIT"
] | null | null | null |
import numpy as np
import gym
import argparse
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Convolution2D
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from rl.callbacks import FileLogger, ModelIntervalCheckpoint
from environment import AV_Environment
from config import test_parameters, transition_probability, unexpected_ev_prob, state_space_size, action_space_size
from logger import Logger
from AV_Processor import AVProcessor
TEST_ID = test_parameters['test_id']
NB_STEPS = test_parameters['nb_steps']
EPSILON_LINEAR_STEPS = test_parameters['nb_epsilon_linear']
TARGET_MODEL_UPDATE = test_parameters['target_model_update']
GAMMA = test_parameters['gamma']
# ALPHA = test_parameters['alpha']
ALPHA = 0.001
DOUBLE_DQN = False
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['train', 'test'], default='train')
parser.add_argument('--env-name', type=str, default='AV_Radar-v1')
parser.add_argument('--weights', type=str, default=None)
args = parser.parse_args()
env = AV_Environment()
nb_actions = env.action_space.n
# policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,
# nb_steps=EPSILON_LINEAR_STEPS)
policy = EpsGreedyQPolicy(eps=.1)
processor = AVProcessor(env)
memory = SequentialMemory(limit=50000, window_length=1)
model = Sequential()
model.add(Flatten(input_shape=(1,) + env.observation_space.nvec.shape))
model.add(Dense(32, activation='relu'))
model.add(Dense(32, activation='relu'))
model.add(Dense(nb_actions, activation='linear'))
print(model.summary())
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=100,
target_model_update=TARGET_MODEL_UPDATE, policy=policy, processor=processor,
enable_double_dqn=DOUBLE_DQN, gamma=GAMMA)
dqn.compile(Adam(lr=ALPHA), metrics=['mae'])
processor.add_agent(dqn)
print('********************* Start {}DQN - test-id: {} ***********************'.
format('DOUBLE-' if DOUBLE_DQN else '', TEST_ID))
print('************************************************************************** \n '
'**************************** Simulation parameters*********************** \n'
'{} \n {} \n {} \n {} \n {} \n'.format(transition_probability, unexpected_ev_prob, state_space_size,
action_space_size, test_parameters)
+ '*************************************************************************** \n')
if args.mode == 'train':
weights_filename = './logs/dqn_{}_weights_{}.h5f'.format(args.env_name, TEST_ID)
checkpoint_weights_filename = './logs/dqn_' + args.env_name + '_weights_{step}.h5f'
log_filename = './logs/{}dqn_{}_log_{}.json'.format('d-' if DOUBLE_DQN else '', args.env_name, TEST_ID)
callbacks = [ModelIntervalCheckpoint(checkpoint_weights_filename, interval=NB_STEPS/2)]
callbacks += [Logger(log_filename, environment=env, interval=100)]
dqn.fit(env, nb_steps=NB_STEPS, visualize=False, verbose=2, nb_max_episode_steps=None, callbacks=callbacks)
dqn.save_weights(weights_filename, overwrite=True)
dqn.test(env, nb_episodes=10, visualize=False)
elif args.mode == 'test':
weights_filename = './logs/dqn_{}_weights_{}.h5f'.format(args.env_name, TEST_ID)
if args.weights:
weights_filename = args.weights
dqn.load_weights(weights_filename)
dqn.test(env, nb_episodes=100, visualize=False)
print("****************************************"
" End of training {}-th "
"****************************************".format(TEST_ID))
| 45.682927
| 115
| 0.67165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 962
| 0.256807
|
a2fe2076a061b4411e718858d451c717a3acc756
| 318
|
py
|
Python
|
Chapter01/displacy-save-as-image-1-4-5.py
|
indrasmartmob/Mastering-spaCy
|
756876902eee8437d6d9ddcef2ba7ffabfc970a3
|
[
"MIT"
] | 76
|
2021-07-07T14:32:42.000Z
|
2022-03-27T17:15:15.000Z
|
Chapter01/displacy-save-as-image-1-4-5.py
|
indrasmartmob/Mastering-spaCy
|
756876902eee8437d6d9ddcef2ba7ffabfc970a3
|
[
"MIT"
] | 4
|
2021-08-18T18:08:23.000Z
|
2022-03-27T03:30:27.000Z
|
Chapter01/displacy-save-as-image-1-4-5.py
|
indrasmartmob/Mastering-spaCy
|
756876902eee8437d6d9ddcef2ba7ffabfc970a3
|
[
"MIT"
] | 38
|
2021-07-09T22:23:38.000Z
|
2022-03-12T07:11:37.000Z
|
#!/usr/bin/env python3
import spacy
from spacy import displacy
from pathlib import Path
nlp = spacy.load("en_core_web_md")
doc = nlp("I'm a butterfly.")
svg = displacy.render(doc, style="dep", jupyter=False)
filename = "butterfly.svg"
output_path = Path(filename)
with output_path.open("w", encoding="utf-8") as svg_file:
    svg_file.write(svg)
| 22.714286
| 54
| 0.735849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 86
| 0.27044
|
a2fe69feb718bafa1d3ea491a261e3b0356c764f
| 3,485
|
py
|
Python
|
mask_detector/opencv/camera_ver2.py
|
osamhack2021/AI_Mask_Detector
|
1d71980bd7b7168a9d006f03325fb51783c7f877
|
[
"MIT"
] | null | null | null |
mask_detector/opencv/camera_ver2.py
|
osamhack2021/AI_Mask_Detector
|
1d71980bd7b7168a9d006f03325fb51783c7f877
|
[
"MIT"
] | null | null | null |
mask_detector/opencv/camera_ver2.py
|
osamhack2021/AI_Mask_Detector
|
1d71980bd7b7168a9d006f03325fb51783c7f877
|
[
"MIT"
] | 1
|
2021-11-21T08:19:54.000Z
|
2021-11-21T08:19:54.000Z
|
import cv2
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import numpy as np
model = "./AI_Mask_Detector/res10_300x300_ssd_iter_140000_fp16.caffemodel"
config = "./AI_Mask_Detector/deploy.prototxt"
# model = './AI_Mask_Detector/opencv_face_detector_uint8.pb'
# config = './AI_Mask_Detector/opencv_face_detector.pbtxt'
mask_model = tf.keras.models.load_model("./AI_Mask_Detector/model.h5")
probability_model = tf.keras.Sequential([mask_model])
width = 64
height = 64
# cap = cv2.VideoCapture(0)
cap = cv2.VideoCapture("./AI_Mask_Detector/demoVideo/test2.mp4")
if not cap.isOpened():
print("Camera open failed!")
exit()
net = cv2.dnn.readNet(model, config)
if net.empty():
print("Net open failed!")
exit()
categories = ["mask", "none"]
print("len(categories) = ", len(categories))
while True:
ret, frame = cap.read()
if ret:
img = cv2.cvtColor(frame, code=cv2.COLOR_BGR2RGB)
blob = cv2.dnn.blobFromImage(img, 1, (300, 300), (104, 177, 123))
net.setInput(blob)
detect = net.forward()
detect = detect[0, 0, :, :]
(h, w) = frame.shape[:2]
# print('--------------------------')
for i in range(detect.shape[0]):
confidence = detect[i, 2]
            if confidence < 0.4:
                # Detections are sorted by confidence, so the remaining ones are weaker.
                break
x1 = int(detect[i, 3] * w)
y1 = int(detect[i, 4] * h)
x2 = int(detect[i, 5] * w)
y2 = int(detect[i, 6] * h)
# cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
margin = 0
face = img[y1 - margin : y2 + margin, x1 - margin : x2 + margin]
resize = cv2.resize(face, (width, height))
# print(x1, y1, x2, y2, width, height)
# cv2.imshow("frame1", resize)
# np_image_data = np.asarray(inp)
rgb_tensor = tf.convert_to_tensor(resize, dtype=tf.float32)
rgb_tensor /= 255.0
rgb_tensor = tf.expand_dims(rgb_tensor, 0)
            # Prediction: mask / no-mask probabilities for the cropped face
predictions = probability_model.predict(rgb_tensor)
# print(categories[predictions[i][1]], ' ' , np.argmax(predictions[i]))
# lebel = categories[predictions[i]]
if predictions[0][0] > predictions[0][1]: # and predictions[0][0] > 0.7:
label = "Mask " + str(predictions[0][0])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 255, 0),
1,
cv2.LINE_AA,
)
if predictions[0][0] < predictions[0][1]: # and predictions[0][1] > 0.7:
label = "No Mask " + str(predictions[0][1])
cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 0, 255))
cv2.putText(
frame,
label,
(x1, y1 - 1),
cv2.FONT_HERSHEY_SIMPLEX,
0.5,
(0, 0, 255),
1,
cv2.LINE_AA,
)
# print(predictions[0][0], ' ', predictions[0][1])
cv2.imshow("frame", frame)
if cv2.waitKey(30) == 27:
break
    else:
        print("Frame read failed (end of stream or camera error)")
        break
cap.release()
cv2.destroyAllWindows()
| 29.786325
| 85
| 0.505595
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 839
| 0.24047
|
a2ff595beb35cc3bf63e8eee3f852f028caee135
| 55,499
|
py
|
Python
|
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
pipelines/head-pose-pipeline/training/models.py
|
tonouchi510/kfp-project
|
67b78ae53cc3de594b8254999a4f553a8d5cec27
|
[
"MIT"
] | null | null | null |
import sys
import logging
import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K
from capsulelayers import CapsuleLayer
from capsulelayers import MatMulLayer
from loupe_keras import NetVLAD
sys.setrecursionlimit(2**20)
np.random.seed(2**10)
# Custom layers
# Note - usage of Lambda layers prevents conversion
# and optimization by the underlying math engine (TensorFlow in this case)
class SSRLayer(tf.keras.layers.Layer):
def __init__(self, s1, s2, s3, lambda_d, **kwargs):
super(SSRLayer, self).__init__(**kwargs)
self.s1 = s1
self.s2 = s2
self.s3 = s3
self.lambda_d = lambda_d
self.trainable = False
def call(self, inputs):
x = inputs
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
s1 = self.s1
s2 = self.s2
s3 = self.s3
lambda_d = self.lambda_d
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
pred = (a + b + c) * V
return pred
def compute_output_shape(self, input_shape):
return (input_shape[0], 3)
def get_config(self):
config = {
"s1": self.s1,
"s2": self.s2,
"s3": self.s3,
"lambda_d": self.lambda_d,
}
base_config = super(SSRLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class FeatSliceLayer(tf.keras.layers.Layer):
def __init__(self, start_index, end_index, **kwargs):
super(FeatSliceLayer, self).__init__(**kwargs)
self.start_index = start_index
self.end_index = end_index
self.trainable = False
def call(self, inputs):
return inputs[:, self.start_index:self.end_index]
def compute_output_shape(self, input_shape):
return (input_shape[0], self.end_index - self.start_index)
def get_config(self):
config = {"start_index": self.start_index, "end_index": self.end_index}
base_config = super(FeatSliceLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class MomentsLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(MomentsLayer, self).__init__(**kwargs)
self.trainable = False
def call(self, inputs):
_, var = tf.nn.moments(inputs, axes=-1)
return var
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[-1])
class MatrixMultiplyLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(MatrixMultiplyLayer, self).__init__(**kwargs)
self.trainable = False
def call(self, inputs):
x1, x2 = inputs
# TODO: add some asserts on the inputs
# it is expected the shape of inputs are
# arranged to be able to perform the matrix multiplication
return tf.matmul(x1, x2)
def compute_output_shape(self, input_shapes):
return (input_shapes[0][0], input_shapes[0][1], input_shapes[1][-1])
class MatrixNormLayer(tf.keras.layers.Layer):
def __init__(self, tile_count, **kwargs):
super(MatrixNormLayer, self).__init__(**kwargs)
self.trainable = False
self.tile_count = tile_count
def call(self, input):
sum = K.sum(input, axis=-1, keepdims=True)
tiled = K.tile(sum, (1, 1, self.tile_count))
return tiled
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[1], self.tile_count)
def get_config(self):
config = {"tile_count": self.tile_count}
base_config = super(MatrixNormLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class PrimCapsLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(PrimCapsLayer, self).__init__(**kwargs)
self.trainable = False
def call(self, inputs):
x1, x2, norm = inputs
return tf.matmul(x1, x2) / norm
def compute_output_shape(self, input_shapes):
return input_shapes[-1]
class AggregatedFeatureExtractionLayer(tf.keras.layers.Layer):
def __init__(self, num_capsule, **kwargs):
super(AggregatedFeatureExtractionLayer, self).__init__(**kwargs)
self.trainable = False
self.num_capsule = num_capsule
def call(self, input):
s1_a = 0
s1_b = self.num_capsule // 3
feat_s1_div = input[:, s1_a:s1_b, :]
s2_a = self.num_capsule // 3
s2_b = 2 * self.num_capsule // 3
feat_s2_div = input[:, s2_a:s2_b, :]
s3_a = 2 * self.num_capsule // 3
s3_b = self.num_capsule
feat_s3_div = input[:, s3_a:s3_b, :]
return [feat_s1_div, feat_s2_div, feat_s3_div]
def compute_output_shape(self, input_shape):
last_dim = input_shape[-1]
partition = self.num_capsule // 3
return [
(input_shape[0], partition, last_dim),
(input_shape[0], partition, last_dim),
(input_shape[0], partition, last_dim),
]
def get_config(self):
config = {"num_capsule": self.num_capsule}
base_config = super(AggregatedFeatureExtractionLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class BaseFSANet(object):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
self._channel_axis = 3 if K.image_data_format() == "channels_last" else 1
if self._channel_axis == 1:
logging.debug("image_dim_ordering = 'th'")
self._input_shape = (3, image_size, image_size)
else:
logging.debug("image_dim_ordering = 'tf'")
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
self.num_capsule = S_set[0]
self.dim_capsule = S_set[1]
self.routings = S_set[2]
self.num_primcaps = S_set[3]
self.m_dim = S_set[4]
self.F_shape = int(self.num_capsule / 3) * self.dim_capsule
self.map_xy_size = int(8 * image_size / 64)
self.is_fc_model = False
self.is_noS_model = False
self.is_varS_model = False
def _convBlock(self, x, num_filters, activation, kernel_size=(3, 3)):
x = tf.keras.layers.SeparableConv2D(num_filters, kernel_size, padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation(activation)(x)
return x
def ssr_G_model_build(self, img_inputs):
# -------------------------------------------------------------------------------------------------------------------------
x = self._convBlock(img_inputs, num_filters=16, activation="relu")
x_layer1 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer1, num_filters=32, activation="relu")
x = self._convBlock(x, num_filters=32, activation="relu")
x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer2, num_filters=64, activation="relu")
x = self._convBlock(x, num_filters=64, activation="relu")
x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = self._convBlock(x_layer3, num_filters=128, activation="relu")
x_layer4 = self._convBlock(x, num_filters=128, activation="relu")
# -------------------------------------------------------------------------------------------------------------------------
s = self._convBlock(img_inputs, num_filters=16, activation="tanh")
s_layer1 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer1, num_filters=32, activation="tanh")
s = self._convBlock(s, num_filters=32, activation="tanh")
s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer2, num_filters=64, activation="tanh")
s = self._convBlock(s, num_filters=64, activation="tanh")
s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = self._convBlock(s_layer3, num_filters=128, activation="tanh")
s_layer4 = self._convBlock(s, num_filters=128, activation="tanh")
# -------------------------------------------------------------------------------------------------------------------------
s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
# -------------------------------------------------------------------------------------------------------------------------
# Spatial Pyramid Pooling
# feat_s1_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s1_pre)
# feat_s2_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s2_pre)
# feat_s3_pre = SpatialPyramidPooling([1, 2, 4],'average')(feat_s3_pre)
# feat_s1_pre = Globaltf.keras.layers.AveragePooling2D()(feat_s1_pre)
# feat_s2_pre = Globaltf.keras.layers.AveragePooling2D()(feat_s2_pre)
feat_s3_pre = tf.keras.layers.AveragePooling2D((2, 2))(
feat_s3_pre
) # make sure (8x8x64) feature maps
return tf.keras.models.Model(
inputs=img_inputs,
outputs=[feat_s1_pre, feat_s2_pre, feat_s3_pre],
name="ssr_G_model",
)
def ssr_F_model_build(self, feat_dim, name_F):
input_s1_pre = tf.keras.layers.Input((feat_dim,))
input_s2_pre = tf.keras.layers.Input((feat_dim,))
input_s3_pre = tf.keras.layers.Input((feat_dim,))
def _process_input(stage_index, stage_num, num_classes, input_s_pre):
feat_delta_s = FeatSliceLayer(0, 4)(input_s_pre)
delta_s = tf.keras.layers.Dense(
num_classes, activation="tanh", name=f"delta_s{stage_index}"
)(feat_delta_s)
feat_local_s = FeatSliceLayer(4, 8)(input_s_pre)
local_s = tf.keras.layers.Dense(
units=num_classes,
activation="tanh",
name=f"local_delta_stage{stage_index}",
)(feat_local_s)
feat_pred_s = FeatSliceLayer(8, 16)(input_s_pre)
feat_pred_s = tf.keras.layers.Dense(
stage_num * num_classes, activation="relu"
)(feat_pred_s)
pred_s = tf.keras.layers.Reshape((num_classes, stage_num))(feat_pred_s)
return delta_s, local_s, pred_s
delta_s1, local_s1, pred_s1 = _process_input(
1, self.stage_num[0], self.num_classes, input_s1_pre
)
delta_s2, local_s2, pred_s2 = _process_input(
2, self.stage_num[1], self.num_classes, input_s2_pre
)
delta_s3, local_s3, pred_s3 = _process_input(
3, self.stage_num[2], self.num_classes, input_s3_pre
)
return tf.keras.models.Model(
inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
outputs=[
pred_s1,
pred_s2,
pred_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
],
name=name_F,
)
def ssr_FC_model_build(self, feat_dim, name_F):
input_s1_pre = tf.keras.layers.Input((feat_dim,))
input_s2_pre = tf.keras.layers.Input((feat_dim,))
input_s3_pre = tf.keras.layers.Input((feat_dim,))
def _process_input(stage_index, stage_num, num_classes, input_s_pre):
feat_delta_s = tf.keras.layers.Dense(2 * num_classes, activation="tanh")(
input_s_pre
)
delta_s = tf.keras.layers.Dense(
num_classes, activation="tanh", name=f"delta_s{stage_index}"
)(feat_delta_s)
feat_local_s = tf.keras.layers.Dense(2 * num_classes, activation="tanh")(
input_s_pre
)
local_s = tf.keras.layers.Dense(
units=num_classes,
activation="tanh",
name=f"local_delta_stage{stage_index}",
)(feat_local_s)
feat_pred_s = tf.keras.layers.Dense(
stage_num * num_classes, activation="relu"
)(input_s_pre)
pred_s = tf.keras.layers.Reshape((num_classes, stage_num))(feat_pred_s)
return delta_s, local_s, pred_s
delta_s1, local_s1, pred_s1 = _process_input(
1, self.stage_num[0], self.num_classes, input_s1_pre
)
delta_s2, local_s2, pred_s2 = _process_input(
2, self.stage_num[1], self.num_classes, input_s2_pre
)
delta_s3, local_s3, pred_s3 = _process_input(
3, self.stage_num[2], self.num_classes, input_s3_pre
)
return tf.keras.models.Model(
inputs=[input_s1_pre, input_s2_pre, input_s3_pre],
outputs=[
pred_s1,
pred_s2,
pred_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
],
name=name_F,
)
def ssr_feat_S_model_build(self, m_dim):
input_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
if self.is_varS_model:
feat_preS = MomentsLayer()(input_preS)
else:
feat_preS = tf.keras.layers.Conv2D(
1, (1, 1), padding="same", activation="sigmoid"
)(input_preS)
feat_preS = tf.keras.layers.Reshape((-1,))(feat_preS)
SR_matrix = tf.keras.layers.Dense(
m_dim * (self.map_xy_size * self.map_xy_size * 3), activation="sigmoid"
)(feat_preS)
SR_matrix = tf.keras.layers.Reshape(
(m_dim, (self.map_xy_size * self.map_xy_size * 3))
)(SR_matrix)
return tf.keras.models.Model(
inputs=input_preS, outputs=[SR_matrix, feat_preS], name="feat_S_model"
)
def ssr_S_model_build(self, num_primcaps, m_dim):
input_s1_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
input_s2_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
input_s3_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
feat_S_model = self.ssr_feat_S_model_build(m_dim)
SR_matrix_s1, feat_s1_preS = feat_S_model(input_s1_preS)
SR_matrix_s2, feat_s2_preS = feat_S_model(input_s2_preS)
SR_matrix_s3, feat_s3_preS = feat_S_model(input_s3_preS)
feat_pre_concat = tf.keras.layers.Concatenate()(
[feat_s1_preS, feat_s2_preS, feat_s3_preS]
)
SL_matrix = tf.keras.layers.Dense(
int(num_primcaps / 3) * m_dim, activation="sigmoid"
)(feat_pre_concat)
SL_matrix = tf.keras.layers.Reshape((int(num_primcaps / 3), m_dim))(SL_matrix)
S_matrix_s1 = MatrixMultiplyLayer(name="S_matrix_s1")([SL_matrix, SR_matrix_s1])
S_matrix_s2 = MatrixMultiplyLayer(name="S_matrix_s2")([SL_matrix, SR_matrix_s2])
S_matrix_s3 = MatrixMultiplyLayer(name="S_matrix_s3")([SL_matrix, SR_matrix_s3])
# Very important!!! Without this training won't converge.
# norm_S_s1 = Lambda(lambda x: K.tile(K.sum(x,axis=-1,keepdims=True),(1,1,64)))(S_matrix_s1)
norm_S_s1 = MatrixNormLayer(tile_count=64)(S_matrix_s1)
norm_S_s2 = MatrixNormLayer(tile_count=64)(S_matrix_s2)
norm_S_s3 = MatrixNormLayer(tile_count=64)(S_matrix_s3)
feat_s1_pre = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s1_preS)
feat_s2_pre = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s2_preS)
feat_s3_pre = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s3_preS)
feat_pre_concat = tf.keras.layers.Concatenate(axis=1)(
[feat_s1_pre, feat_s2_pre, feat_s3_pre]
)
        # Warning: don't use Keras's 'K.dot'; it behaves unexpectedly with high-dimensional tensors.
# https://github.com/keras-team/keras/issues/9779
# Make sure 'tf.matmul' is used
# primcaps = Lambda(lambda x: tf.matmul(x[0],x[1])/x[2])([S_matrix,feat_pre_concat, norm_S])
primcaps_s1 = PrimCapsLayer()([S_matrix_s1, feat_pre_concat, norm_S_s1])
primcaps_s2 = PrimCapsLayer()([S_matrix_s2, feat_pre_concat, norm_S_s2])
primcaps_s3 = PrimCapsLayer()([S_matrix_s3, feat_pre_concat, norm_S_s3])
primcaps = tf.keras.layers.Concatenate(axis=1)(
[primcaps_s1, primcaps_s2, primcaps_s3]
)
return tf.keras.models.Model(
inputs=[input_s1_preS, input_s2_preS, input_s3_preS],
outputs=primcaps,
name="ssr_S_model",
)
def ssr_noS_model_build(self, **kwargs):
input_s1_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
input_s2_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
input_s3_preS = tf.keras.layers.Input((self.map_xy_size, self.map_xy_size, 64))
primcaps_s1 = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s1_preS)
primcaps_s2 = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s2_preS)
primcaps_s3 = tf.keras.layers.Reshape(
(self.map_xy_size * self.map_xy_size, 64)
)(input_s3_preS)
primcaps = tf.keras.layers.Concatenate(axis=1)(
[primcaps_s1, primcaps_s2, primcaps_s3]
)
return tf.keras.models.Model(
inputs=[input_s1_preS, input_s2_preS, input_s3_preS],
outputs=primcaps,
name="ssr_S_model",
)
def __call__(self):
logging.debug("Creating model...")
img_inputs = tf.keras.layers.Input(self._input_shape)
# Build various models
ssr_G_model = self.ssr_G_model_build(img_inputs)
if self.is_noS_model:
ssr_S_model = self.ssr_noS_model_build()
else:
ssr_S_model = self.ssr_S_model_build(
num_primcaps=self.num_primcaps, m_dim=self.m_dim
)
ssr_aggregation_model = self.ssr_aggregation_model_build(
(self.num_primcaps, 64)
)
if self.is_fc_model:
ssr_F_Cap_model = self.ssr_FC_model_build(self.F_shape, "ssr_F_Cap_model")
else:
ssr_F_Cap_model = self.ssr_F_model_build(self.F_shape, "ssr_F_Cap_model")
# Wire them up
ssr_G_list = ssr_G_model(img_inputs)
ssr_primcaps = ssr_S_model(ssr_G_list)
ssr_Cap_list = ssr_aggregation_model(ssr_primcaps)
ssr_F_Cap_list = ssr_F_Cap_model(ssr_Cap_list)
pred_pose = SSRLayer(
s1=self.stage_num[0],
s2=self.stage_num[1],
s3=self.stage_num[2],
lambda_d=self.lambda_d,
name="pred_pose",
)(ssr_F_Cap_list)
return tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
# Capsule FSA-Net models
class BaseCapsuleFSANet(BaseFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(BaseCapsuleFSANet, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
def ssr_aggregation_model_build(self, shape_primcaps):
input_primcaps = tf.keras.layers.Input(shape_primcaps)
capsule = CapsuleLayer(
self.num_capsule, self.dim_capsule, routings=self.routings, name="caps"
)(input_primcaps)
feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(
num_capsule=self.num_capsule
)(capsule)
feat_s1_div = tf.keras.layers.Reshape((-1,))(feat_s1_div)
feat_s2_div = tf.keras.layers.Reshape((-1,))(feat_s2_div)
feat_s3_div = tf.keras.layers.Reshape((-1,))(feat_s3_div)
return tf.keras.models.Model(
inputs=input_primcaps,
outputs=[feat_s1_div, feat_s2_div, feat_s3_div],
name="ssr_Cap_model",
)
class FSA_net_Capsule(BaseCapsuleFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Capsule, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = False
class FSA_net_Var_Capsule(BaseCapsuleFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Var_Capsule, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = True
class FSA_net_noS_Capsule(BaseCapsuleFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_noS_Capsule, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_noS_model = True
class FSA_net_Capsule_FC(FSA_net_Capsule):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Capsule_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
class FSA_net_Var_Capsule_FC(FSA_net_Var_Capsule):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Var_Capsule_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
class FSA_net_noS_Capsule_FC(FSA_net_noS_Capsule):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_noS_Capsule_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
# NetVLAD models
class BaseNetVLADFSANet(BaseFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(BaseNetVLADFSANet, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
def ssr_aggregation_model_build(self, shape_primcaps):
input_primcaps = tf.keras.layers.Input(shape_primcaps)
agg_feat = NetVLAD(
feature_size=64,
max_samples=self.num_primcaps,
cluster_size=self.num_capsule,
output_dim=self.num_capsule * self.dim_capsule,
)(input_primcaps)
agg_feat = tf.keras.layers.Reshape((self.num_capsule, self.dim_capsule))(
agg_feat
)
feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(
num_capsule=self.num_capsule
)(agg_feat)
feat_s1_div = tf.keras.layers.Reshape((-1,))(feat_s1_div)
feat_s2_div = tf.keras.layers.Reshape((-1,))(feat_s2_div)
feat_s3_div = tf.keras.layers.Reshape((-1,))(feat_s3_div)
return tf.keras.models.Model(
inputs=input_primcaps,
outputs=[feat_s1_div, feat_s2_div, feat_s3_div],
name="ssr_Agg_model",
)
class FSA_net_NetVLAD(BaseNetVLADFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_NetVLAD, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = False
class FSA_net_Var_NetVLAD(BaseNetVLADFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Var_NetVLAD, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = True
class FSA_net_noS_NetVLAD(BaseNetVLADFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_noS_NetVLAD, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_noS_model = True
class FSA_net_NetVLAD_FC(FSA_net_NetVLAD):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_NetVLAD_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
class FSA_net_Var_NetVLAD_FC(FSA_net_Var_NetVLAD):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Var_NetVLAD_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
class FSA_net_noS_NetVLAD_FC(FSA_net_noS_NetVLAD):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_noS_NetVLAD_FC, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_fc_model = True
# Metric models
class BaseMetricFSANet(BaseFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(BaseMetricFSANet, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
def ssr_aggregation_model_build(self, shape_primcaps):
input_primcaps = tf.keras.layers.Input(shape_primcaps)
metric_feat = MatMulLayer(16, type=1)(input_primcaps)
metric_feat = MatMulLayer(3, type=2)(metric_feat)
feat_s1_div, feat_s2_div, feat_s3_div = AggregatedFeatureExtractionLayer(
num_capsule=self.num_capsule
)(metric_feat)
feat_s1_div = tf.keras.layers.Reshape((-1,))(feat_s1_div)
feat_s2_div = tf.keras.layers.Reshape((-1,))(feat_s2_div)
feat_s3_div = tf.keras.layers.Reshape((-1,))(feat_s3_div)
return tf.keras.models.Model(
inputs=input_primcaps,
outputs=[feat_s1_div, feat_s2_div, feat_s3_div],
name="ssr_Metric_model",
)
class FSA_net_Metric(BaseMetricFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Metric, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = False
class FSA_net_Var_Metric(BaseMetricFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_Var_Metric, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_varS_model = True
class FSA_net_noS_Metric(BaseMetricFSANet):
def __init__(self, image_size, num_classes, stage_num, lambda_d, S_set):
super(FSA_net_noS_Metric, self).__init__(
image_size, num_classes, stage_num, lambda_d, S_set
)
self.is_noS_model = True
class SSR_net:
def __init__(self, image_size, stage_num, lambda_local, lambda_d):
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.stage_num = stage_num
self.lambda_local = lambda_local
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
inputs = tf.keras.layers.Input(shape=self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = tf.keras.layers.Conv2D(32, (3, 3))(inputs)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer1 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer1)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer2 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer2)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer3 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3))(x_layer3)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = tf.keras.layers.Conv2D(16, (3, 3))(inputs)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer1 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer1)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer2 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer2)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer3 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3))(s_layer3)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s)
s_layer4 = tf.keras.layers.Flatten()(s_layer4)
s_layer4_mix = tf.keras.layers.Dropout(0.2)(s_layer4)
s_layer4_mix = tf.keras.layers.Dense(
units=self.stage_num[0], activation="relu"
)(s_layer4_mix)
x_layer4 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x)
x_layer4 = tf.keras.layers.Flatten()(x_layer4)
x_layer4_mix = tf.keras.layers.Dropout(0.2)(x_layer4)
x_layer4_mix = tf.keras.layers.Dense(
units=self.stage_num[0], activation="relu"
)(x_layer4_mix)
feat_a_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
delta_s1 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s1")(
feat_a_s1_pre
)
feat_a_s1 = tf.keras.layers.Multiply()([s_layer4_mix, x_layer4_mix])
feat_a_s1 = tf.keras.layers.Dense(2 * self.stage_num[0], activation="relu")(
feat_a_s1
)
pred_a_s1 = tf.keras.layers.Dense(
units=self.stage_num[0], activation="relu", name="pred_age_stage1"
)(feat_a_s1)
# feat_local_s1 = Lambda(lambda x: x/10)(feat_a_s1)
# feat_a_s1_local = Dropout(0.2)(pred_a_s1)
local_s1 = tf.keras.layers.Dense(
units=self.stage_num[0], activation="tanh", name="local_delta_stage1"
)(feat_a_s1)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s_layer2)
s_layer2 = tf.keras.layers.MaxPooling2D(4, 4)(s_layer2)
s_layer2 = tf.keras.layers.Flatten()(s_layer2)
s_layer2_mix = tf.keras.layers.Dropout(0.2)(s_layer2)
s_layer2_mix = tf.keras.layers.Dense(self.stage_num[1], activation="relu")(
s_layer2_mix
)
x_layer2 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x_layer2)
x_layer2 = tf.keras.layers.AveragePooling2D(4, 4)(x_layer2)
x_layer2 = tf.keras.layers.Flatten()(x_layer2)
x_layer2_mix = tf.keras.layers.Dropout(0.2)(x_layer2)
x_layer2_mix = tf.keras.layers.Dense(self.stage_num[1], activation="relu")(
x_layer2_mix
)
feat_a_s2_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
delta_s2 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s2")(
feat_a_s2_pre
)
feat_a_s2 = tf.keras.layers.Multiply()([s_layer2_mix, x_layer2_mix])
feat_a_s2 = tf.keras.layers.Dense(2 * self.stage_num[1], activation="relu")(
feat_a_s2
)
pred_a_s2 = tf.keras.layers.Dense(
units=self.stage_num[1], activation="relu", name="pred_age_stage2"
)(feat_a_s2)
# feat_local_s2 = Lambda(lambda x: x/10)(feat_a_s2)
# feat_a_s2_local = Dropout(0.2)(pred_a_s2)
local_s2 = tf.keras.layers.Dense(
units=self.stage_num[1], activation="tanh", name="local_delta_stage2"
)(feat_a_s2)
# -------------------------------------------------------------------------------------------------------------------------
s_layer1 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(s_layer1)
s_layer1 = tf.keras.layers.MaxPooling2D(8, 8)(s_layer1)
s_layer1 = tf.keras.layers.Flatten()(s_layer1)
s_layer1_mix = tf.keras.layers.Dropout(0.2)(s_layer1)
s_layer1_mix = tf.keras.layers.Dense(self.stage_num[2], activation="relu")(
s_layer1_mix
)
x_layer1 = tf.keras.layers.Conv2D(10, (1, 1), activation="relu")(x_layer1)
x_layer1 = tf.keras.layers.AveragePooling2D(8, 8)(x_layer1)
x_layer1 = tf.keras.layers.Flatten()(x_layer1)
x_layer1_mix = tf.keras.layers.Dropout(0.2)(x_layer1)
x_layer1_mix = tf.keras.layers.Dense(self.stage_num[2], activation="relu")(
x_layer1_mix
)
feat_a_s3_pre = tf.keras.layers.Multiply()([s_layer1, x_layer1])
delta_s3 = tf.keras.layers.Dense(1, activation="tanh", name="delta_s3")(
feat_a_s3_pre
)
feat_a_s3 = tf.keras.layers.Multiply()([s_layer1_mix, x_layer1_mix])
feat_a_s3 = tf.keras.layers.Dense(2 * self.stage_num[2], activation="relu")(
feat_a_s3
)
pred_a_s3 = tf.keras.layers.Dense(
units=self.stage_num[2], activation="relu", name="pred_age_stage3"
)(feat_a_s3)
# feat_local_s3 = Lambda(lambda x: x/10)(feat_a_s3)
# feat_a_s3_local = Dropout(0.2)(pred_a_s3)
local_s3 = tf.keras.layers.Dense(
units=self.stage_num[2], activation="tanh", name="local_delta_stage3"
)(feat_a_s3)
# -------------------------------------------------------------------------------------------------------------------------
def merge_age(x, s1, s2, s3, lambda_local, lambda_d):
a = x[0][:, 0] * 0
b = x[0][:, 0] * 0
c = x[0][:, 0] * 0
# A = s1 * s2 * s3
V = 101
for i in range(0, s1):
a = a + (i + lambda_local * x[6][:, i]) * x[0][:, i]
a = K.expand_dims(a, -1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j + lambda_local * x[7][:, j]) * x[1][:, j]
b = K.expand_dims(b, -1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k + lambda_local * x[8][:, k]) * x[2][:, k]
c = K.expand_dims(c, -1)
c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
age = (a + b + c) * V
return age
pred_a = tf.keras.layers.Lambda(
merge_age,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_local": self.lambda_local,
"lambda_d": self.lambda_d,
},
name="pred_a",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = tf.keras.models.Model(inputs=inputs, outputs=pred_a)
return model
class SSR_net_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = tf.keras.layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = tf.keras.layers.SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer1 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(x_layer1)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(x_layer2)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x)
x = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(x_layer3)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x = tf.keras.layers.Activation("relu")(x)
x = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(x)
x = tf.keras.layers.BatchNormalization(axis=-1)(x)
x_layer4 = tf.keras.layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = tf.keras.layers.SeparableConv2D(16, (3, 3), padding="same")(img_inputs)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer1 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(s_layer1)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s = tf.keras.layers.SeparableConv2D(32, (3, 3), padding="same")(s)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(s_layer2)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s = tf.keras.layers.SeparableConv2D(64, (3, 3), padding="same")(s)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s)
s = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(s_layer3)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s = tf.keras.layers.Activation("tanh")(s)
s = tf.keras.layers.SeparableConv2D(128, (3, 3), padding="same")(s)
s = tf.keras.layers.BatchNormalization(axis=-1)(s)
s_layer4 = tf.keras.layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer4)
x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = tf.keras.layers.Flatten()(feat_s1_pre)
feat_delta_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s1"
)(feat_delta_s1)
feat_local_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = tf.keras.layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[0]))(
feat_pred_s1
)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer3)
x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = tf.keras.layers.Flatten()(feat_s2_pre)
feat_delta_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s2"
)(feat_delta_s2)
feat_local_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = tf.keras.layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[1]))(
feat_pred_s2
)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer2)
x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = tf.keras.layers.Flatten()(feat_s3_pre)
feat_delta_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s3"
)(feat_delta_s3)
feat_local_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = tf.keras.layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[2]))(
feat_pred_s3
)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
pred = (a + b + c) * V
return pred
pred_pose = tf.keras.layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
return model
class SSR_net_ori_MT:
def __init__(self, image_size, num_classes, stage_num, lambda_d):
self._channel_axis = -1
self._input_shape = (image_size, image_size, 3)
self.num_classes = num_classes
self.stage_num = stage_num
self.lambda_d = lambda_d
def __call__(self):
logging.debug("Creating model...")
img_inputs = tf.keras.layers.Input(self._input_shape)
# -------------------------------------------------------------------------------------------------------------------------
x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(img_inputs)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer1 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer1)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer2 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer2)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x = tf.keras.layers.Activation("relu")(x)
x_layer3 = tf.keras.layers.AveragePooling2D(2, 2)(x)
x = tf.keras.layers.Conv2D(32, (3, 3), padding="same")(x_layer3)
x = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(x)
x_layer4 = tf.keras.layers.Activation("relu")(x)
# -------------------------------------------------------------------------------------------------------------------------
s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(img_inputs)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer1 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer1)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer2 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer2)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s = tf.keras.layers.Activation("tanh")(s)
s_layer3 = tf.keras.layers.MaxPooling2D(2, 2)(s)
s = tf.keras.layers.Conv2D(16, (3, 3), padding="same")(s_layer3)
s = tf.keras.layers.BatchNormalization(axis=self._channel_axis)(s)
s_layer4 = tf.keras.layers.Activation("tanh")(s)
# -------------------------------------------------------------------------------------------------------------------------
# Classifier block
s_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer4)
s_layer4 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer4)
x_layer4 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer4)
x_layer4 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer4)
feat_s1_pre = tf.keras.layers.Multiply()([s_layer4, x_layer4])
feat_s1_pre = tf.keras.layers.Flatten()(feat_s1_pre)
feat_delta_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
delta_s1 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s1"
)(feat_delta_s1)
feat_local_s1 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s1_pre
)
local_s1 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage1"
)(feat_local_s1)
feat_pred_s1 = tf.keras.layers.Dense(
self.stage_num[0] * self.num_classes, activation="relu"
)(feat_s1_pre)
pred_a_s1 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[0]))(
feat_pred_s1
)
# -------------------------------------------------------------------------------------------------------------------------
s_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer3)
s_layer3 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer3)
x_layer3 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer3)
x_layer3 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer3)
feat_s2_pre = tf.keras.layers.Multiply()([s_layer3, x_layer3])
feat_s2_pre = tf.keras.layers.Flatten()(feat_s2_pre)
feat_delta_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
delta_s2 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s2"
)(feat_delta_s2)
feat_local_s2 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s2_pre
)
local_s2 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage2"
)(feat_local_s2)
feat_pred_s2 = tf.keras.layers.Dense(
self.stage_num[1] * self.num_classes, activation="relu"
)(feat_s2_pre)
pred_a_s2 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[1]))(
feat_pred_s2
)
# -------------------------------------------------------------------------------------------------------------------------
s_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="tanh")(s_layer2)
s_layer2 = tf.keras.layers.MaxPooling2D((2, 2))(s_layer2)
x_layer2 = tf.keras.layers.Conv2D(64, (1, 1), activation="relu")(x_layer2)
x_layer2 = tf.keras.layers.AveragePooling2D((2, 2))(x_layer2)
feat_s3_pre = tf.keras.layers.Multiply()([s_layer2, x_layer2])
feat_s3_pre = tf.keras.layers.Flatten()(feat_s3_pre)
feat_delta_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
delta_s3 = tf.keras.layers.Dense(
self.num_classes, activation="tanh", name="delta_s3"
)(feat_delta_s3)
feat_local_s3 = tf.keras.layers.Dense(2 * self.num_classes, activation="tanh")(
feat_s3_pre
)
local_s3 = tf.keras.layers.Dense(
units=self.num_classes, activation="tanh", name="local_delta_stage3"
)(feat_local_s3)
feat_pred_s3 = tf.keras.layers.Dense(
self.stage_num[2] * self.num_classes, activation="relu"
)(feat_s3_pre)
pred_a_s3 = tf.keras.layers.Reshape((self.num_classes, self.stage_num[2]))(
feat_pred_s3
)
# -------------------------------------------------------------------------------------------------------------------------
def SSR_module(x, s1, s2, s3, lambda_d):
a = x[0][:, :, 0] * 0
b = x[0][:, :, 0] * 0
c = x[0][:, :, 0] * 0
di = s1 // 2
dj = s2 // 2
dk = s3 // 2
V = 99
# lambda_d = 0.9
for i in range(0, s1):
a = a + (i - di + x[6]) * x[0][:, :, i]
# a = K.expand_dims(a,-1)
a = a / (s1 * (1 + lambda_d * x[3]))
for j in range(0, s2):
b = b + (j - dj + x[7]) * x[1][:, :, j]
# b = K.expand_dims(b,-1)
b = b / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4]))
for k in range(0, s3):
c = c + (k - dk + x[8]) * x[2][:, :, k]
# c = K.expand_dims(c,-1)
c = c / (s1 * (1 + lambda_d * x[3])) / (s2 * (1 + lambda_d * x[4])) / (s3 * (1 + lambda_d * x[5]))
pred = (a + b + c) * V
return pred
pred_pose = tf.keras.layers.Lambda(
SSR_module,
arguments={
"s1": self.stage_num[0],
"s2": self.stage_num[1],
"s3": self.stage_num[2],
"lambda_d": self.lambda_d,
},
name="pred_pose",
)(
[
pred_a_s1,
pred_a_s2,
pred_a_s3,
delta_s1,
delta_s2,
delta_s3,
local_s1,
local_s2,
local_s3,
]
)
model = tf.keras.models.Model(inputs=img_inputs, outputs=pred_pose)
return model
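# Minimal construction sketch (hyperparameter values are illustrative assumptions,
# not taken from this repository's training configuration):
if __name__ == "__main__":
    S_set = [3, 16, 2, 7 * 3, 5]   # num_capsule, dim_capsule, routings, num_primcaps, m_dim
    model = FSA_net_Capsule(image_size=64, num_classes=3, stage_num=[3, 3, 3],
                            lambda_d=1, S_set=S_set)()
    model.summary()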
| 40.658608
| 131
| 0.568479
| 54,923
| 0.989621
| 0
| 0
| 0
| 0
| 0
| 0
| 6,542
| 0.117876
|
0c0064090948d111bf7fd540d7adcc81adb3d655
| 2,537
|
py
|
Python
|
remijquerytools/__init__.py
|
kdahlhaus/remi-jquery-tools
|
3ecc78d6a39edc7a77b89dd8ed08649f759b503a
|
[
"Apache-2.0"
] | null | null | null |
remijquerytools/__init__.py
|
kdahlhaus/remi-jquery-tools
|
3ecc78d6a39edc7a77b89dd8ed08649f759b503a
|
[
"Apache-2.0"
] | null | null | null |
remijquerytools/__init__.py
|
kdahlhaus/remi-jquery-tools
|
3ecc78d6a39edc7a77b89dd8ed08649f759b503a
|
[
"Apache-2.0"
] | null | null | null |
import remi.gui as gui
import os
import logging
log = logging.getLogger('remi.gui.remijquerytools.overlay')
def get_res_path():
""" return addtion to 'res' path for items needed by this lib """
res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'res')
return res_path
class Overlay(gui.Widget):
@gui.decorate_constructor_parameter_types([dict, ])
def __init__(self, trigger, **kwargs):
self.trigger = trigger
super(Overlay, self).__init__(**kwargs)
def repr(self, client, changed_widgets={}):
""" It is used to automatically represent the object to HTML format
packs all the attributes, children and so on.
Args:
client (App): The client instance.
changed_widgets (dict): A dictionary containing a collection of tags that have to be updated.
The tag that have to be updated is the key, and the value is its textual repr.
"""
self.attributes['style'] = gui.jsonize(self.style)
        # ensure the overlay class is in the class attributes only once
        overlay_class = "simple_overlay black"
        class_attribute = self.attributes.get('class', '')
        if overlay_class not in class_attribute:
class_attribute += " " + overlay_class
self.attributes['class'] = class_attribute
attribute_string = ' '.join('%s="%s"' %
(k, v) if v is not None else k for k, v in
self.attributes.items())
trigger_id = self.trigger.attributes["id"]
overlay_id = self.attributes["id"]
html = '<div %s>'%(attribute_string)
local_changed_widgets = {}
innerHTML = ''
for k in self._render_children_list:
s = self.children[k]
if isinstance(s, type('')):
innerHTML = innerHTML + s
elif isinstance(s, type(u'')):
innerHTML = innerHTML + s.encode('utf-8')
else:
try:
innerHTML = innerHTML + s.repr(client, local_changed_widgets)
except AttributeError:
innerHTML = innerHTML + repr(s)
html += innerHTML
html += '</div>'
html += """
<script>
$(document).ready(function(){{
var dt = $('#{trigger_id}').overlay({{target:'#{overlay_id}'}})
}});
</script>""".format(trigger_id=trigger_id, overlay_id=overlay_id)
log.debug('overlay html:%s', html)
return html
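A minimal usage sketch (added for illustration only, not part of the original module; it assumes the standard remi App/start API and that it lives alongside the Overlay class above):

from remi import App, start

class OverlayDemo(App):
    def main(self):
        container = gui.VBox(width=300)
        trigger = gui.Button('Show overlay')
        overlay = Overlay(trigger)
        overlay.append(gui.Label('Hello from the overlay'))
        container.append(trigger)
        container.append(overlay)
        return container

# start(OverlayDemo)  # uncomment to serve the demo app in a browser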
| 32.948052
| 109
| 0.581395
| 2,234
| 0.880568
| 0
| 0
| 173
| 0.068191
| 0
| 0
| 867
| 0.341742
|
0c01e08aaee863025867488824fa6692ef88b661
| 468
|
py
|
Python
|
Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py
|
borisboychev/SoftUni
|
22062312f08e29a1d85377a6d41ef74966d37e99
|
[
"MIT"
] | 1
|
2020-12-14T23:25:19.000Z
|
2020-12-14T23:25:19.000Z
|
Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py
|
borisboychev/SoftUni
|
22062312f08e29a1d85377a6d41ef74966d37e99
|
[
"MIT"
] | null | null | null |
Python_Advanced_Softuni/Comprehensions_Exericises/venv/number_classification.py
|
borisboychev/SoftUni
|
22062312f08e29a1d85377a6d41ef74966d37e99
|
[
"MIT"
] | null | null | null |
elements = [int(x) for x in input().split(', ')]
even_numbers = [x for x in elements if x % 2 == 0]
odd_numbers = [x for x in elements if x % 2 != 0]
positive = [x for x in elements if x >= 0]
negative = [x for x in elements if x < 0]
print(f"Positive: {', '.join(str(x) for x in positive)}")
print(f"Negative: {', '.join(str(x) for x in negative)}")
print(f"Even: {', '.join(str(x) for x in even_numbers)}")
print(f"Odd: {', '.join(str(x) for x in odd_numbers)}")
| 36
| 57
| 0.613248
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 202
| 0.431624
|
0c02d2fcd975ca2fafbae393016b1ddc2ddcf6b5
| 2,048
|
py
|
Python
|
src/probnum/type.py
|
ralfrost/probnum
|
6b0988009a9dd7ecda87ba28c9d5c0b8019981b6
|
[
"MIT"
] | null | null | null |
src/probnum/type.py
|
ralfrost/probnum
|
6b0988009a9dd7ecda87ba28c9d5c0b8019981b6
|
[
"MIT"
] | 2
|
2020-12-28T19:37:16.000Z
|
2020-12-28T19:37:31.000Z
|
src/probnum/type.py
|
admdev8/probnum
|
792b6299bac247cf8b1b5056756f0f078855d83a
|
[
"MIT"
] | null | null | null |
import numbers
from typing import Iterable, Tuple, Union
import numpy as np
########################################################################################
# API Types
########################################################################################
ShapeType = Tuple[int, ...]
RandomStateType = Union[np.random.RandomState, np.random.Generator]
"""Type of a random number generator."""
########################################################################################
# Argument Types
########################################################################################
IntArgType = Union[int, numbers.Integral, np.integer]
FloatArgType = Union[float, numbers.Real, np.floating]
ShapeArgType = Union[IntArgType, Iterable[IntArgType]]
"""Type of a public API argument for supplying a shape. Values of this type should
always be converted into :class:`ShapeType` using the function
:func:`probnum.utils.as_shape` before further internal processing."""
DTypeArgType = Union[np.dtype, str]
"""Type of a public API argument for supplying a dtype. Values of this type should
always be converted into :class:`np.dtype` using the function
:func:`np.dtype` before further internal processing."""
ScalarArgType = Union[int, float, complex, numbers.Number, np.float_]
"""Type of a public API argument for supplying a scalar value. Values of this type
should always be converted into :class:`np.generic` using the function
:func:`probnum.utils.as_scalar` before further internal processing."""
ArrayLikeGetitemArgType = Union[
int,
slice,
np.ndarray,
np.newaxis,
None,
type(Ellipsis),
Tuple[Union[int, slice, np.ndarray, np.newaxis, None, type(Ellipsis)], ...],
]
RandomStateArgType = Union[None, int, np.random.RandomState, np.random.Generator]
"""Type of a public API argument for supplying a random number generator. Values of this
type should always be converted into :class:`RandomStateType` using the function
:func:`probnum.utils.as_random_state` before further internal processing."""
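For illustration only (this is not probnum code), a minimal sketch of the kind of normalisation the docstrings above describe, shown here for ShapeArgType; the real conversion lives in probnum.utils.as_shape:

import numbers
import numpy as np

def as_shape_sketch(x):
    """Hypothetical helper: normalise a ShapeArgType-style value to a tuple of ints."""
    if isinstance(x, (int, numbers.Integral, np.integer)):
        return (int(x),)
    return tuple(int(item) for item in x)

# as_shape_sketch(3)      -> (3,)
# as_shape_sketch((2, 4)) -> (2, 4)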
| 40.156863
| 88
| 0.626953
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,304
| 0.636719
|
0c03aa3f4a41bc42ddd522aaf547cfa062e47c23
| 12,279
|
py
|
Python
|
src/socialprofile/views.py
|
DLRSP/django-sp
|
9079358a4fc054f1a5afb056ccfd6a8b8afb36fa
|
[
"MIT"
] | 1
|
2022-01-11T07:25:17.000Z
|
2022-01-11T07:25:17.000Z
|
src/socialprofile/views.py
|
DLRSP/django-sp
|
9079358a4fc054f1a5afb056ccfd6a8b8afb36fa
|
[
"MIT"
] | 16
|
2021-12-20T01:30:34.000Z
|
2022-03-31T01:38:59.000Z
|
src/socialprofile/views.py
|
DLRSP/django-sp
|
9079358a4fc054f1a5afb056ccfd6a8b8afb36fa
|
[
"MIT"
] | null | null | null |
"""Django Views for the socialprofile module"""
import json
import logging
import sweetify
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import REDIRECT_FIELD_NAME, login
from django.contrib.auth import logout as auth_logout
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.http import Http404, HttpResponse, HttpResponseBadRequest
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from django.views.generic import DeleteView, TemplateView, UpdateView
from oauth2_provider.contrib.rest_framework import TokenHasReadWriteScope
from rest_framework import permissions, viewsets
from social_core.backends.oauth import BaseOAuth1, BaseOAuth2
from social_core.backends.utils import load_backends
from social_django.utils import psa
from .decorators import render_to
from .forms import SocialProfileForm
from .models import SocialProfile
# from .serializers import SocialProfileSerializer, GroupSerializer
from .serializers import SocialProfileSerializer
LOGGER = logging.getLogger(name="socialprofile.views")
DEFAULT_RETURNTO_PATH = getattr(settings, "DEFAULT_RETURNTO_PATH", "/")
# ViewSets define the view behavior.
class SocialProfileViewSet(viewsets.ModelViewSet):
"""Serialize SocialProfiles"""
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
queryset = SocialProfile.objects.all()
serializer_class = SocialProfileSerializer
# class GroupViewSet(viewsets.ModelViewSet):
# """Serialize Groups"""
# permission_classes = [permissions.IsAuthenticated, TokenHasScope]
# required_scopes = ['groups']
# queryset = Group.objects.all()
# serializer_class = GroupSerializer
def logout(request):
"""Logs out user"""
auth_logout(request)
return redirect("sp_select_page")
def context(**extra):
return dict(
{
# "plus_id": getattr(settings, "SOCIAL_AUTH_GOOGLE_PLUS_KEY", None),
# "plus_scope": " ".join(GooglePlusAuth.DEFAULT_SCOPE),
"available_backends": load_backends(settings.AUTHENTICATION_BACKENDS),
},
**extra,
)
@render_to("socialprofile/sp_account_select.html")
def home(request):
"""Home view, displays login mechanism"""
if request.user.is_authenticated:
return redirect("done")
return context()
@login_required
@render_to("socialprofile/sp_account_select.html")
def done(request):
"""Login complete view, displays user data"""
if request.user.is_authenticated:
return context()
return redirect("sp_select_page")
@psa("social:complete")
def ajax_auth(request, backend):
if isinstance(request.backend, BaseOAuth1):
token = {
"oauth_token": request.REQUEST.get("access_token"),
"oauth_token_secret": request.REQUEST.get("access_token_secret"),
}
elif isinstance(request.backend, BaseOAuth2):
token = request.REQUEST.get("access_token")
else:
raise HttpResponseBadRequest(_("Wrong backend type"))
user = request.backend.do_auth(token, ajax=True)
login(request, user)
data = {"id": user.id, "username": user.username}
return HttpResponse(json.dumps(data), mimetype="application/json")
class SelectAuthView(TemplateView):
"""
Lets users choose how they want to request access.
url: /select
"""
template_name = "socialprofile/sp_account_select.html"
def get_context_data(self, **kwargs):
"""Ensure that 'next' gets passed along"""
LOGGER.debug("socialprofile.views.SelectAuthView.get_context_data")
next_url = self.request.GET.get(REDIRECT_FIELD_NAME, DEFAULT_RETURNTO_PATH)
context = super().get_context_data(**kwargs)
context["next_param"] = REDIRECT_FIELD_NAME
context["next_url"] = next_url
# context["plus_id"] = getattr(settings, "SOCIAL_AUTH_GOOGLE_PLUS_KEY", None)
# context["plus_scope"] = " ".join(GooglePlusAuth.DEFAULT_SCOPE)
context["available_backends"] = load_backends(settings.AUTHENTICATION_BACKENDS)
return context
class SocialProfileWelcome(TemplateView):
"""
New Profile Page
url: /sp/new-profile
"""
template_name = "socialprofile/sp_new_profile.html"
# class SocialProfileView(DetailView):
class SocialProfileView(TemplateView):
"""
Profile View Page
url: /sp/view
"""
model = SocialProfile
template_name = "socialprofile/sp_profile_view.html"
http_method_names = {"get"}
def get_context_data(self, **kwargs):
"""Load up the default data to show in the display form."""
username = self.kwargs.get("username")
if username:
try:
user = get_object_or_404(SocialProfile, username=username)
except Exception as e:
try:
user = get_object_or_404(SocialProfile, pk=username)
except Exception as e:
user = get_object_or_404(SocialProfile, pk=self.request.user.pk)
elif self.request.user.is_authenticated:
user = get_object_or_404(SocialProfile, pk=self.request.user.pk)
else:
raise Http404 # Case where user gets to this view anonymously for non-existent user
        if self.request.user != user and not user.visible:
            raise Http404  # Case where user set to be private (profile not visible)
return {
"user": user,
"available_backends": load_backends(settings.AUTHENTICATION_BACKENDS),
}
class SocialProfileViewAll(TemplateView):
"""
Profile View Page
url: /sp/view/all
"""
template_name = "socialprofile/sp_profile_view_all.html"
http_method_names = {"get"}
def dispatch(self, request, *args, **kwargs):
if not getattr(
settings,
"SP_VIEW_PUBLIC",
(
getattr(settings, "SP_VIEW_PUBLIC_ONLY_ADMIN", False)
and request.user.is_superuser
),
):
return redirect("sp_profile_view")
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
"""Load up the default data to show in the display form."""
return {
"users": SocialProfile.objects.filter(visible=True),
}
class SocialProfileEditView(UpdateView):
"""
Profile Editing View
url: /sp/edit
"""
model = SocialProfile
template_name = "socialprofile/sp_profile_edit.html"
http_method_names = {"get", "post"}
def get_context_data(self, **kwargs):
"""Load up the default data to show in the display form."""
# user_form = UserForm(request.POST, instance=request.user)
# username = self.kwargs.get("username")
username = self.kwargs.get("pk")
if username:
try:
user = get_object_or_404(SocialProfile, username=username)
except Exception as e:
try:
user = get_object_or_404(SocialProfile, pk=username)
except Exception as e:
user = self.request.user
elif self.request.user.is_authenticated:
user = self.request.user
else:
raise Http404 # Case where user gets to this view anonymously for non-existent user
if user != self.request.user:
raise PermissionDenied()
sp_form = SocialProfileForm(instance=user)
return {
"user": user,
"available_backends": load_backends(settings.AUTHENTICATION_BACKENDS),
"sp_form": sp_form,
}
def post(self, request, *args, **kwargs):
# user_form = UserForm(request.POST, instance=request.user)
return_to = self.request.POST.get("returnTo", DEFAULT_RETURNTO_PATH)
custom_alerts = (
getattr(settings, "SP_ALERT_LIBRARY", "sweetalert") == "sweetalert"
)
# username = self.kwargs.get("username")
username = self.kwargs.get("pk")
if username:
try:
user = get_object_or_404(SocialProfile, username=username)
except Exception as e:
try:
user = get_object_or_404(SocialProfile, pk=username)
except Exception as e:
user = self.request.user
if user != self.request.user:
raise PermissionDenied()
sp_form = SocialProfileForm(request.POST, instance=user)
sp_form.initial["returnTo"] = return_to
if sp_form.is_valid():
try:
sp_form.save()
if custom_alerts:
sweetify.toast(
self.request,
_("Your profile has been updated."),
icon="success",
timer=3000,
)
else:
messages.add_message(
self.request,
messages.SUCCESS,
_("Your profile has been updated."),
)
return self.render_to_response(
{
"success": True,
"user": user,
"available_backends": load_backends(
settings.AUTHENTICATION_BACKENDS
),
"sp_form": sp_form,
}
)
except Exception as e:
if custom_alerts:
sweetify.toast(
self.request,
f"{_('ERROR: Your profile has NOT been updated!')} [{e}]",
icon="error",
timer=3000,
)
else:
messages.add_message(
self.request,
messages.INFO,
f"{_('ERROR: Your profile has NOT been updated!')} [{e}]",
)
return self.render_to_response(
{
"success": False,
"user": user,
"available_backends": load_backends(
settings.AUTHENTICATION_BACKENDS
),
"sp_form": sp_form,
}
)
else:
if custom_alerts:
sweetify.toast(
self.request,
_("Your profile has NOT been updated!"),
icon="error",
timer=3000,
)
# multi = []
# for x, err_msg in enumerate(sp_form.errors):
# multi.append({f"err_mess_{x}": dict(title='Error', icon='warning',
# text=err_msg, toast=True, timer=3000, timerProgressBar='true')})
# # sweetify.toast(
# # self.request,
# # err_msg,
# # icon="warning",
# # timer=3000,
# # )
# if multi:
# sweetify.multiple(request, *multi[0])
else:
messages.add_message(
self.request, messages.INFO, _("Your profile has NOT been updated!")
)
return self.render_to_response(
{
"success": False,
"user": user,
"available_backends": load_backends(
settings.AUTHENTICATION_BACKENDS
),
"sp_form": sp_form,
}
)
class DeleteSocialProfileView(DeleteView):
"""
Account Delete Confirm Modal View
url: /delete
"""
model = SocialProfile
success_url = reverse_lazy("sp_logout_page")
def get_object(self, queryset=None):
"""Get the object that we are going to delete"""
return self.request.user
| 33.186486
| 96
| 0.582784
| 9,092
| 0.740451
| 0
| 0
| 1,093
| 0.089014
| 0
| 0
| 3,493
| 0.284469
|
0c042004c2d10428499c1e729e50d34d388b3eb9
| 519
|
py
|
Python
|
sources/101_test.py
|
Painatalman/python101
|
9727ca03da46f81813fc2d338b8ba22fc0d8b78b
|
[
"Apache-2.0"
] | null | null | null |
sources/101_test.py
|
Painatalman/python101
|
9727ca03da46f81813fc2d338b8ba22fc0d8b78b
|
[
"Apache-2.0"
] | null | null | null |
sources/101_test.py
|
Painatalman/python101
|
9727ca03da46f81813fc2d338b8ba22fc0d8b78b
|
[
"Apache-2.0"
] | null | null | null |
from fruits import validate_fruit
fruits = ["banana", "lemon", "apple", "orange", "batman"]
print fruits
def list_fruits(fruits, byName=True):
if byName:
# WARNING: this won't make a copy of the list and return it. It will change the list FOREVER
fruits.sort()
for index, fruit in enumerate(fruits):
if validate_fruit(fruit):
print "Fruit nr %d is %s" % (index, fruit)
else:
print "This %s is no fruit!" % (fruit)
list_fruits(fruits)
print fruits
| 22.565217
| 100
| 0.628131
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 171
| 0.32948
|
0c04e662d416158f9b46ddaf7846e7bfe2b9fca2
| 3,439
|
py
|
Python
|
tests/test_cms_config.py
|
Aiky30/djangocms-content-expiry
|
da7d348bcdafbf1a9862e4cc69a8363b3305a31a
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_cms_config.py
|
Aiky30/djangocms-content-expiry
|
da7d348bcdafbf1a9862e4cc69a8363b3305a31a
|
[
"BSD-3-Clause"
] | 4
|
2021-09-27T10:15:13.000Z
|
2021-11-23T17:18:04.000Z
|
tests/test_cms_config.py
|
Aiky30/djangocms-content-expiry
|
da7d348bcdafbf1a9862e4cc69a8363b3305a31a
|
[
"BSD-3-Clause"
] | 4
|
2021-09-06T20:13:45.000Z
|
2021-10-02T15:00:58.000Z
|
from unittest.mock import Mock
from django.apps import apps
from django.contrib import admin
from django.test import RequestFactory, TestCase
from djangocms_moderation.cms_config import ModerationExtension
from djangocms_moderation.models import ModerationRequestTreeNode
from djangocms_content_expiry.cms_config import (
ContentExpiryAppConfig,
ContentExpiryExtension,
)
from djangocms_content_expiry.constants import CONTENT_EXPIRY_EXPIRE_FIELD_LABEL
class ModerationConfigDependancyTestCase(TestCase):
def test_moderation_config_admin_controls_exist(self):
"""
        Moderation controls are required for the content expiry records to be viewed.
        Ensure that they exist; a failure here means that the implementation in
        moderation may have changed.
"""
moderation_extension = ModerationExtension()
self.assertTrue(hasattr(moderation_extension, "moderation_request_changelist_actions"))
self.assertTrue(hasattr(moderation_extension, "moderation_request_changelist_fields"))
self.assertTrue(
hasattr(moderation_extension, "handle_moderation_request_changelist_actions")
and callable(moderation_extension.handle_moderation_request_changelist_actions)
)
self.assertTrue(
hasattr(moderation_extension, "handle_moderation_request_changelist_fields")
and callable(moderation_extension.handle_moderation_request_changelist_fields)
)
def test_moderation_config_admin_controls_are_compiled_by_moderation(self):
moderation = apps.get_app_config("djangocms_moderation")
content_expiry_actions = ContentExpiryAppConfig.moderation_request_changelist_actions
content_expiry_fields = ContentExpiryAppConfig.moderation_request_changelist_fields
self.assertListEqual(
moderation.cms_extension.moderation_request_changelist_actions,
content_expiry_actions,
)
self.assertListEqual(
moderation.cms_extension.moderation_request_changelist_fields,
content_expiry_fields,
)
def test_moderation_request_contains_added_admin_fields(self):
"""
Ensure that the admin field is added as expected
"""
moderation_admin = admin.site._registry[ModerationRequestTreeNode]
request = RequestFactory().get("/")
list_display = moderation_admin.get_list_display(request)
self.assertIn('get_expiry_date', list_display)
self.assertEqual(CONTENT_EXPIRY_EXPIRE_FIELD_LABEL, moderation_admin.get_expiry_date.short_description)
class ContentExpiryChangelistQueryFilterSettingTestCase(TestCase):
def test_valid_cms_config_parameters(self):
def _dummy_fn(site, queryset):
return queryset
def _another_dummy_fn(site, queryset):
return queryset
extension = ContentExpiryExtension()
app_1_config = Mock(
djangocms_content_expiry_changelist_queryset_filters=[_dummy_fn],
)
extension.configure_app(app_1_config)
app_2_config = Mock(
djangocms_content_expiry_changelist_queryset_filters=[_another_dummy_fn],
)
extension.configure_app(app_2_config)
self.assertTrue(_dummy_fn in extension.expiry_changelist_queryset_filters)
self.assertTrue(_another_dummy_fn in extension.expiry_changelist_queryset_filters)
| 40.458824
| 111
| 0.756325
| 2,969
| 0.863332
| 0
| 0
| 0
| 0
| 0
| 0
| 504
| 0.146554
|
0c0689f206c41c5e5d28c78e11446ccb008b17b1
| 4,466
|
py
|
Python
|
tilequeue/format/OSciMap4/StaticVals/__init__.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 29
|
2016-11-03T18:39:21.000Z
|
2022-02-27T17:42:37.000Z
|
tilequeue/format/OSciMap4/StaticVals/__init__.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 146
|
2016-07-07T16:41:07.000Z
|
2021-12-11T00:27:20.000Z
|
tilequeue/format/OSciMap4/StaticVals/__init__.py
|
ducdk90/tilequeue
|
c664b5c89a9f0e6743405ab266aa9ca80b57806e
|
[
"MIT"
] | 28
|
2016-08-19T16:08:52.000Z
|
2021-07-26T10:16:29.000Z
|
vals = {
"yes" : 0,
"residential" : 1,
"service" : 2,
"unclassified" : 3,
"stream" : 4,
"track" : 5,
"water" : 6,
"footway" : 7,
"tertiary" : 8,
"private" : 9,
"tree" : 10,
"path" : 11,
"forest" : 12,
"secondary" : 13,
"house" : 14,
"no" : 15,
"asphalt" : 16,
"wood" : 17,
"grass" : 18,
"paved" : 19,
"primary" : 20,
"unpaved" : 21,
"bus_stop" : 22,
"parking" : 23,
"parking_aisle" : 24,
"rail" : 25,
"driveway" : 26,
"8" : 27,
"administrative" : 28,
"locality" : 29,
"turning_circle" : 30,
"crossing" : 31,
"village" : 32,
"fence" : 33,
"grade2" : 34,
"coastline" : 35,
"grade3" : 36,
"farmland" : 37,
"hamlet" : 38,
"hut" : 39,
"meadow" : 40,
"wetland" : 41,
"cycleway" : 42,
"river" : 43,
"school" : 44,
"trunk" : 45,
"gravel" : 46,
"place_of_worship" : 47,
"farm" : 48,
"grade1" : 49,
"traffic_signals" : 50,
"wall" : 51,
"garage" : 52,
"gate" : 53,
"motorway" : 54,
"living_street" : 55,
"pitch" : 56,
"grade4" : 57,
"industrial" : 58,
"road" : 59,
"ground" : 60,
"scrub" : 61,
"motorway_link" : 62,
"steps" : 63,
"ditch" : 64,
"swimming_pool" : 65,
"grade5" : 66,
"park" : 67,
"apartments" : 68,
"restaurant" : 69,
"designated" : 70,
"bench" : 71,
"survey_point" : 72,
"pedestrian" : 73,
"hedge" : 74,
"reservoir" : 75,
"riverbank" : 76,
"alley" : 77,
"farmyard" : 78,
"peak" : 79,
"level_crossing" : 80,
"roof" : 81,
"dirt" : 82,
"drain" : 83,
"garages" : 84,
"entrance" : 85,
"street_lamp" : 86,
"deciduous" : 87,
"fuel" : 88,
"trunk_link" : 89,
"information" : 90,
"playground" : 91,
"supermarket" : 92,
"primary_link" : 93,
"concrete" : 94,
"mixed" : 95,
"permissive" : 96,
"orchard" : 97,
"grave_yard" : 98,
"canal" : 99,
"garden" : 100,
"spur" : 101,
"paving_stones" : 102,
"rock" : 103,
"bollard" : 104,
"convenience" : 105,
"cemetery" : 106,
"post_box" : 107,
"commercial" : 108,
"pier" : 109,
"bank" : 110,
"hotel" : 111,
"cliff" : 112,
"retail" : 113,
"construction" : 114,
"-1" : 115,
"fast_food" : 116,
"coniferous" : 117,
"cafe" : 118,
"6" : 119,
"kindergarten" : 120,
"tower" : 121,
"hospital" : 122,
"yard" : 123,
"sand" : 124,
"public_building" : 125,
"cobblestone" : 126,
"destination" : 127,
"island" : 128,
"abandoned" : 129,
"vineyard" : 130,
"recycling" : 131,
"agricultural" : 132,
"isolated_dwelling" : 133,
"pharmacy" : 134,
"post_office" : 135,
"motorway_junction" : 136,
"pub" : 137,
"allotments" : 138,
"dam" : 139,
"secondary_link" : 140,
"lift_gate" : 141,
"siding" : 142,
"stop" : 143,
"main" : 144,
"farm_auxiliary" : 145,
"quarry" : 146,
"10" : 147,
"station" : 148,
"platform" : 149,
"taxiway" : 150,
"limited" : 151,
"sports_centre" : 152,
"cutline" : 153,
"detached" : 154,
"storage_tank" : 155,
"basin" : 156,
"bicycle_parking" : 157,
"telephone" : 158,
"terrace" : 159,
"town" : 160,
"suburb" : 161,
"bus" : 162,
"compacted" : 163,
"toilets" : 164,
"heath" : 165,
"works" : 166,
"tram" : 167,
"beach" : 168,
"culvert" : 169,
"fire_station" : 170,
"recreation_ground" : 171,
"bakery" : 172,
"police" : 173,
"atm" : 174,
"clothes" : 175,
"tertiary_link" : 176,
"waste_basket" : 177,
"attraction" : 178,
"viewpoint" : 179,
"bicycle" : 180,
"church" : 181,
"shelter" : 182,
"drinking_water" : 183,
"marsh" : 184,
"picnic_site" : 185,
"hairdresser" : 186,
"bridleway" : 187,
"retaining_wall" : 188,
"buffer_stop" : 189,
"nature_reserve" : 190,
"village_green" : 191,
"university" : 192,
"1" : 193,
"bar" : 194,
"townhall" : 195,
"mini_roundabout" : 196,
"camp_site" : 197,
"aerodrome" : 198,
"stile" : 199,
"9" : 200,
"car_repair" : 201,
"parking_space" : 202,
"library" : 203,
"pipeline" : 204,
"true" : 205,
"cycle_barrier" : 206,
"4" : 207,
"museum" : 208,
"spring" : 209,
"hunting_stand" : 210,
"disused" : 211,
"car" : 212,
"tram_stop" : 213,
"land" : 214,
"fountain" : 215,
"hiking" : 216,
"manufacture" : 217,
"vending_machine" : 218,
"kiosk" : 219,
"swamp" : 220,
"unknown" : 221,
"7" : 222,
"islet" : 223,
"shed" : 224,
"switch" : 225,
"rapids" : 226,
"office" : 227,
"bay" : 228,
"proposed" : 229,
"common" : 230,
"weir" : 231,
"grassland" : 232,
"customers" : 233,
"social_facility" : 234,
"hangar" : 235,
"doctors" : 236,
"stadium" : 237,
"give_way" : 238,
"greenhouse" : 239,
"guest_house" : 240,
"viaduct" : 241,
"doityourself" : 242,
"runway" : 243,
"bus_station" : 244,
"water_tower" : 245,
"golf_course" : 246,
"conservation" : 247,
"block" : 248,
"college" : 249,
"wastewater_plant" : 250,
"subway" : 251,
"halt" : 252,
"forestry" : 253,
"florist" : 254,
"butcher" : 255}
def getValues():
return vals
| 17.111111
| 26
| 0.59382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,481
| 0.555531
|
0c084907ab6f7a66d8c89aefdff3de051c8499ac
| 1,406
|
py
|
Python
|
encryption_client.py
|
salmanhiro/fernet-rabbitmq
|
8130514e6d21b7df9c78a28130c603512f500a23
|
[
"MIT"
] | null | null | null |
encryption_client.py
|
salmanhiro/fernet-rabbitmq
|
8130514e6d21b7df9c78a28130c603512f500a23
|
[
"MIT"
] | null | null | null |
encryption_client.py
|
salmanhiro/fernet-rabbitmq
|
8130514e6d21b7df9c78a28130c603512f500a23
|
[
"MIT"
] | null | null | null |
import pika
import uuid
import time
import json
class FernetRpc(object):
def __init__(self):
self.connection = pika.BlockingConnection(
pika.ConnectionParameters(host='localhost'))
self.channel = self.connection.channel()
result = self.channel.queue_declare(queue='', exclusive=True)
self.callback_queue = result.method.queue
self.channel.basic_consume(
queue=self.callback_queue,
on_message_callback=self.on_response,
auto_ack=True)
def on_response(self, ch, method, props, body):
if self.corr_id == props.correlation_id:
self.response = body
def call(self, n):
self.response = None
self.corr_id = str(uuid.uuid4())
self.channel.basic_publish(
exchange='',
routing_key='rpc_queue',
properties=pika.BasicProperties(
reply_to=self.callback_queue,
correlation_id=self.corr_id,
),
body=json.dumps(n))
while self.response is None:
self.connection.process_data_events()
return (self.response)
message = {"text":"kulikuli"}
fernet_result = FernetRpc()
print(" [x] Requesting user")
start = time.time()
response = fernet_result.call(message)
end = time.time() - start
print(" [v] Got %r" % response)
print(" [.] Time elapsed %r s" %end)
| 26.528302
| 69
| 0.619488
| 1,109
| 0.788762
| 0
| 0
| 0
| 0
| 0
| 0
| 101
| 0.071835
|
0c0862941d8ae706603317f21fde751ca0bd01fb
| 3,225
|
py
|
Python
|
services/cert_server/project/tests/test_cert_server.py
|
EvaldoNeto/openvpn-http
|
73d75a990d5d7ed7f89a526c0ce324db42c37f1f
|
[
"MIT"
] | 5
|
2019-11-19T02:54:05.000Z
|
2020-03-03T19:48:41.000Z
|
services/cert_server/project/tests/test_cert_server.py
|
EvaldoNeto/openvpn-http
|
73d75a990d5d7ed7f89a526c0ce324db42c37f1f
|
[
"MIT"
] | 23
|
2019-10-31T12:00:37.000Z
|
2019-11-22T21:00:28.000Z
|
services/cert_server/project/tests/test_cert_server.py
|
EvaldoNeto/openvpn-http
|
73d75a990d5d7ed7f89a526c0ce324db42c37f1f
|
[
"MIT"
] | null | null | null |
# services/ovpn_server/project/tests/test_ovpn_server.py
import os
import json
import io
from flask import current_app
from project.tests.base import BaseTestCase
class TestOvpnServer(BaseTestCase):
def test_certificates(self):
with self.client:
pki_path = current_app.config['PKI_PATH']
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={
'file': (io.BytesIO(b'test'), 'test_cert.req'),
'cert': 'False'
},
content_type='multipart/form-data',
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('file uploaded', data['message'])
self.assertEqual(response.status_code, 200)
self.assertTrue(os.path.isfile(f'{pki_path}/reqs/test_cert.req'))
os.remove(f'{pki_path}/reqs/test_cert.req')
self.assertFalse(os.path.isfile(f'{pki_path}/reqs/test_cert.req'))
def test_certificates_invalid_token(self):
with self.client:
filename = 'test_cert.crt'
token = "INVALID_TOKEN"
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(b'test'), filename)},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn(f'Provide a valid auth token', data['message'])
self.assertEqual(response.status_code, 401)
def test_certificates_no_token(self):
with self.client:
filename = 'test_cert.crt'
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(b'test'), filename)},
)
data = json.loads(response.data.decode())
self.assertIn(f'Provide a valid auth token', data['message'])
self.assertEqual(response.status_code, 403)
def test_certificate_no_file(self):
"""
Tests response when there is no file being sent
"""
with self.client:
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('No file', data['message'])
self.assertEqual(response.status_code, 400)
def test_certificates_invalid_file(self):
"""
Tests response when an invalid file is sent
"""
with self.client:
token = current_app.config['SECRET_KEY']
response = self.client.post(
'/cert/upload',
data={'file': (io.BytesIO(str.encode('test')), 'test.txt')},
headers={'Authorization': f'Bearer {token}'}
)
data = json.loads(response.data.decode())
self.assertIn('Not a valid file', data['message'])
self.assertEqual(response.status_code, 400)
| 37.068966
| 78
| 0.556279
| 3,056
| 0.947597
| 0
| 0
| 0
| 0
| 0
| 0
| 834
| 0.258605
|
0c08971682b47651e14df294d06cff25310ada7b
| 956
|
py
|
Python
|
powerline/lib/watcher/stat.py
|
MrFishFinger/powerline
|
361534bafecf836e100eaff257c93eb4805f48db
|
[
"MIT"
] | 11,435
|
2015-01-01T03:32:34.000Z
|
2022-03-31T20:39:05.000Z
|
powerline/lib/watcher/stat.py
|
ritiek/powerline
|
82c1373ba0b424c57e8c12cb5f6f1a7ee3829c27
|
[
"MIT"
] | 879
|
2015-01-02T11:59:30.000Z
|
2022-03-24T09:52:17.000Z
|
powerline/lib/watcher/stat.py
|
ritiek/powerline
|
82c1373ba0b424c57e8c12cb5f6f1a7ee3829c27
|
[
"MIT"
] | 1,044
|
2015-01-05T22:37:53.000Z
|
2022-03-17T19:43:16.000Z
|
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)
import os
from threading import RLock
from powerline.lib.path import realpath
class StatFileWatcher(object):
def __init__(self):
self.watches = {}
self.lock = RLock()
def watch(self, path):
path = realpath(path)
with self.lock:
self.watches[path] = os.path.getmtime(path)
def unwatch(self, path):
path = realpath(path)
with self.lock:
self.watches.pop(path, None)
def is_watching(self, path):
with self.lock:
return realpath(path) in self.watches
def __call__(self, path):
path = realpath(path)
with self.lock:
if path not in self.watches:
self.watches[path] = os.path.getmtime(path)
return True
mtime = os.path.getmtime(path)
if mtime != self.watches[path]:
self.watches[path] = mtime
return True
return False
def close(self):
with self.lock:
self.watches.clear()
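A short self-contained usage sketch (added for illustration, not part of the powerline module):

if __name__ == '__main__':
    import tempfile
    import time

    watcher = StatFileWatcher()
    fd, path = tempfile.mkstemp()
    os.close(fd)
    watcher.watch(path)
    print(watcher(path))   # False: mtime unchanged since watch()
    time.sleep(1)
    with open(path, 'a') as f:
        f.write('change\n')
    print(watcher(path))   # True: mtime changed
    watcher.close()
    os.remove(path)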
| 21.244444
| 84
| 0.706067
| 757
| 0.791841
| 0
| 0
| 0
| 0
| 0
| 0
| 29
| 0.030335
|
0c08a69ecbe4701e579ed0c55e6c61397156d087
| 2,531
|
py
|
Python
|
refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py
|
xrick/DTW-Tutorial
|
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
|
[
"MIT"
] | null | null | null |
refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py
|
xrick/DTW-Tutorial
|
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
|
[
"MIT"
] | null | null | null |
refData/mlpy/mlpy-3.5.0/mlpy/bordacount/borda.py
|
xrick/DTW-Tutorial
|
bbbce1c2beff91384cdcb7dbf503f93ad2fa285c
|
[
"MIT"
] | null | null | null |
## This code is written by Davide Albanese, <albanese@fbk.eu>.
## (C) 2010 mlpy Developers.
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
__all__ = ['borda_count']
import numpy as np
import sys
if sys.version >= '3':
from . import cborda
else:
import cborda
def borda_count(x, k=None):
"""Given N ranked ids lists of length P compute the number of
extractions on top-k positions and the mean position for each id.
Sort the element ids with decreasing number of extractions, and
element ids with equal number of extractions will be sorted with
increasing mean positions.
:Parameters:
x : 2d array_like object integer (N, P)
ranked ids lists. For each list ids must be unique
in [0, P-1].
k : None or integer
compute borda on top-k position (None -> k = P)
:Returns:
borda : 1d numpy array objects
sorted-ids, number of extractions, mean positions
Example:
>>> import numpy as np
>>> import mlpy
>>> x = [[2,4,1,3,0], # first ranked list
... [3,4,1,2,0], # second ranked list
... [2,4,3,0,1], # third ranked list
... [0,1,4,2,3]] # fourth ranked list
>>> mlpy.borda_count(x=x, k=3)
(array([4, 1, 2, 3, 0]), array([4, 3, 2, 2, 1]), array([ 1.25 , 1.66666667, 0. , 1. , 0. ]))
* Id 4 is in the first position with 4 extractions and mean position 1.25.
* Id 1 is in the first position with 3 extractions and mean position 1.67.
* ...
"""
x_arr = np.asarray(x, dtype=np.int)
n, p = x_arr.shape
    if k is None:
k = p
if k < 1 or k > p:
raise ValueError('k must be in [1, %d]' % p)
ext, pos = cborda.core(x_arr, k)
invpos = (pos + 1)**(-1) # avoid zero division
idx = np.lexsort(keys=(invpos, ext))[::-1]
return idx, ext[idx], pos[idx]
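For readers without the compiled cborda extension, here is a pure-NumPy re-implementation sketch of what core() computes, written from the docstring above (it is not the library code and only aims to reproduce the documented example):

def borda_count_py(x, k=None):
    x = np.asarray(x, dtype=int)
    n, p = x.shape
    k = p if k is None else k
    ext = np.zeros(p, dtype=int)        # number of appearances in the top-k positions
    pos_sum = np.zeros(p, dtype=float)  # sum of those positions, for the mean
    for row in x:
        for position, idx in enumerate(row[:k]):
            ext[idx] += 1
            pos_sum[idx] += position
    pos = np.where(ext > 0, pos_sum / np.maximum(ext, 1), 0.0)
    order = np.lexsort((1.0 / (pos + 1.0), ext))[::-1]
    return order, ext[order], pos[order]

# borda_count_py([[2,4,1,3,0],[3,4,1,2,0],[2,4,3,0,1],[0,1,4,2,3]], k=3)
# reproduces the docstring example: ids (4, 1, 2, 3, 0) with 4, 3, 2, 2, 1 extractions.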
| 32.448718
| 126
| 0.614382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,036
| 0.804425
|
0c08ae96e8b31b452042a012ea2cbfe21f5f54d5
| 2,641
|
py
|
Python
|
envs/base_mujoco_env.py
|
zaynahjaved/AWAC
|
e225eeb8c0cd3498ab55ce15a9de60cb4e957c50
|
[
"MIT"
] | null | null | null |
envs/base_mujoco_env.py
|
zaynahjaved/AWAC
|
e225eeb8c0cd3498ab55ce15a9de60cb4e957c50
|
[
"MIT"
] | null | null | null |
envs/base_mujoco_env.py
|
zaynahjaved/AWAC
|
e225eeb8c0cd3498ab55ce15a9de60cb4e957c50
|
[
"MIT"
] | null | null | null |
'''
All cartgripper env modules are built on the cartgripper implementation in
https://github.com/SudeepDasari/visual_foresight
'''
from abc import ABC
from mujoco_py import load_model_from_path, MjSim
import numpy as np
from base_env import BaseEnv
class BaseMujocoEnv(BaseEnv, ABC):
def __init__(self, model_path, _hp):
super(BaseMujocoEnv, self).__init__()
self._frame_height = _hp.viewer_image_height
self._frame_width = _hp.viewer_image_width
self._reset_sim(model_path)
self._base_adim, self._base_sdim = None, None #state/action dimension of Mujoco control
self._adim, self._sdim = None, None #state/action dimension presented to agent
self.num_objects, self._n_joints = None, None
self._goal_obj_pose = None
self._goaldistances = []
self._ncam = _hp.ncam
if self._ncam == 2:
self.cameras = ['maincam', 'leftcam']
elif self._ncam == 1:
self.cameras = ['maincam']
else:
raise ValueError
self._last_obs = None
self._hp = _hp
def _default_hparams(self):
parent_params = super()._default_hparams()
parent_params['viewer_image_height'] = 256
parent_params['viewer_image_width'] = 256
parent_params['ncam'] = 1
return parent_params
def set_goal_obj_pose(self, pose):
self._goal_obj_pose = pose
def _reset_sim(self, model_path):
"""
Creates a MjSim from passed in model_path
:param model_path: Absolute path to model file
:return: None
"""
self._model_path = model_path
self.sim = MjSim(load_model_from_path(self._model_path))
def reset(self):
self._goaldistances = []
def render(self):
""" Renders the enviornment.
Implements custom rendering support. If mode is:
- dual: renders both left and main cameras
- left: renders only left camera
- main: renders only main (front) camera
:param mode: Mode to render with (dual by default)
:return: uint8 numpy array with rendering from sim
"""
images = np.zeros(
(self._ncam, self._frame_height, self._frame_width, 3),
dtype=np.uint8)
for i, cam in enumerate(self.cameras):
images[i] = self.sim.render(
self._frame_width, self._frame_height, camera_name=cam)
return images
@property
def adim(self):
return self._adim
@property
def sdim(self):
return self._sdim
@property
def ncam(self):
return self._ncam
| 29.344444
| 96
| 0.632336
| 2,395
| 0.906853
| 0
| 0
| 165
| 0.062476
| 0
| 0
| 779
| 0.294964
|
0c08ff139766a0d536bcc09bc242b07f333b8755
| 853
|
py
|
Python
|
HackerRank/Two Sum/Two Sum.py
|
nikku1234/Code-Practise
|
94eb6680ea36efd10856c377000219285f77e5a4
|
[
"Apache-2.0"
] | 9
|
2020-07-02T06:06:17.000Z
|
2022-02-26T11:08:09.000Z
|
HackerRank/Two Sum/Two Sum.py
|
nikku1234/Code-Practise
|
94eb6680ea36efd10856c377000219285f77e5a4
|
[
"Apache-2.0"
] | 1
|
2021-11-04T17:26:36.000Z
|
2021-11-04T17:26:36.000Z
|
HackerRank/Two Sum/Two Sum.py
|
nikku1234/Code-Practise
|
94eb6680ea36efd10856c377000219285f77e5a4
|
[
"Apache-2.0"
] | 8
|
2021-01-31T10:31:12.000Z
|
2022-03-13T09:15:55.000Z
|
from typing import List
class Solution:
def twoSum(self, nums: List[int], target: int) -> List[int]:
"""Naive Logic"""
''' for i in range(len(nums)):
left = nums[i+1:]
for j in range(len(left)):
if (nums[i]+left[j]) ==target :
return i,j+i+1
'''
'''Better Logic'''
'''
k=0
for i in nums:
k = k+1
if target-i in nums[k:]:
return(k - 1, nums[k:].index(target - i) + k)
'''
'''Going for a better logic HashTable'''
hash_table={}
for i in range(len(nums)):
hash_table[nums[i]]=i
for i in range(len(nums)):
if target-nums[i] in hash_table:
if hash_table[target-nums[i]] != i:
return [i, hash_table[target-nums[i]] ]
return []
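A quick usage check (added for illustration, not part of the original solution file):

if __name__ == "__main__":
    print(Solution().twoSum([2, 7, 11, 15], 9))  # -> [0, 1] because 2 + 7 == 9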
| 32.807692
| 64
| 0.444314
| 853
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 439
| 0.514654
|
0c097274adeceb2e1e44250ea00c4016e23c60ed
| 191
|
py
|
Python
|
Desafios/desafio009.py
|
LucasHenrique-dev/Exercicios-Python
|
b1f6ca56ea8e197a89a044245419dc6079bdb9c7
|
[
"MIT"
] | 1
|
2020-04-09T23:18:03.000Z
|
2020-04-09T23:18:03.000Z
|
Desafios/desafio009.py
|
LucasHenrique-dev/Exercicios-Python
|
b1f6ca56ea8e197a89a044245419dc6079bdb9c7
|
[
"MIT"
] | null | null | null |
Desafios/desafio009.py
|
LucasHenrique-dev/Exercicios-Python
|
b1f6ca56ea8e197a89a044245419dc6079bdb9c7
|
[
"MIT"
] | null | null | null |
n1 = int(input('Enter a number to see its multiplication table: '))
n = 0
print('{} X {:2} = {:2}'.format(n1, 0, n1*n))
while n < 10:
n += 1
print('{} X {:2} = {:2}'.format(n1, n, n1*n))
| 27.285714
| 63
| 0.502618
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 83
| 0.432292
|
0c09891ffb40760a1dcac5e46984a7d055ce0caf
| 2,587
|
py
|
Python
|
web/app/djrq/admin/admin.py
|
bmillham/djrq2
|
c84283b75a7c15da1902ebfc32b7d75159c09e20
|
[
"MIT"
] | 1
|
2016-11-23T20:50:00.000Z
|
2016-11-23T20:50:00.000Z
|
web/app/djrq/admin/admin.py
|
bmillham/djrq2
|
c84283b75a7c15da1902ebfc32b7d75159c09e20
|
[
"MIT"
] | 15
|
2017-01-15T04:18:40.000Z
|
2017-02-25T04:13:06.000Z
|
web/app/djrq/admin/admin.py
|
bmillham/djrq2
|
c84283b75a7c15da1902ebfc32b7d75159c09e20
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
from web.ext.acl import when
from ..templates.admin.admintemplate import page as _page
from ..templates.admin.requests import requeststemplate, requestrow
from ..templates.requests import requestrow as rr
from ..send_update import send_update
import cinje
@when(when.matches(True, 'session.authenticated', True), when.never)
class Admin:
__dispatch__ = 'resource'
__resource__ = 'admin'
from .suggestions import Suggestions as suggestions
from .mistags import Mistags as mistags
from .auth import Auth as auth
from .logout import Logout as logout
from .showinfo import ShowInfo as showinfo
from .requestoptions import RequestOptions as requestoptions
from .catalogoptions import CatalogOptions as catalogoptions
from .uploadfiles import UploadFiles as uploadfiles
from .updatedatabase import UpdateDatabase as updatedatabase
from .changepw import ChangePassword as changepw
from .showhistory import ShowHistory as showhistory
from .restoredatabase import RestoreDatabase as restoredatabase, CurrentProgress as currentprogress
from .updatehistory import UpdateHistory as updatehistory
def __init__(self, context, name, *arg, **args):
self._name = name
self._ctx = context
self.queries = context.queries
def get(self, *arg, **args):
if len(arg) > 0 and arg[0] != 'requests':
return "Page not found: {}".format(arg[0])
if 'view_status' not in args:
args['view_status'] = 'New/Pending'
if 'change_status' in args:
changed_row = self.queries.change_request_status(args['id'], args['status'])
try:
request_row = cinje.flatten(rr(changed_row))
except:
request_row = '' # Row was deleted
np_info = self.queries.get_requests_info(status=args['view_status'])
send_update(self._ctx.websocket, requestbutton=np_info.request_count, request_row=request_row, new_request_status=args['status'], request_id=args['id']) # Update the request count button
send_update(self._ctx.websocket_admin, requestbutton=np_info.request_count) # Update the request count button
requestlist = self.queries.get_requests(status=args['view_status'])
try:
requestinfo = np_info
except:
requestinfo = self.queries.get_requests_info(status=args['view_status'])
return requeststemplate(_page, "Requests", self._ctx, requestlist=requestlist, view_status=args['view_status'], requestinfo=requestinfo)
| 43.847458
| 198
| 0.709702
| 2,241
| 0.866254
| 0
| 0
| 2,310
| 0.892926
| 0
| 0
| 312
| 0.120603
|
0c0a4769116e4e6ba1cffb9d2ccffc9442014420
| 895
|
py
|
Python
|
Middle/Que33.py
|
HuangZengPei/LeetCode
|
d2b8a1dfe986d71d02d2568b55bad6e5b1c81492
|
[
"MIT"
] | 2
|
2019-11-20T14:05:27.000Z
|
2019-11-20T14:05:28.000Z
|
Middle/Que33.py
|
HuangZengPei/LeetCode
|
d2b8a1dfe986d71d02d2568b55bad6e5b1c81492
|
[
"MIT"
] | null | null | null |
Middle/Que33.py
|
HuangZengPei/LeetCode
|
d2b8a1dfe986d71d02d2568b55bad6e5b1c81492
|
[
"MIT"
] | null | null | null |
class Solution(object):
def search(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: int
"""
return binarySearch(nums,target,0,len(nums)-1)
def binarySearch(nums, target, low, high):
    if (low > high):
        return -1
    middle = low + (high - low)//2  # integer division so middle stays a valid index
    if nums[middle] == target:
        return middle
    if nums[low] <= nums[middle]:  # the left half is sorted
        if nums[low] <= target and target < nums[middle]: # target lies in the sorted left half
            return binarySearch(nums,target,low,middle-1)
        else:
            return binarySearch(nums,target,middle+1,high)
    else:  # the right half is sorted
        if nums[middle] < target and target <= nums[high]:
            return binarySearch(nums,target,middle+1,high)
        else:
            return binarySearch(nums,target,low,middle-1)
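A quick usage check (added for illustration, not part of the original solution file):

if __name__ == "__main__":
    print(Solution().search([4, 5, 6, 7, 0, 1, 2], 0))  # -> 4
    print(Solution().search([4, 5, 6, 7, 0, 1, 2], 3))  # -> -1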
| 34.423077
| 69
| 0.521788
| 892
| 0.990011
| 0
| 0
| 0
| 0
| 0
| 0
| 102
| 0.113208
|
0c0beaeefd6502afde93d7709e2ca76e12632ed9
| 2,560
|
py
|
Python
|
save.py
|
regismeyssonnier/NeuralNetwork
|
c998b9523ed02287e1c811d73b0757270dee773c
|
[
"MIT"
] | null | null | null |
save.py
|
regismeyssonnier/NeuralNetwork
|
c998b9523ed02287e1c811d73b0757270dee773c
|
[
"MIT"
] | null | null | null |
save.py
|
regismeyssonnier/NeuralNetwork
|
c998b9523ed02287e1c811d73b0757270dee773c
|
[
"MIT"
] | null | null | null |
def write_file(filess, T):
f = open(filess, "w")
for o in T:
f.write("[\n")
for l in o:
f.write(str(l)+"\n")
f.write("]\n")
f.close()
def save_hidden_weight(nb_hidden, hiddenw):
for i in range(nb_hidden):
write_file("save/base_nn_hid_" + str(i+1) + "w.nn", hiddenw[i])
def load_hiddenw(filess, hiddenw):
f = open(filess, "r")
s = f.read().splitlines()
h = 0
for o in s:
#print(o)
if o == "[":
h = []
elif o == "]":
hiddenw.append(h)
else:
h.append(float(o))
def load_hidden_weight(hiddenw, nb_hidden):
for i in range(nb_hidden):
hiddenw.append([])
load_hiddenw("save/base_nn_hid_" + str(i+1) + "w.nn", hiddenw[i])
def load_hidden_weight_v(hiddenw, nb_hidden):
for i in range(nb_hidden):
hiddenw.append([])
load_hiddenw("valid/NN/base_nn_hid_" + str(i+1) + "w.nn", hiddenw[i])
def display_hidden_weight(hiddenw, nb_hidden):
for i in range(nb_hidden):
for j in hiddenw[i]:
print("------------------------------------")
I = 0
for k in j:
print(k)
I+=1
if I > 3:
break
def write_fileb(filess, T):
f = open(filess, "w")
for o in T:
f.write(str(o)+"\n")
f.close()
def save_hidden_bias(nb_hidden, hiddenb):
for i in range(nb_hidden):
write_fileb("save/base_nn_hid_" + str(i+1) + "b.nn", hiddenb[i])
def load_hiddenb(filess, hiddenb):
f = open(filess, "r")
s = f.read().splitlines()
for o in s:
hiddenb.append(float(o))
def load_hidden_bias(hiddenb, nb_hidden):
for i in range(nb_hidden):
hiddenb.append([])
load_hiddenb("save/base_nn_hid_" + str(i+1) + "b.nn", hiddenb[i])
def load_hidden_bias_v(hiddenb, nb_hidden):
for i in range(nb_hidden):
hiddenb.append([])
load_hiddenb("valid/NN/base_nn_hid_" + str(i+1) + "b.nn", hiddenb[i])
def display_hidden_bias(hiddenb, nb_hidden):
for i in range(nb_hidden):
print("------------------------------------")
for j in hiddenb[i]:
print(j)
def save_output_weight(outputw):
write_file("save/base_nn_out_w.nn", outputw[0])
def load_output_weight(outputw):
outputw.append([])
load_hiddenw("save/base_nn_out_w.nn", outputw[0])
def load_output_weight_v(outputw):
outputw.append([])
load_hiddenw("valid/NN/base_nn_out_w.nn", outputw[0])
def save_output_bias(outputb):
write_fileb("save/base_nn_out_b.nn", outputb[0])
def load_output_bias(outputb):
outputb.append([])
load_hiddenb("save/base_nn_out_b.nn", outputb[0])
def load_output_bias_v(outputb):
outputb.append([])
load_hiddenb("valid/NN/base_nn_out_b.nn", outputb[0])
| 17.902098
| 71
| 0.636328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 425
| 0.166016
|
0c0c0154d635c140279cd61ef15b6dfc6c89cd23
| 755
|
py
|
Python
|
test_knot_hasher.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
test_knot_hasher.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
test_knot_hasher.py
|
mmokko/aoc2017
|
0732ac440775f9e6bd4a8447c665c9b0e6969f74
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from day10 import KnotHasher
class TestKnotHasher(TestCase):
def test_calc(self):
sut = KnotHasher(5, [3, 4, 1, 5])
self.assertEqual(12, sut.calc())
def test_hash1(self):
sut = KnotHasher(256, '')
self.assertEqual('a2582a3a0e66e6e86e3812dcb672a272', sut.hash())
def test_hash2(self):
sut = KnotHasher(256, 'AoC 2017')
self.assertEqual('33efeb34ea91902bb2f59c9920caa6cd', sut.hash())
def test_hash3(self):
sut = KnotHasher(256, '1,2,3')
self.assertEqual('3efbe78a8d82f29979031a4aa0b16a9d', sut.hash())
def test_hash4(self):
sut = KnotHasher(256, '1,2,4')
self.assertEqual('63960835bcdc130f0b66d7ff4f6a5a8e', sut.hash())
| 30.2
| 72
| 0.658278
| 693
| 0.917881
| 0
| 0
| 0
| 0
| 0
| 0
| 162
| 0.21457
|
0c0c55cfe0bc18dae70bf566cb7d439dd048fafe
| 602
|
py
|
Python
|
udp/src/server.py
|
matthewchute/net-prot
|
82d2d92b3c88afb245161780fdd7909d7bf15eb1
|
[
"MIT"
] | null | null | null |
udp/src/server.py
|
matthewchute/net-prot
|
82d2d92b3c88afb245161780fdd7909d7bf15eb1
|
[
"MIT"
] | null | null | null |
udp/src/server.py
|
matthewchute/net-prot
|
82d2d92b3c88afb245161780fdd7909d7bf15eb1
|
[
"MIT"
] | null | null | null |
import constants, helpers, os
temp_msg = None
whole_msg = b''
file_path = None
helpers.sock.bind(constants.IP_PORT)
print "Server Ready"
# recieve
while temp_msg != constants.EOF:
datagram = helpers.sock.recvfrom(constants.BUFFER_SIZE)
temp_msg = datagram[0]
if file_path is None:
print("Receiving " + temp_msg.decode() + "...")
file_path = os.path.join(constants.SERVER_FILE_PATH, temp_msg.decode())
else:
whole_msg += temp_msg
whole_msg = whole_msg.strip(constants.EOF)
with open(file_path, 'wb') as sFile:
sFile.write(whole_msg)
print "Received"
| 22.296296
| 79
| 0.696013
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 58
| 0.096346
|
0c0dcfc232bbe604e854e762de0825bd246ecc01
| 3,697
|
py
|
Python
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2021-09-07T18:36:04.000Z
|
2021-09-07T18:36:04.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 2
|
2019-10-02T23:37:38.000Z
|
2020-10-02T01:17:31.000Z
|
sdk/apimanagement/azure-mgmt-apimanagement/azure/mgmt/apimanagement/models/hostname_configuration.py
|
tzhanl/azure-sdk-for-python
|
18cd03f4ab8fd76cc0498f03e80fbc99f217c96e
|
[
"MIT"
] | 1
|
2019-06-17T22:18:23.000Z
|
2019-06-17T22:18:23.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class HostnameConfiguration(Model):
"""Custom hostname configuration.
All required parameters must be populated in order to send to Azure.
:param type: Required. Hostname type. Possible values include: 'Proxy',
'Portal', 'Management', 'Scm', 'DeveloperPortal'
:type type: str or ~azure.mgmt.apimanagement.models.HostnameType
:param host_name: Required. Hostname to configure on the Api Management
service.
:type host_name: str
:param key_vault_id: Url to the KeyVault Secret containing the Ssl
Certificate. If absolute Url containing version is provided, auto-update
of ssl certificate will not work. This requires Api Management service to
be configured with MSI. The secret should be of type
*application/x-pkcs12*
:type key_vault_id: str
:param encoded_certificate: Base64 Encoded certificate.
:type encoded_certificate: str
:param certificate_password: Certificate Password.
:type certificate_password: str
:param default_ssl_binding: Specify true to setup the certificate
associated with this Hostname as the Default SSL Certificate. If a client
does not send the SNI header, then this will be the certificate that will
be challenged. The property is useful if a service has multiple custom
hostname enabled and it needs to decide on the default ssl certificate.
The setting only applied to Proxy Hostname Type. Default value: False .
:type default_ssl_binding: bool
:param negotiate_client_certificate: Specify true to always negotiate
client certificate on the hostname. Default Value is false. Default value:
False .
:type negotiate_client_certificate: bool
:param certificate: Certificate information.
:type certificate: ~azure.mgmt.apimanagement.models.CertificateInformation
"""
_validation = {
'type': {'required': True},
'host_name': {'required': True},
}
_attribute_map = {
'type': {'key': 'type', 'type': 'str'},
'host_name': {'key': 'hostName', 'type': 'str'},
'key_vault_id': {'key': 'keyVaultId', 'type': 'str'},
'encoded_certificate': {'key': 'encodedCertificate', 'type': 'str'},
'certificate_password': {'key': 'certificatePassword', 'type': 'str'},
'default_ssl_binding': {'key': 'defaultSslBinding', 'type': 'bool'},
'negotiate_client_certificate': {'key': 'negotiateClientCertificate', 'type': 'bool'},
'certificate': {'key': 'certificate', 'type': 'CertificateInformation'},
}
def __init__(self, **kwargs):
super(HostnameConfiguration, self).__init__(**kwargs)
self.type = kwargs.get('type', None)
self.host_name = kwargs.get('host_name', None)
self.key_vault_id = kwargs.get('key_vault_id', None)
self.encoded_certificate = kwargs.get('encoded_certificate', None)
self.certificate_password = kwargs.get('certificate_password', None)
self.default_ssl_binding = kwargs.get('default_ssl_binding', False)
self.negotiate_client_certificate = kwargs.get('negotiate_client_certificate', False)
self.certificate = kwargs.get('certificate', None)
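A small construction sketch (added for illustration; the values are placeholders and are not taken from Azure documentation):

custom_hostname = HostnameConfiguration(
    type='Proxy',
    host_name='api.contoso.example',
    encoded_certificate='<base64-encoded pfx>',      # placeholder
    certificate_password='<certificate password>',   # placeholder
    default_ssl_binding=True,
)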
| 48.012987
| 94
| 0.678117
| 3,181
| 0.860427
| 0
| 0
| 0
| 0
| 0
| 0
| 2,846
| 0.769813
|
0c0decf0160c2c2495315ba2014b0b8cb06458ac
| 4,717
|
py
|
Python
|
src/interactive_conditional_samples.py
|
50417/gpt-2
|
0e0b3c97efb0048abffb2947aaa8573a783706ed
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
50417/gpt-2
|
0e0b3c97efb0048abffb2947aaa8573a783706ed
|
[
"MIT"
] | null | null | null |
src/interactive_conditional_samples.py
|
50417/gpt-2
|
0e0b3c97efb0048abffb2947aaa8573a783706ed
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import fire
import json
import os
import numpy as np
import tensorflow as tf
import model, sample, encoder
def interact_model(
model_name='117M',
seed=None,
nsamples=1000,
batch_size=1,
length=None,
temperature=1,
top_k=0,
top_p=0.0
):
"""
Interactively run the model
:model_name=117M : String, which model to use
:seed=None : Integer seed for random number generators, fix seed to reproduce
results
    :nsamples=1000 : Number of samples to return total
:batch_size=1 : Number of batches (only affects speed/memory). Must divide nsamples.
:length=None : Number of tokens in generated text, if None (default), is
determined by model hyperparameters
:temperature=1 : Float value controlling randomness in boltzmann
distribution. Lower temperature results in less random completions. As the
temperature approaches zero, the model will become deterministic and
repetitive. Higher temperature results in more random completions.
:top_k=0 : Integer value controlling diversity. 1 means only 1 word is
considered for each step (token), resulting in deterministic completions,
while 40 means 40 words are considered at each step. 0 (default) is a
special setting meaning no restrictions. 40 generally is a good value.
:top_p=0.0 : Float value controlling diversity. Implements nucleus sampling,
overriding top_k if set to a value > 0. A good setting is 0.9.
"""
if batch_size is None:
batch_size = 1
assert nsamples % batch_size == 0
enc = encoder.get_encoder(model_name)
hparams = model.default_hparams()
with open(os.path.join('models', model_name, 'hparams.json')) as f:
hparams.override_from_dict(json.load(f))
if length is None:
length = hparams.n_ctx // 2
print(length)
#elif length > hparams.n_ctx:
# raise ValueError("Can't get samples longer than window size: %s" % hparams.n_ctx)
#config = tf.ConfigProto(device_count={'GPU': 0})
config = tf.ConfigProto()
with tf.Session(graph=tf.Graph(),config=config) as sess:
context = tf.placeholder(tf.int32, [batch_size, None])
np.random.seed(seed)
tf.set_random_seed(seed)
raw_text = """Model {"""
#input("Model prompt >>> ")
context_tokens = enc.encode(raw_text)
output = sample.sample_sequence(
hparams=hparams, length=length,
context=context,
batch_size=batch_size,
temperature=temperature, top_k=top_k, top_p=top_p
)
saver = tf.train.Saver()
ckpt = tf.train.latest_checkpoint(os.path.join('models', model_name))
saver.restore(sess, ckpt)
from datetime import datetime
#while True:
generated = 0
import time
grand_start = time.time()
for cnt in range(nsamples // batch_size):
start_per_sample = time.time()
output_text = raw_text
text = raw_text
context_tokens = enc.encode(text)
#raw_text = input("Model prompt >>> ")
# while not raw_text:
# print('Prompt should not be empty!')
# raw_text = input("Model prompt >>> ")
#print(context_tokens)
#file_to_save.write(raw_text)
#for cnt in range(nsamples // batch_size):
while "<|endoftext|>" not in text:
out = sess.run(output, feed_dict={context: [context_tokens for _ in range(batch_size)]})[:,
len(context_tokens):]
for i in range(batch_size):
#generated += 1
text = enc.decode(out[i])
if "<|endoftext|>" in text:
sep = "<|endoftext|>"
rest = text.split(sep, 1)[0]
output_text += rest
break
context_tokens = enc.encode(text)
output_text += text
print("=" * 40 + " SAMPLE " + str(cnt+12) + " " + "=" * 40)
minutes, seconds = divmod(time.time() - start_per_sample, 60)
print("Output Done : {:0>2}:{:05.2f}".format(int(minutes),seconds) )
print("=" * 80)
with open("Simulink_sample/sample__"+str(cnt+12)+".mdl","w+") as f:
f.write(output_text)
elapsed_total = time.time()-grand_start
hours, rem = divmod(elapsed_total,3600)
minutes, seconds = divmod(rem, 60)
print("Total time to generate 1000 samples :{:0>2}:{:0>2}:{:05.2f}".format(int(hours),int(minutes),seconds))
if __name__ == '__main__':
fire.Fire(interact_model)
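The top_k and top_p options described in the docstring restrict which tokens may be sampled at each step. As an illustration only (this is not the repository's sample.py implementation), a NumPy sketch of that filtering:

import numpy as np

def top_k_top_p_filter(logits, top_k=0, top_p=0.0):
    """Keep the top-k logits and/or the smallest nucleus reaching top_p; set the rest to -inf."""
    logits = np.array(logits, dtype=float)
    if top_k > 0:
        kth_largest = np.sort(logits)[-top_k]
        logits[logits < kth_largest] = -np.inf
    if top_p > 0.0:
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order][0])
        probs = probs / probs.sum()
        keep = np.searchsorted(np.cumsum(probs), top_p) + 1  # always keep at least one token
        logits[order[keep:]] = -np.inf
    return logits

# top_k_top_p_filter([1.0, 3.0, 2.0, 0.5], top_k=2) -> [-inf, 3.0, 2.0, -inf]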
| 38.349593
| 116
| 0.606954
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,928
| 0.408734
|
0c0e5be12d46a3b1b4e3d634643649fcf6a3f4da
| 291
|
py
|
Python
|
todofy/tests/conftest.py
|
bokiex/eti_todo
|
1c636d0973c57d4253440b4528185dba0ecb9d05
|
[
"BSD-3-Clause"
] | 1
|
2019-11-29T09:52:19.000Z
|
2019-11-29T09:52:19.000Z
|
todofy/tests/conftest.py
|
bokiex/eti_todo
|
1c636d0973c57d4253440b4528185dba0ecb9d05
|
[
"BSD-3-Clause"
] | 28
|
2019-11-28T20:02:48.000Z
|
2022-02-10T14:04:45.000Z
|
todofy/tests/conftest.py
|
bokiex/eti_todo
|
1c636d0973c57d4253440b4528185dba0ecb9d05
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
@pytest.fixture(scope='module')
def driver():
from selenium import webdriver
options = webdriver.ChromeOptions()
options.headless = True
chrome = webdriver.Chrome(options=options)
chrome.set_window_size(1440, 900)
yield chrome
chrome.close()
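A minimal sketch of a test that consumes the module-scoped driver fixture above; the test name, URL and assertion are placeholders, not taken from the original test suite.

# Hypothetical test using the `driver` fixture above (placeholder URL).
def test_page_title(driver):
    driver.get("https://example.com")
    assert "Example Domain" in driver.title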
| 16.166667
| 46
| 0.704467
| 0
| 0
| 242
| 0.831615
| 274
| 0.941581
| 0
| 0
| 8
| 0.027491
|
0c0e6124651142c0387644ad144cc2392388c0c5
| 33
|
py
|
Python
|
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
|
ruben69695/python-course
|
a3d3532279510fa0315a7636c373016c7abe4f0a
|
[
"MIT"
] | 1
|
2019-01-27T20:44:53.000Z
|
2019-01-27T20:44:53.000Z
|
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
|
ruben69695/python-course
|
a3d3532279510fa0315a7636c373016c7abe4f0a
|
[
"MIT"
] | null | null | null |
Fase 4 - Temas avanzados/Tema 11 - Modulos/Leccion 01 - Modulos/Saludos/test.py
|
ruben69695/python-course
|
a3d3532279510fa0315a7636c373016c7abe4f0a
|
[
"MIT"
] | null | null | null |
import saludos
saludos.saludar()
| 11
| 17
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c0ea1386a3f6993039b27ca1ae2f4e56ebc457c
| 1,033
|
py
|
Python
|
question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 9
|
2020-08-12T10:01:00.000Z
|
2022-01-05T04:37:48.000Z
|
question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 1
|
2021-02-16T10:19:31.000Z
|
2021-02-16T10:19:31.000Z
|
question_bank/split-array-into-fibonacci-sequence/split-array-into-fibonacci-sequence.py
|
yatengLG/leetcode-python
|
5d48aecb578c86d69835368fad3d9cc21961c226
|
[
"Apache-2.0"
] | 4
|
2020-08-12T10:13:31.000Z
|
2021-11-05T01:26:58.000Z
|
# -*- coding: utf-8 -*-
# @Author : LG
"""
执行用时:148 ms, 在所有 Python3 提交中击败了35.57% 的用户
内存消耗:13.7 MB, 在所有 Python3 提交中击败了36.81% 的用户
解题思路:
回溯
具体实现见代码注释
"""
from typing import List
class Solution:
def splitIntoFibonacci(self, S: str) -> List[int]:
def backtrack(S, current):
if S == '' and len(current) > 2: # the whole string is consumed and the sequence has more than 2 numbers, return the final result
return True
if S == '': # the string is fully processed (sequence too short), stop this branch
return
for i in range(1, len(S)+1): # try every prefix of the remaining string
if (S[0] == '0' and i == 1) or (S[0] != '0'): # allow a lone 0 but exclude numbers with a leading zero, e.g. 01, 02
if int(S[:i]) < (2**31-1) and (len(current) < 2 or int(S[:i]) == int(current[-1]) + int(current[-2])): # respect the 32-bit limit; with fewer than 2 numbers append directly, otherwise the new number must equal the sum of the previous two
current.append(S[:i])
if backtrack(S[i:], current):
return current
current.pop()
result = backtrack(S, [])
if result:
return result
else:
return []
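A quick, hypothetical standalone check of the backtracking routine above; the variable name solver and the sample inputs are assumptions, and LeetCode would normally run this class inside its own harness.

# Hypothetical standalone check of the Solution class above.
solver = Solution()
print(solver.splitIntoFibonacci("123456579"))  # splits into 123, 456, 579
print(solver.splitIntoFibonacci("0123"))       # no valid split because of the leading zero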
| 30.382353
| 159
| 0.460794
| 1,039
| 0.798616
| 0
| 0
| 0
| 0
| 0
| 0
| 545
| 0.418909
|
0c111c07238e7921c9ce9cb0615b8ac96b16babf
| 2,771
|
py
|
Python
|
convert_bootswatch_vurple.py
|
douglaskastle/bootswatch
|
cb8f368c8d3671afddae487736d7cba6509b7f5b
|
[
"MIT"
] | null | null | null |
convert_bootswatch_vurple.py
|
douglaskastle/bootswatch
|
cb8f368c8d3671afddae487736d7cba6509b7f5b
|
[
"MIT"
] | null | null | null |
convert_bootswatch_vurple.py
|
douglaskastle/bootswatch
|
cb8f368c8d3671afddae487736d7cba6509b7f5b
|
[
"MIT"
] | null | null | null |
import re
import os
values = {
'uc': 'Vurple',
'lc': 'vurple',
'cl': '#116BB7',
}
def main():
infile = "yeti/variables.less"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/variables.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
line = re.sub('yeti', values['lc'], line)
line = re.sub('#008cba', values['cl'], line)
line = re.sub('headings-font-family: @font-family-base', 'headings-font-family: @font-family-header-sans-serif', line)
if re.search("Open Sans", line):
line = re.sub('Open Sans', 'Lato', line)
line = '@font-family-header-sans-serif: "Orbitron", "Helvetica Neue", Helvetica, Arial, sans-serif;\n' + line
f.write(line)
f.close()
infile = "yeti/bootswatch.less"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/bootswatch.less"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
if re.search("Open\+Sans", line):
continue
if re.search("web-font-path", line):
line = '@web-font-path2: "https://fonts.googleapis.com/css?family=Lato:400,700,400italic";\n' + line
line = '@web-font-path: "https://fonts.googleapis.com/css?family=Orbitron:300italic,400italic,700italic,400,300,700";\n' + line
line = line + '.web-font(@web-font-path2);\n'
f.write(line)
f.close()
infile = "yeti/index.html"
f = open(infile, 'r')
lines = f.readlines()
f.close()
outfile = values['lc'] + "/index.html"
f = open(outfile, 'w')
for line in lines:
line = re.sub('Yeti', values['uc'], line)
line = re.sub('yeti', values['lc'], line)
line = re.sub('UA-[0-9\-]+', '', line)
f.write(line)
f.close()
cmd = "/cygdrive/c/Users/keeshand/AppData/Roaming/npm/grunt swatch:{0}".format(values['lc'])
os.system(cmd)
cmd = "cp {0}/bootstrap.min.css ../vurple_com/pelican-themes/bootstrap3/static/css/bootstrap.{0}.min.css".format(values['lc'])
os.system(cmd)
cmd = "cp bower_components/font-awesome/css/*.css ../vurple_com/pelican-themes/bootstrap3/static/css/."
os.system(cmd)
cmd = "cp bower_components/font-awesome/fonts/* ../vurple_com/pelican-themes/bootstrap3/static/fonts/."
os.system(cmd)
cmd = "cp bower_components/bootstrap/fonts/* ../vurple_com/pelican-themes/bootstrap3/static/fonts/."
os.system(cmd)
cmd = "cp bower_components/bootstrap/dist/js/* ../vurple_com/pelican-themes/bootstrap3/static/js/."
os.system(cmd)
if __name__ == '__main__':
main()
| 35.075949
| 139
| 0.587153
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,298
| 0.468423
|
0c117a09b3c94bdc715dd3e404e0bc7ed330ac20
| 721
|
py
|
Python
|
python/interface_getPixel.py
|
BulliB/PixelTable
|
f08ff3a7908857583f3cbc1b689abf2e8739f7d8
|
[
"BSD-2-Clause"
] | 2
|
2019-10-28T14:33:31.000Z
|
2019-10-30T10:08:58.000Z
|
python/interface_getPixel.py
|
BulliB/PixelTable
|
f08ff3a7908857583f3cbc1b689abf2e8739f7d8
|
[
"BSD-2-Clause"
] | 33
|
2019-10-28T14:17:26.000Z
|
2020-02-22T11:04:02.000Z
|
python/interface_getPixel.py
|
BulliB/PixelTable
|
f08ff3a7908857583f3cbc1b689abf2e8739f7d8
|
[
"BSD-2-Clause"
] | 2
|
2019-11-08T11:14:33.000Z
|
2019-11-19T21:22:54.000Z
|
#!/usr/bin/python3
from validData import *
from command import *
from readback import *
import sys
import time
# Expected Input
# 1: Row -> 0 to 9
# 2: Column -> 0 to 19
if (
isInt(sys.argv[1]) and strLengthIs(sys.argv[1],1) and
isInt(sys.argv[2]) and (strLengthIs(sys.argv[2],1) or strLengthIs(sys.argv[2],2))
):
command = ["PixelToWeb"]
command.append(sys.argv[1])
command.append(sys.argv[2])
setNewCommand(command)
time.sleep(.3)
print(readbackGet())
readbackClear()
else:
f = open("/var/www/pixel/python/.error", "a")
f.write(sys.argv[0] + '\n')
f.write(sys.argv[1] + '\n')
f.write(sys.argv[2] + '\n')
f.write(sys.argv[3] + '\n')
f.close()
| 21.848485
| 89
| 0.601942
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 135
| 0.18724
|
0c11d6edd1fa7404e67e7a29c7dcaef50cd598a8
| 1,834
|
py
|
Python
|
FunTOTP/interface.py
|
Z33DD/FunTOTP
|
912c1a4a307af6a495f12a82305ae7dbf49916a2
|
[
"Unlicense"
] | 3
|
2020-01-19T17:10:37.000Z
|
2022-02-19T18:39:20.000Z
|
FunTOTP/interface.py
|
Z33DD/FunTOTP
|
912c1a4a307af6a495f12a82305ae7dbf49916a2
|
[
"Unlicense"
] | null | null | null |
FunTOTP/interface.py
|
Z33DD/FunTOTP
|
912c1a4a307af6a495f12a82305ae7dbf49916a2
|
[
"Unlicense"
] | 1
|
2020-01-19T20:25:18.000Z
|
2020-01-19T20:25:18.000Z
|
from getpass import getpass
from colorama import init, Fore, Back, Style
yes = ['Y', 'y', 'YES', 'yes', 'Yes']
class interface(object):
"""
Terminal CLI
"""
def log(self, arg, get=False):
if not get:
print("[*]: {} ".format(arg))
else:
return "[*]: {} ".format(arg)
def error(self, arg, get=False):
"""Short summary.
Parameters
----------
arg : str
String to print
get : bool
If true, returns a string with the formated string
Returns
-------
str
If get = true, returns a string with the formated string
"""
if not get:
print(Fore.RED + "[ERROR]: {}".format(arg))
print(Style.RESET_ALL)
exit(-1)
else:
return "[ERROR]: {}".format(arg)
def warning(self, arg, get=False):
if not get:
print(Fore.YELLOW + "[!]: {}".format(arg), end='')
print(Style.RESET_ALL)
else:
return "[!]: {}".format(arg)
def sure(self):
user = input(self.log("Are you sure? (y/N) ", get=True))
if user in yes:
return 0
else:
exit(0)
def newpasswd(self):
condition = True
while condition is True:
user_psswd = getpass("[*]: Password:")
user_psswd_repeat = getpass("[*]: Repeat password:")
if user_psswd == user_psswd_repeat:
condition = False
else:
self.warning("Passwords don't match! Try again")
return user_psswd_repeat
def passwd(self):
return getpass()
def info(self, arg):
print(Fore.BLACK + Back.WHITE + "[i]: {}".format(arg) + ' ', end='')
print(Style.RESET_ALL)
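A minimal sketch of how the terminal helper above might be driven; the messages are made up, and the surrounding FunTOTP code that would normally call these methods is not shown here.

# Hypothetical driver for the interface class above (messages are examples only).
ui = interface()
ui.log("Opening vault")
ui.warning("Vault file not found, a new one will be created")
password = ui.newpasswd()  # prompts twice until both entries match
ui.info("Vault ready")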
| 25.123288
| 76
| 0.490185
| 1,721
| 0.938386
| 0
| 0
| 0
| 0
| 0
| 0
| 535
| 0.291712
|
0c1456a33812aa7157896227520f3def0676ad91
| 885
|
py
|
Python
|
envdsys/envcontacts/apps.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 1
|
2021-11-06T19:22:53.000Z
|
2021-11-06T19:22:53.000Z
|
envdsys/envcontacts/apps.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | 25
|
2019-06-18T20:40:36.000Z
|
2021-07-23T20:56:48.000Z
|
envdsys/envcontacts/apps.py
|
NOAA-PMEL/envDataSystem
|
4db4a3569d2329658799a3eef06ce36dd5c0597d
|
[
"Unlicense"
] | null | null | null |
from django.apps import AppConfig
class EnvcontactsConfig(AppConfig):
name = 'envcontacts'
# def ready(self) -> None:
# from envnet.registry.registry import ServiceRegistry
# try:
# from setup.ui_server_conf import run_config
# host = run_config["HOST"]["name"]
# except KeyError:
# host = "localhost"
# try:
# from setup.ui_server_conf import run_config
# port = run_config["HOST"]["port"]
# except KeyError:
# port = "8000"
# local = True
# config = {
# "host": host,
# "port": port,
# "regkey": None,
# "service_list": {"envdsys_contacts": {}},
# }
# registration = ServiceRegistry.register(local, config)
# print(registration)
# return super().ready()
| 29.5
| 64
| 0.523164
| 849
| 0.959322
| 0
| 0
| 0
| 0
| 0
| 0
| 681
| 0.769492
|
0c16dc36c44b72bd40c213bf05ac31ec7273fca3
| 8,053
|
py
|
Python
|
tests/interpreter.py
|
AndrejHatzi/Haya
|
31291142decf6a172149516f08a2f2d68115e2dc
|
[
"MIT"
] | null | null | null |
tests/interpreter.py
|
AndrejHatzi/Haya
|
31291142decf6a172149516f08a2f2d68115e2dc
|
[
"MIT"
] | 1
|
2019-02-14T16:47:10.000Z
|
2019-02-14T16:47:10.000Z
|
tests/interpreter.py
|
AndrejHatzi/Haya
|
31291142decf6a172149516f08a2f2d68115e2dc
|
[
"MIT"
] | null | null | null |
from sly import Lexer
from sly import Parser
import sys
#--------------------------
# While Loop
# Del Var
# Print stmt
# EQEQ, LEQ
#--------------------------
#=> This version has parenthesis precedence!
class BasicLexer(Lexer):
tokens = { NAME, NUMBER, STRING, IF, FOR, PRINT, CREATEFILE, WRITE, EQEQ, TO}
ignore = '\t '
ignore_newline = r'\n+'
literals = { '=', '+', '-', '/', '*', '(', ')', ',', ';', ':', '.'}
# Define tokens
IF = r'if'
#FUN = r'function'
FOR = r'for'
TO = r','
PRINT = r'print'
CREATEFILE = r'createfile'
WRITE = 'write'
NAME = r'[a-zA-Z_][a-zA-Z0-9_]*'
STRING = r'\".*?\"'
EQEQ = r'=='
@_(r'\d+')
def NUMBER(self, t):
t.value = int(t.value)
return t
@_(r'#.*')
def COMMENT(self, t):
pass
@_(r'\n+')
def newline(self,t ):
self.lineno = t.value.count('\n')
class BasicParser(Parser):
debugfile = 'parser.out'
tokens = BasicLexer.tokens
precedence = (
('left', '+', '-'),
('left', '*', '/'),
('right', 'UMINUS'),
)
def __init__(self):
self.env = { }
@_('')
def statement(self, p):
pass
# TODO: update the syntax
@_('NAME "(" ")" ":" statement')
def statement(self, p):
return ('fun_def', p.NAME, p.statement)
@_('FOR "(" var_assign TO expr ")" ":" statement')
def statement(self, p):
return ('for_loop', ('for_loop_setup', p.var_assign, p.expr), p.statement)
@_('IF "(" condition ")" ":" statement')
def statement(self, p):
return ('if_stmt', p.condition, p.statement)
@_('NAME "(" ")"')
def statement(self, p):
return ('fun_call', p.NAME)
@_('var_assign')
def statement(self, p):
return p.var_assign
@_('NAME "=" STRING')
def var_assign(self, p):
return ('var_assign', p.NAME, p.STRING)
@_('NAME "=" statement')
def var_assign(self, p):
return ('var_assign', p.NAME, p.statement)
@_('PRINT "(" statement ")"')
def statement(self, p):
return ('print_stmt', p.statement)
@_('PRINT "(" STRING ")"')
def statement(self, p):
return ('print_stmt_string' , p.STRING)
@_('CREATEFILE "(" STRING ")"')
def statement(self, p):
return ('createfile_stmt', p.STRING)
@_('STRING "." WRITE "(" STRING ")"')
def statement(self, p):
return ('add_to_file_stmt', p.STRING0 ,p.STRING1)
@_('expr')
def statement(self, p):
return (p.expr)
@_('expr "+" expr')
def expr(self, p):
return ('add', p.expr0, p.expr1)
@_('expr "-" expr')
def expr(self, p):
return ('sub', p.expr0, p.expr1)
@_('expr "*" expr')
def expr(self, p):
return ('mul', p.expr0, p.expr1)
@_('expr "/" expr')
def expr(self, p):
return ('div', p.expr0, p.expr1)
@_('expr EQEQ expr')
def condition(self, p):
return ('condition_eqeq', p.expr0, p.expr1)
@_('"-" expr %prec UMINUS')
def expr(self, p):
return p.expr
@_('"(" expr ")"')
def expr(self, p):
return p.expr
@_('NAME')
def expr(self, p):
return ('var', p.NAME)
@_('NUMBER')
def expr(self, p):
return ('num', p.NUMBER)
class BasicExecute:
def __init__(self, tree, env):
self.env = env
result = self.walkTree(tree)
#print(env)
if result is not None and isinstance(result, int):
print(result)
if isinstance(result, str) and result[0] == '"':
print(result)
def walkTree(self, node):
if isinstance(node, int):
return node
if isinstance(node, str):
return node
if node is None:
return None
if node[0] == 'program':
if node[1] == None:
self.walkTree(node[2])
else:
self.walkTree(node[1])
self.walkTree(node[2])
if node[0] == 'num':
return node[1]
if node[0] == 'str':
return node[1]
if node[0] == 'if_stmt':
result = self.walkTree(node[1])
if result:
return self.walkTree(node[1][1])
if node[0] == 'fun_def':
self.env[node[1]] = node[2]
if node[0] == 'fun_call':
try:
return self.walkTree(self.env[node[1]])
except LookupError:
print("Undefined function '%s'" % node[1])
return 0
if node[0] == 'add':
return self.walkTree(node[1]) + self.walkTree(node[2])
elif node[0] == 'sub':
return self.walkTree(node[1]) - self.walkTree(node[2])
elif node[0] == 'mul':
return self.walkTree(node[1]) * self.walkTree(node[2])
elif node[0] == 'div':
return self.walkTree(node[1]) / self.walkTree(node[2])
if node[0] == 'condition_eqeq':
return self.walkTree(node[1]) == self.walkTree(node[2])
if node[0] == 'var_assign':
self.env[node[1]] = self.walkTree(node[2])
return node[1]
if node[0] == 'var':
try:
return self.env[node[1]]
except LookupError:
print("Undefined variable '"+node[1]+"' found!")
return 0
if node[0] == 'for_loop':
if node[1][0] == 'for_loop_setup':
loop_setup = self.walkTree(node[1])
#searches for the var in the env and gets its value
loop_count = self.env[loop_setup[0]]
loop_limit = loop_setup[1]
for i in range(loop_count+1, loop_limit+1):
res = self.walkTree(node[2])
self.env[loop_setup[0]] = i
del self.env[loop_setup[0]]
if node[0] == 'for_loop_setup':
return (self.walkTree(node[1]), self.walkTree(node[2]))
if node[0] == 'print_stmt':
res = self.walkTree(node[1])
print(res)
if node[0] == 'print_stmt_string':
res = self.walkTree(node[1][1:-1])
print(res)
if node[0] == 'createfile_stmt':
file : str = self.walkTree(node[1][1:-1])
with open(file, 'a') as f:
pass
# node[1] = target file name, node[2] = text to write
if node[0] == 'add_to_file_stmt':  # must match the tag produced by the STRING "." WRITE parser rule
print(node[2])
try:
file : str = self.walkTree(node[1][1:-1])
with open(file, 'w') as f:
f.write(self.walkTree(node[2][1:-1]))
except LookupError:
print("file or dir '"+node[1][1:-1]+"' not found!")
return 0
if __name__ == '__main__':
lexer = BasicLexer()
parser = BasicParser()
env = {}
try:
file : str = sys.argv[1]
try:
with open(file, 'r', encoding="utf-8") as f:
line : str
for line in f:
try:
text = line
except EOFError:
break
if text:
tree = parser.parse(lexer.tokenize(text))
BasicExecute(tree, env)
except:
print('the specified file "{}" was not found!'.format(file))
except:
while True:
try:
text = input('haya development edition > ')
except EOFError:
break
if text:
tree = parser.parse(lexer.tokenize(text))
BasicExecute(tree, env)
#parsetree = parser.parse(lexer.tokenize(text))
#print(parsetree)
| 27.768966
| 83
| 0.46343
| 6,790
| 0.843164
| 0
| 0
| 2,244
| 0.278654
| 0
| 0
| 1,546
| 0.191978
|
0c16fa03a21f3d8b261783bab62dd87a48e2c16d
| 1,012
|
py
|
Python
|
braintree/apple_pay_card.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 182
|
2015-01-09T05:26:46.000Z
|
2022-03-16T14:10:06.000Z
|
braintree/apple_pay_card.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 95
|
2015-02-24T23:29:56.000Z
|
2022-03-13T03:27:58.000Z
|
braintree/apple_pay_card.py
|
futureironman/braintree_python
|
26bb8a857bc29322a8bca2e8e0fe6d99cfe6a1ac
|
[
"MIT"
] | 93
|
2015-02-19T17:59:06.000Z
|
2022-03-19T17:01:25.000Z
|
import braintree
from braintree.resource import Resource
class ApplePayCard(Resource):
"""
A class representing Braintree Apple Pay card objects.
"""
class CardType(object):
"""
Constants representing the type of the credit card. Available types are:
* Braintree.ApplePayCard.AmEx
* Braintree.ApplePayCard.MasterCard
* Braintree.ApplePayCard.Visa
"""
AmEx = "Apple Pay - American Express"
MasterCard = "Apple Pay - MasterCard"
Visa = "Apple Pay - Visa"
def __init__(self, gateway, attributes):
Resource.__init__(self, gateway, attributes)
if hasattr(self, 'expired'):
self.is_expired = self.expired
if "subscriptions" in attributes:
self.subscriptions = [braintree.subscription.Subscription(gateway, subscription) for subscription in self.subscriptions]
@property
def expiration_date(self):
return self.expiration_month + "/" + self.expiration_year
| 30.666667
| 132
| 0.662055
| 952
| 0.940711
| 0
| 0
| 106
| 0.104743
| 0
| 0
| 386
| 0.381423
|
0c1758002a3f4c2e5686dc0e50493960b4c98bea
| 4,054
|
py
|
Python
|
src/lda_without_tf_idf_sports.py
|
mspkvp/MiningOpinionTweets
|
23f05b4cea22254748675e03a51844da1dff70ac
|
[
"MIT"
] | 1
|
2016-01-18T14:30:31.000Z
|
2016-01-18T14:30:31.000Z
|
src/lda_without_tf_idf_sports.py
|
mspkvp/MiningOpinionTweets
|
23f05b4cea22254748675e03a51844da1dff70ac
|
[
"MIT"
] | null | null | null |
src/lda_without_tf_idf_sports.py
|
mspkvp/MiningOpinionTweets
|
23f05b4cea22254748675e03a51844da1dff70ac
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
from time import time
import csv
import sys
import os
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
import lda
import logging
logging.basicConfig(filename='lda_analyser.log', level=logging.DEBUG)
entities = ['jose_mourinho',
'cristiano_ronaldo',
'ruben_neves',
'pinto_da_costa',
'jorge_jesus',
'lionel_messi',
'eusebio',
'luisao',
'paulo_bento',
'iker_casillas',
'joao_moutinho',
'jorge_mendes',
'julen_lopetegui',
'rui_vitoria',
'ricardo',
'luis_figo',
'jose_socrates',
'antonio_costa',
'benfica',
'futebol_porto',
'sporting']
if not os.path.exists("results"):
os.makedirs("results")
for n_topics in [10, 20, 50, 100]:
n_features = 10000
n_top_words = int(sys.argv[1]) + 1
corpus = []
topics_write_file = csv.writer(open("results/lda_topics_{}topics_{}words_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
write_file = csv.writer(open("results/lda_topics_{}topics_{}words_mapping_{}.csv".format(n_topics,
n_top_words - 1,
"sports"), "wb"),
delimiter="\t", quotechar='|', quoting=csv.QUOTE_MINIMAL)
def print_top_words(model, doc_topic, feature_names, n_top_words, dictionary):
for i, topic_dist in enumerate(model):
topic_words = np.array(feature_names)[np.argsort(topic_dist)][:-n_top_words:-1]
#write_file.write('Topic {}: {}\n'.format(i, ' '.join(topic_words)))
topic_row = [str(i)]
topic_row.extend(topic_words)
topics_write_file.writerow(topic_row)
for i in range(len(corpus)):
document_row = [dictionary[i][0], dictionary[i][1]]
document_row.append(doc_topic[i].argmax())
#document_row.append(corpus[i])
write_file.writerow(document_row)
entity_day_dict = dict()
# read all files and store their contents on a dictionary
for i in os.listdir(os.getcwd() + "/filtered_tweets"):
for filename in os.listdir(os.getcwd() + "/filtered_tweets" + "/" + i):
if(filename.split(".")[0] in entities):
entity_day_dict[i+" "+filename] = open(os.getcwd() + "/filtered_tweets" + "/" + i + "/" + filename, 'r').read()
entity_day_key_index = dict()
i = 0
for key in entity_day_dict:
entity_day_key_index[i] = key.split(" ")
corpus.append(entity_day_dict[key])
i += 1
# Use tf (raw term count) features for LDA.
logging.info("Extracting tf features for LDA...")
tf_vectorizer = CountVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
t0 = time()
tf = tf_vectorizer.fit_transform(corpus)
logging.info("done in %0.3fs." % (time() - t0))
logging.info("Fitting LDA models with tf")
model = lda.LDA(n_topics=n_topics, n_iter=1500, random_state=1)
#LatentDirichletAllocation(n_topics=n_topics, max_iter=5, learning_method='online', #learning_offset=50., random_state=0)
t0 = time()
model.fit(tf)
logging.info("done in %0.3fs." % (time() - t0))
topic_word = model.topic_word_
doc_topic = model.doc_topic_
logging.info("\nTopics in LDA model:")
tf_feature_names = tf_vectorizer.get_feature_names()
print_top_words(topic_word, doc_topic, tf_feature_names, n_top_words, entity_day_key_index)
| 37.192661
| 129
| 0.561914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 973
| 0.24001
|
0c18dab0a973e417315a5c146525d7d91b9da0fe
| 4,476
|
py
|
Python
|
glab_common/allsummary.py
|
gentnerlab/glab-common-py
|
9ff87ac6ca5f07c0d550594da38080bd3ee916db
|
[
"BSD-3-Clause"
] | 3
|
2016-03-07T19:51:32.000Z
|
2018-11-08T22:34:14.000Z
|
glab_common/allsummary.py
|
gentnerlab/glab-common-py
|
9ff87ac6ca5f07c0d550594da38080bd3ee916db
|
[
"BSD-3-Clause"
] | 16
|
2015-02-19T04:32:01.000Z
|
2018-11-14T20:09:09.000Z
|
glab_common/allsummary.py
|
gentnerlab/glab-common-py
|
9ff87ac6ca5f07c0d550594da38080bd3ee916db
|
[
"BSD-3-Clause"
] | 4
|
2015-04-01T23:55:25.000Z
|
2018-02-28T18:23:29.000Z
|
from __future__ import print_function
import re
import datetime as dt
from behav.loading import load_data_pandas
import warnings
import subprocess
import os
import sys
process_fname = "/home/bird/opdat/panel_subject_behavior"
box_nums = []
bird_nums = []
processes = []
with open(process_fname, "rt") as psb_file:
for line in psb_file.readlines():
if line.startswith("#") or not line.strip():
pass # skip comment lines & blank lines
else:
spl_line = line.split()
if spl_line[1] == "1": # box enabled
box_nums.append(spl_line[0])
bird_nums.append(int(spl_line[2]))
processes.append(spl_line[4])
# rsync magpis
hostname = os.uname()[1]
if "magpi" in hostname:
for box_num in box_nums:
box_hostname = box_num
rsync_src = "bird@{}:/home/bird/opdat/".format(box_hostname)
rsync_dst = "/home/bird/opdat/"
print("Rsync src: {}".format(rsync_src), file=sys.stderr)
print("Rsync dest: {}".format(rsync_dst), file=sys.stderr)
rsync_output = subprocess.run(["rsync", "-avz", "--exclude Generated_Songs/", rsync_src, rsync_dst])
subjects = ["B%d" % (bird_num) for bird_num in bird_nums]
data_folder = "/home/bird/opdat"
with open("/home/bird/all.summary", "w") as as_file:
as_file.write(
"this all.summary generated at %s\n" % (dt.datetime.now().strftime("%x %X"))
)
as_file.write(
"FeedErr(won't come up, won't go down, already up, resp during feed)\n"
)
# Now loop through each bird and grab the error info from each summaryDAT file
for (box, bird, proc) in zip(box_nums, bird_nums, processes):
try:
# make sure box is a string
box = str(box)
if proc in ("shape", "lights", "pylights", "lights.py"):
as_file.write("%s\tB%d\t %s\n" % (box, bird, proc))
else:
summaryfname = "/home/bird/opdat/B%d/%d.summaryDAT" % (bird, bird)
with open(summaryfname, "rt") as sdat:
sdata = sdat.read()
m = re.search(r"failures today: (\w+)", sdata)
hopper_failures = m.group(1)
m = re.search(r"down failures today: (\w+)", sdata)
godown_failures = m.group(1)
m = re.search(r"up failures today: (\w+)", sdata)
goup_failures = m.group(1)
m = re.search(r"Responses during feed: (\w+)", sdata)
resp_feed = m.group(1)
subj = "B%d" % (bird)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
behav_data = load_data_pandas([subj], data_folder)
df = behav_data[subj]
# df = df[~pd.isnull(data.index)]
todays_data = df[
(df.index.date - dt.datetime.today().date()) == dt.timedelta(days=0)
]
feeder_ops = sum(todays_data["reward"].values)
trials_run = len(todays_data)
noRs = sum(todays_data["response"].values == "none")
TOs = trials_run - feeder_ops - noRs
last_trial_time = todays_data.sort_index().tail().index[-1]
if last_trial_time.day != dt.datetime.now().day:
datediff = "(not today)"
else:
minutes_ago = (dt.datetime.now() - last_trial_time).seconds / 60
datediff = "(%d mins ago)" % (minutes_ago)
outline = (
"%s\tB%d\t %s \ttrls=%s \tfeeds=%d \tTOs=%d \tnoRs=%d \tFeedErrs=(%s,%s,%s,%s) \tlast @ %s %s\n"
% (
box,
bird,
proc,
trials_run,
feeder_ops,
TOs,
noRs,
hopper_failures,
godown_failures,
goup_failures,
resp_feed,
last_trial_time.strftime("%x %X"),
datediff,
)
)
as_file.write(outline)
except Exception as e:
as_file.write(
"%s\tB%d\t Error opening SummaryDat or incorrect format\n" % (box, bird)
)
print(e)
| 38.921739
| 122
| 0.50849
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 974
| 0.217605
|
0c194bbda6bf427b571869e7619f91e9298b8f04
| 2,239
|
py
|
Python
|
api/v1/circuits.py
|
tahoe/janitor
|
b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd
|
[
"Apache-2.0"
] | 52
|
2019-08-14T10:48:26.000Z
|
2022-03-30T18:09:08.000Z
|
api/v1/circuits.py
|
tahoe/janitor
|
b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd
|
[
"Apache-2.0"
] | 18
|
2019-08-20T04:13:37.000Z
|
2022-01-31T12:40:12.000Z
|
api/v1/circuits.py
|
tahoe/janitor
|
b6ce73bddc13c70079bdc7ba4c7a9b3ee0cad0bd
|
[
"Apache-2.0"
] | 12
|
2019-08-14T10:49:11.000Z
|
2020-09-02T18:56:34.000Z
|
from app.models import Circuit, CircuitSchema, Provider
from flask import make_response, jsonify
from app import db
def read_all():
"""
This function responds to a request for /circuits
with the complete list of circuits
:return: list of circuits
"""
circuits = Circuit.query.all()
schema = CircuitSchema(many=True)
return schema.dump(circuits).data
def read_one(circuit_id):
circuit = Circuit.query.filter(Circuit.id == circuit_id).one_or_none()
if not circuit:
text = f'circuit not found for id {circuit_id}'
return make_response(jsonify(error=404, message=text), 404)
schema = CircuitSchema()
data = schema.dump(circuit).data
return data
def create(circuit):
"""
creates a circuit! checks to see if the provider_cid is unique and
that the provider exists.
:return: circuit
"""
provider_cid = circuit.get('provider_cid')
provider_id = circuit.get('provider_id')
circuit_exists = Circuit.query.filter(
Circuit.provider_cid == provider_cid
).one_or_none()
provider_exists = Provider.query.filter(Provider.id == provider_id).one_or_none()
if circuit_exists:
text = f'Circuit {provider_cid} already exists'
return make_response(jsonify(error=409, message=text), 409)
if not provider_exists:
text = f'Provider {provider_id} does not exist. Unable to create circuit'
return make_response(jsonify(error=403, message=text), 403)
schema = CircuitSchema()
new_circuit = schema.load(circuit, session=db.session).data
db.session.add(new_circuit)
db.session.commit()
data = schema.dump(new_circuit).data
return data, 201
def update(circuit_id, circuit):
"""
updates a circuit!
:return: circuit
"""
c = Circuit.query.filter_by(id=circuit_id).one_or_none()
if not c:
text = 'Cannot update a circuit that does not exist!'
return make_response(jsonify(error=404, message=text), 404)
schema = CircuitSchema()
update = schema.load(circuit, session=db.session).data
db.session.merge(update)
db.session.commit()
data = schema.dump(c).data
return data, 201
| 26.341176
| 85
| 0.676195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 575
| 0.256811
|
0c1a66a69d47f4abcbb592a1b69142a384d2f89b
| 2,311
|
py
|
Python
|
youtube_related/client.py
|
kijk2869/youtube-related
|
daabefc60277653098e1d8e266258b71567796d8
|
[
"MIT"
] | 7
|
2020-07-13T00:15:37.000Z
|
2021-12-06T14:35:14.000Z
|
youtube_related/client.py
|
kijk2869/youtube-related
|
daabefc60277653098e1d8e266258b71567796d8
|
[
"MIT"
] | 11
|
2020-07-17T16:11:16.000Z
|
2022-03-01T23:02:54.000Z
|
youtube_related/client.py
|
kijk2869/youtube-related
|
daabefc60277653098e1d8e266258b71567796d8
|
[
"MIT"
] | 3
|
2020-11-04T11:44:50.000Z
|
2022-01-11T04:21:01.000Z
|
import asyncio
import json
import re
from collections import deque
from typing import Deque, Dict, List, Match, Pattern
import aiohttp
from .error import RateLimited
headers: dict = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"
}
DATA_JSON: Pattern = re.compile(
r'(?:window\["ytInitialData"\]|ytInitialData)\W?=\W?({.*?});'
)
def fetch(vURL: str, local_addr: str = None) -> List[Dict]:
return asyncio.run(async_fetch(vURL, local_addr))
async def async_fetch(vURL: str, local_addr: str = None) -> List[Dict]:
connector: aiohttp.TCPConnector = (
aiohttp.TCPConnector(local_addr=(local_addr, 0)) if local_addr else None
)
async with aiohttp.ClientSession(connector=connector, headers=headers) as session:
async with session.get(vURL) as response:
if response.status == 429:
raise RateLimited
RAW: str = await response.text()
Search: Match = DATA_JSON.search(RAW)
if not Search:
raise ValueError("Could not extract ytInitialData.")
Data: Dict = json.loads(Search.group(1))
Overlay: Dict = Data["playerOverlays"]["playerOverlayRenderer"]
watchNextEndScreenRenderer: Dict = Overlay["endScreen"][
"watchNextEndScreenRenderer"
]
Result: list = [
{
"id": Item["videoId"],
"title": Item["title"]["simpleText"],
"duration": Item["lengthInSeconds"] if "lengthInSeconds" in Item else None,
}
for Item in [
result["endScreenVideoRenderer"]
for result in watchNextEndScreenRenderer["results"]
if "endScreenVideoRenderer" in result
]
]
return Result
class preventDuplication:
def __init__(self):
self._LastRelated: Deque = deque(maxlen=10)
def get(self, vURL: str, local_addr: str = None) -> Dict:
return asyncio.run(self.async_get(vURL, local_addr))
async def async_get(self, vURL: str, local_addr: str = None) -> Dict:
Data: List[Dict] = await async_fetch(vURL, local_addr)
for Item in Data:
if not Item["id"] in self._LastRelated:
self._LastRelated.append(Item["id"])
return Item
self._LastRelated.clear()
return Data[0]
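A minimal usage sketch of the helpers above; the video URL is a placeholder and the demo coroutine name is an assumption, not part of the module.

# Hypothetical usage of async_fetch defined above (placeholder URL).
import asyncio

async def demo():
    related = await async_fetch("https://www.youtube.com/watch?v=dQw4w9WgXcQ")
    for item in related[:3]:
        print(item["id"], item["title"], item["duration"])

asyncio.run(demo())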
| 29.253165
| 89
| 0.63479
| 581
| 0.251406
| 0
| 0
| 0
| 0
| 1,577
| 0.682389
| 404
| 0.174816
|
0c1b29cfd60d9ee7d4e6451a8264af9459d2ddcb
| 2,522
|
py
|
Python
|
app/request/migrations/0001_initial.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | null | null | null |
app/request/migrations/0001_initial.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | 32
|
2019-08-30T13:09:28.000Z
|
2021-06-10T19:07:56.000Z
|
app/request/migrations/0001_initial.py
|
contestcrew/2019SeoulContest-Backend
|
2e99cc6ec6a712911da3b79412ae84a9d35453e1
|
[
"MIT"
] | 3
|
2019-09-19T10:12:50.000Z
|
2019-09-30T15:59:13.000Z
|
# Generated by Django 2.2.5 on 2019-09-24 09:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, verbose_name='이름')),
('score', models.PositiveIntegerField(default=0, verbose_name='점수')),
('image', models.ImageField(blank=True, null=True, upload_to='category')),
],
),
migrations.CreateModel(
name='Request',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100, verbose_name='제목')),
('content', models.TextField(verbose_name='내용')),
('status', models.CharField(blank=True, choices=[('start', '도움요청중'), ('progress', '진행중'), ('complete', '완료')], default='start', max_length=20, verbose_name='상태')),
('score', models.PositiveIntegerField(default=0, verbose_name='점수')),
('main_address', models.CharField(blank=True, max_length=30, null=True, verbose_name='메인 주소')),
('detail_address', models.CharField(blank=True, max_length=50, null=True, verbose_name='상세 주소')),
('latitude', models.FloatField(blank=True, null=True, verbose_name='위도')),
('longitude', models.FloatField(blank=True, null=True, verbose_name='경도')),
('occurred_at', models.DateField(blank=True, null=True, verbose_name='발생 시각')),
('created_at', models.DateTimeField(auto_now_add=True, verbose_name='업로드 시각')),
('updated_at', models.DateTimeField(auto_now=True, verbose_name='수정 시각')),
],
),
migrations.CreateModel(
name='RequestImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ImageField(upload_to='request/%Y/%m/%d', verbose_name='이미지')),
('request', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='request.Request', verbose_name='의뢰')),
],
),
]
| 50.44
| 179
| 0.596352
| 2,500
| 0.952018
| 0
| 0
| 0
| 0
| 0
| 0
| 543
| 0.206778
|
0c1bfa28ddb2f6e0a2bc571eb9a019b7ef92cb0d
| 690
|
py
|
Python
|
field/FieldFactory.py
|
goph-R/NodeEditor
|
5cc4749785bbd348f3db01b27c1533b4caadb920
|
[
"Apache-2.0"
] | null | null | null |
field/FieldFactory.py
|
goph-R/NodeEditor
|
5cc4749785bbd348f3db01b27c1533b4caadb920
|
[
"Apache-2.0"
] | null | null | null |
field/FieldFactory.py
|
goph-R/NodeEditor
|
5cc4749785bbd348f3db01b27c1533b4caadb920
|
[
"Apache-2.0"
] | null | null | null |
from PySide2.QtGui import QVector3D, QColor
from field.ColorField import ColorField
from field.FloatField import FloatField
from field.StringField import StringField
from field.Vector3Field import Vector3Field
class FieldFactory(object):
def create(self, property):
result = None
type = property.type()
if type == str:
result = StringField(property)
elif type == float:
result = FloatField(property)
elif type == QVector3D:
result = Vector3Field(property)
elif type == QColor:
result = ColorField(property)
result.init()
return result
| 23
| 44
| 0.618841
| 465
| 0.673913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c1cee1a04ba87b43d0454e7e5294887e53530fd
| 1,348
|
py
|
Python
|
scrapy/utils/engine.py
|
sulochanaviji/scrapy
|
6071c82e7ac80136e844b56a09d5d31aa8f41296
|
[
"BSD-3-Clause"
] | 8
|
2021-02-01T07:55:19.000Z
|
2021-03-22T18:17:47.000Z
|
scrapy/utils/engine.py
|
sulochanaviji/scrapy
|
6071c82e7ac80136e844b56a09d5d31aa8f41296
|
[
"BSD-3-Clause"
] | 30
|
2021-02-17T14:17:57.000Z
|
2021-03-03T16:57:16.000Z
|
scrapy/utils/engine.py
|
sulochanaviji/scrapy
|
6071c82e7ac80136e844b56a09d5d31aa8f41296
|
[
"BSD-3-Clause"
] | 3
|
2021-08-21T04:09:17.000Z
|
2021-08-25T01:00:41.000Z
|
"""Some debugging functions for working with the Scrapy engine"""
# used in global tests code
from time import time # noqa: F401
def get_engine_status(engine):
"""Return a report of the current engine status"""
tests = [
"time()-engine.start_time",
"engine.has_capacity()",
"len(engine.downloader.active)",
"engine.scraper.is_idle()",
"engine.spider.name",
"engine.spider_is_idle(engine.spider)",
"engine.slot.closing",
"len(engine.slot.inprogress)",
"len(engine.slot.scheduler.dqs or [])",
"len(engine.slot.scheduler.mqs)",
"len(engine.scraper.slot.queue)",
"len(engine.scraper.slot.active)",
"engine.scraper.slot.active_size",
"engine.scraper.slot.itemproc_size",
"engine.scraper.slot.needs_backout()",
]
checks = []
for test in tests:
try:
checks += [(test, eval(test))]
except Exception as e:
checks += [(test, f"{type(e).__name__} (exception)")]
return checks
def format_engine_status(engine=None):
checks = get_engine_status(engine)
s = "Execution engine status\n\n"
for test, result in checks:
s += f"{test:<47} : {result}\n"
s += "\n"
return s
def print_engine_status(engine):
print(format_engine_status(engine))
| 27.510204
| 65
| 0.609792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 700
| 0.519288
|
0c1da110a449d15b92ca6653ffd9fc76029d3fee
| 2,588
|
py
|
Python
|
share/pegasus/init/population/scripts/full_res_pop_raster.py
|
hariharan-devarajan/pegasus
|
d0641541f2eccc69dd6cc5a09b0b51303686d3ac
|
[
"Apache-2.0"
] | null | null | null |
share/pegasus/init/population/scripts/full_res_pop_raster.py
|
hariharan-devarajan/pegasus
|
d0641541f2eccc69dd6cc5a09b0b51303686d3ac
|
[
"Apache-2.0"
] | null | null | null |
share/pegasus/init/population/scripts/full_res_pop_raster.py
|
hariharan-devarajan/pegasus
|
d0641541f2eccc69dd6cc5a09b0b51303686d3ac
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
from typing import Dict
import optparse
import sys
import numpy as np
import rasterio
from rasterio import features
def main(county_pop_file, spatial_dist_file, fname_out, no_data_val=-9999):
'''
county_pop_file: County level population estimates
spatial_dist_file: Spatial projection of population distribution
'''
# -------------------------------------
# Open and read raster file with county
# level population estimates
# -------------------------------------
with rasterio.open(county_pop_file) as rastf:
county_pop = rastf.read()
nodatacp = rastf.nodata
# --------------------------------------------------------------
# Open and read raster file with spatial population distribution
# --------------------------------------------------------------
with rasterio.open(spatial_dist_file) as rastf:
pop_dist = rastf.read()
nodatasp = rastf.nodata
prf = rastf.profile
county_pop = np.squeeze(county_pop)
pop_dist = np.squeeze(pop_dist)
pop_est = np.ones(pop_dist.shape)*no_data_val
ind1 = np.where(county_pop.flatten() != nodatacp)[0]
ind2 = np.where(pop_dist.flatten() != nodatasp)[0]
ind = np.intersect1d(ind1, ind2)
ind2d = np.unravel_index(ind, pop_dist.shape)
pop_est[ind2d] = county_pop[ind2d] * pop_dist[ind2d]
pop_est[ind2d] = np.round(pop_est[ind2d])
# Update raster meta-data
prf.update(nodata=no_data_val)
# Write out spatially distributed population estimate to raster
with open(fname_out, "wb") as fout:
with rasterio.open(fout.name, 'w', **prf) as out_raster:
out_raster.write(pop_est.astype(rasterio.float32), 1)
argparser = optparse.OptionParser()
argparser.add_option('--population-file', action='store', dest='pop_file',
help='County level population estimates')
argparser.add_option('--dist-file', action='store', dest='dist_file',
help='Spatial projection of population distribution')
argparser.add_option('--out-file', action='store', dest='out_file',
help='Filename of the output')
(options, args) = argparser.parse_args()
if not options.pop_file:
print('Please specify a population file with --population-file')
sys.exit(1)
if not options.dist_file:
print('Please specify a distribution file with --dist-file')
sys.exit(1)
if not options.out_file:
print('Please specify the name of the output with --out-file')
sys.exit(1)
main(options.pop_file, options.dist_file, options.out_file)
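The option parser above requires all three paths; a hypothetical invocation, with placeholder file names, is sketched below.

# Hypothetical invocation of the script above (all file names are placeholders):
#   python full_res_pop_raster.py --population-file county_pop.tif \
#                                 --dist-file pop_dist.tif \
#                                 --out-file pop_estimate.tif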
| 33.179487
| 75
| 0.63524
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 952
| 0.367852
|
0c1e7c3ccf6eceb66230761a4bde8362593a8064
| 9,557
|
py
|
Python
|
TestCase/pr_test_case.py
|
openeuler-mirror/ci-bot
|
c50056ff73670bc0382e72cf8c653c01e1aed5e1
|
[
"MulanPSL-1.0"
] | 1
|
2020-01-12T07:35:34.000Z
|
2020-01-12T07:35:34.000Z
|
TestCase/pr_test_case.py
|
openeuler-mirror/ci-bot
|
c50056ff73670bc0382e72cf8c653c01e1aed5e1
|
[
"MulanPSL-1.0"
] | null | null | null |
TestCase/pr_test_case.py
|
openeuler-mirror/ci-bot
|
c50056ff73670bc0382e72cf8c653c01e1aed5e1
|
[
"MulanPSL-1.0"
] | 2
|
2020-03-04T02:09:14.000Z
|
2020-03-07T03:00:40.000Z
|
import os
import requests
import subprocess
import time
import yaml
class PullRequestOperation(object):
def __init__(self, owner, repo, local_owner):
"""initialize owner, repo and access_token"""
self.owner = owner
self.repo = repo
self.local_owner = local_owner
self.access_token = os.getenv('ACCESS_TOKEN', '')
def git_clone(self):
"""git clone code"""
subprocess.call("[ -d {} ]; if [ $? -eq 0 ]; then echo 'destination repo exists, pass...'; else git clone https://gitee.com/{}/{}.git; fi".format(self.repo, self.local_owner, self.repo), shell=True)
def change_file(self):
"""change file: Test whether test.txt exists.Remove test.txt if it exists, or touch test.txt"""
subprocess.call(
"cd {}/; [ -f test.txt ]; if [ $? -eq 0 ]; then rm test.txt; echo 'remove test.txt'; else touch test.txt; echo 'touch test.txt'; fi".format(self.repo), shell=True)
def write_2_file(self):
"""write some info to the test file"""
subprocess.call(
'cd {}/; echo "hello" > test.txt; git add .; git commit -m "change test.txt"; git push'.format(self.repo),
shell=True)
def git_push(self):
"""push code"""
subprocess.call("cd {}/; git add . ; git commit -m 'test'; git push".format(self.repo), shell=True)
def pull_request(self):
"""create a pull request"""
head = '{}:master'.format(self.local_owner)
data = {
'access_token': self.access_token,
'title': 'test',
'head': head,
'base': 'master'
}
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls'.format(self.owner, self.repo)
r = requests.post(url, data)
if r.status_code == 201:
number = r.json()['number']
return number
if r.status_code == 400:
number = r.json()['message'].split('!')[1].split(' ')[0]
return number
def comment(self, number, body):
"""comment under the pull request"""
data = {
'access_token': self.access_token,
'body': body
}
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/comments'.format(self.owner, self.repo, number)
print('comment body: {}'.format(body))
requests.post(url, data)
def comment_by_others(self, number, body):
"""comment under the pull request"""
data = {
'access_token': os.getenv('ACCESS_TOKEN_TWO', ''),
'body': body
}
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/comments'.format(self.owner, self.repo, number)
print('comment body: {}'.format(body))
requests.post(url, data)
def get_all_comments(self, number):
"""get all comments under the pull request"""
params = 'access_token={}'.format(self.access_token)
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/comments?per_page=100'.format(self.owner, self.repo,
number)
r = requests.get(url, params)
comments = []
if r.status_code == 404:
return r.json()['message']
if r.status_code == 200:
if len(r.json()) > 0:
for comment in r.json():
comments.append(comment['body'])
return comments
else:
return comments
def get_all_labels(self, number):
"""get all labels belong to the pull request"""
params = 'access_token={}'.format(self.access_token)
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/labels'.format(self.owner, self.repo, number)
r = requests.get(url, params)
labels = []
if r.status_code == 200:
if len(r.json()) > 0:
for i in r.json():
labels.append(i['name'])
return labels
else:
return labels
def add_labels_2_pr(self, number, data):
"""add labels to a pull request"""
params = {'access_token': self.access_token}
data = data
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/labels'.format(self.owner, self.repo, number)
requests.post(url, data=data, params=params)
def get_pr_status(self, number):
"""query the status of the pull request to see whether it was merged"""
params = {'access_token': self.access_token}
url = 'https://gitee.com/api/v5/repos/{}/{}/pulls/{}/merge'.format(self.owner, self.repo, number)
r = requests.get(url, params)
return r.status_code
if __name__ == '__main__':
with open('config.yaml', 'r') as f:
info = yaml.safe_load(f.read())['test case']
owner = info[0]['owner']
repo = info[1]['repo']
local_owner = info[2]['local_owner']
pr = PullRequestOperation(owner, repo, local_owner)
print('Prepare:')
print('step 1/4: git clone')
pr.git_clone()
print('\nstep 2/4: change file')
pr.change_file()
print('\nstep 3/4: git push')
pr.git_push()
print('\nstep 4/4: pull request')
number = pr.pull_request()
print('the number of the pull request: {}'.format(number))
time.sleep(10)
print('\n\nTest:')
print('test case 1: without comments by contributor')
comments = pr.get_all_comments(number)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
errors = 0
if len(comments) == 0:
print('no "Welcome to ci-bot Community."')
print('no "Thanks for your pull request."')
else:
if 'Welcome to ci-bot Community.' not in comments[0]:
print('no "Welcome to ci-bot Community."')
errors += 1
if 'Thanks for your pull request.' not in comments[-1]:
print('no "Thanks for your pull request."')
errors += 1
if len(labels) == 0:
print('no label "ci-bot-cla/yes" or "ci-bot-cla/no"')
errors += 1
elif len(labels) > 0:
if 'ci-bot-cla/yes' not in labels:
print('no label "ci-bot-cla/yes"')
errors += 1
if 'ci-bot-cla/no' not in labels:
print('no label "ci-bot-cla/no"')
errors += 1
if errors == 0:
print('test case 1 succeeded')
else:
print('test case 1 failed')
print('\ntest case 2: /lgtm')
pr.comment(number, '/lgtm')
time.sleep(10)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
comments = pr.get_all_comments(number)
if 'can not be added in your self-own pull request' in comments[-1]:
print('test case 2 succeeded')
else:
print('test case 2 failed')
print(comments[-1])
print('\ntest case 3: comment /lgtm by others')
pr.comment_by_others(number, '/lgtm')
time.sleep(10)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
comments = pr.get_all_comments(number)
if 'Thanks for your review' in comments[-1]:
print('test case 3 succeeded')
else:
print('test case 3 failed')
print(comments[-1])
print('\ntest case 4: comment /approve by others')
pr.comment_by_others(number, '/approve')
time.sleep(10)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
comments = pr.get_all_comments(number)
if 'has no permission to add' in comments[-1]:
print('test case 4 succeeded')
else:
print('test case 4 failed')
print(comments[-1])
print('\ntest case 5: /approve')
pr.comment(number, '/approve')
time.sleep(10)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
comments = pr.get_all_comments(number)
if '***approved*** is added in this pull request by' in comments[-1]:
print('test case 5 succeeded')
else:
print('test case 5 failed')
print(comments[-1])
print('\ntest case 6: tag stat/need-squash')
labels_before_commit = pr.get_all_labels(number)
print('labels_before_commit: {}'.format(labels_before_commit))
pr.write_2_file()
time.sleep(10)
lables_after_commit = pr.get_all_labels(number)
print('lables_after_commit: {}'.format(lables_after_commit))
if 'lgtm' not in labels and 'stat/need-squash' in lables_after_commit:
print('test case 6 succeeded')
else:
print('test case 6 failed')
print('\ntest case 7: add labels')
pr.add_labels_2_pr(number, '["lgtm"]')
time.sleep(10)
labels = pr.get_all_labels(number)
print('labels: {}'.format(labels))
if "lgtm" in labels:
print('test case 7 succeeded')
else:
print('test case 7 failed')
print('\ntest case 8: check-pr')
pr.comment(number, '/check-pr')
time.sleep(10)
code = pr.get_pr_status(number)
if code == 200:
print('test case 8 succeeded')
else:
print('failed code: {}'.format(code))
print('test case 8 failed')
| 38.381526
| 206
| 0.548917
| 4,636
| 0.485089
| 0
| 0
| 0
| 0
| 0
| 0
| 3,092
| 0.323532
|
0c1eb2fd9329de0c031fe686c52f4c0e67ec1227
| 1,103
|
py
|
Python
|
tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py
|
Hybrid-Cloud/hybrid-tempest
|
319e90c6fa6e46925b495c93cd5258f088a30ec0
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py
|
Hybrid-Cloud/hybrid-tempest
|
319e90c6fa6e46925b495c93cd5258f088a30ec0
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/hybrid_cloud/compute/flavors/test_flavors_operations.py
|
Hybrid-Cloud/hybrid-tempest
|
319e90c6fa6e46925b495c93cd5258f088a30ec0
|
[
"Apache-2.0"
] | null | null | null |
import testtools
from oslo_log import log
from tempest.api.compute import base
import tempest.api.compute.flavors.test_flavors as FlavorsV2Test
import tempest.api.compute.flavors.test_flavors_negative as FlavorsListWithDetailsNegativeTest
import tempest.api.compute.flavors.test_flavors_negative as FlavorDetailsNegativeTest
from tempest.common.utils import data_utils
from tempest.lib import exceptions as lib_exc
from tempest.lib import decorators
from tempest import test
from tempest import config
CONF = config.CONF
LOG = log.getLogger(__name__)
class HybridFlavorsV2TestJSON(FlavorsV2Test.FlavorsV2TestJSON):
"""Test flavors"""
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorsListWithDetailsNegativeTestJSON(FlavorsListWithDetailsNegativeTest.FlavorsListWithDetailsNegativeTestJSON):
"""Test FlavorsListWithDetails"""
@testtools.skip("testscenarios are not active.")
@test.SimpleNegativeAutoTest
class HybridFlavorDetailsNegativeTestJSON(FlavorDetailsNegativeTest.FlavorDetailsNegativeTestJSON):
"""Test FlavorsListWithDetails"""
| 36.766667
| 126
| 0.853128
| 387
| 0.350861
| 0
| 0
| 457
| 0.414325
| 0
| 0
| 146
| 0.132366
|
0c1f09091be19e77ace869bcb2f31a8df0eb57b2
| 8,910
|
py
|
Python
|
dycall/exports.py
|
demberto/DyCall
|
b234e7ba535eae71234723bb3d645eb986f96a30
|
[
"MIT"
] | null | null | null |
dycall/exports.py
|
demberto/DyCall
|
b234e7ba535eae71234723bb3d645eb986f96a30
|
[
"MIT"
] | null | null | null |
dycall/exports.py
|
demberto/DyCall
|
b234e7ba535eae71234723bb3d645eb986f96a30
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
dycall.exports
~~~~~~~~~~~~~~
Contains `ExportsFrame` and `ExportsTreeView`.
"""
from __future__ import annotations
import logging
import pathlib
from typing import TYPE_CHECKING
import ttkbootstrap as tk
from ttkbootstrap import ttk
from ttkbootstrap.dialogs import Messagebox
from ttkbootstrap.localization import MessageCatalog as MsgCat
from ttkbootstrap.tableview import Tableview
from dycall._widgets import _TrLabelFrame
from dycall.types import Export, PEExport
from dycall.util import StaticThemedTooltip, get_img
log = logging.getLogger(__name__)
class ExportsFrame(_TrLabelFrame):
"""Contains **Exports** combobox and a button for `ExportsTreeView`.
Use command line argument `--exp` to select an export from the library on
launch. Combobox validates export name.
TODO: Combobox works like google search (auto-suggest, recents etc.)
"""
def __init__(
self,
root: tk.Window,
selected_export: tk.StringVar,
sort_order: tk.StringVar,
output: tk.StringVar,
status: tk.StringVar,
is_loaded: tk.BooleanVar,
is_native: tk.BooleanVar,
is_reinitialised: tk.BooleanVar,
lib_path: tk.StringVar,
exports: list[Export],
):
log.debug("Initalising")
super().__init__(text="Exports")
self.__root = root
self.__selected_export = selected_export
self.__sort_order = sort_order
self.__output = output
self.__status = status
self.__is_loaded = is_loaded
self.__is_native = is_native
self.__is_reinitialised = is_reinitialised
self.__lib_path = lib_path
self.__exports = exports
self.__export_names: list[str] = []
self.cb = ttk.Combobox(
self,
state="disabled",
textvariable=selected_export,
validate="focusout",
validatecommand=(self.register(self.cb_validate), "%P"),
)
# ! self.cb.bind("<Return>", lambda *_: self.cb_validate) # Doesn't work
self.cb.bind("<<ComboboxSelected>>", self.cb_selected)
self.__list_png = get_img("list.png")
self.lb = ttk.Label(self, image=self.__list_png)
self.lb.bind(
"<Enter>", lambda *_: StaticThemedTooltip(self.lb, "List of exports")
)
self.lb.bind(
"<ButtonRelease-1>", lambda *_: status.set("Load a library first!")
)
self.lb.pack(padx=(0, 5), pady=5, side="right")
self.cb.pack(fill="x", padx=5, pady=5)
self.bind_all("<<PopulateExports>>", lambda *_: self.set_cb_values())
self.bind_all(
"<<ToggleExportsFrame>>", lambda event: self.set_state(event.state == 1)
)
self.bind_all("<<SortExports>>", lambda *_: self.sort())
log.debug("Initialised")
def cb_selected(self, *_):
"""Callback to handle clicks on **Exports** combobox.
Resets **Output** and activates/deactivates `FunctionFrame`.
"""
log.debug("%s selected", self.__selected_export.get())
self.__output.set("")
if self.__is_native.get():
self.__root.event_generate("<<ToggleFunctionFrame>>", state=1)
else:
self.__root.event_generate("<<ToggleFunctionFrame>>", state=0)
def cb_validate(self, *_) -> bool:
"""Callback to handle keyboard events on **Exports** combobox.
Activates `FunctionFrame` when the text in the combobox
is a valid export name. Deactivates it otherwise.
"""
log.debug("Validating Exports combobox")
try:
# Don't validate if combobox dropdown arrow was pressed
self.cb.state()[1] == "pressed"
except IndexError:
exp = self.cb.get()
if exp:
if exp in self.__export_names:
self.cb_selected()
return True
self.__root.event_generate("<<ToggleFunctionFrame>>", state=1)
return False
return True
def set_state(self, activate: bool = True):
"""Activates/deactivates **Exports** combobox.
Args:
activate (bool, optional): Activated when True, deactivated when
False. Defaults to True.
"""
log.debug("Called with activate=%s", activate)
state = "normal" if activate else "disabled"
self.cb.configure(state=state)
def set_cb_values(self):
"""Demangles and sets the export names to the **Exports** combobox."""
exports = self.__exports
if not self.__is_reinitialised.get() or self.__is_loaded.get():
num_exports = len(exports)
log.info("Found %d exports", num_exports)
self.__status.set(f"{num_exports} exports found")
failed = []
for exp in exports:
if isinstance(exp, PEExport):
if hasattr(exp, "exc"):
failed.append(exp.name)
if failed:
Messagebox.show_warning(
f"These export names couldn't be demangled: {failed}",
"Demangle Errors",
parent=self.__root,
)
self.__export_names = names = list(e.demangled_name for e in exports)
self.set_state()
self.cb.configure(values=names)
selected_export = self.__selected_export.get()
if selected_export:
if selected_export not in names:
err = "%s not found in export names"
log.error(err, selected_export)
Messagebox.show_error(
err % selected_export, "Export not found", parent=self.__root
)
self.cb.set("")
else:
# Activate function frame when export name is passed from command line
self.cb_selected()
self.lb.configure(cursor="hand2")
self.lb.bind(
"<ButtonRelease-1>",
lambda *_: ExportsTreeView(
self.__exports, pathlib.Path(self.__lib_path.get()).name
),
add=False,
)
def sort(self, *_):
"""Sorts the list of export names and repopulates the combobox."""
if self.__is_loaded.get():
sorter = self.__sort_order.get()
log.debug("Sorting w.r.t. %s", sorter)
names = self.__export_names
if sorter == "Name (ascending)":
names.sort()
elif sorter == "Name (descending)":
names.sort(reverse=True)
self.cb.configure(values=names)
self.__status.set("Sort order changed")
class ExportsTreeView(tk.Toplevel):
"""Displays detailed information about all the exports of a library.
Following information is displayed:
- Address
- Name
- Demangled name (whenever available)
- Ordinal (Windows only)
"""
def __init__(self, exports: list[Export], lib_name: str):
log.debug("Initialising")
super().__init__(
title=f"{MsgCat.translate('Exports')} - {lib_name}", size=(400, 500)
)
self.__old_height = 0
self.withdraw()
coldata = [
"Address",
"Name",
{"text": "Demangled", "stretch": True},
]
is_pe = isinstance(exports[0], PEExport)
if is_pe:
coldata.insert(0, "Ordinal")
self.__tv = tv = Tableview(
self,
searchable=True,
autofit=True,
coldata=coldata,
paginated=True,
pagesize=25,
)
tv.pack(fill="both", expand=True)
for e in exports:
values = [e.address, e.name, e.demangled_name] # type: ignore
if is_pe:
if TYPE_CHECKING:
assert isinstance(e, PEExport) # nosec
values.insert(0, e.ordinal)
tv.insert_row(values=values)
tv.load_table_data()
self.bind(
"<F11>",
lambda *_: self.attributes(
"-fullscreen", not self.attributes("-fullscreen")
),
)
self.bind("<Configure>", self.resize)
self.deiconify()
self.focus_set()
log.debug("Initialised")
def resize(self, event: tk.tk.Event):
"""Change the treeview's `pagesize` whenever this window is resized.
I came up with this because there is no way to show a vertical
scrollbar in/for the treeview.
"""
new_height = event.height
if event.widget.widgetName == "toplevel" and new_height != self.__old_height:
# ! This is an expensive call, avoid it whenever possible
self.__tv.pagesize = int(new_height) / 20
self.__old_height = new_height
| 34.534884
| 86
| 0.579237
| 8,313
| 0.932997
| 0
| 0
| 0
| 0
| 0
| 0
| 2,580
| 0.289562
|
0c1fb0aec727010060874060c9a7121a40357346
| 1,899
|
py
|
Python
|
src/homologs/filter_by_occupancy.py
|
jlanga/smsk_selection
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 4
|
2021-07-18T05:20:20.000Z
|
2022-01-03T10:22:33.000Z
|
src/homologs/filter_by_occupancy.py
|
jlanga/smsk_selection
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 1
|
2017-08-21T07:26:13.000Z
|
2018-11-08T13:59:48.000Z
|
src/homologs/filter_by_occupancy.py
|
jlanga/smsk_orthofinder
|
08070c6d4a6fbd9320265e1e698c95ba80f81123
|
[
"MIT"
] | 2
|
2021-07-18T05:20:26.000Z
|
2022-03-31T18:23:31.000Z
|
#!/usr/bin/env python
"""
Filter a fasta alignment according to its occupancy:
filter_by_occupancy.py fasta_raw.fa fasta_trimmed.fa 0.5
"""
import os
import sys
from helpers import fasta_to_dict
def filter_by_occupancy(filename_in, filename_out, min_occupancy=0.5):
"""
Filter an alignment in fasta format according to the occupancy of the
columns. Store the results in fasta format.
"""
fasta_raw = fasta_to_dict(filename_in)
n_sequences = len(fasta_raw.keys())
alignment_length = len(fasta_raw[tuple(fasta_raw.keys())[0]])
columns = tuple(
"".join(fasta_raw[seqname][column_index] for seqname in fasta_raw.keys())
for column_index in range(alignment_length)
)
columns_to_keep = []
for column_number, column in enumerate(columns):
n_gaps = column.count("-")
if 1 - float(n_gaps) / float(n_sequences) >= min_occupancy:
columns_to_keep.append(column_number)
fasta_trimmed = {}
    for seqname, sequence in fasta_raw.items():
        fasta_trimmed[seqname] = "".join(
            sequence[column_to_keep] for column_to_keep in columns_to_keep
        )
    dirname_out = os.path.dirname(filename_out)
    if dirname_out and not os.path.exists(dirname_out):
        os.makedirs(dirname_out)
with open(filename_out, "w") as f_out:
for seqname, sequence in fasta_trimmed.items():
f_out.write(
">{seqname}\n{sequence}\n".format(seqname=seqname, sequence=sequence)
)
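# A small sketch of the occupancy rule applied above, assuming a column is
# kept when (1 - gaps/sequences) >= min_occupancy; the helper name is
# illustrative and not part of the pipeline.
def _column_is_kept(column, min_occupancy=0.5):
    n_gaps = column.count("-")
    return 1 - float(n_gaps) / float(len(column)) >= min_occupancy

assert _column_is_kept("AC-T")      # 1 gap in 4 sequences -> occupancy 0.75
assert not _column_is_kept("A---")  # 3 gaps in 4 sequences -> occupancy 0.25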
if __name__ == "__main__":
if len(sys.argv) != 4:
sys.stderr.write(
"ERROR: incorrect number of arguments.\n"
"python filter_by_occupancy.py fastain fastaout min_occupancy\n"
)
sys.exit(1)
FASTA_IN = sys.argv[1]
FASTA_OUT = sys.argv[2]
MIN_OCCUPANCY = float(sys.argv[3])
filter_by_occupancy(FASTA_IN, FASTA_OUT, MIN_OCCUPANCY)
| 28.772727
| 85
| 0.664034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 424
| 0.223275
|
0c20b529cd83a9fd598afa8e482ff4d521f8b78a
| 954
|
py
|
Python
|
setup.py
|
eppeters/xontrib-dotenv
|
f866f557592d822d1ecb2b607c63c4cdecb580e4
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
eppeters/xontrib-dotenv
|
f866f557592d822d1ecb2b607c63c4cdecb580e4
|
[
"BSD-2-Clause"
] | null | null | null |
setup.py
|
eppeters/xontrib-dotenv
|
f866f557592d822d1ecb2b607c63c4cdecb580e4
|
[
"BSD-2-Clause"
] | 1
|
2020-03-16T00:39:57.000Z
|
2020-03-16T00:39:57.000Z
|
#!/usr/bin/env python
"""
xontrib-dotenv
-----
Automatically reads .env file from current working directory
or parentdirectories and push variables to environment.
"""
from setuptools import setup
setup(
name='xontrib-dotenv',
version='0.1',
description='Reads .env files into environment',
long_description=__doc__,
license='BSD',
url='https://github.com/urbaniak/xontrib-dotenv',
author='Krzysztof Urbaniak',
packages=['xontrib'],
package_dir={'xontrib': 'xontrib'},
package_data={'xontrib': ['*.xsh']},
zip_safe=True,
include_package_data=False,
platforms='any',
install_requires=[
'xonsh>=0.4.6',
],
classifiers=[
'Environment :: Console',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: System :: Shells',
'Topic :: System :: System Shells',
]
)
| 25.105263
| 60
| 0.631027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 546
| 0.572327
|
0c20b7c255ec391f7dad36b9c36ded1071de5e8b
| 222
|
py
|
Python
|
tests/sample_app/urls.py
|
dreipol/meta-tagger
|
c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2
|
[
"BSD-3-Clause"
] | 3
|
2016-05-30T07:48:54.000Z
|
2017-02-08T21:16:03.000Z
|
tests/sample_app/urls.py
|
dreipol/meta-tagger
|
c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2
|
[
"BSD-3-Clause"
] | null | null | null |
tests/sample_app/urls.py
|
dreipol/meta-tagger
|
c1a2f1f8b0c051018a5bb75d4e579d27bd2c27b2
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.conf.urls import url
from tests.sample_app.views import NewsArticleDetailView
urlpatterns = [
url(r'^(?P<pk>\d+)/$', NewsArticleDetailView.as_view(), name='news-article-detail'),
]
| 27.75
| 88
| 0.702703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 61
| 0.274775
|
0c2115fcd6d26f9ce3cdf5ced126fdd61f06356a
| 105
|
py
|
Python
|
RCNN/config_RCNN.py
|
andrew-miao/ECE657A_Project-text-classification
|
5ad77ea6d6c685eecfe3dc189d9ea41f5b3ac1d1
|
[
"MIT"
] | 4
|
2020-08-11T18:31:55.000Z
|
2020-08-25T15:30:53.000Z
|
RCNN/config_RCNN.py
|
andrew-miao/ECE657A_Project-text-classification
|
5ad77ea6d6c685eecfe3dc189d9ea41f5b3ac1d1
|
[
"MIT"
] | null | null | null |
RCNN/config_RCNN.py
|
andrew-miao/ECE657A_Project-text-classification
|
5ad77ea6d6c685eecfe3dc189d9ea41f5b3ac1d1
|
[
"MIT"
] | null | null | null |
class Config(object):
embedding_size = 300
n_layers = 1
hidden_size = 128
drop_prob = 0.2
| 21
| 24
| 0.647619
| 105
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c23261891d98100b6ddda0879a36f77857b6f48
| 624
|
py
|
Python
|
utils/bbox_utils/center_to_corner.py
|
Jaskaran197/Red-blood-cell-detection-SSD
|
a33b330ad17454a7425aa7f57818c0a41b4e0ff9
|
[
"MIT"
] | null | null | null |
utils/bbox_utils/center_to_corner.py
|
Jaskaran197/Red-blood-cell-detection-SSD
|
a33b330ad17454a7425aa7f57818c0a41b4e0ff9
|
[
"MIT"
] | null | null | null |
utils/bbox_utils/center_to_corner.py
|
Jaskaran197/Red-blood-cell-detection-SSD
|
a33b330ad17454a7425aa7f57818c0a41b4e0ff9
|
[
"MIT"
] | null | null | null |
import numpy as np
def center_to_corner(boxes):
""" Convert bounding boxes from center format (cx, cy, width, height) to corner format (xmin, ymin, xmax, ymax)
Args:
        - boxes: numpy array or tensor containing all the boxes to be converted
Returns:
- A numpy array or tensor of converted boxes
"""
temp = boxes.copy()
temp[..., 0] = boxes[..., 0] - (boxes[..., 2] / 2) # xmin
temp[..., 1] = boxes[..., 1] - (boxes[..., 3] / 2) # ymin
temp[..., 2] = boxes[..., 0] + (boxes[..., 2] / 2) # xmax
temp[..., 3] = boxes[..., 1] + (boxes[..., 3] / 2) # ymax
return temp
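# A quick usage sketch of center_to_corner() above on a single box, assuming
# (cx, cy, w, h) = (10, 10, 4, 2) maps to (xmin, ymin, xmax, ymax) = (8, 9, 12, 11).
_example_boxes = np.array([[10.0, 10.0, 4.0, 2.0]])
assert np.allclose(center_to_corner(_example_boxes), [[8.0, 9.0, 12.0, 11.0]])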
| 32.842105
| 115
| 0.535256
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 301
| 0.482372
|
0c24e2918c9577a7b38b38b7b54cfb7d7c91ca26
| 337
|
py
|
Python
|
pytest/np.py
|
i0Ek3/disintegration
|
b59307f8166b93d76fab35af180a5cf3ffa51b09
|
[
"MIT"
] | null | null | null |
pytest/np.py
|
i0Ek3/disintegration
|
b59307f8166b93d76fab35af180a5cf3ffa51b09
|
[
"MIT"
] | null | null | null |
pytest/np.py
|
i0Ek3/disintegration
|
b59307f8166b93d76fab35af180a5cf3ffa51b09
|
[
"MIT"
] | null | null | null |
import numpy as np
arrays = [
    np.linspace([1, 2, 3], 3),
    np.array([1, 2, 3]),
    np.arange(3),
    np.arange(8).reshape(2, 4),
    np.zeros((2, 3)),
    np.zeros((2, 3)).T,
    np.ones((3, 1)),
    np.eye(3),
    np.full((3, 3), 1),
    np.random.rand(3),
    np.random.rand(3, 3),
    np.random.uniform(5, 15, 3),
    np.random.randn(3),
    np.random.normal(3, 2.5, 3),
]
print(arrays)
| 17.736842
| 33
| 0.590504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0c25a90b221d6137090c0e77b536a592e4921a3d
| 337
|
py
|
Python
|
api/data_explorer/models/__init__.py
|
karamalhotra/data-explorer
|
317f4d7330887969ab6bfe2ca23ec24163472c55
|
[
"BSD-3-Clause"
] | null | null | null |
api/data_explorer/models/__init__.py
|
karamalhotra/data-explorer
|
317f4d7330887969ab6bfe2ca23ec24163472c55
|
[
"BSD-3-Clause"
] | null | null | null |
api/data_explorer/models/__init__.py
|
karamalhotra/data-explorer
|
317f4d7330887969ab6bfe2ca23ec24163472c55
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# flake8: noqa
from __future__ import absolute_import
# import models into model package
from data_explorer.models.dataset_response import DatasetResponse
from data_explorer.models.facet import Facet
from data_explorer.models.facet_value import FacetValue
from data_explorer.models.facets_response import FacetsResponse
| 33.7
| 65
| 0.860534
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.186944
|
0c2791187a63a4bcc6905cd731c3e9fbdcde2c2b
| 2,288
|
py
|
Python
|
seabird/cli.py
|
nicholas512/seabird
|
23073b2b9a550b86ec155cbe43be9b50e50b8310
|
[
"BSD-3-Clause"
] | 38
|
2015-04-15T08:57:44.000Z
|
2022-03-13T02:51:53.000Z
|
seabird/cli.py
|
nicholas512/seabird
|
23073b2b9a550b86ec155cbe43be9b50e50b8310
|
[
"BSD-3-Clause"
] | 54
|
2015-01-28T03:53:43.000Z
|
2021-12-11T07:37:24.000Z
|
seabird/cli.py
|
nicholas512/seabird
|
23073b2b9a550b86ec155cbe43be9b50e50b8310
|
[
"BSD-3-Clause"
] | 22
|
2015-09-22T12:24:22.000Z
|
2022-01-31T22:27:16.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Command line utilities for package Seabird
"""
import click
from seabird.exceptions import CNVError
from .cnv import fCNV
from .netcdf import cnv2nc
@click.group()
def cli():
""" Utilities for seabird files
"""
pass
@cli.command(name='cnvdump')
@click.argument('inputfilename', type=click.Path(exists=True))
def dump(inputfilename):
"""Dump the .cnv content as text
    Regardless of the .cnv version, this command shows its content
    in a unified pattern, as ASCII text.
Consider the idea of a descriptor file with default values.
"""
try:
data = fCNV(inputfilename)
except CNVError as e:
print("\033[91m%s\033[0m" % e.msg)
return
except:
raise
print("file: %s" % inputfilename)
print("Global attributes")
for a in sorted(data.attrs.keys()):
print("\t\033[93m%s\033[0m: %s" % (a, data.attrs[a]))
print("\nVariabes")
for k in data.keys():
print("\033[91m%s\033[0m" % k)
for a in data[k].attrs.keys():
print("\t\033[93m%s\033[0m: %s" % (a, data[k].attrs[a]))
@cli.command(name='cnv2nc')
@click.option('--outputfilename', default=None,
help='The output netCDF filename.')
@click.argument('inputfilename', type=click.Path(exists=True))
def nc(inputfilename, outputfilename):
""" Export a CNV file as a netCDF
"""
if outputfilename is None:
outputfilename = inputfilename.replace('.cnv','.nc')
click.echo('Saving on %s' % outputfilename)
data = fCNV(inputfilename)
cnv2nc(data, outputfilename)
@cli.command(name='ctdqc')
@click.option('--outputfilename', default=None,
help='The output netCDF filename.')
@click.option('--config', default=None,
help='The output netCDF filename.')
@click.argument('inputfilename', type=click.Path(exists=True))
def qc(inputfilename, outputfilename, config):
"""
"""
from cotede.qc import ProfileQC, combined_flag
if outputfilename is None:
outputfilename = inputfilename.replace('.cnv', '.nc')
click.echo('Saving on %s' % outputfilename)
data = fCNV(inputfilename)
profile = ProfileQC(data, cfg=config, verbose=False)
print(profile.flags)
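# A hedged sketch of driving the Click group above programmatically via
# Click's test runner; "profile.cnv" is an illustrative filename and must
# exist locally for the cnvdump command to succeed.
if __name__ == "__main__":
    from click.testing import CliRunner

    result = CliRunner().invoke(cli, ["cnvdump", "profile.cnv"])
    print(result.output)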
| 28.962025
| 68
| 0.645979
| 0
| 0
| 0
| 0
| 2,077
| 0.90778
| 0
| 0
| 807
| 0.35271
|
0c2931d41844c5cadfbc0f4d8cd12cf1c0991cb4
| 1,752
|
py
|
Python
|
main.py
|
Mitch-the-Fridge/pi
|
70ab24dab9b06722084e93f783dc541747d46720
|
[
"MIT"
] | null | null | null |
main.py
|
Mitch-the-Fridge/pi
|
70ab24dab9b06722084e93f783dc541747d46720
|
[
"MIT"
] | null | null | null |
main.py
|
Mitch-the-Fridge/pi
|
70ab24dab9b06722084e93f783dc541747d46720
|
[
"MIT"
] | 1
|
2020-05-31T17:13:42.000Z
|
2020-05-31T17:13:42.000Z
|
#!/usr/bin/env python3
#import face_recognition
import cv2
import numpy as np
from datetime import datetime, timedelta
from buffer import Buffer
from collections import deque
import os
from copy import copy
import archive
WEIGHT_EPS = 5
TIMEOUT = 5 # in seconds
def poll_weight():
return 500
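# A tiny sketch of the trigger rule used in the loop below: a clip only starts
# when the weight changes by at least WEIGHT_EPS (units assumed to be grams).
assert abs(505 - 500) >= WEIGHT_EPS        # a 5-unit jump starts a clip
assert not (abs(502 - 500) >= WEIGHT_EPS)  # a 2-unit drift is ignored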
# with an fps we then have a "before" duration of 15 seconds
video_buffer = Buffer(300)
building = False
clip = None
previous_weight = poll_weight()
last_weight_event = None
cap = cv2.VideoCapture(0)
while True:
archive.try_upload_buffer()
# if enough_diff is true we will actually start the recording
weight = poll_weight()
weight_diff = weight - previous_weight
enough_diff = abs(weight_diff) >= WEIGHT_EPS
ret, frame = cap.read()
rgb_frame = cv2.resize(frame, (0, 0), fx=.5, fy=.5)[:, :, ::-1]
#face_locations = face_recognition.face_locations(rgb_frame)
print(
len(video_buffer.q),
len(clip) if clip is not None else 0,
building,
#face_locations
)
point = {
'time': datetime.now(),
#'face_locations': face_locations,
'frame': frame,
'current_weight': weight,
}
if building:
clip.append(point)
else:
video_buffer.add(point)
if not building and enough_diff:
building = True
clip = copy(video_buffer.q)
video_buffer.clear()
elif building and datetime.now() >= last_weight_event + timedelta(seconds=TIMEOUT):
frames = list(clip)
clip = None
building = False
print("creating clip of len", len(frames))
print(archive.create_from_clip(frames))
previous_weight = weight
if enough_diff:
last_weight_event = datetime.now()
| 23.36
| 87
| 0.660388
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 339
| 0.193493
|
0c2a0afb31018189385f06e7bd9d48b8c0f6df9c
| 2,895
|
py
|
Python
|
OpenPNM/Network/models/pore_topology.py
|
Eng-RSMY/OpenPNM
|
a0a057d0f6346c515792459b1da97f05bab383c1
|
[
"MIT"
] | 1
|
2021-03-30T21:38:26.000Z
|
2021-03-30T21:38:26.000Z
|
OpenPNM/Network/models/pore_topology.py
|
Eng-RSMY/OpenPNM
|
a0a057d0f6346c515792459b1da97f05bab383c1
|
[
"MIT"
] | null | null | null |
OpenPNM/Network/models/pore_topology.py
|
Eng-RSMY/OpenPNM
|
a0a057d0f6346c515792459b1da97f05bab383c1
|
[
"MIT"
] | null | null | null |
r"""
===============================================================================
pore_topology -- functions for monitoring and adjusting topology
===============================================================================
"""
import scipy as _sp
def get_subscripts(network, shape, **kwargs):
r"""
Return the 3D subscripts (i,j,k) into the cubic network
Parameters
----------
shape : list
The (i,j,k) shape of the network in number of pores in each direction
"""
if network.num_pores('internal') != _sp.prod(shape):
print('Supplied shape does not match Network size, cannot proceed')
else:
template = _sp.atleast_3d(_sp.empty(shape))
a = _sp.indices(_sp.shape(template))
i = a[0].flatten()
j = a[1].flatten()
k = a[2].flatten()
ind = _sp.vstack((i, j, k)).T
vals = _sp.ones((network.Np, 3))*_sp.nan
vals[network.pores('internal')] = ind
return vals
def adjust_spacing(network, new_spacing, **kwargs):
r"""
    Adjust the pore-to-pore lattice spacing on a cubic network
Parameters
----------
new_spacing : float
The new lattice spacing to apply
Notes
-----
At present this method only applies a uniform spacing in all directions.
    This is a limitation of OpenPNM Cubic Networks in general, and not of the
method.
"""
coords = network['pore.coords']
try:
spacing = network._spacing
coords = coords/spacing*new_spacing
network._spacing = new_spacing
    except AttributeError:
pass
return coords
def reduce_coordination(network, z, mode='random', **kwargs):
r"""
Reduce the coordination number to the specified z value
Parameters
----------
z : int
        The coordination number or number of throats connected to a pore
mode : string, optional
Controls the logic used to trim connections. Options are:
- 'random': (default) Throats will be randomly removed to achieve a
coordination of z
- 'max': All pores will be adjusted to have a maximum coordination of z
(not implemented yet)
Returns
-------
A label array indicating which throats should be trimmed to achieve desired
coordination.
Notes
-----
Pores with only 1 throat will be ignored in all calculations since these
are generally boundary pores.
"""
T_trim = ~network['throat.all']
T_nums = network.num_neighbors(network.pores())
# Find protected throats
T_keep = network.find_neighbor_throats(pores=(T_nums == 1))
if mode == 'random':
z_ave = _sp.average(T_nums[T_nums > 1])
f_trim = (z_ave - z)/z_ave
T_trim = _sp.rand(network.Nt) < f_trim
T_trim = T_trim*(~network.tomask(throats=T_keep))
if mode == 'max':
pass
return T_trim
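# A small sketch of the random-trim fraction used in reduce_coordination above:
# with an average coordination z_ave of 6 and a target z of 4, roughly one
# third of the unprotected throats are flagged for trimming.
_z_ave, _z = 6.0, 4.0
_f_trim = (_z_ave - _z) / _z_ave
assert abs(_f_trim - 1.0 / 3.0) < 1e-12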
| 29.242424
| 79
| 0.587219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,716
| 0.592746
|
0c2a4b436c5eaf17c454eecf85f7cdb41e8c152f
| 9,559
|
py
|
Python
|
contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | 2
|
2015-01-26T07:15:19.000Z
|
2015-11-09T13:42:11.000Z
|
contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
contrib/workflow/SpiffWorkflow/src/SpiffWorkflow/Tasks/Join.py
|
gonicus/clacks
|
da579f0acc4e48cf2e9451417ac6792282cf7ab6
|
[
"ZPL-2.1"
] | null | null | null |
# Copyright (C) 2007 Samuel Abels
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
from SpiffWorkflow.Task import Task
from SpiffWorkflow.Exception import WorkflowException
from SpiffWorkflow.Operators import valueof
from TaskSpec import TaskSpec
class Join(TaskSpec):
"""
A task for synchronizing branches that were previously split using a
conditional task, such as MultiChoice. It has two or more incoming
branches and one or more outputs.
"""
def __init__(self, parent, name, split_task = None, **kwargs):
"""
Constructor.
@type parent: Workflow
@param parent: A reference to the parent (usually a workflow).
@type name: string
@param name: A name for the task.
@type split_task: TaskSpec
@param split_task: The task that was previously used to split the
branch.
@type kwargs: dict
@param kwargs: The following options are supported:
- threshold: Specifies how many incoming branches need
to complete before the task triggers.
When the limit is reached, the task fires but still
expects all other branches to complete.
You may also pass an attribute, in which case the
value is determined at runtime.
- cancel: When True, any remaining incoming branches
are cancelled as soon as the discriminator is activated.
The default is False.
"""
TaskSpec.__init__(self, parent, name, **kwargs)
self.split_task = split_task
self.threshold = kwargs.get('threshold', None)
self.cancel_remaining = kwargs.get('cancel', False)
def _branch_is_complete(self, my_task):
# Determine whether that branch is now completed by checking whether
# it has any waiting items other than myself in it.
skip = None
for task in Task.Iterator(my_task, my_task.NOT_FINISHED_MASK):
# If the current task is a child of myself, ignore it.
if skip is not None and task._is_descendant_of(skip):
continue
if task.spec == self:
skip = task
continue
return False
return True
def _branch_may_merge_at(self, task):
for child in task:
# Ignore tasks that were created by a trigger.
if child._has_state(Task.TRIGGERED):
continue
# Merge found.
if child.spec == self:
return True
            # If the task is predicted with fewer outputs than it has
# children, that means the prediction may be incomplete (for
# example, because a prediction is not yet possible at this time).
if not child._is_definite() \
and len(child.spec.outputs) > len(child.children):
return True
return False
def _fire(self, my_task, waiting_tasks):
"""
Fire, and cancel remaining tasks, if so requested.
"""
# If this is a cancelling join, cancel all incoming branches,
# except for the one that just completed.
if self.cancel_remaining:
for task in waiting_tasks:
task.cancel()
def _try_fire_unstructured(self, my_task, force = False):
# If the threshold was already reached, there is nothing else to do.
if my_task._has_state(Task.COMPLETED):
return False
if my_task._has_state(Task.READY):
return True
# The default threshold is the number of inputs.
threshold = valueof(my_task, self.threshold)
if threshold is None:
threshold = len(self.inputs)
# Look at the tree to find all places where this task is used.
tasks = []
for input in self.inputs:
for task in my_task.job.task_tree:
if task.thread_id != my_task.thread_id:
continue
if task.spec != input:
continue
tasks.append(task)
# Look up which tasks have already completed.
waiting_tasks = []
completed = 0
for task in tasks:
if task.parent is None or task._has_state(Task.COMPLETED):
completed += 1
else:
waiting_tasks.append(task)
# If the threshold was reached, get ready to fire.
if force or completed >= threshold:
self._fire(my_task, waiting_tasks)
return True
# We do NOT set the task state to COMPLETED, because in
# case all other incoming tasks get cancelled (or never reach
        # the Join for other reasons, such as reaching a stub branch),
# we need to revisit it.
return False
def _try_fire_structured(self, my_task, force = False):
# If the threshold was already reached, there is nothing else to do.
if my_task._has_state(Task.READY):
return True
if my_task._has_state(Task.COMPLETED):
return False
# Retrieve a list of all activated tasks from the associated
# task that did the conditional parallel split.
split_task = my_task._find_ancestor_from_name(self.split_task)
if split_task is None:
msg = 'Join with %s, which was not reached' % self.split_task
raise WorkflowException(self, msg)
tasks = split_task.spec._get_activated_tasks(split_task, my_task)
# The default threshold is the number of branches that were started.
threshold = valueof(my_task, self.threshold)
if threshold is None:
threshold = len(tasks)
# Look up which tasks have already completed.
waiting_tasks = []
completed = 0
for task in tasks:
# Refresh path prediction.
task.spec._predict(task)
if not self._branch_may_merge_at(task):
completed += 1
elif self._branch_is_complete(task):
completed += 1
else:
waiting_tasks.append(task)
# If the threshold was reached, get ready to fire.
if force or completed >= threshold:
self._fire(my_task, waiting_tasks)
return True
# We do NOT set the task state to COMPLETED, because in
# case all other incoming tasks get cancelled (or never reach
# the Join for other reasons, such as reaching a stub branch), we
# need to revisit it.
return False
def try_fire(self, my_task, force = False):
if self.split_task is None:
return self._try_fire_unstructured(my_task, force)
return self._try_fire_structured(my_task, force)
def _do_join(self, my_task):
if self.split_task:
split_task = my_task.job.get_task_from_name(self.split_task)
split_task = my_task._find_ancestor(split_task)
else:
split_task = my_task.job.task_tree
# Find the inbound node that was completed last.
last_changed = None
thread_tasks = []
for task in split_task._find_any(self):
if task.thread_id != my_task.thread_id:
continue
if self.split_task and task._is_descendant_of(my_task):
continue
changed = task.parent.last_state_change
if last_changed is None \
or changed > last_changed.parent.last_state_change:
last_changed = task
thread_tasks.append(task)
# Mark all nodes in this thread that reference this task as
# completed, except for the first one, which should be READY.
for task in thread_tasks:
if task == last_changed:
self.signal_emit('entered', my_task.job, my_task)
task._ready()
else:
task.state = Task.COMPLETED
task._drop_children()
return False
def _on_trigger(self, my_task):
"""
May be called to fire the Join before the incoming branches are
completed.
"""
for task in my_task.job.task_tree._find_any(self):
if task.thread_id != my_task.thread_id:
continue
return self._do_join(task)
def _update_state_hook(self, my_task):
if not self.try_fire(my_task):
my_task.state = Task.WAITING
return False
return self._do_join(my_task)
def _on_complete_hook(self, my_task):
"""
Runs the task. Should not be called directly.
Returns True if completed, False otherwise.
"""
return TaskSpec._on_complete_hook(self, my_task)
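# A minimal sketch of the firing rule shared by the _try_fire_* methods above:
# the join becomes ready once the completed incoming branches reach the
# threshold, which defaults to the total number of branches. The helper name
# is illustrative, not part of SpiffWorkflow's API.
def _join_fires(completed, n_branches, threshold=None):
    if threshold is None:
        threshold = n_branches
    return completed >= threshold

assert _join_fires(2, 3, threshold=2)  # discriminator-style join fires early
assert not _join_fires(2, 3)           # full synchronization waits for all 3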
| 37.93254
| 80
| 0.61293
| 8,607
| 0.900408
| 0
| 0
| 0
| 0
| 0
| 0
| 4,012
| 0.419709
|
0c2c64f073f540439acf039ecdc1016885d5eb85
| 5,763
|
py
|
Python
|
covsirphy/visualization/bar_plot.py
|
ardhanii/covid19-sir
|
87881963c49a2fc5b6235c8b21269d216acaa941
|
[
"Apache-2.0"
] | 97
|
2020-05-15T15:20:15.000Z
|
2022-03-18T02:55:54.000Z
|
covsirphy/visualization/bar_plot.py
|
ardhanii/covid19-sir
|
87881963c49a2fc5b6235c8b21269d216acaa941
|
[
"Apache-2.0"
] | 970
|
2020-06-01T13:48:34.000Z
|
2022-03-29T08:20:49.000Z
|
covsirphy/visualization/bar_plot.py
|
ardhani31/Covid19-SIRV-v3
|
59d95156b375c41259c46ce4e656b86903f92ec2
|
[
"Apache-2.0"
] | 36
|
2020-05-15T15:36:43.000Z
|
2022-02-25T17:59:08.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from matplotlib import pyplot as plt
from matplotlib.ticker import ScalarFormatter
import pandas as pd
from covsirphy.util.argument import find_args
from covsirphy.visualization.vbase import VisualizeBase
class BarPlot(VisualizeBase):
"""
Create a bar plot.
Args:
filename (str or None): filename to save the figure or None (display)
bbox_inches (str): bounding box in inches when creating the figure
kwargs: the other arguments of matplotlib.pyplot.savefig()
"""
def __init__(self, filename=None, bbox_inches="tight", **kwargs):
self._filename = filename
self._savefig_dict = {"bbox_inches": bbox_inches, **kwargs}
# Properties
self._title = ""
self._variables = []
self._ax = None
def __enter__(self):
return super().__enter__()
def __exit__(self, *exc_info):
return super().__exit__(*exc_info)
def plot(self, data, vertical=True, colormap=None, color_dict=None, **kwargs):
"""
Create bar plot.
Args:
data (pandas.DataFrame or pandas.Series): data to show
Index
labels of the bars
Columns
variables to show
            vertical (bool): whether vertical bar plot (True) or horizontal bar plot (False)
colormap (str, matplotlib colormap object or None): colormap, please refer to https://matplotlib.org/examples/color/colormaps_reference.html
color_dict (dict[str, str] or None): dictionary of column names (keys) and colors (values)
kwargs: keyword arguments of pandas.DataFrame.plot()
"""
if isinstance(data, pd.Series):
data = pd.DataFrame(data)
self._ensure_dataframe(data, name="data")
self._variables = data.columns.tolist()
# Color
color_args = self._plot_colors(data.columns, colormap=colormap, color_dict=color_dict)
# Set plotting
method_dict = {True: data.plot.bar, False: data.plot.barh}
try:
self._ax = method_dict[vertical](**color_args, **kwargs)
except ValueError as e:
raise ValueError(e.args[0]) from None
# No rotation of xticks
self._ax.tick_params(axis="x", rotation=0)
def x_axis(self, xlabel=None):
"""
Set x axis.
Args:
xlabel (str or None): x-label
"""
# Label
self._ax.set_xlabel(xlabel)
def y_axis(self, ylabel="Cases", y_logscale=False, ylim=(0, None), math_scale=True, y_integer=False):
"""
        Set y axis.
Args:
ylabel (str or None): y-label
            y_logscale (bool): whether to use log-scale on the y-axis or not
            ylim (tuple(int or float, int or float)): limit of the y domain
            math_scale (bool): whether to use LaTeX or not in the y-label
            y_integer (bool): whether to force the values to be shown as integers or not
Note:
If None is included in ylim, the values will be automatically determined by Matplotlib
"""
# Label
self._ax.set_ylabel(ylabel)
# Math scale
if math_scale:
self._ax.yaxis.set_major_formatter(ScalarFormatter(useMathText=True))
self._ax.ticklabel_format(style="sci", axis="y", scilimits=(0, 0))
        # Integer scale
if y_integer:
fmt = ScalarFormatter(useOffset=False)
fmt.set_scientific(False)
self._ax.yaxis.set_major_formatter(fmt)
# Log scale
if y_logscale:
self._ax.set_yscale("log")
ylim = (None, None) if ylim[0] == 0 else ylim
# limit
self._ax.set_ylim(*ylim)
def line(self, v=None, h=None, color="black", linestyle=":"):
"""
Show vertical/horizontal lines.
Args:
v (list[int/float] or None): list of x values of vertical lines or None
h (list[int/float] or None): list of y values of horizontal lines or None
color (str): color of the line
linestyle (str): linestyle
"""
if h is not None:
self._ax.axhline(y=h, color=color, linestyle=linestyle)
if v is not None:
v = v if isinstance(v, list) else [v]
for value in v:
self._ax.axvline(x=value, color=color, linestyle=linestyle)
def bar_plot(df, title=None, filename=None, show_legend=True, **kwargs):
"""
Wrapper function: show chronological change of the data.
Args:
        df (pandas.DataFrame or pandas.Series): data to show
Index
Date (pandas.Timestamp)
Columns
variables to show
title (str): title of the figure
filename (str or None): filename to save the figure or None (display)
show_legend (bool): whether show legend or not
kwargs: keyword arguments of the following classes and methods.
- covsirphy.BarPlot() and its methods,
- matplotlib.pyplot.savefig(), matplotlib.pyplot.legend(),
- pandas.DataFrame.plot()
"""
with BarPlot(filename=filename, **find_args(plt.savefig, **kwargs)) as bp:
bp.title = title
bp.plot(data=df, **find_args([BarPlot.plot, pd.DataFrame.plot], **kwargs))
# Axis
bp.x_axis(**find_args([BarPlot.x_axis], **kwargs))
bp.y_axis(**find_args([BarPlot.y_axis], **kwargs))
# Vertical/horizontal lines
bp.line(**find_args([BarPlot.line], **kwargs))
# Legend
if show_legend:
bp.legend(**find_args([BarPlot.legend, plt.legend], **kwargs))
else:
bp.legend_hide()
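# A hedged usage sketch of the bar_plot() wrapper above; the data and the
# output filename are illustrative and not part of the covsirphy package.
if __name__ == "__main__":
    _df_example = pd.DataFrame(
        {"Confirmed": [10, 20], "Fatal": [1, 2]},
        index=pd.to_datetime(["2021-01-01", "2021-01-02"]))
    bar_plot(_df_example, title="Example", filename="bar_example.png")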
| 36.942308
| 152
| 0.599167
| 4,194
| 0.727746
| 0
| 0
| 0
| 0
| 0
| 0
| 2,760
| 0.478917
|
0c2c8fc01f580afd1e737eea2d3f4a891285699e
| 3,342
|
py
|
Python
|
03-process-unsplash-dataset.py
|
l294265421/natural-language-image-search
|
71621f2208f345b922ed0f82d406526cef456d48
|
[
"MIT"
] | null | null | null |
03-process-unsplash-dataset.py
|
l294265421/natural-language-image-search
|
71621f2208f345b922ed0f82d406526cef456d48
|
[
"MIT"
] | null | null | null |
03-process-unsplash-dataset.py
|
l294265421/natural-language-image-search
|
71621f2208f345b922ed0f82d406526cef456d48
|
[
"MIT"
] | null | null | null |
import os
import math
from pathlib import Path
import clip
import torch
from PIL import Image
import numpy as np
import pandas as pd
from common import common_path
# Set the path to the photos
# dataset_version = "lite" # Use "lite" or "full"
# photos_path = Path("unsplash-dataset") / dataset_version / "photos"
photos_path = os.path.join(common_path.project_dir, 'unsplash-dataset/lite/photos')
# List all JPGs in the folder
photos_files = list(Path(photos_path).glob("*.jpg"))
# Print some statistics
print(f"Photos found: {len(photos_files)}")
# Load the open CLIP model
device = "cuda" if torch.cuda.is_available() else "cpu"
model, preprocess = clip.load("ViT-B/32", device=device)
# Function that computes the feature vectors for a batch of images
def compute_clip_features(photos_batch):
# Load all the photos from the files
photos = [Image.open(photo_file) for photo_file in photos_batch]
# Preprocess all photos
photos_preprocessed = torch.stack([preprocess(photo) for photo in photos]).to(device)
with torch.no_grad():
# Encode the photos batch to compute the feature vectors and normalize them
photos_features = model.encode_image(photos_preprocessed)
photos_features /= photos_features.norm(dim=-1, keepdim=True)
# Transfer the feature vectors back to the CPU and convert to numpy
return photos_features.cpu().numpy()
# Define the batch size so that it fits on your GPU. You can also do the processing on the CPU, but it will be slower.
batch_size = 16
# Path where the feature vectors will be stored
features_path = os.path.join(common_path.project_dir, 'unsplash-dataset/lite/features')
# Compute how many batches are needed
batches = math.ceil(len(photos_files) / batch_size)
# Process each batch
for i in range(batches):
print(f"Processing batch {i + 1}/{batches}")
batch_ids_path = os.path.join(features_path, f"{i:010d}.csv")
batch_features_path = os.path.join(features_path, f"{i:010d}.npy")
# Only do the processing if the batch wasn't processed yet
if not os.path.exists(batch_features_path):
try:
# Select the photos for the current batch
batch_files = photos_files[i * batch_size: (i + 1) * batch_size]
# Compute the features and save to a numpy file
batch_features = compute_clip_features(batch_files)
np.save(batch_features_path, batch_features)
# Save the photo IDs to a CSV file
photo_ids = [photo_file.name.split(".")[0] for photo_file in batch_files]
photo_ids_data = pd.DataFrame(photo_ids, columns=['photo_id'])
photo_ids_data.to_csv(batch_ids_path, index=False)
        except Exception:
# Catch problems with the processing to make the process more robust
print(f'Problem with batch {i}')
# Load all numpy files
features_list = [np.load(features_file) for features_file in sorted(Path(features_path).glob("*.npy"))]
# Concatenate the features and store in a merged file
features = np.concatenate(features_list)
np.save(os.path.join(features_path, "features.npy"), features)
# Load all the photo IDs
photo_ids = pd.concat([pd.read_csv(ids_file) for ids_file in sorted(Path(features_path).glob("*.csv"))])
photo_ids.to_csv(os.path.join(features_path, "photo_ids.csv"), index=False)
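# Sketch of the batching arithmetic used above: with an illustrative 25,000
# photos and the batch size of 16, math.ceil yields 1,563 feature batches.
assert math.ceil(25_000 / 16) == 1563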
| 36.326087
| 118
| 0.718731
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,335
| 0.399461
|
0c2caff0890d29c7f470b93cedd466717f34705f
| 4,612
|
py
|
Python
|
treadmill_pipeline/treadmill.py
|
ttngu207/project-treadmill
|
55b5241b1c0b2634da8c153bf9aaeb511f28b07f
|
[
"MIT"
] | null | null | null |
treadmill_pipeline/treadmill.py
|
ttngu207/project-treadmill
|
55b5241b1c0b2634da8c153bf9aaeb511f28b07f
|
[
"MIT"
] | null | null | null |
treadmill_pipeline/treadmill.py
|
ttngu207/project-treadmill
|
55b5241b1c0b2634da8c153bf9aaeb511f28b07f
|
[
"MIT"
] | 4
|
2020-03-05T15:44:36.000Z
|
2020-03-18T15:18:11.000Z
|
import numpy as np
import datajoint as dj
from treadmill_pipeline import project_database_prefix
from ephys.utilities import ingestion, time_sync
from ephys import get_schema_name
schema = dj.schema(project_database_prefix + 'treadmill_pipeline')
reference = dj.create_virtual_module('reference', get_schema_name('reference'))
acquisition = dj.create_virtual_module('acquisition', get_schema_name('acquisition'))
behavior = dj.create_virtual_module('behavior', get_schema_name('behavior'))
@schema
class TreadmillTracking(dj.Imported):
definition = """ # session-level tracking data from the treadmill_pipeline(s) employed in the experiment
-> acquisition.Session
treadmill_tracking_time: datetime # start time of this treadmill_pipeline speed recording
---
treadmill_tracking_name: varchar(40) # user-assign name of this treadmill_pipeline tracking (e.g. 27032019laserSess1)
treadmill_timestamps: blob@ephys_store # (s) timestamps of the treadmill_pipeline speed samples
"""
class TreadmillSync(dj.Part):
definition = """
-> master
---
sync_master_clock: varchar(128) # name of the sync-master
track_sync_data=null: blob@ephys_store # sync data (binary)
track_time_zero=null: float # (s) the first time point of this tracking
track_sync_timestamps=null: blob@ephys_store # (s) timestamps of sync data in tracking clock
track_sync_master_timestamps=null: blob@ephys_store # (s) timestamps of sync data in master clock
"""
class Speed(dj.Part):
definition = """
-> master
treadmill_name: varchar(32)
---
treadmill_speed: blob@ephys_store # (s) treadmill_pipeline speed at each timestamp
"""
key_source = acquisition.Session & acquisition.Recording # wait for recording to be ingested first before tracking
def make(self, key):
input_dir = ingestion.find_input_directory(key)
if not input_dir:
print(f'{input_dir} not found in this machine, skipping...')
return
rec_type, recordings = ingestion.get_recordings(input_dir)
if rec_type in ('neuropixels', 'neurologger'): # if 'neuropixels' recording, check for OptiTrack's `motive` or `.csv`
opti_list = ingestion.get_optitrack(input_dir)
if not opti_list:
raise FileNotFoundError('No OptiTrack "matmot.mtv" or ".csv" found')
for opti in opti_list:
if 'Format Version' not in opti.meta:
raise NotImplementedError('Treadmill data ingest from type other than "optitrack.csv" not implemented')
secondary_data = opti.secondary_data
if 'Treadmill' not in secondary_data:
raise KeyError('No "Treadmill" found in the secondary data of optitrack.csv')
treadmill_key = dict(key, treadmill_tracking_time=opti.recording_time)
self.insert1(dict(treadmill_key,
treadmill_tracking_name=opti.tracking_name, # name of the session folder
treadmill_timestamps=secondary_data['t']))
if hasattr(opti, 'sync_data'):
self.TreadmillSync.insert1(dict(treadmill_key,
sync_master_clock=opti.sync_data['master_name'],
track_time_zero=secondary_data['t'][0],
track_sync_timestamps=opti.sync_data['slave'],
track_sync_master_timestamps=opti.sync_data['master']))
else: # data presynced with tracking-recording pair,
                    # still needs a linear shift from session start to the recording this tracking is synced to
                    self.TreadmillSync.insert1(time_sync.create_tracking_sync_data(
treadmill_key, np.array([secondary_data['t'][0], secondary_data['t'][-1]])))
self.Speed.insert1([dict(treadmill_key, treadmill_name=k, treadmill_speed=v['speed'])
for k, v in secondary_data['Treadmill'].items()])
print(f'Insert {len(opti_list)} treadmill_pipeline tracking(s): {input_dir.stem}')
else:
raise NotImplementedError(f'Treadmill Tracking ingestion for recording type {rec_type} not implemented')
| 50.681319
| 130
| 0.624892
| 4,107
| 0.890503
| 0
| 0
| 4,115
| 0.892238
| 0
| 0
| 2,099
| 0.455117
|
0c3043c88aed8f6a40aafefe3d1e9548537a28e3
| 1,324
|
py
|
Python
|
management/api/v1/serializers.py
|
bwksoftware/cypetulip
|
4ea5c56d2d48a311220e144d094280a275109316
|
[
"MIT"
] | 3
|
2019-08-03T12:00:22.000Z
|
2020-02-02T08:37:09.000Z
|
management/api/v1/serializers.py
|
basetwode/cypetulip
|
d6be294a288706c5661afb433215fe6c3ffea92b
|
[
"MIT"
] | 47
|
2019-08-03T16:17:41.000Z
|
2022-03-11T23:15:48.000Z
|
management/api/v1/serializers.py
|
basetwode/cypetulip
|
d6be294a288706c5661afb433215fe6c3ffea92b
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from management.models.main import MailSetting, LdapSetting, ShopSetting, LegalSetting, Header, CacheSetting, Footer
class MailSettingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = MailSetting
fields = '__all__'
class LdapSettingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LdapSetting
fields = '__all__'
class ShopSettingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ShopSetting
fields = '__all__'
class LegalSettingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LegalSetting
fields = '__all__'
class HeaderSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Header
fields = '__all__'
class FooterSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Footer
fields = '__all__'
class CacheSettingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = CacheSetting
fields = '__all__'
| 22.066667
| 116
| 0.702417
| 1,146
| 0.865559
| 0
| 0
| 0
| 0
| 0
| 0
| 63
| 0.047583
|
0c309ee4537295e1c6db342512009ad9c9a55328
| 9,854
|
py
|
Python
|
tests/test_optimal.py
|
craffer/fantasy-coty
|
08903cb138fa1c2d160b90fc028c8ec55901040b
|
[
"MIT"
] | null | null | null |
tests/test_optimal.py
|
craffer/fantasy-coty
|
08903cb138fa1c2d160b90fc028c8ec55901040b
|
[
"MIT"
] | 2
|
2019-12-21T18:48:40.000Z
|
2019-12-22T20:19:20.000Z
|
tests/test_optimal.py
|
craffer/fantasy-coty
|
08903cb138fa1c2d160b90fc028c8ec55901040b
|
[
"MIT"
] | null | null | null |
"""Unit test optimal lineup functions."""
import unittest
import copy
import ff_espn_api # pylint: disable=import-error
from collections import defaultdict
from fantasy_coty.main import add_to_optimal
class TestAddToOptimal(unittest.TestCase):
"""Test add_to_optimal() function."""
def setUp(self):
"""Set up settings and optimal objects for each function."""
self.settings = defaultdict(int)
self.settings["QB"] = 1
self.settings["RB"] = 2
self.settings["WR"] = 2
self.settings["TE"] = 1
self.settings["RB/WR/TE"] = 1
self.settings["D/ST"] = 1
self.settings["K"] = 1
self.optimal = defaultdict(list)
self.flex = "RB/WR/TE"
# all of this is garbage in order to create a default BoxPlayer that we can modify
data = {}
data["proTeamId"] = 1
data["player"] = {}
data["player"]["proTeamId"] = 1
data["player"]["stats"] = []
pro_schedule = {}
pos_rankings = {}
week = 0
self.default_players = {}
self.default_players["QB"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["QB"].eligibleSlots = ["QB", "OP", "BE", "IR"]
self.default_players["QB"].position = "QB"
self.default_players["RB"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["RB"].eligibleSlots = [
"RB",
"RB/WR",
"RB/WR/TE",
"OP",
"BE",
"IR",
]
self.default_players["RB"].position = "RB"
self.default_players["WR"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["WR"].eligibleSlots = [
"RB/WR",
"WR",
"WR/TE",
"RB/WR/TE",
"OP",
"BE",
"IR",
]
self.default_players["WR"].position = "WR"
self.default_players["TE"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["TE"].eligibleSlots = [
"WR/TE",
"TE",
"RB/WR/TE",
"OP",
"BE",
"IR",
]
self.default_players["TE"].position = "TE"
self.default_players["D/ST"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["D/ST"].eligibleSlots = ["D/ST", "BE", "IR"]
self.default_players["D/ST"].position = "D/ST"
self.default_players["K"] = ff_espn_api.BoxPlayer(data, pro_schedule, pos_rankings, week)
self.default_players["K"].eligibleSlots = ["K", "BE", "IR"]
self.default_players["K"].position = "K"
def test_basic_functionality(self):
"""Test basic functionality of add_to_optimal()."""
# test basic replacement functionality
qb1 = copy.deepcopy(self.default_players["QB"])
qb1.points = 10
self.optimal = add_to_optimal(self.optimal, self.settings, qb1)
self.assertEqual(self.optimal["QB"][0].points, 10)
self.assertEqual(len(self.optimal["QB"]), self.settings["QB"])
qb2 = copy.deepcopy(self.default_players["QB"])
qb2.points = 8
self.optimal = add_to_optimal(self.optimal, self.settings, qb2)
self.assertEqual(self.optimal["QB"][0].points, 10)
self.assertEqual(len(self.optimal["QB"]), self.settings["QB"])
qb3 = copy.deepcopy(self.default_players["QB"])
qb3.points = 12
self.optimal = add_to_optimal(self.optimal, self.settings, qb3)
self.assertEqual(self.optimal["QB"][0].points, 12)
self.assertEqual(len(self.optimal["QB"]), self.settings["QB"])
def test_flex_replacement(self):
"""Test functionality involving of add_to_optimal() involving FLEX."""
rb1 = copy.deepcopy(self.default_players["RB"])
rb1.points = 20
self.optimal = add_to_optimal(self.optimal, self.settings, rb1)
self.assertEqual(self.optimal["RB"][0].points, rb1.points)
self.assertLess(len(self.optimal["RB"]), self.settings["RB"])
rb2 = copy.deepcopy(self.default_players["RB"])
rb2.points = 12
self.optimal = add_to_optimal(self.optimal, self.settings, rb2)
self.assertIn(rb1.points, [x.points for x in self.optimal["RB"]])
self.assertIn(rb2.points, [x.points for x in self.optimal["RB"]])
self.assertEqual(len(self.optimal["RB"]), self.settings["RB"])
# test adding to FLEX when less than the other 2 RBs
rb3 = copy.deepcopy(self.default_players["RB"])
rb3.points = 8
self.optimal = add_to_optimal(self.optimal, self.settings, rb3)
self.assertIn(rb1.points, [x.points for x in self.optimal["RB"]])
self.assertIn(rb2.points, [x.points for x in self.optimal["RB"]])
self.assertEqual(len(self.optimal["RB"]), self.settings["RB"])
self.assertIn(rb3.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# test bumping one RB from RB2 to FLEX
rb4 = copy.deepcopy(self.default_players["RB"])
rb4.points = 16
self.optimal = add_to_optimal(self.optimal, self.settings, rb4)
self.assertIn(rb1.points, [x.points for x in self.optimal["RB"]])
self.assertIn(rb4.points, [x.points for x in self.optimal["RB"]])
self.assertEqual(len(self.optimal["RB"]), self.settings["RB"])
self.assertIn(rb2.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# test putting something straight away in flex
rb5 = copy.deepcopy(self.default_players["RB"])
rb5.points = 14
self.optimal = add_to_optimal(self.optimal, self.settings, rb5)
self.assertIn(rb1.points, [x.points for x in self.optimal["RB"]])
self.assertIn(rb4.points, [x.points for x in self.optimal["RB"]])
self.assertEqual(len(self.optimal["RB"]), self.settings["RB"])
self.assertIn(rb5.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# test putting in low WRs with flex spot full
wr1 = copy.deepcopy(self.default_players["WR"])
wr1.points = 5
self.optimal = add_to_optimal(self.optimal, self.settings, wr1)
self.assertIn(wr1.points, [x.points for x in self.optimal["WR"]])
self.assertLess(len(self.optimal["WR"]), self.settings["WR"])
wr2 = copy.deepcopy(self.default_players["WR"])
wr2.points = 7
self.optimal = add_to_optimal(self.optimal, self.settings, wr2)
self.assertIn(wr1.points, [x.points for x in self.optimal["WR"]])
self.assertIn(wr2.points, [x.points for x in self.optimal["WR"]])
self.assertEqual(len(self.optimal["WR"]), self.settings["WR"])
# test putting in a very high WR, shouldn't bump anything to FLEX
wr3 = copy.deepcopy(self.default_players["WR"])
wr3.points = 30
self.optimal = add_to_optimal(self.optimal, self.settings, wr3)
self.assertIn(wr3.points, [x.points for x in self.optimal["WR"]])
self.assertIn(wr2.points, [x.points for x in self.optimal["WR"]])
self.assertEqual(len(self.optimal["WR"]), self.settings["WR"])
self.assertIn(rb5.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# two more receivers, the second one should bump something to FLEX
wr4 = copy.deepcopy(self.default_players["WR"])
wr4.points = 28
self.optimal = add_to_optimal(self.optimal, self.settings, wr4)
self.assertIn(wr3.points, [x.points for x in self.optimal["WR"]])
self.assertIn(wr4.points, [x.points for x in self.optimal["WR"]])
self.assertEqual(len(self.optimal["WR"]), self.settings["WR"])
self.assertIn(rb5.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# this should bump WR4 to FLEX
wr5 = copy.deepcopy(self.default_players["WR"])
wr5.points = 29
self.optimal = add_to_optimal(self.optimal, self.settings, wr5)
self.assertIn(wr3.points, [x.points for x in self.optimal["WR"]])
self.assertIn(wr5.points, [x.points for x in self.optimal["WR"]])
self.assertEqual(len(self.optimal["WR"]), self.settings["WR"])
self.assertIn(wr4.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
# finally, this should go directly in FLEX
wr6 = copy.deepcopy(self.default_players["WR"])
wr6.points = 28.5
self.optimal = add_to_optimal(self.optimal, self.settings, wr6)
self.assertIn(wr3.points, [x.points for x in self.optimal["WR"]])
self.assertIn(wr5.points, [x.points for x in self.optimal["WR"]])
self.assertEqual(len(self.optimal["WR"]), self.settings["WR"])
self.assertIn(wr6.points, [x.points for x in self.optimal[self.flex]])
self.assertEqual(len(self.optimal[self.flex]), self.settings[self.flex])
def test_negative_player(self):
"""Make sure negative-scoring players aren't optimal, no matter what."""
d1 = copy.deepcopy(self.default_players["D/ST"])
d1.points = -0.1
self.optimal = add_to_optimal(self.optimal, self.settings, d1)
self.assertNotIn(d1.points, [x.points for x in self.optimal["D/ST"]])
self.assertLess(len(self.optimal["D/ST"]), self.settings["D/ST"])
if __name__ == "__main__":
unittest.main()
| 46.046729
| 100
| 0.623909
| 9,600
| 0.974224
| 0
| 0
| 0
| 0
| 0
| 0
| 1,504
| 0.152628
|
0c31fa4744359e49cd3c719e5fe2aae79bc7f68a
| 5,391
|
py
|
Python
|
spark/explorer.py
|
Elavarasan17/Stack-Overflow-Data-Dump-Analysis
|
3742a1eef17b211ddcda4bd5f41d8a8c42ec228f
|
[
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null |
spark/explorer.py
|
Elavarasan17/Stack-Overflow-Data-Dump-Analysis
|
3742a1eef17b211ddcda4bd5f41d8a8c42ec228f
|
[
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null |
spark/explorer.py
|
Elavarasan17/Stack-Overflow-Data-Dump-Analysis
|
3742a1eef17b211ddcda4bd5f41d8a8c42ec228f
|
[
"zlib-acknowledgement",
"RSA-MD"
] | null | null | null |
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession, functions, types
from pyspark.sql.functions import date_format
from pyspark.sql.functions import year, month, dayofmonth
import sys
import json
import argparse
assert sys.version_info >= (3, 5) # make sure we have Python 3.5+
# add more functions as necessary
def main(posts_inputs, users_inputs,temp_bucket_input,dataset_input):
# main logic starts here
#Hariish - Users and Tags
users = spark.read.parquet(users_inputs)
posts = spark.read.parquet(posts_inputs)
#User and posts join
posts = posts.withColumn("creation_year",year(posts['creation_date']))
res = users.join(posts,(posts['owner_user_id'] == users['id'])).select(users.display_name,users.id,posts.post_id,posts.creation_year,users.location,posts.owner_user_id,posts.post_type_id,posts.accepted_answer_id,posts.tags)
#Active users over years:
res1 = res.groupBy(res['id'],res['display_name'],res['creation_year']).agg(functions.count(res['post_id']).alias('post_count')).orderBy('post_count')
res2 = res1.groupBy(res1['id'],res['display_name']).pivot('creation_year').sum('post_count')
res3 = res2.na.fill(value=0)
res4 = res3.withColumn("overall_posts",res3['2015']+res3['2016']+res3['2017']+res3['2018']+res3['2019']+res3['2020']+res3['2021'])
active_users = res4.orderBy(res4['overall_posts'].desc()).select('id','display_name','2015','2016','2017','2018','2019','2020','2021','overall_posts')
act_id = active_users.limit(10).select('id','display_name')
active_users = active_users.withColumnRenamed('2015','y_2015').withColumnRenamed('2016','y_2016').withColumnRenamed('2017','y_2017').withColumnRenamed('2018','y_2018').withColumnRenamed('2019','y_2019').withColumnRenamed('2020','y_2020').withColumnRenamed('2021','y_2021')
active_users = active_users.limit(10)
a1 = active_users.selectExpr("display_name", "stack(8, 'y_2015', y_2015, 'y_2016', y_2016, 'y_2017', y_2017,'y_2018',y_2018,'y_2019',y_2019,'y_2020',y_2020,'y_2021',y_2021 ,'overall_posts',overall_posts) as (creation_year, values)").where("values is not null")
a1 = a1.select('creation_year','display_name','values')
act_user = a1.groupBy(a1['creation_year']).pivot('display_name').sum('values')
act_user = act_user.withColumnRenamed('Gordon Linoff',"Gordon_Linoff").withColumnRenamed('Nina Scholz','Nina_Scholz').withColumnRenamed('Ronak Shah','Ronak_Shah').withColumnRenamed('T.J. Crowder','TJ_Crowder').withColumnRenamed('Tim Biegeleisen','Tim_Biegeleisen').withColumnRenamed('Wiktor Stribiżew','Wiktor_Stribiżew')
#act_user.show()
#Famous Tags over the year
p1 = posts.withColumn("new",functions.arrays_zip("tags")).withColumn("new", functions.explode("new")).select('post_id',functions.col("new.tags").alias("tags"),'creation_year')
p2 = p1.groupBy(p1['tags'],p1['creation_year']).agg(functions.count(p1['tags']).alias('tag_count'))
p3 = p2.groupBy(p2['tags']).pivot('creation_year').sum('tag_count')
p3 = p3.na.fill(value=0)
p4 = p3.withColumn("overall_tag_usage",p3['2015']+p3['2016']+p3['2017']+p3['2018']+p3['2019']+p3['2020']+p3['2021'])
tag_trends = p4.orderBy(p4['overall_tag_usage'].desc()).select('tags','2015','2016','2017','2018','2019','2020','2021','overall_tag_usage')
tag_trends = tag_trends.withColumnRenamed('2015','y_2015').withColumnRenamed('2016','y_2016').withColumnRenamed('2017','y_2017').withColumnRenamed('2018','y_2018').withColumnRenamed('2019','y_2019').withColumnRenamed('2020','y_2020').withColumnRenamed('2021','y_2021')
tag_trends = tag_trends.limit(10)
t1 = tag_trends.selectExpr("tags", "stack(8, 'y_2015', y_2015, 'y_2016', y_2016, 'y_2017', y_2017,'y_2018',y_2018,'y_2019',y_2019,'y_2020',y_2020,'y_2021',y_2021 ,'overall_tag_usage',overall_tag_usage) as (creation_year, values)").where("values is not null")
t1 = t1.select('creation_year','tags','values')
tag_view = t1.groupBy(t1['creation_year']).pivot('tags').sum('values')
#tag_view.show()
#writing:
act_user.write.mode('overwrite').format('bigquery').option("temporaryGcsBucket",temp_bucket_input).option('table',dataset_input+".active_users").save()
tag_view.write.mode('overwrite').format('bigquery').option("temporaryGcsBucket",temp_bucket_input).option('table',dataset_input+".tag_trends").save()
act_id.write.mode('overwrite').format('bigquery').option("temporaryGcsBucket",temp_bucket_input).option('table',dataset_input+".top10_user_details").save()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-posts_src", action="store", dest="posts_src", type=str)
parser.add_argument("-users_src", action="store", dest="users_src", type=str)
parser.add_argument("-tempbucket_src", action="store", dest="tempbucket_src", type=str)
parser.add_argument("-dataset_src", action="store", dest="dataset_src", type=str)
args = parser.parse_args()
posts_inputs = args.posts_src
users_inputs = args.users_src
temp_bucket_input = args.tempbucket_src
dataset_input = args.dataset_src
spark = SparkSession.builder.appName('Explorer DF').getOrCreate()
assert spark.version >= '3.0' # make sure we have Spark 3.0+
spark.sparkContext.setLogLevel('WARN')
main(posts_inputs, users_inputs,temp_bucket_input,dataset_input)
| 70.012987
| 325
| 0.725654
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,165
| 0.401446
|
0c32795d8af79fcf1c3d723adbd4971a62b457ad
| 2,177
|
py
|
Python
|
self_supervised/loss.py
|
ravidziv/self-supervised-learning
|
f02c1639ce3c2119afa522e400d793e741fb68a0
|
[
"MIT"
] | null | null | null |
self_supervised/loss.py
|
ravidziv/self-supervised-learning
|
f02c1639ce3c2119afa522e400d793e741fb68a0
|
[
"MIT"
] | null | null | null |
self_supervised/loss.py
|
ravidziv/self-supervised-learning
|
f02c1639ce3c2119afa522e400d793e741fb68a0
|
[
"MIT"
] | null | null | null |
"""Contrastive loss functions."""
from functools import partial
import tensorflow as tf
LARGE_NUM = 1e9
def cont_loss2(temperature: float):
func = partial(add_contrastive_loss, temperature=temperature)
func.__name__ = 'cont_loss2'
return func
def add_supervised_loss(labels: tf.Tensor, logits: tf.Tensor):
"""Compute mean supervised loss over local batch."""
losses = tf.keras.losses.CategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE)(labels,
logits)
return tf.reduce_mean(losses)
def add_contrastive_loss(hidden: tf.Tensor, hidden_norm: bool = True,
temperature: float = 1.0):
"""Compute loss for model.
Args:
hidden: hidden vector (`Tensor`) of shape (bsz, dim).
hidden_norm: whether or not to use normalization on the hidden vector.
temperature: a `floating` number for temperature scaling.
Returns:
A loss scalar.
The logits for contrastive prediction task.
The labels for contrastive prediction task.
"""
if hidden_norm:
hidden = tf.math.l2_normalize(hidden, -1)
hidden1, hidden2 = tf.split(hidden, 2, 0)
batch_size = tf.shape(hidden1)[0]
hidden1_large = hidden1
hidden2_large = hidden2
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
logits_aa = tf.matmul(hidden1, hidden1_large, transpose_b=True) / temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = tf.matmul(hidden2, hidden2_large, transpose_b=True) / temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = tf.matmul(hidden1, hidden2_large, transpose_b=True) / temperature
logits_ba = tf.matmul(hidden2, hidden1_large, transpose_b=True) / temperature
loss_a = tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ab, logits_aa], 1))
loss_b = tf.nn.softmax_cross_entropy_with_logits(
labels, tf.concat([logits_ba, logits_bb], 1))
loss = tf.reduce_mean(loss_a + loss_b)
return loss, logits_ab, labels
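# A small sketch of the label/mask layout built inside add_contrastive_loss
# above, assuming a local batch of 3 positive pairs: labels are one-hot over
# the 2*batch_size concatenated logits, and masks blank out self-similarity.
_bs = 3
_labels = tf.one_hot(tf.range(_bs), _bs * 2)
_masks = tf.one_hot(tf.range(_bs), _bs)
assert _labels.shape == (3, 6) and _masks.shape == (3, 3)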
| 36.283333
| 81
| 0.683969
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 478
| 0.219568
|
0c336dedc298c3448acb41a9e995e66ab5dfe2bf
| 3,391
|
py
|
Python
|
suzieq/engines/pandas/tables.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
suzieq/engines/pandas/tables.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
suzieq/engines/pandas/tables.py
|
zxiiro/suzieq
|
eca92820201c05bc80081599f69e41cd6b991107
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
from suzieq.engines.pandas.engineobj import SqPandasEngine
from suzieq.sqobjects import get_sqobject
class TableObj(SqPandasEngine):
@staticmethod
def table_name():
return 'tables'
def get(self, **kwargs):
"""Show the known tables for which we have information"""
table_list = self._dbeng.get_tables()
df = pd.DataFrame()
columns = kwargs.pop('columns', ['default'])
unknown_tables = []
tables = []
for table in table_list:
table_obj = get_sqobject(table)
if not table_obj:
# This is a table without an sqobject backing store
# this happens either because we haven't yet implemented the
# table functions or because this table is collapsed into a
# single table as in the case of ospf
unknown_tables.append(table)
table_inst = get_sqobject('tables')(context=self.ctxt)
table_inst._table = table
else:
table_inst = table_obj(context=self.ctxt)
info = {'table': table}
info.update(table_inst.get_table_info(
table, columns=['namespace', 'hostname', 'timestamp'],
**kwargs))
tables.append(info)
df = pd.DataFrame.from_dict(tables)
if df.empty:
return df
df = df.sort_values(by=['table']).reset_index(drop=True)
cols = df.columns
total = pd.DataFrame([['TOTAL', df['firstTime'].min(),
df['latestTime'].max(),
df['intervals'].max(),
df['allRows'].sum(),
df['namespaces'].max(),
df['deviceCnt'].max()]],
columns=cols)
df = df.append(total, ignore_index=True).dropna()
return df
def summarize(self, **kwargs):
df = self.get(**kwargs)
if df.empty or ('error' in df.columns):
return df
df = df.set_index(['table'])
sdf = pd.DataFrame({
'serviceCnt': [df.index.nunique()-1],
'namespaceCnt': [df.at['TOTAL', 'namespaces']],
'deviceCnt': [df.at['device', 'deviceCnt']],
'earliestTimestamp': [df.firstTime.min()],
'lastTimestamp': [df.latestTime.max()],
'firstTime99': [df.firstTime.quantile(0.99)],
'latestTime99': [df.latestTime.quantile(0.99)],
})
return sdf.T.rename(columns={0: 'summary'})
    def top(self, **kwargs):
        "The tables implementation of top must exclude the synthetic TOTAL row"
what = kwargs.pop("what", None)
reverse = kwargs.pop("reverse", False)
sqTopCount = kwargs.pop("count", 5)
if not what:
return pd.DataFrame()
df = self.get(addnl_fields=self.iobj._addnl_fields, **kwargs)
if df.empty or ('error' in df.columns):
return df
if reverse:
return df.query('table != "TOTAL"') \
.nsmallest(sqTopCount, columns=what, keep="all") \
.head(sqTopCount)
else:
return df.query('table != "TOTAL"') \
.nlargest(sqTopCount, columns=what, keep="all") \
.head(sqTopCount)
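# A self-contained, hypothetical sketch (not part of suzieq) of the pattern
# get() and top() use above: append a TOTAL summary row to a per-table frame,
# then exclude it when ranking. The column names mirror a subset of those in
# get(), the numbers are made up, and pd.concat stands in for the deprecated
# DataFrame.append used by the original code.
if __name__ == '__main__':
    import pandas as pd
    df = pd.DataFrame({'table': ['arpnd', 'bgp', 'device'],
                       'allRows': [120, 450, 30],
                       'deviceCnt': [10, 8, 10]})
    total = pd.DataFrame([['TOTAL', df['allRows'].sum(), df['deviceCnt'].max()]],
                         columns=df.columns)
    df = pd.concat([df, total], ignore_index=True)
    # Ranking must skip the synthetic TOTAL row, exactly as top() does.
    print(df.query('table != "TOTAL"').nlargest(2, columns='allRows'))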
| 34.252525
| 76
| 0.529932
| 3,266
| 0.963138
| 0
| 0
| 59
| 0.017399
| 0
| 0
| 712
| 0.209968
|
0c34007b8ed98fbad90350a4894f2960e309e1be
| 3,306
|
py
|
Python
|
connect_box/data.py
|
jtru/python-connect-box
|
2d26923e966fbb319760da82e3e71103018ded0b
|
[
"MIT"
] | null | null | null |
connect_box/data.py
|
jtru/python-connect-box
|
2d26923e966fbb319760da82e3e71103018ded0b
|
[
"MIT"
] | null | null | null |
connect_box/data.py
|
jtru/python-connect-box
|
2d26923e966fbb319760da82e3e71103018ded0b
|
[
"MIT"
] | null | null | null |
"""Handle Data attributes."""
from datetime import datetime
from ipaddress import IPv4Address, IPv6Address, ip_address as convert_ip
from typing import Iterable, Union
import attr
@attr.s
class Device:
"""A single device."""
mac: str = attr.ib()
hostname: str = attr.ib(cmp=False)
ip: Union[IPv4Address, IPv6Address] = attr.ib(cmp=False, converter=convert_ip)
interface: str = attr.ib()
speed: int = attr.ib()
interface_id: int = attr.ib()
method: int = attr.ib()
lease_time: str = attr.ib(
converter=lambda lease_time: datetime.strptime(lease_time, "00:%H:%M:%S")
)
@attr.s
class DownstreamChannel:
"""A locked downstream channel."""
frequency: int = attr.ib()
powerLevel: int = attr.ib()
modulation: str = attr.ib()
id: str = attr.ib()
snr: float = attr.ib()
preRs: int = attr.ib()
postRs: int = attr.ib()
qamLocked: bool = attr.ib()
fecLocked: bool = attr.ib()
mpegLocked: bool = attr.ib()
@attr.s
class UpstreamChannel:
"""A locked upstream channel."""
frequency: int = attr.ib()
powerLevel: int = attr.ib()
symbolRate: str = attr.ib()
id: str = attr.ib()
modulation: str = attr.ib()
type: str = attr.ib()
t1Timeouts: int = attr.ib()
t2Timeouts: int = attr.ib()
t3Timeouts: int = attr.ib()
t4Timeouts: int = attr.ib()
channelType: str = attr.ib()
messageType: int = attr.ib()
@attr.s
class Ipv6FilterInstance:
"""An IPv6 filter rule instance."""
idd: int = attr.ib()
    srcAddr: Union[IPv4Address, IPv6Address] = attr.ib(converter=convert_ip)
    srcPrefix: int = attr.ib()
    dstAddr: Union[IPv4Address, IPv6Address] = attr.ib(converter=convert_ip)
dstPrefix: int = attr.ib()
srcPortStart: int = attr.ib()
srcPortEnd: int = attr.ib()
dstPortStart: int = attr.ib()
dstPortEnd: int = attr.ib()
protocol: int = attr.ib()
allow: int = attr.ib()
enabled: int = attr.ib()
@attr.s
class FiltersTimeMode:
"""Filters time setting."""
TMode: int = attr.ib()
XmlGeneralTime: str = attr.ib()
XmlDailyTime: str = attr.ib()
@attr.s
class FilterStatesList:
"""A sequence of filter state instances."""
entries: Iterable = attr.ib()
@attr.s
class FilterState:
"""A filter state instance."""
idd: int = attr.ib()
enabled: int = attr.ib()
@attr.s
class CmStatus:
    """Cable modem status information."""
provisioningStatus: str = attr.ib()
cmComment: str = attr.ib()
cmDocsisMode: str = attr.ib()
cmNetworkAccess: str = attr.ib()
firmwareFilename: str = attr.ib()
# number of IP addresses to assign via DHCP
numberOfCpes: int = attr.ib()
# ???
dMaxCpes: int = attr.ib()
bpiEnable: int = attr.ib()
@attr.s
class ServiceFlow:
    """A DOCSIS service flow entry."""
id: int = attr.ib()
pMaxTrafficRate: int = attr.ib()
pMaxTrafficBurst: int = attr.ib()
pMinReservedRate: int = attr.ib()
pMaxConcatBurst: int = attr.ib()
# 2 seems to be Best Effort
pSchedulingType: int = attr.ib()
@attr.s
class Temperature:
    """Device temperature readings."""
# temperatures in degrees Celsius
tunerTemperature: float = attr.ib()
temperature: float = attr.ib()
# several other stats remain untapped here:
# wan_ipv4_addr
# wan_ipv6_addr, wan_ipv6_addr_entry
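# A minimal, hypothetical construction example (not part of the original
# module): the MAC, hostname, IP and lease time values are made up, and the
# point is just to show the attrs converters at work -- the IP string becomes
# an ipaddress object and the lease time is parsed with the "00:%H:%M:%S"
# format used above.
if __name__ == "__main__":
    dev = Device(
        mac="AA:BB:CC:DD:EE:FF",
        hostname="laptop",
        ip="192.168.0.42",
        interface="Ethernet",
        speed=1000,
        interface_id=1,
        method=1,
        lease_time="00:23:59:59",
    )
    print(dev.ip, type(dev.ip).__name__)  # 192.168.0.42 IPv4Address
    print(dev.lease_time)  # 1900-01-01 23:59:59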
| 24.857143
| 82
| 0.635209
| 3,019
| 0.913188
| 0
| 0
| 3,099
| 0.937387
| 0
| 0
| 467
| 0.141258
|