hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9610da1cf47afbf95b11be72f8e2780125e49449 | 27,544 | py | Python | functions/asmm_xml.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | null | null | null | functions/asmm_xml.py | EUFAR/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | 2 | 2015-06-12T09:28:29.000Z | 2015-06-12T09:34:16.000Z | functions/asmm_xml.py | eufarn7sp/asmm-eufar | 69ede7a24f757392e63f04091e86c50ab129016f | [
"BSD-3-Clause"
] | null | null | null | import datetime
import xml.dom.minidom
import logging
from PyQt5 import QtCore, QtWidgets
from functions.button_functions import add_read
NAMESPACE_URI = 'http://www.eufar.net/ASMM'
| 46.37037 | 138 | 0.647255 |
9610eaf838ce8599d05cfd89f28acb8943b4bb46 | 191 | py | Python | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | github/models.py | pyprism/Hiren-Git-Commit-Reminder | 253ba078f63cc9bf3f39a5b735a783c4846b5ba7 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 19.1 | 51 | 0.759162 |
961296a2dbd17acbbeca5341d04b5200b3df15a3 | 4,973 | py | Python | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2017-03-28T06:41:51.000Z | 2017-03-28T06:41:51.000Z | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | null | null | null | linux-distro/package/nuxleus/Source/Vendor/Microsoft/IronPython-2.0.1/Lib/Axon/idGen.py | mdavid/nuxleus | 653f1310d8bf08eaa5a7e3326c2349e56a6abdc2 | [
"BSD-3-Clause"
] | 1 | 2016-12-13T21:08:58.000Z | 2016-12-13T21:08:58.000Z | #!/usr/bin/python
#
# Copyright (C) 2004 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: kamaelia-list-owner@lists.sourceforge.net
# to discuss alternative licensing.
# -------------------------------------------------------------------------
"""\
====================
Unique ID generation
====================
The methods of the idGen class are used to generate unique IDs in various forms
(numbers, strings, etc) which are used to give microprocesses and other Axon
objects a unique identifier and name.
* Every Axon.Microprocess.microprocess gets a unique ID
* Axon.ThreadedComponent.threadedcomponent uses unique IDs to identify threads
Generating a new unique ID
--------------------------
Do not use the idGen class defined in this module directly. Instead, use any
of these module methods to obtain a unique ID:
* **Axon.idGen.newId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.strId(thing)** - returns a unique identifier as a string based on
the class name of the object provided
* **Axon.idGen.numId()** - returns a unique identifier as a number
* **Axon.idGen.tupleId(thing)** - returns both the numeric and string versions
of a new unique id as a tuple (where the string version is based on the class
name of the object provided)
Calling tupleId(thing) is *not* equivalent to calling numId() then strId(thing)
because doing that would return two different id values!
Examples::
>>> x=Component.component()
>>> idGen.newId(x)
'Component.component_4'
>>> idGen.strId(x)
'Component.component_5'
>>> idGen.numId()
6
>>> idGen.tupleId(x)
(7, 'Component.component_7')
"""
import debug;
debugger = debug.debug()
debugger.useConfig()
Debug = debugger.debug
# idGen - A class to provide Unique Identifiers
#
# Ids can provide be provided as numerical, string or a tuple.
#
# numerical ids are integers allocated on a "next integer" basis.
# eg object 1, apple 2, orange 3. (Not object 1, apple 2, orange 3)
#
# string ids consist of the '__str__' of the object, with the numerical
# id tacked on the end.
#
# tuple ids consists : '(the numerical id, the string id)'
#
newId = idGen().strId
strId=idGen().strId
numId=idGen().numId
tupleId=idGen().tupleId
if __name__ == '__main__':
print newId(foo())
print newId(bar())
print newId(bibble())
| 31.474684 | 83 | 0.646893 |
961374e180229cec23558c1850e6a56b8464ae8b | 63,005 | py | Python | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
] | null | null | null | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
] | null | null | null | pyCEvNS/flux.py | athompson-tamu/pyCEvNS | feb3f83c706e6604608eae83c50ac79ced9140bf | [
"MIT"
] | null | null | null | """
flux related class and functions
"""
from scipy.integrate import quad
import pandas as pd
from .helper import LinearInterp, polar_to_cartesian, lorentz_boost, lorentz_matrix
from .oscillation import survival_solar
from .parameters import *
| 48.09542 | 163 | 0.53164 |
9613fedd3e0d7142ca8e288d57dc930f5c14893f | 7,252 | py | Python | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | [
"BSD-3-Clause"
] | 7 | 2015-09-19T20:57:32.000Z | 2020-12-31T16:34:42.000Z | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | [
"BSD-3-Clause"
] | 21 | 2015-11-03T23:15:25.000Z | 2018-10-11T21:57:45.000Z | enso/contrib/minimessages.py | blackdaemon/enso-launcher-continued | 346f82811e77caf73560619cdeb16afabfbf1fce | [
"BSD-3-Clause"
] | 4 | 2015-09-15T17:18:00.000Z | 2021-06-16T07:06:06.000Z | # Author : Pavel Vitis "blackdaemon"
# Email : blackdaemon@seznam.cz
#
# Copyright (c) 2010, Pavel Vitis <blackdaemon@seznam.cz>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of Enso nor the names of its contributors may
# be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
#
# enso.contrib.minimessages
#
# ----------------------------------------------------------------------------
"""
An Enso plugin that makes all mini-messages related commands available.
Commands:
hide mini messages
"""
# ----------------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------------
from xml.sax.saxutils import escape as xml_escape
import enso.messages
from enso.commands import CommandManager, CommandObject
from enso.commands.factories import ArbitraryPostfixFactory
from enso.contrib.scriptotron.ensoapi import EnsoApi
from enso.contrib.scriptotron.tracebacks import safetyNetted
from enso.messages import MessageManager, TimedMiniMessage
ensoapi = EnsoApi()
# ----------------------------------------------------------------------------
# The 'hide mini messages' command
# ---------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# The 'show mini message' testing command
# ---------------------------------------------------------------------------
# ----------------------------------------------------------------------------
# Plugin initialization
# ---------------------------------------------------------------------------
def load():
    """Register every mini-message command with Enso's command manager.

    Called once by the plugin loader; registration order matches the
    declaration order of the commands below.
    """
    manager = CommandManager.get()
    commands = (
        (HideMiniMessagesCommand.NAME, HideMiniMessagesCommand()),
        (ShowMiniMessageFactory.NAME, ShowMiniMessageFactory()),
        (ShowRecentMessageCommand.NAME, ShowRecentMessageCommand()),
    )
    for name, command in commands:
        manager.registerCommand(name, command)
# vim:set tabstop=4 shiftwidth=4 expandtab:
| 34.533333 | 90 | 0.59818 |
9616192a13cde5beffe85342bdb0bcbe725c8e0a | 3,597 | py | Python | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | [
"MIT"
] | null | null | null | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | [
"MIT"
] | null | null | null | article_curation/test_article_curation.py | mrkarezina/graph-recommendation-api | 8ed3895f7816b095ec27f3c1d972bf5b8e163b34 | [
"MIT"
] | null | null | null | import unittest
from unittest.mock import Mock
import json
from processor import scrape_article
import main
| 34.586538 | 186 | 0.633584 |
9616936f76e77083ea419e018de9e5eaec39224e | 4,715 | py | Python | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | [
"MIT"
] | null | null | null | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | [
"MIT"
] | null | null | null | test.py | chdre/noise-randomized | c803fd6c6fd641a0b1c0f4880920584a647587bc | [
"MIT"
] | 3 | 2021-10-05T09:01:51.000Z | 2021-10-05T09:37:06.000Z | import unittest
if __name__ == '__main__':
unittest.main()
| 32.972028 | 79 | 0.487381 |
961a0eab590ae86fe03daebff4911d080dc4f38a | 3,829 | py | Python | pipelines/controllers/datasets.py | platiagro/pipeline-generator | d84b9512c39970c469154eaed56f08780ebf21eb | [
"Apache-2.0"
] | 1 | 2020-05-19T14:57:55.000Z | 2020-05-19T14:57:55.000Z | pipelines/controllers/datasets.py | platiagro/pipelines | d84b9512c39970c469154eaed56f08780ebf21eb | [
"Apache-2.0"
] | 93 | 2020-04-25T21:10:49.000Z | 2020-12-15T18:25:49.000Z | pipelines/controllers/datasets.py | platiagro/pipelines | d84b9512c39970c469154eaed56f08780ebf21eb | [
"Apache-2.0"
] | 6 | 2019-09-05T12:37:59.000Z | 2020-08-08T00:08:25.000Z | # -*- coding: utf-8 -*-
import platiagro
import pandas as pd
from werkzeug.exceptions import NotFound
from pipelines.database import db_session
from pipelines.models import Operator
from pipelines.models.utils import raise_if_experiment_does_not_exist
def get_dataset_name(experiment_id, operator_id,):
    """Retrieves a dataset name from experiment.

    Checks the given operator's ``dataset`` parameter first; when it is
    absent, falls back to scanning the other operators of the same
    experiment for one that has a dataset set.

    Args:
        experiment_id(str): the experiment uuid
        operator_id(str): the operator uuid

    Returns:
        Dataset name

    Raises:
        NotFound: if the operator does not exist or no dataset name can
            be found anywhere in the experiment.
    """
    raise_if_experiment_does_not_exist(experiment_id)

    operator = Operator.query.get(operator_id)
    if operator is None:
        raise NotFound("The specified operator does not exist")

    dataset = operator.parameters.get('dataset')
    if dataset is not None:
        return dataset

    # This operator has no dataset parameter: look at its siblings.
    siblings = db_session.query(Operator) \
        .filter_by(experiment_id=experiment_id) \
        .filter(Operator.uuid != operator_id) \
        .all()
    for sibling in siblings:
        dataset = sibling.parameters.get('dataset')
        if dataset:
            break

    if dataset is None:
        raise NotFound()
    return dataset
def get_dataset_pagination(application_csv,
                           name,
                           operator_id,
                           page,
                           page_size,
                           run_id):
    """Retrieves a dataset, optionally paginated and/or rendered as CSV.

    Args:
        application_csv(bool): if True, return the result as CSV text
        name(str): the dataset name
        operator_id(str): the operator uuid
        page(int): page number
        page_size(int): records per page; -1 returns the whole dataset
        run_id (str): the run id.

    Returns:
        The dataset (or one page of it) as a dict in pandas "split"
        layout, or as CSV text when ``application_csv`` is set.

    Raises:
        NotFound: if the dataset (or its run metadata) cannot be found.
    """
    try:
        metadata = platiagro.stat_dataset(name=name, operator_id=operator_id)
        # A dataset without run metadata is treated as missing.
        if "run_id" not in metadata:
            raise FileNotFoundError()
        dataset = platiagro.load_dataset(name=name, operator_id=operator_id, run_id=run_id)
    except FileNotFoundError as e:
        raise NotFound(str(e))

    # page_size == -1 means "no pagination": return everything.
    if page_size == -1:
        if application_csv:
            return dataset.to_csv(index=False)
        content = dataset.to_dict(orient="split")
        del content["index"]
        return content

    content = dataset.to_dict(orient="split")
    del content["index"]
    paged = pagination_datasets(page=page, page_size=page_size, dataset=content)
    if application_csv:
        frame = pd.DataFrame(columns=paged['columns'], data=paged['data'])
        return frame.to_csv(index=False)
    return paged
def pagination_datasets(page, page_size, dataset):
    """Paginates a dataset dictionary.

    Args:
        page(int): 1-based page number.
        page_size(int): number of records per page.
        dataset(dict): data to be paged, in pandas ``to_dict(orient="split")``
            layout — must contain "columns" and "data" keys.

    Returns:
        dict: the requested page, with "columns", "data" and "total" keys,
        where "total" is the record count of the FULL dataset.

    Raises:
        NotFound: when the requested page contains no records.
    """
    data = dataset['data']
    start = (page - 1) * page_size
    # Reject page < 1 explicitly: the previous arithmetic produced a
    # negative start index, and Python's negative indexing then silently
    # returned records from the END of the data as if they were "page 0".
    if start < 0:
        raise NotFound("The informed page does not contain records")

    # A slice replaces the original append loop; past-the-end slices are
    # simply empty, so an out-of-range page yields NotFound below.
    page_data = data[start:start + page_size]
    if not page_data:
        raise NotFound("The informed page does not contain records")

    return {
        'columns': dataset['columns'],
        'data': page_data,
        'total': len(data),
    }
961b41ac7e12348d2cd9bb21a06c9a3f33d3b4af | 4,545 | py | Python | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | [
"MIT"
] | null | null | null | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | [
"MIT"
] | null | null | null | tests/test_message.py | jfkinslow/flask-mailing | dda99214b783b60fabc7dfad209fff4438eaf61c | [
"MIT"
] | null | null | null | import pytest
from flask_mailing.schemas import Message, MultipartSubtypeEnum
from flask_mailing.msg import MailMsg
import os
CONTENT = "file test content"
| 24.175532 | 105 | 0.59824 |
961ccfb0c6fb46c865492bed7af363f36b450b4b | 1,239 | py | Python | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | [
"MIT"
] | 12 | 2021-01-09T06:17:51.000Z | 2022-03-18T06:30:15.000Z | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | [
"MIT"
] | 21 | 2021-03-21T16:43:45.000Z | 2022-02-01T16:02:26.000Z | utils/checks.py | JDJGInc/JDBot | 057bcc5c80452c9282606e9bf66219e614aac5e1 | [
"MIT"
] | 25 | 2021-03-21T16:33:56.000Z | 2022-03-12T16:52:25.000Z | import discord
| 27.533333 | 158 | 0.742534 |
961ceec2cadcdefd7771e879e51fe43976210c30 | 46,670 | py | Python | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | [
"MIT"
] | 1 | 2020-08-11T01:17:19.000Z | 2020-08-11T01:17:19.000Z | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | [
"MIT"
] | null | null | null | scripts/mgear/rigbits/eye_rigger.py | stormstudios/rigbits | 37ce738952a3cd31ba8a18b8989f5ea491d03bf0 | [
"MIT"
] | null | null | null | """Rigbits eye rigger tool"""
import json
import traceback
from functools import partial
import mgear.core.pyqt as gqt
import pymel.core as pm
from maya.app.general.mayaMixin import MayaQWidgetDockableMixin
from mgear.core import meshNavigation, curve, applyop, node, primitive, icon
from mgear.core import transform, utils, attribute, skin, string
from mgear.vendor.Qt import QtCore, QtWidgets
from pymel.core import datatypes
from mgear import rigbits
##########################################################
# Eye rig constructor
##########################################################
def eyeRig(eyeMesh,
edgeLoop,
blinkH,
namePrefix,
offset,
rigidLoops,
falloffLoops,
headJnt,
doSkin,
parent=None,
ctlName="ctl",
sideRange=False,
customCorner=False,
intCorner=None,
extCorner=None,
ctlGrp=None,
defGrp=None):
"""Create eyelid and eye rig
Args:
eyeMesh (TYPE): Description
edgeLoop (TYPE): Description
blinkH (TYPE): Description
namePrefix (TYPE): Description
offset (TYPE): Description
rigidLoops (TYPE): Description
falloffLoops (TYPE): Description
headJnt (TYPE): Description
doSkin (TYPE): Description
parent (None, optional): Description
ctlName (str, optional): Description
sideRange (bool, optional): Description
customCorner (bool, optional): Description
intCorner (None, optional): Description
extCorner (None, optional): Description
ctlGrp (None, optional): Description
defGrp (None, optional): Description
Returns:
TYPE: Description
"""
# Checkers
if edgeLoop:
edgeLoopList = [pm.PyNode(e) for e in edgeLoop.split(",")]
else:
pm.displayWarning("Please set the edge loop first")
return
if eyeMesh:
try:
eyeMesh = pm.PyNode(eyeMesh)
except pm.MayaNodeError:
pm.displayWarning("The object %s can not be found in the "
"scene" % (eyeMesh))
return
else:
pm.displayWarning("Please set the eye mesh first")
if doSkin:
if not headJnt:
pm.displayWarning("Please set the Head Jnt or unCheck "
"Compute Topological Autoskin")
return
# Initial Data
bboxCenter = meshNavigation.bboxCenter(eyeMesh)
extr_v = meshNavigation.getExtremeVertexFromLoop(edgeLoopList, sideRange)
upPos = extr_v[0]
lowPos = extr_v[1]
inPos = extr_v[2]
outPos = extr_v[3]
edgeList = extr_v[4]
vertexList = extr_v[5]
# Detect the side L or R from the x value
if inPos.getPosition(space='world')[0] < 0.0:
side = "R"
inPos = extr_v[3]
outPos = extr_v[2]
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = npw - bboxCenter
else:
side = "L"
normalPos = outPos
npw = normalPos.getPosition(space='world')
normalVec = bboxCenter - npw
# Manual Vertex corners
if customCorner:
if intCorner:
try:
if side == "R":
inPos = pm.PyNode(extCorner)
else:
inPos = pm.PyNode(intCorner)
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % intCorner)
return
else:
pm.displayWarning("Please set the internal eyelid corner")
return
if extCorner:
try:
normalPos = pm.PyNode(extCorner)
npw = normalPos.getPosition(space='world')
if side == "R":
outPos = pm.PyNode(intCorner)
normalVec = npw - bboxCenter
else:
outPos = pm.PyNode(extCorner)
normalVec = bboxCenter - npw
except pm.MayaNodeError:
pm.displayWarning("%s can not be found" % extCorner)
return
else:
pm.displayWarning("Please set the external eyelid corner")
return
# Check if we have prefix:
if namePrefix:
namePrefix = string.removeInvalidCharacter(namePrefix)
else:
pm.displayWarning("Prefix is needed")
return
if pm.ls(setName("root")):
pm.displayWarning("The object %s already exist in the scene. Please "
"choose another name prefix" % setName("root"))
return
# Eye root
eye_root = primitive.addTransform(None, setName("root"))
eyeCrv_root = primitive.addTransform(eye_root, setName("crvs"))
# Eyelid Main crvs
try:
upEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, upPos, inPos, outPos)
upCrv = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upperEyelid"), parent=eyeCrv_root)
upCrv_ctl = curve.createCurveFromOrderedEdges(
upEyelid, inPos, setName("upCtl_crv"), parent=eyeCrv_root)
pm.rebuildCurve(upCrv_ctl, s=2, rt=0, rpo=True, ch=False)
lowEyelid = meshNavigation.edgeRangeInLoopFromMid(
edgeList, lowPos, inPos, outPos)
lowCrv = curve.createCurveFromOrderedEdges(
lowEyelid, inPos, setName("lowerEyelid"), parent=eyeCrv_root)
lowCrv_ctl = curve.createCurveFromOrderedEdges(
lowEyelid,
inPos,
setName("lowCtl_crv"),
parent=eyeCrv_root)
pm.rebuildCurve(lowCrv_ctl, s=2, rt=0, rpo=True, ch=False)
except UnboundLocalError:
if customCorner:
pm.displayWarning("This error is maybe caused because the custom "
"Corner vertex is not part of the edge loop")
pm.displayError(traceback.format_exc())
return
upBlink = curve.createCurveFromCurve(
upCrv, setName("upblink_crv"), nbPoints=30, parent=eyeCrv_root)
lowBlink = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_crv"), nbPoints=30, parent=eyeCrv_root)
upTarget = curve.createCurveFromCurve(
upCrv, setName("upblink_target"), nbPoints=30, parent=eyeCrv_root)
lowTarget = curve.createCurveFromCurve(
lowCrv, setName("lowBlink_target"), nbPoints=30, parent=eyeCrv_root)
midTarget = curve.createCurveFromCurve(
lowCrv, setName("midBlink_target"), nbPoints=30, parent=eyeCrv_root)
rigCrvs = [upCrv,
lowCrv,
upCrv_ctl,
lowCrv_ctl,
upBlink,
lowBlink,
upTarget,
lowTarget,
midTarget]
for crv in rigCrvs:
crv.attr("visibility").set(False)
# localBBOX
localBBox = eyeMesh.getBoundingBox(invisible=True, space='world')
wRadius = abs((localBBox[0][0] - localBBox[1][0]))
dRadius = abs((localBBox[0][1] - localBBox[1][1]) / 1.7)
# Groups
if not ctlGrp:
ctlGrp = "rig_controllers_grp"
try:
ctlSet = pm.PyNode(ctlGrp)
except pm.MayaNodeError:
pm.sets(n=ctlGrp, em=True)
ctlSet = pm.PyNode(ctlGrp)
if not defGrp:
defGrp = "rig_deformers_grp"
try:
defset = pm.PyNode(defGrp)
except pm.MayaNodeError:
pm.sets(n=defGrp, em=True)
defset = pm.PyNode(defGrp)
# Calculate center looking at
averagePosition = ((upPos.getPosition(space='world')
+ lowPos.getPosition(space='world')
+ inPos.getPosition(space='world')
+ outPos.getPosition(space='world'))
/ 4)
if side == "R":
negate = False
offset = offset
over_offset = dRadius
else:
negate = False
over_offset = dRadius
if side == "R" and sideRange or side == "R" and customCorner:
axis = "z-x"
# axis = "zx"
else:
axis = "z-x"
t = transform.getTransformLookingAt(
bboxCenter,
averagePosition,
normalVec,
axis=axis,
negate=negate)
over_npo = primitive.addTransform(
eye_root, setName("center_lookatRoot"), t)
over_ctl = icon.create(over_npo,
setName("over_%s" % ctlName),
t,
icon="square",
w=wRadius,
d=dRadius,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, over_offset),
color=4)
node.add_controller_tag(over_ctl)
attribute.add_mirror_config_channels(over_ctl)
attribute.setKeyableAttributes(
over_ctl,
params=["tx", "ty", "tz", "ro", "rx", "ry", "rz", "sx", "sy", "sz"])
if side == "R":
over_npo.attr("rx").set(over_npo.attr("rx").get() * -1)
over_npo.attr("ry").set(over_npo.attr("ry").get() + 180)
over_npo.attr("sz").set(-1)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=over_ctl)
center_lookat = primitive.addTransform(
over_ctl, setName("center_lookat"), t)
# Tracking
# Eye aim control
t_arrow = transform.getTransformLookingAt(bboxCenter,
averagePosition,
upPos.getPosition(space='world'),
axis="zy", negate=False)
radius = abs((localBBox[0][0] - localBBox[1][0]) / 1.7)
arrow_npo = primitive.addTransform(eye_root, setName("aim_npo"), t_arrow)
arrow_ctl = icon.create(arrow_npo,
setName("aim_%s" % ctlName),
t_arrow,
icon="arrow",
w=1,
po=datatypes.Vector(0, 0, radius),
color=4)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=arrow_ctl)
attribute.setKeyableAttributes(arrow_ctl, params=["rx", "ry", "rz"])
# tracking custom trigger
if side == "R":
tt = t_arrow
else:
tt = t
aimTrigger_root = primitive.addTransform(
center_lookat, setName("aimTrigger_root"), tt)
aimTrigger_lvl = primitive.addTransform(
aimTrigger_root, setName("aimTrigger_lvl"), tt)
aimTrigger_lvl.attr("tz").set(1.0)
aimTrigger_ref = primitive.addTransform(
aimTrigger_lvl, setName("aimTrigger_ref"), tt)
aimTrigger_ref.attr("tz").set(0.0)
# connect trigger with arrow_ctl
pm.parentConstraint(arrow_ctl, aimTrigger_ref, mo=True)
# Controls lists
upControls = []
trackLvl = []
# upper eyelid controls
upperCtlNames = ["inCorner", "upInMid", "upMid", "upOutMid", "outCorner"]
cvs = upCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
# if side == "R":
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % upperCtlNames[i]),
t)
npoBase = npo
if i == 2:
# we add an extra level to input the tracking ofset values
npo = primitive.addTransform(npo,
setName("%s_trk" % upperCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (upperCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
node.add_controller_tag(ctl, over_ctl)
upControls.append(ctl)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
# adding parent average contrains to odd controls
for i, ctl in enumerate(upControls):
if utils.is_odd(i):
pm.parentConstraint(upControls[i - 1],
upControls[i + 1],
ctl.getParent(),
mo=True)
# lower eyelid controls
lowControls = [upControls[0]]
lowerCtlNames = ["inCorner",
"lowInMid",
"lowMid",
"lowOutMid",
"outCorner"]
cvs = lowCrv_ctl.getCVs(space="world")
if side == "R" and not sideRange:
cvs = [cv for cv in reversed(cvs)]
for i, cv in enumerate(cvs):
# we skip the first and last point since is already in the uper eyelid
if i in [0, 4]:
continue
if utils.is_odd(i):
color = 14
wd = .5
icon_shape = "circle"
params = ["tx", "ty", "tz"]
else:
color = 4
wd = .7
icon_shape = "square"
params = ["tx",
"ty",
"tz",
"ro",
"rx",
"ry",
"rz",
"sx",
"sy",
"sz"]
t = transform.setMatrixPosition(t, cvs[i])
npo = primitive.addTransform(center_lookat,
setName("%s_npo" % lowerCtlNames[i]),
t)
npoBase = npo
if i == 2:
# we add an extra level to input the tracking ofset values
npo = primitive.addTransform(npo,
setName("%s_trk" % lowerCtlNames[i]),
t)
trackLvl.append(npo)
ctl = icon.create(npo,
setName("%s_%s" % (lowerCtlNames[i], ctlName)),
t,
icon=icon_shape,
w=wd,
d=wd,
ro=datatypes.Vector(1.57079633, 0, 0),
po=datatypes.Vector(0, 0, offset),
color=color)
attribute.add_mirror_config_channels(ctl)
lowControls.append(ctl)
if len(ctlName.split("_")) == 2 and ctlName.split("_")[-1] == "ghost":
pass
else:
pm.sets(ctlSet, add=ctl)
attribute.setKeyableAttributes(ctl, params)
# mirror behaviout on R side controls
if side == "R":
npoBase.attr("ry").set(180)
npoBase.attr("sz").set(-1)
for lctl in reversed(lowControls[1:]):
node.add_controller_tag(lctl, over_ctl)
lowControls.append(upControls[-1])
# adding parent average contrains to odd controls
for i, ctl in enumerate(lowControls):
if utils.is_odd(i):
pm.parentConstraint(lowControls[i - 1],
lowControls[i + 1],
ctl.getParent(),
mo=True)
# Connecting control crvs with controls
applyop.gear_curvecns_op(upCrv_ctl, upControls)
applyop.gear_curvecns_op(lowCrv_ctl, lowControls)
# adding wires
w1 = pm.wire(upCrv, w=upBlink)[0]
w2 = pm.wire(lowCrv, w=lowBlink)[0]
w3 = pm.wire(upTarget, w=upCrv_ctl)[0]
w4 = pm.wire(lowTarget, w=lowCrv_ctl)[0]
# adding blendshapes
bs_upBlink = pm.blendShape(upTarget,
midTarget,
upBlink,
n="blendShapeUpBlink")
bs_lowBlink = pm.blendShape(lowTarget,
midTarget,
lowBlink,
n="blendShapeLowBlink")
bs_mid = pm.blendShape(lowTarget,
upTarget,
midTarget,
n="blendShapeLowBlink")
# setting blendshape reverse connections
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_upBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_upBlink[0].attr(upTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_lowBlink[0].attr(midTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX",
bs_lowBlink[0].attr(lowTarget.name()))
rev_node = pm.createNode("reverse")
pm.connectAttr(bs_mid[0].attr(upTarget.name()), rev_node + ".inputX")
pm.connectAttr(rev_node + ".outputX", bs_mid[0].attr(lowTarget.name()))
# setting default values
bs_mid[0].attr(upTarget.name()).set(blinkH)
# joints root
jnt_root = primitive.addTransformFromPos(
eye_root, setName("joints"), pos=bboxCenter)
# head joint
if headJnt:
try:
headJnt = pm.PyNode(headJnt)
jnt_base = headJnt
except pm.MayaNodeError:
pm.displayWarning(
"Aborted can not find %s " % headJnt)
return
else:
# Eye root
jnt_base = jnt_root
eyeTargets_root = primitive.addTransform(eye_root,
setName("targets"))
eyeCenter_jnt = rigbits.addJnt(arrow_ctl,
jnt_base,
grp=defset,
jntName=setName("center_jnt"))
# Upper Eyelid joints ##################################################
cvs = upCrv.getCVs(space="world")
upCrv_info = node.createCurveInfoNode(upCrv)
# aim constrain targets and joints
upperEyelid_aimTargets = []
upperEyelid_jnt = []
upperEyelid_jntRoot = []
for i, cv in enumerate(cvs):
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("upEyelid_aimTarget", i),
pos=cv)
upperEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(upCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("upEyelid_jnt_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
upperEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("upEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("upEyelid_jnt", i))
upperEyelid_jnt.append(jnt)
# Lower Eyelid joints ##################################################
cvs = lowCrv.getCVs(space="world")
lowCrv_info = node.createCurveInfoNode(lowCrv)
# aim constrain targets and joints
lowerEyelid_aimTargets = []
lowerEyelid_jnt = []
lowerEyelid_jntRoot = []
for i, cv in enumerate(cvs):
if i in [0, len(cvs) - 1]:
continue
# aim targets
trn = primitive.addTransformFromPos(eyeTargets_root,
setName("lowEyelid_aimTarget", i),
pos=cv)
lowerEyelid_aimTargets.append(trn)
# connecting positions with crv
pm.connectAttr(lowCrv_info + ".controlPoints[%s]" % str(i),
trn.attr("translate"))
# joints
jntRoot = primitive.addJointFromPos(jnt_root,
setName("lowEyelid_base", i),
pos=bboxCenter)
jntRoot.attr("radius").set(.08)
jntRoot.attr("visibility").set(False)
lowerEyelid_jntRoot.append(jntRoot)
applyop.aimCns(jntRoot, trn, axis="zy", wupObject=jnt_root)
jnt_ref = primitive.addJointFromPos(jntRoot,
setName("lowEyelid_jnt_ref", i),
pos=cv)
jnt_ref.attr("radius").set(.08)
jnt_ref.attr("visibility").set(False)
jnt = rigbits.addJnt(jnt_ref,
jnt_base,
grp=defset,
jntName=setName("lowEyelid_jnt", i))
lowerEyelid_jnt.append(jnt)
# Channels
# Adding and connecting attributes for the blink
up_ctl = upControls[2]
blink_att = attribute.addAttribute(
over_ctl, "blink", "float", 0, minValue=0, maxValue=1)
blinkMult_att = attribute.addAttribute(
over_ctl, "blinkMult", "float", 1, minValue=1, maxValue=2)
midBlinkH_att = attribute.addAttribute(
over_ctl, "blinkHeight", "float", blinkH, minValue=0, maxValue=1)
mult_node = node.createMulNode(blink_att, blinkMult_att)
pm.connectAttr(mult_node + ".outputX",
bs_upBlink[0].attr(midTarget.name()))
pm.connectAttr(mult_node + ".outputX",
bs_lowBlink[0].attr(midTarget.name()))
pm.connectAttr(midBlinkH_att, bs_mid[0].attr(upTarget.name()))
low_ctl = lowControls[2]
# Adding channels for eye tracking
upVTracking_att = attribute.addAttribute(up_ctl,
"vTracking",
"float",
.02,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
upHTracking_att = attribute.addAttribute(up_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowVTracking_att = attribute.addAttribute(low_ctl,
"vTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
lowHTracking_att = attribute.addAttribute(low_ctl,
"hTracking",
"float",
.01,
minValue=0,
maxValue=1,
keyable=False,
channelBox=True)
mult_node = node.createMulNode(upVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("ty"))
mult_node = node.createMulNode(upHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[0].attr("tx"))
mult_node = node.createMulNode(lowVTracking_att, aimTrigger_ref.attr("ty"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("ty"))
mult_node = node.createMulNode(lowHTracking_att, aimTrigger_ref.attr("tx"))
pm.connectAttr(mult_node + ".outputX", trackLvl[1].attr("tx"))
# Tension on blink
node.createReverseNode(blink_att, w1.scale[0])
node.createReverseNode(blink_att, w3.scale[0])
node.createReverseNode(blink_att, w2.scale[0])
node.createReverseNode(blink_att, w4.scale[0])
###########################################
# Reparenting
###########################################
if parent:
try:
if isinstance(parent, basestring):
parent = pm.PyNode(parent)
parent.addChild(eye_root)
except pm.MayaNodeError:
pm.displayWarning("The eye rig can not be parent to: %s. Maybe "
"this object doesn't exist." % parent)
###########################################
# Auto Skinning
###########################################
if doSkin:
# eyelid vertex rows
totalLoops = rigidLoops + falloffLoops
vertexLoopList = meshNavigation.getConcentricVertexLoop(vertexList,
totalLoops)
vertexRowList = meshNavigation.getVertexRowsFromLoops(vertexLoopList)
# we set the first value 100% for the first initial loop
skinPercList = [1.0]
# we expect to have a regular grid topology
for r in range(rigidLoops):
for rr in range(2):
skinPercList.append(1.0)
increment = 1.0 / float(falloffLoops)
# we invert to smooth out from 100 to 0
inv = 1.0 - increment
for r in range(falloffLoops):
for rr in range(2):
if inv < 0.0:
inv = 0.0
skinPercList.append(inv)
inv -= increment
# this loop add an extra 0.0 indices to avoid errors
for r in range(10):
for rr in range(2):
skinPercList.append(0.0)
# base skin
geo = pm.listRelatives(edgeLoopList[0], parent=True)[0]
# Check if the object has a skinCluster
objName = pm.listRelatives(geo, parent=True)[0]
skinCluster = skin.getSkinCluster(objName)
if not skinCluster:
skinCluster = pm.skinCluster(headJnt,
geo,
tsb=True,
nw=2,
n='skinClsEyelid')
eyelidJoints = upperEyelid_jnt + lowerEyelid_jnt
pm.progressWindow(title='Auto skinning process',
progress=0,
max=len(eyelidJoints))
firstBoundary = False
for jnt in eyelidJoints:
pm.progressWindow(e=True, step=1, status='\nSkinning %s' % jnt)
skinCluster.addInfluence(jnt, weight=0)
v = meshNavigation.getClosestVertexFromTransform(geo, jnt)
for row in vertexRowList:
if v in row:
it = 0 # iterator
inc = 1 # increment
for i, rv in enumerate(row):
try:
perc = skinPercList[it]
t_val = [(jnt, perc), (headJnt, 1.0 - perc)]
pm.skinPercent(skinCluster,
rv,
transformValue=t_val)
if rv.isOnBoundary():
# we need to compare with the first boundary
# to check if the row have inverted direction
# and offset the value
if not firstBoundary:
firstBoundary = True
firstBoundaryValue = it
else:
if it < firstBoundaryValue:
it -= 1
elif it > firstBoundaryValue:
it += 1
inc = 2
except IndexError:
continue
it = it + inc
pm.progressWindow(e=True, endProgress=True)
# Eye Mesh skinning
skinCluster = skin.getSkinCluster(eyeMesh)
if not skinCluster:
skinCluster = pm.skinCluster(eyeCenter_jnt,
eyeMesh,
tsb=True,
nw=1,
n='skinClsEye')
##########################################################
# Eye Rig UI
##########################################################
# build eyes from json file:
def eyesFromfile(path):
    """Build an eye rig from a serialized configuration file.

    Args:
        path (str): Path to a JSON file whose "eye" key holds the
            positional-argument list expected by ``eyeRig``.
    """
    # Use a context manager so the file handle is always closed; the
    # original `json.load(open(path))` leaked it (and never closed it
    # at all if json.load raised).
    with open(path) as config_file:
        build_dict = json.load(config_file)
    eyeRig(*build_dict["eye"])
def showEyeRigUI(*args):
    """Open the eye-rig builder dialog.

    ``*args`` is accepted and ignored so this function can be wired
    directly as a UI callback that receives extra arguments.
    """
    gqt.showDialog(eyeRigUI)
if __name__ == "__main__":
    # Interactive usage: open the builder dialog.
    showEyeRigUI()
    # Batch usage (kept as examples): build each eye from a serialized
    # .eyes configuration file instead of the dialog.
    # path = "C:\\Users\\miquel\\Desktop\\eye_L.eyes"
    # eyesFromfile(path)
    # path = "C:\\Users\\miquel\\Desktop\\eye_R.eyes"
    # eyesFromfile(path)
| 38.698176 | 79 | 0.541526 |
961d4ae687b3642af37cc358422318fe31255362 | 3,260 | py | Python | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | [
"Unlicense"
] | 2 | 2018-10-18T16:35:56.000Z | 2019-03-07T06:16:18.000Z | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | [
"Unlicense"
] | 2 | 2019-11-13T09:25:54.000Z | 2021-08-19T08:23:32.000Z | Python/expert/interact_with_linux/solution.py | fpichl/ProgrammingTasks | da494022455dd77de1c99a6c6e4962616e9764e6 | [
"Unlicense"
] | 3 | 2019-05-22T12:20:05.000Z | 2019-08-30T12:57:56.000Z | #!/usr/bin/env python3
# Interactive mirror-and-harvest script: wget-mirrors a web page into a
# temporary directory, copies every file whose name contains a user-chosen
# substring into a separate "findings" directory, then deletes the mirror.
# Requires the third-party 'sh' package for the wget binding.
import os
import shutil
import sys
import pathlib
import logging  # NOTE(review): imported but never used in this script.
# I will NEVER EVER use subproccess again
# At least not for something like Popen
try:
    from sh import wget
except Exception:
    print('[!] Just install sh right now!(pip install --user sh)')
    sys.exit(0)
# Dumb Python2 support
if sys.version_info[0] == 2:
    input = raw_input
# Path where this python script is located when it's run
curr_dir = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
# The URL (empty input falls back to the default host)
url = input('[$] Url(none for ema.perfact.de): ')
url = url if url else 'ema.perfact.de'
print('[*] Url: {}\n'.format(url))
# Get name of the directory where the whole page should be saved
dir_name = input('[$] Directory name for the page(none for "1337"): ')
dir_name = dir_name if dir_name else '1337'
page_dir = curr_dir / dir_name
if page_dir.is_dir():
    print('[!] {} is already a directory and will be overwritten!'.format(page_dir))
    choice = input('[!] Continue?(y/n):').lower()
    if choice != 'y':
        sys.exit(0)
print('[*] Directory to save the page: {}\n'.format(dir_name))
# Get name of directory where the files will be saved we actually want to save
save_name = input('[$] Directory name to save findings(none for "saved"): ')
save_name = save_name if save_name else 'saved'
save_dir = curr_dir / save_name
if save_dir.is_dir():
    print('[!] {} is already a directory!'.format(save_dir))
    # NOTE(review): the .format(save_dir) below is a no-op -- the prompt
    # string contains no placeholder.
    choice = input('[!] Delete it?(y/n): '.format(save_dir)).lower()
    if choice == 'y':
        shutil.rmtree(save_dir.absolute().as_posix())
    else:
        sys.exit(0)
# Recreate (or create) the findings directory now that it is known empty.
os.makedirs(save_dir.absolute().as_posix())
print('[*] Directory to save findings: {}\n'.format(save_name))
# The searchterm (which files we want to copy)
print('[*] Everything with the following substring will be copied')
search_term = input('[$] Files to copy to that directory(none for ".png"): ')
search_term = search_term if search_term else '.png'
print('[*] Searchterm: {}\n'.format(search_term))
input('\n[$] Press any key to continue...')
# We will give these exit_codes to the wget call later
# to disabled every exit/error message (will look horribly else)
exit_codes = (i for i in range(0, 9))
# Sets off the wget -m <url> -P <directory> commande
# It's written so weird, so we can see the output of the program
try:
    for line in wget('-m', url, '-P', dir_name, _iter=True, _err_to_out=True,
                     _out_bufsize=1, _ok_code=exit_codes):
        print(line)
except Exception:
    pass
# Copying the files we want to save
try:
    # Get every file with the correct searchterm from the folder where the webpage is saved
    files = list(page_dir.glob("**/*{}".format(search_term)))
    if not files:
        print("[!] No matching files found")
    else:
        print("[*] Copying {} *{} files...".format(len(files), search_term))
        for f in files:
            shutil.copy(f.absolute().as_posix(), save_dir.absolute().as_posix())
except Exception as e:
    print('[!] Something went wrong while copying data')
    print(e)
# Deleting the saved webpage, cause we don't need it anymore
print('\n[*] Cleaning up...\n')
if page_dir.is_dir():
    shutil.rmtree(page_dir.absolute().as_posix())
print('[*] All done!')
| 33.958333 | 91 | 0.674233 |
961e5e18627878c209a335c0392cc2286e8803ad | 323 | py | Python | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | [
"MIT"
] | 1 | 2021-10-19T11:35:34.000Z | 2021-10-19T11:35:34.000Z | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | [
"MIT"
] | null | null | null | Asap-3.8.4/Projects/NanoparticleMC/misc/viewatomsmc.py | auag92/n2dm | 03403ef8da303b79478580ae76466e374ec9da60 | [
"MIT"
] | 3 | 2016-07-18T19:22:48.000Z | 2021-07-06T03:06:42.000Z | import ase
from ase import Atoms
from ase.atom import Atom
import sys
from ase.visualize import view
import pickle

# Load the Monte Carlo snapshot (.amc pickle) named on the command line.
# Pickles are binary data: the file must be opened in 'rb' mode
# (text mode 'r' makes pickle.load fail under Python 3), and the
# context manager guarantees the handle is closed (the original leaked it).
with open(sys.argv[1], 'rb') as f:  # The .amc file
    p = pickle.load(f)
positions = p['atomspositions']
# Rebuild the cluster as gold atoms at the stored positions.
atms = Atoms()
for p0 in positions:
    a = Atom('Au', position=p0)
    atms.append(a)
atms.center(vacuum=2)  # pad the cell so the cluster sits clear of the box edges
view(atms)
| 17 | 40 | 0.721362 |
961e930045b962f6aec047adbd1d0fd8f14a977a | 453 | py | Python | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | [
"MIT"
] | null | null | null | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | [
"MIT"
] | null | null | null | bot_settings_example.py | nikmedoed/BalanceBot | 731e6d09d71bbf8d7802d0b42a570947343d3ce6 | [
"MIT"
] | null | null | null | # dev
# NOTE(review): these values are placeholders; the upstream example's TEXT
# entries carried non-ASCII (likely Cyrillic) message strings that appear to
# have been stripped by an encoding/extraction step -- restore them from the
# original repository before use.
TELEGRAM_TOKEN = "..."
RELATIVE_CHAT_IDS = [ "...", '...']
# User-facing message templates keyed by purpose; "new_room" contains a
# printf-style %s placeholder.
TEXT = {
"bot_info": (', , .\n\n'
' , '),
"get_link": " ",
"new_room": " \n%s",
"nothing_to_change": " "
} | 30.2 | 107 | 0.655629 |
961f8e0ded1739e7f84175c2bdac8bbf64966432 | 8,270 | py | Python | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 6 | 2015-01-30T03:50:36.000Z | 2022-03-20T16:09:58.000Z | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 2 | 2015-02-04T17:18:47.000Z | 2019-09-27T23:39:52.000Z | test/xslt/borrowed/sm_20000304.py | zepheira/amara | d3ffe07d6e2266b34d72b012a82d572c8edbf1e7 | [
"Apache-2.0"
] | 6 | 2015-02-04T16:16:18.000Z | 2019-10-30T20:07:48.000Z | ########################################################################
# test/xslt/sm20000304.py
# Example from Steve Muench <smuench@us.oracle.com>
# to Jon Smirl <jonsmirl@mediaone.net>
# on 4 March 2000
"""
From: "Steve Muench" <smuench@us.oracle.com>
To: <xsl-list@mulberrytech.com>
Subject: Re: SVG charts and graphs from XML input
Date: Sat, 4 Mar 2000 18:02:53 -0800 (19:02 MST)
This is by no means a bullet-proof, one-size-fits
all charting stylesheet, but it *was* my first foray
into SVG from XSLT.
Given XML results of an Oracle XSQL Page like:
<xsql:query xmlns:xsql="urn:oracle-xsql" connection="demo">
select ename, sal from dept
</xsql:query>
Which under the covers produces a dynamic XML doc like:
[SNIP source]
The following "salchart.xsl" XSLT stylesheet
renders a dynamic bar chart with "cool colors"
for the employees in the department.
You may have to modify the namespace of the
Java extension functions to get it to work in
XT or Saxon or other XSLT engines.
[SNIP stylesheet]
"""
import os
import cStringIO
import unittest
from amara.lib import treecompare
from amara.test import test_main
from amara.test.xslt import xslt_test, filesource, stringsource
### dalke - added to make the imports work
#Extensions
# Namespace URIs under which the stylesheet binds its Java extension
# functions (Oracle XSLT convention: base URI + '/' + Java class name).
ORACLE_JAVA_NS = 'http://www.oracle.com/XSL/Transform/java'
JAVA_COLOR_NS = ORACLE_JAVA_NS + '/java.awt.Color'
JAVA_INTEGER_NS = ORACLE_JAVA_NS + '/java.lang.Integer'
# (namespace URI, local name) -> Python callable emulating the Java function.
# NOTE(review): the Java_Color_* / Java_Integer_ToHexString callables are not
# defined anywhere in this excerpt -- confirm they exist in the full module,
# otherwise building this dict raises NameError at import time.
ExtFunctions = {
(JAVA_COLOR_NS, 'getHSBColor') : Java_Color_GetHSBColor,
(JAVA_COLOR_NS, 'getRed') : Java_Color_GetRed,
(JAVA_COLOR_NS, 'getGreen') : Java_Color_GetGreen,
(JAVA_COLOR_NS, 'getBlue') : Java_Color_GetBlue,
(JAVA_INTEGER_NS, 'toHexString') : Java_Integer_ToHexString,
}
# Hide the test framework from nose
del xslt_test
if __name__ == '__main__':
    test_main()
| 32.687747 | 143 | 0.606167 |
961fc04d55a2472f650b925e3c30b289d25af832 | 123 | py | Python | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | 1 | 2021-04-06T00:43:26.000Z | 2021-04-06T00:43:26.000Z | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | null | null | null | model-server/config.py | campos537/deep-fashion-system | 1de31dd6260cc967e1832cff63ae7e537a3a4e9d | [
"Unlicense"
] | null | null | null | import json
| 20.5 | 42 | 0.707317 |
9623258b0aadd8546b69b628ca22ab142e622094 | 1,642 | py | Python | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | 4 | 2017-09-15T03:38:56.000Z | 2019-12-16T02:03:14.000Z | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | 1 | 2021-04-27T10:51:42.000Z | 2021-04-27T10:51:42.000Z | pmfp/entrypoint/grpc/build_/build_pb_go.py | Python-Tools/pmfp | 832273890eec08e84f9c68d03f3316b2c8139133 | [
"MIT"
] | null | null | null | """go."""
import warnings
from typing import List, Optional
from pathlib import Path
from pmfp.utils.run_command_utils import run
def build_pb_go(serv_file: str, includes: List[str], to: str,
                source_relative: bool, cwd: Path, files: Optional[List[str]] = None, **kwargs: str) -> None:
    """Assemble protoc arguments for Go/gRPC generation and run the build.

    Args:
        serv_file: proto file declaring the gRPC service (first compile target).
        includes: directories turned into ``-I`` include flags.
        to: forwarded to ``_build_grpc`` (presumably the output location --
            confirm against that helper).
        source_relative: when True, adds the ``paths=source_relative`` option
            for both the go and go-grpc plugins.
        cwd: working directory handed to ``_build_grpc``.
        files: extra message-only proto files compiled alongside the service.
        **kwargs: additional ``flag=value`` pairs appended verbatim.
    """
    includes_str = " ".join(f"-I {inc}" for inc in includes)
    # Service file first, then any auxiliary protos, space-separated.
    target_str = serv_file if not files else " ".join([serv_file] + list(files))
    # Note: a leading space is deliberately kept when source_relative is set,
    # matching the historical flag-string format.
    flag_str = (" --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative"
                if source_relative else "")
    if kwargs:
        extra = " ".join(f"{k}={v}" for k, v in kwargs.items())
        flag_str = f"{flag_str} {extra}" if flag_str else extra
    _build_grpc(includes_str, flag_str, to, target_str, cwd)
| 32.84 | 108 | 0.626066 |
962392189f97293112a65685c141235eaa945995 | 369 | py | Python | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | [
"Unlicense"
] | null | null | null | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | [
"Unlicense"
] | 13 | 2020-02-12T00:19:23.000Z | 2022-03-11T23:47:08.000Z | instapp/migrations/0003_auto_20190522_0007.py | imekenye/Instagram-clone | 19c895a7bc4d5137f8df6eab7ade3920dfc3eb39 | [
"Unlicense"
] | 1 | 2019-06-07T10:01:06.000Z | 2019-06-07T10:01:06.000Z | # Generated by Django 2.2.1 on 2019-05-22 00:07
from django.db import migrations
| 19.421053 | 47 | 0.590786 |
9624816b19d6ea779fa1433613690a1826c3af03 | 4,007 | py | Python | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | 1 | 2021-01-04T21:25:24.000Z | 2021-01-04T21:25:24.000Z | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | null | null | null | app/api/v1_0/users.py | daichi-yoshikawa/flask-boilerplate | 2a136eb713a698955dc78ce07364ac333806e6da | [
"MIT"
] | null | null | null | import json
import logging
from flask import jsonify, make_response, request
from flask_jwt_extended import jwt_required
from flask_restful import Resource
from http import HTTPStatus
from marshmallow import ValidationError, Schema
from werkzeug.security import generate_password_hash
from app.models import db
from app.models.user import User, user_schema
from app.api.utils import get_url
from app.utils.exceptions import ApiException
logger = logging.getLogger(__name__)
| 30.356061 | 82 | 0.660344 |
9625303d504fb10bd57521a4e704cb6335319f31 | 984 | py | Python | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | [
"MIT"
] | null | null | null | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | [
"MIT"
] | null | null | null | src/count_targets.py | kahvel/MAProject | 1c17d0c3fde6d9acc7dd3861f926e8af0ddac222 | [
"MIT"
] | null | null | null | from main import readData, getTrueLabels, binariseLabels, removePacketsAfterChange
label_data = list()
label_data.append(readData("..\\data\\test5_targets_1.csv"))
label_data.append(readData("..\\data\\test5_targets_2.csv"))
label_data.append(readData("..\\data\\test5_targets_3.csv"))
labels = [getTrueLabels(label) for label in label_data]
binarised_labels = dict()
binarised_labels[1] = [binariseLabels(label, 1) for label in labels]
binarised_labels[2] = [binariseLabels(label, 2) for label in labels]
binarised_labels[3] = [binariseLabels(label, 3) for label in labels]
for target in [1,2,3]:
for dataset in [0,1,2]:
_, binarised_labels[target][dataset] =\
removePacketsAfterChange(binarised_labels[target][dataset], binarised_labels[target][dataset], label_data[dataset], 256)
for target in [1,2,3]:
for dataset in [0,1,2]:
print "Dataset:", str(dataset+1), "Target:", str(target), "Count:", str(sum(binarised_labels[target][dataset]))
| 41 | 132 | 0.727642 |
962533cca6da11f2ce0eaecf148aa3437a906a76 | 14,183 | py | Python | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | [
"MIT"
] | 20 | 2020-11-06T10:54:08.000Z | 2022-02-24T15:23:31.000Z | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | [
"MIT"
] | null | null | null | src/imagine/goal_sampler.py | jordyantunes/Imagine | 783cedaa53635b21e18ef41ab1524d56e368d120 | [
"MIT"
] | 4 | 2020-11-17T17:00:02.000Z | 2021-07-08T22:51:14.000Z | import numpy as np
from mpi4py import MPI
from src.imagine.goal_generator.simple_sentence_generator import SentenceGeneratorHeuristic
from src import logger
| 47.434783 | 140 | 0.585419 |
96262446beb9d081c0d44d53817c947e2939b91a | 711 | py | Python | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | [
"MIT"
] | null | null | null | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | [
"MIT"
] | null | null | null | src/actionsold.py | Grumpy-Old-Tech/WorkshopAssistant | 704e8080e76ba6feabd6eee3e1965439336306ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#This is different from AIY Kit's actions
#Copying and Pasting AIY Kit's actions commands will not work
from googleapiclient.discovery import build
from googleapiclient.errors import HttpError
from gmusicapi import Mobileclient
from googletrans import Translator
from gtts import gTTS
import requests
import os
import os.path
import RPi.GPIO as GPIO
import time
import re
import subprocess
import json
import urllib.request
import pafy
# API Key for YouTube and KS Search Engine
google_cloud_api_key='ENTER-YOUR-GOOGLE-CLOUD-API-KEY-HERE'  # placeholder; substitute a real key
# YouTube API Constants
DEVELOPER_KEY = google_cloud_api_key
YOUTUBE_API_SERVICE_NAME = 'youtube'
YOUTUBE_API_VERSION = 'v3'
# NOTE(review): presumably assigned later by playback code; the assigning
# function is not visible in this excerpt.
playshell = None
| 19.75 | 61 | 0.819972 |
96275facff37f1002cea2272aef725bd9db2358a | 2,358 | py | Python | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | openpype/tools/settings/settings/widgets/window.py | dangerstudios/OpenPype | 10ddcc4699137888616eec57cd7fac9648189714 | [
"MIT"
] | null | null | null | from Qt import QtWidgets, QtGui
from .categories import (
CategoryState,
SystemWidget,
ProjectWidget
)
from .widgets import ShadowWidget
from .. import style
| 29.111111 | 72 | 0.656064 |
824894a056e2da4cc1ec1c9dd0d07d94594ced73 | 6,093 | py | Python | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | [
"0BSD"
] | null | null | null | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | [
"0BSD"
] | null | null | null | azkaban_zip_uploader/tests/lambda_handler_tests.py | uk-gov-mirror/dwp.aws-azkaban | fa69ddf6e18fccba1fb96f6dd7a234b9441e96da | [
"0BSD"
] | null | null | null | import lambda_handler
from unittest import TestCase
from mock import call, patch, Mock
from datetime import datetime
import boto3
import json
from botocore.stub import Stubber
import urllib3
mock_s3_client = boto3.client('s3')
s3_stubber = Stubber(mock_s3_client)
list_objects_response = {
'IsTruncated': False,
'Contents': [
{
'Key': 'return1.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'do_not_return.txt',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
{
'Key': 'return2.zip',
'LastModified': datetime(2015, 1, 1),
'ETag': 'string',
'Size': 123,
'StorageClass': 'STANDARD',
'Owner': {
'DisplayName': 'string',
'ID': 'string'
}
},
],
'Name': 'string',
'EncodingType': 'url',
'KeyCount': 123,
'ContinuationToken': 'string'
}
s3_stubber.add_response('list_objects_v2', list_objects_response)
s3_stubber.activate()
mock_sm_client = boto3.client('secretsmanager')
sm_stubber = Stubber(mock_sm_client)
mock_secret_value_response = {
'ARN': 'arn:aws:secretsmanager:eu-west-7:123456789012:secret:tutorials/MyFirstSecret-jiObOV',
'Name': 'string',
'VersionId': 'EXAMPLE1-90ab-cdef-fedc-ba987EXAMPLE',
'SecretBinary': b'{"azkaban_username": "test_user", "azkaban_password": "pw123"}',
'CreatedDate': datetime(2015, 1, 1)
}
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.add_response('get_secret_value', mock_secret_value_response)
sm_stubber.activate()
data_non_fail = json.dumps({
"status" : "error",
"message" : "Project already exists.",
}).encode('utf-8')
http_non_fail_error= Mock()
http_non_fail_error.data = data_non_fail
data_fail = json.dumps({
"error" : "error",
"message" : "Other message.",
}).encode('utf-8')
http_raise_error = Mock()
http_raise_error.data = data_fail
http_status_error = Mock()
http_status_error.data = "non JSON error response".encode('utf-8')
http_status_error.status = 418
session_data = json.dumps({
"status" : "success",
"session.id" : "test-session-id-12345432"
}).encode('utf-8')
http_session = Mock()
http_session.data = session_data
http_session.status = 200
| 35.841176 | 129 | 0.678976 |
8249af75d4def2ae40ae7a6a262676d0c39c2b63 | 2,189 | py | Python | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | 2 | 2017-04-06T12:26:11.000Z | 2018-11-05T19:17:15.000Z | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | 9 | 2016-09-28T10:19:10.000Z | 2017-02-24T17:58:43.000Z | cripts/usernames/username.py | lakiw/cripts | 43f62891a3724e1ec60629887d97c421fb302163 | [
"MIT"
] | null | null | null | import uuid
from mongoengine import Document, StringField, ListField, UUIDField
from django.conf import settings
from cripts.core.cripts_mongoengine import CriptsBaseAttributes, CriptsSourceDocument
from cripts.core.cripts_mongoengine import CriptsActionsDocument
| 39.8 | 145 | 0.516674 |
824a4f6bf20408ed367c7e9a67c9b62aea2ab1c0 | 7,611 | py | Python | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | [
"MIT"
] | 1 | 2018-05-06T03:54:06.000Z | 2018-05-06T03:54:06.000Z | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | [
"MIT"
] | 5 | 2018-09-18T02:15:17.000Z | 2018-12-05T20:02:24.000Z | sweetpea/tests/test_encoding_diagram.py | anniecherk/sweetpea-py | 23dbad99a9213ff764ec207b456cf5d002707fd0 | [
"MIT"
] | null | null | null | import pytest
import operator as op
from sweetpea import fully_cross_block
from sweetpea.primitives import Factor, DerivedLevel, WithinTrial, Transition, Window
from sweetpea.encoding_diagram import __generate_encoding_diagram
color = Factor("color", ["red", "blue"])
text = Factor("text", ["red", "blue"])
con_level = DerivedLevel("con", WithinTrial(op.eq, [color, text]))
inc_level = DerivedLevel("inc", WithinTrial(op.ne, [color, text]))
con_factor = Factor("congruent?", [con_level, inc_level])
color_repeats_factor = Factor("color repeats?", [
DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [color])),
DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [color]))
])
text_repeats_factor = Factor("text repeats?", [
DerivedLevel("yes", Transition(lambda colors: colors[0] == colors[1], [text])),
DerivedLevel("no", Transition(lambda colors: colors[0] != colors[1], [text]))
])
design = [color, text, con_factor]
crossing = [color, text]
blk = fully_cross_block(design, crossing, [])
| 48.170886 | 102 | 0.411247 |
824adf7af953a3787b6ad72eca002b2f5fa3b943 | 297 | py | Python | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | [
"MIT"
] | null | null | null | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | [
"MIT"
] | 3 | 2018-09-21T00:57:21.000Z | 2018-09-21T01:49:40.000Z | Source_Code/Python/ConductedTest/case_generator.py | fenglwh/instruments | 7886158d1ed97fe6bfe372a55f4fca107e834311 | [
"MIT"
] | null | null | null | import json
from labinstrument.SS.CMW500.CMW500_WIFI.CMW500_WIFI import *
if __name__ == '__main__':
new_config_name='emm'
new_config=CMW_WIFI(17).get_parameters()
config=json.load(open('config.txt'))
config[new_config_name]=new_config
json.dump(config,open('config.txt','w')) | 33 | 61 | 0.737374 |
824c8df0f0e68c3c21ba270b931275c591b881bd | 9,957 | py | Python | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | internal-export-file/export-report-pdf/src/export-report-pdf.py | aakloul/connectors | 171bdc3441b9196ee7aef3f1f9524d8594da6425 | [
"Apache-2.0"
] | null | null | null | import yaml
import os
import time
import datetime
from pycti.utils.constants import StixCyberObservableTypes
from weasyprint import HTML
from pycti import OpenCTIConnectorHelper, get_config_variable
from jinja2 import Environment, FileSystemLoader
if __name__ == "__main__":
    try:
        # Instantiate the connector and hand control to its run loop.
        connector_export_report_pdf = ExportReportPdf()
        connector_export_report_pdf.start()
    except Exception as e:
        # On any startup failure: report the error, then pause before
        # exiting -- presumably to avoid a rapid restart loop when run
        # under a supervisor; confirm against deployment setup.
        print(e)
        time.sleep(10)
        exit(0)
| 40.149194 | 141 | 0.610425 |
824e54bffa5be7c6d4c645bb089554003a4f25bc | 189 | py | Python | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | 1 | 2021-01-09T08:42:24.000Z | 2021-01-09T08:42:24.000Z | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | null | null | null | Lyft-Dental/payments/pay/urls.py | Abhik1998/Lyft-sample_project | 3f9a79fb86c7abee713ae37245f5e7971be09139 | [
"MIT"
] | null | null | null | from django.urls import path
from .views import initiate_payment, callback
# Route table: the app root maps to initiate_payment (named "pay") and
# "callback/" maps to the callback view (named "callback").
urlpatterns = [
    path('', initiate_payment, name='pay'),
    path('callback/', callback, name='callback'),
]
| 21 | 49 | 0.703704 |
824eb389c2a7eca319848d5d0b764477a524317f | 544 | py | Python | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 46 | 2017-03-21T21:08:59.000Z | 2022-02-20T22:03:46.000Z | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 201 | 2017-03-21T21:25:52.000Z | 2022-03-30T21:38:20.000Z | ibmsecurity/isam/base/overview.py | zone-zero/ibmsecurity | 7d3e38104b67e1b267e18a44845cb756a5302c3d | [
"Apache-2.0"
] | 91 | 2017-03-22T16:25:36.000Z | 2022-02-04T04:36:29.000Z | def get(isamAppliance, check_mode=False, force=False):
"""
Retrieve an overview of updates and licensing information
"""
return isamAppliance.invoke_get("Retrieve an overview of updates and licensing information",
"/updates/overview")
def get_licensing_info(isamAppliance, check_mode=False, force=False):
    """
    Retrieve the licensing information

    :param isamAppliance: appliance object whose invoke_get() issues the GET request
    :param check_mode: accepted for interface parity with sibling modules; unused here
    :param force: accepted for interface parity with sibling modules; unused here
    :return: result of invoke_get for /lum/is_licensed
    """
    uri = "/lum/is_licensed"
    return isamAppliance.invoke_get("Retrieve the licensing information", uri)
| 36.266667 | 96 | 0.647059 |
824f686fbf01dfe1ee2beac723ed207ab4daf6b1 | 1,741 | py | Python | src/sweetrpg_library_api/application/config.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | src/sweetrpg_library_api/application/config.py | paulyhedral/sweetrpg-library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | 33 | 2021-09-18T23:52:05.000Z | 2022-03-30T12:25:49.000Z | src/sweetrpg_library_api/application/config.py | sweetrpg/library-api | 0105e963ef4321398aa66d7cb3aa9c2df1c4f375 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = "Paul Schifferer <dm@sweetrpg.com>"
"""
config.py
- settings for the flask application object
"""
import os
import redis
from sweetrpg_library_api.application import constants
| 38.688889 | 115 | 0.708788 |
824f8edece08e9acdf645fa301526e669393eaed | 1,711 | py | Python | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | [
"MIT"
] | null | null | null | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | [
"MIT"
] | null | null | null | frames.py | mppc12/special_subject_tea | 945c10ac5a4f0f2fec2fbd6abeb398074801250f | [
"MIT"
] | null | null | null | import pandas as pd
from group import Group
| 30.017544 | 81 | 0.499123 |
82500b40709a627c2f0699d9319a5f6bbab93bb0 | 20,594 | py | Python | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | [
"Apache-2.0"
] | null | null | null | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | [
"Apache-2.0"
] | null | null | null | msm/mycroft_skills_manager.py | forslund/mycroft-skills-manager | 825e910a555e1882999647d226a56734a7b75ea4 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 Mycroft AI, Inc.
#
# This file is part of Mycroft Skills Manager
# (see https://github.com/MatthewScholefield/mycroft-light).
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Install, remove, update and track the skills on a device
MSM can be used on the command line but is also used by Mycroft core daemons.
"""
import time
import logging
import shutil
from functools import wraps
from glob import glob
from multiprocessing.pool import ThreadPool
from os import path
from typing import Dict, List
from xdg import BaseDirectory
from msm import GitException
from msm.exceptions import (
AlreadyInstalled,
AlreadyRemoved,
MsmException,
MultipleSkillMatches,
RemoveException,
SkillNotFound
)
from msm.skill_entry import SkillEntry
from msm.skill_repo import SkillRepo
from msm.skill_state import (
initialize_skill_state,
get_skill_state,
write_device_skill_state,
load_device_skill_state,
device_skill_state_hash
)
from msm.util import cached_property, MsmProcessLock
LOG = logging.getLogger(__name__)
CURRENT_SKILLS_DATA_VERSION = 2
ONE_DAY = 86400
def save_device_skill_state(func):
"""Decorator to overwrite the skills.json file when skill state changes.
The methods decorated with this function are executed in threads. So,
this contains some funky logic to keep the threads from stepping on one
another.
"""
return func_wrapper
def _init_skills_data(self):
"""Initial load of the skill state that occurs upon instantiation.
If the skills state was upgraded after it was loaded, write the
updated skills state to disk.
"""
try:
del(self.device_skill_state['upgraded'])
except KeyError:
self.device_skill_state_hash = device_skill_state_hash(
self.device_skill_state
)
else:
self.write_device_skill_state()
def _upgrade_skills_data(self):
"""Upgrade the contents of the device skills state if needed."""
if self._device_skill_state.get('version', 0) == 0:
self._upgrade_to_v1()
if self._device_skill_state['version'] == 1:
self._upgrade_to_v2()
def _upgrade_to_v1(self):
"""Upgrade the device skills state to version one."""
self._device_skill_state.update(blacklist=[], version=1, skills=[])
for skill in self.local_skills.values():
skill_data = self._device_skill_state.get(skill.name, {})
try:
origin = skill_data['origin']
except KeyError:
origin = self._determine_skill_origin(skill)
beta = skill_data.get('beta', False)
skill_state = initialize_skill_state(
skill.name,
origin,
beta,
skill.skill_gid
)
skill_state['installed'] = skill_data.get('installed', 0)
if isinstance(skill_state['installed'], bool):
skill_state['installed'] = 0
skill_state['updated'] = skill_data.get('updated', 0)
self._device_skill_state['skills'].append(skill_state)
self._device_skill_state.update(upgraded=True)
def _upgrade_to_v2(self):
"""Upgrade the device skills state to version 2.
This adds the skill_gid field to skill entries.
"""
self._update_skill_gid()
self._device_skill_state.update(version=2, upgraded=True)
def _sync_device_skill_state(self):
"""Sync device's skill state with with actual skills on disk."""
self._add_skills_to_state()
self._remove_skills_from_state()
self._update_skill_gid()
def _add_skills_to_state(self):
"""Add local skill to state if it is not already there."""
skill_names = [s['name'] for s in self._device_skill_state['skills']]
for skill in self.local_skills.values():
if skill.name not in skill_names:
origin = self._determine_skill_origin(skill)
skill_state = initialize_skill_state(
skill.name,
origin,
False,
skill.skill_gid
)
self._device_skill_state['skills'].append(skill_state)
def _remove_skills_from_state(self):
"""Remove skills from state that no longer exist in the filesystem."""
skills_to_remove = []
for skill in self._device_skill_state['skills']:
is_not_local = skill['name'] not in self.local_skills
is_installed_state = skill['installation'] == 'installed'
if is_not_local and is_installed_state:
skills_to_remove.append(skill)
for skill in skills_to_remove:
self._device_skill_state['skills'].remove(skill)
def write_device_skill_state(self, data=None):
"""Write device's skill state to disk if it has been modified."""
data = data or self.device_skill_state
if device_skill_state_hash(data) != self.device_skill_state_hash:
write_device_skill_state(data)
self.device_skill_state_hash = device_skill_state_hash(data)
def update_all(self):
return self.apply(update_skill, self.local_skills.values())
def _invalidate_skills_cache(self, new_value=None):
"""Reset the cached skill lists in case something changed.
The cached_property decorator builds a _cache instance attribute
storing a dictionary of cached values. Deleting from this attribute
invalidates the cache.
"""
LOG.info('invalidating skills cache')
if hasattr(self, '_cache') and 'all_skills' in self._cache:
del self._cache['all_skills']
self._all_skills = None if new_value is None else new_value
self._local_skills = None
self._default_skills = None
def find_skill(self, param, author=None, skills=None):
# type: (str, str, List[SkillEntry]) -> SkillEntry
"""Find skill by name or url"""
if param.startswith('https://') or param.startswith('http://'):
repo_id = SkillEntry.extract_repo_id(param)
for skill in self.all_skills:
if skill.id == repo_id:
return skill
name = SkillEntry.extract_repo_name(param)
skill_directory = SkillEntry.create_path(self.skills_dir, param)
return SkillEntry(name, skill_directory, param, msm=self)
else:
skill_confs = {
skill: skill.match(param, author)
for skill in skills or self.all_skills
}
best_skill, score = max(skill_confs.items(), key=lambda x: x[1])
LOG.info('Best match ({}): {} by {}'.format(
round(score, 2), best_skill.name, best_skill.author)
)
if score < 0.3:
raise SkillNotFound(param)
low_bound = (score * 0.7) if score != 1.0 else 1.0
close_skills = [
skill for skill, conf in skill_confs.items()
if conf >= low_bound and skill != best_skill
]
if close_skills:
raise MultipleSkillMatches([best_skill] + close_skills)
return best_skill
| 37.240506 | 79 | 0.609838 |
8251357bc0686fc467cb6924c7a1a83a74692825 | 973 | py | Python | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 1 | 2015-09-02T19:53:12.000Z | 2015-09-02T19:53:12.000Z | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | ietf/utils/resources.py | wpjesus/codematch | eee7405259cce9239ea0545a2a1300ee1accfe94 | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Autogenerated by the mkresources management command 2014-11-13 05:39
from tastypie.resources import ModelResource
from tastypie.fields import CharField
from tastypie.constants import ALL
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from ietf import api
from ietf.utils.models import DumpInfo
api.utils.register(DumpInfoResource())
| 27.8 | 70 | 0.70298 |
825208daaf95b00b8d7fda9692bec10b366dcc4a | 1,624 | py | Python | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
] | null | null | null | maskrcnn_benchmark/data/datasets/concat_dataset.py | dukebw/maskrcnn-benchmark | f6710844f8cc6b6ce5345fcdc996f05ec04c3df7 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect
import numpy as np
from torch.utils.data.dataset import ConcatDataset as _ConcatDataset
| 32.48 | 92 | 0.639163 |
8253b8de6bfcb3c4b2705d87c3cbd83db498bfb5 | 1,129 | py | Python | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 2 | 2018-04-24T19:17:40.000Z | 2018-04-24T19:33:52.000Z | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | null | null | null | 153_find_minimum_in_rotated_sorted_array.py | gengwg/leetcode | 0af5256ec98149ef5863f3bba78ed1e749650f6e | [
"Apache-2.0"
] | 3 | 2020-06-17T05:48:52.000Z | 2021-01-02T06:08:25.000Z | # 153. Find Minimum in Rotated Sorted Array
#
# Suppose an array sorted in ascending order is rotated at some pivot unknown to you beforehand.
#
# (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
#
# Find the minimum element.
#
# You may assume no duplicate exists in the array.
if __name__ == '__main__':
print Solution().findMin([4, 5, 6, 7, 0, 1, 2])
| 26.255814 | 96 | 0.495128 |
8254c27453fc702429a4cf3b2f9b5c4318d236f1 | 10,452 | py | Python | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 99 | 2020-12-02T08:40:48.000Z | 2022-03-15T05:21:06.000Z | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 115 | 2020-12-15T07:15:39.000Z | 2022-03-28T22:21:03.000Z | tests/bridge/test_bridge.py | shuklaayush/badger-system | 1274eadbd0b0f3a02efbf40702719ce1d0a96c44 | [
"MIT"
] | 56 | 2020-12-11T06:50:04.000Z | 2022-02-21T09:17:38.000Z | import pytest
from brownie import (
accounts,
interface,
MockVault,
BadgerBridgeAdapter,
CurveSwapStrategy,
CurveTokenWrapper,
)
from helpers.constants import AddressZero
from helpers.registry import registry
from config.badger_config import badger_config
from scripts.systems.badger_system import connect_badger
from scripts.systems.bridge_system import connect_bridge
from scripts.systems.swap_system import connect_swap
# Curve lp tokens
RENBTC = "0x49849C98ae39Fff122806C06791Fa73784FB3675"
TBTC = "0x64eda51d3Ad40D56b9dFc5554E06F94e1Dd786Fd"
SBTC = "0x075b1bb99792c9E1041bA13afEf80C91a1e70fB3"
# Bridge mock vaults for testing.
# Schema is (in token addr, vault name, vault symbol, vault token addr)
BRIDGE_VAULTS = [
# TODO: When bridge adapter addr is approved, can test
# directly against badger sett contracts.
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.renCrv",
"symbol": "bcrvrenBTC",
"token": RENBTC,
"address": "0x6dEf55d2e18486B9dDfaA075bc4e4EE0B28c1545",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.tbtcCrv",
"symbol": "bcrvtBTC",
"token": TBTC,
"address": "0xb9D076fDe463dbc9f915E5392F807315Bf940334",
"upgrade": True,
},
{
"inToken": registry.tokens.renbtc,
"outToken": registry.tokens.renbtc,
"id": "native.sbtcCrv",
"symbol": "bcrvsBTC",
"token": SBTC,
"address": "0xd04c48A53c111300aD41190D63681ed3dAd998eC",
"upgrade": True,
},
{
"inToken": registry.tokens.wbtc,
"outToken": registry.tokens.wbtc,
"id": "yearn.wbtc",
"symbol": "byvwBTC",
"token": registry.tokens.wbtc,
"address": "0x4b92d19c11435614cd49af1b589001b7c08cd4d5",
"upgrade": False,
},
]
# Tests mint/burn to/from crv sett.
# We create a mock vault for each pool token.
# Tests swap router failures and wbtc mint/burn.
def test_bridge_basic_swap_fail():
renbtc = registry.tokens.renbtc
wbtc = registry.tokens.wbtc
badger = connect_badger(badger_config.prod_json)
bridge = connect_bridge(badger, badger_config.prod_json)
swap = connect_swap(badger_config.prod_json)
bridge.add_existing_swap(swap)
_upgrade_bridge(badger, bridge)
_deploy_bridge_mocks(badger, bridge)
# NB: If true, fails during router opimizeSwap() call, otherwise the underlying strategy fails.
for router_fail in [True, False]:
_deploy_swap_mocks(badger, bridge, swap, router_fail=router_fail)
# .1% slippage
slippage = 0.001
amount = 1 * 10 ** 8
for accIdx in range(10, 12):
account = accounts[accIdx]
for i in range(0, 2):
balanceBefore = interface.IERC20(renbtc).balanceOf(account)
# Test mints
bridge.adapter.mint(
wbtc,
slippage * 10 ** 4,
account.address,
AddressZero, # No vault.
amount,
# Darknode args hash/sig optional since gateway is mocked.
"",
"",
{"from": account},
)
assert interface.IERC20(renbtc).balanceOf(account) > balanceBefore
# NB: User should not receive any wbtc but rather renbtc as part
# of the fallback mechanism.
assert interface.IERC20(wbtc).balanceOf(account) == 0
# Tests swap router and wbtc mint/burn.
| 32.560748 | 99 | 0.604191 |
8254e7450b3c4e0f6d891fdfe8c1ab7c064377f8 | 1,423 | py | Python | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
] | null | null | null | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
] | 2 | 2021-11-08T18:09:22.000Z | 2021-11-09T19:22:33.000Z | babylon_server/babylon/config.py | ajponte/babylon | e743f5b3bb5b2eb864247414c4f51962eea9108e | [
"MIT"
] | null | null | null | import os
| 37.447368 | 185 | 0.709065 |
82559085472d1981739859824315a98440b83c6f | 131 | py | Python | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
] | 6 | 2021-02-20T10:32:36.000Z | 2022-02-10T17:00:00.000Z | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
] | 2 | 2020-11-19T04:39:25.000Z | 2021-03-05T12:40:21.000Z | etherscan_py/__init__.py | saltduck/etherscan_py | 1a4ac48733d832d6dc4c8f74fafd7af4c3ce675e | [
"MIT"
] | 3 | 2021-03-03T18:37:26.000Z | 2021-04-04T14:14:05.000Z | """Top-level package for etherscan-py."""
__author__ = """Julian Koh"""
__email__ = 'juliankohtx@gmail.com'
__version__ = '0.1.0'
| 21.833333 | 41 | 0.687023 |
8258e9ef419949e0cfc0082d25711b7eeaaea221 | 427 | py | Python | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
] | null | null | null | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
] | null | null | null | realtime/realtime.py | mikerah13/python_samples | c4cd8af3cee99a5199dd2231f182240c35984b97 | [
"MIT"
] | null | null | null | from subprocess import Popen, PIPE
if __name__ == "__main__":
run_command("ping google.com")
| 23.722222 | 76 | 0.627635 |
825a7d574135cde50db9d1e2e4cce7b2af3b42c9 | 923 | py | Python | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
] | null | null | null | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
] | null | null | null | resources/model/agenda.py | diegohideky/climatempoworkshop | edb50eec386d6db5ede9b28192520922ed85c55e | [
"MIT"
] | null | null | null | from db_connection import db
| 31.827586 | 82 | 0.658722 |
825b17f2327978290f8d614819c14bd2efe19e58 | 661 | py | Python | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
] | 4 | 2020-11-21T14:49:00.000Z | 2022-02-21T11:24:17.000Z | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
] | null | null | null | data/models.py | sarfarazstark/To-Do-Bot | c2d032fa69e42b651d1c574c276161eceb141981 | [
"Apache-2.0"
] | null | null | null | """Database models"""
from sqlalchemy import orm
import sqlalchemy
from .db_session import SqlAlchemyBase
# Task database model
# User database model
| 23.607143 | 73 | 0.753404 |
825b9506a0a8cc2c13904600639147b936af53d7 | 470 | py | Python | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
] | null | null | null | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
] | null | null | null | graduated_site/migrations/0029_auto_20191218_2109.py | vbacaksiz/KTU-MEBSIS | e1afaa07a16e00ff9be3f39b728603b64f08590e | [
"MIT"
] | null | null | null | # Generated by Django 3.0 on 2019-12-18 21:09
import ckeditor.fields
from django.db import migrations
| 23.5 | 99 | 0.651064 |
825c203e1359d9feaff1e4d74ceac39a9987f062 | 4,945 | py | Python | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
] | 58 | 2015-04-20T01:17:37.000Z | 2022-03-31T10:55:13.000Z | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
] | 7 | 2015-07-08T21:59:37.000Z | 2021-04-18T12:27:41.000Z | tests/test_obj.py | runapp/M2Crypto | bc3b54758fe73dce86304663084b40fa5d6973c0 | [
"MIT"
] | 29 | 2015-02-23T17:46:31.000Z | 2022-03-15T09:57:46.000Z | #!/usr/bin/env python
"""Unit tests for M2Crypto.m2 obj_* functions.
"""
from M2Crypto import ASN1, BIO, Rand, X509, m2, six
from tests import unittest
"""
These functions must be cleaned up and moved to some python module
Taken from CA managment code
"""
if __name__ == '__main__':
Rand.load_file('randpool.dat', -1)
unittest.TextTestRunner().run(suite())
Rand.save_file('randpool.dat')
| 35.070922 | 81 | 0.591304 |
825ed7b070e5aaac9e764b86a1c9c4bdbe9ea988 | 4,656 | py | Python | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | null | null | null | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | null | null | null | new_scraper.py | Baw25/HomeSavvy | e07fb6f78e6f68fb981c92b15df5eef981e4d0ea | [
"MIT"
] | null | null | null | #!/bin/python
# -*- coding: utf-8 -*-
# Droplet Name: ubuntu-512mb-sfo2-01
# IP Address: 138.68.252.152
# Username: root
# Password: fbe29a96430704766b5054c4d9
# New Password: Rowing525
# https://medium.com/@hoppy/how-to-test-or-scrape-javascript-rendered-websites-with-python-selenium-a-beginner-step-by-c137892216aa
from time import sleep
from random import randint
from selenium import webdriver
from pyvirtualdisplay import Display
# Run spider
RealTassa = RealTassaSpider()
items_list = RealTassa.parse()
# Do something with the data touched
for item in items_list:
print item
| 27.550296 | 131 | 0.649055 |
825ff6c34b7f590f5f9226ffd0a964d853a9a998 | 532 | py | Python | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
] | 42 | 2020-05-25T09:33:45.000Z | 2022-03-29T03:41:19.000Z | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
] | 133 | 2020-05-28T18:29:04.000Z | 2022-03-31T22:21:42.000Z | gdsfactory/simulation/gmeep/__init__.py | gdsfactory/gdsfactory | ee761ae0b4429fbec7035bbea5d1e5206c66bea7 | [
"MIT"
] | 17 | 2020-06-30T07:07:50.000Z | 2022-03-17T15:45:27.000Z | from gdsfactory.simulation.gmeep.add_monitors import add_monitors
from gdsfactory.simulation.gmeep.get_simulation import get_simulation
from gdsfactory.simulation.gmeep.get_transmission_2ports import (
get_transmission_2ports,
plot2D,
plot3D,
)
from gdsfactory.simulation.gmeep.plot_xsection import plot_xsection
__all__ = [
"add_monitors",
"get_simulation",
"get_sparameters1x2",
"get_transmission_2ports",
"plot2D",
"plot3D",
"plot_xsection",
"plot_eigenmode",
]
__version__ = "0.0.2"
| 25.333333 | 69 | 0.757519 |
82602d4942f676f159704b220ea884a45a9e0b4a | 11,212 | py | Python | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
] | null | null | null | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
] | null | null | null | coreos-ostree-importer/coreos_ostree_importer.py | dustymabe/fedora-coreos-releng-automation | 654a3505f3cc0795fa192c7503858e6fc95a9093 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import boto3
import botocore
import fedora_messaging
import fedora_messaging.api
import hashlib
import json
import logging
import os
import subprocess
import sys
import tarfile
import tempfile
import traceback
# Set local logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
FEDORA_MESSAGING_TOPIC_LISTEN = (
"org.fedoraproject.prod.coreos.build.request.ostree-import"
)
FEDORA_MESSAGING_TOPIC_RESPOND = FEDORA_MESSAGING_TOPIC_LISTEN + ".finished"
# We are processing the org.fedoraproject.prod.coreos.build.request.ostree-import topic
# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import&delta=100000
# The schema was originally designed in:
# https://github.com/coreos/fedora-coreos-tracker/issues/198#issuecomment-513944390
EXAMPLE_MESSAGE_BODY = json.loads("""
{
"build_id": "30.20190905.0",
"stream": "testing",
"basearch": "x86_64",
"commit": "s3://fcos-builds/prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar",
"checksum": "sha256:d01db6939e7387afa2492ac8e2591c53697fc21cf16785585f7f1ac0de692863",
"ostree_ref": "fedora/x86_64/coreos/testing",
"ostree_checksum": "b4beca154dab3696fd04f32ddab818102caa9247ec3192403adb9aaecc991bd9",
"target_repo": "prod"
}
"""
)
KNOWN_OSTREE_REPOS = {
"prod": "/mnt/koji/ostree/repo",
"compose": "/mnt/koji/compose/ostree/repo",
}
# Given a repo (and thus an input JSON) analyze existing koji tag set
# and tag in any missing packages
# https://stackoverflow.com/a/55542529
# The code in this file is expected to be run through fedora messaging
# However, you can run the script directly for testing purposes. The
# below code allows us to do that and also fake feeding data to the
# call by updating the json text below.
if __name__ == "__main__":
sh = logging.StreamHandler()
sh.setFormatter(
logging.Formatter("%(asctime)s %(levelname)s %(name)s - %(message)s")
)
logger.addHandler(sh)
m = fedora_messaging.api.Message(
topic="org.fedoraproject.prod.coreos.build.request.ostree-import",
body=EXAMPLE_MESSAGE_BODY,
)
c = Consumer()
c.__call__(m)
| 37.249169 | 125 | 0.646539 |
8261781129ea227c5b055b630da103ca621c0fbe | 1,837 | py | Python | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
] | null | null | null | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
] | null | null | null | deepscm/datasets/medical/ukbb.py | mobarakol/deepscm | ffa5f0208c98b1f31e300f28c07c7d51090eda4a | [
"MIT"
] | null | null | null | from torch.utils.data.dataset import Dataset
import numpy as np
import pandas as pd
import os
import nibabel as nib
from nilearn.image import resample_img
import torch
| 41.75 | 169 | 0.645073 |
82629e49973e0be1f008350e2ac5d3d75aff0200 | 4,493 | py | Python | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | [
"Apache-2.0"
] | 44 | 2018-10-27T15:28:19.000Z | 2019-02-26T12:50:39.000Z | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | [
"Apache-2.0"
] | 31 | 2018-11-09T20:33:47.000Z | 2019-02-28T09:58:22.000Z | external/mmdetection/detection_tasks/extension/utils/pipelines.py | bes-dev/training_extensions | 7b016e3bd02ae7c74d60fd5a0ae0912a42ef87cb | [
"Apache-2.0"
] | 27 | 2018-11-05T21:59:34.000Z | 2019-02-28T14:28:50.000Z | # Copyright (C) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions
# and limitations under the License.
import copy
import numpy as np
from mmdet.datasets.builder import PIPELINES
from ..datasets import get_annotation_mmdet_format
| 37.132231 | 136 | 0.665702 |
826343a77ca38151d0a290d5ea759c030e820e04 | 846 | py | Python | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | 1 | 2019-11-26T11:52:25.000Z | 2019-11-26T11:52:25.000Z | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | null | null | null | Leetcode/Competition/180_1.py | ZR-Huang/AlgorithmPractices | 226cecde136531341ce23cdf88529345be1912fc | [
"BSD-3-Clause"
] | null | null | null | from typing import List
print(Solution().luckyNumbers([[3,7,8],[9,11,13],[15,16,17]]))
print(Solution().luckyNumbers([[1,10,4,2],[9,3,8,7],[15,16,17,12]]))
print(Solution().luckyNumbers([[7,8],[1,2]])) | 33.84 | 68 | 0.478723 |
8264bba891a9dc8d15b49f3c1fc314e278831022 | 3,282 | py | Python | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | [
"BSD-3-Clause"
] | null | null | null | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | [
"BSD-3-Clause"
] | null | null | null | GUITests/UC10.sikuli/UC10.py | gabrielganzer/EZGas | cc21dadb0001405e96a8fe298c2bbccf61d5d5a2 | [
"BSD-3-Clause"
] | 1 | 2021-04-06T14:31:08.000Z | 2021-04-06T14:31:08.000Z | # UC10 - Evaluate price
#
# User U exists and has valid account
# We create two Users, User1_UC10, User2_UC10 and one new gasStation GasStationUC10
#
# Registered on a 1920x1080p, Google Chrome 100% zoom
### SETUP
#User1
click("1590678880209.png")
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User1_UC10" + Key.TAB + "user1uc10@polito.it" + Key.TAB + "user1")
click("1590679157604.png")
click("1590788841790.png")
wait(2)
# User2
click("1590678880209.png")
wait(2)
click("1590678953637.png")
wait(2)
type("1590829373120.png", "User2_UC10" + Key.TAB + "user2uc10@polito.it" + Key.TAB + "user2")
click("1590679157604.png")
click("1590788841790.png")
# Admin creates a new GasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "admin@ezgas.com" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 6)
wait(2)
type("1590830169812.png", "GasStation_UC10" + Key.TAB + "Torino, corso duca")
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
type("1590830389386.png", Key.DOWN + Key.DOWN + Key.ENTER)
click("1590830256446.png")
click("1590830265272.png")
wait(2)
click("1590785166092.png")
wait(3)
type(Key.HOME)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
# User1 searches the gasStation
click("1590678880209.png")
wait(3)
type("1590829943940.png", "user1uc10@polito.it" + Key.TAB + "user1" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 6)
type("1590931278631.png" , "Torino, corso duca" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590922172004.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
click(Pattern("1590922374562.png").targetOffset(543,-4))
wheel(WHEEL_DOWN, 4)
wait(2)
click(Pattern("1590930530512.png").targetOffset(73,1))
type("1.5")
click(Pattern("1590930568512.png").targetOffset(73,0))
type("1.4")
click("1590834482526.png")
wait(3)
type(Key.HOME)
wait(3)
click("1590788458524.png")
# User2 login and evaluate prices
wait(2)
click("1590678880209.png")
wait(3)
type("1590829943940.png", "user2uc10@polito.it" + Key.TAB + "user2" )
click("1590784293656.png")
wait(2)
wheel(WHEEL_DOWN, 4)
wait(2)
type("1590918242822-1.png" , "Torino, corso duca" )
wait( "add_UC10.png" , 20)
type(Key.DOWN + Key.ENTER)
wait(2)
click("1590918499196.png")
wheel(WHEEL_DOWN, 3)
click(Pattern("1591638408351.png").targetOffset(1068,-3))
# User2 clicks on the green button if the price is correct, otherwise clicks on the red button
# If User clicks the green button, the User1 trustlevel increases +1, otherwise it decreases -1
#
wait(3)
type(Key.HOME)
click("1590788458524.png")
wait(2)
# Admin deletes users and gasStation
click("1590678880209-1.png")
wait(3)
type("1590829943940.png", "admin@ezgas.com" + Key.TAB + "admin" )
click("1590784293656.png")
wait(2)
click("1590784369122.png")
wait(2)
wheel(WHEEL_DOWN, 10)
wait(2)
click(Pattern("1590931822851.png").targetOffset(905,-27))
wait(2)
wheel(WHEEL_UP, 15)
wait(2)
click(Pattern("1590931876805.png").targetOffset(560,-4))
wait(2)
click(Pattern("1590931914901.png").targetOffset(556,-10))
wait(2)
click("1590788397797.png")
wait(2)
click("1590828906996.png")
wait(2)
click("1590788458524.png")
wait(2)
| 23.442857 | 95 | 0.720902 |
82661285c2d18985678122dfb06c00248935e316 | 540 | py | Python | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
] | null | null | null | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
] | 2 | 2020-03-20T11:50:04.000Z | 2020-03-20T11:50:06.000Z | basic/migrations/0003_entrypoint_entry_function.py | kgdunn/django-peer-review-system | 8d013961e00d189fbbade5283128e956a27954f8 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-07-27 16:14
from __future__ import unicode_literals
from django.db import migrations, models
| 25.714286 | 131 | 0.644444 |
826680004e25570292d08da9b3737a70f6cd33e6 | 29,337 | py | Python | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
] | null | null | null | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
] | null | null | null | test_net_with_srgan.py | jasonlai777/Faster-R-CNN | b5c0c18a9b5faabd4b6ef23346aff85104df7356 | [
"MIT"
] | null | null | null | # --------------------------------------------------------
# Pytorch Multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import cv2
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import pickle
from roi_data_layer.roidb import combined_roidb
from roi_data_layer.roibatchLoader import roibatchLoader
from model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from model.rpn.bbox_transform import clip_boxes
# from model.nms.nms_wrapper import nms
from model.roi_layers import nms
from model.rpn.bbox_transform import bbox_transform_inv
from model.utils.net_utils import save_net, load_net, vis_detections
from model.faster_rcnn.vgg16 import vgg16
from model.faster_rcnn.resnet import resnet
from PIL import Image
from torchvision.utils import save_image
import cv2
from torch.utils.data import DataLoader
from srgan_datasets import *
from srgan import *
import torch.nn.functional as F
from datasets.voc_eval import parse_rec
# Python 2/3 compatibility shim: make `xrange` available on Python 3 so the
# xrange() call later in this script works on both interpreters.
try:
    xrange  # Python 2
except NameError:
    xrange = range  # Python 3

# Detection label set. Index 0 is the background class; each species appears
# three times — head crop '(H)', tail crop '(T)' and full-body — which is why
# the post-processing below indexes scores/boxes in groups of three
# (3*k+1 head, 3*k+2 tail, 3*k+3 full body) for k in range(13).
classes = ('__background__',  # always index 0
           'A.bes(H)', 'A.bes(T)', 'A.bes', 'A.bic(H)', 'A.bic(T)', 'A.bic',
           'A.fuj(H)', 'A.fuj(T)', 'A.fuj', 'B.xyl(H)', 'B.xyl(T)', 'B.xyl',
           'C.ele(H)', 'C.ele(T)', 'C.ele', 'M.ent(H)', 'M.ent(T)', 'M.ent',
           'M.gra(H)', 'M.gra(T)', 'M.gra', 'M.inc(H)', 'M.inc(T)', 'M.inc',
           'P.cof(H)', 'P.cof(T)', 'P.cof', 'P.vul(H)', 'P.vul(T)', 'P.vul',
           'P.spe(H)', 'P.spe(T)', 'P.spe', 'H.sp(H)', 'H.sp(T)', 'H.sp',
           'M.ams(H)', 'M.ams(T)', 'M.ams'
           )  # 40 entries total (1 background + 13 species * 3 views)
def parse_args(argv=None):
    """Parse command-line arguments for this evaluation script.

    Args:
        argv: Optional list of argument strings. When ``None`` (the default,
            and the original behavior) arguments are read from
            ``sys.argv[1:]``. Accepting an explicit list makes the function
            callable programmatically and unit-testable.

    Returns:
        An ``argparse.Namespace`` with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
    # Dataset / configuration selection.
    parser.add_argument('--dataset', dest='dataset',
                        help='training dataset',
                        default='pascal_voc', type=str)
    parser.add_argument('--cfg', dest='cfg_file',
                        help='optional config file',
                        default='cfgs/res101.yml', type=str)
    parser.add_argument('--net', dest='net',
                        help='vgg16, res50, res101, res152',
                        default='res101', type=str)
    parser.add_argument('--set', dest='set_cfgs',
                        help='set config keys', default=None,
                        nargs=argparse.REMAINDER)
    parser.add_argument('--load_dir', dest='load_dir',
                        help='directory to load models', default="models",
                        type=str)
    # Hardware / parallelism flags.
    parser.add_argument('--cuda', dest='cuda',
                        help='whether use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether use large imag scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether use multiple GPUs',
                        action='store_true')
    parser.add_argument('--cag', dest='class_agnostic',
                        help='whether perform class_agnostic bbox regression',
                        action='store_true')
    parser.add_argument('--parallel_type', dest='parallel_type',
                        help='which part of model to parallel, 0: all, 1: model before roi pooling',
                        default=0, type=int)
    # Checkpoint selection: these three values name the .pth file to load.
    parser.add_argument('--checksession', dest='checksession',
                        help='checksession to load model',
                        default=1, type=int)
    parser.add_argument('--checkepoch', dest='checkepoch',
                        help='checkepoch to load network',
                        default=1, type=int)
    parser.add_argument('--checkpoint', dest='checkpoint',
                        help='checkpoint to load network',
                        default=10021, type=int)
    parser.add_argument('--vis', dest='vis',
                        help='visualization mode',
                        action='store_true')
    return parser.parse_args(argv)
# SGD hyper-parameters copied out of the global config.
# NOTE(review): none of these three is referenced in the evaluation code
# below — they appear to be leftovers from the training script. Confirm
# before removing.
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
weight_decay = cfg.TRAIN.WEIGHT_DECAY
def iou(bb1, bb2):
    """Return the intersection-over-union of two axis-aligned boxes.

    Args:
        bb1: Box as ``[x1, y1, x2, y2]`` with ``x1 < x2`` and ``y1 < y2``.
        bb2: Second box in the same format.

    Returns:
        IoU in ``[0.0, 1.0]``. Returns ``0.0`` for disjoint boxes and for
        degenerate (zero-area) inputs.

    Bug fix: the original did not clamp the intersection extent at zero, so
    two *disjoint* boxes produced a negative width times a negative height —
    a spurious POSITIVE overlap that could exceed the 0.5 threshold used by
    the caller. The intersection is now clamped, and a zero/negative union
    no longer divides by zero.
    """
    # Coordinates of the intersection rectangle.
    x_left = max(bb1[0], bb2[0])
    y_top = max(bb1[1], bb2[1])
    x_right = min(bb1[2], bb2[2])
    y_bottom = min(bb1[3], bb2[3])
    # Clamp at zero: no overlap means zero intersection, never a product of
    # two negative extents.
    iw = max(0.0, x_right - x_left)
    ih = max(0.0, y_bottom - y_top)
    inters = iw * ih
    # Union = area(bb1) + area(bb2) - intersection.
    uni = ((bb1[2] - bb1[0]) * (bb1[3] - bb1[1])
           + (bb2[2] - bb2[0]) * (bb2[3] - bb2[1]) - inters)
    if uni <= 0:
        # Degenerate boxes: avoid ZeroDivisionError.
        return 0.0
    return inters / uni
if __name__ == '__main__':
    # =====================================================================
    # Evaluation pipeline:
    #   stage 0 — parse options, resolve dataset/config;
    #   stage 1 — restore the trained Faster R-CNN checkpoint;
    #   stage 2 — first detection pass over the test set;
    #   stage 3 — crop low-confidence detections, upscale them with SRGAN;
    #   stage 4 — second detection pass over the SRGAN outputs;
    #   stage 5 — merge both passes and evaluate.
    # =====================================================================
    args = parse_args()
    args_sr = parse_args_for_srgan()  # SRGAN-specific CLI options (from srgan module)

    print('Called with args:')
    print(args)

    if torch.cuda.is_available() and not args.cuda:
        print("WARNING: You have a CUDA device, so you should probably run with --cuda")

    np.random.seed(cfg.RNG_SEED)
    # Map the dataset name onto imdb names plus anchor scale/ratio settings.
    if args.dataset == "pascal_voc":
        args.imdb_name = "voc_2007_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "pascal_voc_0712":
        args.imdb_name = "voc_2007_trainval+voc_2012_trainval"
        args.imdbval_name = "voc_2007_test"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "coco":
        args.imdb_name = "coco_2014_train+coco_2014_valminusminival"
        args.imdbval_name = "coco_2014_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "imagenet":
        args.imdb_name = "imagenet_train"
        args.imdbval_name = "imagenet_val"
        args.set_cfgs = ['ANCHOR_SCALES', '[8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']
    elif args.dataset == "vg":
        args.imdb_name = "vg_150-50-50_minitrain"
        args.imdbval_name = "vg_150-50-50_minival"
        args.set_cfgs = ['ANCHOR_SCALES', '[4, 8, 16, 32]', 'ANCHOR_RATIOS', '[0.5,1,2]']

    # NOTE: this overwrites any --cfg passed on the command line.
    args.cfg_file = "cfgs/{}_ls.yml".format(args.net) if args.large_scale else "cfgs/{}.yml".format(args.net)

    if args.cfg_file is not None:
        cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    print('Using config:')
    pprint.pprint(cfg)

    cfg.TRAIN.USE_FLIPPED = False  # never evaluate on flipped images
    imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdbval_name, False)
    imdb.competition_mode(on=True)

    print('{:d} roidb entries'.format(len(roidb)))

    # ---- stage 1: restore the trained detector --------------------------
    input_dir = args.load_dir + "/" + args.net + "/" + args.dataset
    if not os.path.exists(input_dir):
        raise Exception('There is no input directory for loading network from ' + input_dir)
    load_name = os.path.join(input_dir,
        'faster_rcnn_{}_{}_{}.pth'.format(args.checksession, args.checkepoch, args.checkpoint))

    # initilize the network here.
    if args.net == 'vgg16':
        fasterRCNN = vgg16(imdb.classes, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res101':
        fasterRCNN = resnet(imdb.classes, 101, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res50':
        fasterRCNN = resnet(imdb.classes, 50, pretrained=False, class_agnostic=args.class_agnostic)
    elif args.net == 'res152':
        fasterRCNN = resnet(imdb.classes, 152, pretrained=False, class_agnostic=args.class_agnostic)
    else:
        print("network is not defined")
        pdb.set_trace()

    fasterRCNN.create_architecture()

    print("load checkpoint %s" % (load_name))
    checkpoint = torch.load(load_name)
    fasterRCNN.load_state_dict(checkpoint['model'])
    if 'pooling_mode' in checkpoint.keys():
        cfg.POOLING_MODE = checkpoint['pooling_mode']

    print('load model successfully!')

    # initilize the tensor holder here.
    # These four tensors are resized/refilled in-place for every image.
    im_data = torch.FloatTensor(1)
    im_info = torch.FloatTensor(1)
    num_boxes = torch.LongTensor(1)
    gt_boxes = torch.FloatTensor(1)

    # ship to cuda
    if args.cuda:
        im_data = im_data.cuda()
        im_info = im_info.cuda()
        num_boxes = num_boxes.cuda()
        gt_boxes = gt_boxes.cuda()

    # make variable
    im_data = Variable(im_data)
    im_info = Variable(im_info)
    num_boxes = Variable(num_boxes)
    gt_boxes = Variable(gt_boxes)

    if args.cuda:
        cfg.CUDA = True

    if args.cuda:
        fasterRCNN.cuda()

    start = time.time()
    max_per_image = 100  # cap on detections kept per image across all classes

    vis = args.vis
    # NOTE(review): both branches set the same threshold; presumably the
    # non-vis branch was meant to use a higher value — confirm.
    if vis:
        thresh = 0.0
    else:
        thresh = 0.0

    save_name = 'faster_rcnn_10'
    num_images = len(imdb.image_index)
    # all_boxes[class][image] = N x 5 array of detections [x1, y1, x2, y2, score]
    all_boxes = [[[] for _ in range(num_images)]
                 for _ in range(imdb.num_classes)]

    output_dir = get_output_dir(imdb, save_name)
    dataset = roibatchLoader(roidb, ratio_list, ratio_index, 1, \
                             imdb.num_classes, training=False, normalize = False)
    dataloader = torch.utils.data.DataLoader(dataset, batch_size=1,
                                             shuffle=False, num_workers=0,
                                             pin_memory=True)

    data_iter = iter(dataloader)

    _t = {'im_detect': time.time(), 'misc': time.time()}
    det_file = os.path.join(output_dir, 'detections.pkl')

    fasterRCNN.eval()
    empty_array = np.transpose(np.array([[],[],[],[],[]]), (1,0))
    # ---- stage 2: first detection pass over the test set ----------------
    for i in range(num_images):
        data = next(data_iter)
        with torch.no_grad():
            im_data.resize_(data[0].size()).copy_(data[0])
            im_info.resize_(data[1].size()).copy_(data[1])
            gt_boxes.resize_(data[2].size()).copy_(data[2])
            num_boxes.resize_(data[3].size()).copy_(data[3])
        #print(im_data.shape)
        #print(im_info.shape)
        #print(gt_boxes)
        #print(num_boxes)
        det_tic = time.time()
        rois, cls_prob, bbox_pred, \
        rpn_loss_cls, rpn_loss_box, \
        RCNN_loss_cls, RCNN_loss_bbox, \
        rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)

        scores = cls_prob.data
        boxes = rois.data[:, :, 1:5]

        if cfg.TEST.BBOX_REG:
            # Apply bounding-box regression deltas
            box_deltas = bbox_pred.data
            if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
                # Optionally normalize targets by a precomputed mean and stdev
                if args.class_agnostic:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                        + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas = box_deltas.view(1, -1, 4)
                else:
                    box_deltas = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                        + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas = box_deltas.view(1, -1, 4 * len(imdb.classes))

            pred_boxes = bbox_transform_inv(boxes, box_deltas, 1)
            pred_boxes = clip_boxes(pred_boxes, im_info.data, 1)
        else:
            # Simply repeat the boxes, once for each class
            pred_boxes = np.tile(boxes, (1, scores.shape[1]))

        # Undo the test-time image scaling so boxes are in original pixels.
        pred_boxes /= data[1][0][2].item()

        scores = scores.squeeze()
        pred_boxes = pred_boxes.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic
        misc_tic = time.time()
        #print(scores[:,1:3].shape)
        #print(pred_boxes[:,4:12].shape)
        ############################## decline head-tail overlapping
        # For each species k, pool its head (3*k+1) and tail (3*k+2) classes
        # and run NMS across the union so a head box and a tail box cannot
        # overlap; keep-indices < 300 came from the head half, >= 300 from
        # the tail half of the concatenated tensor.
        new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
        new_scores = torch.cuda.FloatTensor(300,40).zero_()
        for k in range(13):
            b = torch.cat((pred_boxes[:,12*k+4:12*k+8],pred_boxes[:,12*k+8:12*k+12]),0)
            s = torch.cat((scores[:,3*k+1],scores[:,3*k+2]),0)
            keep = nms(b, s, 0.2)
            #new head class
            idx = [g for g in range(len(keep)) if keep[g] <300]
            new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
            new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
            #new tail class
            idx = [g for g in range(len(keep)) if keep[g] >=300]
            new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
            new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
            #new full length class = original
            new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
            new_scores[:,3*k+3] = scores[:,3*k+3]

        if vis:
            im = cv2.imread(imdb.image_path_at(i))
            im2show = np.copy(im)
        # Per-class thresholding + NMS, then store into all_boxes.
        for j in range(1, imdb.num_classes):
            inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
            # if there is det
            if inds.numel() > 0:
                cls_scores = new_scores[:,j][inds]
                _, order = torch.sort(cls_scores, 0, True)
                if args.class_agnostic:
                    cls_boxes = new_pred_boxes[inds, :]
                else:
                    cls_boxes = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
                #print(cls_boxes.shape)
                #print(cls_scores.unsqueeze(1).shape)
                cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1)
                # cls_dets = torch.cat((cls_boxes, cls_scores), 1)
                cls_dets = cls_dets[order]
                keep = nms(cls_boxes[order, :], cls_scores[order], cfg.TEST.NMS)
                cls_dets = cls_dets[keep.view(-1).long()]
                if vis:
                    im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
                all_boxes[j][i] = cls_dets.cpu().numpy()
            else:
                all_boxes[j][i] = empty_array
        #print(exist_classes)
        #for k, j in enumerate(exist_classes):
        #  all_boxes[j][i] = exist_dets[k]
        #print(all_boxes)
        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            #print(all_boxes[3][i][:,-1])
            image_scores = np.hstack([all_boxes[j][i][:,-1]
                                      for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in xrange(1, imdb.num_classes):
                    keep = np.where(all_boxes[j][i][:, -1] >= image_thresh)[0]
                    all_boxes[j][i] = all_boxes[j][i][keep, :]

        misc_toc = time.time()
        nms_time = misc_toc - misc_tic

        sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
            .format(i + 1, num_images, detect_time, nms_time))
        sys.stdout.flush()

        if vis:
            cv2.imwrite('result.png', im2show)
            pdb.set_trace()
            #cv2.imshow('test', im2show)
            #cv2.waitKey(0)

    #print(all_boxes[1][0][0])
    print(torch.cuda.current_device())
    with torch.cuda.device(torch.cuda.current_device()):
        torch.cuda.empty_cache()

    # ---- stage 3: crop mid-confidence detections and feed them to SRGAN -
    #################################### filter imgs need to do SRGAN-preprocessing
    # NOTE(review): hard-coded absolute paths tie this script to one machine.
    annopath = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/Annotations/{:s}.xml'
    imagesetfile = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/ImageSets/Main/test.txt'
    cachedir = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/annotations_cache'
    image_file = '/home/jason/faster-rcnn.pytorch-1.0/data/VOCdevkit2007/VOC2007/JPEGImages'
    # NOTE(review): this file handle is never closed.
    f = open(imagesetfile)
    new_indexes = []
    img_ids = []
    new_gt_boxes = []
    for line in f:
        img_ids.append(line.splitlines())
    img_ids = np.squeeze(img_ids)
    for i in range(num_images):
        for j in range(1, imdb.num_classes):
            # NOTE(review): load_gt_box is re-read for every (i, j) pair even
            # though it only depends on j — hoisting it out of the i loop
            # would avoid num_images redundant parses per class. Confirm.
            gt_boxes_1 = load_gt_box(annopath,imagesetfile,classes[j],cachedir)
            if not np.any(all_boxes[j][i]):
                continue
            if len(gt_boxes_1[img_ids[i]]['bbox']) == 0:
                continue
            else:# 1 GT box in single image for a single class
                gt_b = gt_boxes_1[img_ids[i]]['bbox']
                #print(gt_b)
                z = 0
                for m in range(len(all_boxes[j][i])):
                    for n in range(len(gt_b)):
                        det_b = [int(l) for l in all_boxes[j][i][m][:4]]
                        #print(all_boxes[j][i][m][4], iou(det_b, gt_b[n]), imdb.image_index[j])
                        # Select detections that match a GT box but are only
                        # mid-confidence (0.5..0.8), for head/tail classes
                        # only (names ending in ')').
                        if all_boxes[j][i][m][4] > 0.5 and all_boxes[j][i][m][4] < 0.8 \
                           and iou(det_b, gt_b[n]) > 0.5 and classes[j][-1]==")":
                            print("srgan beginning......")
                            new_indexes.append(img_ids[i]+"_"+classes[j]+"_"+str(z))
                            print(len(new_indexes))#, all_boxes[j][i][m][4], iou(det_b, gt_b[n]))
                            img_path = os.path.join(image_file, img_ids[i]+".JPG")
                            img = Image.open(img_path)
                            img = np.asarray(img)
                            # Expand the crop by a quarter of the image size
                            # on each side, clamped to the image border.
                            # NOTE(review): quaterx is derived from shape[1]
                            # (width) but compared against shape[0] below,
                            # and vice versa for quatery — looks like the
                            # axes are swapped; confirm against image sizes.
                            quaterx = int(img.shape[1]*1/4)
                            quatery = int(img.shape[0]*1/4)
                            x1_padding = 0
                            y1_padding = 0
                            x2_padding = 0
                            y2_padding = 0
                            print(img.shape)
                            # Grow whichever box (detection or GT) is larger.
                            if Area(det_b) >= Area(gt_b[n]):
                                x1, y1, x2, y2 = det_b
                                print("det_b: " + str(det_b))
                                if x1 > quaterx:
                                    x1-=quaterx
                                    x1_padding = quaterx
                                else:
                                    x1 = 0
                                    x1_padding = x1
                                if x2 < img.shape[0]-quaterx:
                                    x2+= quaterx
                                    x2_padding = quaterx
                                else:
                                    x2 = img.shape[0]-1
                                    x2_padding = img.shape[0] - x2-1
                                if y1 > quatery:
                                    y1 -=quatery
                                    y1_padding = quatery
                                else:
                                    y1 = 0
                                    y1_padding = y1
                                if y2 < img.shape[1]-quatery:
                                    y2+=quatery
                                    y2_padding = quatery
                                else:
                                    y2= img.shape[1]-1
                                    y2_padding = img.shape[1] - y2-1
                            else:
                                x1, y1, x2, y2 = gt_b[n]
                                print("gt_b: "+str(gt_b))
                                if x1 > quaterx:
                                    x1-=quaterx
                                    x1_padding = quaterx
                                else:
                                    x1 = 0
                                    x1_padding = x1
                                if x2 < img.shape[0]-quaterx:
                                    x2+= quaterx
                                    x2_padding = quaterx
                                else:
                                    x2 = img.shape[0]-1
                                    x2_padding = img.shape[0] - x2-1
                                if y1 > quatery:
                                    y1 -=quatery
                                    y1_padding = quatery
                                else:
                                    y1 = 0
                                    y1_padding = y1
                                if y2 < img.shape[1]-quatery:
                                    y2+=quatery
                                    y2_padding = quatery
                                else:
                                    y2= img.shape[1]-1
                                    y2_padding = img.shape[1] - y2-1
                            x1, y1, x2, y2= int(x1),int(y1),int(x2), int(y2)
                            # Box of the original object inside the padded crop.
                            new_gt_boxes.append([x1_padding, y1_padding, x2-x1-x1_padding-x2_padding, \
                                                 y2-y1-y1_padding-y2_padding])# whole photo
                            srgan_in = img[y1:y2 ,x1:x2 ,:]
                            srgan_in = srgan_in[...,::-1]#rgb->bgr
                            print(x1,y1,x2,y2,srgan_in.shape)
                            cv2.imwrite(os.path.join("srgan/srgan_input", img_ids[i]+"_"+classes[j]+"_"+str(z)+".JPG"), srgan_in)
                            print("save input: %s" %(img_ids[i]+"_"+classes[j]+"_"+str(z)))
                            z+=1
                            all_boxes[j][i][m] = np.append(gt_b[n], 1.0)# turn original pred box to gt box

    with torch.cuda.device(torch.cuda.current_device()):
        torch.cuda.empty_cache()
    # Run SRGAN over the saved crops; upscaled results land in srgan/srgan_output.
    dataloader = DataLoader(
        ImageDataset("srgan/srgan_input", hr_shape=(1024,1024)),
        batch_size=1,
        shuffle=True,
        num_workers=0,
    )
    #gan_output = srgan(args_sr, dataloader)
    srgan(args_sr, dataloader)
    #print("length of data: %d"%len(gan_output))
    print("srgan finish......")
    with torch.cuda.device(torch.cuda.current_device()):
        torch.cuda.empty_cache()
    # ---- stage 4: re-test srgan output ----------------------------------
    # re-test srgan output
    dataloader1 = DataLoader(
        ImageDataset("srgan/srgan_output", hr_shape=(1024,1024)),
        batch_size=1,
        shuffle=True,
        num_workers=0,
    )
    all_boxes_1 = [[[] for _ in range(len(dataloader1))]
                   for _ in range(imdb.num_classes)]
    for i, gan_img in enumerate(dataloader1):
        #for i in range(len(dataloader1)):
        #gan_img = gan_output[i]
        #print(gan_img)
        # Resize the SRGAN result back to the crop's original size, then
        # rescale so the short side is 600 (the detector's test scale).
        arr = np.append(gan_img["origin_size"][0][0].numpy(), gan_img["origin_size"][1][0].numpy())
        gan_img_os = F.interpolate(gan_img['hr'], size=(arr[0],arr[1]), mode='bilinear')
        r = 600 / gan_img_os.shape[2]
        gan_info = np.array([[gan_img_os.shape[2], gan_img_os.shape[3], r]])
        with torch.no_grad():
            gan_img_600 = F.interpolate(gan_img_os, scale_factor=r, mode="bilinear").cuda()
            gan_info = torch.from_numpy(gan_info).cuda()
        # NOTE(review): the next two lines are no-op expression statements;
        # the detector call below therefore reuses gt_boxes/num_boxes left
        # over from the LAST image of the first pass — confirm intent.
        gt_boxes
        num_boxes
        #print(gan_img.shape)
        #print(gan_info.shape)
        #print(gt_boxes)
        #print(num_boxes)
        det_tic = time.time()
        rois_1, cls_prob_1, bbox_pred_1, \
        rpn_loss_cls_1, rpn_loss_box_1, \
        RCNN_loss_cls_1, RCNN_loss_bbox_1, \
        rois_label_1 = fasterRCNN(gan_img_600, gan_info, gt_boxes, num_boxes)

        scores_1 = cls_prob_1.data
        boxes_1 = rois_1.data[:, :, 1:5]
        #print(data)

        if cfg.TEST.BBOX_REG:
            # Apply bounding-box regression deltas
            box_deltas = bbox_pred_1.data
            if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
                # Optionally normalize targets by a precomputed mean and stdev
                # NOTE(review): unlike the first pass, the second .view()
                # reads from the UN-normalized box_deltas, discarding the
                # normalization computed on the line above — looks like a
                # copy-paste bug (should chain through box_deltas_1); confirm.
                if args.class_agnostic:
                    box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                        + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas_1 = box_deltas.view(1, -1, 4)
                else:
                    box_deltas_1 = box_deltas.view(-1, 4) * torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_STDS).cuda() \
                        + torch.FloatTensor(cfg.TRAIN.BBOX_NORMALIZE_MEANS).cuda()
                    box_deltas_1 = box_deltas.view(1, -1, 4 * len(imdb.classes))

            # NOTE(review): 'boxes' is the ROI tensor from the LAST image of
            # the first pass; this looks like it should be 'boxes_1' — confirm.
            pred_boxes_1 = bbox_transform_inv(boxes, box_deltas_1, 1)
            pred_boxes_1 = clip_boxes(pred_boxes_1, gan_info.data, 1)
        else:
            # Simply repeat the boxes, once for each class
            pred_boxes_1 = np.tile(boxes_1, (1, scores.shape[1]))

        # NOTE(review): 'data' is stale (last batch of the first pass); the
        # scale for THIS image is gan_info's third element — confirm.
        pred_boxes_1 /= data[1][0][2].item()

        scores_1 = scores_1.squeeze()
        pred_boxes_1 = pred_boxes_1.squeeze()
        det_toc = time.time()
        detect_time = det_toc - det_tic
        misc_tic = time.time()
        ############################## decline head-tail overlapping
        # Same head/tail cross-class NMS as in the first pass.
        new_pred_boxes = torch.cuda.FloatTensor(300, 160).zero_()
        new_scores = torch.cuda.FloatTensor(300,40).zero_()
        for k in range(13):
            b = torch.cat((pred_boxes_1[:,12*k+4:12*k+8],pred_boxes_1[:,12*k+8:12*k+12]),0)
            s = torch.cat((scores_1[:,3*k+1],scores_1[:,3*k+2]),0)
            keep = nms(b, s, 0.2)
            #new head class
            idx = [g for g in range(len(keep)) if keep[g] <300]
            new_pred_boxes[:len(keep[idx]),12*k+4:12*k+8] = b[keep[idx]]
            new_scores[:len(keep[idx]),3*k+1] = s[keep[idx]]
            #new tail class
            idx = [g for g in range(len(keep)) if keep[g] >=300]
            new_pred_boxes[:len(keep[idx]),12*k+8:12*k+12] = b[keep[idx]]
            new_scores[:len(keep[idx]),3*k+2] = s[keep[idx]]
            #new full length class = original
            # NOTE(review): these two lines copy from the FIRST pass's
            # pred_boxes/scores instead of pred_boxes_1/scores_1 — confirm.
            new_pred_boxes[:,12*k+12:12*k+16] = pred_boxes[:,12*k+12:12*k+16]
            new_scores[:,3*k+3] = scores[:,3*k+3]

        if vis:
            im = cv2.imread(imdb.image_path_at(i))
            im2show = np.copy(im)
        for j in range(1, imdb.num_classes):
            inds = torch.nonzero(new_scores[:,j]>thresh).view(-1)
            # if there is det
            if inds.numel() > 0:
                cls_scores_1 = new_scores[:,j][inds]
                _, order = torch.sort(cls_scores_1, 0, True)
                if args.class_agnostic:
                    cls_boxes_1 = new_pred_boxes[inds, :]
                else:
                    cls_boxes_1 = new_pred_boxes[inds][:, j * 4:(j + 1) * 4]
                cls_dets_1 = torch.cat((cls_boxes_1, cls_scores_1.unsqueeze(1)), 1)
                cls_dets_1 = cls_dets_1[order]
                keep = nms(cls_boxes_1[order, :], cls_scores_1[order], cfg.TEST.NMS)
                cls_dets_1 = cls_dets_1[keep.view(-1).long()]
                # NOTE(review): the next two lines reference 'cls_dets'
                # (first pass) instead of 'cls_dets_1' — confirm.
                if vis:
                    im2show = vis_detections(im2show, imdb.classes[j], cls_dets.cpu().numpy(), 0.3)
                all_boxes_1[j][i] = cls_dets.cpu().numpy()
            else:
                all_boxes_1[j][i] = empty_array
        # Limit to max_per_image detections *over all classes*
        if max_per_image > 0:
            #print(all_boxes[3][i][:,-1])
            image_scores = np.hstack([all_boxes_1[j][i][:,-1]
                                      for j in range(1, imdb.num_classes)])
            if len(image_scores) > max_per_image:
                image_thresh = np.sort(image_scores)[-max_per_image]
                for j in range(1, imdb.num_classes):
                    keep = np.where(all_boxes_1[j][i][:, -1] >= image_thresh)[0]
                    all_boxes_1[j][i] = all_boxes_1[j][i][keep, :]

        misc_toc = time.time()
        nms_time = misc_toc - misc_tic

        sys.stdout.write('im_detect: {:d}/{:d} {:.3f}s {:.3f}s \r' \
            .format(i + 1, len(dataloader1), detect_time, nms_time))
        sys.stdout.flush()
        torch.cuda.empty_cache()

    # ---- stage 5: merge both passes, dump and evaluate ------------------
    # NOTE(review): detections.pkl is written BEFORE the second-pass results
    # are merged in below — confirm this is intended.
    with open(det_file, 'wb') as f:
        pickle.dump(all_boxes, f, pickle.HIGHEST_PROTOCOL)

    print('Evaluating detections')
    end = time.time()
    #print(len(all_boxes))
    #print(len(all_boxes_1[0]))
    for a in range(len(all_boxes)):
        all_boxes[a].extend(all_boxes_1[a])
        print(len(all_boxes[a]))
    print(new_indexes)
    #print(new_gt_boxes)
    imdb.evaluate_detections(all_boxes, output_dir, new_indexes, new_gt_boxes)

    print("test time: %0.4fs" % (end - start))
| 39.325737 | 115 | 0.590449 |
8267a45960a2743e88617d4dc273ba1a2f8b4aea | 1,231 | py | Python | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | [
"OLDAP-2.3"
] | null | null | null | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | [
"OLDAP-2.3"
] | 1 | 2020-09-24T05:15:00.000Z | 2020-09-24T05:17:06.000Z | app.py | iio1989/oshite | dd95eced2630929705670aaf23be5f35df3b9737 | [
"OLDAP-2.3"
] | null | null | null | from flask import Flask, render_template, request, redirect, url_for, Markup
import app_helper as apHelp
app = Flask(__name__)
# click convetBtn. get HttpParam.
# click homeBtn from header.
# click aboutBtn from header.
# click historyBtn from header.
if __name__ == '__main__':
app.run(debug=True) | 30.775 | 77 | 0.645004 |
826808ff1bd0ba43f535ae2091908373eab637e4 | 1,322 | py | Python | build/cls/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | [
"MIT"
] | 1 | 2016-07-19T08:50:54.000Z | 2016-07-19T08:50:54.000Z | src/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | [
"MIT"
] | null | null | null | src/tp/slices.py | amunoz1/mines | 106f852fe4e64ee132d74290c1a57ea716312376 | [
"MIT"
] | null | null | null | """
Makes subdirectories with slices of seismic time or depth images.
For example, the directory with name "s3_84" contains a constant-i3
slice, where i3 = 84.
"""
from tputils import *
#setupForSubset("subz_401_4_600")
setupForSubset("subt_251_4_500")
seismicDir = getSeismicDir()
#############################################################################
#############################################################################
run(main)
| 28.12766 | 77 | 0.618759 |
826871d70c21ce76b15c27edf3c9b2a76149c4a5 | 4,442 | py | Python | lldb/examples/summaries/cocoa/NSException.py | bytesnake/Enzyme | 247606c279920d476645d2e319e574bf8be10fc9 | [
"Apache-2.0"
] | 427 | 2018-05-29T14:21:02.000Z | 2022-03-16T03:17:54.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/NSException.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 25 | 2018-07-23T08:34:15.000Z | 2021-11-05T07:13:36.000Z | SymbolExtractorAndRenamer/lldb/examples/summaries/cocoa/NSException.py | PolideaPlayground/SiriusObfuscator | b0e590d8130e97856afe578869b83a209e2b19be | [
"Apache-2.0"
] | 52 | 2018-07-19T19:57:32.000Z | 2022-03-11T16:05:38.000Z | """
LLDB AppKit formatters
part of The LLVM Compiler Infrastructure
This file is distributed under the University of Illinois Open Source
License. See LICENSE.TXT for details.
"""
# summary provider for class NSException
import lldb.runtime.objc.objc_runtime
import lldb.formatters.metrics
import CFString
import lldb
import lldb.formatters.Logger
statistics = lldb.formatters.metrics.Metrics()
statistics.add_metric('invalid_isa')
statistics.add_metric('invalid_pointer')
statistics.add_metric('unknown_class')
statistics.add_metric('code_notrun')
def GetSummary_Impl(valobj):
    """Select the summary-provider object for *valobj*.

    Runs the shared Objective-C class-detection helper first; if that helper
    already produced a wrapper (error/special case), it is returned as-is.
    Otherwise the detected class name decides between the known-NSException
    provider and the unknown-class fallback, and the module-level metrics
    are updated accordingly.
    """
    global statistics
    logger = lldb.formatters.Logger.Logger()
    class_data, wrapper = lldb.runtime.objc.objc_runtime.Utilities.prepare_class_detection(
        valobj, statistics)
    if wrapper:
        # Class detection already decided the outcome (e.g. invalid pointer).
        return wrapper
    detected_name = class_data.class_name()
    logger >> "class name is: " + str(detected_name)
    if detected_name == 'NSException':
        provider = NSKnownException_SummaryProvider(
            valobj, class_data.sys_params)
        statistics.metric_hit('code_notrun', valobj)
        return provider
    # Anything else falls back to the generic provider and is recorded as
    # an unknown class in the metrics.
    provider = NSUnknownException_SummaryProvider(
        valobj, class_data.sys_params)
    statistics.metric_hit(
        'unknown_class',
        valobj.GetName() + " seen as " + detected_name)
    return provider
def NSException_SummaryProvider(valobj, dict):
    """LLDB entry point: produce the textual summary for an NSException.

    Delegates provider selection to GetSummary_Impl and converts the
    provider's answer into the string LLDB displays.
    """
    logger = lldb.formatters.Logger.Logger()
    provider = GetSummary_Impl(valobj)
    if provider is None:
        return 'Summary Unavailable'
    # Special-situation wrappers carry their own message verbatim.
    if isinstance(
            provider,
            lldb.runtime.objc.objc_runtime.SpecialSituation_Description):
        return provider.message()
    try:
        summary = provider.description()
    except:
        # Any failure while reading the target maps to "no summary".
        summary = None
    logger >> "got summary " + str(summary)
    if summary is None:
        summary = '<variable is not NSException>'
    return str(summary)
def __lldb_init_module(debugger, dict):
    # Called automatically by LLDB when this script is imported: registers
    # NSException_SummaryProvider as the summary formatter for NSException.
    debugger.HandleCommand(
        "type summary add -F NSException.NSException_SummaryProvider NSException")
| 32.903704 | 94 | 0.670419 |
8268e3ff708fceac06d057f89101a1b211a8db3a | 364 | py | Python | pacman-arch/test/pacman/tests/upgrade084.py | Maxython/pacman-for-termux | 3b208eb9274cbfc7a27fca673ea8a58f09ebad47 | [
"MIT"
] | 23 | 2021-05-21T19:11:06.000Z | 2022-03-31T18:14:20.000Z | source/pacman-6.0.1/test/pacman/tests/upgrade084.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | 11 | 2021-05-21T12:08:44.000Z | 2021-12-21T08:30:08.000Z | source/pacman-6.0.1/test/pacman/tests/upgrade084.py | Scottx86-64/dotfiles-1 | 51004b1e2b032664cce6b553d2052757c286087d | [
"Unlicense"
] | 1 | 2021-09-26T08:44:40.000Z | 2021-09-26T08:44:40.000Z | self.description = "Install a package ('any' architecture)"
p = pmpkg("dummy")
p.files = ["bin/dummy",
"usr/man/man1/dummy.1"]
p.arch = 'any'
self.addpkg(p)
self.option["Architecture"] = ['auto']
self.args = "-U %s" % p.filename()
self.addrule("PACMAN_RETCODE=0")
self.addrule("PKG_EXIST=dummy")
for f in p.files:
self.addrule("FILE_EXIST=%s" % f)
| 21.411765 | 59 | 0.648352 |
826924435f780ad15c0f8fe2ba2c459917504e1d | 7,995 | py | Python | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
] | null | null | null | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
] | null | null | null | Examples And Benchmarks/HTTP over Raw Connection/SERVERS/http_server__http_tools.py | FI-Mihej/FI-ASockIOCore | 82ee94e94f692511afee5aeb0d75bb7366eb9b96 | [
"Apache-2.0"
] | null | null | null | from simple_network.tcp_app_server import *
import httptools
"""
Module Docstring
Docstrings: http://www.python.org/dev/peps/pep-0257/
"""
__author__ = 'ButenkoMS <gtalk@butenkoms.space>'
# ======================================================================
# ===================GLOBAL SETTINGS FOR ALL TESTS======================
#
# Connection/gate settings shared by all tests in this benchmark module.
SERVER_KEYWORD = b'http server inline'
SERVER_ADDRESS = ('localhost', 25000)

BSC__USE_READ_WITH_FIXED_BUFFER = True  # "Optimized for speed". Good for Named Clients.
# BSC__USE_READ_WITH_FIXED_BUFFER = False  # "Optimized for memory". Good for big amount of Unknown Clients (raw,
#                                          # http, etc.) if you have small server.
BSC__SOCKET_READ_FIXED_BUFFER_SIZE = 1024 ** 2  # 1 MiB read buffer
BSC__USE_NODELAY_INET = True   # disable Nagle's algorithm on inet sockets
BSC__REUSE_GATE_ADDR = True    # SO_REUSEADDR-style rebinding of the gate
BSC__REUSE_GATE_PORT = True

LINE_TRACE_ALLOWED = True

# ==============================================================================================================
# !!!!! IMPORTANT !!!!!
# NEXT CODE SHOULD BE EQUIVALENT TO ASYNCIO HTTP SERVER'S CODE FROM "https://github.com/MagicStack/vmbench" PROJECT
# (BENCHMARKING TOOL FROM 'UVLOOP' DEVELOPERS) FOR FAIR COMPARISON, SO IT'S SO DIRTY.
# (IT'S ALMOST EQUIVALENT: IT DOES NOT HAVE FEW CRITICAL vmbench's BUGS)

# Cache of pre-rendered HTTP responses, keyed per request (mirrors vmbench).
_RESP_CACHE = {}

if __name__ == '__main__':
    # NOTE(review): run_http_server is not defined in this excerpt —
    # presumably defined between the cache above and this guard; confirm.
    run_http_server()
| 34.313305 | 115 | 0.635897 |
826a0a34399ac3cae5194033717af23bcc5eca24 | 1,066 | py | Python | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | [
"BSD-3-Clause"
] | null | null | null | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | [
"BSD-3-Clause"
] | null | null | null | bamboomba_description/launch/robot_state_publisher.launch.py | RIF-Robotics/bamboomba | 3fd725be9b0fdf33d3e46c37d20d8cbecea7d15d | [
"BSD-3-Clause"
] | null | null | null | from os import path
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from launch_ros.actions import Node
from launch.actions import DeclareLaunchArgument
from launch.substitutions import Command, LaunchConfiguration
| 35.533333 | 83 | 0.674484 |
826b06577ba553bf4320966964956912f51ba613 | 1,211 | py | Python | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
] | 4 | 2019-11-13T11:11:43.000Z | 2021-06-20T11:01:27.000Z | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
] | 5 | 2019-01-14T14:59:44.000Z | 2020-12-13T17:04:27.000Z | tests/test__event.py | alpha-health-ai/pyformance | 3dcf6556a070e89c783f30ddfff03c986e7a5582 | [
"Apache-2.0"
] | 6 | 2019-04-17T21:07:40.000Z | 2022-01-18T16:11:51.000Z | from pyformance.meters import Event, EventPoint
from tests import TimedTestCase
| 26.911111 | 87 | 0.620974 |
826c1e777daa6347910fda6447c0f31e1cd72324 | 4,894 | py | Python | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | problem_#43_30032019.py | vivek28111992/DailyCoding | db58c069ef393f6a93fe86913660860134cb97a0 | [
"MIT"
] | null | null | null | """
Good morning! Here's your coding interview problem for today.
This problem was asked by Amazon.
Implement a stack that has the following methods:
push(val), which pushes an element onto the stack
pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then it should throw an error or return null.
max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should throw an error or return null.
Each method should run in constant time.
https://www.geeksforgeeks.org/design-a-stack-that-supports-getmin-in-o1-time-and-o1-extra-space/
https://www.geeksforgeeks.org/design-and-implement-special-stack-data-structure/
"""
# Class to make a Node
# Demo of the min/max stack. NOTE(review): the Stack class itself is not
# visible in this excerpt — it is presumably defined above (per the
# GeeksforGeeks references in the module docstring); confirm.
stack = Stack()
stack.push(3)
stack.push(5)
stack.getMin()  # min of {3, 5} -> 3
stack.getMax()  # max of {3, 5} -> 5
stack.push(2)
stack.push(1)
stack.getMin()  # min of {3, 5, 2, 1} -> 1
stack.getMax()  # max of {3, 5, 2, 1} -> 5
stack.pop()     # removes 1
stack.getMin()  # -> 2
stack.getMax()  # -> 5
stack.pop()     # removes 2
826c762278c3cdf3123d44a03bc4ba2f8e259b59 | 1,161 | py | Python | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | 1 | 2015-09-03T18:30:10.000Z | 2015-09-03T18:30:10.000Z | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | 2 | 2021-12-13T20:51:32.000Z | 2022-02-11T03:47:35.000Z | scrapy/contrib/linkextractors/lxmlhtml.py | emschorsch/scrapy | acb7bad1ff4037b4a613ac94e2d3357bf92bdb8f | [
"BSD-3-Clause"
] | 1 | 2017-11-09T20:33:59.000Z | 2017-11-09T20:33:59.000Z | """
Link extractor based on lxml.html
"""
import lxml.html
from scrapy.link import Link
from scrapy.utils.python import unique as unique_list
| 31.378378 | 73 | 0.639966 |
826cefe38aada7c6d25cf287d99f1f536d838887 | 97 | py | Python | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
] | null | null | null | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
] | null | null | null | src/meetings/admin.py | Yalnyra/office-meeting-reservation | 52f558ec11a9b5d69c28acb60de132d70b0a789b | [
"bzip2-1.0.6"
] | null | null | null | from django.contrib import admin
from .models import Meeting
admin.site.register(Meeting)
| 16.166667 | 33 | 0.773196 |
826e60c1e9dd5f09bf2a2eb0580dbe4ef233f970 | 578 | py | Python | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
] | null | null | null | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
] | null | null | null | app/templates/init.py | arudmin/generator-flask-heroku | 12ecd9d37b732bf5d59912c4874f1dbc6cfa63b1 | [
"MIT"
] | null | null | null | from flask import Flask, url_for
import os
app = Flask(__name__)
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = 'SECRET_KEY_CH1ng3me'
# Determines the destination of the build. Only usefull if you're using Frozen-Flask
app.config['FREEZER_DESTINATION'] = os.path.dirname(os.path.abspath(__file__))+'/../build'
# Function to easily find your assets
# In your template use <link rel=stylesheet href="{{ static('filename') }}">
<%= appName %>.jinja_env.globals['static'] = (
lambda filename: url_for('static', filename = filename)
)
from <%= appName %> import views
| 32.111111 | 90 | 0.723183 |
826e7e8ce0638e411f4ad1445cfe2c06fdbae9c6 | 936 | py | Python | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | 4 | 2021-12-10T17:20:26.000Z | 2021-12-27T14:38:40.000Z | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | null | null | null | sigmod2021-exdra-p523/experiments/code/other/l2svm.py | damslab/reproducibility | f7804b2513859f7e6f14fa7842d81003d0758bf8 | [
"Apache-2.0"
] | null | null | null |
# Fit a linear support-vector regressor on features/labels loaded from .npy
# files and write the learned coefficients to a CSV file.
import numpy as np
import argparse
from sklearn.svm import LinearSVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_regression  # NOTE(review): unused in this script -- confirm before removing
# Command-line interface: feature matrix (-x), label vector (-y), output (-o).
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--datapath', type=str, required=True)
parser.add_argument('-y', '--labels', type=str, required=True)
# NOTE(review): argparse `type=bool` treats ANY non-empty string (including
# "False") as True; `action='store_true'` would be correct but changes the CLI.
parser.add_argument('-v', '--verbose', type=bool, default=False)
parser.add_argument('-o', '--outputpath', type=str, required=True)
args = parser.parse_args()
# Load features X and labels y from NumPy binary files.
# NOTE(review): allow_pickle=True can execute arbitrary code when loading a
# crafted file -- only use on trusted inputs.
X = np.load(args.datapath, allow_pickle=True)
y = np.load(args.labels, allow_pickle=True)
# http://scikit-learn.sourceforge.net/stable/modules/generated/sklearn.svm.LinearSVC.html#sklearn.svm.LinearSVC
# Standardize features, then fit the linear SVR (capped at 30 iterations).
regr = make_pipeline(StandardScaler(),
                     LinearSVR(verbose=args.verbose, tol = 1e-5, max_iter = 30))
regr.fit(X,y)
# Persist the fitted coefficients (one per feature) as comma-separated text.
np.savetxt(args.outputpath, regr.named_steps['linearsvr'].coef_, delimiter=",")
| 36 | 111 | 0.766026 |
826ee5078415354fd8746cf24ad960817241f697 | 4,797 | py | Python | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
] | 3 | 2021-06-25T03:38:38.000Z | 2022-01-13T08:39:48.000Z | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
] | null | null | null | libs/fm_mission_planner/python/fm_mission_planner/target_viz.py | ethz-asl/mav_findmine | 2835995ace0a20a30f20812437b1b066428253a9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# MIT License
#
# Copyright (c) 2020 Rik Baehnemann, ASL, ETH Zurich, Switzerland
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import rospy
import rospkg
import pandas as pd
import pymap3d as pm
import os
import numpy as np
from matplotlib import cm
from matplotlib import colors
from sensor_msgs.msg import NavSatFix
from visualization_msgs.msg import Marker, MarkerArray
# Load target list from CSV, receive home point from ROS msgs and publish target points to RVIZ.
| 36.9 | 141 | 0.64999 |
826f63c5c39c287a99f63c5f1236dbd56acc410a | 20,198 | py | Python | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
] | null | null | null | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
] | null | null | null | capirca/lib/gcp_hf.py | PhillSimonds/capirca | 26c5f4f7d3bfc29841f5e6d3cdf07be9923c2c70 | [
"Apache-2.0"
] | 1 | 2022-02-14T03:22:18.000Z | 2022-02-14T03:22:18.000Z | """Google Cloud Hierarchical Firewall Generator.
Hierarchical Firewalls (HF) are represented in a SecurityPolicy GCP resouce.
"""
import copy
import re
from typing import Dict, Any
from absl import logging
from capirca.lib import gcp
from capirca.lib import nacaddr
def GetRuleTupleCount(dict_term: Dict[str, Any], api_version):
  """Calculate the tuple count of a rule in its dictionary form.

  Quota is charged based on how complex the rules are rather than simply
  limiting the number of rules: the cost of a rule is the number of distinct
  protocol:port combinations, plus the number of IP addresses, plus the
  number of targets.

  Note: the goal here is not to validate the rule, only to compute its tuple
  count regardless of correctness.

  Args:
    dict_term: A dict object.
    api_version: A string indicating the api version.

  Returns:
    int: The tuple count of the rule.
  """
  syntax = ApiVersionSyntaxMap.SYNTAX_MAP[api_version]
  # The GA API nests the rule config directly under 'match'; older versions
  # add an extra 'config' level.
  if api_version == 'ga':
    config = dict_term.get('match', {})
  else:
    config = dict_term.get('match', {}).get('config', {})

  # One tuple per source/destination address.
  addresses_count = len(config.get(syntax['dest_ip_range'], []))
  addresses_count += len(config.get(syntax['src_ip_range'], []))

  # One tuple per listed port, plus one per protocol specification.
  layer4_count = 0
  for l4_entry in config.get(syntax['layer_4_config'], []):
    layer4_count += len(l4_entry.get('ports', []))
    if l4_entry.get('ipProtocol'):
      layer4_count += 1

  # One tuple per target resource.
  return addresses_count + layer4_count + len(dict_term.get('targetResources', []))
| 36.392793 | 105 | 0.645608 |
82701369685666a9616bf5da9dc3d5c258f51242 | 196 | py | Python | src/caracara/_kits.py | LaudateCorpus1/caracara | 1200f56891617f15dd48616d7198c45a1e0cbe26 | [
"Unlicense"
] | 1 | 2021-12-28T05:12:33.000Z | 2021-12-28T05:12:33.000Z | src/caracara/_kits.py | CrowdStrike/caracara | 0cfc12447ee299f69e23a5d5210eab5fce8e033e | [
"Unlicense"
] | 1 | 2021-11-26T08:53:25.000Z | 2021-11-26T08:53:25.000Z | src/caracara/_kits.py | LaudateCorpus1/caracara | 1200f56891617f15dd48616d7198c45a1e0cbe26 | [
"Unlicense"
] | 2 | 2022-02-22T07:32:20.000Z | 2022-02-26T03:05:57.000Z | """Kits class defines the available Toolboxes."""
from enum import Enum
| 19.6 | 52 | 0.683673 |
827202efc4734328b2680b6cfa71b26432695ccb | 559 | py | Python | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
] | null | null | null | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
] | null | null | null | hackerrank/algorithms/time_conversion.py | ontana/mystudy | 8158550da3cdbaaa81660be73f2dfad869aae466 | [
"MIT"
] | null | null | null | #!/bin/python3
# https://www.hackerrank.com/challenges/time-conversion
import sys
s = input().strip()
result = timeConversion(s)
print(result) | 26.619048 | 77 | 0.563506 |
8272095e27dabd69813e44dde70f50afb8a04d0c | 5,296 | py | Python | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
] | 5 | 2018-09-07T09:00:31.000Z | 2022-01-02T05:20:46.000Z | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
] | null | null | null | supplychainpy/demand/economic_order_quantity.py | supplybi/supplychainpy | 821ea21adb684abba7f9a7b26eaf218f44b45ced | [
"BSD-3-Clause"
] | 3 | 2018-02-14T14:04:45.000Z | 2021-04-15T04:31:42.000Z | from decimal import Decimal, getcontext, ROUND_HALF_UP
from supplychainpy.demand import analyse_uncertain_demand
from supplychainpy.demand.eoq import minimum_variable_cost, economic_order_quantity
| 42.368 | 138 | 0.624434 |
8273cb7b96dbba2d80d2ff7f28ed04bc72f420f5 | 2,182 | py | Python | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
] | 1 | 2019-04-04T07:07:55.000Z | 2019-04-04T07:07:55.000Z | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
] | null | null | null | tests/test_arraymisc.py | XinYangDong/mmcv-0.2.10 | 527388ea7c5daf7149a88b3dc833373d5a5fb850 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
import mmcv
import numpy as np
import pytest
| 31.171429 | 75 | 0.593951 |
82740abbd8d90d8a1d79663896c17644f40508b8 | 2,373 | py | Python | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
] | 1 | 2020-07-06T18:15:34.000Z | 2020-07-06T18:15:34.000Z | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
] | null | null | null | train.py | jmribeiro/NumPyNeuralNetworkFromScratch | 19c4360ef4eec91cd17142ced9fde35773d795b5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
Author: Joo Ribeiro
"""
import argparse
import numpy as np
from model import FeedForwardNetwork
from utils import load_ocr_dataset, plot
if __name__ == '__main__':
    arg_parser = argparse.ArgumentParser()

    # Training hyperparameters.
    arg_parser.add_argument('-epochs', default=20, type=int, help="Number of training epochs.")
    arg_parser.add_argument('-num_layers', default=2, type=int, help="Number of hidden layers.")
    arg_parser.add_argument('-hidden_size', default=64, type=int, help="Number of units per hidden layer.")
    arg_parser.add_argument('-activation', default="relu", type=str, help="Activation function for the hidden layers.")
    arg_parser.add_argument('-learning_rate', default=0.1, type=float, help="Learning rate for SGD optimizer.")
    arg_parser.add_argument('-l2_penalty', default=0.0, type=float, help="L2 penalty for SGD optimizer.")
    arg_parser.add_argument('-batch_size', default=32, type=int, help="Number of datapoints per SGD step.")

    # Miscellaneous options.
    arg_parser.add_argument('-data', default='ocr_dataset/letter.data', help="Path to letter.data OCR dataset.")
    arg_parser.add_argument('-save_plot', action="store_true", help="Whether or not to save the generated accuracies plot.")
    args = arg_parser.parse_args()

    # Load the OCR dataset and derive input/output dimensions from the splits.
    print("Loading OCR Dataset", end="", flush=True)
    splits = load_ocr_dataset(args.data)
    X_train, y_train = splits["train"]
    X_val, y_val = splits["dev"]
    X_test, y_test = splits["test"]
    num_features = X_train.shape[1]
    num_classes = np.unique(y_train).size
    print(" [Done]", flush=True)

    # Build the feed-forward network with the requested hyperparameters.
    print("Deploying model", end="", flush=True)
    network = FeedForwardNetwork(
        num_features, num_classes,
        args.num_layers, args.hidden_size, args.activation,
        args.learning_rate, args.l2_penalty, args.batch_size
    )
    print(" [Done]", flush=True)

    # Train on the train split, validating each epoch, then score on test.
    print("Training model", flush=True)
    validation_accuracies, final_test_accuracy = network.fit(
        X_train, y_train, X_val, y_val, X_test, y_test, args.epochs)

    # Plot validation accuracy over epochs (optionally saved to disk).
    print("Plotting", end="", flush=True)
    plot(args.epochs, validation_accuracies, args.save_plot)
    print(" [Done]\nGoodbye.", flush=True)
| 31.64 | 120 | 0.645175 |
8274f0f41fe5e911ea0767aa342d89364c6cbf67 | 3,935 | py | Python | varify/samples/views.py | chop-dbhi/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
] | 6 | 2015-01-16T14:35:29.000Z | 2017-06-18T05:56:15.000Z | varify/samples/views.py | solvebio/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
] | null | null | null | varify/samples/views.py | solvebio/varify | 5dc721e49ed9bd3582f4b117785fdd1a8b6ba777 | [
"BSD-2-Clause"
] | 3 | 2015-05-27T15:03:17.000Z | 2020-03-11T08:42:46.000Z | from guardian.shortcuts import get_objects_for_user
from django.http import Http404, HttpResponseRedirect
from django.db.models import Count
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from vdw.samples.models import Sample, Project, Batch, Cohort
from .forms import CohortForm
| 31.48 | 79 | 0.665565 |
8275090a0a26b9725fd053645507a75767690bfa | 6,656 | py | Python | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
] | null | null | null | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
] | null | null | null | dumbai.py | CapKenway/dumbai | affa89663c980177d6c1e0fef9bda7978032da4d | [
"Unlicense"
] | null | null | null | import sys
from pprint import pprint
import os
#--------------------------------------------------------------------------#
#--------------------------------------------------------------------------#
#--------------------------------------------------------------------------# | 35.404255 | 118 | 0.526292 |
82758db2cce35fc271964b68ef46df023933f752 | 6,079 | py | Python | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
] | null | null | null | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
] | null | null | null | napari_plugin_test/widgets/warp_image_volume.py | krentzd/napari-test | 97673f9408eab7d2cc01f4562a3deeeee7fd8bcb | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding: utf-8
# Adapted from: https://github.com/zpincus/celltool/blob/master/celltool/numerics/image_warp.py
from scipy import ndimage
import numpy as np
from probreg import bcpd
import tifffile
import matplotlib.pyplot as plt
import napari
from magicgui import magic_factory, widgets
from napari.types import PointsData, ImageData
from typing_extensions import Annotated
| 40.798658 | 147 | 0.659154 |
8275c53b65c622b02c7389ff31c415c23a469b50 | 6,235 | py | Python | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
] | null | null | null | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
] | 7 | 2018-03-26T02:09:43.000Z | 2021-06-10T17:42:26.000Z | main/tests/test_celery.py | OpenHumans/oh-23andme-source | 2580b0177b9caad079826305c7455ea7fb116a76 | [
"MIT"
] | 2 | 2018-03-29T12:51:28.000Z | 2018-12-27T18:44:35.000Z | from django.test import TestCase, RequestFactory
import vcr
from django.conf import settings
from django.core.management import call_command
from open_humans.models import OpenHumansMember
from main.celery import read_reference, clean_raw_23andme
from main.celery_helper import vcf_header
import os
import tempfile
import requests
import requests_mock
from main.celery import process_file
| 40.225806 | 171 | 0.563432 |
82763f4b601df981afd52e2acd04c501b896a5f2 | 168 | py | Python | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
] | null | null | null | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
] | null | null | null | apps/tracking/admin.py | Codeidea/budget-tracker | e07e8d6bb49b0a3de428942a57f090912c191d3e | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import LogCategory, BudgetLog
# Register your models here.
admin.site.register(LogCategory)
admin.site.register(BudgetLog) | 33.6 | 42 | 0.833333 |
8276d754ae9e540d8e94f8e6e543d48ce3a9e8c7 | 2,060 | py | Python | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | 4 | 2021-06-01T02:06:57.000Z | 2022-02-23T02:14:07.000Z | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | null | null | null | nn_model/embedding_layer.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | 2 | 2021-01-28T05:48:20.000Z | 2022-01-24T11:59:13.000Z | # -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
Embedding Layer
"""
#************************************************************
# Imported Libraries
#************************************************************
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from gensim.models import KeyedVectors
import pdb
| 30.294118 | 94 | 0.662136 |
827728c1d9e6e7856cb8f43e7465659d4d505df0 | 17,810 | py | Python | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
] | null | null | null | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
] | null | null | null | grpclib/server.py | panaetov/grpclib | 3c7d6ec3479cde417e748bc9b0cf0e9188d0f42a | [
"BSD-3-Clause"
] | null | null | null | import abc
import socket
import logging
import asyncio
import warnings
import h2.config
import h2.exceptions
from .utils import DeadlineWrapper
from .const import Status
from .stream import send_message, recv_message
from .stream import StreamIterator
from .metadata import Metadata, Deadline
from .protocol import H2Protocol, AbstractHandler
from .exceptions import GRPCError, ProtocolError
from .encoding.base import GRPC_CONTENT_TYPE
from .encoding.proto import ProtoCodec
log = logging.getLogger(__name__)
class Handler(_GC, AbstractHandler):
    # Interval used by the _GC mixin's periodic cleanup -- units/semantics
    # are defined by _GC, which is not visible in this view.
    __gc_interval__ = 10
    # Flag indicating shutdown has been requested for this handler
    # (presumably set by a close() defined elsewhere -- TODO confirm).
    closing = False
class Server(_GC, asyncio.AbstractServer):
    """
    HTTP/2 server, which uses gRPC service handlers to handle requests.
    Handler is a subclass of the abstract base class, which was generated
    from .proto file:
    .. code-block:: python
        class CoffeeMachine(cafe_grpc.CoffeeMachineBase):
            async def MakeLatte(self, stream):
                task: cafe_pb2.LatteOrder = await stream.recv_message()
                ...
                await stream.send_message(empty_pb2.Empty())
    server = Server([CoffeeMachine()], loop=loop)
    """
    # Interval used by the _GC mixin's periodic cleanup -- units/semantics
    # are defined by _GC, which is not visible in this view.
    __gc_interval__ = 10
    def __init__(self, handlers, *, loop, codec=None):
        """
        :param handlers: list of handlers
        :param loop: asyncio-compatible event loop
        :param codec: message codec; defaults to ProtoCodec() when not given
        """
        # Merge every handler's __mapping__() into one combined lookup table.
        mapping = {}
        for handler in handlers:
            mapping.update(handler.__mapping__())
        self._mapping = mapping
        self._loop = loop
        self._codec = codec or ProtoCodec()
        # Server-side HTTP/2 configuration; headers are decoded as utf-8.
        self._config = h2.config.H2Configuration(
            client_side=False,
            header_encoding='utf-8',
        )
        # Set once the server starts listening; None until then.
        self._tcp_server = None
        # Live Handler objects, presumably one per connection -- see close().
        self._handlers = set()
    def close(self):
        """Stops accepting new connections, cancels all currently running
        requests. Request handlers are able to handle `CancelledError` and
        exit properly.
        """
        # close() before the server has been started is a programming error.
        if self._tcp_server is None:
            raise RuntimeError('Server is not started')
        self._tcp_server.close()
        # Ask each live connection handler to wind down its requests.
        for handler in self._handlers:
            handler.close()
| 34.921569 | 80 | 0.600842 |
82789f2ad5480b27da525091f877dbbf7fb5f30c | 3,923 | py | Python | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
] | null | null | null | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
] | null | null | null | examples/vector_dot.py | Wheest/EVA | 6d19da1d454f398f0ade297d3a76a4ee9e773929 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import argparse
from eva import EvaProgram, Input, Output
from eva.ckks import CKKSCompiler
from eva.seal import generate_keys
import numpy as np
import time
from eva.std.numeric import horizontal_sum
def generate_vector_dot_naive(size):
    """Build an EVA dot-product program using CKKS vector size 1.

    Each of the `size` vector elements becomes its own scalar ciphertext
    input (x_0..x_{size-1} and w_0..w_{size-1}).
    """
    program = EvaProgram("fhe_dot", vec_size=1)
    with program:
        # Shape the scalar inputs into a 1 x size row and a size x 1 column.
        row = np.array([Input(f"x_{n}") for n in range(size)]).reshape(1, size)
        col = np.array([Input(f"w_{k}") for k in range(size)]).reshape(size, 1)
        product = dot(row, col)
        Output("y", product[0][0])
    program.set_input_scales(32)
    program.set_output_ranges(32)
    return program
def generate_vector_dot(size):
    """Build an EVA dot-product program using CKKS vector size `size`.

    Both operands are packed into a single ciphertext each; the elementwise
    product is reduced with horizontal_sum.
    """
    program = EvaProgram("fhe_dot", vec_size=size)
    with program:
        lhs = np.array([Input("x")])
        rhs = np.array([Input("w")])
        elementwise = dot(lhs, rhs)
        Output("y", horizontal_sum(elementwise))
    program.set_input_scales(32)
    program.set_output_ranges(32)
    return program
if __name__ == "__main__":
    cli = argparse.ArgumentParser(description="Run a dot product program")
    cli.add_argument(
        "--mode",
        default="SIMD",
        choices=["SIMD", "naive"],
    )
    args = cli.parse_args()

    # Per-size timings for encrypted (CKKS) and plaintext evaluation.
    cipher_times = {}
    plain_times = {}

    print("Generating code in SIMD style" if args.mode == "SIMD"
          else "Generating code in naive style")

    # Benchmark powers of two from 4 up to 1024.
    for size in [2 ** power for power in range(2, 11)]:
        cipher_seconds, plain_seconds = benchmark_vector_dot(size, args.mode)
        cipher_times[f"{size}"] = cipher_seconds
        plain_times[f"{size}"] = plain_seconds
        print(f"Done vector size {size}, CKKS time: {cipher_seconds}")

    print("Done")
    print("CKKS times:", cipher_times)
    print("Plain text times:", plain_times)
8278ddae26be1d01817d1cce51811bee77e0e097 | 5,903 | py | Python | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | ramcache.py | Lopez6969/chromium-dashboard | b35fb5372f33bfe1992c0ffaf1e723afbb3d9af2 | [
"Apache-2.0"
] | null | null | null | from __future__ import division
from __future__ import print_function
# -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module manages a distributed RAM cache as a global python dictionary in
each AppEngine instance. AppEngine can spin up new instances or kill old ones
at any time. Each instance's RAM cache is independent and might not have the
same entries as found in the RAM caches of other instances.
Each instance will do the work needed to compute a given RAM cache entry
itself. The values computed in a given instance will speed up future requests
made to that instance only.
When the user edits something in the app, the updated entity is stored in
datastore. Also, the singleton SharedInvalidate entity is updated with the
timestamp of the change. Every request handler must start processing a request
by first calling SharedInvalidate.check_for_distributed_invalidation() which
checks for any needed invalidations and clears RAM cache entries in
that instance if needed.
For now, there is only a single RAM cache per instance and when anything is
invalidated, that entire RAM cache is completely cleared. In the future,
invalidations could be compartmentalized by RAM cache type, or even specific
entity IDs. Monorail uses that approach, but existing ChromeStatus code does
not need it.
Calling code must not mutate any value that is passed into set() or returned
from get(). If calling code needs to mutate such objects, it should call
copy.copy() or copy.deepcopy() to avoid unintentional cumulative mutations.
Unlike memcache, this RAM cache has no concept of expiration time. So,
whenever a cached value would become invalid, it must be invalidated.
"""
import logging
import time as time_module
from google.appengine.ext import db
global_cache = {}
expires = {}

# Whenever the cache would have more than this many items, some
# random item is dropped, or the entire cache is cleared.
# If our instances are killed by appengine for exceeding memory limits,
# we can configure larger instances and/or reduce this value.
MAX_CACHE_SIZE = 10000


def set(key, value, time=None):
  """Emulate the memcache.set() method using a RAM cache.

  Args:
    key: Cache key to store under.
    value: Value to cache.  Per the module docstring, callers must not
        mutate it afterwards.
    time: Optional lifetime in seconds; when omitted the entry never
        expires (matching memcache semantics).
  """
  if len(global_cache) + 1 > MAX_CACHE_SIZE:
    # Evict an arbitrary entry (and any expiry record for it) to stay
    # under the size cap.
    popped_item = global_cache.popitem()
    expires.pop(popped_item[0], None)
  global_cache[key] = value
  if time:
    expires[key] = int(time_module.time()) + time
  else:
    # Bug fix: drop any stale expiration left over from an earlier timed
    # set() of the same key; otherwise the new value would wrongly expire.
    expires.pop(key, None)
def get(key):
  """Emulate the memcache.get() method using a RAM cache.

  Returns the cached value, or None when the key is absent or expired.
  """
  # Drop the entry first if its expiration time has passed.
  _check_expired([key])
  verb = 'miss'
  if key in global_cache:
    verb = 'hit'
  logging.info('cache %s for %r', verb, key)
  return global_cache.get(key)
def get_multi(keys):
  """Emulate the memcache.get_multi() method using a RAM cache.

  Returns a dict containing only the keys that are currently cached.
  """
  # Drop any entries whose expiration time has passed before reading.
  _check_expired(keys)
  found = {}
  for key in keys:
    if key in global_cache:
      found[key] = global_cache[key]
  return found
def set_multi(entries):
  """Emulate the memcache.set_multi() method using a RAM cache.

  Args:
    entries: dict of key/value pairs to store (no expiration).
  """
  projected_size = len(global_cache) + len(entries)
  if projected_size > MAX_CACHE_SIZE:
    # Too large to trim entry-by-entry: start over with an empty cache.
    global_cache.clear()
    expires.clear()
  global_cache.update(entries)
def delete(key):
  """Emulate the memcache.delete() method using a RAM cache.

  Args:
    key: Cache key to remove.

  Only triggers a distributed invalidation when the key was actually
  cached; when nothing was removed there is nothing to clear anywhere.
  """
  if key in global_cache:
    del global_cache[key]
    flush_all() # Note: this is wasteful but infrequent in our app.
def flush_all():
  """Emulate the memcache.flush_all() method using a RAM cache.

  This does not clear the RAM cache in this instance. That happens
  at the start of the next request when the request handler calls
  SharedInvalidate.check_for_distributed_invalidation().
  """
  # Record the invalidation timestamp in the shared singleton so every
  # instance (including this one) clears its RAM cache on its next request.
  SharedInvalidate.invalidate()
def check_for_distributed_invalidation():
  """Just a shorthand way to call the class method.

  Request handlers call this at the start of every request (see the module
  docstring) so stale RAM cache entries are cleared before use.
  """
  SharedInvalidate.check_for_distributed_invalidation()
| 34.723529 | 79 | 0.750127 |
8279df0466383aeaceedee24127f9a8045b9a674 | 401 | py | Python | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | null | null | null | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | 18 | 2020-02-12T00:41:40.000Z | 2022-02-10T12:00:03.000Z | src/sales/migrations/0029_auto_20191025_1058.py | vladimirtkach/yesjob | 83800f4d29bf2dab30b14fc219d3150e3bc51e15 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-10-25 10:58
from django.db import migrations, models
| 21.105263 | 64 | 0.608479 |
8279e33b741621fbcfe10f065044f83eff6d9a93 | 41,238 | py | Python | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
] | null | null | null | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
] | null | null | null | maintenance/pymelControlPanel.py | GlenWalker/pymel | 8b69b72e1bb726a66792707af39626a987bf5c21 | [
"BSD-3-Clause"
] | null | null | null | """
UI for controlling how api classes and mel commands are combined into pymel classes.
This UI modifies factories.apiToMelData which is pickled out to apiMelBridge.
It controls:
which mel methods correspond to api methods
disabling of api methods
preference for overloaded methods (since currently only one overloaded method is supported)
renaming of apiMethod
"""
import inspect, re, os
import pymel.core as pm
import pymel.internal.factories as factories
import logging
logger = logging.getLogger(__name__)
if logger.level == logging.NOTSET:
logger.setLevel(logging.INFO)
FRAME_WIDTH = 800
VERBOSE = True
def _getClass(className):
    """Look up *className* on pm.nodetypes, pm.datatypes, then pm.general.

    Returns the first match, or None when no module defines it.
    """
    candidate_modules = (pm.nodetypes, pm.datatypes, pm.general)
    for candidate in candidate_modules:
        try:
            return getattr(candidate, className)
        except AttributeError:
            continue
    return None
def fixSpace():
    """Fix the Space enumerator: rename its 'postTransform' key to 'object'.

    Writes the corrected Enum into factories.apiClassOverrides; logs a
    warning when the expected key is absent.
    """
    enum_path = ('MSpace', 'pymelEnums', 'Space')
    space_enum = pm.util.getCascadingDictItem(factories.apiClassInfo, enum_path)
    renamed_keys = space_enum._keys.copy()
    post_transform = renamed_keys.pop('postTransform', None)
    if post_transform is None:
        logger.warning("could not fix Space")
        return
    renamed_keys['object'] = post_transform
    pm.util.setCascadingDictItem(
        factories.apiClassOverrides, enum_path,
        pm.util.Enum('Space', renamed_keys))
# def doCacheResults():
# print "---"
# print "adding manual defaults"
# setManualDefaults()
# print "merging dictionaries"
# # update apiClasIfno with the sparse data stored in apiClassOverrides
# factories.mergeApiClassOverrides()
# print "saving api cache"
# factories.saveApiCache()
# print "saving bridge"
# factories.saveApiMelBridgeCache()
# print "---"
| 42.295385 | 151 | 0.591299 |
827aae5cfb257b722009084fcdc82d1efd26e382 | 35 | py | Python | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
] | 7 | 2021-08-28T18:20:45.000Z | 2022-02-01T07:35:59.000Z | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
] | 4 | 2020-05-18T01:06:15.000Z | 2020-06-12T19:33:14.000Z | src/test/data/pa3/sample/list_get_element_oob_3.py | Leo-Enrique-Wu/chocopy_compiler_code_generation | 4606be0531b3de77411572aae98f73169f46b3b9 | [
"BSD-2-Clause"
] | 5 | 2019-11-27T05:11:05.000Z | 2021-06-29T14:31:14.000Z | x:[int] = None
x = []
print(x[0])
| 7 | 14 | 0.457143 |
827ac159b8342adeb18a832d9a86cfcb0600fb29 | 62 | py | Python | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
] | 4 | 2021-05-22T03:14:44.000Z | 2022-01-03T04:32:54.000Z | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
] | null | null | null | modules/vqvc/__init__.py | reppy4620/VCon | cac3441443cb9b28ffbaa0646ed1826d71cb16e0 | [
"MIT"
] | null | null | null | from .model import VQVCModel
from .pl_model import VQVCModule
| 20.666667 | 32 | 0.83871 |
827b053defe8919cad9935212546496cfc58073c | 1,040 | py | Python | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2020-06-17T16:57:14.000Z | 2020-06-17T16:57:14.000Z | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/constants.py | carlosb1/kornia | a2b34d497314e7ed65f114401efdd3cc9ba2077c | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2022-01-26T13:39:34.000Z | 2022-01-26T13:39:34.000Z | from typing import Union, TypeVar
from enum import Enum
import torch
pi = torch.tensor(3.14159265358979323846)
T = TypeVar('T', bound='Resample')
U = TypeVar('U', bound='BorderType')
| 24.761905 | 65 | 0.567308 |
827beb02ef352cf0444f8df3acec604c0da03a1c | 1,731 | py | Python | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | 2 | 2021-04-25T17:38:03.000Z | 2022-03-20T20:48:50.000Z | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | 1 | 2016-11-30T22:25:00.000Z | 2017-01-16T22:43:39.000Z | intrepyd/tests/test_openplc.py | bobosoft/intrepyd | 13f0912b31f86f9bcc50f52ef4ad870e33f0cf65 | [
"BSD-3-Clause"
] | null | null | null | import intrepyd
from intrepyd.iec611312py.plcopen import parse_plc_open_file
from intrepyd.iec611312py.stmtprinter import StmtPrinter
import unittest
from . import from_fixture_path
if __name__ == "__main__":
unittest.main()
| 37.630435 | 139 | 0.688619 |
827d64df2b74d446113ab304669eb3fd477b0e80 | 3,506 | py | Python | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | 31 | 2015-07-14T16:21:25.000Z | 2021-06-30T14:10:44.000Z | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | null | null | null | reveal_graph_embedding/datautil/asu_datautil/asu_read_data.py | MKLab-ITI/reveal-graph-embedding | 72d4af794536f97b8ede06c0f27f261ea85d8c4b | [
"Apache-2.0"
] | 11 | 2016-08-21T03:07:20.000Z | 2020-03-07T03:17:05.000Z | __author__ = 'Georgios Rizos (georgerizos@iti.gr)'
import numpy as np
import scipy.sparse as sparse
from reveal_graph_embedding.common import get_file_row_generator
def read_adjacency_matrix(file_path, separator):
"""
Reads an edge list in csv format and returns the adjacency matrix in SciPy Sparse COOrdinate format.
Inputs: - file_path: The path where the adjacency matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
Outputs: - adjacency_matrix: The adjacency matrix in SciPy Sparse COOrdinate format.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Read all file rows
for file_row in file_row_generator:
source_node = np.int64(file_row[0])
target_node = np.int64(file_row[1])
# Add edge
append_row(source_node)
append_col(target_node)
# Since this is an undirected network also add the reciprocal edge
append_row(target_node)
append_col(source_node)
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
number_of_nodes = np.max(row) # I assume that there are no missing nodes at the end.
# Array count should start from 0.
row -= 1
col -= 1
# Form sparse adjacency matrix
adjacency_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_nodes))
return adjacency_matrix
def read_node_label_matrix(file_path, separator, number_of_nodes):
"""
Reads node-label pairs in csv format and returns a list of tuples and a node-label matrix.
Inputs: - file_path: The path where the node-label matrix is stored.
- separator: The delimiter among values (e.g. ",", "\t", " ")
- number_of_nodes: The number of nodes of the full graph. It is possible that not all nodes are labelled.
Outputs: - node_label_matrix: The node-label associations in a NumPy array of tuples format.
- number_of_categories: The number of categories/classes the nodes may belong to.
- labelled_node_indices: A NumPy array containing the labelled node indices.
"""
# Open file
file_row_generator = get_file_row_generator(file_path, separator)
# Initialize lists for row and column sparse matrix arguments
row = list()
col = list()
append_row = row.append
append_col = col.append
# Populate the arrays
for file_row in file_row_generator:
node = np.int64(file_row[0])
label = np.int64(file_row[1])
# Add label
append_row(node)
append_col(label)
number_of_categories = len(set(col)) # I assume that there are no missing labels. There may be missing nodes.
labelled_node_indices = np.array(list(set(row)))
row = np.array(row, dtype=np.int64)
col = np.array(col, dtype=np.int64)
data = np.ones_like(row, dtype=np.float64)
# Array count should start from 0.
row -= 1
col -= 1
labelled_node_indices -= 1
# Form sparse adjacency matrix
node_label_matrix = sparse.coo_matrix((data, (row, col)), shape=(number_of_nodes, number_of_categories))
node_label_matrix = node_label_matrix.tocsr()
return node_label_matrix, number_of_categories, labelled_node_indices
| 33.711538 | 118 | 0.688534 |
827fbde3f6b49a475e21f72342cbd95940e44a4d | 1,255 | py | Python | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
] | null | null | null | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
] | null | null | null | create_tweet_classes.py | jmcguinness11/StockPredictor | 9dd545a11ca9beab6e108d5b8f001f69501af606 | [
"MIT"
] | null | null | null | # create_tweet_classes.py
# this assumes the existence of a get_class(day, hour, ticker) function
# that returns the class (0, 1, or -1) for a given hour and ticker
import collections
import json
import random
refined_tweets = collections.defaultdict(list)
#returns label for company and time
#parses individual json file
if __name__=='__main__':
main()
| 24.607843 | 73 | 0.6749 |
827fdac046ac07902d8fa5e1aeb478e27e40e24c | 11,538 | py | Python | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
] | null | null | null | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
] | null | null | null | integration_tests/test_router.py | madfish-solutions/quipuswap-token2token-core | 41fd4293029e2094a564141fb389fd9a1ef19185 | [
"MIT"
] | null | null | null | from unittest import TestCase
import json
from helpers import *
from pytezos import ContractInterface, pytezos, MichelsonRuntimeError
from pytezos.context.mixin import ExecutionContext
token_a = "KT1AxaBxkFLCUi3f8rdDAAxBKHfzY8LfKDRA"
token_b = "KT1PgHxzUXruWG5XAahQzJAjkk4c2sPcM3Ca"
token_c = "KT1RJ6PbjHpwc3M5rw5s2Nbmefwbuwbdxton"
token_d = "KT1Wz32jY2WEwWq8ZaA2C6cYFHGchFYVVczC"
pair_ab = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
}
pair_bc = {
"token_a_type": {
"fa2": {
"token_address": token_b,
"token_id": 1
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_ac = {
"token_a_type" : {
"fa2": {
"token_address": token_a,
"token_id": 0
}
},
"token_b_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
}
}
pair_cd = {
"token_a_type" : {
"fa2": {
"token_address": token_c,
"token_id": 2
}
},
"token_b_type" : {
"fa2": {
"token_address": token_d,
"token_id": 3
}
}
}
| 30.68617 | 101 | 0.477639 |
8281bdb342d8804d5733ed2d9e90e2c325ef1463 | 4,013 | py | Python | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
] | 1 | 2021-03-19T15:02:38.000Z | 2021-03-19T15:02:38.000Z | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
] | null | null | null | run.py | evilspyboy/twitch-relay-monitor | 45c9c2f02b67f73b5baea53813d8818d673d93ba | [
"MIT"
] | 1 | 2021-03-19T15:02:38.000Z | 2021-03-19T15:02:38.000Z | import datetime
from datetime import timedelta
import pprint
from config import *
from helper import *
import time
import logging
from logging.handlers import RotatingFileHandler
logger = logging.getLogger('Twitch Relay Monitor')
logger.setLevel(logging.DEBUG)
handler = RotatingFileHandler('/home/pi/twitch_relay_monitor/logs/app.log', maxBytes=200000, backupCount=2)
handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(handler)
#First, start by getting token to access Twitch api
r=get_token(client_id,client_secret,grant_type,scope)
if r == False:
# if there is a problem, end the program
logger.error("Can't Auth user")
exit(1)
# since streamer username is given we need to get its broadcaster id for other requests
broadcaster=get_broadcaster_id(client_id,username)
if broadcaster==False:
# if there is a problem, end the program
logger.error("Can not get broadcster id")
exit(1)
if "access_token" not in r:
# if there is a problem, end the program
logger.error("Access token is missing " + str(r))
exit(1)
access_token=r['access_token'];
expires_in=r['expires_in']
# Fresh token interval will keep track of the time we need to validate the token
fresh_token_interval=token_validate_interval
skip_count=0
while True:
wait_time=online_user_wait_time
# refresh token if expired
if fresh_token_interval <30:
#confirm the token is valid
if is_valid_token(access_token) ==False:
r=get_token(client_id,client_secret,grant_type,scope)
if r ==False:
skip_count=skip_count+1
logger.info("Fresh Token Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
access_token=r['access_token'];
expires_in=r['expires_in']
fresh_token_interval=token_validate_interval
if is_user_live(client_id,access_token,username):
print_verbose("User ["+username+"] online")
set_stream(1)
user_streaming_flag=1
else:
print_verbose("User ["+username+"] offline")
set_hypetrain(0)
set_follow(0)
set_stream(0)
user_streaming_flag=0
wait_time=user_offline_wait_time
last_hype_train_action=get_last_hype_train_action(client_id,access_token,broadcaster["_id"])
if last_hype_train_action ==False:
skip_count=skip_count+1
logger.info("Hype Train Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
#retrieve most recent follow event
last_follow_action=get_last_follow_action(client_id,access_token,broadcaster["_id"])
if last_follow_action ==False:
skip_count=skip_count+1
logger.info("Follow Skip get token , skip:" + str(skip_count))
time.sleep(skip_wait_time)
continue
#mark follow if last follow event is < event notification time from current time
if user_streaming_flag==1:
subscribe_time=last_follow_action["data"][0]["followed_at"]
subscribe_time=datetime.datetime.strptime(subscribe_time,'%Y-%m-%dT%H:%M:%SZ')
if datetime.datetime.utcnow() < subscribe_time + timedelta(seconds=event_notification_delay):
print_verbose("Relay Function - Follow Event Active")
set_follow(1)
else:
set_follow(0)
#set hype train state
if(is_train_active(last_hype_train_action["data"])):
print_verbose("Train Active at level " + str(last_hype_train_action["data"][0]["event_data"]['level']))
level=last_hype_train_action["data"][0]["event_data"]['level']
if 1 <= level <= 5:
if user_streaming_flag==1:
logger.info("Relay Function - Hype Train Event")
set_hypetrain(level)
wait_time=5 # active hype train wait time in seconds
else:
print_verbose("Train not active")
set_hypetrain(0)
wait_time=online_user_wait_time
fresh_token_interval=fresh_token_interval-wait_time
if skip_count == max_skip_count:
logger.error("Skip count limit reached")
exit(1)
time.sleep(wait_time)
#reset skip_count if one request execute without issue within max_skip_count
skip_count=0 | 31.351563 | 107 | 0.76601 |
828565457c47cac1020f3188fe499892855af43c | 12,928 | py | Python | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | rbc/tests/test_omnisci_array.py | guilhermeleobas/rbc | 4b568b91c6ce3ef7727fee001169302c3803c4fd | [
"BSD-3-Clause"
] | null | null | null | import os
from collections import defaultdict
from rbc.omnisci_backend import Array
from rbc.errors import OmnisciServerError
from numba import types as nb_types
import pytest
rbc_omnisci = pytest.importorskip('rbc.omniscidb')
available_version, reason = rbc_omnisci.is_available()
pytestmark = pytest.mark.skipif(not available_version, reason=reason)
def test_len_i32(omnisci):
omnisci.reset()
desrc, result = omnisci.sql_execute(
f'select i4, array_sz_int32(i4) from {omnisci.table_name}')
for a, sz in result:
assert len(a) == sz
inps = [('int32', 'i4', 'trunc'), ('int32', 'i4', 'sext'),
('int32', 'i4', 'zext'), ('float', 'f4', 'fptrunc'),
('double', 'f8', 'fpext')]
| 27.274262 | 98 | 0.586556 |
82857e9a33dbe718b0d5cc5a60fda6fb7a1add58 | 4,666 | py | Python | env/lib/python3.8/site-packages/unidecode/x093.py | avdhari/enigma | b7e965a91ca5f0e929c4c719d695f15ccb8b5a2c | [
"MIT"
] | 48 | 2021-11-20T08:17:53.000Z | 2022-03-19T13:57:15.000Z | venv/lib/python3.6/site-packages/unidecode/x093.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
] | 392 | 2015-07-30T14:37:05.000Z | 2022-03-21T16:56:09.000Z | venv/lib/python3.6/site-packages/unidecode/x093.py | mrsaicharan1/iiita-updates | a22a0157b90d29b946d0f020e5f76744f73a6bff | [
"Apache-2.0"
] | 15 | 2015-10-01T21:31:08.000Z | 2020-05-05T00:03:27.000Z | data = (
'Lun ', # 0x00
'Kua ', # 0x01
'Ling ', # 0x02
'Bei ', # 0x03
'Lu ', # 0x04
'Li ', # 0x05
'Qiang ', # 0x06
'Pou ', # 0x07
'Juan ', # 0x08
'Min ', # 0x09
'Zui ', # 0x0a
'Peng ', # 0x0b
'An ', # 0x0c
'Pi ', # 0x0d
'Xian ', # 0x0e
'Ya ', # 0x0f
'Zhui ', # 0x10
'Lei ', # 0x11
'A ', # 0x12
'Kong ', # 0x13
'Ta ', # 0x14
'Kun ', # 0x15
'Du ', # 0x16
'Wei ', # 0x17
'Chui ', # 0x18
'Zi ', # 0x19
'Zheng ', # 0x1a
'Ben ', # 0x1b
'Nie ', # 0x1c
'Cong ', # 0x1d
'Qun ', # 0x1e
'Tan ', # 0x1f
'Ding ', # 0x20
'Qi ', # 0x21
'Qian ', # 0x22
'Zhuo ', # 0x23
'Qi ', # 0x24
'Yu ', # 0x25
'Jin ', # 0x26
'Guan ', # 0x27
'Mao ', # 0x28
'Chang ', # 0x29
'Tian ', # 0x2a
'Xi ', # 0x2b
'Lian ', # 0x2c
'Tao ', # 0x2d
'Gu ', # 0x2e
'Cuo ', # 0x2f
'Shu ', # 0x30
'Zhen ', # 0x31
'Lu ', # 0x32
'Meng ', # 0x33
'Lu ', # 0x34
'Hua ', # 0x35
'Biao ', # 0x36
'Ga ', # 0x37
'Lai ', # 0x38
'Ken ', # 0x39
'Kazari ', # 0x3a
'Bu ', # 0x3b
'Nai ', # 0x3c
'Wan ', # 0x3d
'Zan ', # 0x3e
'[?] ', # 0x3f
'De ', # 0x40
'Xian ', # 0x41
'[?] ', # 0x42
'Huo ', # 0x43
'Liang ', # 0x44
'[?] ', # 0x45
'Men ', # 0x46
'Kai ', # 0x47
'Ying ', # 0x48
'Di ', # 0x49
'Lian ', # 0x4a
'Guo ', # 0x4b
'Xian ', # 0x4c
'Du ', # 0x4d
'Tu ', # 0x4e
'Wei ', # 0x4f
'Cong ', # 0x50
'Fu ', # 0x51
'Rou ', # 0x52
'Ji ', # 0x53
'E ', # 0x54
'Rou ', # 0x55
'Chen ', # 0x56
'Ti ', # 0x57
'Zha ', # 0x58
'Hong ', # 0x59
'Yang ', # 0x5a
'Duan ', # 0x5b
'Xia ', # 0x5c
'Yu ', # 0x5d
'Keng ', # 0x5e
'Xing ', # 0x5f
'Huang ', # 0x60
'Wei ', # 0x61
'Fu ', # 0x62
'Zhao ', # 0x63
'Cha ', # 0x64
'Qie ', # 0x65
'She ', # 0x66
'Hong ', # 0x67
'Kui ', # 0x68
'Tian ', # 0x69
'Mou ', # 0x6a
'Qiao ', # 0x6b
'Qiao ', # 0x6c
'Hou ', # 0x6d
'Tou ', # 0x6e
'Cong ', # 0x6f
'Huan ', # 0x70
'Ye ', # 0x71
'Min ', # 0x72
'Jian ', # 0x73
'Duan ', # 0x74
'Jian ', # 0x75
'Song ', # 0x76
'Kui ', # 0x77
'Hu ', # 0x78
'Xuan ', # 0x79
'Duo ', # 0x7a
'Jie ', # 0x7b
'Zhen ', # 0x7c
'Bian ', # 0x7d
'Zhong ', # 0x7e
'Zi ', # 0x7f
'Xiu ', # 0x80
'Ye ', # 0x81
'Mei ', # 0x82
'Pai ', # 0x83
'Ai ', # 0x84
'Jie ', # 0x85
'[?] ', # 0x86
'Mei ', # 0x87
'Chuo ', # 0x88
'Ta ', # 0x89
'Bang ', # 0x8a
'Xia ', # 0x8b
'Lian ', # 0x8c
'Suo ', # 0x8d
'Xi ', # 0x8e
'Liu ', # 0x8f
'Zu ', # 0x90
'Ye ', # 0x91
'Nou ', # 0x92
'Weng ', # 0x93
'Rong ', # 0x94
'Tang ', # 0x95
'Suo ', # 0x96
'Qiang ', # 0x97
'Ge ', # 0x98
'Shuo ', # 0x99
'Chui ', # 0x9a
'Bo ', # 0x9b
'Pan ', # 0x9c
'Sa ', # 0x9d
'Bi ', # 0x9e
'Sang ', # 0x9f
'Gang ', # 0xa0
'Zi ', # 0xa1
'Wu ', # 0xa2
'Ying ', # 0xa3
'Huang ', # 0xa4
'Tiao ', # 0xa5
'Liu ', # 0xa6
'Kai ', # 0xa7
'Sun ', # 0xa8
'Sha ', # 0xa9
'Sou ', # 0xaa
'Wan ', # 0xab
'Hao ', # 0xac
'Zhen ', # 0xad
'Zhen ', # 0xae
'Luo ', # 0xaf
'Yi ', # 0xb0
'Yuan ', # 0xb1
'Tang ', # 0xb2
'Nie ', # 0xb3
'Xi ', # 0xb4
'Jia ', # 0xb5
'Ge ', # 0xb6
'Ma ', # 0xb7
'Juan ', # 0xb8
'Kasugai ', # 0xb9
'Habaki ', # 0xba
'Suo ', # 0xbb
'[?] ', # 0xbc
'[?] ', # 0xbd
'[?] ', # 0xbe
'Na ', # 0xbf
'Lu ', # 0xc0
'Suo ', # 0xc1
'Ou ', # 0xc2
'Zu ', # 0xc3
'Tuan ', # 0xc4
'Xiu ', # 0xc5
'Guan ', # 0xc6
'Xuan ', # 0xc7
'Lian ', # 0xc8
'Shou ', # 0xc9
'Ao ', # 0xca
'Man ', # 0xcb
'Mo ', # 0xcc
'Luo ', # 0xcd
'Bi ', # 0xce
'Wei ', # 0xcf
'Liu ', # 0xd0
'Di ', # 0xd1
'Qiao ', # 0xd2
'Cong ', # 0xd3
'Yi ', # 0xd4
'Lu ', # 0xd5
'Ao ', # 0xd6
'Keng ', # 0xd7
'Qiang ', # 0xd8
'Cui ', # 0xd9
'Qi ', # 0xda
'Chang ', # 0xdb
'Tang ', # 0xdc
'Man ', # 0xdd
'Yong ', # 0xde
'Chan ', # 0xdf
'Feng ', # 0xe0
'Jing ', # 0xe1
'Biao ', # 0xe2
'Shu ', # 0xe3
'Lou ', # 0xe4
'Xiu ', # 0xe5
'Cong ', # 0xe6
'Long ', # 0xe7
'Zan ', # 0xe8
'Jian ', # 0xe9
'Cao ', # 0xea
'Li ', # 0xeb
'Xia ', # 0xec
'Xi ', # 0xed
'Kang ', # 0xee
'[?] ', # 0xef
'Beng ', # 0xf0
'[?] ', # 0xf1
'[?] ', # 0xf2
'Zheng ', # 0xf3
'Lu ', # 0xf4
'Hua ', # 0xf5
'Ji ', # 0xf6
'Pu ', # 0xf7
'Hui ', # 0xf8
'Qiang ', # 0xf9
'Po ', # 0xfa
'Lin ', # 0xfb
'Suo ', # 0xfc
'Xiu ', # 0xfd
'San ', # 0xfe
'Cheng ', # 0xff
)
| 18.015444 | 21 | 0.388556 |
82858d5820f148f4cd403dae133ec9b5dc1ebb08 | 1,785 | py | Python | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
] | 2 | 2022-02-28T02:51:59.000Z | 2022-03-24T15:23:18.000Z | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
] | null | null | null | src/core/default/commands/bucket/utils.py | cdev-framework/cdev-sdk | 06cd7b40936ab063d1d8fd1a7d9f6882750e8a96 | [
"BSD-3-Clause-Clear"
] | null | null | null | from dataclasses import dataclass
import re
from tokenize import group
from core.constructs.resource import ResourceModel
from core.constructs.workspace import Workspace
RUUID = "cdev::simple::bucket"
remote_name_regex = "bucket://([a-z,_]+).([a-z,_]+)/?(\S+)?"
compiled_regex = re.compile(remote_name_regex)
| 25.869565 | 86 | 0.678431 |
8285973ff004a3b86ceb55d9c1d9f9899c59ee73 | 7,434 | py | Python | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
] | 2 | 2015-05-21T15:16:46.000Z | 2015-10-23T17:47:36.000Z | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
] | null | null | null | examples/blank_cylinders.py | reflectometry/osrefl | ddf55d542f2eab2a29fd6ffc862379820a06d5c7 | [
"BSD-3-Clause"
] | null | null | null | from greens_thm_form import greens_form_line, greens_form_shape
from numpy import arange, linspace, float64, indices, zeros_like, ones_like, pi, sin, complex128, array, exp, newaxis, cumsum, sum, cos, sin, log, log10
from osrefl.theory.DWBAGISANS import dwbaWavefunction
# alternating SLD
wavelength = 1.24 # x-ray wavelength, Angstroms
spacing = 600.0 # distance between cylinder centers
radius = 200.0 # Angstroms, radius of cylinders
thickness = 300.0 # Angstrom, thickness of cylinder layer
sublayer_thickness = 200.0 # Angstrom, full layer of matrix below cylinders
matrix_sld = pi/(wavelength**2) * 2.0 * 1.0e-6 # substrate
matrix_sldi = pi/(wavelength**2) * 2.0 * 1.0e-7 # absorption in substrate
cyl_sld = 0.0
cyl_sldi = 0.0 # cylinders are holes in matrix
unit_dx = 2.0 * spacing
unit_dy = 1.0 * spacing
matrix = rectangle(0,0, 3000, 3000, matrix_sld, matrix_sldi)
cylinders = []
centers = []
for i in range(3):
for j in range(6):
x0 = i * 2.0 * spacing
y0 = j * spacing
x1 = x0 + spacing # basis
y1 = y0 + spacing/2.0
cylinders.append(arc(radius, 0.0, 360.0, x0, y0, sld=cyl_sld, sldi=cyl_sldi))
cylinders.append(arc(radius, 0.0, 360.0, x1, y1, sld=cyl_sld, sldi=cyl_sldi))
cyl_area = 0.0
for cyl in cylinders:
cyl_area += cyl.area
clipped_cylinders = [limit_cyl(cyl, xmin=0.0, xmax=3000.0, ymin=0.0, ymax=3000.0) for cyl in cylinders]
clipped_cyl_area = 0.0
for cyl in clipped_cylinders:
clipped_cyl_area += cyl.area
print "clipped_cyl_area / matrix.area = ", clipped_cyl_area / matrix.area
print "ratio should be 0.3491 for FCT planar array with a/b = 2 and r = a/6"
avg_sld = (matrix.area * matrix_sld + clipped_cyl_area * cyl_sld) / matrix.area
avg_sldi = (matrix.area * matrix_sldi + clipped_cyl_area * cyl_sldi) / matrix.area
front_sld = 0.0 # air
back_sld = pi/(wavelength**2) * 2.0 * 5.0e-6 # substrate
back_sldi = pi/(wavelength**2) * 2.0 * 7.0e-8 # absorption in substrate
qz = linspace(0.01, 0.21, 501)
qy = linspace(-0.1, 0.1, 500)
qx = ones_like(qy, dtype=complex128) * 1e-8
SLDArray = [ [0,0,0], # air
[avg_sld, thickness, avg_sldi], # sample
[matrix_sld, sublayer_thickness, matrix_sldi], # full matrix layer under cylinders
[back_sld, 0, back_sldi] ]
FT = zeros_like(qx, dtype=complex128)
for cyl in clipped_cylinders:
FT += greens_form_shape(cyl.points, qx, qy) * (cyl.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (matrix.sld)
FT += greens_form_shape(matrix.points, qx, qy) * (-avg_sld)
SLDArray = array(SLDArray)
| 37.356784 | 152 | 0.644471 |
82866aa0a6c01bbd11d8219aa01d2f4e9089b2ed | 3,750 | py | Python | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
] | 2 | 2018-10-29T12:01:55.000Z | 2021-03-05T10:28:59.000Z | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
] | 1 | 2019-01-23T04:46:52.000Z | 2019-01-23T04:46:52.000Z | EXAMPLE/test_backtest/MACD_JCSC.py | evsteel/QUANTAXIS | 50e0116b2b52e6bbac6819d5f039608bf4a17367 | [
"MIT"
] | 2 | 2018-11-30T07:52:14.000Z | 2021-05-28T23:00:20.000Z | # -*- coding: utf-8 -*-
# Demo: MACD strategy
# src: ./test_backtest/MACD_JCSC.py
# jupyter: ./test_backtest/QUANTAXIS.ipynb
# paper: ./test_backtest/QUANTAXIS.md
import QUANTAXIS as QA
import numpy as np
import pandas as pd
import datetime
st1=datetime.datetime.now()
# define the MACD strategy
def MACD_JCSC(dataframe, SHORT=12, LONG=26, M=9):
"""
1.DIFDEA
2.DIFDEA
"""
CLOSE = dataframe.close
DIFF = QA.EMA(CLOSE, SHORT) - QA.EMA(CLOSE, LONG)
DEA = QA.EMA(DIFF, M)
MACD = 2*(DIFF-DEA)
CROSS_JC = QA.CROSS(DIFF, DEA)
CROSS_SC = QA.CROSS(DEA, DIFF)
ZERO = 0
return pd.DataFrame({'DIFF': DIFF, 'DEA': DEA, 'MACD': MACD, 'CROSS_JC': CROSS_JC, 'CROSS_SC': CROSS_SC, 'ZERO': ZERO})
# create account
Account = QA.QA_Account()
Broker = QA.QA_BacktestBroker()
Account.reset_assets(1000000)
Account.account_cookie = 'macd_stock'
QA.QA_SU_save_strategy('MACD_JCSC','Indicator',Account.account_cookie)
# get data from mongodb
data = QA.QA_fetch_stock_day_adv(
['000001', '000002', '000004', '600000'], '2017-09-01', '2018-05-20')
data = data.to_qfq()
# add indicator
ind = data.add_func(MACD_JCSC)
# ind.xs('000001',level=1)['2018-01'].plot()
data_forbacktest=data.select_time('2018-01-01','2018-05-01')
for items in data_forbacktest.panel_gen:
for item in items.security_gen:
daily_ind=ind.loc[item.index]
if daily_ind.CROSS_JC.iloc[0]>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=1000,
towards=QA.ORDER_DIRECTION.BUY,
price=0,
order_model=QA.ORDER_MODEL.CLOSE,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print(item.to_json()[0])
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
elif daily_ind.CROSS_SC.iloc[0]>0:
#print(item.code)
if Account.sell_available.get(item.code[0], 0)>0:
order=Account.send_order(
code=item.code[0],
time=item.date[0],
amount=Account.sell_available.get(item.code[0], 0),
towards=QA.ORDER_DIRECTION.SELL,
price=0,
order_model=QA.ORDER_MODEL.MARKET,
amount_model=QA.AMOUNT_MODEL.BY_AMOUNT
)
#print
Broker.receive_order(QA.QA_Event(order=order,market_data=item))
trade_mes=Broker.query_orders(Account.account_cookie,'filled')
res=trade_mes.loc[order.account_cookie,order.realorder_id]
order.trade(res.trade_id,res.trade_price,res.trade_amount,res.trade_time)
Account.settle()
print('TIME -- {}'.format(datetime.datetime.now()-st1))
print(Account.history)
print(Account.history_table)
print(Account.daily_hold)
# create Risk analysis
Risk = QA.QA_Risk(Account)
Account.save()
Risk.save()
# print(Risk.message)
# print(Risk.assets)
# Risk.plot_assets_curve()
# plt=Risk.plot_dailyhold()
# plt.show()
# plt1=Risk.plot_signal()
# plt.show()
# performance=QA.QA_Performance(Account)
# plt=performance.plot_pnlmoney(performance.pnl_fifo)
# plt.show()
# Risk.assets.plot()
# Risk.benchmark_assets.plot()
# save result
#account_info = QA.QA_fetch_account({'account_cookie': 'user_admin_macd'})
#account = QA.QA_Account().from_message(account_info[0])
#print(account)
| 32.051282 | 123 | 0.6464 |
8286a3142dc78c8279d55cc22186d074f451c53d | 6,247 | py | Python | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
] | 3 | 2020-10-06T13:54:40.000Z | 2021-07-04T21:02:14.000Z | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
] | 34 | 2020-04-30T02:52:31.000Z | 2022-03-30T19:19:14.000Z | tests/zquantum/qaoa/ansatzes/farhi_ansatz_test.py | zapatacomputing/z-quantum-qaoa | a13a99939ee41c760fdfb302e5f4944e087a09a7 | [
"Apache-2.0"
] | 5 | 2020-06-24T10:57:01.000Z | 2021-07-09T01:14:16.000Z | from zquantum.core.interfaces.ansatz_test import AnsatzTests
from zquantum.core.circuits import Circuit, H, RX, RZ
from zquantum.core.utils import compare_unitary
from zquantum.core.openfermion import change_operator_type
from zquantum.qaoa.ansatzes.farhi_ansatz import (
QAOAFarhiAnsatz,
create_farhi_qaoa_circuits,
create_all_x_mixer_hamiltonian,
)
from openfermion import QubitOperator, IsingOperator
import pytest
import numpy as np
import sympy
def test_create_farhi_qaoa_circuits():
# Given
hamiltonians = [
QubitOperator("Z0 Z1"),
QubitOperator("Z0") + QubitOperator("Z1"),
]
number_of_layers = 2
# When
circuits = create_farhi_qaoa_circuits(hamiltonians, number_of_layers)
# Then
assert len(circuits) == len(hamiltonians)
for circuit in circuits:
assert isinstance(circuit, Circuit)
| 29.328638 | 88 | 0.664959 |
8287b90a2bda6ffe57a0b5c598cfc436f9b7c2a3 | 3,297 | py | Python | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
] | null | null | null | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
] | null | null | null | utils/parser.py | scalar42/scholar-alerts-assistant | 5b674f3784d09ced8a6c17a653d9bdfa08947125 | [
"MIT"
] | null | null | null | from html.parser import HTMLParser
| 30.813084 | 129 | 0.578101 |
8288331b93be5bebcd8bf3d2c82ccd107597d65b | 1,067 | py | Python | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
] | null | null | null | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
] | 1 | 2019-08-16T12:59:01.000Z | 2019-08-18T06:36:47.000Z | ApendixI-Games/StacklessPSP-2.5.2_R1/pspsnd.py | MelroLeandro/Matematica-Discreta-para-Hackers-ipnyb | 1f9ca7db685733a3df924db1269bd852acf27602 | [
"MIT"
] | null | null | null | """Wrapper for pygame, which exports the PSP Python API on non-PSP systems."""
__author__ = "Per Olofsson, <MagerValp@cling.gu.se>"
import pygame
pygame.init()
_vol_music = 255
_vol_sound = 255
| 21.34 | 78 | 0.62418 |
82884c2f951413f34a94ee10615c1c83e1f50fe8 | 1,540 | py | Python | utility_parseCMUMovie.py | bipulkumar22/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
] | 11 | 2016-09-16T10:38:19.000Z | 2021-12-13T19:38:24.000Z | utility_parseCMUMovie.py | tyiannak/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
] | null | null | null | utility_parseCMUMovie.py | tyiannak/pyTextClassification | 7ed92949aa5648d3198588d0c5d6db89b48025ac | [
"Apache-2.0"
] | 6 | 2016-11-19T15:35:13.000Z | 2020-03-29T17:09:22.000Z | import os
import csv
import ast
# used to generate folder-seperated corpus from CMUMovie dataset
# just type python utility_parseCMUMovie.py in a terminal and the data will be downloaded and split to subfolders in the moviePlots/ path
os.system("wget http://www.cs.cmu.edu/~ark/personas/data/MovieSummaries.tar.gz")
os.system("tar -xvzf MovieSummaries.tar.gz")
minRevenue = 20000000
movieMetadata = {}
with open('MovieSummaries/movie.metadata.tsv', 'rb') as csvfile:
reader = csv.reader(csvfile, delimiter='\t', quotechar='|')
for row in reader:
rev = 0
if len(row[4])>1:
rev = int(row[4])
if (minRevenue < 0) or ( (minRevenue > 0) and (rev>minRevenue) ):
movieMetadata[row[0]] = {}
movieMetadata[row[0]]['title'] = row[2]
movieMetadata[row[0]]['genres'] = ast.literal_eval(row[8]).values()
print len(movieMetadata)
with open("MovieSummaries/plot_summaries.txt") as f:
content = f.readlines()
for c in content:
d = c.split("\t")
id = d[0]
plot = d[1]
if id in movieMetadata:
print id, movieMetadata[id]['title']
for g in movieMetadata[id]['genres']:
if not os.path.exists("moviePlots" + os.sep + g.replace("/","-")):
os.makedirs("moviePlots" + os.sep + g.replace("/","-"))
f = open("moviePlots" + os.sep + g.replace("/","-") + os.sep + id + "_" + movieMetadata[id]["title"].replace("/","-"), 'w')
f.write(plot)
f.close()
| 37.560976 | 143 | 0.595455 |