max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
workbench/planning/migrations/0007_planningrequest_receivers.py | yoshson/workbench | 15 | 12770051 |
# Generated by Django 3.2a1 on 2021-02-21 10:51
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
("planning", "0006_auto_20210221_1150"),
]
operations = [
migrations.AddField(
model_name="planningrequest",
name="receivers",
field=models.ManyToManyField(
related_name="received_planning_requests",
through="planning.ReceivedRequest",
to=settings.AUTH_USER_MODEL,
verbose_name="receivers",
),
),
migrations.AddField(
model_name="receivedrequest",
name="reason",
field=models.TextField(blank=True, verbose_name="reason"),
),
]
| 1.734375 | 2 |
optixrap/tests/interpolationTest_interpol.py | hanswenzel/opticks | 11 | 12770052 |
#!/usr/bin/env python
#
# Copyright (c) 2019 Opticks Team. All Rights Reserved.
#
# This file is part of Opticks
# (see https://bitbucket.org/simoncblyth/opticks).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os,sys, numpy as np, logging
from opticks.ana.proplib import PropLib
from opticks.ana.nload import np_load
log = logging.getLogger(__name__)
np.set_printoptions(precision=3, suppress=True)
if __name__ == '__main__':
from opticks.ana.main import opticks_main
args = opticks_main()
base = "$TMP/interpolationTest"
blib = PropLib.load_GBndLib(base)
if blib is None:
log.warning("failed to load blib GPropLib from base:%s " % base )
sys.exit(0)
pass
names = blib.names
t = blib.data # boundary texture data
ext, nl = "interpol", 820-60+1
#oname = "OInterpolationTest_%s.npy" % ext
oname = "interpolationTest_%s.npy" % ext
cname = "CInterpolationTest_%s.npy" % ext
opath = os.path.expandvars(os.path.join(base,oname))
cpath = os.path.expandvars(os.path.join(base,cname))
opath_exists = os.path.exists(opath)
cpath_exists = os.path.exists(cpath)
log.info(" opath : %s : %s " % ( "Y" if opath_exists else "N", opath ))
log.info(" cpath : %s : %s " % ( "Y" if cpath_exists else "N", cpath ))
if not (opath_exists and cpath_exists):
log.warning("cannot proceed as missing input file ")
sys.exit(0)
pass
o = np.load(opath).reshape(-1,4,2,nl,4)
c = np.load(cpath).reshape(-1,4,2,nl,4)
assert len(t) == len(names)
assert len(t) == len(o)
assert len(t) == len(c)
n = len(t)
cf={}
cfb={}
for i in range(n):
name = names[i]
#print(name)
omat,osur,isur,imat = name.split("/")
cf_omat = np.all( c[i,blib.B_OMAT,0] == o[i,blib.B_OMAT,0] )
cf_imat = np.all( c[i,blib.B_IMAT,0] == o[i,blib.B_IMAT,0] )
if omat in cf:
assert cf[omat] == cf_omat
else:
cf[omat] = cf_omat
pass
if imat in cf:
assert cf[imat] == cf_imat
else:
cf[imat] = cf_imat
pass
if len(osur)>0:
cf_osur = np.all( c[i,blib.B_OSUR,0] == o[i,blib.B_OSUR,0] )
if osur in cf:
assert cf[osur] == cf_osur
else:
cf[osur] = cf_osur
pass
if not cf_osur:
if not osur in cfb:
cfb[osur] = []
cfb[osur].append( (i,blib.B_OSUR,0) )
else:
cf_osur = None
if len(isur)>0:
cf_isur = np.all( c[i,blib.B_ISUR,0] == o[i,blib.B_ISUR,0] )
if isur in cf:
assert cf[isur] == cf_isur
else:
cf[isur] = cf_isur
pass
if not cf_isur:
if not isur in cfb:
cfb[isur] = []
cfb[isur].append( (i,blib.B_ISUR,0) )
else:
cf_isur = None
if not cf_omat:
if not omat in cfb:
cfb[omat] = []
cfb[omat].append( (i,blib.B_OMAT,0) )
if not cf_imat:
if not imat in cfb:
cfb[imat] = []
cfb[imat].append( (i,blib.B_IMAT,0) )
#print("%4d omat %25s imat %25s cf_omat %7s cf_imat %7s " % ( i, omat, imat, cf_omat, cf_imat ))
if len(osur)>0 or len(isur)>0:
print("%4d osur %35s isur %35s cf_osur %7s cf_isur %7s " % ( i, osur, isur, cf_osur, cf_isur ))
pass
print("cf")
for b in [True,False]:
for k,v in cf.items():
if v == b:
if v:
print("%30s %s " % (k, v))
else:
print("%30s %s %s " % (k, v, str(cfb[k])))
pass
pass
pass
pass
| 1.914063 | 2 |
app/__init__.py | gabriel-esco/3.3.7.2 | 0 | 12770053 |
import os
from flask import Flask, render_template, send_from_directory, request, flash
from flask_wtf import Form
from wtforms import TextField, BooleanField, TextAreaField, SubmitField, validators, ValidationError
from dotenv import load_dotenv
load_dotenv()
app = Flask(__name__)
app.secret_key = 'development identification key'
@app.route('/')
def index():
return render_template('home.html', title="MLH Fellow", url=os.getenv("URL"))
@app.route('/projects')
def projects():
return render_template('projects.html', title="Projects", url=os.getenv("URL"))
@app.route('/about')
def about():
return render_template('about.html', title="About", url=os.getenv("URL"))
@app.route('/contact', methods=['GET', 'POST'])
def contact():
form = ContactForm()
if request.method == 'POST':
        if not form.validate():
flash('All fields are required.')
return render_template('contact.html', form=form)
else:
return 'Form submitted.'
elif request.method == 'GET':
return render_template('contact.html', form=form)
class ContactForm(Form):
name = TextField("Name", [validators.Required("Please enter your name.")])
email = TextField("Email", [validators.Required("Please enter your email address."), validators.Email("Please enter a valid email address")])
subject = TextField("Subject", [validators.Required("Please enter a subject.")])
message = TextAreaField("Message", [validators.Required("Please enter a message.")])
submit = SubmitField("Send")
| 2.796875 | 3 |
norimdb/db.py | meeron/norimdb | 0 | 12770054 | """Module defines NorimDb class"""
from os import path, SEEK_END
from .exceptions import *
import pybinn
from .docid import DocId
class NorimDb:
"""NorimDb class"""
def __init__(self, dir_path):
if not path.isdir(dir_path):
raise DbError(ERR_PATH, path=dir_path)
self._sys = {
'_sys': {'size': 0}
}
self._sys_file = NorimDb._open(path.join(dir_path, "sys.ndb"))
self._data_file = NorimDb._open(path.join(dir_path, "data.ndb"))
self._sys_file.seek(0, SEEK_END)
file_size = self._sys_file.tell()
if file_size > 0:
self._sys_file.seek(0)
self._sys = pybinn.load(self._sys_file)
self._opened = True
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return False
def get_collection(self, name):
return Collection(name, self)
def close(self):
self._sys_file.close()
self._data_file.close()
self._opened = False
def _sync(self):
self._sys_file.seek(0)
pybinn.dump(self._sys, self._sys_file)
print(pybinn.dumps(self._sys))
print(self._sys)
@staticmethod
def _open(file_path):
if path.isfile(file_path):
return open(file_path, 'r+b')
return open(file_path, 'w+b')
class Collection:
"""Collection class"""
def __init__(self, name: str, db: NorimDb):
if name[0] == '_':
raise DbError(ERR_COL_NAME, name=name)
self._collection = db._sys.get(name, {
'sys': {'size': 0, 'count': 0},
'keys': {}
})
db._sys[name] = self._collection
self._name = name
self._db = db
def add(self, dict_value: dict):
if not self._db._opened:
raise DbError(ERR_DB_CLOSED)
if not isinstance(dict_value, dict):
raise DbError(ERR_DOC_TYPE)
if '_id' not in dict_value:
dict_value['_id'] = DocId()
if dict_value['_id'] in self._collection['keys']:
raise DbError(ERR_COL_KEY, key=dict_value['_id'], collection=self._name)
self._collection['sys']['count'] += 1
self._collection['keys'][dict_value['_id']] = {
'offset': self._db._data_file.tell(),
'size': 0
}
pybinn.dump(dict_value, self._db._data_file)
self._db._sync()
def get(self, doc_id):
if not self._db._opened:
raise DbError(ERR_DB_CLOSED)
if doc_id not in self._collection['keys']:
return None
offset = self._collection['keys'][doc_id]['offset']
self._db._data_file.seek(offset)
return pybinn.load(self._db._data_file)
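# Minimal usage sketch (illustrative; the directory and document fields are
# hypothetical, not part of the original project):
#
#     with NorimDb("/tmp/norimdb") as db:
#         users = db.get_collection("users")
#         doc = {"name": "alice"}
#         users.add(doc)               # add() assigns doc['_id'] when missing
#         print(users.get(doc["_id"]))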
| 2.40625 | 2 |
scripts/visualstudio/__init__.py | GeneralLeeInept/inpet | 0 | 12770055 | __all__ = ["solution"]
| 0.996094 | 1 |
puavro/puavro.py | bry00/puavro | 0 | 12770056 | import io
import pulsar
import fastavro
class DictAVRO(dict):
"""``DictAVRO`` provides dictionary class compatible with the Pulsar AVRO "record" interface.
The class is based on regular Python dictionary (``dict``).
The actual "record" classes should be based on the ``DictAVRO`` and either:
- set ``SCHEMA`` class variable to the parsed AVRO schema,
- use ``set_schema`` class method to set parsed AVRO schema.
"""
_schema = None
@classmethod
def schema(cls) -> str:
"""Class method providing AVRO schema related to the class.
Returns:
AVRO schema associated with the class.
"""
if cls._schema is None:
if hasattr(cls, "SCHEMA") and not cls.SCHEMA is None:
cls._schema = cls.SCHEMA
if cls._schema is None:
raise ValueError(
"AVRO schema (e.g. from fastavro.schema.load_schema()) must be provided as SCHEMA attribute")
return cls._schema
@classmethod
def set_schema(cls, schema: str):
"""Sets AVRO schema for all derived classes.
Args:
schema (str): parsed AVRO schema
"""
cls._schema = schema
class DictAvroSchema(pulsar.schema.AvroSchema):
"""``DictAvroSchema`` provides AVRO schema class compatible with the Pulsar AVRO interface.
"""
def __init__(self, record_cls):
"""
Args:
record_cls (class): Class used as a record to write/read Pulsar AVRO messages.
Should be derived from :class:`DictAVRO`
"""
if not issubclass(record_cls, DictAVRO):
raise TypeError(
'Invalid record type {} - record should be derived from DictAVRO class'.format(record_cls.__name__))
super().__init__(record_cls)
def encode(self, obj):
"""Encodes the given object. Used internally by the Pulsar client.
Overrides base implementation in order to allow usage of ``DictAVRO`` based objects.
Args:
obj: AVRO record object to be encoded.
Should be the object of the same class that was used to
initialize the current instance of the class:`DictAVRO`,
i.e. class derivated from :class:`DictAVRO`.
"""
self._validate_object_type(obj)
buffer = io.BytesIO()
fastavro.schemaless_writer(buffer, self._schema, obj)
return buffer.getvalue()
if __name__ == '__main__':
import sys
import json
import datetime
import time
from pprint import pp
WAIT_SECONDS = 3
PULSAR_SERVICE_URL = "pulsar://localhost:6650"
TOPIC = "try"
AVRO_SCHEMA = fastavro.schema.load_schema(sys.argv[1]) if len(sys.argv) > 1 else fastavro.schema.parse_schema(json.loads(
"""{
"type" : "record",
"name" : "Segment",
"namespace" : "try",
"fields" : [ {
"name" : "id",
"type" : "long"
}, {
"name" : "name",
"type" : "string"
}, {
"name" : "when",
"type" : {
"type" : "long",
"logicalType" : "timestamp-millis"
}
}, {
"name" : "direction",
"type" : {
"type" : "enum",
"name" : "CardinalDirection",
"symbols" : [ "north", "south", "east", "west" ]
}
}, {
"name" : "length",
"type" : [ "null", "long" ]
} ]
}
"""))
def send():
class Segment(DictAVRO):
SCHEMA = AVRO_SCHEMA
pulsar_client = pulsar.Client(PULSAR_SERVICE_URL)
producer = pulsar_client.create_producer(topic=TOPIC, schema=DictAvroSchema(Segment))
try:
segment = Segment(
id=99,
name = "<NAME>",
when = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc),
direction = "north",
length = 12345,
)
producer.send(segment)
pp(segment)
finally:
producer.close()
pulsar_client.close()
def receive():
class Segment(DictAVRO):
pass
Segment.set_schema(AVRO_SCHEMA)
pulsar_client = pulsar.Client(PULSAR_SERVICE_URL)
consumer = pulsar_client.subscribe(TOPIC, subscription_name="try", consumer_type=pulsar.ConsumerType.Shared,
schema=DictAvroSchema(Segment))
try:
while True:
msg = consumer.receive(WAIT_SECONDS * 1000)
segment = msg.value()
pp(segment)
consumer.acknowledge(msg)
except Exception as e:
if str(e) == 'Pulsar error: TimeOut':
print("END OF DATA")
else:
raise
finally:
consumer.close()
pulsar_client.close()
send()
time.sleep(WAIT_SECONDS)
receive()
| 2.9375 | 3 |
ghost/engine.py | pikulet/ghost | 1 | 12770057 |
from bidict import bidict
from ghost.ghost import Ghost
from typing import List, Dict
import logging
class GhostEngine:
ERR_TOO_MANY_GAMES = 'Too many ongoing games... Please wait...'
ERR_GID_ALREADY_EXISTS = 'There is already an ongoing game in this group'
ERR_GID_DOES_NOT_EXIST = 'Game %d does not exist'
ERR_USER_IS_HOST = 'User @%s is the host of the game'
ERR_USER_NOT_HOST = 'User @%s is not the host of any game'
ERR_PLAYER_DOES_NOT_EXIST = 'User @%s has no ongoing game'
def __init__(self, max_games = 4):
self.__MAX_NUMBER_OF_GAMES = max_games
self.__games = dict() # gid to game
self.__host_to_gid = bidict() # host to gid
def add_game(self, gid: int, host: str) -> bool:
''' Creates a game in the engine.
Returns True if the game was successfully created '''
if gid in self.__games:
logging.warning(GhostEngine.ERR_GID_ALREADY_EXISTS)
return False
elif len(self.__games) >= self.__MAX_NUMBER_OF_GAMES:
logging.warning(GhostEngine.ERR_TOO_MANY_GAMES)
return False
new_game = Ghost()
self.__games[gid] = new_game
self.__host_to_gid[host] = gid
return True
def delete_game(self, gid: int) -> bool:
''' Removes a game from the engine.
Returns True if the game was successfully deleted '''
        if not self.__is_game_exists(gid):
            return False
        host = self.__get_host_from_gid(gid)
        del self.__games[gid]
        del self.__host_to_gid[host]
        return True
def __is_game_exists(self, gid: int) -> bool:
if gid not in self.__games:
logging.warning(GhostEngine.ERR_GID_DOES_NOT_EXIST % gid)
return False
return True
def __is_host_exists(self, host: str) -> bool:
if host not in self.__host_to_gid:
logging.warning(GhostEngine.ERR_USER_NOT_HOST % host)
return False
return True
    def __is_player_exists(self, username: str) -> bool:
        if username not in self.__username_to_gid:
            logging.warning(GhostEngine.ERR_PLAYER_DOES_NOT_EXIST % username)
return False
return True
def __get_game_from_gid(self, gid: int) -> Ghost:
if not self.__is_game_exists(gid):
return Ghost()
return self.__games[gid]
def get_gid_from_host(self, host: str) -> int:
''' Returns the gid the host is in-charge of, -1 otherwise '''
if not self.__is_host_exists(host):
return -1
return self.__host_to_gid[host]
def __get_host_from_gid(self, gid: int) -> str:
if not self.__is_game_exists(gid):
return ''
return self.__host_to_gid.inverse[gid]
''' GET GAME INFORMATION '''
def get_game_state(self, gid: int) -> Ghost.States:
game = self.__get_game_from_gid(gid)
return game.get_game_state()
def get_existing_players(self, gid: int) -> List[str]:
game = self.__get_game_from_gid(gid)
return game.get_existing_players()
def get_player_order(self, gid: int) -> List[str]:
game = self.__get_game_from_gid(gid)
return game.get_player_order()
def get_player_roles(self, gid: int) -> Dict[str, Ghost.Roles]:
game = self.__get_game_from_gid(gid)
return game.get_player_roles()
def get_words(self, gid: int) -> (str, str):
game = self.__get_game_from_gid(gid)
return game.get_words()
''' PHASE: REGISTER PLAYERS '''
def register_player(self, gid: int, player: str) -> int:
''' Returns the number of players registered in the game '''
host = self.__get_host_from_gid(gid)
game = self.__get_game_from_gid(gid)
if player == host:
logging.warning(GhostEngine.ERR_USER_IS_HOST % player)
return len(game.get_existing_players())
return game.register_player(player)
def start_game(self, gid: int) -> bool:
''' Returns True if the game was successfully started '''
game = self.__get_game_from_gid(gid)
return game.start_game()
''' PHASE: SET PARAM '''
def set_param_town_word(self, host: str, value: str) -> bool:
''' Returns True if the town word was successfully set '''
gid = self.get_gid_from_host(host)
game = self.__get_game_from_gid(gid)
return game.set_param_town_word(value)
def set_param_fool_word(self, host: str, value: str) -> bool:
''' Returns True if the fool word was successfully set '''
gid = self.get_gid_from_host(host)
game = self.__get_game_from_gid(gid)
return game.set_param_fool_word(value)
''' PHASE: CLUES '''
def get_next_in_player_order(self, gid: int) -> str:
''' Returns the name of the next person expected to give a clue.
An empty string is returned if all clues have been given or
it's not the clue phase '''
game = self.__get_game_from_gid(gid)
return game.get_next_in_player_order()
def set_clue(self, gid: int, player: str, clue: str) -> (bool, bool):
''' Returns a tuple of two booleans.
The first boolean is True if the clue is successfully given.
The second boolean is True if all players have given a clue. '''
game = self.__get_game_from_gid(gid)
return game.set_clue(player, clue)
def get_all_clues(self, gid: int) -> Dict[str, str]:
''' Returns the clues given by the users.
An empty dict() is returned if not all clues have been given. '''
game = self.__get_game_from_gid(gid)
return game.get_all_clues()
''' PHASE: VOTE '''
def set_vote(self, gid: int, player: str, vote: str) -> (bool, bool, str):
''' Returns a tuple of three booleans.
The first boolean is True if the vote is successfully made.
The second boolean is True if all the players have voted.
The third boolean returns the player voted out,
or an empty string if no one is voted out. '''
game = self.__get_game_from_gid(gid)
return game.set_vote(player, vote)
''' PHASE: GUESS '''
def make_guess(self, gid: int, player: str, guess: str) -> bool:
''' Returns a tuple of two booleans.
The first boolean is True if the guess is successfully made.
The second boolean is True if the guess is correct. '''
game = self.__get_game_from_gid(gid)
return game.make_guess(player, guess)
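# Minimal usage sketch (illustrative; the gid and usernames are hypothetical):
#
#     engine = GhostEngine()
#     engine.add_game(gid=1, host="alice")
#     engine.register_player(1, "bob")
#     engine.set_param_town_word("alice", "apple")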
| 2.6875 | 3 |
mspray/apps/mda/views/map.py | onaio/mspray | 0 | 12770058 | # -*- coding: utf-8 -*-
"""Map views"""
import json
from django.conf import settings
from django.views.generic import DetailView
from mspray.apps.main.mixins import SiteNameMixin
from mspray.apps.main.models import Location
from mspray.apps.main.query import get_location_qs
from mspray.apps.main.serializers.target_area import (
GeoTargetAreaSerializer,
TargetAreaQuerySerializer,
TargetAreaSerializer,
count_duplicates,
get_duplicates,
)
from mspray.apps.main.utils import get_location_dict, parse_spray_date
from mspray.apps.main.views.target_area import (
TargetAreaHouseholdsViewSet,
TargetAreaViewSet,
)
class MapView(SiteNameMixin, DetailView):
"""Map View"""
template_name = "map.html"
model = Location
slug_field = "pk"
def get_queryset(self):
return get_location_qs(super(MapView, self).get_queryset())
def get_context_data(self, **kwargs):
context = super(MapView, self).get_context_data(**kwargs)
serializer_class = (
TargetAreaQuerySerializer
if settings.SITE_NAME == "namibia"
else TargetAreaSerializer
)
location = context["object"]
if location.level == "RHC":
location = get_location_qs(
Location.objects.filter(pk=location.pk), "RHC"
).first()
serializer = serializer_class(
location, context={"request": self.request}
)
context["target_data"] = serializer.data
spray_date = parse_spray_date(self.request)
if spray_date:
context["spray_date"] = spray_date
if settings.MSPRAY_SPATIAL_QUERIES or context["object"].geom:
response = TargetAreaViewSet.as_view({"get": "retrieve"})(
self.request, pk=context["object"].pk, format="geojson"
)
response.render()
context["not_sprayable_value"] = getattr(
settings, "NOT_SPRAYABLE_VALUE", "noteligible"
)
context["ta_geojson"] = response.content.decode()
bgeom = settings.HH_BUFFER and settings.OSM_SUBMISSIONS
if self.object.level in ["district", "RHC"]:
data = GeoTargetAreaSerializer(
get_location_qs(
self.object.location_set.all(), self.object.level
),
many=True,
context={"request": self.request},
).data
context["hh_geojson"] = json.dumps(data)
else:
loc = context["object"]
hhview = TargetAreaHouseholdsViewSet.as_view(
{"get": "retrieve"}
)
response = hhview(
self.request,
pk=loc.pk,
bgeom=bgeom,
spray_date=spray_date,
format="geojson",
)
response.render()
context["hh_geojson"] = response.content.decode()
sprayed_duplicates = list(
get_duplicates(loc, True, spray_date)
)
not_sprayed_duplicates = list(
get_duplicates(loc, False, spray_date)
)
context["sprayed_duplicates_data"] = json.dumps(
sprayed_duplicates
)
context["sprayed_duplicates"] = count_duplicates(
loc, True, spray_date
)
context["not_sprayed_duplicates_data"] = json.dumps(
not_sprayed_duplicates
)
context["not_sprayed_duplicates"] = count_duplicates(
loc, False
)
context["districts"] = (
Location.objects.filter(parent=None)
.values_list("id", "code", "name")
.order_by("name")
)
context.update({"map_menu": True})
context.update(get_location_dict(self.object.pk))
context["not_sprayed_reasons"] = json.dumps(
settings.MSPRAY_UNSPRAYED_REASON_OTHER
)
return context
| 2.078125 | 2 |
model/common/runway.py | mlabru/visil | 0 | 12770059 | # -*- coding: utf-8 -*-
"""
runway
revision 0.2 2015/dez mlabru
pep8 style conventions
revision 0.1 2014/nov mlabru
initial release (Linux/Python)
"""
# < imports >--------------------------------------------------------------------------------------
# model
import model.common.fix as CFix
# < class CRunway >--------------------------------------------------------------------------------
class CRunway(CFix.CFix):
"""
DOCUMENT ME!
"""
# ---------------------------------------------------------------------------------------------
def __init__(self, fs_rwy_name, ff_rwy_lat, ff_rwy_lng, ff_rwy_track, ff_rwy_gp):
"""
        defines a runway
        @param fs_rwy_name: name
@param ff_rwy_lat: latitude
@param ff_rwy_lng: longitude
@param ff_rwy_track: path
@param ff_rwy_gp: glide path
"""
        # initialize the superclass
super(CRunway, self).__init__(fs_rwy_name, ff_rwy_lat, ff_rwy_lng)
        # inherited from CFix
        # self.s_name   # name
        # self.position # position
self._f_track = ff_rwy_track
self._f_app_angle = ff_rwy_gp
# =============================================================================================
    # data
# =============================================================================================
# ---------------------------------------------------------------------------------------------
@property
def f_app_angle(self):
"""
get glide path
"""
return self._f_app_angle
@f_app_angle.setter
def f_app_angle(self, f_val):
"""
set glide path
"""
self._f_app_angle = f_val
# ---------------------------------------------------------------------------------------------
@property
def f_track(self):
"""
get track
"""
return self._f_track
@f_track.setter
def f_track(self, f_val):
"""
set track
"""
self._f_track = f_val
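    # Usage sketch (illustrative; the name, coordinates and angles are
    # hypothetical):
    #
    #     rwy = CRunway("RWY09", -23.0, -46.5, ff_rwy_track=92.0, ff_rwy_gp=3.0)
    #     assert rwy.f_track == 92.0
    #     assert rwy.f_app_angle == 3.0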
# < the end >--------------------------------------------------------------------------------------
| 1.882813 | 2 |
src/modules/check_email.py | Nobregaigor/Optical-Mark-Registration-PDF-Reader | 0 | 12770060 | from .functions import read_pdf, read_emails_list
def get_email_from_pdf(*args, **kwargs):
input_file = args[0]["input_file"]
emails_list_file = args[0]["emails_list"]
emails_df = read_emails_list(emails_list_file)
(u_data, conf, conf_per_n) = read_pdf(input_file)
unumber = int("".join([str(v) for v in u_data]))
email = emails_df.loc[emails_df["UNUMBER"] == unumber]
print(email)
return email
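# Usage sketch (illustrative; the file names are hypothetical):
#
#     get_email_from_pdf({"input_file": "exam_scan.pdf",
#                         "emails_list": "students.csv"})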
| 2.921875 | 3 |
easy/number-of-1-bits.py | trilliwon/LeetCode | 0 | 12770061 | class Solution(object):
def hammingWeight(self, n):
"""
:type n: int
:rtype: int
"""
if n == 1:
return 1
hw = 0
while n != 0:
if n % 2 == 1:
hw += 1
n //= 2
return hw
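# An alternative sketch (not the original solution): Kernighan's trick, where
# n &= n - 1 clears the lowest set bit, so the loop body runs once per 1-bit
# instead of once per bit position.
#
#     def hammingWeight(self, n):
#         hw = 0
#         while n:
#             n &= n - 1
#             hw += 1
#         return hw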
| 3.609375 | 4 |
build/lib/ezyt/imageEditor/__init__.py | AlexBacho/ezyt | 1 | 12770062 |
from .editor import ImageEditor | 1 | 1 |
built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/jdd_ea/jdd_ea_codec.py | Huawei-Ascend/modelzoo | 12 | 12770063 | # -*- coding:utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""Encode and decode the model config. for JDD."""
from copy import deepcopy
import numpy as np
from vega.search_space.codec import Codec
from vega.core.common.class_factory import ClassType, ClassFactory
@ClassFactory.register(ClassType.CODEC)
class JDDCodec(Codec):
"""Codec of the JDD search space."""
def __init__(self, search_space=None, **kwargs):
"""Construct the SRCodec class.
:param codec_name: name of the codec
:type codec_name: str
:param search_space: Search space of the codec
:type search_space: dictionary
"S_" means that the shrink RDB blcock with 1x1 convolution .
"G_" means that the RDB block with channel shuffle and group convolution.
"C_" means that the contextual RDB block with recursive layer.
first number: the number of convolutional layers in a block
second number: the growth rate of dense connected in a block
third number: the number of output channel in a block
"""
super(JDDCodec, self).__init__(search_space, **kwargs)
self.func_type, self.func_prob = self.get_choices()
self.func_type_num = len(self.func_type)
def get_choices(self):
"""Get search space information.
:return: the configs of the blocks
:rtype: lists
"""
channel_types = ['16', '32', '48']
channel_prob = [1, 0.5, 0.2]
block_types = ['R']
block_prob = [1]
model_type = self.search_space['modules'][0]
channel_types = self.search_space[model_type]['channel_types']
channel_prob = self.search_space[model_type]['channel_prob']
block_types = self.search_space[model_type]['block_types']
block_prob = self.search_space[model_type]['block_prob']
func_type = []
func_prob = []
for b_id in range(len(block_types)):
for chin_id in range(len(channel_types)):
for chout_id in range(len(channel_types)):
func_type.append(block_types[b_id] + '_' + channel_types[chin_id] + '_' + channel_types[chout_id])
func_prob.append(block_prob[b_id] * channel_prob[chin_id] * channel_prob[chout_id])
func_prob = np.cumsum(np.asarray(func_prob) / sum(func_prob))
return func_type, func_prob
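    # Example (illustrative): with block_types == ['R'] and channel_types ==
    # ['16', '32', '48'], get_choices() yields 9 codes such as 'R_16_32' and
    # 'R_48_48', each weighted by block_prob * channel_prob(in) * channel_prob(out).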
def decode(self, indiv):
"""Add the network structure to config.
:param indiv: an individual which contains network architecture code
:type indiv: individual class
:return: config of model structure
:rtype: dictionary
"""
indiv_cfg = deepcopy(self.search_space)
model = indiv_cfg['modules'][0]
indiv_cfg[model]['code'] = indiv.gene.tolist()
indiv_cfg[model]['architecture'] = indiv.active_net_list()
return indiv_cfg
| 2.640625 | 3 |
test.py | csmets/CSSCrypt | 0 | 12770064 | # Testing CSSCrypt
import CSSCrypt
shiftKey = '3453465'
CSSCrypt = CSSCrypt.encryption()
encMsg = CSSCrypt.encrypt('My Secret Message', shiftKey)
print (encMsg)
print(CSSCrypt.decrypt(encMsg, shiftKey))
| 2.265625 | 2 |
credstuffer/proxy/grabber.py | bierschi/credstuffer | 0 | 12770065 |
import requests
import logging
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor, as_completed
class ProxyGrabber:
"""class ProxyGrabber to grab different kinds of proxies
USAGE:
grabber = ProxyGrabber()
grabber.collect_proxies()
"""
def __init__(self, timeout=1000, max_workers=8):
self.logger = logging.getLogger('credstuffer')
self.logger.info("create class ProxyGrabber")
# create request session
self.timeout = timeout
self.session = requests.Session()
self.max_workers = max_workers
self.proxy_types = {key: [] for key in ["http", "socks4", "socks5"]}
self.proxyscrape_http_url = self.__define_proxyscrape_url(proxytype='http', timeout=self.timeout)
self.proxyscrape_socks4_url = self.__define_proxyscrape_url(proxytype='socks4', timeout=self.timeout)
self.proxyscrape_socks5_url = self.__define_proxyscrape_url(proxytype='socks5', timeout=self.timeout)
# used
self.free_proxy_url = 'https://free-proxy-list.net/'
self.us_proxy_url = 'https://us-proxy.org'
self.socks_proxy_url = 'https://socks-proxy.net'
self.ip_address_url = 'https://www.ip-adress.com/proxy-list'
self.proxy_daily_url = 'http://www.proxy-daily.com'
self.anonymous_proxy_url = 'https://free-proxy-list.net/anonymous-proxy.html' # not
self.ssl_proxy_url = 'https://sslproxies.org' # not
self.proxy_urls = [
self.free_proxy_url, # http
self.us_proxy_url, # http
self.ip_address_url, # http
self.socks_proxy_url, # socks4
self.proxy_daily_url, # http, socks4, socks5
self.proxyscrape_http_url, # http
self.proxyscrape_socks4_url, # socks4
self.proxyscrape_socks5_url, # socks5
]
def collect_proxies(self, proxytype='all'):
""" collects all proxies from url resources
:param proxytype: type of proxies
:return: proxy dict as ip:port string
"""
self.logger.info("collecting proxies ...")
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
future_to_url = {executor.submit(self.__load_url, url, 5): url for url in self.proxy_urls}
for future in as_completed(future_to_url):
url = future_to_url[future]
try:
resp = future.result()
if (url in self.ip_address_url) and (proxytype in ('all', 'http')):
proxies = self.__parse_ipadress(response=resp)
self.proxy_types['http'].extend(proxies)
elif url in self.proxy_daily_url:
http, socks4, socks5 = self.__parse_proxy_daily(response=resp)
if proxytype == 'http':
self.proxy_types['http'].extend(http)
elif proxytype == 'socks4':
self.proxy_types['socks4'].extend(socks4)
elif proxytype == 'socks5':
self.proxy_types['socks5'].extend(socks5)
else:
self.proxy_types['http'].extend(http)
self.proxy_types['socks4'].extend(socks4)
self.proxy_types['socks5'].extend(socks5)
elif (url in (self.free_proxy_url, self.us_proxy_url)) and (proxytype in ('all', 'http')):
proxies = self.__parse_free_proxy(response=resp)
self.proxy_types['http'].extend(proxies)
elif (url in self.socks_proxy_url) and (proxytype in ('all', 'socks4')):
proxies = self.__parse_free_proxy(response=resp)
self.proxy_types['socks4'].extend(proxies)
elif (url in self.proxyscrape_http_url) and (proxytype in ('all', 'http')):
proxies = self.__parse_proxyscrape(response=resp)
self.proxy_types['http'].extend(proxies)
elif (url in self.proxyscrape_socks4_url) and (proxytype in ('all', 'socks4')):
proxies = self.__parse_proxyscrape(response=resp)
self.proxy_types['socks4'].extend(proxies)
elif (url in self.proxyscrape_socks5_url) and (proxytype in ('all', 'socks5')):
proxies = self.__parse_proxyscrape(response=resp)
self.proxy_types['socks5'].extend(proxies)
except Exception as e:
self.logger.error(e)
return self.proxy_types
def __load_url(self, url, timeout=5):
""" loads given url resource
:param url: url string
:param timeout: timeout
:return: request response
"""
return self.session.get(url=url, headers={'User-Agent': 'Mozilla/5.0'}, timeout=timeout)
def __define_proxyscrape_url(self, proxytype='all', timeout=1000, ssl='all', anonymity='all', country='all'):
""" defines the proxyscrape url
:param proxytype: type of proxy
:param timeout: timeout for proxies
:param ssl: ssl proxy
:param anonymity: anonymity proxy
:param country: country for proxy
:return: url string
"""
if proxytype not in ('http', 'socks4', 'socks5', 'all'):
raise ValueError('proxytype {} is not a valid value'.format(proxytype))
if timeout <= 0:
raise ValueError('timeout must be an integer greater than 0')
if ssl not in ('yes', 'no', 'all'):
raise ValueError('ssl is not valid')
if anonymity not in ('elite', 'anonymous', 'transparent', 'all'):
raise ValueError('anonymity is not valid')
if len(country) != 2 and country != 'all':
raise ValueError('country is not valid')
url = 'https://api.proxyscrape.com?request=getproxies' + \
'&proxytype=%s' % proxytype + \
'&timeout=%s' % timeout + \
'&ssl=%s' % ssl + \
'&anonymity=%s' % anonymity + \
'&country=%s' % country
return url
### parser ####
def __parse_free_proxy(self, response):
"""parses response from free-proxy-list.net
        :param response: requests response object
:return: list, list of proxies as ip:port
"""
soup = BeautifulSoup(response.text, "html.parser")
proxy_list = list()
for items in soup.select("tbody tr"):
proxy = ':'.join([item.text for item in items.select("td")[:2]])
if ('-' in proxy) or (not proxy) or (':' not in proxy) or ('.' not in proxy):
pass
else:
proxy_list.append(proxy)
return proxy_list
def __parse_ipadress(self, response):
"""parses response from ip-adress.com
:return: list, ip:port as a string
"""
soup = BeautifulSoup(response.text, "html.parser")
parser = soup.find('tbody').find_all('tr')
proxy_list = list()
for elem in parser:
elem = elem.get_text().split()[:2]
if elem[1] != 'transparent':
if ('-' in elem[0]) or (not elem[0]) or (':' not in elem[0]) or ('.' not in elem[0]):
pass
else:
proxy_list.append(elem[0])
return proxy_list
def __parse_proxy_daily(self, response):
""" parses response from proxy-daily.com
:param response: response object
:return: list, ip:port as a string
"""
try:
soup = BeautifulSoup(response.content, 'html.parser')
content = soup.find('div', {'id': 'free-proxy-list'})
all_content = content.find_all(class_="freeProxyStyle")
http = list(filter(None, all_content[0].contents[0].split('\n')))
socks4 = list(filter(None, all_content[1].contents[0].split('\n')))
socks5 = list(filter(None, all_content[2].contents[0].split('\n')))
return http, socks4, socks5
except (AttributeError, KeyError) as ex:
self.logger.error(ex)
def __parse_proxyscrape(self, response):
""" parses response from proxyscrape.com
:param response: response object
:return: list, ip:port as a string
"""
return list(filter(None, response.content.decode('utf-8').split('\r\n')))
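# Usage sketch (illustrative):
#
#     grabber = ProxyGrabber()
#     proxies = grabber.collect_proxies(proxytype="http")
#     print(len(proxies["http"]), "http proxies collected")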
| 2.453125 | 2 |
ravendb/serverwide/server_operation_executor.py | ravendb/RavenDB-Python-Client | 8 | 12770066 | from __future__ import annotations
import enum
from typing import Union, TYPE_CHECKING
from ravendb.http.request_executor import ClusterRequestExecutor
from ravendb.http.topology import Topology
from ravendb.serverwide.operations.common import (
GetBuildNumberOperation,
ServerOperation,
VoidServerOperation,
ServerWideOperation,
)
from ravendb.tools.utils import CaseInsensitiveDict
if TYPE_CHECKING:
from ravendb.documents import DocumentStore
from ravendb.documents.operations import OperationIdResult, Operation
class ConnectionStringType(enum.Enum):
NONE = "NONE"
RAVEN = "RAVEN"
SQL = "SQL"
OLAP = "OLAP"
class ServerOperationExecutor:
def __init__(self, store: DocumentStore):
if store is None:
raise ValueError("Store cannot be None")
request_executor = self.create_request_executor(store)
if request_executor is None:
raise ValueError("Request Executor cannot be None")
self.__store = store
self.__request_executor = request_executor
self.__initial_request_executor = None
self.__node_tag = None
self.__cache = CaseInsensitiveDict()
# todo: register events
# todo: if node tag is null add after_close_listener
def send(
self,
operation: Union[VoidServerOperation, ServerOperation],
):
if isinstance(operation, VoidServerOperation):
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
elif isinstance(operation, ServerOperation):
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
return command.result
def send_async(self, operation: ServerOperation[OperationIdResult]) -> Operation:
command = operation.get_command(self.__request_executor.conventions)
self.__request_executor.execute_command(command)
return ServerWideOperation(
self.__request_executor,
self.__request_executor.conventions,
command.result.operation_id,
command.selected_node_tag if command.selected_node_tag else command.result.operation_node_tag,
)
def close(self) -> None:
if self.__node_tag is not None:
return
if self.__request_executor is not None:
self.__request_executor.close()
cache = self.__cache
if cache is not None:
for key, value in cache.items():
request_executor = value._request_executor
if request_executor is not None:
request_executor.close()
cache.clear()
def __get_topology(self, request_executor: ClusterRequestExecutor) -> Topology:
topology: Topology = None
try:
topology = request_executor.topology
if topology is None:
# a bit rude way to make sure that topology was refreshed
# but it handles a case when first topology update failed
operation = GetBuildNumberOperation()
command = operation.get_command(request_executor.conventions)
request_executor.execute_command(command)
topology = request_executor.topology
except:
pass
if topology is None:
raise RuntimeError("Could not fetch the topology")
return topology
@staticmethod
def create_request_executor(store: DocumentStore) -> ClusterRequestExecutor:
return (
ClusterRequestExecutor.create_for_single_node(
store.urls[0],
store.thread_pool_executor,
store.conventions,
store.certificate_path,
store.trust_store_path,
)
if store.conventions.disable_topology_updates
else ClusterRequestExecutor.create_without_database_name(
store.urls,
store.thread_pool_executor,
store.conventions,
store.certificate_path,
store.trust_store_path,
)
)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.close()
return
| 1.976563 | 2 |
app/views/login_page.py | ngocjr7/scoss_webapp | 3 | 12770067 |
import os
import sys
from werkzeug.utils import secure_filename
from flask import Flask, render_template, url_for, request, redirect, session, jsonify, Blueprint
from scoss import smoss
import requests
from sctokenizer import Source
from scoss import Scoss
from scoss.metrics import all_metrics
from models.models import db
from werkzeug.security import generate_password_hash, check_password_hash
from jinja2 import Environment
from config import URL
login = Blueprint('login_page', __name__)
@login.route('/login', methods=["GET", "POST"])
def login_page():
if request.method == "GET":
return render_template('login.html')
else:
session['logged_in'] = False
username = request.form['username']
password = request.form['password']
url = URL + '/api/login'
payload = {"username": username, "password": password}
req = requests.post(url=url, json=payload)
if req.status_code == 200:
session['user_id'] = req.json()['user_id']
session['username'] = req.json()['username']
session['role'] = req.json()['role']
session['token'] = req.json()['token']
session['logged_in'] = True
return redirect(url_for('home_page.index'))
return render_template('login.html', info='wrong_pass')
@login.route('/logout')
def logout():
url = "{}/api/logout".format(URL)
token = session['token']
headers = {'Authorization': "Bearer {}".format(token)}
requests.get(url, headers=headers)
session.clear()
return redirect(url_for('home_page.index'))
@login.route('/signup', methods=["GET", "POST"])
def signup():
if request.method == "GET":
return render_template('register.html')
else:
username = request.form['username']
password = request.form['password']
level = request.form['level']
        data = {'username': username, 'password': generate_password_hash(password), 'role': level}
url = URL +'/api/users/add'
req = requests.post(url=url, data=data)
if 'user_id' not in req.json().keys():
return jsonify({'info': 'False'})
return jsonify({'info': 'True'})
| 2.15625 | 2 |
goldenmask/protect.py | youngquan/goldenmask | 6 | 12770068 |
import compileall
import multiprocessing
import os
import shutil
import sys
from distutils.core import setup
from pathlib import Path
from Cython.Build import cythonize
from Cython.Compiler import Options
from goldenmask import GOLDENMASK, logger
from goldenmask.exceptions import NoPythonFiles
from goldenmask.utils import (
Ignore,
get_file_type,
is_entrypoint,
pack,
remove_python_files,
rename_so_and_pyd_file,
unpack,
)
Options.docstrings = False
class BaseProtector:
def __init__(self, source_path: str, inplace: bool, no_smart: bool) -> None:
self.source_path = Path(source_path)
self.is_pyfile, self.is_wheel, self.is_tarball, self.is_dir = get_file_type(
str(self.source_path)
)
if not self.is_dir:
if not any((self.is_pyfile, self.is_wheel, self.is_tarball)):
logger.error(
f"This {self.source_path} can not be protect now! "
f"Only files end with '.py', '.tar.gz' or '.whl' can be protect!"
)
sys.exit()
self.inplace = inplace
self.no_smart = no_smart
if self.is_pyfile:
if self.inplace:
self.file = self.source_path
else:
self.file = (
self.source_path.parent / "__goldenmask__" / self.source_path.name
)
if self.file.exists():
os.remove(self.file)
else:
try:
self.file.parent.mkdir()
except FileExistsError:
pass
shutil.copy(self.source_path, self.file)
self.info_file = self.file.parent / ".goldenmask"
self.build_temp = self.file.parent / "build-goldenmask"
elif self.is_dir:
if self.inplace:
self.dir = self.source_path
else:
self.dir = Path(source_path) / "__goldenmask__"
if self.dir.exists():
                    # TODO: maybe I should try to speed this up!
shutil.rmtree(self.dir)
if self.no_smart:
shutil.copytree(self.source_path, self.dir)
else:
shutil.copytree(
self.source_path, self.dir, ignore=Ignore(self.dir).copy
)
self.info_file = self.dir / ".goldenmask"
self.build_temp = self.dir / "build-goldenmask"
else:
tmp_directory = unpack(self.source_path)
if self.is_wheel:
self.dir = tmp_directory
if self.is_tarball:
self.dir = list(tmp_directory.iterdir())[0]
self.no_smart = True
if self.inplace:
self.info_file = self.source_path.parent / ".goldenmask"
else:
self.info_file = self.source_path.parent / GOLDENMASK / ".goldenmask"
self.build_temp = self.dir / "build-goldenmask"
class CompileallProtector(BaseProtector):
def __init__(
self, source_path: str, inplace: bool = False, no_smart: bool = False
) -> None:
super().__init__(source_path, inplace, no_smart)
def protect(self) -> bool:
if self.is_pyfile:
success = compileall.compile_file(
self.file, force=True, legacy=True, optimize=2, quiet=1
)
if success:
os.remove(self.file)
else:
if self.is_dir:
# Below is needed, because when the option inplace is not true, virtual env folder has already
# been ignored when pasting them.
if self.inplace and not self.no_smart:
rx = Ignore(self.dir)
else:
rx = None
else:
rx = None
success = compileall.compile_dir(
self.dir,
force=True,
legacy=True,
optimize=2,
quiet=1,
rx=rx,
workers=os.cpu_count(),
)
if success:
remove_python_files(self.dir)
if self.is_wheel or self.is_tarball:
_ = pack(self.dir, self.source_path, self.inplace)
shutil.rmtree(self.dir, ignore_errors=True)
return success
class CythonProtector(BaseProtector):
def __init__(
self, source_path: str, inplace: bool = False, no_smart: bool = False
) -> None:
super().__init__(source_path, inplace, no_smart)
def protect(self) -> bool:
success = False
if self.is_pyfile:
success = True
ext_modules = cythonize(
str(self.file), compiler_directives={"language_level": 3}
)
try:
setup(
ext_modules=ext_modules,
script_args=[
"build_ext",
"-b",
str(self.file.parent),
"-t",
str(self.build_temp),
],
)
except Exception as e:
logger.warning(
f"Can not build file {self.file} using Cython, we will try to use Compileall!"
)
logger.warning(e)
protector = CompileallProtector(self.file, inplace=True)
success = protector.protect()
if success:
self.clean(self.file)
shutil.rmtree(self.build_temp)
return success
else:
python_files_normal = []
for file in Path(self.dir).rglob("*.py"):
if Ignore(self.dir).search(str(file)) and not self.no_smart:
continue
# TODO: It seems that there are many files that can not be compiled using Cython.
if (
(file.stem.startswith("__") and file.stem.endswith("__"))
or is_entrypoint(file)
or file.name == "setup.py"
):
protector = CompileallProtector(str(file), inplace=True)
success = protector.protect()
if not success:
break
else:
python_files_normal.append(str(file))
if not python_files_normal:
logger.error(
f"There is no python files to build using Cython in folder {self.dir}"
)
raise NoPythonFiles()
ext_modules = cythonize(
python_files_normal,
compiler_directives={"language_level": 3},
quiet=True,
force=True,
nthreads=multiprocessing.cpu_count(),
)
try:
setup(
ext_modules=ext_modules,
script_args=[
"build_ext",
"-b",
str(self.dir),
"-t",
str(self.build_temp),
],
)
except Exception as e:
logger.error(e)
success = False
if success:
for file in python_files_normal:
self.clean(Path(file))
shutil.rmtree(self.build_temp)
if self.is_wheel or self.is_tarball:
_ = pack(self.dir, self.source_path, self.inplace)
shutil.rmtree(self.dir, ignore_errors=True)
return success
@staticmethod
def clean(file: Path) -> None:
file.unlink()
file_c = file.with_suffix(".c")
if file_c.exists():
file_c.unlink()
rename_so_and_pyd_file(file)
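# Usage sketch (illustrative; the package path is hypothetical):
#
#     protector = CythonProtector("mypkg/", inplace=False)
#     ok = protector.protect()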
| 1.898438 | 2 |
bpexts/hbp/sequential.py | f-dangel/hbp | 13 | 12770069 |
"""
Hessian backpropagation implementation of torch.nn.Sequential, and conversion
of torch.nn layers to HBP layers.
"""
from backpack.core.layers import Flatten
# torch layers
from torch.nn import (
Conv2d,
CrossEntropyLoss,
Linear,
MaxPool2d,
ReLU,
Sequential,
Sigmoid,
Tanh,
)
from .conv2d import HBPConv2d
from .crossentropy import HBPCrossEntropyLoss
from .flatten import HBPFlatten
from .linear import HBPLinear
from .maxpool2d import HBPMaxPool2d
from .module import hbp_decorate
# HBP layers
from .relu import HBPReLU
from .sigmoid import HBPSigmoid
from .tanh import HBPTanh
class HBPSequential(hbp_decorate(Sequential)):
"""A sequence of HBP modules."""
# override
@classmethod
def from_torch(cls, torch_layer):
if not isinstance(torch_layer, Sequential):
raise ValueError(
"Expecting torch.nn.Sequential, got {}".format(torch_layer.__class__)
)
layers = []
for mod in torch_layer:
layers.append(convert_torch_to_hbp(mod))
return cls(*layers)
# override
def hbp_hooks(self):
"""No hooks required."""
# override
def enable_hbp(self):
for mod in self.children():
mod.enable_hbp()
def set_hbp_approximation(
self, average_input_jacobian=True, average_parameter_jacobian=True
):
super().set_hbp_approximation(
average_input_jacobian=None, average_parameter_jacobian=None
)
for mod in self.children():
mod.set_hbp_approximation(
average_input_jacobian=average_input_jacobian,
average_parameter_jacobian=average_parameter_jacobian,
)
# override
def backward_hessian(
self, output_hessian, compute_input_hessian=False, modify_2nd_order_terms="none"
):
"""Propagate Hessian through the network.
Starting from the last layer, call `backward_hessian` recursively
until ending up in the first module.
"""
out_h = output_hessian
for idx in reversed(range(len(self))):
module = self[idx]
compute_in = True if (idx != 0) else compute_input_hessian
out_h = module.backward_hessian(
out_h,
compute_input_hessian=compute_in,
modify_2nd_order_terms=modify_2nd_order_terms,
)
return out_h
def _supported_conversions():
"""Return supported conversions."""
return [
(ReLU, HBPReLU),
(Sigmoid, HBPSigmoid),
(Tanh, HBPTanh),
(Linear, HBPLinear),
(Conv2d, HBPConv2d),
(MaxPool2d, HBPMaxPool2d),
(Sequential, HBPSequential),
(Flatten, HBPFlatten),
(CrossEntropyLoss, HBPCrossEntropyLoss),
]
def convert_torch_to_hbp(layer):
"""Convert torch layer to corresponding HBP layer."""
conversions = _supported_conversions()
for (torch_cls, hbp_cls) in conversions:
if isinstance(layer, torch_cls):
return hbp_cls.from_torch(layer)
_print_conversions()
raise ValueError("Class {} cannot be converted to HBP.".format(layer.__class__))
def _print_conversions():
"""Print all possible conversions."""
print("\nSupported conversions:")
for torch_cls, hbp_cls in _supported_conversions():
print("{:>20}\t->{:>25}".format(torch_cls.__name__, hbp_cls.__name__))
| 2.453125 | 2 |
tests/old_suite/interactive/pyqt5_qml_qrc.py | soleil0-0/pyinstaller | 2 | 12770070 | # -*- coding: utf-8 -*-
# Resource object code
#
# Created: Wed Sep 4 08:34:31 2013
# by: The Resource Compiler for PyQt (Qt v5.1.1)
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore
qt_resource_data = b"\
\x00\x00\x00\xf9\
\x69\
\x6d\x70\x6f\x72\x74\x20\x51\x74\x51\x75\x69\x63\x6b\x20\x32\x2e\
\x30\x0a\x0a\x52\x65\x63\x74\x61\x6e\x67\x6c\x65\x20\x7b\x0a\x20\
\x20\x20\x20\x77\x69\x64\x74\x68\x3a\x20\x33\x36\x30\x0a\x20\x20\
\x20\x20\x68\x65\x69\x67\x68\x74\x3a\x20\x33\x36\x30\x0a\x20\x20\
\x20\x20\x54\x65\x78\x74\x20\x7b\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x61\x6e\x63\x68\x6f\x72\x73\x2e\x63\x65\x6e\x74\x65\x72\x49\
\x6e\x3a\x20\x70\x61\x72\x65\x6e\x74\x0a\x20\x20\x20\x20\x20\x20\
\x20\x20\x74\x65\x78\x74\x3a\x20\x22\x48\x65\x6c\x6c\x6f\x20\x57\
\x6f\x72\x6c\x64\x22\x0a\x20\x20\x20\x20\x7d\x0a\x20\x20\x20\x20\
\x4d\x6f\x75\x73\x65\x41\x72\x65\x61\x20\x7b\x0a\x20\x20\x20\x20\
\x20\x20\x20\x20\x61\x6e\x63\x68\x6f\x72\x73\x2e\x66\x69\x6c\x6c\
\x3a\x20\x70\x61\x72\x65\x6e\x74\x0a\x20\x20\x20\x20\x20\x20\x20\
\x20\x6f\x6e\x43\x6c\x69\x63\x6b\x65\x64\x3a\x20\x7b\x0a\x20\x20\
\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x51\x74\x2e\x71\x75\x69\
\x74\x28\x29\x3b\x0a\x20\x20\x20\x20\x20\x20\x20\x20\x7d\x0a\x20\
\x20\x20\x20\x7d\x0a\x7d\x0a\x0a\
"
qt_resource_name = b"\
\x00\x09\
\x03\x32\x8d\xbc\
\x00\x68\
\x00\x65\x00\x6c\x00\x6c\x00\x6f\x00\x2e\x00\x71\x00\x6d\x00\x6c\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
"
def qInitResources():
QtCore.qRegisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
QtCore.qUnregisterResourceData(0x01, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 1.085938 | 1 |
graph/__init__.py | mrhash101/zincbase | 1 | 12770071 | from collections import defaultdict
import networkx as nx
class Node:
"""Class representing a node in the KB.
"""
def __init__(self, kb, name, data, watches=[]):
super().__setattr__('_kb', kb)
super().__setattr__('_name', name)
nx.set_node_attributes(self._kb.G, {self._name: data})
self._watches = defaultdict(list)
for watch in watches:
self._watches[watch[0]].append(watch[1])
def __repr__(self):
return self._name
def __eq__(self, comparator):
return self._name == str(comparator)
def __ne__(self, comparator):
return self._name != str(comparator)
def __getattr__(self, key):
try:
return self._kb.G.nodes(data=True)[self._name][key]
except KeyError as e:
return None
def __setattr__(self, key, value):
attrs = self._kb.G.nodes(data=True)[self._name]
prev_val = attrs.get(key, None)
attrs.update({key: value})
nx.set_node_attributes(self._kb.G, {self._name: attrs})
for watch_fn in self._watches.get(key, []):
watch_fn(self, prev_val)
def __getitem__(self, key):
return self.__getattr__(key)
def __setitem__(self, key, value):
return self.__setattr__(key, value)
@property
def attrs(self):
"""Returns attributes of the node stored in the KB
"""
        # Copy first so popping bookkeeping keys does not mutate the live
        # networkx attribute dict.
        attributes = dict(self._kb.G.nodes(data=True)[self._name])
        attributes.pop('_watches', None)
        attributes.pop('_new_neighbor_fn', None)
return attributes
@property
def neighbors(self):
"""Returns the node's neighbors, in the format of tuples:
[(neighbor_name, [{'pred': predicate aka edge_relation}])]
"""
return self._kb.neighbors(self._name)
def watch(self, attribute, fn):
"""Execute user-defined function when the value of attribute changes.
Function takes two args: `node` which has access to all
its own attributes, including neighbors and edges, and the second
arg is the previous value of the attribute that changed.
:returns int: id of the watch
:Example:
>>> kb.store('node(node1)')
>>> node = kb.node('node1')
>>> node.grains = 3
>>> print(node.grains)
3
        >>> node.watch('grains', lambda node, prev: print('grains changed to ' + str(node.grains)))
('grains', 0)
>>> node.grains += 1
grains changed to 4
"""
self._watches[attribute].append(fn)
        return (attribute, len(self._watches[attribute]) - 1)
def remove_watch(self, attribute_or_watch_id):
"""Stop watching `attribute_or_watch_id`.
If it is a string, delete all watches for that attribute.
If it is a tuple of (attribute, watch_id): delete that specific watch.
"""
if isinstance(attribute_or_watch_id, tuple):
self._watches[attribute_or_watch_id[0]].pop(attribute_or_watch_id[1])
else:
self._watches[attribute_or_watch_id] = []
def watch_for_new_neighbor(self, fn):
"""Execute `fn` when node receives a new neighbor."""
self.__setattr__('_new_neighbor_fn', fn) | 2.75 | 3 |
utils.py | yxtay/text-classification-tensorflow | 9 | 12770072 |
import os
import re
import pandas as pd
import tensorflow as tf
###
# file system
###
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
def make_dirs(path, empty=False):
"""
create dir and clear if required
"""
os.makedirs(path, exist_ok=True)
if empty:
files = [os.path.join(path, item) for item in os.listdir(path)]
for item in files:
if os.path.isfile(item):
os.remove(item)
return path
def path_join(*paths, empty=False):
"""
join paths and create dir
"""
path = os.path.abspath(os.path.join(*paths))
make_dirs(os.path.dirname(path), empty)
return path
###
# data processing
##
def tokenise_text(df, text_col="text", token_regex=r"[A-Za-z]+", min_token_len=3):
df = df.copy()
df["tokens"] = (df[text_col].fillna("")
.str.lower()
.str.findall(token_regex, flags=re.IGNORECASE)
.map(lambda doc: [token for token in doc
if len(token) >= min_token_len]))
df["cleaned_text"] = df["tokens"].str.join(" ")
df["token_count"] = df["tokens"].str.len()
return df
def pad_boundary_tokens(df, tokens_col="tokens", go="<GO>", eos="<EOS>"):
n = len(df[tokens_col])
go_series = pd.Series([go] * n)
eos_series = pd.Series([eos] * n)
df[tokens_col] = (go_series
.str.cat(df[tokens_col].str.join(" "), sep=" ")
.str.cat(eos_series, sep=" ")
.str.split())
return df
###
# tf functions
###
def length(sequence):
length = tf.reduce_sum(tf.sign(sequence), 1)
length = tf.cast(length, tf.int32)
return length
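# Shape note (illustrative): for padded integer id sequences of shape
# (batch, time), e.g. [[4, 2, 0], [7, 0, 0]], length() returns [2, 1]:
# tf.sign marks the nonzero steps and the row sum counts them.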
def cost(output, target):
# Compute cross entropy for each frame.
cross_entropy = target * tf.log(output)
cross_entropy = -tf.reduce_sum(cross_entropy, 2)
mask = tf.sign(tf.reduce_max(tf.abs(target), 2))
cross_entropy *= mask
# Average over actual sequence lengths.
cross_entropy = tf.reduce_sum(cross_entropy, 1)
cross_entropy /= tf.reduce_sum(mask, 1)
return tf.reduce_mean(cross_entropy)
| 2.5625 | 3 |
src/scripts/m2o-analysis.py | ai-ku/upos | 4 | 12770073 | #!/usr/bin/env python
# # Sample Usage:
# # ./m2o-analysis.py enw.words.gz enw.pos.gz
# # > perp>1.75
# # dance 1.75 12
# # Crowd 1.75 8
# # meltdown 1.75 4
# # Personnel 1.75 4
# # ...
# # >
# #
from collections import defaultdict as dd
import gzip
from itertools import izip
import math
import re
import sys
def read_file(name):
if name.endswith('.gz'):
f = gzip.open(name)
elif name == "-":
f = sys.stdin
else:
f = open(name)
l = f.readlines()
f.close()
return l
def m2o_mapping(cluster, pos):
pos_counts = dd(lambda: dd(int))
for c, p in izip(cluster, pos):
pos_counts[c][p] += 1
mapping = {}
    for c, pc in pos_counts.iteritems():
mapping[c] = max(pc.iterkeys(), key=lambda x: pc[x])
return mapping
def m2o_score(cluster, pos, mapping):
match = 0
for c, p in izip(cluster, pos):
if mapping[c] == p:
match += 1
return float(match) / len(cluster)
def entropy(vec):
s = sum(vec)
return sum(-math.log(float(v) / s, 2) * (float(v) / s) for v in vec)
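# Worked example: entropy([1, 1]) == 1.0 bit, so a word split evenly between
# two POS tags gets perplexity 2 ** 1.0 == 2, i.e. it behaves like exactly
# two tags.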
def perplexity(word, pos):
pos_counts = dd(lambda: dd(int))
for w, p in izip(word, pos):
pos_counts[w][p] += 1
perp = {}
counts = {}
for w, pos_count in pos_counts.iteritems():
counts[w] = sum(pos_count.itervalues())
perp[w] = 2 ** entropy(pos_count.values())
return perp, counts
word = read_file(sys.argv[1])
pos = read_file(sys.argv[2])
assert len(word) == len(pos)
clusters = {}
for f in sys.argv[3:]:
clusters[f] = read_file(f)
assert len(clusters[f]) == len(word)
perp, counts = perplexity(word, pos)
mappings = {}
for c, f in clusters.iteritems():
mappings[c] = m2o_mapping(f, pos)
print "%s\t%.2f" % (c, 100 * m2o_score(mappings[c]))
user = raw_input('> ') + "\n"
while user != "\n":
if re.search("perp\s*>\s*(.*)", user):
try:
num = float(re.search("perp\s*>\s*(.*)", user).group(1))
for w, p in sorted(filter(lambda x: x[1] > num, perp.iteritems()), key=lambda x: x[1]):
print "%s\t%.2f\t%d" % (w.strip(), p, counts[w])
except:
print "Bad number."
else:
        matches = dd(list)
        match_pos = []
        for i in xrange(len(word)):
            if word[i] == user:
                match_pos.append(pos[i])
                for c, f in clusters.iteritems():
                    matches[c].append(f[i])
        if len(matches) == 0:
            print "No such word."
        else:
            for c in clusters:
                print "%s\t%.2f" % (c, 100 * m2o_score(matches[c], match_pos, mappings[c]))
user = raw_input('> ') + "\n"
print "Happy Happy Joy Joy."
| 2.75 | 3 |
test/test_timer.py | vaiorabbit/python-sdl2 | 1 | 12770074 | import ctypes, ctypes.util
import sys, os, threading, time
sys.path.append(os.pardir)
import sdl2
#from sdl2 import *
def timer_callback_fn(interval, param):
print("HI")
return interval
def timer_test():
resolution = 60
cb = sdl2.SDL_TimerCallback(timer_callback_fn)
print(type(cb))
t1 = sdl2.SDL_AddTimer(resolution, cb, None)
print("Waiting Timer...")
time.sleep(1)
print("Timer Done.")
sdl2.SDL_RemoveTimer(t1)
def main():
sdl2.sdl2_load(ctypes.util.find_library('SDL2')) # '/usr/local/lib/libSDL2.dylib'
sdl2.SDL_Init(sdl2.SDL_INIT_EVERYTHING)
thr = threading.Thread(target = timer_test, name = "thr")
thr.start()
print("Waiting Thread...")
thr.join()
print("Thread Done.")
sdl2.SDL_Quit()
if __name__ == '__main__':
main()
| 2.75 | 3 |
practice/88.py | porala/python | 1 | 12770075 | #Create a script that uses countries_by_area.txt file as data sourcea and prints out the top 5 most densely populated countries
import pandas
data = pandas.read_csv("countries_by_area.txt")
data["density"] = data["population_2013"] / data["area_sqkm"]
data = data.sort_values(by="density", ascending=False)
for index, row in data[:5].iterrows():
print(row["country"])
| 3.875 | 4 |
src/vendor/configrpc/plugin.py | radomirklacza/C-BAS | 0 | 12770076 | import eisoil.core.pluginmanager as pm
from crpc.configrpc import ConfigRPC
def setup():
# setup config keys
xmlrpc = pm.getService('xmlrpc')
xmlrpc.registerXMLRPC('configrpc', ConfigRPC(), '/amconfig') # handlerObj, endpoint | 1.453125 | 1 |
utils/restful_status.py | jin-hao-chen/team_go_backend | 0 | 12770077 | <filename>utils/restful_status.py
STATUS_SUCCESS = 0
STATUS_ERROR = 1
| 1.070313 | 1 |
aglocl.py | gunnihinn/aglocl | 4 | 12770078 | #!/usr/bin/env python3
import re
import time
import sys
import requests
from bs4 import BeautifulSoup
def name_and_class(tag_name, class_name):
return lambda e: e.name == tag_name and e.has_attr('class') and class_name in e['class']
def find_search_result_pages(url):
'Return a list of URLs of the pages of search results'
r = requests.get(url)
if not r.status_code == 200:
print('Could not get search results', file=sys.stderr)
return None
soup = BeautifulSoup(r.text, "html.parser")
divs = soup.find_all(name_and_class('div', 'pagination'))
if not divs:
print("Didn't find any pages of search results",
file=sys.stderr)
div = divs[0]
links = div.find_all('a')
numbers = [link for link in links if re.match(r'^[0-9]+$', link.text)]
last = numbers[-1]
return [make_github_page_url(n) for n in range(1, int(last.text)+1)]
def make_github_page_url(number):
url = ''.join([
'https://github.com/search?',
'p={n}'.format(n=number),
'&q="curated+list"',
'&type=Repositories',
'&utf8=%E2%9C%93',
])
return url
def find_repo_elements(soup):
'Find repo <li> elements in GitHub search result soup'
return soup.find_all(name_and_class('li', 'repo-list-item'))
def make_repo_dicts(repo_elements):
'Make repo dictionaries out of repo <li> soups'
dicts = [make_repo_dict(r) for r in repo_elements]
return [d for d in dicts if d is not None]
def make_repo_dict(repo_element):
'Make a repo dictionary out of a single repo <li> soup'
# Find name and URL
h3 = repo_element.find('h3')
if not h3:
print('No <h3> element found in <li> of search result',
file=sys.stderr)
return None
a = h3.find('a')
    if not a:
print('No <a> element found in <li><h3> of search result',
file=sys.stderr)
return None
name = a.text
url = a['href']
# Find description
paras = repo_element.find_all(name_and_class('p', 'repo-list-description'))
if not paras:
print('No description <p> element found in <li> of search result',
file=sys.stderr)
return None
p = paras[0]
desc = p.text.strip()
return {
'name': name,
'url': 'https://github.com{u}'.format(u=url),
'description': desc,
}
def repo_dicts_from_search(url):
    pages = find_search_result_pages(url)
    if not pages:
        return []
    pages.reverse()
reqs = []
    print('Got search result pages, making {0} requests...'
          .format(len(pages)), file=sys.stderr)
i = 0
while pages:
i += 1
page = pages.pop()
print('... making request {0}'.format(i), file=sys.stderr)
while True:
r = requests.get(page)
if r.status_code == 200:
reqs.append(r)
time.sleep(5)
break
elif r.status_code == 429:
print('GitHub server tired of us, waiting 60 seconds', file=sys.stderr)
time.sleep(60)
else:
                print('... request {0} FAILED: {1}'.format(i, r.status_code), file=sys.stderr)
break
print('Got search result requests, making soup...', file=sys.stderr)
soups = [
BeautifulSoup(r.text, "html.parser") for r in reqs
if r.status_code == 200
]
print('Making repo elements...', file=sys.stderr)
repo_elements = [find_repo_elements(soup) for soup in soups]
repo_dicts = [make_repo_dicts(r) for r in repo_elements]
dicts = []
for rd in repo_dicts:
dicts += rd
return sorted(dicts, key=lambda d: d['name'])
def print_repo(dictionary):
u = dictionary['url']
n = dictionary['name']
d = dictionary['description']
return '* [{n}]({u}): {d}'.format(u=u, n=n, d=d)
if __name__ == '__main__':
url = 'https://github.com/search?q=%22curated+list%22&type=Repositories&utf8=%E2%9C%93'
repos = repo_dicts_from_search(url)
lines = []
with open('head.md') as head:
lines = [line.strip() for line in head.readlines()]
for repo in repos:
lines.append(print_repo(repo))
with open('README.md', 'w') as fh:
for line in lines:
print(line, file=fh)
| 3.03125 | 3 |
biolabs/core/views.py | romgar/django-biolabs | 0 | 12770079 |
# Create your views here.
from rest_framework import viewsets
from biolabs.core import models as core_models
from biolabs.core.serializers import LaboratorySerializer
class LaboratoryViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows labs to be viewed or edited.
"""
queryset = core_models.Laboratory.objects.filter(is_moderated=True)
serializer_class = LaboratorySerializer
| 1.8125 | 2 |
players.py | oxrock/chess-learner | 0 | 12770080 | <filename>players.py<gh_stars>0
import random
from math import inf
import numpy as np
from chessbot_utils import fast_predict
from math import inf
import chess
import chess.engine
import cProfile, pstats
class Player:
def __init__(
self, team: int, encoder: dict, score_encoder: dict, boosted_rewards: bool
):
self.team = 1 if team == 1 else -1
self.encoder = encoder
self.score_encoder = score_encoder
self.boosted_rewards = boosted_rewards
self.history = []
def generate_move(self, env):
starting_board = self.encode_env(env)
action = random.sample(list(env.legal_moves), 1)[0]
env.push(action)
self.history.append([starting_board, self.encode_env(env)])
return action
def encode_env(self, env) -> np.array:
temp = [self.encoder[x] for x in str(env).split()]
temp.append([0, 0, 0, 0, 0, 0, self.team])
return temp
def get_board_score(self, encoded_board, team) -> float: # returns piece score
score = 0
# if team == 1:
# for i in range(encoded_board.shape[0] - 1):
# val = self.score_encoder[tuple(encoded_board[i])]
# if val > 0:
# score += abs(val)
# else:
# for i in range(encoded_board.shape[0] - 1):
# val = self.score_encoder[tuple(encoded_board[i])]
# if val < 0:
# score += abs(val)
if team == 1:
for i in range(len(encoded_board) - 1):
val = self.score_encoder[tuple(encoded_board[i])]
if val > 0:
score += abs(val)
else:
for i in range(len(encoded_board) - 1):
val = self.score_encoder[tuple(encoded_board[i])]
if val < 0:
score += abs(val)
# print(score)
return score
def finalize_data(self, reward: float, draw=False) -> list:
labeled_data = []
base_reward = 1
enemy_team = 1 if self.team != 1 else -1
if not draw:
increment = reward / len(self.history)
for i in range(len(self.history)):
turn_reward = -inf
base_reward = (i + 1) * increment
if self.boosted_rewards and i < len(self.history) - 3:
before = self.get_board_score(self.history[i][0], enemy_team)
after = self.get_board_score(self.history[i][1], enemy_team)
if after < before:
turn_reward = before - after
if turn_reward > base_reward:
labeled_data.append([self.history[i][1], turn_reward])
else:
labeled_data.append([self.history[i][1], base_reward])
else:
my_score = self.get_board_score(self.history[-1][1], self.team)
enemy_score = self.get_board_score(self.history[-1][1], self.team * -1)
if my_score > enemy_score:
labeled_data.append([self.history[-1][1], reward])
# print(f"labeled data length: {len(labeled_data)}")
# print(f"labeled data[0] length: {len(labeled_data[0])}")
# print(f"labeled data[0][0] length: {len(labeled_data[0][0])}")
# print(f"labeled data[0][1] : {labeled_data[0][1]}")
# print()
return labeled_data
def quit(self):
pass
def __repr__(self):
return f"Random mover {'WHITE' if self.team == 1 else 'BLACK'} "
class chess_engine(Player):
def __init__(
self,
team: int,
encoder: dict,
score_encoder: dict,
boosted_rewards: bool,
engine_path: str,
time_limit: float,
engine_name: str,
):
super().__init__(team, encoder, score_encoder, boosted_rewards)
self.engine = chess.engine.SimpleEngine.popen_uci(engine_path)
self.time_limit = time_limit
self.name = engine_name
def __repr__(self):
return f"{self.name} chess engine {'BLACK' if self.team == -1 else 'WHITE'}"
def generate_move(self, env):
starting_board = self.encode_env(env)
action = self.engine.play(env, chess.engine.Limit(time=self.time_limit)).move
env.push(action)
self.history.append([starting_board, self.encode_env(env)])
return action
def quit(self):
self.engine.quit()
class ML_Player(Player):
def __init__(
self,
team: int,
encoder: dict,
score_encoder: dict,
boosted_rewards: bool,
model,
epsilon=0.95,
):
super().__init__(team, encoder, score_encoder, boosted_rewards)
self.model = model
self.epsilon = epsilon
def generate_move(self, env):
starting_board = self.encode_env(env)
if random.random() < self.epsilon:
best_move = None
best_score = -inf
chosen_board = None
# print("==========================")
            for action in env.legal_moves:
env.push(action)
encoded_board = self.encode_env(env)
pred_board = np.array([encoded_board])
# print(f"encoded board shape: {encoded_board.shape}")
pred = float(fast_predict(pred_board, self.model))
# print(pred)
env.pop()
if pred > best_score:
best_move = action
best_score = pred
chosen_board = encoded_board
else:
best_move = random.sample(list(env.legal_moves), 1)[0]
env.push(best_move)
chosen_board = self.encode_env(env)
self.history.append([starting_board, chosen_board])
return best_move
def __repr__(self):
return f"ML bot {'WHITE' if self.team == 1 else 'BLACK'} "
class ML_Player_Trainer(ML_Player):
def __init__(
self,
team: int,
encoder: dict,
score_encoder: dict,
boosted_rewards: bool,
model,
epsilon=1,
):
super().__init__(team, encoder, score_encoder, boosted_rewards, model, epsilon)
def __repr__(self):
return f"ML trainer bot {'WHITE' if self.team == 1 else 'BLACK'} "
class ML_Distiller(ML_Player):
def __init__(
self,
team: int,
encoder: dict,
score_encoder: dict,
boosted_rewards: bool,
model,
epsilon=1,
depth=2,
speed_hacks=True,
):
super().__init__(team, encoder, score_encoder, boosted_rewards, model, epsilon)
self.depth = depth
self.speed_hacks = speed_hacks
self.speed_barrier = 15
# self.profiler = cProfile.Profile()
def encode_env(self, env, team) -> np.array:
temp = [self.encoder[x] for x in str(env).split()]
temp.append([0, 0, 0, 0, 0, 0, team])
return np.array(temp)
def recursive_move_selector(self, env, team, legal_moves, depth):
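        # shallow look-ahead: score each own move with the value net plus the
        # terminal bonus, let the opponent reply greedily, then recurse until
        # `depth` is exhausted and keep the best-scoring line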
        _env = env.copy(stack=False)
if depth <= 1:
temp = self._generate_move(_env, team)
return temp[1], temp[0]
best_score = -999999
best_move = None
for move in legal_moves:
_env.push(move)
encoded_board = self.encode_env(_env, team)
pred_board = np.array([encoded_board])
pred = float(fast_predict(pred_board, self.model))
pred += self.terminal_balancer(_env, encoded_board, team)
enemy_move = self._generate_move(_env, team * -1)[0]
if enemy_move != None:
_env.push(enemy_move)
pred += self.recursive_move_selector(
_env, team, list(_env.legal_moves), depth - 1
)[0]
_env.pop()
_env.pop()
if pred > best_score:
best_score = pred
best_move = move
if best_move != None:
return best_score, best_move
else:
return 0, legal_moves[0] if len(legal_moves) > 0 else "xyz"
def terminal_balancer(self, env, encoded_board, team) -> float:
legal_moves = list(env.legal_moves)
result = env.result()
if result != "*":
if result == "1-0" and team == 1:
return 1
elif result == "0-1" and team == -1:
return 1
elif result == "1/2-1/2":
enemy_score = self.get_board_score(
encoded_board, 1 if team != 1 else -1
)
my_score = self.get_board_score(encoded_board, team)
ratio = my_score / enemy_score
return 0.5 if ratio <= 0.75 else -0.5
            else:
                # remaining terminal outcome: `team` lost; return a penalty
                # rather than None, since callers add this value to a score
                if not env.is_game_over():
                    print("result and game over mismatch!")
                return -1
else:
for action in legal_moves:
env.push(action)
result = env.result()
env.pop()
if result == "1-0" and team != 1:
return -1
elif result == "0-1" and team != -1:
return -1
return 0
def generate_move(self, env):
starting_board = self.encode_env(env, self.team)
legal_moves = list(env.legal_moves)
if len(legal_moves) > 1:
if random.random() < self.epsilon:
# self.profiler.enable()
if (
not self.speed_hacks
or self.depth == 1
or len(self.history) < self.speed_barrier
):
ideal_move = self.recursive_move_selector(
env, self.team, legal_moves, 1
)[1]
else:
ideal_move = self.recursive_move_selector(
env, self.team, legal_moves, self.depth
)[1]
# self.profiler.disable()
# stats = pstats.Stats(self.profiler).sort_stats('cumtime')
# stats.print_stats()
if type(ideal_move) == str:
ideal_move = random.sample(legal_moves, 1)[0]
print(f"Had to pick random move! {len(legal_moves)}")
else:
ideal_move = random.sample(list(env.legal_moves), 1)[0]
else:
ideal_move = legal_moves[0]
env.push(ideal_move)
chosen_board = self.encode_env(env, self.team)
self.history.append([starting_board, chosen_board])
return ideal_move
def _generate_move(self, env, team):
best_move = None
best_score = -inf
for action in env.legal_moves:
env.push(action)
encoded_board = self.encode_env(env, team)
pred = float(fast_predict(encoded_board, self.model))
# print(pred)
pred += self.terminal_balancer(env, encoded_board, team)
env.pop()
if pred > best_score:
best_move = action
best_score = pred
return best_move, best_score
def __repr__(self):
return f"Distiller bot {'WHITE' if self.team == 1 else 'BLACK'} "
if __name__ == "__main__":
pass
| 2.640625 | 3 |
ex04/bissextile.py | joaquim2509pedro/Day02 | 0 | 12770081 | <reponame>joaquim2509pedro/Day02
annee = int(input("Enter a year\n"))
if (annee%4 != 0):
    print ("it is not a leap year")
else:
    if (annee%100 == 0 and annee%400 != 0):
        print ("it is not a leap year")
    else :
        print ("it is a leap year")
| 3.5 | 4 |
nlptasks/dephead.py | ulf1/nlptasks | 2 | 12770082 | <filename>nlptasks/dephead.py<gh_stars>1-10
from .padding import pad_merge_adjac_maskseqs
from .vocab import texttoken_to_index
from typing import List, Tuple
import warnings
import de_core_news_lg as spacy_model
import spacy
import stanza
# https://universaldependencies.org/u/dep/index.html
UD2_RELS = [
'acl', 'acl:relcl', 'advcl', 'advmod', 'advmod:emph', 'amod', 'appos',
'aux', 'aux:pass', 'case', 'cc', 'cc:preconj', 'ccomp', 'clf', 'compound',
'compound:lvc', 'compound:prt', 'compound:redup', 'compound:svc', 'conj',
'cop', 'csubj', 'csubj:pass', 'dep', 'det', 'det:numgov', 'det:nummod',
'det:poss', 'discourse', 'dislocated', 'expl', 'expl:impers', 'expl:pass',
'expl:pv', 'fixed', 'flat', 'flat:foreign', 'flat:name', 'goeswith',
'iobj', 'list', 'mark', 'nmod', 'nmod:poss', 'nmod:tmod', 'nsubj',
'nsubj:pass', 'nummod', 'nummod:gov', 'obj', 'obl', 'obl:agent', 'obl:arg',
'obl:tmod', 'orphan', 'parataxis', 'punct', 'reparandum', 'root',
'vocative', 'xcomp'
]
TIGER_RELS = [
'ac', 'adc', 'ag', 'ams', 'app', 'avc', 'cc', 'cd', 'cj', 'cm', 'cp',
'cvc', 'da', 'dm', 'ep', 'ju', 'mnr', 'mo', 'ng', 'nk', 'nmc', 'oa',
'oa2', 'oc', 'og', 'op', 'par', 'pd', 'pg', 'ph', 'pm', 'pnc', 'punct',
'rc', 're', 'rs', 'sb', 'sbp', 'sp', 'svp', 'uc', 'vo', 'ROOT'
]
def factory(name: str):
"""Factory function to return a processing function for
dependency parsing
Parameters:
-----------
name : str
Identifier, e.g. 'spacy-de'
Example:
--------
import nlptasks as nt
    import nlptasks.dephead
    sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']]
    myfn = nt.dephead.factory("spacy-de")
    maskseqs, seqlens = myfn(sequences)
"""
if name in ("spacy", "spacy-de"):
return spacy_de
elif name in ("stanza", "stanza-de"):
return stanza_de
# elif name == "imsnpars_zdl":
# return imsnpars_zdl
else:
raise Exception(f"Unknown dependency parser: '{name}'")
def deprel_factory(name: str):
warnings.warn(
"Please call `nlptasks.deprel.factory` instead",
DeprecationWarning, stacklevel=2)
return factory(name)
def get_model(name: str):
"""Instantiate the pretrained model outside the deprel function
so that it only needs to be done once
Parameters:
-----------
name : str
Identfier of the model
Example:
--------
from nlptasks.deprel import deprel
model = deprel.get_model('spacy-de')
fn = deprel.factory('spacy-de')
dc, dp, sl = fn(sents, model=model)
"""
if name in ("spacy", "spacy-de"):
model = spacy_model.load()
model.disable_pipes(["ner", "tagger"])
return model
elif name in ("stanza", "stanza-de"):
return stanza.Pipeline(
lang='de', processors='tokenize,mwt,pos,lemma,depparse',
tokenize_pretokenized=True)
else:
raise Exception(f"Unknown dependency parser: '{name}'")
@pad_merge_adjac_maskseqs
def spacy_de(data: List[List[str]], model=None) -> (
List[List[Tuple[int, int]]], List[List[Tuple[int, int]]], List[int]):
"""Dependency relations with spaCy de_core_news_lg for German
Parameters:
-----------
data : List[List[str]]
List of token sequences
model (Default: None)
        Preloaded instance of the NLP model. See nlptasks.dephead.get_model
maxlen : Optional[int] = None
see @nlptasks.padding.pad_merge_adjac_maskseqs
padding : Optional[str] = 'pre'
see @nlptasks.padding.pad_merge_adjac_maskseqs
truncating : Optional[str] = 'pre'
see @nlptasks.padding.pad_merge_adjac_maskseqs
Returns:
--------
maskseqs : List[List[Tuple[int, int]]]
Sequences with token-parent relations and the one-hot encoded
dependency type
seqlens : List[int]
Length of each sequence that are also the matrix dimension of the
adjacency matrix
Example:
--------
import nlptasks as nt
import nlptasks.dephead
sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']]
maskseqs, seqlens = nt.dephead.spacy_de(
sequences, maxlen=3, padding='pre', truncating='pre')
"""
# (1) load spacy model
if not model:
model = spacy_model.load()
model.disable_pipes(["ner", "tagger"])
# parse dependencies of a pre-tokenized sentencens
parser = model.pipeline[0][1]
docs = [parser(spacy.tokens.doc.Doc(model.vocab, words=sequence))
for sequence in data]
adjac_parent = [[(t.head.i, t.i) for t in doc] for doc in docs]
rel_types = [[t.dep_ for t in doc] for doc in docs]
seqlens = [len(doc) for doc in docs]
# (2) Define TIGER RELATIONS as VOCAB
SCHEME = TIGER_RELS.copy()
SCHEME.append("[UNK]")
# (3) Encode deprel tags
rel_types = [texttoken_to_index(seq, SCHEME) for seq in rel_types]
onehot_types = [[(ri, ti) for ti, ri in enumerate(sent)]
for sent in rel_types]
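    # onehot_types stores (relation_id, token_position) pairs per sentence --
    # sparse coordinates of the one-hot dependency-label matrix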
# done
return adjac_parent, onehot_types, seqlens, len(SCHEME)
@pad_merge_adjac_maskseqs
def stanza_de(data: List[List[str]], model=None) -> (
List[List[Tuple[int, int]]], List[List[Tuple[int, int]]], List[int]):
"""Dependency relations with stanza for German
Parameters:
-----------
data : List[List[str]]
List of token sequences
model (Default: None)
        Preloaded instance of the NLP model. See nlptasks.dephead.get_model
maxlen : Optional[int] = None
see @nlptasks.padding.pad_merge_adjac_maskseqs
padding : Optional[str] = 'pre'
see @nlptasks.padding.pad_merge_adjac_maskseqs
truncating : Optional[str] = 'pre'
see @nlptasks.padding.pad_merge_adjac_maskseqs
Returns:
--------
maskseqs : List[List[Tuple[int, int]]]
Sequences with token-parent relations and the one-hot encoded
dependency type
seqlens : List[int]
Length of each sequence that are also the matrix dimension of the
adjacency matrix
Example:
--------
import nlptasks as nt
import nlptasks.dephead
sequences = [['Die', 'Kuh', 'ist', 'bunt', '.']]
maskseqs, seqlens = nt.dephead.stanza_de(
sequences, maxlen=4, padding='pre', truncating='pre')
"""
# (1) load spacy model
if not model:
model = stanza.Pipeline(
lang='de', processors='tokenize,mwt,pos,lemma,depparse',
tokenize_pretokenized=True)
# parse dependencies of a pre-tokenized sentencens
docs = model(data)
adjac_parent = [[(t.head, i) for i, t in enumerate(sent.words)]
for sent in docs.sentences]
rel_types = [[t.deprel for t in sent.words] for sent in docs.sentences]
seqlens = [len(sent.words) for sent in docs.sentences]
# (2) Define UD v2 RELATIONS as VOCAB
SCHEME = UD2_RELS.copy()
SCHEME.append("[UNK]")
# (3) Encode deprel tags
rel_types = [texttoken_to_index(seq, SCHEME) for seq in rel_types]
onehot_types = [[(ri, ti) for ti, ri in enumerate(sent)]
for sent in rel_types]
# done
return adjac_parent, onehot_types, seqlens, len(SCHEME)
| 2.140625 | 2 |
smlgui/ui.py | akshaybabloo/SNNML-GUI | 0 | 12770083 | <reponame>akshaybabloo/SNNML-GUI<gh_stars>0
"""
UI class to control the visibility, usability and control of GUI.
"""
import logging
import os
import numpy as np
from PyQt5 import uic, QtWidgets, QtGui, QtCore
from smlgui import __version__
from smlgui.utility import select_folder, loading_effects_decorator, get_sml_conf, write_sml_config, \
loading_effects_context, ReadCSV
from smlgui.widgets import TabWidget, CustomQMainWidget, CustomQDialog
__all__ = ['AboutUi', 'HomeUi', 'PreferenceUi', 'ImportUi', 'ExportUi']
logger = logging.getLogger(__name__)
conf = get_sml_conf()
dark_mode_check = QtCore.Qt.Checked if conf['DEFAULT']['dark_mode'] == "true" else QtCore.Qt.Unchecked
class AboutUi(CustomQDialog):
"""
Open's ``AboutUi`` GUI.
"""
def __init__(self):
super(AboutUi, self).__init__()
uic.loadUi(os.path.abspath('smlgui' + os.sep + 'gui' + os.sep + 'about.ui'), self)
content = """
Copyright <NAME>. Licensed under MIT. <br><br>
Spikes Markup Language (SML) Maker <br><br>
<b>Libraries that made it all possible:</b><br><br>
Python<br>
Click<br>
PyQT5<br>
QT<br>
NumPy
"""
self.setWindowTitle("AboutUi")
self.setWindowIcon(
QtGui.QIcon(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'logo.png'))
self.setWindowModality(QtCore.Qt.ApplicationModal) # Focus on this window.
self.textBrowser.setHtml(content)
spikes_logo = QtGui.QPixmap(
os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'spikes-logo.png')
self.logo.setPixmap(spikes_logo.scaled(99, 39, QtCore.Qt.IgnoreAspectRatio, QtCore.Qt.FastTransformation))
self.version.setText("Version: " + __version__)
logger.info("AboutUi GUI started.")
self.show()
def closeEvent(self, a0: QtGui.QCloseEvent):
logger.info("Exiting AboutUi.")
class PreferenceUi(CustomQDialog):
"""
Open ``preference`` pane.
"""
def __init__(self, parent=None):
global dark_mode_check
super(PreferenceUi, self).__init__(parent)
uic.loadUi(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'preference.ui', self)
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.setWindowIcon(
QtGui.QIcon(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'logo.png'))
self.ok_button.clicked.connect(self.on_ok)
self.dark_mode_check.setTristate(False)
self.dark_mode_check.setCheckState(dark_mode_check)
def on_ok(self):
"""
        Event handler for the ``Ok`` button.
"""
global dark_mode_check, conf
if self.dark_mode_check.isChecked():
if dark_mode_check is not QtCore.Qt.Checked:
dark_mode_check = QtCore.Qt.Checked
if conf['DEFAULT']['dark_mode'] != "true":
conf.set('DEFAULT', 'dark_mode', 'true')
write_sml_config(conf)
QtWidgets.QMessageBox.warning(self, "SML Maker", "Restart SML Maker to make changes.")
else:
dark_mode_check = QtCore.Qt.Unchecked
conf.set('DEFAULT', 'dark_mode', 'false')
write_sml_config(conf)
QtWidgets.QMessageBox.information(self, "SML Maker", "Restart SML Maker to make changes.")
class ImportUi(CustomQMainWidget):
"""
Imports SML and exports to CSV.
"""
def __init__(self, parent=None):
super(ImportUi, self).__init__(parent)
uic.loadUi(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'import.ui', self)
self.status_message = "Welcome to SML Importer!"
self.setWindowTitle("SML Importer")
self.messageBar.showMessage(self.status_message)
self.setWindowIcon(
QtGui.QIcon(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'logo.png'))
my_font = QtGui.QFont()
my_font.setBold(True)
my_font.setPixelSize(50)
# Text before loading the samples
self.temp_text_table = QtWidgets.QLabel()
self.temp_text_table.setText("Load SML")
self.temp_text_table.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.temp_text_table.setFont(my_font)
self.temp_text_table.setMinimumHeight(150)
self.temp_text_table.setMinimumWidth(400)
# Text before loading the samples
self.temp_text_stats = QtWidgets.QLabel()
self.temp_text_stats.setText("Load SML")
self.temp_text_stats.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.temp_text_stats.setFont(my_font)
self.temp_text_stats.setMinimumHeight(150)
self.temp_text_stats.setMinimumWidth(800)
self.table_layout.addWidget(self.temp_text_table)
self.stats_layout.addWidget(self.temp_text_stats)
self.samples_to_csv.setEnabled(False)
self.weights_to_csv.setEnabled(False)
self.connections_to_csv.setEnabled(False)
self.spikes_to_csv.setEnabled(False)
self.encoded_to_csv.setEnabled(False)
# GUI
self.load_sml_button.clicked.connect(self.load_table)
self.about_menu.triggered.connect(self.show_about)
logger.info("Exporter GUI started")
@loading_effects_decorator
def load_table(self):
"""
Populates the ``samples`` table.
"""
self.temp_text_table.deleteLater()
table_widget = TabWidget(np.random.randn(60, 128, 14))
self.table_layout.addWidget(table_widget)
@staticmethod
def show_about():
"""
Opening ``AboutUi``
"""
app = AboutUi()
app.exec_()
def closeEvent(self, a0: QtGui.QCloseEvent):
logger.info("Exiting ImportUi")
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
class ExportUi(CustomQMainWidget):
"""
Main class that loads and runs the ``export.ui``.
"""
def __init__(self, parent=None):
super(ExportUi, self).__init__(parent)
uic.loadUi(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'export.ui', self)
self.status_message = "Welcome to SML Exporter!"
self.setWindowTitle("SML Exporter")
self.messageBar.showMessage(self.status_message)
self.setWindowIcon(
QtGui.QIcon(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'logo.png'))
my_font = QtGui.QFont()
my_font.setBold(True)
my_font.setPixelSize(50)
# Text before loading the samples
self.temp_text_table = QtWidgets.QLabel()
self.temp_text_table.setText("Load Files")
self.temp_text_table.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.temp_text_table.setFont(my_font)
self.temp_text_table.setMinimumHeight(150)
self.temp_text_table.setMinimumWidth(400)
# Text before loading the samples
self.temp_text_stats = QtWidgets.QLabel()
self.temp_text_stats.setText("Load Files")
self.temp_text_stats.setAlignment(QtCore.Qt.AlignHCenter | QtCore.Qt.AlignVCenter)
self.temp_text_stats.setFont(my_font)
self.temp_text_stats.setMinimumHeight(150)
self.temp_text_stats.setMinimumWidth(500)
# Connections and events
self.load_samples_button.clicked.connect(self.load_table)
self.load_samples_button.installEventFilter(self)
self.stats_layout.addWidget(self.temp_text_stats)
self.table_layout.addWidget(self.temp_text_table)
self.about_menu.triggered.connect(self.show_about)
logger.info("Main GUI started")
def load_table(self):
"""
Populates the ``samples`` table.
"""
location = select_folder()
read_csv = ReadCSV(location)
with loading_effects_context():
try:
self.temp_text_table.deleteLater()
except Exception:
pass
# Remove if QTabWidget already exists.
for a in range(self.table_layout.count()):
if isinstance(self.table_layout.itemAt(a).widget(), QtWidgets.QTabWidget):
self.table_layout.itemAt(a).widget().deleteLater()
table_widget = TabWidget(read_csv.read_samples())
self.table_layout.addWidget(table_widget)
@staticmethod
def show_about():
"""
Opens ``AboutUi``
"""
app = AboutUi()
app.exec_()
def eventFilter(self, objects, event):
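        # Qt event-filter hook: widgets registered via installEventFilter()
        # route their events here, letting hover enter/leave update the
        # status-bar hint text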
if objects.objectName() == 'load_samples_button':
if event.type() == QtCore.QEvent.HoverEnter:
self.messageBar.showMessage("Loads all samples starting with sam1_*.csv")
return True
elif event.type() == QtCore.QEvent.HoverLeave:
self.messageBar.showMessage(self.status_message)
return True
return False
def closeEvent(self, a0: QtGui.QCloseEvent):
logger.info("Exiting ExportUi")
self.setAttribute(QtCore.Qt.WA_DeleteOnClose)
class HomeUi(CustomQMainWidget):
"""
Main class that loads and runs the ``main.ui``.
"""
def __init__(self, parent=None):
super(HomeUi, self).__init__(parent)
# self.setWindowFlags(QtCore.Qt.FramelessWindowHint | QtCore.Qt.WindowSystemMenuHint) # Frameless window
uic.loadUi(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'home.ui', self)
self.status_message = "Welcome to SML Maker!"
self.messageBar.showMessage(self.status_message)
self.setWindowIcon(
QtGui.QIcon(os.getcwd() + os.sep + 'smlgui' + os.sep + 'gui' + os.sep + 'assets' + os.sep + 'logo.png'))
# Connections and events
self.export_button.clicked.connect(self.show_export_ui)
self.import_button.clicked.connect(self.show_import_ui)
self.export_button.installEventFilter(self)
self.import_button.installEventFilter(self)
self.about_menu.triggered.connect(self.show_about)
self.preference_menu.triggered.connect(self.show_preference)
self.exit_menu.triggered.connect(self.close)
self.export_ui = ExportUi()
self.import_ui = ImportUi()
logger.info("HomeUi GUI started")
# showing the app gui to user
self.show()
@staticmethod
def show_about():
"""
Opening ``AboutUi``
"""
app = AboutUi()
app.exec_()
@staticmethod
def show_preference():
"""
Opening ``AboutUi``
"""
app = PreferenceUi()
app.exec_()
def show_export_ui(self):
"""
Opening ``ExportUi``
"""
self.export_ui.show()
self.close()
def show_import_ui(self):
"""
Opening ``ImportUi``
"""
self.import_ui.show()
self.close()
def eventFilter(self, objects, event):
if objects.objectName() == 'export_button':
if event.type() == QtCore.QEvent.HoverEnter:
self.messageBar.showMessage("Export all your data to SML file.")
return True
elif event.type() == QtCore.QEvent.HoverLeave:
self.messageBar.showMessage(self.status_message)
return True
elif objects.objectName() == 'import_button':
if event.type() == QtCore.QEvent.HoverEnter:
self.messageBar.showMessage("Import SML and export it to CSV, JSON or Text.")
return True
elif event.type() == QtCore.QEvent.HoverLeave:
self.messageBar.showMessage(self.status_message)
return True
return False
def closeEvent(self, a0: QtGui.QCloseEvent):
logger.info("Exiting. Bye!")
| 1.890625 | 2 |
noc/bouncingball/ball.py | kantel/pygamezero | 1 | 12770084 | from pvector import PVector
WIDTH = 400
HEIGHT = 400
class Ball():
def __init__(self, x, y, v_x, v_y, radius, color):
self.position = PVector(x, y)
self.radius = radius
self.color = color
        self.velocity = PVector(v_x, v_y)
def show(self, screen):
screen.draw.filled_circle((self.position.x, self.position.y), self.radius, self.color)
def move(self):
self.position.add(self.velocity)
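        # elastic bounce: flip the velocity component whose wall was hit once
        # the ball's edge crosses the window boundary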
if (self.position.x > WIDTH - self.radius) or (self.position.x < self.radius):
self.velocity.x *= -1
if (self.position.y > HEIGHT - self.radius) or (self.position.y < self.radius):
self.velocity.y *= -1 | 3.515625 | 4 |
models.py | kdropps87/dotstarweb | 0 | 12770085 | <filename>models.py
from sqlalchemy import Boolean, Column, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.sql.sqltypes import Float, Enum, Integer, String
from database import Base
import enum
class LightingMode(str, enum.Enum):
single_color = "Single Color"
simple_pattern = "Simple Pattern"
class Strip(Base):
__tablename__ = "strips"
id = Column(Integer, primary_key=True, index=True)
name = Column(String)
location = Column(String)
description = Column(String)
clock_pin = Column(String)
data_pin = Column(String)
led_count = Column(Integer)
default_brightness = Column(Float)
power = Column(Boolean)
single_color_brightness = Column(Float)
single_color_hex = Column(String)
mode = Column(Enum(LightingMode))
simple_pattern = relationship("SimplePattern", back_populates="strip")
class Pattern(Base):
__tablename__ = "patterns"
pattern_id = Column(Integer, primary_key=True, index=True)
element_id = Column(Integer, primary_key=True, index=True)
red = Column(Integer)
green = Column(Integer)
blue = Column(Integer)
brightness = Column(Float)
class SimplePattern(Base):
__tablename__ = "simple_patterns"
strip_id = Column(Integer, ForeignKey("strips.id"), primary_key=True, index=True,)
pattern_id = Column(Integer, ForeignKey("patterns.pattern_id"), primary_key=True, index=True)
number_of_sequences = Column(Integer)
strip = relationship("Strip", back_populates="simple_pattern")
elements = relationship("Pattern")
| 2.5 | 2 |
History/DelayDDPG_origin_version.py | GuanShiTing/DL_RL_Zoo | 1 | 12770086 | <reponame>GuanShiTing/DL_RL_Zoo
import os
from time import time as timer
import gym
import numpy as np
import numpy.random as rd
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
GitHub: https://github.com/Yonv1943/LightWeight_Stable_ReinfLearning
Origin: Zen4 Jia1Hao2(Yonv1943), 2019-07-07
LunarLanderContinuous-v2
1531s E92, 1455s E97
LunarLander-v2 (Discrete)
1530s E147, 3036s E277,
BipedalWalker-v2
1620s E172, 1840s E178
"""
class Arguments:
"""
All the hyper-parameter is here.
If you are not familiar with this algorithm,
then I do not recommend that you modify other parameters that are not listed here.
The comments below are all my subjective guesses for reference only!
If you wanna change this code, please keep READABILITY and ELEGANT! (read 'import this')
Write by GitHub: Yonv1943 Z<NAME>, 2019-07-07
"""
'''device'''
gpu_id = 1 # sys.argv[0][-4] # ! !!!!!!!!! !!!! !!!
mod_dir = 'DelayDDPG_%s' % gpu_id # ! !!!!!!!!! !!!! !!!
env_name = "LunarLanderContinuous-v2"
is_remove = True # remove the pre-training data?
# is_remove = True, yes, remove the directory of model
# is_remove = None, ask me when the program is running
# is_remove = False, keep the pre-training data and load it when necessary
random_seed = 1943 # random_seed for py_torch and gym.env
'''training'''
mod_dim = 2 ** 8 # the network width of actor_net and critic_net
# low mod_dim should correspond to low dropout_rate
memories_size = int(2 ** 18) # memories capacity (memories: replay buffer)
# low memories capacity leads to low reward in the later stage of training.
batch_size = 2 ** 8 # num of transitions sampled from replay buffer.
# big batch_size makes training more stable.
update_gap = 2 ** 8 # update the target_net, delay update
# big update_gap will lengthen the training time, but get a better policy network
eval_epoch = 2 ** 2 # eval this model after training. and render the env
'''break'''
target_reward = 200 # when 'epoch_reward > target_reward', break the training loop
# "LunarLanderContinuous-v2" Recommended range(100, 200)
smooth_kernel = 16 # smooth the reward curve
# big smooth_kernel makes the curve more smooth. Recommended range(16, 64)
print_gap = 2 ** 5 # print the Reward, actor_loss, critic_loss
# print the information every 'print_gap'sec
max_epoch = 1000 # max num of train_epoch
# if 'epoch > max_epoch' or 'epoch_reward > target_reward', break the training loop
max_step = 2000 # max steps in one epoch
# if 'iter_num > max_step' or 'done', break. Then reset the env and start a new round of training
'''algorithm'''
gamma = 0.99 # discount for future rewards
# big gamma leads to a long-term strategy
explore_noise = 0.4 # action = select_action(state) + noise, 'explore_noise': sigma of noise
# big explore_noise is suitable when the fault tolerant rate of ENV is high.
# low explore_noise delays the time when the model reaches high reward
policy_noise = 0.8 # actor_target(next_state) + noise, 'policy_noise': sigma of noise
# low policy_noise lead to a stable training, but a longer learning period and clumsy movements
# Epsilon-Greedy, the variance of noise don not decay here.
# 'explore_noise' and 'explore_noise' act on 'action' (in range(-1, 1)), before 'action*action_max'
# if 'LunarLanderContinuous-v2':
# env_name = "LunarLanderContinuous-v2"
# if 'Pendulum-v0':
# env_name = "Pendulum-v0"
# max_step = 200
# if "BipedalWalker-v2":
# env_name = "BipedalWalker-v2"
# target_reward = 100 # 300
# if "BipedalWalkerHardcore-v2":
# env_name = "BipedalWalkerHardcore-v2"
# target_reward = 200 # 300
# mod_dim = 2 ** 9 # the network width of actor_net and critic_net
# memories_size = int(2 ** 19) # memories capacity (memories: replay buffer)
# max_step = 8000 # max steps in one epoch
# is_remove = None # remove the pre-training data?
# """Discrete_Action"""
# if 'LunarLander-v2':
# env_name = "LunarLander-v2"
# if 'CartPole-v0':
# env_name = "CartPole-v0"
# target_reward = 195
# print_gap = 2 ** 2
# explore_noise = 0.1
# policy_noise = 0.8
# memories_size = 2 ** 16
# batch_size = 2 ** 7
# mod_dim = 2 ** 7
def f_hard_swish(x):
return F.relu6(x + 3) / 6 * x
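# hard-swish: x * relu6(x + 3) / 6, a cheap piecewise-linear approximation of
# x * sigmoid(x) that avoids computing an exponential (cf. MobileNetV3).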
class Actor(nn.Module):
def __init__(self, state_dim, action_dim, mod_dim):
super(Actor, self).__init__()
inp_dim = state_dim
out_dim = action_dim
self.dense0 = nn.Linear(inp_dim, mod_dim * 1)
self.dense1 = nn.Linear(mod_dim * 1, mod_dim * 1)
self.dense2 = nn.Linear(mod_dim * 2, mod_dim * 2)
self.dense3 = nn.Linear(mod_dim * 4, out_dim)
def forward(self, x0):
x1 = f_hard_swish(self.dense0(x0))
x2 = torch.cat((x1, f_hard_swish(self.dense1(x1))), dim=1)
x3 = torch.cat((x2, f_hard_swish(self.dense2(x2))), dim=1)
x3 = F.dropout(x3, p=rd.uniform(0.0, 0.5), training=self.training)
x4 = torch.tanh(self.dense3(x3))
return x4
class Critic(nn.Module):
def __init__(self, state_dim, action_dim, mod_dim):
super(Critic, self).__init__()
inp_dim = state_dim + action_dim
out_dim = 1
self.dense0 = nn.Linear(inp_dim, mod_dim * 1)
self.dense1 = nn.Linear(mod_dim * 1, mod_dim)
self.dense2 = nn.Linear(mod_dim * 2, mod_dim * 2)
self.dense3 = nn.Linear(mod_dim * 4, out_dim)
def forward(self, s, a):
x0 = torch.cat((s, a), dim=1)
x1 = f_hard_swish(self.dense0(x0))
x2 = torch.cat((x1, f_hard_swish(self.dense1(x1))), dim=1)
x3 = torch.cat((x2, f_hard_swish(self.dense2(x2))), dim=1)
x3 = F.dropout(x3, p=rd.uniform(0.0, 0.5), training=self.training)
x4 = self.dense3(x3)
return x4
class AgentDelayDDPG:
def __init__(self, state_dim, action_dim, mod_dim,
gamma, policy_noise, update_gap):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
''''''
self.state_dim = state_dim
self.action_dim = action_dim
self.state_idx = 1 + 1 + state_dim # reward_dim==1, done_dim==1, state_dim
self.action_idx = self.state_idx + action_dim
from torch import optim
self.act = Actor(state_dim, action_dim, mod_dim).to(self.device)
self.act_optimizer = optim.Adam(self.act.parameters(), lr=4e-4)
self.act.train()
self.act_target = Actor(state_dim, action_dim, mod_dim).to(self.device)
self.act_target.load_state_dict(self.act.state_dict())
self.act_target.eval()
self.cri = Critic(state_dim, action_dim, mod_dim).to(self.device)
self.cri_optimizer = optim.Adam(self.cri.parameters(), lr=1e-3)
self.cri.train()
self.cri_target = Critic(state_dim, action_dim, mod_dim).to(self.device)
self.cri_target.load_state_dict(self.cri.state_dict())
self.cri_target.eval()
self.criterion = nn.SmoothL1Loss()
self.update_counter = 0
self.update_gap = update_gap
self.policy_noise = policy_noise
self.gamma = gamma
def select_action(self, state):
state = torch.tensor((state,), dtype=torch.float32).to(self.device)
action = self.act(state).cpu().data.numpy()
return action[0]
def update(self, memories, iter_num, batch_size):
actor_loss_avg, critic_loss_avg = 0, 0
k = 1 + memories.size / memories.memories_num
iter_num = int(k * iter_num)
batch_size = int(k * batch_size)
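        # scale update count and batch size with replay-buffer fullness
        # (k grows from 1 toward 2), so training effort ramps up as
        # experience accumulates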
for i in range(iter_num):
with torch.no_grad():
memory = memories.sample(batch_size)
memory = torch.tensor(memory, dtype=torch.float32).to(self.device)
reward = memory[:, 0:1]
undone = memory[:, 1:2]
state = memory[:, 2:self.state_idx]
action = memory[:, self.state_idx:self.action_idx]
next_state = memory[:, self.action_idx:]
noise = torch.randn(action.size(), dtype=torch.float32, device=self.device) * self.policy_noise
next_action = self.act_target(next_state) + noise
next_action = next_action.clamp(-1.0, 1.0)
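            # TD(0) target: r + gamma * (1 - done) * Q'(s', a'); `undone`
            # zeroes the bootstrap term at terminal transitions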
with torch.no_grad():
q_target = self.cri_target(next_state, next_action)
q_target = reward + undone * self.gamma * q_target
q_eval = self.cri(state, action)
critic_loss = self.criterion(q_eval, q_target)
critic_loss_avg += critic_loss.item()
self.cri_optimizer.zero_grad()
critic_loss.backward()
self.cri_optimizer.step()
actor_loss = -self.cri(state, self.act(state)).mean()
actor_loss_avg += actor_loss.item()
self.act_optimizer.zero_grad()
actor_loss.backward()
self.act_optimizer.step()
self.update_counter += 1
if self.update_counter == self.update_gap:
self.update_counter = 0
self.act_target.load_state_dict(self.act.state_dict())
self.cri_target.load_state_dict(self.cri.state_dict())
actor_loss_avg /= iter_num
critic_loss_avg /= iter_num
return actor_loss_avg, critic_loss_avg
def save(self, mod_dir, save_actor_only=False):
if save_actor_only:
torch.save(self.act.state_dict(), '%s/actor.pth' % (mod_dir,))
print("Saved: (actor_only)", mod_dir)
else:
torch.save(self.act.state_dict(), '%s/actor.pth' % (mod_dir,))
torch.save(self.cri.state_dict(), '%s/critic.pth' % (mod_dir,))
torch.save(self.act_target.state_dict(), '%s/actor_target.pth' % (mod_dir,))
torch.save(self.cri_target.state_dict(), '%s/critic_target.pth' % (mod_dir,))
torch.save(self.act_optimizer.state_dict(), '%s/actor_optimizer.pth' % (mod_dir,))
torch.save(self.cri_optimizer.state_dict(), '%s/critic_optimizer.pth' % (mod_dir,))
print("Saved:", mod_dir)
def load(self, mod_dir, load_actor_only=False):
if load_actor_only:
print("Loading: (actor_only)", mod_dir)
self.act_target.load_state_dict(
torch.load('%s/actor_target.pth' % (mod_dir,), map_location=lambda storage, loc: storage))
else:
def map_location(storage, loc): # ignore storage location (device id)
return storage
print("Loading:", mod_dir)
torch.load('%s/actor.pth' % (mod_dir,), map_location)
torch.load('%s/critic.pth' % (mod_dir,), map_location)
torch.load('%s/actor_target.pth' % (mod_dir,), map_location)
torch.load('%s/critic_target.pth' % (mod_dir,), map_location)
torch.load('%s/actor_optimizer.pth' % (mod_dir,), map_location)
torch.load('%s/critic_optimizer.pth' % (mod_dir,), map_location)
class Memories:
ptr_u = 0 # pointer_for_update
ptr_s = 0 # pointer_for_sample
is_full = False
def __init__(self, memories_num, state_dim, action_dim, ):
self.size = 0
memories_num = int(memories_num)
self.memories_num = memories_num
reward_dim = 1
done_dim = 1
memories_dim = reward_dim + done_dim + state_dim + action_dim + state_dim
self.memories = np.empty((memories_num, memories_dim), dtype=np.float32)
self.indices = np.arange(memories_num)
def add(self, memory):
self.memories[self.ptr_u, :] = memory
self.ptr_u += 1
if self.ptr_u == self.memories_num:
self.ptr_u = 0
if not self.is_full:
self.is_full = True
print('Memories is_full!')
self.size = self.memories_num if self.is_full else self.ptr_u
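    # sample() strides through a lazily shuffled index permutation and
    # reshuffles whenever the pointer wraps, approximating sampling without
    # replacement within each pass over the buffer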
def sample(self, batch_size):
self.ptr_s += batch_size
if self.ptr_s >= self.size:
self.ptr_s = batch_size
rd.shuffle(self.indices[:self.size])
batch_memory = self.memories[self.indices[self.ptr_s - batch_size:self.ptr_s]]
return batch_memory
def train():
args = Arguments()
gpu_id = args.gpu_id
env_name = args.env_name
mod_dir = args.mod_dir
memories_size = args.memories_size
batch_size = args.batch_size
update_gap = args.update_gap
mod_dim = args.mod_dim
target_reward = args.target_reward
smooth_kernel = args.smooth_kernel
print_gap = args.print_gap
max_step = args.max_step
max_epoch = args.max_epoch
gamma = args.gamma
explore_noise = args.explore_noise
policy_noise = args.policy_noise
random_seed = args.random_seed
def whether_remove_history(remove=None):
print(' GPUid: %s' % gpu_id)
print(' Model: %s' % mod_dir)
if remove is None:
remove = bool(input(" 'y' to REMOVE: %s? " % mod_dir) == 'y')
if remove:
import shutil
shutil.rmtree(mod_dir, ignore_errors=True)
print("| Remove")
del shutil
if not os.path.exists(mod_dir):
os.mkdir(mod_dir)
whether_remove_history(remove=args.is_remove)
'''env init'''
env = gym.make(env_name)
env.seed(random_seed)
state_dim = env.observation_space.shape[0]
try:
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
except IndexError:
action_dim = env.action_space.n # Discrete
action_max = None
print('action_space: Discrete:', action_dim)
'''mod init'''
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
policy = AgentDelayDDPG(state_dim, action_dim, mod_dim,
gamma, policy_noise, update_gap)
memories = Memories(memories_size, state_dim, action_dim)
torch.set_num_threads(8)
torch.manual_seed(random_seed)
np.random.seed(random_seed)
'''train loop'''
rd_normal = np.random.normal
recorders = list()
rewards = list()
start_time = show_time = timer()
try:
for epoch in range(max_epoch):
state = env.reset()
epoch_reward = 0
iter_num = 0
for iter_num in range(max_step):
action = policy.select_action(state)
action += rd_normal(0, explore_noise, size=action_dim) # add explore noise
action = action.clip(-1.0, 1.0)
next_state, reward, done, _ = env.step(adapt_action(action, action_max, action_dim))
memories.add(np.hstack(((reward, 1 - float(done)), state, action, next_state)))
state = next_state
epoch_reward += reward
if done:
break
al, cl = policy.update(memories, iter_num, batch_size)
recorders.append((epoch, al, cl))
rewards.append(epoch_reward)
smooth_reward = np.average(rewards[-smooth_kernel:])
if timer() - show_time > print_gap:
show_time = timer()
print("%3i\tSmoR: %3i\tEpiR %3i\t|A %.3f, C %.3f"
% (epoch, smooth_reward, epoch_reward, al, cl))
if smooth_reward > target_reward and epoch_reward > target_reward:
print("########## Solved! ###########")
print("%3i\tSmoR: %3i\tEpiR %3i\t|A %.3f, C %.3f"
% (epoch, smooth_reward, epoch_reward, al, cl))
break
if epoch_reward > target_reward: # eval and break
print("Eval: %.2f" % epoch_reward)
policy.act.eval()
eva_rewards = list()
eva_epoch = 100
for eval_epoch in range(eva_epoch):
state = env.reset()
eva_reward = 0
for _ in range(max_step):
action = policy.select_action(state)
state, reward, done, _ = env.step(adapt_action(action, action_max, action_dim=False))
eva_reward += reward
# env.render()
if done:
break
eva_rewards.append(eva_reward)
temp_target_reward = target_reward * (len(eva_rewards) / eva_epoch)
if np.average(eva_rewards) < temp_target_reward:
break # break the evaluating loop ahead of time.
if np.average(eva_rewards) > target_reward:
print("########## Solved! ###########")
print("%3i\tSmoR: %3i\tEpiR %3i\t|A %.3f, C %.3f"
% (epoch, smooth_reward, epoch_reward, al, cl))
break
policy.act.train()
except KeyboardInterrupt:
print("KeyboardInterrupt")
finally:
print('TimeUsed:', int(timer() - start_time))
policy.save(mod_dir)
recorders = np.concatenate((np.array(rewards)[:, np.newaxis],
recorders), axis=1)
report_plot(recorders, smooth_kernel, mod_dir,
img_save_name="%s_plot.png" % (mod_dir,))
def evals():
args = Arguments()
gpu_id = args.gpu_id
mod_dir = args.mod_dir
env_name = args.env_name
eval_epoch = args.eval_epoch
max_step = args.max_step
mod_dim = args.mod_dim
'''env init'''
env = gym.make(env_name)
state_dim = env.observation_space.shape[0]
try:
action_dim = env.action_space.shape[0]
action_max = float(env.action_space.high[0])
except IndexError:
action_dim = env.action_space.n # Discrete
action_max = None
print('action_space: Discrete:', action_dim)
'''mod init'''
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)
policy = AgentDelayDDPG(state_dim, action_dim, mod_dim,
gamma=0, policy_noise=0, update_gap=0)
# (gamma=0, policy_noise=0, update_gap=0) are not required for evaluating
policy.load(mod_dir, load_actor_only=True)
policy.act.eval()
policy.cri.eval()
for epoch in range(eval_epoch):
epoch_reward = 0
state = env.reset()
for iter_num in range(max_step):
action = policy.select_action(state)
state, reward, done, _ = env.step(adapt_action(action, action_max, action_dim=False))
epoch_reward += reward
env.render()
# Image.fromarray(env.render(mode='rgb_array')).save('%s/img_%4i.png'%(mod_dir, iter_num))
if done:
break
print("%3i\tEpiR %3i" % (epoch, epoch_reward))
env.close()
def adapt_action(action, action_max, action_dim):
"""
:param action: belongs to range(-1, 1), makes it suit for env.step(action)
:param action_max: if it is False, means DISCRETE action_space
:param action_dim: if it is False, means DISCRETE action_space and not train.
:return: a compatible action for env
"""
if action_max: # action_space: Continuous
return action * action_max
elif action_dim: # action_space: Discrete and is_train
action_prob = action + 1.00001
action_prob /= sum(action_prob)
return rd.choice(action_dim, p=action_prob)
else: # action_space: Discrete and is_eval
return np.argmax(action)
def report_plot(recorders, smooth_kernel, mod_dir, img_save_name):
np.save('%s/recorders.npy' % mod_dir, recorders)
# recorders = np.load('%s/recorders.npy'% mod_dir)
# report_plot(recorders=np.load('recorders.npy', ), smooth_kernel=32, mod_dir=0, save_name='TD.png')
    if len(recorders) == 0:
return print('Record is empty')
else:
print("Matplotlib Plot:", img_save_name)
import matplotlib.pyplot as plt
y_reward = np.array(recorders[:, 0]).clip(-500, 500)
y_reward_smooth = np.pad(y_reward, (smooth_kernel - 1, 0), mode='reflect')
y_reward_smooth = np.convolve(y_reward_smooth, np.ones(smooth_kernel) / smooth_kernel, mode='valid')
x_epoch = np.array(recorders[:, 1])
fig, axs = plt.subplots(3)
plt.title(img_save_name, y=3.5)
axs[0].plot(x_epoch, y_reward, label='Reward', linestyle=':')
axs[0].plot(x_epoch, y_reward_smooth, label='Smooth R')
axs[0].legend()
axs[1].plot(x_epoch, recorders[:, 2], label='loss_A')
axs[2].plot(x_epoch, recorders[:, 3], label='loss_C')
plt.savefig("%s/%s" % (mod_dir, img_save_name))
plt.show()
if __name__ == '__main__':
train()
evals()
| 2.15625 | 2 |
best_browsable_api/templatetags/browsable_api.py | alsur/best-browsable-api | 2 | 12770087 | from django.template import Library
from django.utils.encoding import force_text
register = Library()
def force_text_filter(obj):
return force_text(obj)
register.filter('force_text', force_text_filter)
| 1.554688 | 2 |
freenit/models/mixins.py | mekanix/fastapi-playground | 0 | 12770088 | from typing import Optional
import ormar
import pydantic
from ..config import getConfig
config = getConfig()
class AllOptional(pydantic.main.ModelMetaclass):
def __new__(self, name, bases, namespaces, **kwargs):
annotations = namespaces.get("__annotations__", {})
for base in bases:
annotations = {**annotations, **base.__annotations__}
for field in annotations:
if not field.startswith("__"):
annotations[field] = Optional[annotations[field]]
namespaces["__annotations__"] = annotations
return super().__new__(self, name, bases, namespaces, **kwargs)
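# AllOptional rewrites every annotation (including inherited ones) to
# Optional[...] at class-creation time -- useful for PATCH-style schemas,
# e.g. (hypothetical model, for illustration only):
#
#     class UserPatch(UserSchema, metaclass=AllOptional):
#         pass  # every UserSchema field becomes optional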
class MainMeta(ormar.ModelMeta):
metadata = config.metadata
database = config.database
| 2.28125 | 2 |
src/apex/algo/shared.py | kpwhri/apex_iud_nlp | 1 | 12770089 | <reponame>kpwhri/apex_iud_nlp
from apex.algo.pattern import Pattern
# date pattern
years_ago = r'(?:\d+ (?:year|yr|week|wk|month|mon|day)s? (?:ago|before|previous))'
date_pat = r'\d+[-/]\d+[-/]\d+'
date2_pat = r'\d+[/]\d+'
month_pat = r'\b(?:jan|feb|mar|apr|may|jun|jul|aug|sept|oct|nov|dec)\w*(?:\W*\d{1,2})?\W*\d{4}'
month_only_pat = r'in\b(?:jan|feb|mar|apr|may|jun|jul|aug|sept|oct|nov|dec)\w*'
DATE_PAT = Pattern(f'({years_ago}|{date_pat}|{date2_pat}|{month_pat}|{month_only_pat})')
# iud pattern
iuds = r'\b(iuds?|intrauterine( contraceptive)? devices?)'
lng_iuds = r'(lng ius|levonorgestrel( (releasing|rlse))? (intrauterine|us))'
brand = r'(mirena|paragu?ard|skyla\b|lilett?a|kyleena|copper)'
IUD = Pattern(f'({iuds}|{lng_iuds}|{brand})')
# status annotation patterns
in_place = r'(?<!not) in (place|situ)\b'
boilerplate = r'\b(complication|pamphlet|warning|information|review|side effect|counsel|\bsign|infection|ensure|' \
r'cramps|risk|\bif\b|after your visit|conceive|appt|appointment|due (to|for|at)|recommend|' \
r'pregnan|pamphlet|schedul|doctor|contact|\brare|\bhow\b|\bcall|includ|failure|' \
r'associated|avoid|instruct|guideline)'
possible = r'\b(unlikely|\bposs\b|possib(ly|le|ility)|improbable|potential|susp(ect|icious)|' \
r'chance|may\b|afraid|concern|tentative|doubt|thought|think)'
POSSIBLE = Pattern(possible)
negation = r'(no evidence|without|r/o|rule out|normal|\bnot?\b|\bor\b|denies)'
historical = r'(history|previous|\bhx\b|\bpast\b|\bh/o\b)'
# avoid months (followed by day/year)
# avoid 'last' or 'in' or 'since'
safe_may = r'(?<!in|st|ce) may (?!\d)'
hypothetical = r'(option|possib\w+|desire|want|will|\bcan\b|usual|' \
r'\bor\b|like|would|need|until|request|when|you ll|' \
r'\bif\b|consider|concern|return|nervous|anxious|to be remov|could|' \
r'discuss|inform|should|\btry|once|worr(y|ied)|question|ideal)'
| 2.46875 | 2 |
day1.py | luciansmith/adventOfCode | 0 | 12770090 | import numpy
prev = numpy.inf
prev2 = numpy.inf
prev3 = numpy.inf
increases = 0
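# Sliding-window trick: comparing adjacent 3-measurement sums a+b+c < b+c+d
# reduces to a < d, so only the value three readings back needs comparing.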
for line in open("day1_input.txt"):
# for line in open("day1_example.txt"):
next = int(line.strip())
if prev3 < next:
increases += 1
prev3 = prev2
prev2 = prev
prev = next
print(increases)
| 3.1875 | 3 |
issue_order/migrations/0015_auto_20180823_2108.py | jiejiang/courier | 0 | 12770091 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2018-08-23 20:08
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('issue_order', '0014_auto_20180819_2108'),
]
operations = [
migrations.CreateModel(
name='Route',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('system', models.CharField(choices=[('jixun', '\u5409\u8bafCC\u7ebf'), ('postal', '\u90ae\u653fBC\u7ebf'), ('yunda', '\u97f5\u8fbeCC\u7ebf')], db_index=True, max_length=32)),
('code', models.CharField(db_index=True, max_length=64)),
('name', models.CharField(max_length=64)),
],
),
migrations.RemoveField(
model_name='courierorder',
name='system',
),
migrations.AddField(
model_name='courierorder',
name='route',
field=models.ForeignKey(default=0, on_delete=django.db.models.deletion.PROTECT, to='issue_order.Route'),
preserve_default=False,
),
]
| 1.617188 | 2 |
iepy/webui/corpus/migrations/0014_data_migration_move_metadata.py | francolq/iepy | 813 | 12770092 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging
from django.db import models, migrations
logging.basicConfig(format="%(asctime)-15s %(message)s")
logger = logging.getLogger(__file__)
logger.setLevel(logging.INFO)
BULK_SIZE = 2500
def move_metadata(apps, schema_editor):
IEDocument = apps.get_model('corpus', 'IEDocument')
IEDocumentMetadata = apps.get_model('corpus', 'IEDocumentMetadata')
documents = IEDocument.objects.all()
total = documents.count()
objects_to_create = []
logger.info("Creating missing documents metadata objects")
for i, document in enumerate(documents.iterator()):
if i % BULK_SIZE == 0:
logger.info("Created {} out of {}".format(i, total))
if objects_to_create:
IEDocumentMetadata.objects.bulk_create(objects_to_create)
objects_to_create = []
objects_to_create.append(IEDocumentMetadata(
title=document.title,
url=document.url,
items=document.metadata,
document_tmp=document
))
if objects_to_create:
logger.info("Created {} out of {}".format(i+1, total))
IEDocumentMetadata.objects.bulk_create(objects_to_create)
logger.info("Updating documents to point to their metadata objects")
doc_mtds = IEDocumentMetadata.objects.filter(document_tmp__metadata_fk__isnull=True)
total = doc_mtds.count()
for i, doc_mtd in enumerate(doc_mtds):
if i % BULK_SIZE == 0:
logger.info("Updated {} out of {}".format(i, total))
IEDocument.objects.filter(pk=doc_mtd.document_tmp_id).update(metadata_fk=doc_mtd.id)
logger.info("Updated {} out of {}".format(total, total))
class Migration(migrations.Migration):
dependencies = [
('corpus', '0013_create_metadata_model'),
]
operations = [
migrations.RunPython(move_metadata),
]
| 2 | 2 |
ymir/command/tests/unit/test_cmd_merge.py | under-chaos/ymir | 1 | 12770093 | <gh_stars>1-10
import logging
import os
import shutil
from typing import Dict, List, Tuple
import unittest
from google.protobuf.json_format import MessageToDict, ParseDict
from mir.commands.merge import CmdMerge
from mir.protos import mir_command_pb2 as mirpb
from mir.tools.code import MirCode
import tests.utils as test_utils
class TestMergeCmd(unittest.TestCase):
def __init__(self, methodName: str) -> None:
super().__init__(methodName=methodName)
self._mir_root = test_utils.dir_test_root(self.id().split(".")[-3:])
@classmethod
def setUpClass(cls) -> None:
return super().setUpClass()
@classmethod
def tearDownClass(cls) -> None:
return super().tearDownClass()
def setUp(self):
test_utils.check_commands()
self._prepare_dirs()
self._prepare_mir_repo()
def tearDown(self):
self._deprepare_dirs()
pass
# protected: prepare env
def _prepare_dirs(self):
test_utils.remake_dirs(self._mir_root)
def _deprepare_dirs(self):
if os.path.isdir(self._mir_root):
shutil.rmtree(self._mir_root)
def _prepare_mir_repo(self):
test_utils.mir_repo_init(self._mir_root)
# branches:
# a: a0, a1, a2, a3
# b: b0, b1, b2
# d: a0, d0, d1
test_utils.mir_repo_create_branch(self._mir_root, "a")
self._prepare_mir_branch_a()
test_utils.mir_repo_create_branch(self._mir_root, "b")
self._prepare_mir_branch_b()
# test_utils.mir_repo_create_branch(self._mir_root, "c")
# self._prepare_mir_branch_c()
test_utils.mir_repo_create_branch(self._mir_root, "d")
self._prepare_mir_branch_d()
# test_utils.mir_repo_create_branch(self._mir_root, "e")
# self._prepare_mir_branch_e()
test_utils.mir_repo_checkout(self._mir_root, "master")
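    # src_revs syntax used by the test cases below: branch names separated by
    # ';', each optionally prefixed with a tvt tag ('tr:' training, 'va:'
    # validation), e.g. 'b;a' or 'tr:a;va:d'.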
def _generate_attribute_for_asset(width: int, height: int, tvt_type: int = mirpb.TvtTypeUnknown) -> dict:
if tvt_type == mirpb.TvtTypeUnknown:
return {'asset_type': 'AssetTypeImageJpeg', 'width': width, 'height': height, 'image_channels': 3}
else:
return {
'asset_type': 'AssetTypeImageJpeg',
'width': width,
'height': height,
'image_channels': 3,
"tvt_type": mirpb.TvtType.Name(tvt_type)
}
def _generate_annotations_for_asset(type_ids: List[int], x: int, y: int):
annotations_list = []
for idx, type_id in enumerate(type_ids):
annotations_list.append({
'class_id': type_id,
'box': {
'x': idx * 100 + x,
'y': y,
'w': 50,
'h': 50
},
})
return {'annotations': annotations_list}
def _generate_keywords_for_asset(predefined: List[int], customized: List[str]):
return {'predifined_keyids': predefined, 'customized_keywords': customized}
def _generate_task(task_id: str, name: str, type: int, timestamp: int):
return {
'type': type,
'name': name,
'task_id': task_id,
'timestamp': timestamp,
'model': {
'model_hash': 'model_hash'
}
}
def _prepare_mir_branch(self, assets_and_keywords: Dict[str, Tuple[List[int], List[str]]], size: int, task_id: str,
task_timestamp: int, commit_msg: str):
mir_annotations = mirpb.MirAnnotations()
mir_keywords = mirpb.MirKeywords()
mir_metadatas = mirpb.MirMetadatas()
mir_tasks = mirpb.MirTasks()
dict_metadatas = {'attributes': {}}
for asset_id in assets_and_keywords:
dict_metadatas["attributes"][asset_id] = TestMergeCmd._generate_attribute_for_asset(size, size)
ParseDict(dict_metadatas, mir_metadatas)
image_annotations = {}
for asset_idx, (asset_id, keywords_pair) in enumerate(assets_and_keywords.items()):
image_annotations[asset_id] = TestMergeCmd._generate_annotations_for_asset(type_ids=keywords_pair[0],
x=100,
y=(asset_idx + 1) * 100)
dict_annotations = {
"task_annotations": {
task_id: {
"image_annotations": image_annotations
}
},
'head_task_id': task_id
}
ParseDict(dict_annotations, mir_annotations)
dict_keywords = {"keywords": {}}
for asset_id, keywords_pair in assets_and_keywords.items():
dict_keywords["keywords"][asset_id] = TestMergeCmd._generate_keywords_for_asset(
keywords_pair[0], keywords_pair[1])
ParseDict(dict_keywords, mir_keywords)
dict_tasks = {
'tasks': {
task_id:
TestMergeCmd._generate_task(task_id=task_id,
name="mining",
type=mirpb.TaskTypeMining,
timestamp=task_timestamp)
},
'head_task_id': task_id
}
ParseDict(dict_tasks, mir_tasks)
test_utils.mir_repo_commit_all(self._mir_root, mir_metadatas, mir_annotations, mir_keywords, mir_tasks,
commit_msg)
def _prepare_mir_branch_a(self):
"""
assets and keywords:
a: a0(1), a1(1), a2(1), a3(1)
all asset size set to (1000, 1000)
"""
assets_and_keywords = {
"a0": ([1], ["c0", "c1"]),
"a1": ([1], ["c0", "c1"]),
"a2": ([1], ["c0", "c1"]),
"a3": ([1], ["c0", "c1"]),
}
self._prepare_mir_branch(assets_and_keywords=assets_and_keywords,
size=1000,
task_id="a",
task_timestamp=1624376173,
commit_msg="prepare_branch_merge_a")
def _prepare_mir_branch_b(self):
"""
assets and keywords:
b: b0(2), b1(2), b2(2)
all asset size set to (1100, 1100)
"""
assets_and_keywords = {
"b0": ([2], ["c0", "c2"]),
"b1": ([2], ["c0", "c2"]),
"b2": ([2], ["c0", "c2"]),
}
self._prepare_mir_branch(assets_and_keywords=assets_and_keywords,
size=1100,
task_id="b",
task_timestamp=1624376173 + 10,
commit_msg="prepare_branch_merge_b")
def _prepare_mir_branch_d(self):
"""
assets and keywords:
d: a0(1, 2), d0(1, 4), d1(1, 4)
all asset size set to (1300, 1300)
"""
assets_and_keywords = {
"a0": ([1, 2], ["c0", "c1", "c2"]),
"d0": ([1, 4], ["c0", "c1", "c4"]),
"d1": ([1, 4], ["c0", "c1", "c4"]),
}
self._prepare_mir_branch(assets_and_keywords=assets_and_keywords,
size=1300,
task_id="d",
task_timestamp=1624376173 + 30,
commit_msg="prepare_branch_merge_d")
# protected: check
def _check_result(self,
expected_dict_metadatas=None,
expected_dict_annotations=None,
expected_dict_keywords=None,
expected_dict_tasks=None):
if expected_dict_metadatas:
try:
mir_metadatas = test_utils.read_mir_pb(os.path.join(self._mir_root, "metadatas.mir"),
mirpb.MirMetadatas)
actual_dict_metadatas = MessageToDict(mir_metadatas, preserving_proto_field_name=True)
self.assertEqual(expected_dict_metadatas, actual_dict_metadatas)
except AssertionError as e:
logging.info(f"e: {expected_dict_metadatas}")
logging.info(f"a: {actual_dict_metadatas}")
raise e
if expected_dict_annotations:
try:
mir_annotations = test_utils.read_mir_pb(os.path.join(self._mir_root, "annotations.mir"),
mirpb.MirAnnotations)
actual_dict_annotations = MessageToDict(mir_annotations, preserving_proto_field_name=True)
self.assertEqual(expected_dict_annotations, actual_dict_annotations)
except AssertionError as e:
logging.info(f"e: {expected_dict_annotations}")
logging.info(f"a: {actual_dict_annotations}")
raise e
if expected_dict_keywords:
mir_keywords = test_utils.read_mir_pb(os.path.join(self._mir_root, "keywords.mir"), mirpb.MirKeywords)
actual_dict_keywords = MessageToDict(mir_keywords, preserving_proto_field_name=True)
for asset_id, expected_keywords in expected_dict_keywords["keywords"].items():
actual_keywords = actual_dict_keywords["keywords"][asset_id]
try:
self.assertEqual(set(expected_keywords["predifined_keyids"]),
set(actual_keywords["predifined_keyids"]))
except AssertionError as e:
logging.info(f"e: {expected_keywords}")
logging.info(f"a: {actual_keywords}")
raise e
if expected_dict_tasks:
try:
mir_tasks = test_utils.read_mir_pb(os.path.join(self._mir_root, "tasks.mir"), mirpb.MirTasks)
actual_dict_tasks = MessageToDict(mir_tasks, preserving_proto_field_name=True)
for task_id in expected_dict_tasks["tasks"]:
self.assertTrue(task_id in actual_dict_tasks["tasks"])
self.assertTrue("merge-task-id" in actual_dict_tasks["tasks"])
self.assertTrue("merge-task-id", mir_tasks.head_task_id)
except AssertionError as e:
logging.info(f"e: {expected_dict_tasks}")
logging.info(f"a: {actual_dict_tasks}")
raise e
# public: test cases
def test_all(self):
self._test_exclude_no_tvt_stop_00()
self._test_no_tvt_stop_00()
self._test_tvt_guest_00()
self._test_tvt_host_00()
self._test_tvt_stop_01()
# protected: test cases
def _test_no_tvt_stop_00(self):
""" a + b """
mir_root = self._mir_root
fake_args = type('', (), {})()
fake_args.mir_root = mir_root
fake_args.src_revs = 'b;a'
fake_args.ex_src_revs = ''
fake_args.dst_rev = '_test_no_tvt_stop_00@merge-task-id'
fake_args.strategy = 'stop'
merge_instance = CmdMerge(fake_args)
ret = merge_instance.run()
# check results
self.assertEqual(MirCode.RC_OK, ret)
expected_dict_metadatas = {
"attributes": {
"a0": TestMergeCmd._generate_attribute_for_asset(1000, 1000),
"a1": TestMergeCmd._generate_attribute_for_asset(1000, 1000),
"a2": TestMergeCmd._generate_attribute_for_asset(1000, 1000),
"a3": TestMergeCmd._generate_attribute_for_asset(1000, 1000),
"b0": TestMergeCmd._generate_attribute_for_asset(1100, 1100),
"b1": TestMergeCmd._generate_attribute_for_asset(1100, 1100),
"b2": TestMergeCmd._generate_attribute_for_asset(1100, 1100),
}
}
expected_dict_annotations = {
"task_annotations": {
"merge-task-id": {
"image_annotations": {
"a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100),
"a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200),
"a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300),
"a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400),
"b0": TestMergeCmd._generate_annotations_for_asset([2], 100, 100),
"b1": TestMergeCmd._generate_annotations_for_asset([2], 100, 200),
"b2": TestMergeCmd._generate_annotations_for_asset([2], 100, 300),
}
}
},
'head_task_id': 'merge-task-id',
}
expected_dict_keywords = {
"keywords": {
"a0": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"b0": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]),
"b1": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]),
"b2": TestMergeCmd._generate_keywords_for_asset([2], ["c0", "c2"]),
}
}
expected_dict_tasks = {
'tasks': {
"a":
TestMergeCmd._generate_task(task_id="a",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173),
"b":
TestMergeCmd._generate_task(task_id="b",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173 + 10),
'merge-task-id':
TestMergeCmd._generate_task(task_id="merge-task-id",
name="merge",
type=mirpb.TaskTypeMerge,
timestamp=0),
}
}
self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords,
expected_dict_tasks)
def _test_tvt_stop_01(self):
""" abnormal case: with tvt flag assigned, strategy stop, a + d, have joint assets """
mir_root = self._mir_root
fake_args = type('', (), {})()
fake_args.mir_root = mir_root
fake_args.src_revs = 'tr:a;va:d'
fake_args.ex_src_revs = ''
fake_args.dst_rev = "_test_tvt_stop_01@merge-task-id"
fake_args.strategy = 'stop'
merge_instance = CmdMerge(fake_args)
ret = merge_instance.run()
# check result
self.assertEqual(MirCode.RC_CMD_CONFLICTS_OCCURED, ret)
def _test_tvt_host_00(self):
""" normal case: with tvt flag assigned, strategy host, a + d, have joint assets """
mir_root = self._mir_root
fake_args = type('', (), {})()
fake_args.mir_root = mir_root
fake_args.src_revs = 'tr:a;va:d'
fake_args.ex_src_revs = ''
fake_args.dst_rev = '_test_tvt_host_00@merge-task-id'
fake_args.strategy = 'host'
merge_instance = CmdMerge(fake_args)
ret = merge_instance.run()
# check result
self.assertEqual(MirCode.RC_OK, ret)
expected_dict_metadatas = {
"attributes": {
"a0": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a1": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a2": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a3": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"d0": TestMergeCmd._generate_attribute_for_asset(1300, 1300, tvt_type=mirpb.TvtTypeValidation),
"d1": TestMergeCmd._generate_attribute_for_asset(1300, 1300, tvt_type=mirpb.TvtTypeValidation),
}
}
expected_dict_annotations = {
"task_annotations": {
"merge-task-id": {
"image_annotations": {
"a0": TestMergeCmd._generate_annotations_for_asset([1], 100, 100),
"a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200),
"a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300),
"a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400),
"d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200),
"d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300),
}
}
},
'head_task_id': 'merge-task-id',
}
expected_dict_keywords = {
"keywords": {
"a0": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"d0": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]),
"d1": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]),
}
}
expected_dict_tasks = {
'tasks': {
"a":
TestMergeCmd._generate_task(task_id="a",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173),
"d":
TestMergeCmd._generate_task(task_id="d",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173 + 30),
'merge-task-id':
TestMergeCmd._generate_task(task_id="merge-task-id",
name="merge",
type=mirpb.TaskTypeMerge,
timestamp=0),
}
}
self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords,
expected_dict_tasks)
def _test_tvt_guest_00(self):
""" normal case: with tvt flag assigned, strategy guest, a + d, have joint assets """
mir_root = self._mir_root
fake_args = type('', (), {})()
fake_args.mir_root = mir_root
fake_args.src_revs = 'tr:a;va:d'
fake_args.ex_src_revs = ''
fake_args.dst_rev = '_test_tvt_guest_00@merge-task-id'
fake_args.strategy = 'guest'
merge_instance = CmdMerge(fake_args)
ret = merge_instance.run()
# check result
self.assertEqual(MirCode.RC_OK, ret)
expected_dict_metadatas = {
"attributes": {
"a0": TestMergeCmd._generate_attribute_for_asset(1300, 1300, tvt_type=mirpb.TvtTypeValidation),
"a1": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a2": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a3": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"d0": TestMergeCmd._generate_attribute_for_asset(1300, 1300, tvt_type=mirpb.TvtTypeValidation),
"d1": TestMergeCmd._generate_attribute_for_asset(1300, 1300, tvt_type=mirpb.TvtTypeValidation),
}
}
expected_dict_annotations = {
"task_annotations": {
"merge-task-id": {
"image_annotations": {
"a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200),
"a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300),
"a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400),
"a0": TestMergeCmd._generate_annotations_for_asset([1, 2], 100, 100),
"d0": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 200),
"d1": TestMergeCmd._generate_annotations_for_asset([1, 4], 100, 300),
}
}
},
'head_task_id': 'merge-task-id',
}
expected_dict_keywords = {
"keywords": {
"a0": TestMergeCmd._generate_keywords_for_asset([1, 2], ["c0", "c1", "c2"]),
"a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"d0": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]),
"d1": TestMergeCmd._generate_keywords_for_asset([1, 4], ["c0", "c1", "c4"]),
}
}
expected_dict_tasks = {
'tasks': {
"a":
TestMergeCmd._generate_task(task_id="a",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173),
"d":
TestMergeCmd._generate_task(task_id="d",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173 + 30),
'merge-task-id':
TestMergeCmd._generate_task(task_id="merge-task-id",
name="merge",
type=mirpb.TaskTypeMerge,
timestamp=0),
}
}
self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords,
expected_dict_tasks)
def _test_exclude_no_tvt_stop_00(self):
""" a - d """
mir_root = self._mir_root
fake_args = type('', (), {})()
fake_args.mir_root = mir_root
fake_args.src_revs = 'tr:a'
fake_args.ex_src_revs = 'd'
fake_args.dst_rev = '_test_exclude_no_tvt_stop_00@merge-task-id'
fake_args.strategy = 'host'
merge_instance = CmdMerge(fake_args)
ret = merge_instance.run()
# check result
self.assertEqual(MirCode.RC_OK, ret)
expected_dict_metadatas = {
"attributes": {
"a1": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a2": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
"a3": TestMergeCmd._generate_attribute_for_asset(1000, 1000, tvt_type=mirpb.TvtTypeTraining),
}
}
expected_dict_annotations = {
"task_annotations": {
"merge-task-id": {
"image_annotations": {
"a1": TestMergeCmd._generate_annotations_for_asset([1], 100, 200),
"a2": TestMergeCmd._generate_annotations_for_asset([1], 100, 300),
"a3": TestMergeCmd._generate_annotations_for_asset([1], 100, 400),
}
}
},
'head_task_id': 'merge-task-id',
}
expected_dict_keywords = {
"keywords": {
"a1": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a2": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
"a3": TestMergeCmd._generate_keywords_for_asset([1], ["c0", "c1"]),
}
}
expected_dict_tasks = {
'tasks': {
"a":
TestMergeCmd._generate_task(task_id="a",
name="mining",
type=mirpb.TaskTypeMining,
timestamp=1624376173),
'merge-task-id':
TestMergeCmd._generate_task(task_id="merge-task-id",
name="merge",
type=mirpb.TaskTypeMerge,
timestamp=0),
}
}
self._check_result(expected_dict_metadatas, expected_dict_annotations, expected_dict_keywords,
expected_dict_tasks)
| 2.125 | 2 |
django/tests/conftest.py | fossabot/docker-django | 0 | 12770094 | import pytest
from rest_framework.test import APIClient
from tests.factories import accounts
@pytest.fixture
def api_client():
api = APIClient()
return api
@pytest.fixture
def superuser():
return accounts.superuser()
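# A minimal sketch of how these fixtures could be consumed in a test; the
# endpoint path and expected status below are assumptions, not from this repo:
#
# def test_superuser_can_list_users(api_client, superuser):
#     api_client.force_authenticate(user=superuser)
#     response = api_client.get('/api/users/')
#     assert response.status_code == 200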
| 1.695313 | 2 |
demos/simple_pipeline/gpu-example-pipeline.py | dimara/kubeflow-examples | 6 | 12770095 | <reponame>dimara/kubeflow-examples
#!/usr/bin/env python3
import kfp.dsl as kfp
def training_op(learning_rate: float,
num_layers: int,
optimizer='ftrl',
step_name='training'):
return kfp.ContainerOp(
name=step_name,
image='katib/mxnet-mnist-example',
command=['python', '/mxnet/example/image-classification/train_mnist.py'],
arguments=[
'--batch-size', '64',
'--lr', learning_rate,
'--num-layers', num_layers,
'--optimizer', optimizer
],
file_outputs={'output': '/etc/timezone'}
)
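# Note: despite the "GPU" in the pipeline name, no accelerator is requested
# here. A hedged sketch -- with older KFP SDKs one would typically chain
# something like `training_op(...).set_gpu_limit(1)` on the ContainerOp.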
def postprocessing_op(output,
step_name='postprocessing'):
return kfp.ContainerOp(
name=step_name,
image='library/bash:4.4.23',
command=['sh', '-c'],
arguments=['echo "%s"' % output]
)
@kfp.pipeline(
name='Pipeline GPU Example',
description='Demonstrate the Kubeflow pipelines SDK with GPUs'
)
def kubeflow_training(
learning_rate: kfp.PipelineParam = kfp.PipelineParam(name='learningrate', value=0.1),
num_layers: kfp.PipelineParam = kfp.PipelineParam(name='numlayers', value='2'),
optimizer: kfp.PipelineParam = kfp.PipelineParam(name='optimizer', value='ftrl')):
training = training_op(learning_rate, num_layers, optimizer)
postprocessing = postprocessing_op(training.output) # pylint: disable=unused-variable
if __name__ == '__main__':
import kfp.compiler as compiler
compiler.Compiler().compile(kubeflow_training, __file__ + '.tar.gz')
| 2.671875 | 3 |
csvtest5.py | ivan-pi/rheology_lab | 0 | 12770096 | <filename>csvtest5.py<gh_stars>0
import csv
import numpy as np
from lmfit import minimize, Parameters, fit_report
import matplotlib.pyplot as plt
import maxwell as m
# INITIALIZE LISTS
omega = []
storG = []
lossG = []
dFactor = []
cVisc = []
# READ FROM CSV FILE
with open('1skupina.csv', 'rb') as csvfile:
for i in range(0,5):
next(csvfile)
podatki = csv.reader(csvfile)
for row in podatki:
omega.append(float(row[1]))
storG.append(float(row[2]))
lossG.append(float(row[3]))
dFactor.append(float(row[4]))
cVisc.append(float(row[5]))
# CONVERT LIST TO ARRAYS
omega = np.array(omega)
storG = np.array(storG)
lossG = np.array(lossG)
dFactor = np.array(dFactor)
cVisc = np.array(cVisc)
N = 5
def residual(params,N,omega,y1=None,y2=None):
g = [None]*N
l = [None]*N
for i in range(1,N+1):
gstr = 'g'+str(i)
lstr = 'l'+str(i)
g[i-1] = params[gstr].value
l[i-1] = params[lstr].value
sG = 0
lG = 0
for i in range(1,N+1):
sG = sG + g[i-1]*l[i-1]**2*omega**2/(1.0+l[i-1]**2*omega**2)
lG = lG + g[i-1]*l[i-1]*omega/(1.0+l[i-1]**2*omega**2)
if (y1 is None) and (y2 is None):
return sG,lG
return np.r_[y1-sG,
y2-lG];
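# The residual above encodes the N-mode generalized Maxwell model:
#   G'(w)  = sum_i g_i * l_i**2 * w**2 / (1 + l_i**2 * w**2)
#   G''(w) = sum_i g_i * l_i * w / (1 + l_i**2 * w**2)
# with moduli g_i and relaxation times l_i fitted jointly against the
# measured storage (storG) and loss (lossG) data.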
params = Parameters()
params.add('g1',value=1,min=1.e-4)
params.add('g2',value=1,min=1.e-4)
params.add('g3',value=1,min=1.e-4)
params.add('g4',value=1,min=1.e-4)
params.add('g5',value=1,min=1.e-4)
#~ params.add('g6',value=1,min=1.e-4)
#~ params.add('g7',value=1,min=1.e-4)
#~ params.add('g8',value=1,min=1.e-4)
#~ params.add('g9',value=1,min=1.e-4)
#~ params.add('g10',value=1,min=1.e-4)
params.add('l1',value=1,min=1.e-4)
params.add('l2',value=1,min=1.e-4)
params.add('l3',value=1,min=1.e-4)
params.add('l4',value=1,min=1.e-4)
params.add('l5',value=1,min=1.e-4)
#~ params.add('l6',value=1,min=1.e-4)
#~ params.add('l7',value=1,min=1.e-4)
#~ params.add('l8',value=1,min=1.e-4)
#~ params.add('l9',value=1,min=1.e-4)
#~ params.add('l10',value=1,min=1.e-4)
out = minimize(residual,params,args=(N,omega,storG,lossG),method='leastsq')
fit = residual(params,N,omega)[0]
print "fit", fit
print(fit_report(params))
# EXTRACT PARAMETER VALUES
p = params.valuesdict()
g = np.array([])
l = np.array([])
for i in range(1,N+1):
gstr = 'g'+str(i)
lstr = 'l'+str(i)
g = np.append(g,p[gstr])
l = np.append(l,p[lstr])
# sort into ascending relaxation-time order
g = g[np.argsort(l)]
l.sort()
print "g", np.round(g,4)
print "l", np.round(l,4)
g2 = np.r_[907.6350,10.6665,6.8126,5.6231,13.8406]
l2 = np.r_[0.0002,0.0387,0.2233,1.2070,17.8452]
# MEHANSKI SPEKTER
plt.subplot(121)
x = np.logspace(-3,4,200)
plt.plot(omega,storG,'o',
omega,lossG,'o',
x,residual(params,N,x)[0],
x,residual(params,N,x)[1],
x,m.sG5eval(x,np.append(g2,l2)),
x,m.lG5eval(x,np.append(g2,l2)))
plt.legend(('storage','loss','python S5','python L5','MATLAB S5','MATLAB L5'),loc='lower right')
plt.title('Mechanical spectrum',fontsize='18')
plt.yscale('log')
plt.xscale('log')
plt.ylabel("G', G'' (Pa)",fontsize='16')
plt.xlabel('$\omega$ (rad/s)', fontsize='16')
# RELAKSACIJSKI SPEKTER
plt.subplot(122)
plt.plot(l,g,
l2,g2,'o')
plt.legend(('python','MATLAB'))
plt.title('Relaxation spectrum',fontsize='18')
plt.yscale('log')
plt.xscale('log')
plt.ylabel("$g_i$ (Pa)",fontsize='16')
plt.xlabel('$\lambda_i$ (s)',fontsize='16')
plt.show()
| 2.671875 | 3 |
cosmosis-standard-library/shear/shear_bias/shear_m_bias.py | ktanidis2/Modified_CosmoSIS_for_galaxy_number_count_angular_power_spectra | 1 | 12770097 | """
Errors in cosmic shear measurement can lead to a multiplicative factor
scaling the observed shear spectra.
This module scales the measured C_ell to account for that difference,
assuming model values of the multplicative factor m, either per bin or for all bins.
"""
from __future__ import print_function
from builtins import range
from cosmosis.datablock import names, option_section
import sys
warning_note_displayed = False
def setup(options):
# This is an option - can set m_per_bin = T to get
# a different m for each tomographic bin, or F to get
# one global value
m_per_bin = options.get_bool(option_section, "m_per_bin", True)
cl_section = options.get_string(
option_section, "cl_section", default=names.shear_cl)
cross_section = options.get_string(
option_section, "cross_section", default="galaxy_shear_cl")
cal_section = options.get_string(
option_section, "cal_section", default=names.shear_calibration_parameters)
verbose = options.get_bool(option_section, "verbose", False)
print()
print("The shear_m_bias module will use calibration values from {} and look for ".format(cal_section))
print("shear-shear spectra in {} and position-shear in {}".format(cl_section, cross_section))
return m_per_bin, cl_section, cal_section, cross_section, verbose
def get_nbins(block, section):
if block.has_value(section, "nbin_a"):
n_a = block[section, "nbin_a"]
n_b = block[section, "nbin_b"]
else:
n_a = block[section, "nbin"]
n_b = n_a
return n_a, n_b
def calibrate_section(block, section, m_a, m_b, verbose):
n_a = len(m_a)
n_b = len(m_b)
for i in range(n_a):
for j in range(n_b):
# Get existing C_ell
cl_name = "bin_{}_{}".format(i + 1, j + 1)
if block.has_value(section, cl_name):
if verbose:
print("Calibrating {} bin {} {} by (1+{}) * (1+{}) = {}".format(section, i + 1, j + 1, m_a[i], m_b[j], (1 + m_a[i]) * (1 + m_b[j])))
block[section, cl_name] *= (1 + m_a[i]) * (1 + m_b[j])
elif verbose:
print("No {} bin {} {} to calibrate".format(section, i + 1, j + 1))
def calibrate_shear_shear(block, section, cal_section, m_per_bin, verbose):
nbin_a, nbin_b = get_nbins(block, section)
if m_per_bin:
m = [block[cal_section, "m{}".format(i + 1)] for i in range(nbin_a)]
else:
m0 = block[cal_section, "m0"]
m = [m0 for i in range(nbin_a)]
calibrate_section(block, section, m, m, verbose)
def calibrate_position_shear(block, section, cal_section, m_per_bin, verbose):
nbin_a, nbin_b = get_nbins(block, section)
m_a = [0.0 for i in range(nbin_a)]
if m_per_bin:
m_b = [block[cal_section, "m{}".format(i + 1)] for i in range(nbin_b)]
else:
m0 = block[cal_section, "m0"]
m_b = [m0 for i in range(nbin_b)]
calibrate_section(block, section, m_a, m_b, verbose)
def execute(block, config):
m_per_bin, cl_section, cal_section, cross_section, verbose = config
do_auto = block.has_section(cl_section)
do_cross = block.has_section(cross_section)
if do_auto:
calibrate_shear_shear(
block, cl_section, cal_section, m_per_bin, verbose)
if do_cross:
calibrate_position_shear(block, cross_section,
cal_section, m_per_bin, verbose)
if (not do_auto) and (not do_cross):
sys.stderr.write("ERROR: The shear bias calibration module could not find either a section {} or a {} to calibrate.\n".format(
cl_section, cross_section))
sys.stderr.write("The module therefore has nothing to do and considers this an error. You may need to either change settings in the module or the precedng pipeline, or remove the module altogether\n")
return 1
global warning_note_displayed
if not warning_note_displayed:
warning_note_displayed = True
if not do_auto:
sys.stderr.write(
"Note: No shear-shear section {} was found to calibrate. I did calibrate position-shear in {}.\n".format(cl_section, cross_section))
elif not do_cross:
sys.stderr.write(
"Note: No position-shear section {} was found to calibrate. I did calibrate shear-shear in {}.\n".format(cross_section, cl_section))
return 0
| 2.171875 | 2 |
SBP_SNN_pBP/train.py | lcx1997213/sbp | 6 | 12770098 | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
from SNN import SNN
import time
import os
from tensorboardX import SummaryWriter
from nettalk import Nettalk
from gesture import Gesture
import argparse
parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('-gpu', type=int, default=3)
parser.add_argument('-seed', type=int, default=3154)
parser.add_argument('-num_epoch', type=int, default=100)
parser.add_argument('-layers', type=int, default=3)
parser.add_argument('-interval', type=int, default=20, help='interval of loss print during training')
parser.add_argument('-bp_mark', type=int)
parser.add_argument('-hidden_size', type=int, default=500)
parser.add_argument('-alpha', type=float, default=0.1)
parser.add_argument('-task', type=str, default='MNIST', choices=['MNIST', 'NETTalk', 'DVSGesture'])
parser.add_argument('-energy', action='store_true')
parser.add_argument('-sbp', action='store_true')
parser.add_argument('-tensorboard', action='store_true')
opt = parser.parse_args()
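# Example invocation (flags as declared above; dataset files are assumed to
# exist under ./data/ for the chosen task):
#   python train.py -task MNIST -gpu 0 -num_epoch 100 -sbp -energy -tensorboard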
torch.cuda.set_device(opt.gpu)
torch.manual_seed(opt.seed)
torch.cuda.manual_seed_all(opt.seed)
torch.backends.cudnn.deterministic = True
test_scores = []
train_scores = []
if opt.task == 'MNIST':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Mni')
hyperparams = [100, 784, 10, 1e-3, 20, 'MNIST']
train_dataset = dsets.MNIST(root = './data/mnist', train = True, transform = transforms.ToTensor(), download = True)
test_dataset = dsets.MNIST(root = './data/mnist', train = False, transform = transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
elif opt.task == 'NETTalk':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Net')
hyperparams = [5, 189, 26, 1e-3, 20, 'NETTalk']
train_dataset = Nettalk('train', transform=transforms.ToTensor())
test_dataset = Nettalk('test', transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
elif opt.task == 'DVSGesture':
if opt.tensorboard:
writer = SummaryWriter(comment = '-Ges')
hyperparams = [16, 1024, 11, 1e-4, 20, 'DVSGesture']
train_dataset = Gesture('train', transform=transforms.ToTensor())
test_dataset = Gesture('test', transform=transforms.ToTensor())
train_loader = torch.utils.data.DataLoader(dataset = train_dataset, batch_size = hyperparams[0], shuffle = True)
test_loader = torch.utils.data.DataLoader(dataset = test_dataset, batch_size = hyperparams[0], shuffle = False)
print('Dataset: ' + opt.task)
print('Random Seed: {}'.format(opt.seed))
print('Alpha: {}'.format(opt.alpha))
print('Length of Training Dataset: {}'.format(len(train_dataset)))
print('Length of Test Dataset: {}'.format(len(test_dataset)))
print('Build Model')
model = SNN(hyperparams, opt.hidden_size, opt.layers, opt.sbp, opt.bp_mark)
model.cuda()
loss_function = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=hyperparams[3])
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1)
cossim = torch.nn.CosineSimilarity(dim=1, eps=1e-6)
sigmoid = torch.nn.Sigmoid()
def train(epoch):
model.train()
print('Train Epoch ' + str(epoch + 1))
start_time = time.time()
total_loss = 0
for i, (images, labels) in enumerate(train_loader):
if images.size()[0] == hyperparams[0]:
optimizer.zero_grad()
images = Variable(images.cuda())
if opt.task == 'MNIST':
one_hot = torch.zeros(hyperparams[0], hyperparams[2]).scatter(1, labels.unsqueeze(1), 1)
labels = Variable(one_hot.cuda())
elif opt.task == 'NETTalk':
labels = labels.float()
labels = Variable(labels.cuda())
elif opt.task == 'DVSGesture':
labels = labels.float()
labels = Variable(labels.cuda())
outputs, e_loss = model(images, labels)
c_loss = loss_function(outputs, labels)
loss = c_loss + e_loss * opt.alpha if opt.energy else c_loss
total_loss += float(loss)
loss.backward(retain_graph = True)
optimizer.step()
if (i + 1) % (len(train_dataset) // (hyperparams[0] * opt.interval)) == 0:
print('Epoch: [%d/%d], Step: [%d/%d], Loss: %.6f, Time: %.2f' % (epoch + 1, opt.num_epoch, i + 1, len(train_dataset) // hyperparams[0], total_loss / (hyperparams[0] * opt.interval), time.time() - start_time))
xs = epoch * opt.interval + ((i + 1) // (len(train_dataset) // (hyperparams[0] * opt.interval)))
if opt.tensorboard:
writer.add_scalar('loss_train', total_loss / (hyperparams[0] * opt.interval), xs)
writer.add_scalar('time_train', time.time() - start_time, xs)
start_time = time.time()
total_loss = 0
scheduler.step()
def eval(epoch, if_test):
model.eval()
correct = 0
total = 0
if if_test:
print('Test Epoch ' + str(epoch + 1))
loader = test_loader
test_or_train = 'test'
else:
loader = train_loader
test_or_train = 'train'
if opt.task == 'MNIST':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images)
total += labels.size(0)
pred = outputs.max(1)[1]
correct += (pred == labels).sum()
correct = correct.item()
elif opt.task == 'NETTalk':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images, labels)
total += 1
if outputs.max() >= 0.05:
pos = []
for label in range(26):
if (labels[0, label] != 0) or (outputs[0, label] != 0):
pos.append(label)
tem_out = torch.zeros((1, len(pos)))
tem_lab = torch.zeros((1, len(pos)))
for label in range(len(pos)):
tem_out[0, label] = outputs[0, pos[label]]
tem_lab[0, label] = labels[0, pos[label]]
correct += cossim(tem_out, tem_lab)
else:
correct += 0
elif opt.task == 'DVSGesture':
for i, (images, labels) in enumerate(loader):
images = Variable(images.cuda())
labels = Variable(labels.cuda())
outputs, _ = model(images, labels)
total += labels.size(0)
pred = outputs.max(1)[1]
t_label = labels.max(1)[1]
correct += (pred == t_label).sum()
correct = correct.item()
acc = 100.0 * correct / total
print(test_or_train + ' correct: %d accuracy: %.2f%%' % (correct, acc))
if opt.tensorboard:
writer.add_scalar('acc_' + test_or_train, acc, epoch + 1)
if if_test:
test_scores.append(acc)
else:
train_scores.append(acc)
def main():
for epoch in range(opt.num_epoch):
train(epoch)
if (epoch + 1) % 1 == 0:
eval(epoch, if_test = True)
if (epoch + 1) % 20 == 0:
print('Best Test Accuracy in %d: %.2f%%' % (epoch + 1, max(test_scores)))
            avg = sum(test_scores[-10:]) / 10
print('Average of Last Ten Test Accuracy : %.2f%%' % (avg))
if opt.tensorboard:
writer.close()
if __name__ == '__main__':
main() | 2.21875 | 2 |
file_storage/users.py | cleac/knu-oop-module | 0 | 12770099 | <filename>file_storage/users.py
import json
from enum import IntEnum
from file_storage.utils import Deserializable
from file_storage.utils import Serializable
Roles = IntEnum("Roles", {
'user': 1,
'moderator': 2,
'admin': 3,
'superadmin': 4
})
class User(Serializable, Deserializable):
def __init__(
self, login, name='Anonymous', role=Roles.user,
password='password'):
self.name = name
self.login = login
self.role = role
self.password = password
def serialize(self):
return json.dumps({
'name': self.name,
'login': self.login,
'role': self.role.value,
            'password': self.password,
})
@staticmethod
def deserialize(json_string):
data = json.loads(json_string)
for field in ['name', 'login', 'role', 'password']:
if field not in data:
raise ValueError("Invalid json {}".format(json_string))
        data['role'] = Roles(data['role'])
return User(**data)
def __repr__(self):
        return '<User name={} access={}>'.format(
            self.name, self.role)
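# Round-trip sketch (values illustrative only):
#   u = User(login='jdoe', name='Jane', role=Roles.admin, password='secret')
#   assert User.deserialize(u.serialize()).role is Roles.admin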
| 3.0625 | 3 |
output/models/sun_data/elem_decl/type_def/type_def01202m/type_def01202m1_xsd/__init__.py | tefra/xsdata-w3c-tests | 1 | 12770100 | <filename>output/models/sun_data/elem_decl/type_def/type_def01202m/type_def01202m1_xsd/__init__.py
from output.models.sun_data.elem_decl.type_def.type_def01202m.type_def01202m1_xsd.type_def01202m1 import Root
__all__ = [
"Root",
]
| 1.265625 | 1 |
src/models/emotion_classifier/fasttext_hypertuning.py | Neronuser/EmoCourseChat | 1 | 12770101 | import csv
import subprocess
from itertools import product
import textacy
from sklearn.metrics import accuracy_score
from sklearn.metrics import f1_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from textacy.text_utils import detect_language
from src.utils import preprocess
if __name__ == '__main__':
EMOTION_DATAPATH = 'data/processed/emotions_full.csv'
raw_data = []
with open(EMOTION_DATAPATH) as data_file:
reader = csv.reader(data_file, quoting=csv.QUOTE_MINIMAL)
reader.__next__()
for i, line in enumerate(reader):
preprocessed_line = preprocess(line[1])
if detect_language(preprocessed_line) == 'en':
doc = textacy.Doc(preprocessed_line, lang='en_core_web_lg')
raw_data.append((doc, line[2]))
texts, labels = zip(*raw_data)
label_encoder = LabelEncoder()
encoded_labels = label_encoder.fit_transform(labels)
x_train, x_test, y_train, y_test = \
train_test_split(texts, encoded_labels, shuffle=True, stratify=encoded_labels,
random_state=42, test_size=0.2)
MODELS_TEST_RESULTS = 'reports/tune_test_scores.csv'
FASTTEXT_INPUT_FILE = 'data/processed/fasttext_input.txt'
FASTTEXT_TEST_FILE = 'data/processed/fasttext_test.txt'
FASTTEXT_FULL_FILE = 'data/processed/fasttext_full.txt'
MODEL_PATH = 'models/emotion_classification/fasttext/model'
label_prefix = '__label__'
with open(FASTTEXT_INPUT_FILE, 'w') as input_file:
for x, y in zip(x_train, y_train):
input_file.write(' , '.join([label_prefix + str(y), x.text]) + '\n')
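    # Each line written above follows fastText's supervised input format, e.g.:
    #   __label__3 , i am feeling great today
    # (encoded label id, a " , " separator, then the preprocessed text).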
with open(FASTTEXT_TEST_FILE, 'w') as input_file:
for x, y in zip(x_test, y_test):
input_file.write(x.text.replace('\n', '') + '\n')
tested_dims = [200, 300, 500]
    tested_lrs = [0.1, 0.01, 0.001]
tested_epochs = [10, 20, 50]
tested_min_counts = [1]
lr_update_rates = [100, 100000, 1000000]
negs = [5, 50, 100]
word_ngrams = 1
combinations = product(tested_dims, tested_lrs, tested_epochs, tested_min_counts, lr_update_rates, negs)
thread = str(12)
best_params = None
best_score = 0
n_combinations = len(tested_dims) * len(tested_lrs) * len(tested_epochs) * len(tested_min_counts) * \
len(lr_update_rates) * len(negs)
for i, (dim, lr, epoch, min_count, lr_update_rate, neg) in enumerate(combinations):
print("%d / %d" % (i, n_combinations))
subprocess.call(['./fastText-0.1.0/fasttext', 'supervised', '-input', FASTTEXT_INPUT_FILE,
'-output', MODEL_PATH, '-dim', str(dim), '-lr', str(lr), '-epoch', str(epoch),
'-label', label_prefix, '-wordNgrams', str(word_ngrams), '-minCount', str(min_count),
'-thread', thread, '-lrUpdateRate', str(lr_update_rate), '-neg', str(neg)])
test_preds = subprocess.check_output(['./fastText-0.1.0/fasttext', 'predict', MODEL_PATH + '.bin',
FASTTEXT_TEST_FILE])
preds = [int(pred[-1]) for pred in test_preds.decode("utf-8").split('\n') if pred != '']
score = f1_score(y_test, preds, average='micro')
accuracy = accuracy_score(y_test, preds)
if best_score < score:
best_score = score
best_params = {"dim": dim, "lr": lr, "epochs": epoch, "min_count": min_count,
"lr_update_rate": lr_update_rate, "neg": neg, "accuracy": accuracy}
with open(MODELS_TEST_RESULTS, "a") as test_scores_table:
writer = csv.writer(test_scores_table, quoting=csv.QUOTE_MINIMAL)
writer.writerow(["FT", best_score, '', str(best_params)])
| 2.71875 | 3 |
chho2/ignore/1.py | srishtikohli99/CHHO2 | 0 | 12770102 | <filename>chho2/ignore/1.py
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 5 03:41:18 2019
@author: srishti
"""
print("Hello")
| 1.28125 | 1 |
covid_stats/apps.py | cploutarchou/covid-19-tracker | 0 | 12770103 | from django.apps import AppConfig
class CovidStatsConfig(AppConfig):
name = 'covid_stats'
| 1.039063 | 1 |
toolkit/filetype.py | yufeiminds/toolkit | 0 | 12770104 | <gh_stars>0
# coding=utf8
"""
Helper function for file types.
"""
from toolkit import ToolkitException
class FiletypeException(ToolkitException):
pass
class FiledetException(FiletypeException):
pass
_file_type_map = {
'.py': 'python',
'.sh': 'bash',
}
def filedet(name, fobj=None, suffix=None):
"""
Detect file type by filename.
:param name: file name
:param fobj: file object
:param suffix: file suffix like ``py``, ``.py``
:return: file type full name, such as ``python``, ``bash``
"""
    if suffix and not suffix.startswith('.'):
        suffix = '.' + suffix  # accept bare suffixes like ``py`` as documented
    name = name or (fobj and fobj.name) or suffix
    if not name:
        raise FiledetException('file name error.')
    separated = name.split('.')
if len(separated) == 1:
raise FiledetException('file name error.')
key = '.' + separated[-1]
return _file_type_map.get(key)
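# Usage sketch:
#   filedet('run.sh')             # -> 'bash'
#   filedet(None, suffix='py')    # -> 'python'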
| 3.234375 | 3 |
scripts/ruler/measures/cwl_rbp.py | leifos/cwl | 6 | 12770105 | <reponame>leifos/cwl
import numpy as np
from ruler.measures.cwl_metrics import CWLMetric
'''
@article{Moffat:2008:RPM:1416950.1416952,
author = {<NAME> <NAME>},
title = {Rank-biased Precision for Measurement of Retrieval Effectiveness},
journal = {ACM Trans. Inf. Syst.},
volume = {27},
number = {1},
year = {2008},
pages = {2:1--2:27},
articleno = {2},
numpages = {27},
url = {http://doi.acm.org/10.1145/1416950.1416952},
}
'''
class RBPCWLMetric(CWLMetric):
def __init__(self, theta=0.9):
super(CWLMetric, self).__init__()
self.metric_name = "RBP@{<EMAIL>(theta)
self.theta = theta
self.bibtex = """
@article{Moffat:2008:RPM:1416950.1416952,
author = {<NAME> and <NAME>},
title = {Rank-biased Precision for Measurement of Retrieval Effectiveness},
journal = {ACM Trans. Inf. Syst.},
volume = {27},
number = {1},
year = {2008},
pages = {2:1--2:27},
articleno = {2},
numpages = {27},
url = {http://doi.acm.org/10.1145/1416950.1416952},
}
"""
def name(self):
return "RBP@{<EMAIL>(self.theta)
def c_vector(self, ranking):
cvec = np.dot(np.ones(len(ranking.gains)), self.theta)
return cvec
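# In the C/W/L framework RBP corresponds to a constant continuation
# probability, C(i) = theta for every rank i, which induces the classic
# RBP rank weights W(i) = (1 - theta) * theta**(i - 1).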
| 2.390625 | 2 |
utils/ana_score.py | igemsoftware/HFUT-China_2015 | 0 | 12770106 | <reponame>igemsoftware/HFUT-China_2015<filename>utils/ana_score.py<gh_stars>0
import os
import django
import sys
import traceback
pro_dir = os.getcwd()
sys.path.append(pro_dir)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "BioDesigner.settings")
from design.models import parts
if __name__ == '__main__':
django.setup()
f = open('ans.txt', 'r')
for line in f:
infos = line.split('\t')
name = infos[0][:-4]
score = float(infos[1])
print name
try:
part_obj = parts.objects.get(part_name=name)
if score == 0:
part_obj.score = 0
else:
part_obj.score = float(score / float(part_obj.sequence_length))
print part_obj.score
part_obj.save()
except:
traceback.print_exc()
pass
| 1.9375 | 2 |
metrics/fid.py | iviazovetskyi/rewriting | 526 | 12770107 | from __future__ import absolute_import, division, print_function
import torch
import warnings
from tqdm import tqdm
import pathlib
from scipy import linalg
import tensorflow as tf
import numpy as np
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
def check_or_download_inception(inception_path):
''' Checks if the path to the inception file is valid, or downloads
the file if it is not present. '''
INCEPTION_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'
if inception_path is None:
inception_path = '/tmp'
inception_path = pathlib.Path(inception_path)
model_file = inception_path / 'classify_image_graph_def.pb'
if not model_file.exists():
print("Downloading Inception model")
from urllib import request
import tarfile
fn, _ = request.urlretrieve(INCEPTION_URL)
with tarfile.open(fn, mode='r') as f:
f.extract('classify_image_graph_def.pb', str(model_file.parent))
return str(model_file)
def create_inception_graph(pth):
"""Creates a graph from saved GraphDef file."""
# Creates graph from saved graph_def.pb.
with tf.io.gfile.GFile(pth, 'rb') as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
_ = tf.import_graph_def(graph_def, name='FID_Inception_Net')
def calculate_activation_statistics(images,
sess,
batch_size=50,
verbose=False):
"""Calculation of the statistics used by the FID.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 255.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- mu : The mean over samples of the activations of the pool_3 layer of
         the inception model.
-- sigma : The covariance matrix of the activations of the pool_3 layer of
         the inception model.
"""
act = get_activations(images, sess, batch_size, verbose)
mu = np.mean(act, axis=0)
sigma = np.cov(act, rowvar=False)
return mu, sigma
# code for handling inception net derived from
# https://github.com/openai/improved-gan/blob/master/inception_score/model.py
def _get_inception_layer(sess):
"""Prepares inception net for batched usage and returns pool_3 layer. """
layername = 'FID_Inception_Net/pool_3:0'
pool3 = sess.graph.get_tensor_by_name(layername)
ops = pool3.graph.get_operations()
for op_idx, op in enumerate(ops):
for o in op.outputs:
shape = o.get_shape()
if shape._dims != []:
shape = [s.value for s in shape]
new_shape = []
for j, s in enumerate(shape):
if s == 1 and j == 0:
new_shape.append(None)
else:
new_shape.append(s)
o.__dict__['_shape_val'] = tf.TensorShape(new_shape)
return pool3
# -------------------------------------------------------------------------------
def get_activations(images, sess, batch_size=200, verbose=False):
"""Calculates the activations of the pool_3 layer for all images.
Params:
-- images : Numpy array of dimension (n_images, hi, wi, 3). The values
must lie between 0 and 256.
-- sess : current session
-- batch_size : the images numpy array is split into batches with batch size
       batch_size. A reasonable batch size depends on the available hardware.
-- verbose : If set to True and parameter out_step is given, the number of calculated
batches is reported.
Returns:
-- A numpy array of dimension (num images, 2048) that contains the
activations of the given tensor when feeding inception with the query tensor.
"""
inception_layer = _get_inception_layer(sess)
n_images = images.shape[0]
if batch_size > n_images:
print(
"warning: batch size is bigger than the data size. setting batch size to data size"
)
batch_size = n_images
n_batches = n_images // batch_size
pred_arr = np.empty((n_images, 2048))
for i in tqdm(range(n_batches)):
if verbose:
print("\rPropagating batch %d/%d" % (i + 1, n_batches),
end="",
flush=True)
start = i * batch_size
if start + batch_size < n_images:
end = start + batch_size
else:
end = n_images
batch = images[start:end]
pred = sess.run(inception_layer,
{'FID_Inception_Net/ExpandDims:0': batch})
pred_arr[start:end] = pred.reshape(batch_size, -1)
if verbose:
print(" done")
return pred_arr
# -------------------------------------------------------------------------------
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
"""Numpy implementation of the Frechet Distance.
The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
and X_2 ~ N(mu_2, C_2) is
d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
Stable version by <NAME>.
Params:
-- mu1 : Numpy array containing the activations of the pool_3 layer of the
inception net ( like returned by the function 'get_predictions')
for generated samples.
    -- mu2   : The sample mean over activations of the pool_3 layer, precalculated
               on a representative data set.
-- sigma1: The covariance matrix over activations of the pool_3 layer for
generated samples.
-- sigma2: The covariance matrix over activations of the pool_3 layer,
               precalculated on a representative data set.
Returns:
-- : The Frechet Distance.
"""
mu1 = np.atleast_1d(mu1)
mu2 = np.atleast_1d(mu2)
sigma1 = np.atleast_2d(sigma1)
sigma2 = np.atleast_2d(sigma2)
assert mu1.shape == mu2.shape, "Training and test mean vectors have different lengths"
assert sigma1.shape == sigma2.shape, "Training and test covariances have different dimensions"
diff = mu1 - mu2
# product might be almost singular
covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
if not np.isfinite(covmean).all():
msg = "fid calculation produces singular product; adding %s to diagonal of cov estimates" % eps
warnings.warn(msg)
offset = np.eye(sigma1.shape[0]) * eps
covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
# numerical error might give slight imaginary component
if np.iscomplexobj(covmean):
if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
m = np.max(np.abs(covmean.imag))
raise ValueError("Imaginary component {}".format(m))
covmean = covmean.real
tr_covmean = np.trace(covmean)
return diff.dot(diff) + np.trace(sigma1) + np.trace(
sigma2) - 2 * tr_covmean
def pt_to_np(imgs):
'''normalizes pytorch image in [-1, 1] to [0, 255]'''
normalized = [((img / 2 + 0.5) * 255).clamp(0, 255) for img in imgs]
return np.array([img.permute(1, 2, 0).numpy() for img in normalized])
def compute_fid_given_images(fake_images, real_images):
'''requires that the image batches are numpy format, normalized to 0, 255'''
inception_path = check_or_download_inception(None)
create_inception_graph(inception_path)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
if isinstance(fake_images, tuple):
m1, s1 = fake_images
else:
m1, s1 = calculate_activation_statistics(fake_images, sess)
if isinstance(real_images, tuple):
m2, s2 = real_images
else:
m2, s2 = calculate_activation_statistics(real_images, sess)
return calculate_frechet_distance(m1, s1, m2, s2)
def compute_fid_given_path(path):
with np.load(path) as data:
fake_imgs = data['fake']
real_imgs = data['real']
return compute_fid_given_images(fake_imgs, real_imgs)
def load_from_path(source):
root = '/data/vision/torralba/ganprojects/placesgan/tracer/utils/fid_stats/'
path = os.path.join(root, f'{source}_stats.npz')
if os.path.exists(path):
print('Loading statistics from ', path)
with np.load(path) as data:
return data['m'], data['s']
else:
print("Stats not found in path", path)
exit()
def compute_fid(source1, source2):
if isinstance(source1, str):
source1 = load_from_path(source1)
if isinstance(source1, torch.Tensor):
source1 = pt_to_np(source1)
if isinstance(source2, str):
source2 = load_from_path(source2)
if isinstance(source2, torch.Tensor):
source2 = pt_to_np(source2)
return compute_fid_given_images(source1, source2)
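# Minimal usage sketch: each source may be a numpy batch of HxWx3 images in
# [0, 255], a torch batch in [-1, 1], a cached stats name, or a (mu, sigma)
# tuple:
#   fid_value = compute_fid(fake_images, real_images)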
if __name__ == '__main__':
import argparse
from PIL import Image
from torchvision import transforms
parser = argparse.ArgumentParser()
parser.add_argument('--source')
parser.add_argument('--target')
args = parser.parse_args()
transform = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
images1 = []
for file_name in tqdm(os.listdir(args.source)):
if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
path = os.path.join(args.source, file_name)
images1.append(transform(Image.open(path).convert('RGB')))
images1 = torch.stack(images1)
images2 = []
    for file_name in tqdm(os.listdir(args.target)):
        if file_name.lower().endswith(('.png', 'jpeg', '.jpg')):
            path = os.path.join(args.target, file_name)
images2.append(transform(Image.open(path).convert('RGB')))
images2 = torch.stack(images2)
result = compute_fid(images1, images2)
print(result)
with open('fid_results.txt', 'a+') as f:
        f.write(args.source + ' -> ' + args.target + ':\n')
f.write(str(result) + '\n')
| 2.40625 | 2 |
project/apps/CI-producer/app/constants.py | Monxun/PortainerPractice | 0 | 12770108 | # import os
# BANK_URL = os.environ['BANK_URL']
# TRANSACTION_URL = os.environ['TRANSACTION_URL']
# UNDERWRITER_URL = os.environ['UNDERWRITER_URL']
# USER_URL = os.environ['USER_URL']
# applications_url = f"http://{UNDERWRITER_URL}/applications"
# registration_url = f"http://{USER_URL}/users/registration"
# login_url = f"http://{USER_URL}/login"
# # Requires bearer token
# bank_url = f"http://{BANK_URL}/banks"
# branch_url = f"http://{BANK_URL}/branches"
# transaction_url = f"http://{TRANSACTION_URL}/transactions"
# Kubernetes ingress
applications_url = "http://localhost/applications"
registration_url = "http://localhost/users/registration"
login_url = "http://localhost/login"
# Requires bearer token
bank_url = "http://localhost/banks"
branch_url = "http://localhost/branches"
transaction_url = "http://localhost/transactions" | 1.890625 | 2 |
solutions/day10.py | nitekat1124/advent-of-code-2018 | 0 | 12770109 | from utils.solution_base import SolutionBase
class Solution(SolutionBase):
def solve(self, part_num: int):
self.test_runner(part_num)
func = getattr(self, f"part{part_num}")
result = func(self.data)
return result
def test_runner(self, part_num):
test_inputs = self.get_test_input()
test_results = self.get_test_result(part_num)
test_counter = 1
func = getattr(self, f"part{part_num}")
for i, r in zip(test_inputs, test_results):
if len(r):
if (tr := str(func(i))) == r[0]:
print(f"test {test_counter} passed")
else:
print(f"your result: {tr}")
print(f"test answer: {r[0]}")
print(f"test {test_counter} NOT passed")
test_counter += 1
print()
def part1(self, data):
points = []
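        # Each input line follows the AoC 2018 day-10 format, e.g.:
        #   position=< 9,  1> velocity=< 0,  2>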
for line in data:
p0 = line.index("<")
p1 = line.index(">")
pos = line[p0 + 1 : p1]
v1 = line[p1 + 1 :].index("<")
v2 = line[p1 + 1 :].index(">")
vel = line[p1 + 1 :][v1 + 1 : v2]
points += [[[*map(int, pos.split(","))], tuple(map(int, vel.split(",")))]]
for i in range(15000):
np = []
for p in points:
np += [[p[0][0] + p[1][0] * i, p[0][1] + p[1][1] * i]]
r = self.draw(np)
if r:
return i
def part2(self, data):
return "in the result of part 1"
def draw(self, points):
min_x = min(p[0] for p in points)
max_x = max(p[0] for p in points)
min_y = min(p[1] for p in points)
max_y = max(p[1] for p in points)
if max_x - min_x < 70: # not a algorithm... just because I know it would shirnk to 62 with my puzzle input :p
for y in range(min_y, max_y + 1):
for x in range(min_x, max_x + 1):
if any(p[0] == x and p[1] == y for p in points):
print("#", end="")
else:
print(".", end="")
print()
print()
return True
return False
| 3.25 | 3 |
TheArtificialPoet/draw.py | shyamvalsan/BotsRevolution | 0 | 12770110 | import subprocess
import random, os, sys
from PIL import Image, ImageDraw, ImageFont, ImageFilter
cmd20 = "awk -F\"|\" '$11 == 20{print}' ../poem_db.csv | sort -R | head -n 1"
cmd40 = "awk -F\"|\" '$11 == 40{print}' ../poem_db.csv | sort -R | head -n 1"
os.chdir(sys.argv[1])
for fn in os.listdir('.'):
fnr = random.choice([x for x in os.listdir('.')])
fnr_list = fnr.split('_')
color = fnr_list[0]
if color == "w":
clr = (255,255,255,255)
elif color == "b":
clr = (0,0,0,255)
else:
print "Incorrect filename"
pos = fnr_list[1]
if pos == "top":
pos1,pos2,pos3,pos4,cmd=(50,100),(50,140),(50,180),(50,220),cmd40
elif pos == "bottom":
pos1,pos2,pos3,pos4,cmd=(50,400),(50,440),(50,480),(50,520),cmd40
elif pos == "middle":
pos1,pos2,pos3,pos4,cmd=(50,300),(50,340),(50,380),(50,420),cmd40
elif pos == "topl":
pos1,pos2,pos3,pos4,cmd=(50,100),(50,140),(50,180),(50,220),cmd20
elif pos == "topr":
pos1,pos2,pos3,pos4,cmd=(320,100),(320,140),(320,180),(320,220),cmd20
elif pos == "bottoml":
pos1,pos2,pos3,pos4,cmd=(50,360),(50,400),(50,440),(50,480),cmd20
elif pos == "bottomr":
pos1,pos2,pos3,pos4,cmd=(320,360),(320,400),(320,440),(320,480),cmd20
else:
print "Incorrect filename"
row = subprocess.check_output(cmd, shell=True)
index = row.split('|')[0]
author_name = row.split('|')[1]
author_bio = row.split('|')[2]
poem_type = row.split('|')[3]
poem_context = row.split('|')[4]
poem_line1 = row.split('|')[5]
poem_line2 = row.split('|')[6]
poem_line3 = row.split('|')[7]
poem_line4 = row.split('|')[8]
poem_specific_hashtags = row.split('|')[9]
base = Image.open(fnr).convert('RGBA')
txt = Image.new('RGBA', base.size, (255,255,255,0))
fnt = ImageFont.truetype('../SpecialElite.ttf', 30)
d = ImageDraw.Draw(txt)
d.text(pos1, poem_line1, font=fnt, fill=clr)
d.text(pos2, poem_line2, font=fnt, fill=clr)
d.text(pos3, poem_line3, font=fnt, fill=clr)
d.text(pos4, poem_line4, font=fnt, fill=clr)
out = Image.alpha_composite(base, txt)
rand = random.randint(1,9999)
out.save('../ReadyToPost/%s_%s_%d.png' % (index,author_name,rand))
# im = Image.open('%s_%s.png' % (index,author_name))
# im.save('../ReadyToPost/%s_%s.jpg' % (index,author_name))
# os.remove('%s_%s.png' % (index,author_name))
caption = "\n" + poem_type + " by " + author_name + "\n" + author_bio + "\n" + poem_context + "\n" + poem_specific_hashtags
text_file = open('../ReadyToPost/%s_%s.txt' % (index,author_name), 'wb')
text_file.write(caption)
text_file.close()
| 2.21875 | 2 |
myfs.py | cty12/sdbfs | 0 | 12770111 | <reponame>cty12/sdbfs
#!/usr/bin/env python
import os
from stat import S_IFDIR, S_IFLNK, S_IFREG
from sys import argv, exit
from time import time
import traceback
import errno
from fuse import FUSE, FuseOSError, Operations
import sqlite3
class DbFS(Operations):
def __init__(self):
#print os.path.abspath('.')
self.DBFILE = './database/data.db'
try:
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("""
create table if not exists files (
path text primary key,
mode integer,
size integer,
ctime real,
mtime real,
atime real,
uid integer,
gid integer,
nlink integer
)
""")
c.execute("""
create table if not exists data (
path text primary key,
content blob
)
""")
now = time()
c.execute("""
insert or ignore into files
(path,mode,size,ctime,mtime,atime,uid,gid,nlink)
values (?,?,?,?,?,?,?,?,?)
""", ('/', (S_IFDIR | 0755), 0, now, now, now, 0, 0, 2))
conn.commit()
c.close()
conn.close()
except:
            print 'failed to initialize the SQLite schema:'
traceback.print_exc()
pass
self.fd = 0
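        # self.fd is just a monotonically increasing pseudo file handle:
        # open()/create() hand out the next integer, since all real state
        # lives in SQLite keyed by path rather than by descriptor.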
def chmod(self, path, mode):
print 'chmod: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select mode from files where path=?",(path,))
fetch = c.fetchone()
if fetch is not None:
mode_orig = fetch[0]
mode_orig &= 0770000
mode_orig |= mode
print 'new mode: ', mode_orig
c.execute("update files set mode=? where path=?", (mode_orig, path))
else:
raise IOError(errno.ENOENT,path)
conn.commit()
c.close()
conn.close()
return 0
def chown(self, path, uid, gid):
print 'chown: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("update files set uid=? where path=?", (uid, path))
c.execute("update files set gid=? where path=?", (gid, path))
conn.commit()
c.close()
conn.close()
def create(self, path, mode):
print 'create: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
now = time()
c.execute("""
insert or ignore into files
(path,mode,size,ctime,mtime,atime,uid,gid,nlink)
values (?,?,?,?,?,?,?,?,?)
""", (path, (S_IFREG | mode), 0, now, now, now, 0, 0, 1))
conn.commit()
c.close()
conn.close()
self.fd += 1
return self.fd
def getattr(self, path, fh=None):
print 'getattr: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select * from files where path=?",(path,))
fetch = c.fetchone()
if fetch is not None:
attr = dict(st_mode=fetch[1], st_size=fetch[2], st_ctime=fetch[3], st_mtime=fetch[4], st_atime=fetch[5], st_uid=fetch[6], st_gid=fetch[7], st_nlink=fetch[8])
else:
raise FuseOSError(errno.ENOENT)
c.close()
conn.close()
print 'attr_size: ', attr['st_size']
return attr
def getxattr(self, path, name, position=0):
print 'getxattr: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select * from files where path=?",(path,))
fetch = c.fetchone()
if fetch is not None:
attr = dict(st_mode=fetch[1], st_size=fetch[2], st_ctime=fetch[3], st_mtime=fetch[4], st_atime=fetch[5], st_uid=fetch[6], st_gid=fetch[7], st_nlink=fetch[8])
else:
raise FuseOSError(errno.ENOENT)
c.close()
conn.close()
try:
return attr[name]
except KeyError:
return ''
def listxattr(self, path):
print 'listxattr: %s' % path
return ['st_mode', 'st_size', 'st_ctime', 'st_mtime', 'st_atime', 'st_uid', 'st_gid', 'st_nlink']
def mkdir(self, path, mode):
print 'mkdir: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
now = time()
c.execute("""
insert or ignore into files
(path,mode,size,ctime,mtime,atime,uid,gid,nlink)
values (?,?,?,?,?,?,?,?,?)
""", (path, (S_IFDIR | mode), 0, now, now, now, 0, 0, 2))
c.execute("update files set nlink=nlink+1 where path='/'")
conn.commit()
c.close()
conn.close()
def open(self, path, flags):
print 'open: %s' % path
self.fd += 1
return self.fd
def read(self, path, size, offset, fh):
print 'read', path, size, offset, fh
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select content from data where path=?", (path,))
fetch = c.fetchone()
if fetch is not None:
content = fetch[0]
else:
# raise IOError(errno.ENOENT,path)
content = ''
c.close()
conn.close()
return content[offset:offset + size]
def readdir(self, path, fh):
print 'readdir: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select path from files where path!='/'")
fetch = c.fetchall()
if fetch is not None:
dirlist = ['.', '..'] + [x[0][1:] for x in fetch]
else:
dirlist = ['.', '..']
c.close()
conn.close()
return dirlist
def readlink(self, path):
print 'readlink: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select content from data where path=?", (path,))
fetch = c.fetchone()
c.close()
conn.close()
return fetch[0]
# TO DO removexattr
def rename(self, old, new):
print 'rename from %s to %s' %(old, new)
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("update files set path=? where path=?", (new, old))
conn.commit()
c.close()
conn.close()
def rmdir(self, path):
print 'rmdir: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("delete from files where path=?", (path,))
c.execute("update files set nlink=nlink-1 where path='/'")
conn.commit()
c.close()
conn.close()
# TO DO setxattr
def statfs(self, path):
return dict(f_bsize=512, f_blocks=4096, f_bavail=2048)
def symlink(self, target, source):
print 'symlink src: %s -> tg: %s' %(source, target)
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
now = time()
c.execute("""
insert or ignore into files
(path,mode,size,ctime,mtime,atime,uid,gid,nlink)
values (?,?,?,?,?,?,?,?,?)
""", (target, (S_IFLNK | 0777), len(source), now, now, now, 0, 0, 1))
c.execute("""
insert or ignore into data
(path, content)
values (?,?)
""", (target, source))
conn.commit()
c.close()
conn.close()
def truncate(self, path, length, fh=None):
print 'truncate: %s' % path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select content from data where path=?", (path,))
fetch = c.fetchone()
if fetch is not None:
content = fetch[0]
else:
# raise IOError(errno.ENOENT,path)
content = ''
content = content[:length]
c.execute("update data set content=? where path=?", (content, path))
c.execute("update files set size=? where path=?", (length, path))
conn.commit()
c.close()
conn.close()
def unlink(self, path):
print 'unlink: ', path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("delete from files where path=?", (path,))
conn.commit()
c.close()
conn.close()
def utimens(self, path, times=None):
print 'utimens', path
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
now = time()
atime, mtime = times if times else (now, now)
c.execute("update files set atime=? where path=?", (atime, path))
c.execute("update files set mtime=? where path=?", (mtime, path))
conn.commit()
c.close()
conn.close()
def write(self, path, data, offset, fh):
print 'write: %s' % path
print 'data type: ', type(data)
conn = sqlite3.connect(self.DBFILE, timeout=600.0)
c = conn.cursor()
c.execute("select content from data where path=?", (path,))
fetch = c.fetchone()
if fetch is not None:
content = fetch[0]
else:
content = ''
content = content[:offset] + data
c.execute("""
insert or replace into data
(path,content)
values (?,?)
""", (path, sqlite3.Binary(content)))
c.execute("update files set size=? where path=?", (len(content), path))
c.execute("select count(*) from data")
number = (c.fetchone())[0]
conn.commit()
c.close()
conn.close()
return number
if __name__ == '__main__':
if len(argv) != 2:
print 'usage: %s <mntpt>' % (argv[0])
exit(1)
fuse = FUSE(DbFS(), argv[1], foreground=True)
| 2.28125 | 2 |
data_clean_v2.py | Jacobvs/ML-Music-Analyzer | 6 | 12770112 | import ast
import json
import pickle
import ujson
import collections
import numpy as np
from chord_labels import parse_chord
from progressbar import ProgressBar, Bar, Percentage, AdaptiveETA, Counter
print("Opening files")
with open('dataset_chords.json', 'r') as values:
formatted_chords = ujson.load(values)
with open('dataset_chroma.pickle', 'rb') as chroma:
formatted_chroma = pickle.load(chroma)
print("Files Opened\n")
blank = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
blank12 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
values = collections.OrderedDict()
cleaned_chroma = []
cleaned_chords = []
final_chroma = {}
final_chords = {}
key_binary_pairs = {}
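# Chop the aligned chroma/chord sequences into fixed-length slices, padding the
# final partial slice with blank frames so every slice holds exactly slice_size entries.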
def slice_vals(chroma_vals, chord_vals, slice_size):
num_slices = int(len(chroma_vals)/slice_size)
sliced_chroma = []
sliced_chords = []
    for i in range(num_slices):
        sliced_chroma.append(chroma_vals[i*slice_size:(i+1)*slice_size])
        sliced_chords.append(chord_vals[i*slice_size:(i+1)*slice_size])
    remaining_chroma = chroma_vals[num_slices*slice_size:]
    remaining_chords = chord_vals[num_slices*slice_size:]
    if len(remaining_chroma) > 0:
        # pad the final partial slice with blank frames up to slice_size
        for i in range(slice_size-len(remaining_chroma)):
            remaining_chroma.append(blank)
            remaining_chords.append(blank12)
        sliced_chroma.append(remaining_chroma)
        sliced_chords.append(remaining_chords)
    del remaining_chords
    del remaining_chroma
    return sliced_chroma, sliced_chords
with open('file_ids.txt', 'r') as idList:
print("--CLEANING FILES: 890 TODO--\n")
progress_bar = ProgressBar(widgets=['PROCESSED: ', Counter(), '/890 ', Bar('>'), Percentage(), ' --- ', AdaptiveETA()], maxval=891)
progress_bar.start()
for i, id in enumerate(idList):
progress_bar.update(value=i)
id = int(id.strip('\n'))
chord_iter = iter(formatted_chords[str(id)].keys())
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
in_chord = False
cleaned_chroma = []
cleaned_chords = []
chord_nums = 0
for i, time in enumerate(formatted_chroma[id].keys()):
if curr_chord_tuple[0] <= time <= curr_chord_tuple[1] and formatted_chords[str(id)][curr_chord] != 'X':
curr_chord_binary = parse_chord(formatted_chords[str(id)][curr_chord]).tones_binary
print(curr_chord_binary)
cleaned_chords.append(curr_chord_binary)
cleaned_chroma.append(formatted_chroma[id][time])
key_binary_pairs[tuple(curr_chord_binary)] = formatted_chords[str(id)][curr_chord]
chord_nums += 1
in_chord = True
elif in_chord:
try:
in_chord = False
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
except StopIteration:
pass
else:
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
if time > curr_chord_tuple[1]:
try:
in_chord = False
cleaned_chords.append(blank12)
cleaned_chroma.append(formatted_chroma[id][time])
curr_chord = next(chord_iter)
curr_chord_tuple = ast.literal_eval(curr_chord)
except StopIteration:
pass
sliced = slice_vals(cleaned_chroma, cleaned_chords, 100)
final_chroma[int(id)] = sliced[0]
final_chords[int(id)] = sliced[1]
del sliced
key_binary_pairs[tuple(blank12)] = 'None'
print('\n')
print("<------------------------------------------------------->")
print("<------------------COUNTING KEYS------------------------>")
print("<------------------------------------------------------->")
hold_x = []
hold_y = []
print(len(final_chroma[12]))
with open("file_ids_subset.txt", 'r') as idFile:
for id in idFile:
id = int(id.strip('\n'))
for thing1 in final_chroma[id]:
hold_x.append(thing1)
for thing2 in final_chords[id]:
hold_y.append(thing2)
# samples x 100 x 24
print(hold_x[0][99][23])
cleaned_x = np.array(hold_x)
cleaned_y = np.array(hold_y)
# format in [file id][chroma (0) or chord (1)][slice num to look at (per 100)][index within slice]
print(cleaned_x.shape)
print(cleaned_y.shape)
print("NUM OBJECTS: " + str(len(final_chords)))
# with open("cleaned_chroma.pickle", 'wb') as file:
# dill.dump(cleaned_chroma, file, protocol=pickle.HIGHEST_PROTOCOL)
# del cleaned_chroma
#
# with open("cleaned_chords.pickle", 'wb') as file:
# dill.dump(cleaned_chords, file, protocol=pickle.HIGHEST_PROTOCOL)
# del cleaned_chords
print("saving chroma")
with open("cleaned_x.json", 'w') as file:
ujson.dump(hold_x, file)
print("saving chords")
with open("cleaned_y.json", 'w') as file:
ujson.dump(hold_y, file)
print("saving pairs")
with open("key_binary_pairs.json", 'w') as file:
ujson.dump(key_binary_pairs, file)
print("DONE SAVING")
| 2.515625 | 3 |
dolo/misc/display.py | zhuang13atJHU/dolo | 1 | 12770113 | <filename>dolo/misc/display.py
import sys
is_python_3 = sys.version_info >= (3, 0)
def read_file_or_url(url):
if 'http' in url:
if is_python_3:
import urllib.request
txt = urllib.request.urlopen(url).read()
txt = txt.decode('utf8') # not very robust
else:
import urllib2
txt = urllib2.urlopen(url).read()
else:
        # must be a file; io.open accepts the encoding argument on both
        # Python 2 and Python 3
        import io
        with io.open(url, encoding='utf8') as f:
            txt = f.read()
return txt
def pcat(filename, target='ipython'):
code = read_file_or_url(filename)
HTML_TEMPLATE = """<style>
{}
</style>
{}
"""
from pygments.lexers import get_lexer_for_filename
lexer = get_lexer_for_filename(filename, stripall=True)
from pygments.formatters import HtmlFormatter, TerminalFormatter
from pygments import highlight
try:
assert(target=='ipython')
from IPython.display import HTML, display
formatter = HtmlFormatter(linenos=True, cssclass="source")
html_code = highlight(code, lexer, formatter)
css = formatter.get_style_defs()
html = HTML_TEMPLATE.format(css, html_code)
htmlres = HTML(html)
return htmlres
except Exception as e:
print(e)
pass
formatter = TerminalFormatter()
output = highlight(code,lexer,formatter)
print(output)
| 3.21875 | 3 |
get_together/views/speakers.py | bheesham/GetTogether | 1 | 12770114 | <filename>get_together/views/speakers.py
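"""Views for creating and managing speaker bios, talks, and event presentations."""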
from django.utils.translation import ugettext_lazy as _
from django.utils.safestring import mark_safe
from django.contrib import messages
from django.contrib.auth import logout as logout_user
from django.shortcuts import render, redirect, get_object_or_404
from django.http import HttpResponse, JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from events.models.profiles import UserProfile
from events.forms import (
SpeakerBioForm,
DeleteSpeakerForm,
UserTalkForm,
DeleteTalkForm,
SchedulePresentationForm,
)
from events.models.events import Event
from events.models.speakers import Speaker, Talk, Presentation, SpeakerRequest
import datetime
import simplejson
from .teams import *
from .events import *
@login_required
def list_user_talks(request):
profile = request.user.profile
speaker_bios = Speaker.objects.filter(user=profile)
talks = list(Talk.objects.filter(speaker__user=profile))
context = {
'speaker_bios': speaker_bios,
'talks': talks,
}
return render(request, 'get_together/speakers/list_user_talks.html', context)
def show_speaker(request, speaker_id):
speaker = get_object_or_404(Speaker, id=speaker_id)
context = {
'speaker': speaker,
'talks': Talk.objects.filter(speaker=speaker),
'presentations': Presentation.objects.filter(talk__speaker=speaker, status=Presentation.ACCEPTED),
}
return render(request, 'get_together/speakers/show_speaker.html', context)
@login_required
def add_speaker(request):
new_speaker = Speaker(user=request.user.profile)
if request.method == 'GET':
speaker_form = SpeakerBioForm(instance=new_speaker)
context = {
'speaker': new_speaker,
'speaker_form': speaker_form,
}
return render(request, 'get_together/speakers/create_speaker.html', context)
elif request.method == 'POST':
speaker_form = SpeakerBioForm(request.POST, request.FILES, instance=new_speaker)
if speaker_form.is_valid():
new_speaker = speaker_form.save()
return redirect('user-talks')
else:
context = {
'speaker': new_speaker,
'speaker_form': speaker_form,
}
return render(request, 'get_together/speakers/create_speaker.html', context)
return redirect('home')
@login_required
def edit_speaker(request, speaker_id):
    speaker = get_object_or_404(Speaker, id=speaker_id)
    if not speaker.user == request.user.profile:
        messages.add_message(request, messages.WARNING, message=_('You can not make changes to this speaker bio.'))
        return redirect('show-speaker', speaker_id)
if request.method == 'GET':
speaker_form = SpeakerBioForm(instance=speaker)
context = {
'speaker': speaker,
'speaker_form': speaker_form,
}
return render(request, 'get_together/speakers/edit_speaker.html', context)
elif request.method == 'POST':
speaker_form = SpeakerBioForm(request.POST, request.FILES, instance=speaker)
if speaker_form.is_valid():
speaker = speaker_form.save()
return redirect('user-talks')
else:
context = {
'speaker': speaker,
'speaker_form': speaker_form,
}
return render(request, 'get_together/speakers/edit_speaker.html', context)
return redirect('home')
@login_required
def delete_speaker(request, speaker_id):
speaker = get_object_or_404(Speaker, id=speaker_id)
if not speaker.user == request.user.profile:
messages.add_message(request, messages.WARNING, message=_('You can not make changes to this speaker bio.'))
return redirect('show-speaker', speaker_id)
if request.method == 'GET':
form = DeleteSpeakerForm()
context = {
'speaker': speaker,
'delete_form': form,
}
return render(request, 'get_together/speakers/delete_speaker.html', context)
elif request.method == 'POST':
form = DeleteSpeakerForm(request.POST)
if form.is_valid() and form.cleaned_data['confirm']:
speaker.delete()
return redirect('user-talks')
else:
context = {
'speaker': speaker,
'delete_form': form,
}
return render(request, 'get_together/speakers/delete_speaker.html', context)
else:
return redirect('home')
def show_talk(request, talk_id):
talk = get_object_or_404(Talk, id=talk_id)
presentations = Presentation.objects.filter(talk=talk, status=Presentation.ACCEPTED).order_by('-event__start_time')
context = {
'talk': talk,
'presentations': presentations,
}
return render(request, 'get_together/speakers/show_talk.html', context)
@login_required
def add_talk(request):
new_talk = Talk()
if request.method == 'GET':
talk_form = UserTalkForm(instance=new_talk)
talk_form.fields['speaker'].queryset = request.user.profile.speaker_set
context = {
'talk': new_talk,
'talk_form': talk_form,
}
if 'event' in request.GET and request.GET['event']:
context['event'] = get_object_or_404(Event, id=request.GET['event'])
return render(request, 'get_together/speakers/create_talk.html', context)
elif request.method == 'POST':
talk_form = UserTalkForm(request.POST, instance=new_talk)
talk_form.fields['speaker'].queryset = request.user.profile.speaker_set
if talk_form.is_valid():
new_talk = talk_form.save()
if 'event' in request.POST and request.POST['event']:
event = Event.objects.get(id=request.POST['event'])
Presentation.objects.create(
event=event,
talk=new_talk,
status=Presentation.PROPOSED,
created_by=request.user.profile
)
return redirect(event.get_absolute_url())
return redirect('show-talk', new_talk.id)
else:
context = {
'talk': new_talk,
'talk_form': talk_form,
}
return render(request, 'get_together/speakers/create_talk.html', context)
return redirect('home')
@login_required
def edit_talk(request, talk_id):
talk = get_object_or_404(Talk, id=talk_id)
if not talk.speaker.user == request.user.profile:
messages.add_message(request, messages.WARNING, message=_('You can not make changes to this talk.'))
return redirect('show-talk', talk_id)
if request.method == 'GET':
talk_form = UserTalkForm(instance=talk)
talk_form.fields['speaker'].queryset = request.user.profile.speaker_set
context = {
'talk': talk,
'talk_form': talk_form,
}
return render(request, 'get_together/speakers/edit_talk.html', context)
elif request.method == 'POST':
talk_form = UserTalkForm(request.POST, instance=talk)
talk_form.fields['speaker'].queryset = request.user.profile.speaker_set
if talk_form.is_valid():
talk = talk_form.save()
return redirect('show-talk', talk.id)
else:
context = {
'talk': talk,
'talk_form': talk_form,
}
return render(request, 'get_together/speakers/edit_talk.html', context)
return redirect('home')
@login_required
def delete_talk(request, talk_id):
talk = get_object_or_404(Talk, id=talk_id)
if not talk.speaker.user == request.user.profile:
messages.add_message(request, messages.WARNING, message=_('You can not make changes to this talk.'))
return redirect('show-talk', talk_id)
if request.method == 'GET':
form = DeleteTalkForm()
context = {
'talk': talk,
'delete_form': form,
}
return render(request, 'get_together/speakers/delete_talk.html', context)
elif request.method == 'POST':
form = DeleteTalkForm(request.POST)
if form.is_valid() and form.cleaned_data['confirm']:
talk.delete()
return redirect('user-talks')
else:
context = {
'talk': talk,
'delete_form': form,
}
return render(request, 'get_together/speakers/delete_talk.html', context)
else:
return redirect('home')
@login_required
def propose_event_talk(request, event_id):
event = get_object_or_404(Event, id=event_id)
if not event.team.is_premium:
messages.add_message(request, messages.ERROR, message=_("You can not manage talks for this event."))
return redirect(event.get_absolute_url())
if request.method == 'GET':
profile = request.user.profile
talks = list(Talk.objects.filter(speaker__user=profile))
presentations = event.presentations.all().order_by('-status')
for presentation in presentations:
if presentation.talk in talks:
talks.remove(presentation.talk)
context = {
'event': event,
'available_talks': talks,
'proposed_talks': presentations,
}
return render(request, 'get_together/speakers/list_user_presentations.html', context)
elif request.method == 'POST':
talk = get_object_or_404(Talk, id=request.POST.get('talk_id'))
new_proposal = Presentation.objects.create(
event=event,
talk=talk,
status=Presentation.PROPOSED,
start_time=event.local_start_time,
created_by=request.user.profile,
)
messages.add_message(request, messages.SUCCESS, message=_('Your talk has been submitted to the event organizer.'))
return redirect(event.get_absolute_url())
else:
        return redirect('home')
@login_required
def schedule_event_talks(request, event_id):
event = get_object_or_404(Event, id=event_id)
if not event.team.is_premium:
messages.add_message(request, messages.ERROR, message=mark_safe(_('Upgrade this team to a <a href="/about/premium">Premium</a> account to use this feature.')))
return redirect(event.get_absolute_url())
if request.method == 'POST':
presentation = get_object_or_404(Presentation, id=request.POST.get('presentation_id'))
if request.POST.get('action') == 'accept':
presentation.status = Presentation.ACCEPTED
elif request.POST.get('action') == 'decline':
presentation.status = Presentation.DECLINED
elif request.POST.get('action') == 'propose':
presentation.status = Presentation.PROPOSED
presentation.save()
context = {
'event': event,
'talks_count': event.presentations.count(),
'accepted_talks': event.presentations.filter(status=Presentation.ACCEPTED).order_by('start_time'),
'pending_talks': event.presentations.filter(status=Presentation.PROPOSED).order_by('start_time'),
'declined_talks': event.presentations.filter(status=Presentation.DECLINED).order_by('start_time'),
}
return render(request, 'get_together/events/schedule_event_talks.html', context)
| 2.09375 | 2 |
examples/task.py | Nostalgiaaa/dispatcher | 3 | 12770115 | <filename>examples/task.py
import logging
from dispatcher import init_dispatcher
from celery import Celery
app = Celery('tasks', broker='redis://localhost')
app.conf.update(broker_transport_options={'visibility_timeout': 5})
logger = logging.getLogger('test')
init_dispatcher('.', '.', logger)
| 1.960938 | 2 |
tableprint/printer.py | veya2ztn/mltool | 0 | 12770116 | # -*- coding: utf-8 -*-
"""
Table printing
A module to print and display formatted tables of data
Usage
-----
>>> data = np.random.randn(10, 3)
>>> headers = ['Column A', 'Column B', 'Column C']
>>> tableprint.table(data, headers)
"""
from __future__ import print_function, unicode_literals
import sys
from numbers import Number
from six import string_types
from .style import LineStyle, STYLES
from .utils import ansi_len, format_line, parse_width
def isnotebook():
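    """Best-effort detection of a notebook environment (Colab or Jupyter)."""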
    try:
        from google import colab  # noqa: F401 -- import only succeeds on Colab
        return True
    except ImportError:
        pass
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell':
return True # Jupyter notebook, Spyder or qtconsole
elif shell == 'TerminalInteractiveShell':
return False # Terminal running IPython
else:
return False # Other type (?)
except NameError:
return False # Probably standard Python interpreter
IN_NOTEBOOK = isnotebook()
__all__ = ('table', 'header', 'row', 'hrule', 'top', 'bottom', 'banner', 'dataframe', 'TableContext')
# Defaults
STYLE = 'round' if not IN_NOTEBOOK else 'grid'
WIDTH = 14
FMT = '5g'
ALIGN = 'right'
ALIGNMENTS = {"left": "<", "right": ">", "center": "^"}
class TableContext:
def __init__(self, headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True, out=sys.stdout):
"""Context manager for table printing
Parameters
----------
headers : array_like
A list of N strings consisting of the header of each of the N columns
width : int or array_like, optional
The width of each column in the table (Default: 11)
align : string
The alignment to use ('left', 'center', or 'right'). (Default: 'right')
style : string or tuple, optional
A formatting style. (Default: 'round')
add_hr : boolean, optional
Whether or not to add a horizontal rule (hr) after the headers
Usage
-----
>>> with TableContext("ABC") as t:
for k in range(10):
t.row(np.random.randn(3))
"""
self.out = out
self.config = {'width': width, 'style': style, 'align': align}
self.headers = header(headers, add_hr=add_hr, **self.config)
self.bottom = bottom(len(headers), width=width, style=style)
def __call__(self, data):
self.out.write(row(data, **self.config) + '\n')
self.out.flush()
def __enter__(self):
self.out.write(self.headers + '\n')
self.out.flush()
return self
def __exit__(self, *exc):
self.out.write(self.bottom + '\n')
self.out.flush()
def table(data, headers=None, format_spec=FMT, width=WIDTH, align=ALIGN, style=STYLE, out=sys.stdout):
"""Print a table with the given data
Parameters
----------
data : array_like
An (m x n) array containing the data to print (m rows of n columns)
headers : list, optional
A list of n strings consisting of the header of each of the n columns (Default: None)
format_spec : string, optional
Format specification for formatting numbers (Default: '5g')
width : int or array_like, optional
The width of each column in the table (Default: 11)
align : string
The alignment to use ('left', 'center', or 'right'). (Default: 'right')
style : string or tuple, optional
A formatting style. (Default: 'fancy_grid')
out : writer, optional
A file handle or object that has write() and flush() methods (Default: sys.stdout)
"""
# Number of columns in the table.
ncols = len(data[0]) if headers is None else len(headers)
tablestyle = STYLES[style]
    width_max_data = max([len(str(d)) for l in data for d in l] + [0])  # trailing [0] guards against empty data
width_max_head = max([len(str(d)) for d in headers]) if headers is not None else 0
width = max(width,width_max_data,width_max_head)
widths = parse_width(width, ncols)
# Initialize with a hr or the header
tablestr = [hrule(ncols, widths, tablestyle.top)] \
if headers is None else [header(headers, width=widths, align=align, style=style)]
# parse each row
tablestr += [row(d, widths, format_spec, align, style) for d in data]
# only add the final border if there was data in the table
if len(data) > 0:
tablestr += [hrule(ncols, widths, tablestyle.bottom)]
# print the table
out.write('\n'.join(tablestr) + '\n')
out.flush()
def tablist(data, headers=None, title=None,format_spec=FMT, width=WIDTH, align=ALIGN, style=STYLE, out=sys.stdout):
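    """Like ``table`` but returns the formatted lines as a list of strings,
    optionally prepending a centered title row (see ``table`` for parameters)."""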
# Number of columns in the table.
ncols = len(data[0]) if headers is None else len(headers)
tablestyle = STYLES[style]
widths = parse_width(width, ncols)
# Initialize with a hr or the header
tablestr = [hrule(ncols, widths, tablestyle.top)] if headers is None else headerlist(headers, width=widths, align=align, style=style)
# parse each row
tablestr += [row(d, widths, format_spec, align, style) for d in data]
import numpy as np
if title is not None:
        space_len = len(tablestr[0])-4
        left_pad = int(np.ceil((space_len-len(title))/2.0))
        right_pad = int(np.floor((space_len-len(title))/2.0))
        # pad with spaces directly; joining lists of empty strings dropped one space per side
        title = " "*left_pad + title + " "*right_pad
title_str = titler([title], width=len(tablestr[0])-4, style=style)
if headers is not None:
a = list(tablestr[0])
b = list(tablestr[1])
a[0]=b[0]
a[-1]=b[-1]
tablestr[0]="".join(a)
# tablestr[0][0]=tablestr[1][0]
# tablestr[0][-1]=tablestr[1][-1]
tablestr = [title_str]+tablestr
# only add the final border if there was data in the table
if len(data) > 0:
tablestr += [hrule(ncols, widths, tablestyle.bottom)]
# print the table
return tablestr
def headerlist(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True):
"""Returns a formatted row of column header strings
Parameters
----------
headers : list of strings
A list of n strings, the column headers
width : int
The width of each column (Default: 11)
style : string or tuple, optional
A formatting style (see STYLES)
Returns
-------
    headerlr : list of strings
        The top rule, the header row, and the rule below the header
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(headers))
alignment = ALIGNMENTS[align]
# string formatter
data = map(lambda x: ('{:%s%d}' % (alignment, x[0] + ansi_len(x[1]))).format(x[1]), zip(widths, headers))
# build the formatted str
headerstr = format_line(data, tablestyle.row)
if add_hr:
upper = hrule(len(headers), widths, tablestyle.top)
lower = hrule(len(headers), widths, tablestyle.below_header)
headerlr = [upper, headerstr, lower]
return headerlr
def header(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True):
"""Returns a formatted row of column header strings
Parameters
----------
headers : list of strings
A list of n strings, the column headers
width : int
The width of each column (Default: 11)
style : string or tuple, optional
A formatting style (see STYLES)
Returns
-------
headerstr : string
A string consisting of the full header row to print
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(headers))
alignment = ALIGNMENTS[align]
# string formatter
data = map(lambda x: ('{:%s%d}' % (alignment, x[0] + ansi_len(x[1]))).format(x[1]), zip(widths, headers))
# build the formatted str
headerstr = format_line(data, tablestyle.row)
if add_hr:
upper = hrule(len(headers), widths, tablestyle.top)
lower = hrule(len(headers), widths, tablestyle.below_header)
headerstr = '\n'.join([upper, headerstr, lower])
return headerstr
def titler(headers, width=WIDTH, align=ALIGN, style=STYLE, add_hr=True):
"""Returns a formatted row of column header strings
Parameters
----------
headers : list of strings
A list of n strings, the column headers
width : int
The width of each column (Default: 11)
style : string or tuple, optional
A formatting style (see STYLES)
Returns
-------
headerstr : string
A string consisting of the full header row to print
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(headers))
alignment = ALIGNMENTS[align]
# string formatter
data = map(lambda x: ('{:%s%d}' % (alignment, x[0] + ansi_len(x[1]))).format(x[1]), zip(widths, headers))
# build the formatted str
headerstr = format_line(data, tablestyle.row)
if add_hr:
upper = hrule(len(headers), widths, tablestyle.top)
#lower = hrule(len(headers), widths, tablestyle.below_header)
headerstr = '\n'.join([upper, headerstr])
return headerstr
def row(values, width=WIDTH, format_spec=FMT, align=ALIGN, style=STYLE):
"""Returns a formatted row of data
Parameters
----------
values : array_like
An iterable array of data (numbers or strings), each value is printed in a separate column
width : int
The width of each column (Default: 11)
format_spec : string
The precision format string used to format numbers in the values array (Default: '5g')
align : string
The alignment to use ('left', 'center', or 'right'). (Default: 'right')
style : namedtuple, optional
A line formatting style
Returns
-------
rowstr : string
A string consisting of the full row of data to print
"""
tablestyle = STYLES[style]
widths = parse_width(width, len(values))
assert isinstance(format_spec, string_types) | isinstance(format_spec, list), \
"format_spec must be a string or list of strings"
if isinstance(format_spec, string_types):
format_spec = [format_spec] * len(list(values))
# mapping function for string formatting
def mapdata(val):
# unpack
width, datum, prec = val
if isinstance(datum, string_types):
return ('{:%s%i}' % (ALIGNMENTS[align], width + ansi_len(datum))).format(datum)
elif isinstance(datum, Number):
return ('{:%s%i.%s}' % (ALIGNMENTS[align], width, prec)).format(datum)
else:
raise ValueError('Elements in the values array must be strings, ints, or floats')
# string formatter
data = map(mapdata, zip(widths, values, format_spec))
# build the row string
return format_line(data, tablestyle.row)
def hrule(n=1, width=WIDTH, linestyle=LineStyle('', '─', '─', '')):
"""Returns a formatted string used as a border between table rows
Parameters
----------
n : int
The number of columns in the table
width : int
The width of each column (Default: 11)
linestyle : tuple
A LineStyle namedtuple containing the characters for (begin, hr, sep, end).
(Default: ('', '─', '─', ''))
Returns
-------
rowstr : string
A string consisting of the row border to print
"""
widths = parse_width(width, n)
hrstr = linestyle.sep.join([('{:%s^%i}' % (linestyle.hline, width)).format('')
for width in widths])
return linestyle.begin + hrstr + linestyle.end
def top(n, width=WIDTH, style=STYLE):
"""Prints the top row of a table"""
return hrule(n, width, linestyle=STYLES[style].top)
def bottom(n, width=WIDTH, style=STYLE):
"""Prints the top row of a table"""
return hrule(n, width, linestyle=STYLES[style].bottom)
def banner(message, width=30, style='banner', out=sys.stdout):
"""Prints a banner message
Parameters
----------
message : string
The message to print in the banner
width : int
The minimum width of the banner (Default: 30)
style : string
A line formatting style (Default: 'banner')
out : writer
An object that has write() and flush() methods (Default: sys.stdout)
"""
out.write(header([message], width=max(width, len(message)), style=style) + '\n')
out.flush()
def dataframe(df, **kwargs):
"""Print table with data from the given pandas DataFrame
Parameters
----------
df : DataFrame
A pandas DataFrame with the table to print
"""
table(df.values, list(df.columns), **kwargs)
class summary_table_info:
def __init__(self,headers,title,rows=1):
self.headers = headers
self.title = title
col_width = len(title)//len(headers)
self.width = [max(len(t)+2,10,col_width) for t in headers]
self.width[0]= max(self.width[1:])+2
self.rows = rows
def demo(self):
headers = self.headers
title = self.title
data = [[0 for i in headers] for i in range(self.rows)]
widths = self.width
content = tablist(data,headers,title=title,width=widths)
content = "\n".join(content)
return content
def show(self,data,title=None):
title = self.title if title is None else title
headers = self.headers
widths = self.width
content = tablist(data,headers,title=title,width=widths)
return content
| 3.265625 | 3 |
main.py | huangjien/psInstance | 0 | 12770117 | <filename>main.py
import os
from fastapi import FastAPI, Body, HTTPException, status
from fastapi.responses import JSONResponse
from fastapi.encoders import jsonable_encoder
from pydantic import BaseModel, Field, EmailStr
from bson import ObjectId
from typing import Optional, List
import motor.motor_asyncio
from cachetools import cached, LRUCache, TTLCache
MONGO_DETAILS = "mongodb://localhost:27017"
app = FastAPI()
client = motor.motor_asyncio.AsyncIOMotorClient(MONGO_DETAILS)
db = client.test
class PyObjectId(ObjectId):
@classmethod
def __get_validators__(cls):
yield cls.validate
@classmethod
def validate(cls, v):
if not ObjectId.is_valid(v):
raise ValueError("Invalid objectid")
return ObjectId(v)
@classmethod
def __modify_schema__(cls, field_schema):
field_schema.update(type="string")
class SettingModel(BaseModel):
name: str = Field(..., index=True)
value: str = Field(...)
category: str = Field(..., index=True)
description: str = Field(...)
class Config:
allow_population_by_field_name = True
arbitrary_types_allowed = True
json_encoders = {ObjectId: str}
@app.post("/setting/", response_model=SettingModel)
async def create_setting(setting: SettingModel = Body(..., embed=True)):
setting = jsonable_encoder(setting)
new_setting = await db.settings.insert_one(setting)
    # look the new document up by its _id; exclude _id so the dict is JSON serializable
    created_setting = await db.settings.find_one({"_id": new_setting.inserted_id}, {"_id": 0})
return JSONResponse(status_code=status.HTTP_201_CREATED, content=created_setting)
# NOTE: cachetools' cached() does not understand coroutine functions, and stacked
# above @app.get it never wraps the registered handler, so it is a no-op here.
@cached(cache=TTLCache(maxsize=1, ttl=600))
@app.get("/setting/", response_model=List[SettingModel])
async def get_settings():
settings = await db.settings.find().to_list(None)
return settings
# NOTE: same caching caveat as above -- this decorator is likewise a no-op.
@cached(cache=TTLCache(maxsize=64, ttl=600))
@app.get("/setting/{setting_id}", response_model=SettingModel)
async def get_setting(setting_id: str):
if (setting := await db.settings.find_one({"name": setting_id})) is not None:
return setting
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Setting {setting_id} not found")
@app.get("/")
async def root():
return {"status": "SUCCESS"}
| 2.28125 | 2 |
worley_noise.py | yohei-washizaki/perlin | 1 | 12770118 | #!/usr/bin/env python3
import math
def calc_sqr_distance(a, b):
vx = a[0] - b[0]
vy = a[1] - b[1]
return vx * vx + vy * vy
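# Worley (cellular) noise lookup: scan the 3x3 block of grid cells around uv,
# wrapping at the edges so the texture tiles, and return the distance from uv
# to the nearest feature point.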
def find_nearest_distance(uv, max_size, random_points):
xf, xi = math.modf(uv[0])
yf, yi = math.modf(uv[1])
min_sqr_distance = float("inf")
for y_offset in [-1, 0, 1]:
for x_offset in [-1, 0, 1]:
x = int((xi + x_offset + max_size) % max_size)
y = int((yi + y_offset + max_size) % max_size)
idx = int(x * max_size + y)
p = random_points[idx]
other = (x_offset + xi + p[0],
y_offset + yi + p[1])
sqr_distance = calc_sqr_distance(uv, other)
if sqr_distance < min_sqr_distance:
min_sqr_distance = sqr_distance
return math.sqrt(min_sqr_distance)
if __name__ == "__main__":
import argparse
import random
from random_point import gen_random_points
from uv import gen_uv
parser = argparse.ArgumentParser()
parser.add_argument("size", type=int, help="size of a texture. power of 2")
parser.add_argument("-r", "--random_seed", type=int, help="random seed")
parser.add_argument("-s", "--scale_factor", type=float, default=1.0, help="scale factor")
args = parser.parse_args()
scale_factor = args.scale_factor
random.seed(args.random_seed)
random_points = gen_random_points(int(scale_factor*scale_factor), 1.0, random)
uvs = gen_uv(args.size, args.size, scale_factor)
for uv in uvs:
nearest_distance = find_nearest_distance(uv, scale_factor, random_points)
print(nearest_distance)
| 3.0625 | 3 |
pythonUtils/TUI_tools/automatic_questioner.py | tgquintela/pythonUtils | 1 | 12770119 |
"""
automatic_questioner
--------------------
Module which serves as a interactor between the possible database with the
described structure and which contains information about functions and
variables of other packages.
Scheme of the db
----------------
# {'function_name':
# {'variables':
# {'variable_name':
# {'question_info':
# {'qtype': ['simple_input', 'confirmation_question',
# 'selection_options', 'selection_list_options'],
# 'question_spec': 'question_spec'},
# 'default': default}},
########
# 'descendants': [{'agg_description':
# {variable_name:
# {'variable_value': 'function_name'}
# },
# 'agg_name': 'aggregated_parameter_name'}]
# }}
######## OR
# 'descendants': [{'agg_description': 'function_name'
# 'agg_name': 'aggregated_parameter_name'}]
# }}
#TODO: checker 1 function with list of functions and dicts of dicts
"""
from tui_questioner import general_questioner
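# A minimal, hypothetical example of the db layout described above (the
# function and variable names are illustrative only):
#
# example_db = {
#     'load_data': {
#         'variables': {
#             'path': {'question_info': {'qtype': 'simple_input',
#                                        'question_spec': {'question': 'File path?'}},
#                      'default': 'data.csv'}},
#         'descendants': [{'agg_description': 'parse_options',
#                          'agg_name': 'parser_pars'}]}}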
def check_quest_info(db):
"""Function which carry out the automatic checking of the database of
function and variables.
Parameters
----------
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
Returns
-------
check: boolean
returns the correctness of the database.
path: list
path of the possible error.
message: str
message of the error if it exists.
"""
## 0. Initial preset variables needed
# Function to compare lists
    def equality_elements_list(a, b):
        a = list(a.keys()) if type(a) == dict else list(a)
        b = list(b.keys()) if type(b) == dict else list(b)
        # order-insensitive comparison of the two key lists
        return sorted(a) == sorted(b)
# List of elements available in some dicts at some levels
first_level = ['descendants', 'variables']
desc_2_level = ['agg_description', 'agg_name']
vars_2_level = ['question_info', 'default']
vars_3_level = ['qtype', 'question_spec']
# Messages of errors
m0 = "The given database of functions is not a dictionary."
m1 = "The function '%s' does not have "+str(first_level)+" as keys."
m2 = "The variables of function '%s' is not a dict."
m3 = "Incorrect keys "+str(vars_2_level)+" in function %s and variable %s."
m4 = "Incorrect question_info format for function %s and variable %s."
m5 = "Not a string the 'qtype' of function %s and variable %s."
m6 = "Incorrect 'question_spec' format for function %s and variable %s."
m7 = "Descendants of the function %s is not a list."
m8 = "Elements of the list of descendants not a dict for function %s."
m9 = "Incorrect structure of a dict in descendants for function %s."
m10 = "Incorrect type of agg_description for function %s and variable %s."
m11 = "Incorrect type of agg_description for function %s."
## Check db is a dict
if type(db) != dict:
return False, [], m0
## Loop for check each function in db
for funct in db.keys():
## Check main keys:
first_bl = equality_elements_list(db[funct], first_level)
if not first_bl:
return False, [funct], m1 % funct
## Check variables
if not type(db[funct]['variables']) == dict:
check = False
path = [funct, 'variables']
message = m2 % funct
return check, path, message
for var in db[funct]['variables']:
varsbles = db[funct]['variables']
v2_bl = equality_elements_list(varsbles[var], vars_2_level)
v3_bl = equality_elements_list(varsbles[var]['question_info'],
vars_3_level)
qtype_bl = db[funct]['variables'][var]['question_info']['qtype']
qtype_bl = type(qtype_bl) != str
qspec_bl = db[funct]['variables'][var]['question_info']
qspec_bl = type(qspec_bl['question_spec']) != dict
if not v2_bl:
check = False
path = [funct, 'variables', var]
message = m3 % (funct, var)
return check, path, message
### Check question_info
if not v3_bl:
check = False
path = [funct, 'variables', 'question_info']
message = m4 % (funct, var)
return check, path, message
if qtype_bl:
check = False
path = [funct, 'variables', 'question_info', 'qtype']
message = m5 % (funct, var)
return check, path, message
if qspec_bl:
check = False
path = [funct, 'variables', 'question_info', 'question_spec']
message = m6 % (funct, var)
return check, path, message
## Check descendants
if not type(db[funct]['descendants']) == list:
check = False
path = [funct, 'descendants']
message = m7 % funct
return check, path, message
for var_desc in db[funct]['descendants']:
if not type(var_desc) == dict:
check = False
path = [funct, 'descendants']
message = m8 % funct
return check, path, message
d2_bl = equality_elements_list(var_desc.keys(), desc_2_level)
if not d2_bl:
check = False
path = [funct, 'descendants']
message = m9 % funct
return check, path, message
if type(var_desc['agg_description']) == str:
pass
elif type(var_desc['agg_description']) == dict:
for varname in var_desc['agg_description']:
if not type(var_desc['agg_description'][varname]) == dict:
check = False
path = [funct, 'descendants', 'agg_description']
message = m10 % (funct, varname)
return check, path, message
else:
check = False
path = [funct, 'descendants', 'agg_description']
message = m11 % funct
return check, path, message
return True, [], ''
def automatic_questioner(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = automatic_questioner(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
                    for i in range(len(choosen_values[var])):
                        val = choosen_values[var][i]
                        fn = var_desc['agg_description'][var][val]
                        # fall back to an empty dict when no previous choice exists
                        prev = aggvarval[i] if i < len(aggvarval) else {}
                        aux = automatic_questioner(fn, db, prev)
                        choosen_values[agg_param].append(aux)
return choosen_values
def get_default(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get aggregated variables (descendants)
for var_desc in data_f['descendants']:
# Possible variables and aggregated parameter name
agg_description = var_desc['agg_description']
agg_param = var_desc['agg_name']
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
## Without dependant variable
if type(agg_description) == str:
# Obtain function name
fn = choosen_values[agg_param]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## With dependant variable
elif type(agg_description) == dict:
for var in var_desc['agg_description']:
if not var in choosen_values:
raise Exception(m1)
## Give a list and return a dict in the aggparam variable
elif type(choosen_values[var]) == str:
# Obtain function name
fn = var_desc['agg_description'][var][choosen_values[var]]
# Recurrent call
aux = get_default(fn, db, aggvarval)
# Aggregate to our values
choosen_values[agg_param] = aux
## Give a list and return a list in the aggparam variable
elif type(choosen_values[var]) == list:
choosen_values[agg_param] = []
aggvarval = [] if type(aggvarval) != list else aggvarval
                    for i in range(len(choosen_values[var])):
                        val = choosen_values[var][i]
                        fn = var_desc['agg_description'][var][val]
                        # fall back to an empty dict when no previous choice exists
                        prev = aggvarval[i] if i < len(aggvarval) else {}
                        aux = get_default(fn, db, prev)
                        choosen_values[agg_param].append(aux)
return choosen_values
###############################################################################
###############################################################################
###############################################################################
def get_default3(function_name, db, choosen={}):
"""Function which returns a dictionary of choosen values by default.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
-----
TODO: Possibility of being integrated with authomatic_questioner after
testing.
"""
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Get the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
default = data_f['variables'][var]['default']
choosen_values[var] = default
# Get the aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
                results = []
                i = 0
                for val in value:
                    # Obtain function_name
                    f_name = vars_values[var][val]
                    # Recurrent call (the "3" variant matches this schema)
                    aux = get_default3(f_name, db, aggvarval[i])
                    # Insert in the correspondent list
                    results.append(aux)
                    i += 1
                choosen_values[agg_param] = results
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
                choosen_values[agg_param] = get_default3(f_name, db, aggvarval)
return choosen_values
def automatic_questioner3(function_name, db, choosen={}):
"""Function which carry out the automatic questioning task.
Parameters
----------
function_name: str
the function for which we are interested in their params in order to
call it.
db: dict
the dictionary of all the information about the system with all its
functions and dependencies between them in order to ask for their
variables authomatically.
choosen: dict
previous choosen parameters. The function will avoid to ask for the
pre-set parameters.
Returns
-------
choosen_values: dict
the selected values which are disposed to input in the function we want
to call.
"""
## Initialize variables needed
m1 = "Not value for a variables in order to create aggregate variables."
choosen_values = choosen
if function_name in db.keys():
data_f = db[function_name]
else:
# Better raise error?
return choosen_values
# Put the variables
for var in data_f['variables'].keys():
# Put the variables if there are still not selected
if var not in choosen_values.keys():
question = data_f['variables'][var]['question_info']
choosen_values[var] = general_questioner(**question)
# Put aggregated variables (descendants)
for i in range(len(data_f['descendants'])):
# Possible variables and aggregated parameter name
vars_values = data_f['descendants'][i]['variable_values']
agg_param = data_f['descendants'][i]['parameters']
variables = vars_values.keys()
# prepare possible input for existant aggregated value in choosen
ifaggvar = agg_param in choosen_values
aggvarval = choosen_values[agg_param] if ifaggvar else {}
for var in variables:
# boolean variables
value = choosen_values[var]
iflist = type(value) == list
ifvars = var in choosen_values.keys()
# if we have to return a list
if ifvars and iflist:
# Initialization values
n = len(value)
aggvarval = aggvarval if ifaggvar else [{} for i in range(n)]
                results = []
                i = 0
                for val in value:
                    # Obtain function_name
                    f_name = vars_values[var][val]
                    # Recurrent call (fixed typo: the "3" variant matches this schema)
                    aux = automatic_questioner3(f_name, db, aggvarval[i])
                    # Insert in the correspondent list
                    results.append(aux)
                    i += 1
                choosen_values[agg_param] = results
# if we have to return a dict
elif ifvars and not iflist:
# Obtain function_name
f_name = vars_values[var][value]
# Recurrent call
                choosen_values[agg_param] = automatic_questioner3(f_name, db,
                                                                  aggvarval)
return choosen_values
| 2.890625 | 3 |
opencenter/backends/nova/psbk_manager.py | oolorg/opencenter | 0 | 12770120 | <reponame>oolorg/opencenter<gh_stars>0
import ool_rm_if
import pica8_if
import pfs_if
import riava_if
import centec_if
import datetime
import pslist_backup
import commands
import os
# Physical Switch Backup/Restore Manager
TST_FLAG='OFF'
DBG_FLAG='ON'
TRACE_TITLE='PSBK_MANAGER:%s'
EXCEPT_TITLE='PSBK_MANAGER:%s'
AUTH_KEY='xxx'
BACKDIR="/backup/"
PY_PATH=os.path.abspath(os.path.dirname(__file__))
TST_FILE='psbk_manager_tst.cnf'
MODEL_KEY='A_MODEL='
MAC_KEY='A_MAC='
IP_KEY='A_IP='
UID_KEY='A_UID='
UPW_KEY='A_UPW='
PICA8_KEY1="P-3290"
PICA8_KEY2="P-3295"
RIAVA_KEY1="<KEY>"
PFS_KEY1="PF5240"
CENTEC_KEY1="V350"
#-------------------------------------------------------
class psbk_manager:
def __init__(self, Local_User, BK_Host, BK_Dir, ObjLog):
self.auth =""
self.a_device_name={}
self.a_model={}
self.a_mac={}
self.a_ip={}
self.a_uid={}
self.a_upw={}
self.a_host=[]
self.ps_list=''
self.ori=ool_rm_if.ool_rm_if()
self.local_user= Local_User
self.bk_host= BK_Host
self.bk_dir = BK_Dir
self.logger = ObjLog
self.ori.set_auth(AUTH_KEY)
def __get_BK_info__(self):
node_data=self.ori.get_node(self.bk_host)
nic_data=self.ori.get_nic_traffic_info(self.bk_host, "M-Plane")
if -1 != node_data[0]:
node_data1={}
node_data1=node_data[1]
self.bk_uid = node_data1['user_name']
self.bk_upw = node_data1['password']
nic_data1={}
nic_data1=nic_data[1][0]
self.bk_uip = nic_data1['ip_address']
else:
self.__except_log__(node_data[1])
return -1
return 0
def __trace_log__(self, log):
if 'ON' == DBG_FLAG:
self.logger.debug(TRACE_TITLE %(log))
return 0
def __except_log__(self, log):
self.logger.debug(EXCEPT_TITLE %(log))
return 0
def __get_SW_info__(self):
self.__trace_log__('get_PS_info IN')
self.a_host = self.ps_list.split(",")
        # Get Physical switch information from resource manager
for i in range(0, len(self.a_host)):
self.ori.set_auth(self.auth)
data_sw=self.ori.get_switch(self.a_host[i])
if -1 != data_sw[0]:
pfs_info={}
pfs_info=data_sw[1]
self.a_model[i]=pfs_info["product_name"]
self.a_mac[i]=pfs_info["mac_address"]
self.a_ip[i]=pfs_info["ip_address"]
self.a_uid[i]=pfs_info["user_name"]
self.a_upw[i]=pfs_info["password"]
else:
self.__except_log__('<url access error>')
return -1
if TST_FLAG == 'ON':
tst_file= '%s/%s' % (PY_PATH, TST_FILE)
try:
f=open(tst_file, 'r')
except Exception, e:
self.__except_log__(str(e))
return -1
for line in f:
if line.find(MODEL_KEY) != -1:
                    self.a_model=line[line.find(MODEL_KEY)+len(MODEL_KEY):-1]
if line.find(MAC_KEY) != -1:
self.a_mac=line[line.find(MAC_KEY)+len(MAC_KEY):-1]
if line.find(IP_KEY) != -1:
self.a_ip=line[line.find(IP_KEY)+len(IP_KEY):-1]
if line.find(UID_KEY) != -1:
self.a_uid=line[line.find(UID_KEY)+len(UID_KEY):-1]
if line.find(UPW_KEY) != -1:
self.a_upw=line[line.find(UPW_KEY)+len(UPW_KEY):-1]
self.__trace_log__('get_PS_info OUT')
return 0
def set_PS_list(self, PS_list):
self.__trace_log__('set_PS_list IN')
self.ps_list = PS_list
self.__trace_log__('set_PS_list OUT')
return 0
def set_auth(self, auth):
self.__trace_log__('set_auth IN')
self.auth=auth
self.ori.set_auth(auth)
self.__trace_log__('set_auth OUT')
def exec_backup(self):
self.__trace_log__('exec_backup IN')
ret=self.__get_SW_info__()
if -1==ret:
self.__except_log__('get_PS_info error')
return -1
if -1 ==self.__get_BK_info__():
return -1
print "Start Backup of Physical Switch"
for i in range(0, len(self.a_host)):
bk_dir_tmp=self.bk_dir.replace(".", ":")
# backup procedure
if ((True == self.a_model[i].startswith(PICA8_KEY1)) or (True == self.a_model[i].startswith(PICA8_KEY2))):
bk=pica8_if.pica8_if()
elif (True == self.a_model[i].startswith(RIAVA_KEY1)):
bk=riava_if.riava_if()
elif (True == self.a_model[i].startswith(PFS_KEY1)):
bk=pfs_if.pfs_if()
elif (True == self.a_model[i].startswith(CENTEC_KEY1)):
bk=centec_if.centec_if()
else:
self.__except_log__('Physical Switch info err %s' % self.a_model[i])
return -1
bk.set_auth(self.auth)
bk.set_host_name(self.a_host[i])
bk.set_logger(self.logger)
ret=bk.set_bksrv(self.bk_host, BACKDIR + bk_dir_tmp)
if ret == -1:
self.__except_log__('backup process error')
return -1
ret=bk.exec_backup()
if ret == -1:
self.__except_log__('backup process error')
return -1
print "Finish Backup of Physical Switch"
# Save Physical Switch list
d = datetime.datetime.today()
tm= d.strftime("%H%M%S")
TMP_FILE='/tmp/ps_' + tm
try:
f=open(TMP_FILE, 'w')
except Exception, e:
self.__except_log__('pfs file error(backup)' + str(e))
return -1
else:
f.write(self.ps_list)
f.close()
psl=pslist_backup.pslist_backup(self.local_user, self.bk_uip, self.bk_uid, self.bk_upw)
psl.set_logger(self.logger)
ret = psl.set_pslist(TMP_FILE, self.bk_dir)
if 0 != ret:
self.__except_log__('pslist copy err')
print 'pslist copy err'
return -1
commands.getoutput('rm ' + TMP_FILE)
self.__trace_log__('exec_backup OUT')
return 0
def exec_restore(self):
self.__trace_log__('exec_restore IN')
ret=self.__get_SW_info__()
if -1==ret:
self.__except_log__('get_PS_info error')
return -1
if -1 ==self.__get_BK_info__():
return -1
# Load Physical Switch list
d = datetime.datetime.today()
tm= d.strftime("%H%M%S")
TMP_FILE='/tmp/ps_' + tm
psl=pslist_backup.pslist_backup(self.local_user, self.bk_uip, self.bk_uid, self.bk_upw)
psl.set_logger(self.logger)
ret = psl.get_pslist(TMP_FILE, self.bk_dir)
if 0 != ret:
self.__except_log__('pslist copy err')
print 'pslist copy err'
return -1
pslist=''
try:
f=open(TMP_FILE, 'r')
except Exception, e:
self.__except_log__('psl file error(restore)' + str(e))
return -1
else:
for line in f:
pslist=line
f.close()
commands.getoutput('rm ' + TMP_FILE)
# Check Physical Switch list
for key in range(0, len(self.a_host)):
if -1 == pslist.find(self.a_host[key]):
self.__except_log__('different physical switch list' + self.a_host[key])
                print 'physical switch list differs between backup and restore'
return -1
print "Start Restore of Physical Switch"
for i in range(0, len(self.a_host)):
bk_dir_tmp=self.bk_dir.replace(".", ":")
# restore procedure
if ((True == self.a_model[i].startswith(PICA8_KEY1)) or (True == self.a_model[i].startswith(PICA8_KEY2))):
rst=pica8_if.pica8_if()
elif (True == self.a_model[i].startswith(RIAVA_KEY1)):
rst=riava_if.riava_if()
elif (True == self.a_model[i].startswith(PFS_KEY1)):
rst=pfs_if.pfs_if()
elif (True == self.a_model[i].startswith(CENTEC_KEY1)):
rst=centec_if.centec_if()
else:
self.__except_log__('Physical Switch info err %s' % self.a_model[i])
return -1
rst.set_auth(self.auth)
rst.set_host_name(self.a_host[i])
rst.set_logger(self.logger)
ret=rst.set_bksrv(self.bk_host, BACKDIR + bk_dir_tmp)
if ret == -1:
self.__except_log__('backup process error')
return -1
ret = rst.exec_restore()
if ret == -1:
self.__except_log__('restore process error')
return -1
print "Finish Restore of Physical Switch"
#put configuration to resource manager
self.__trace_log__('exec_restore OUT')
return 0
| 2.0625 | 2 |
env/Lib/site-packages/libarchive-0.4.7/libarchive/constants/__init__.py | dondemonz/RestApi | 94 | 12770121 | <reponame>dondemonz/RestApi
from libarchive.constants.archive import *
from libarchive.constants.archive_entry import *
| 1.0625 | 1 |
setup.py | shridarpatil/frappe_private_attachments | 4 | 12770122 | <reponame>shridarpatil/frappe_private_attachments<gh_stars>1-10
from setuptools import setup, find_packages
with open('requirements.txt') as f:
install_requires = f.read().strip().split('\n')
# get version from __version__ variable in frappe_private_attachments/__init__.py
from frappe_private_attachments import __version__ as version
setup(
name='frappe_private_attachments',
version=version,
description='Make all attachments as private',
author='Shridhar',
author_email='<EMAIL>',
packages=find_packages(),
zip_safe=False,
include_package_data=True,
install_requires=install_requires
)
| 1.421875 | 1 |
mopidy_raspberry_gpio/__init__.py | stef-tel/mopidy-raspberry-gpio_stef | 0 | 12770123 | from __future__ import unicode_literals
import logging
import os
from mopidy import config, ext
from .pinconfig import PinConfig
__version__ = "0.0.2"
logger = logging.getLogger(__name__)
class Extension(ext.Extension):
dist_name = "Mopidy-Raspberry-GPIO"
ext_name = "raspberry-gpio"
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), "ext.conf")
return config.read(conf_file)
def get_config_schema(self):
schema = super(Extension, self).get_config_schema()
for pin in range(28):
schema["bcm{:d}".format(pin)] = PinConfig()
return schema
def setup(self, registry):
from .frontend import RaspberryGPIOFrontend
registry.add("frontend", RaspberryGPIOFrontend)
| 2.1875 | 2 |
python_structure/data_structures/lists_tuples_dictionaries/list_defs.py | bangyen/pascal-triangle | 0 | 12770124 | <gh_stars>0
from python_structure.data_structures.stacks_queues.stack import Stack
star_wars_movies = ["Episode I - The Phantom Menace",
"Episode II – Attack of the Clones",
"Episode III – Revenge of the Sith",
"Episode IV – A New Hope",
"Episode V – The Empire Strikes Back",
"Episode VI – Return of the Jedi",
"Episode VII – The Force Awakens",
"Episode VIII – The Last Jedi",
"Episode IX – The Rise of Skywalker",
"Rogue One: A Star Wars Story",
"Solo: A Star Wars Story"]
rating_movies = ["I love Episode I",
"Episode II is nice",
"Episode III is quite good",
"Episode IV is super cool",
"Episode V is wonderful",
"Episode VI is super bad",
"I felt asleep during Episode VII",
"I watched Episode VIII two times",
"I didn't want to see Episode IX",
"Rogue One is a nice film",
"I didn't see Solo"]
num_rating_movies = [2, 3, 5, 10, 4, 1, 8, 9, 3, 2, 5]
def list_declaration_1(a="Mozart", b=1.2):
"""
Lists are mutable
:param a: optional parameter
:param b: optional parameter
:return: a declared list
"""
artists = [a, b, "Caparezza", "Lety", "Pit", "Virgi"]
print(artists)
return artists
def list_append(my_list: list):
user_input = input("Item to append to the list: ")
my_list.append(user_input)
print(my_list)
return my_list
def list_sum(my_list: list):
print("\nSum the parameter's list with another list")
random_list = list("H7")
random_list.append(45)
sum_l = my_list + random_list
print(sum_l)
return sum_l
def list_change_element(my_list: list, index, new_element="default"):
print("\nChange a list element with another one at a given index")
my_list[index] = new_element
return print(my_list)
def combine_two_lists(list_1, list_2):
"""
Use the built-in 'zip' function: it takes two lists and
combines the fist two elements into a tuple, than make the
same thing for all the elements of the lists
:param list_1:
:param list_2:
:return:
"""
my_list = []
for i in zip(list_1, list_2):
my_list.append(i)
return my_list
"""
List Comprehension
Allows creating lists based on criteria applied to existing lists.
1) Iterates
2) Processes
3) Filters
"""
def string_to_list(word):
"""
Take each character of the 'word' parameter and return a list
populated with all the characters
:param word: string
:return: list of string's characters
"""
return [c for c in word]
def string_to_list_only_digit(word):
"""
Take each character of the 'word' parameter and return a list
populated with all the characters. The if clause allows only
the digits to be added to the list.
:param word: string
:return: list of digit inside 'word'
"""
return [c for c in word if c.isdigit()]
def string_to_list_only_last_digit(word):
"""
The negative index allows to select only the last digit
from the new list
:param word: parameter
:return: list
"""
return [c for c in word if c.isdigit()][-1]
def multiply_list(multiplier, input_list):
return [i * multiplier for i in input_list]
def list_intersection(list1, list2):
"""
Check if each value in list1 is also in list2
:param list1:
:param list2:
:return: a list containing the elements common to both the lists
"""
return [value for value in list1 if value in list2]
def set_intersection(list1, list2):
"""
    Uses the built-in 'intersection()' method from a 'set()' object.
    'intersection()' can take more than two sets as parameters.
:param list1:
:param list2:
:return:
"""
set1 = set(list1)
set2 = set(list2)
return list(set1.intersection(set2))
def find_the_single_element(my_list):
"""
Given a non-empty list of integers, every element appears twice except for one. Find that single one.
:param my_list:
:return:
"""
duplicates = []
my_set = set()
for i in my_list:
length_one = len(my_set)
my_set.add(i)
length_two = len(my_set)
if length_one == length_two:
duplicates.append(i)
return [i for i in my_list + duplicates if i not in my_list or i not in duplicates]
# list_dif = []
# for i in my_list + duplicates:
# if i not in my_list or i not in duplicates:
# list_dif.append(i)
# return list_dif
def find_the_single_element_with_dict(my_list):
count = {}
for i in my_list:
if i not in count:
count[i] = 1
else:
count[i] += 1
for key, value in count.items():
if value == 1:
return key
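
def find_the_single_element_with_xor(my_list):
    """
    Added sketch of a common constant-space alternative: XOR all values
    together so paired elements cancel out, leaving the single element.
    Assumes exactly one element appears once and all others exactly twice.
    """
    result = 0
    for i in my_list:
        result ^= i
    return result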
def reverse_list(my_list):
"""
Reverse the given list by adding each element to a stack with a for-loop.
Then use the 'pop' function of the stack for each element of the stack
to get the items from the last to the first and append to a new list.
:param my_list: a list
:return: reversed list
"""
stack = Stack()
for i in my_list:
stack.push(i)
reversed_list = []
for i in range(stack.size()):
reversed_list.append(stack.pop())
return reversed_list
if __name__ == '__main__':
main_artist = list_declaration_1("Aladino")
# list_append(main_artist)
s_list = list_sum(main_artist)
list_change_element(s_list, 4)
print("\n")
print(combine_two_lists(star_wars_movies, rating_movies))
my_s = "You 2 are good to be 43."
print(string_to_list(my_s))
print(string_to_list_only_digit(my_s))
print(string_to_list_only_last_digit(my_s))
my_n = [1, 7, 5, 3, 2]
print(multiply_list(7, my_n))
my_n2 = [1, 4, 3, 8]
print(list_intersection(my_n, my_n2))
print(set_intersection(my_n, my_n2))
# find_the_single_element
duplicates_list = [1, 2, 3, 2, 3, 1, 4]
print(find_the_single_element(duplicates_list))
print(find_the_single_element_with_dict(duplicates_list))
print(reverse_list(my_n2))
| 3.5625 | 4 |
smart_meter/urls/user_meter_urls.py | GPXenergy/gpx_server_api | 0 | 12770125 | <reponame>GPXenergy/gpx_server_api<filename>smart_meter/urls/user_meter_urls.py
from django.urls import path, include
from smart_meter.views import UserMeterListView, UserMeterDetailView, GroupMeterDetailView, \
GroupMeterListView, MeterGroupParticipationDetailView, MeterParticipationListView, \
PowerMeasurementListView, GasMeasurementListView, SolarMeasurementListView
# urls under /users/<user_pk>/meters/...
urlpatterns = [
path('', UserMeterListView.as_view(), name='user_meter_list'),
path('<int:pk>/', UserMeterDetailView.as_view(), name='user_meter_detail'),
path('<int:meter_pk>/', include([
path('power/', PowerMeasurementListView.as_view(), name='power_measurement_list'),
path('gas/', GasMeasurementListView.as_view(), name='gas_measurement_list'),
path('solar/', SolarMeasurementListView.as_view(), name='solar_measurement_list'),
])),
path('groups/', GroupMeterListView.as_view(), name='group_meter_list'),
path('groups/<int:pk>/', GroupMeterDetailView.as_view(), name='group_meter_detail'),
path('participation/', MeterParticipationListView.as_view(), name='meter_participation_list'),
path('participation/<int:pk>/', MeterGroupParticipationDetailView.as_view(), name='meter_participation_detail'),
]
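# Illustrative resolved paths, assuming this list is mounted under
# /users/<user_pk>/meters/ as noted above (the pk values are examples):
#   /users/1/meters/2/power/  -> power_measurement_list
#   /users/1/meters/groups/   -> group_meter_list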
| 2.09375 | 2 |
vms/cspcevents/models.py | denisemauldin/django-vms | 1 | 12770126 | from django.db import models
from schedule.models import Event, EventRelation, Calendar
from vms.locations.models import Location
# Create your models here.
class CSPCEvent(Event):
event_location = models.ForeignKey(Location, default=1)
| 1.867188 | 2 |
project/app/main.py | pombreda/tipfy | 23 | 12770127 | # -*- coding: utf-8 -*-
"""WSGI app setup."""
import os
import sys
# Add lib as primary libraries directory, with fallback to lib/dist
# and optionally to lib/dist.zip, loaded using zipimport.
lib_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'lib')
if lib_path not in sys.path:
sys.path[0:0] = [
lib_path,
os.path.join(lib_path, 'dist'),
os.path.join(lib_path, 'dist.zip'),
]
from tipfy.app import App
from config import config
from urls import rules
def enable_appstats(app):
"""Enables appstats middleware."""
from google.appengine.ext.appstats.recording import \
appstats_wsgi_middleware
app.dispatch = appstats_wsgi_middleware(app.dispatch)
def enable_jinja2_debugging():
"""Enables blacklisted modules that help Jinja2 debugging."""
if not debug:
return
from google.appengine.tools.dev_appserver import HardenedModulesHook
HardenedModulesHook._WHITE_LIST_C_MODULES += ['_ctypes', 'gestalt']
# Is this the development server?
debug = os.environ.get('SERVER_SOFTWARE', '').startswith('Dev')
# Instantiate the application.
app = App(rules=rules, config=config, debug=debug)
enable_appstats(app)
enable_jinja2_debugging()
def main():
app.run()
if __name__ == '__main__':
main()
| 1.960938 | 2 |
models/shapley_lstm.py | 96-Zachary/PGCD-for-ABSA | 3 | 12770128 | # -*- coding: utf-8 -*-
from layers.dynamic_rnn import DynamicLSTM
from layers.shap import Distribution_SHAP, Map_SHAP
import torch
import torch.nn as nn
import numpy as np
class SHAP_LSTM(nn.Module):
def __init__(self, embedding_matrix, opt):
super(SHAP_LSTM, self).__init__()
self.opt = opt
self.embed_dim = embedding_matrix.shape[-1]
self.embed = nn.Embedding.from_pretrained(torch.tensor(embedding_matrix, dtype=torch.float))
self.lstm = DynamicLSTM(opt.embed_dim, opt.hidden_dim, num_layers=1, batch_first=True)
self.shap = Distribution_SHAP(self.opt.max_seq_len, self.opt.polarities_dim, opt)
self.map_shap = Map_SHAP(opt.embed_dim, opt.max_seq_len, opt)
self.dense = nn.Linear(opt.hidden_dim, opt.polarities_dim)
def forward(self, inputs, label, weights, update=True):
text_raw_indices, aspect_indices = inputs[0], inputs[1]
x = self.embed(text_raw_indices)
x_len = torch.sum(text_raw_indices != 0, dim=-1)
aspect_idx = torch.tensor(torch.eq(text_raw_indices, aspect_indices[:, 0].reshape((-1, 1))),
dtype=torch.float)
aspect_pos_idx = [np.where(aspect_idx[i, :] == 1)[0] for i in range(len(aspect_idx))]
if update:
H_N, (h_n, _) = self.lstm(x, x_len)
weights = self.shap(text_raw_indices, aspect_indices, label, H_N, weights, self.dense)
out = self.dense(h_n[0])
return out, weights
else:
if len(weights) != 0:
x = self.map_shap(x, aspect_pos_idx, weights)
else:
pass
_, (h_n, _) = self.lstm(x.to(self.opt.device), x_len)
out = self.dense(h_n[0])
return out
| 2.390625 | 2 |
lib/solutions/SUM/sum_solution.py | DPNT-Sourcecode/CHK-rjxh01 | 0 | 12770129 | # noinspection PyShadowingBuiltins,PyUnusedLocal
def compute(x, y):
"""The problem sets the parameters as integers in the range 0-100.
We'll raise an exception if we receive a type other than int, or if the value
of that int is not in the right range"""
if type(x) != int or type(y) != int:
raise TypeError('The types of both arguments must be ints')
if x < 0 or x > 100 or y < 0 or y > 100:
raise ValueError('The value of each argument must be in the range 0-100')
return x+y
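
# Added usage sketch of the contract described in the docstring:
#   compute(2, 3)    -> 5
#   compute(2.5, 3)  -> raises TypeError (non-int argument)
#   compute(101, 3)  -> raises ValueError (outside the 0-100 range)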
| 3.734375 | 4 |
configs/baselines/DACN/utils.py | vivek-r-2000/BoundaryNet | 17 | 12770130 | import cv2
import numpy as np
from scipy.ndimage.morphology import distance_transform_cdt
import torch
from skimage.io import imsave
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def get_edge_mask(poly, mask):
"""
Generate edge mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
# cv2.fillPoly(mask, np.int32([gt_poly]),[255])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_poly_mask(poly, mask):
"""
    Generate a filled polygon mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
# cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
cv2.fillPoly(mask, np.int32([gt_poly]),[1])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_original_mask(poly, mask):
"""
    Generate a filled polygon mask for the original polygon
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
# print(gt_poly[:,0], gt_poly[:,1])
# cv2.polylines(mask, np.int32([gt_poly]),True,[1], thickness = 1)
cv2.fillPoly(mask, np.int32([gt_poly]),[1])
# imsave("./test33/"+str(poly.shape[0])+"edge.jpg",mask[0])
# imsave("./test33/"+str(poly.shape[0])+"edgegt.jpg",mask[1])
return mask
def get_fp_mask(poly,mask):
h = mask.shape[0]
w = mask.shape[1]
x = np.int32(np.floor(poly[0,0]*w))
y = np.int32(np.floor(poly[0,1]*h))
mask[y,x] = 1.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y,x+1] = 1.0
# mask[y,x-1] = 1.0
# mask[y+1,x] = 1.0
# mask[y-1,x] = 1.0
return mask
def get_vertices_mask(poly, mask):
"""
Generate a vertex mask
"""
h = mask.shape[0]
w = mask.shape[1]
gt_poly = np.zeros((poly.shape[0],poly.shape[1]),np.int32)
gt_poly[:,0] = np.floor(poly[:,0]*w)
gt_poly[:,1] = np.floor(poly[:,1]*h)
mask[gt_poly[:, 1], gt_poly[:, 0]] = 1.0
return mask
def get_previous_mask(poly,mask,t):
mask = torch.zeros(1, 1, 25, 60, device=device)
h = 25
w = 60
x = np.int32(np.floor(poly[0,t,0]*w))
y = np.int32(np.floor(poly[0,t,1]*h))
mask[0,0,y,x] = 1
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[0,0,y,x+1] = 1.0
# mask[0,0,y,x-1] = 1.0
# mask[0,0,y+1,x] = 1.0
# mask[0,0,y-1,x] = 1.0
return mask
def get_instance_mask(poly,mask):
h = 25
w = 60
masks = []
for tr in range(poly.shape[0]):
# print(poly[tr,0],poly[tr,1])
x = np.int32(np.floor(poly[tr,0]*w))
y = np.int32(np.floor(poly[tr,1]*h))
# print(y,x)
mask[y,x] = 1.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y,x+1] = 1.0
# mask[y,x-1] = 1.0
# mask[y+1,x] = 1.0
# mask[y-1,x] = 1.0
mask1 = mask.flatten()
if(tr == poly.shape[0]-1):
mask1 = np.append(mask1,[1.0])
else:
mask1 = np.append(mask1,[0.0])
masks.append(mask1)
# print(y,x)
mask[y,x] = 0.0
# if(y<=14 and x<=190 and x>=1 and y>=1):
# mask[y+1,x] = 0.0
# mask[y-1,x] = 0.0
# mask[y,x+1] = 0.0
# mask[y,x-1] = 0.0
return np.asarray(masks, dtype=np.float32)
def class_to_grid(poly, out_tensor):
"""
NOTE: Torch function
accepts out_tensor to do it inplace
poly: [batch, ]
out_tensor: [batch, 1, grid_size, grid_size]
"""
    # Remove old state of out_tensor
    out_tensor.zero_()
    b = 0
    for i in poly:
        if i < 16 * 192:  # values >= 16*192 encode the EOS token
            # the grid is 16 rows x 192 columns, so for a flattened
            # index i: column = i % 192 and row = i // 192
            x = (i % 192).long()
            y = (i // 192).long()
            out_tensor[b, 0, y, x] = 1
        b += 1
return out_tensor
def dt_targets_from_class(poly, dt_threshold):
    """
    NOTE: numpy function!
    poly: [bs, time_steps], each value in [0, 16*192 + 1)
    (the grid is hardcoded to 16 x 192 in this version)
dt_threshold: threshold for smoothing in dt targets
returns:
full_targets: [bs, time_steps, grid_size**2+1] array containing
dt smoothed targets to be used for the polygon loss function
"""
full_targets = []
for b in range(poly.shape[0]):
targets = []
for p in poly[b]:
t = np.zeros(16*192+1, dtype=np.int32)
t[p] += 1
if p != 16*192:#EOS
spatial_part = t[:-1]
spatial_part = np.reshape(spatial_part, [16, 192, 1])
# Invert image
spatial_part = -1 * (spatial_part - 1)
# Compute distance transform
spatial_part = distance_transform_cdt(spatial_part, metric='taxicab').astype(np.float32)
# Threshold
spatial_part = np.clip(spatial_part, 0, dt_threshold)
# Normalize
spatial_part /= dt_threshold
# Invert back
spatial_part = -1. * (spatial_part - 1.)
spatial_part /= np.sum(spatial_part)
spatial_part = spatial_part.flatten()
t = np.concatenate([spatial_part, [0.]], axis=-1)
targets.append(t.astype(np.float32))
full_targets.append(targets)
return np.array(full_targets, dtype=np.float32)
# def class_to_grid(poly, out_tensor):
# """
# NOTE: Torch function
# accepts out_tensor to do it inplace
# poly: [batch, ]
# out_tensor: [batch, 1, grid_size, grid_size]
# """
# out_tensor.zero_()
# # Remove old state of out_tensor
# b = 0
# for i in poly:
# if i < 16 * 192:
# x = (i%192).long()
# y = (i/16).long()
# out_tensor[b,0,y,x] = 1
# b += 1
# return out_tensor | 2.265625 | 2 |
tests/utils/suite_writer/test_container.py | kbh2o/slash | 70 | 12770131 | from .element import Element
class SuiteWriterTestContainer(Element):
def __init__(self, suite):
super(SuiteWriterTestContainer, self).__init__(suite)
self._tests = []
@property
def tests(self):
return list(self._tests)
def get_num_tests(self):
return len(self._tests)
| 2.5 | 2 |
jboss_pwd_hash.py | rawiriblundell/scripts | 5 | 12770132 | <filename>jboss_pwd_hash.py
#!/bin/env python
# A very simple python script to generate jboss/wildfly password hashes
# Requires a username, realm and password
#
# In jboss, these three are hashed as follows (from add-user.sh):
# "
# By default the properties realm expects the entries to be in the format: -
# username=HEX( MD5( username ':' realm ':' password))
# "
# This script does that, just using python
#
# No sanity checking etc is provided
# Author: Too embarrassed to put his/her name to this
# Reviewers: Please fix at your earliest convenience
# Date: 20180919
import sys, hashlib
# Rudimentary help system
if len(sys.argv) != 4:
print('Usage: ./jboss_pwd_hash.py username realm password')
sys.exit()
# Join the positional parameters (username, realm, password) with colons;
# joining with ':' directly avoids corrupting a password that contains spaces
triplet = ':'.join(sys.argv[1:])
# md5 our variable, then hex it (encode first so this also runs under Python 3)
md5sum = hashlib.md5(triplet.encode()).hexdigest()
# Finally, print out what we've generated
print(md5sum)
# The below is here for debugging/testing
#md5sum = hashlib.md5("testUserOne:ApplicationRealm:testPasswordOne".encode()).hexdigest(); print(md5sum)
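# Example invocation (the credentials below are illustrative only):
#   ./jboss_pwd_hash.py testUserOne ApplicationRealm testPasswordOne
# prints the hex MD5 of "testUserOne:ApplicationRealm:testPasswordOne"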
| 2.40625 | 2 |
__scraping__/oldnavy.gap.com/main.py | whitmans-max/python-examples | 140 | 12770133 | #!/usr/bin/env python3
# date: 2016.11.24 (update: 2020.06.13)
# https://stackoverflow.com/questions/40777864/retrieving-all-information-from-page-beautifulsoup/
from selenium import webdriver
from bs4 import BeautifulSoup
import time
# --- get page ---
link = 'http://oldnavy.gap.com/browse/category.do?cid=1035712&sop=true'
#driver = webdriver.PhantomJS() # deprecated
driver = webdriver.Firefox()
driver.get(link)
time.sleep(3)
# --- scrolling ---
#size = driver.get_window_size()
#print(size)
#window_height = size['height']
#print('window_height:', window_height) # webpage + toolbars + border
# https://stackoverflow.com/questions/1248081/how-to-get-the-browser-viewport-dimensions
# this may give too big a value because it includes the scrollbar's height (i.e. 962 = 950+22)
#viewport_height = driver.execute_script('return window.innerHeight;')
#print('viewport_height:', viewport_height)
# this gives the correct value without the scrollbar (i.e. 950)
viewport_height = driver.execute_script('return document.documentElement.clientHeight;')
print('viewport_height:', viewport_height)
y = 0 # position to scroll
# at start it has to be bigger than `y` to run `while y < page_height:`
page_height = 1
#page_height = driver.execute_script('return document.body.scrollHeight;')
while y < page_height:
y += viewport_height # move only visible height
print('y:', y, 'page_height:', page_height)
# scroll
driver.execute_script(f'window.scrollTo(0, {y});')
# browser may need time to update page
time.sleep(0.5)
# get page height (it can change when JavaScript adds elements)
page_height = driver.execute_script('return document.body.scrollHeight;')
# --- get data with BeautifulSoup ---
base_url = 'http://www.oldnavy.com'
html = driver.page_source
soup = BeautifulSoup(html, 'html5lib')
all_divs = soup.find_all('div', class_='product-card') # new layout
print('len(all_divs):', len(all_divs))
#for div in all_divs:
# link = div.find('a')
# print(link.text)
# print(base_url + link['href'])
# --- get data with Selenium ---
all_products = driver.find_elements_by_class_name('product-card')
print('len(all_products):', len(all_products))
for product in all_products:
link = product.find_element_by_tag_name('a')
print(link.text)
# print(base_url + link['href'])
| 3.234375 | 3 |
olc_webportalv2/cowbat/tests/test_forms.py | OLC-Bioinformatics/olc_genomics_portal | 3 | 12770134 | <reponame>OLC-Bioinformatics/olc_genomics_portal
from django.test import TestCase, Client
from django import forms
from olc_webportalv2.cowbat.forms import RunNameForm
from olc_webportalv2.geneseekr.forms import EmailForm
class FormTest(TestCase):
def test_run_form_external_lab(self):
form = RunNameForm({
'run_name': '191218_CAL'
})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data.get('run_name'), '191218_CAL')
def test_run_form_miseq_id(self):
form = RunNameForm({
'run_name': '191218_M02345'
})
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data.get('run_name'), '191218_M02345')
def test_run_form_invalid_name_date_second(self):
form = RunNameForm({
'run_name': 'M02345_191215'
})
self.assertFalse(form.is_valid())
try:
form.cleaned_data.get('run_name')
except forms.ValidationError as e:
self.assertEqual('BadRunName', e.code)
def test_run_form_invalid_date_too_short(self):
form = RunNameForm({
'run_name': '12345_CAL'
})
self.assertFalse(form.is_valid())
try:
form.cleaned_data.get('run_name')
except forms.ValidationError as e:
self.assertEqual('BadRunName', e.code)
def test_run_form_invalid_date_too_long(self):
form = RunNameForm({
'run_name': '1234567_CAL'
})
self.assertFalse(form.is_valid())
try:
form.cleaned_data.get('run_name')
except forms.ValidationError as e:
self.assertEqual('BadRunName', e.code)
def test_run_form_invalid_lowercase(self):
form = RunNameForm({
'run_name': '1234567_aaa'
})
self.assertFalse(form.is_valid())
try:
form.cleaned_data.get('run_name')
except forms.ValidationError as e:
self.assertEqual('BadRunName', e.code)
# These pretty much just test that we actually remembered to use an email form
def test_email_form_good(self):
form = EmailForm({
'email': '<EMAIL>'
})
self.assertTrue(form.is_valid())
def test_email_form_bad(self):
form = EmailForm({
'email': 'not_an_email_at_all'
})
self.assertFalse(form.is_valid())
| 2.234375 | 2 |
tests/unit-tests/test_settings.py | Ycallaer/schema_registry_viz | 1 | 12770135 | from schema_reg_viz.config.settings import get_settings
def test_health():
result = get_settings()
assert result.schema_registry.port == 8081
assert result.schema_registry.protocol == 'http'
assert result.schema_registry.url == 'localhost'
| 1.773438 | 2 |
test/language/templates/python/StructFullAndShortTemplateArgumentTest.py | chenpeihua/zserio | 0 | 12770136 | <reponame>chenpeihua/zserio
import unittest
import zserio
from testutils import getZserioApi
class StructFullAndShortTemplateArgumentTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.api = getZserioApi(__file__, "templates.zs").struct_full_and_short_template_argument
def testReadWriteFull(self):
storage = self.api.templated_struct.Storage.fromFields("String")
structFullNameTemplateArgument = self.api.StructFullNameTemplateArgument.fromFields(
self.api.templated_struct.TemplatedStruct_Storage.fromFields(storage))
writer = zserio.BitStreamWriter()
structFullNameTemplateArgument.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readStructFullNameTemplateArgument = self.api.StructFullNameTemplateArgument()
readStructFullNameTemplateArgument.read(reader)
self.assertEqual(structFullNameTemplateArgument, readStructFullNameTemplateArgument)
def testReadWriteShort(self):
storage = self.api.templated_struct.Storage.fromFields("String")
structShortNameTemplateArgument = self.api.templated_struct.StructShortNameTemplateArgument.fromFields(
self.api.templated_struct.TemplatedStruct_Storage.fromFields(storage))
writer = zserio.BitStreamWriter()
structShortNameTemplateArgument.write(writer)
reader = zserio.BitStreamReader(writer.getByteArray())
readStructShortNameTemplateArgument = self.api.templated_struct.StructShortNameTemplateArgument()
readStructShortNameTemplateArgument.read(reader)
self.assertEqual(structShortNameTemplateArgument, readStructShortNameTemplateArgument)
| 2.390625 | 2 |
backend/api/routes.py | haley-r/solderless-microlab | 0 | 12770137 | """
Module defining API.
"""
from api import app
from flask import jsonify
import recipes
@app.route('/list')
def list():
"""
List all available recipes
:return:
list
        a list containing the names of the recipes. ex: ['recipe1', 'recipe2']
"""
recipes.refresh()
return jsonify(recipes.list)
@app.route('/status')
def status():
"""
Get the status of the app.
:return:
object
message
The message to be displayed to the user.
options
null or a list of strings to display to the user as selectable options.
recipe
Name of the currently running recipe or null if none is running.
step
The step number or -1 if no recipe is running
status
The state of the application. One of:
idle
App is waiting for the user to start a recipe
running
App is running a recipe and doesn't need any input from the user
user_input
App is waiting for the user to make a decision. See options.
complete
Recipe is complete.
error
A system error has occurred.
"""
return jsonify(recipes.status())
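
# Illustrative example of a /status payload while waiting on user input
# (all field values here are hypothetical):
# {
#     "message": "Confirm the vial is loaded",
#     "options": ["Done", "Abort"],
#     "recipe": "recipe1",
#     "step": 3,
#     "status": "user_input"
# }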
@app.route('/start/<name>')
def start(name):
"""
Start running a recipe.
:param name:
The recipe name. Must be one of the items returned by /list
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
(state,msg) = recipes.start(name)
if state:
return jsonify({'response':'ok'})
else:
return jsonify({'response':'error','message':msg})
@app.route('/stop')
def stop():
"""
Stop the currently running recipe.
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
recipes.stop()
return jsonify({'response':'ok'})
@app.route('/select/option/<name>')
def selectOption(name):
"""
Provide user selected input.
:param name:
The name of the user selected option. This must be one of the strings presented in the
"options" list in the /status call.
:return:
object
response
One of:
ok
error
message
Only present if response is "error" and there is a message to present to the user.
"""
(state,msg) = recipes.selectOption(name)
if state:
return jsonify({'response':'ok'})
else:
return jsonify({'response':'error','message':msg})
| 3.515625 | 4 |
SingleCodeRun.py | hsspratt/Nott-Hawkeye1 | 0 | 12770138 | # %% TEst Final run
%matplotlib widget
import matplotlib.pyplot as plt
import numpy as np
import VectorFunctions as vf
import importlib as imp
import functions as f
imp.reload(vf)
angles_BA = np.array(f.import_bz2('test_photos_angles2'))
cameraA_r0 = np.array([0,0,0])
cameraB_r0 = np.array([-29,42,1])
cameras_r0 = [cameraA_r0, cameraB_r0]
# measured values: theta is taken from the y axis and phi from the xy plane
anglesA_theta = angles_BA[2,:]
anglesA_phi = angles_BA[3,:]
anglesB_theta = angles_BA[0,:]
anglesB_phi = angles_BA[1,:]
angles_AB = np.vstack([anglesA_theta,anglesA_phi,anglesB_theta,anglesB_phi])
frame = 4
name = str(frame)
cameras_angles = angles_AB[:,frame]
dt_A, dt_B, dp_A, dp_B = np.array([np.radians(0), np.radians(0), np.radians(18), np.radians(38)])
cameraA_theta = np.pi/2 - cameras_angles[0]
cameraA_phi = np.pi/2 - cameras_angles[1]
cameraB_theta = cameras_angles[2]
cameraB_phi = np.pi/2 - cameras_angles[3]
cameraB_theta = - cameraB_theta
print('camera A theta: ', cameraA_theta)
print('camera A phi: ', cameraA_phi)
print('camera B theta: ', cameraB_theta)
print('camera B phi: ', cameraB_phi)
r = 1
xA = r * np.cos(cameraA_theta + dt_A) * np.sin(cameraA_phi + dp_A)
yA = r * np.sin(cameraA_theta + dt_A) * np.sin(cameraA_phi + dp_A)
zA = r * np.cos(cameraA_phi + dp_A)
cameraA_vector = np.array([xA, yA, zA])
print('Cart points from A: ', xA,yA,zA)
xB = r * np.cos(cameraB_theta + dt_B) * np.sin(cameraB_phi + dp_B)
yB = r * np.sin(cameraB_theta + dt_B) * np.sin(cameraB_phi + dp_B)
zB = r * np.cos(cameraB_phi + dp_B)
cameraB_vector = np.array([xB, yB, zB])
print('Cart points from B: ', xB,yB,zB)
c1, c2, dist = vf.LocShortestDistance(cameraA_r0, cameraB_r0, cameraA_vector, cameraB_vector)
position3D = c1 + c2 / 2
print(position3D)
tlim = np.max(cameras_r0)*2
t = np.linspace(0,tlim,50000)
camera1_line = t*np.array([cameraA_vector]).T + np.array([cameraA_r0]).T
camera2_line = t*np.array([cameraB_vector]).T + np.array([cameraB_r0]).T
plt.figure('3D position of ball')
ax = plt.axes(projection='3d')
ax.set_xlabel('X')
ax.set_ylabel('Z')
ax.set_zlabel('Y')
ax.plot3D(camera1_line[0,[0,-1]], camera1_line[1,[0,-1]], camera1_line[2,[0,-1]])
ax.plot3D(camera2_line[0,[0,-1]], camera2_line[1,[0,-1]], camera2_line[2,[0,-1]])
ax.plot3D(cameraA_r0[0], cameraA_r0[1], cameraA_r0[2],marker='x', color='k')
ax.plot3D(cameraB_r0[0], cameraB_r0[1], cameraB_r0[2],marker='x', color='k')
ax.plot3D([-13, 13], [56, 56], [-27, -27], color='r')
ax.plot3D([13, 13], [56, 18], [-27, -27], color='r')
ax.plot3D([13, -13], [18, 18], [-27, -27], color='r')
ax.plot3D([-13, -13], [18, 56], [-27, -27], color='r')
cube_size = 70
ax.set_xlim3d([-cube_size/2, cube_size/2])
ax.set_ylim3d([0, cube_size])
ax.set_zlim3d([-cube_size, 0])
ax.set_zticks([])
# name = name + name + name+ '.png'
# plt.savefig(name, dpi=600)
ax.elev = 90  # 150
ax.azim = 180
# %% later ---------------------------
| 2.640625 | 3 |
trovebot.py | Sectoidfodder/Trovebot | 0 | 12770139 | <filename>trovebot.py
import traceback
import sys
import os
import asyncio
from dotenv import load_dotenv
from configparser import ConfigParser
from discord.ext import commands
from automod.automod import AutoMod
from records.records import Records
#from utils import autoping
trovebot = commands.Bot('!')
config = ConfigParser()
config.read('trovebot.ini')
load_dotenv()
automod = AutoMod(trovebot, config['AutoMod'])
records = Records(trovebot, config['Records'])
@trovebot.event
async def on_ready():
print('Logged in as {}'.format(trovebot.user))
@trovebot.event
async def on_command_error(ctx, err):
if hasattr(ctx.command, 'on_error'):
return
ignored = (commands.CommandOnCooldown, commands.CommandNotFound, commands.CheckFailure, commands.MissingRequiredArgument, commands.BadArgument)
if isinstance(err, ignored):
return
print('Ignoring exception in command {}:'.format(ctx.command), file=sys.stderr)
traceback.print_exception(type(err), err, err.__traceback__, file=sys.stderr)
@trovebot.command()
@commands.is_owner()
async def echo(ctx):
await ctx.send('echo')
@trovebot.command()
@commands.is_owner()
async def reload_config(ctx):
config.read('trovebot.ini')
automod.config = config['AutoMod']
records.config = config['Records']
print('config reloaded')
@trovebot.command()
@commands.is_owner()
async def shutdown(ctx):
await trovebot.logout()
async def periodic_save():
while True:
await asyncio.sleep(1800)
await automod.save()
await records.save()
print('periodic save')
async def periodic_leaderboard():
while True:
await asyncio.sleep(3600)
await records.update_leaderboard()
print('periodic leaderboard')
#autoping.autoping()
trovebot.add_cog(automod)
trovebot.add_cog(records)
trovebot.loop.create_task(periodic_save())
trovebot.loop.create_task(periodic_leaderboard())
trovebot.run(os.getenv('TOKEN')) | 2.21875 | 2 |
r2r_offer_utils/logging.py | Ride2Rail/r2r-offer-utils | 0 | 12770140 | <filename>r2r_offer_utils/logging.py
#!/usr/bin/env python3
import logging
def setup_logger(name=None):
# create logger
logger = logging.getLogger(name=name)
logger.setLevel(logging.DEBUG)
# add stream handler (console)
ch = logging.StreamHandler()
# create formatter
formatter = logging.Formatter('[%(asctime)s][%(levelname)s](%(name)s): %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
return logger, ch
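
# Usage sketch added for illustration (not part of the original module):
if __name__ == "__main__":
    # Build a named logger and emit one record through the console handler.
    demo_logger, demo_handler = setup_logger(name="demo")
    demo_logger.info("logger configured")  # [time][INFO](demo): logger configured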
| 2.484375 | 2 |
models/Transformer/utils/plot_swipe.py | Juncheng-Dong/ML_MM_Benchmark | 3 | 12770141 | import numpy as np
import torch
from utils import plotsAnalysis
import os
from utils.helper_functions import load_flags
def auto_swipe(mother_dir=None):
"""
    This function sweeps the parameter space under a folder, extracts the varying hyper-parameters, and makes 2D heatmaps w.r.t. all combinations of them
"""
if mother_dir is None:
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep8'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Yang_new_sweep/'
mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/new_norm_color/'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/Color_new_sweep/'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder_pos_analysis/Color'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/models'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Color_temp/prev_sweep/test_size'
#mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep_encode_lr'
flags_list = []
# First step, get the list of object flags
for folder in os.listdir(mother_dir):
# Get the current sub_folder
cur_folder = os.path.join(mother_dir, folder)
if not os.path.isdir(cur_folder) or not os.path.isfile(os.path.join(cur_folder, 'flags.obj')):
print('Either this is not a folder or there is no flags object under this folder for ', cur_folder)
continue
# Read the pickle object
cur_flags = load_flags(cur_folder)
flags_list.append(cur_flags)
# From the list of flags, get the things that are different except for loss terms
att_list = [a for a in dir(cur_flags) if not a.startswith('_') and not 'loss' in a and not 'trainable_param' in a and not 'model_name' in a and not 'dir' in a]
print('In total {} attributes, they are {}'.format(len(att_list), att_list))
# Create a dictionary that have keys as attributes and unique values as that
attDict = {key: [] for key in att_list}
# Loop over all the flags and get the unique values inside
for flags in flags_list:
for keys in attDict.keys():
try:
att = getattr(flags,keys)
except:
print('There is not attribute {} in flags, continue'.format(keys))
continue
# Skip if this is already inside the list
if att in attDict[keys]:
continue
attDict[keys].append(att)
    # Keep the attributes whose value lists contain more than one entry
varying_att_list = []
for keys in attDict.keys():
if len(attDict[keys]) > 1:
# For linear layers, apply special handlings
if 'linear' not in keys:
varying_att_list.append(keys)
continue
length_list = []
num_node_in_layer_list = []
# Loop over the lists of linear
for linear_list in attDict[keys]:
assert type(linear_list) == list, 'Your linear layer is not list, check again'
length_list.append(len(linear_list)) # Record the length instead
if 'head_linear' in keys:
if len(linear_list) > 2:
num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes
elif 'tail_linear' in keys:
if len(linear_list) > 1:
num_node_in_layer_list.append(linear_list[-2]) # Record the -2 of the list, which denotes the number of nodes
# Add these two attributes to the
if len(np.unique(length_list)) > 1:
varying_att_list.append(keys)
if len(np.unique(num_node_in_layer_list)) > 1:
varying_att_list.append('linear_unit')
print('varying attributes are', varying_att_list)
# Showing how they are changing
for keys in varying_att_list:
if keys == 'linear_unit':
continue
print('att is {}, they have values of {}'.format(keys, attDict[keys]))
if len(varying_att_list) == 1:
# There is only 1 attribute that is changing
att = varying_att_list[0]
key_a = att
key_b = 'lr'
for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']:
#try:
print('doing heatmap {}'.format(heatmap_value))
plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png',
HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value)
#except Exception as e:
# print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value))
# print('error message: {}'.format(e))
# Start calling the plotsAnalysis function for all the pairs
for a, key_a in enumerate(varying_att_list):
for b, key_b in enumerate(varying_att_list):
# Skip the same attribute
if a <= b:
continue
# Call the plotsAnalysis function
#for heatmap_value in ['best_validation_loss']:
for heatmap_value in ['best_validation_loss', 'best_training_loss','trainable_param']:
print('doing heatmap {}'.format(heatmap_value))
try:
plotsAnalysis.HeatMapBVL(key_a, key_b, key_a + '_' + key_b + '_HeatMap',save_name=mother_dir + '_' + key_a + '_' + key_b + '_' + heatmap_value + '_heatmap.png',
HeatMap_dir=mother_dir,feature_1_name=key_a,feature_2_name=key_b, heat_value_name=heatmap_value)
except:
print('the plotswipe does not work in {} and {} cross for {}'.format(key_a, key_b, heatmap_value))
if __name__ == '__main__':
#pathnamelist = ['/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/sweep4',
# '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sweep4']#,
#'/scratch/sr365/ML_MM_Benchmark/Color_temp/models/sweep2']
#'/scratch/sr365/ML_MM_Benchmark/Yang_temp/models/lr_sweep']
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/encoder'
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/sequence_len'
#big_mother_dir = '/scratch/sr365/ML_MM_Benchmark/Transformer/models/MLP_complexity/'
#for dirs in os.listdir(big_mother_dir):
# mother_dir = os.path.join(big_mother_dir, dirs)
# if os.path.isdir(mother_dir):
# auto_swipe(mother_dir)
auto_swipe()
| 2.453125 | 2 |
dictionary/views/edit.py | ankitgc1/django-sozluk-master | 248 | 12770142 | from django.contrib import messages as notifications
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import F, Q
from django.db.models.functions import Coalesce
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect
from django.urls import reverse, reverse_lazy
from django.utils import timezone
from django.utils.translation import gettext, gettext_lazy as _
from django.views.generic import CreateView, FormView, UpdateView
from dictionary.forms.edit import EntryForm, PreferencesForm
from dictionary.models import Author, Comment, Entry, Topic
from dictionary.utils import time_threshold
class UserPreferences(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
model = Author
form_class = PreferencesForm
template_name = "dictionary/user/preferences/index.html"
success_message = _("settings are saved, dear")
success_url = reverse_lazy("user_preferences")
def get_object(self, queryset=None):
return self.request.user
def form_invalid(self, form):
notifications.error(self.request, gettext("we couldn't handle your request. try again later."))
return super().form_invalid(form)
class EntryCreateMixin:
model = Entry
form_class = EntryForm
def form_valid(self, form):
"""
User sent new entry, whose topic may or may not be existent. If topic
exists, adds the entry and redirects to the entry permalink, otherwise
the topic is created if the title is valid. Entry.save() sets created_by
field of the topic.
"""
draft_pk = self.request.POST.get("pub_draft_pk", "")
publishing_draft = draft_pk.isdigit()
if (not publishing_draft) and (self.topic.exists and self.topic.is_banned):
# Cannot check is_banned before checking its existence.
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
if publishing_draft:
return redirect(reverse("entry_update", kwargs={"pk": int(draft_pk)}))
return self.form_invalid(form)
if publishing_draft:
try:
entry = Entry.objects_all.get(
pk=int(draft_pk), is_draft=True, author=self.request.user, topic__is_banned=False
)
entry.content = form.cleaned_data["content"]
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
except Entry.DoesNotExist:
notifications.error(self.request, _("we couldn't handle your request. try again later."))
return self.form_invalid(form)
else:
# Creating a brand new entry.
entry = form.save(commit=False)
entry.author = self.request.user
if self.topic.exists:
entry.topic = self.topic
else:
if not self.topic.valid:
notifications.error(self.request, _("curses to such a topic anyway."), extra_tags="persistent")
return self.form_invalid(form)
entry.topic = Topic.objects.create_topic(title=self.topic.title)
entry.save()
notifications.info(self.request, _("the entry was successfully launched into stratosphere"))
return redirect(reverse("entry-permalink", kwargs={"entry_id": entry.id}))
def form_invalid(self, form):
if form.errors:
for err in form.errors["content"]:
notifications.error(self.request, err, extra_tags="persistent")
return super().form_invalid(form)
class EntryCreate(LoginRequiredMixin, EntryCreateMixin, FormView):
template_name = "dictionary/edit/entry_create.html"
def dispatch(self, request, *args, **kwargs):
self.extra_context = {"title": self.request.POST.get("title", "")}
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["recent_drafts"] = (
Entry.objects_all.filter(
Q(date_created__gte=time_threshold(hours=24)) | Q(date_edited__gte=time_threshold(hours=24)),
is_draft=True,
author=self.request.user,
)
.select_related("topic")
.only("topic__title", "date_created", "date_edited")
.alias(last_edited=Coalesce(F("date_edited"), F("date_created")))
.order_by("-last_edited")[:5]
)
return context
def form_valid(self, form):
if not self.request.POST.get("pub_draft_pk", "").isdigit():
# Topic object is only required if not publishing a draft.
self.topic = Topic.objects.get_or_pseudo(unicode_string=self.extra_context.get("title")) # noqa
return super().form_valid(form)
class EntryUpdate(LoginRequiredMixin, UpdateView):
model = Entry
form_class = EntryForm
template_name = "dictionary/edit/entry_update.html"
context_object_name = "entry"
def form_valid(self, form):
entry = form.save(commit=False)
if self.request.user.is_suspended or entry.topic.is_banned:
notifications.error(self.request, gettext("you lack the required permissions."))
return super().form_invalid(form)
if entry.is_draft:
status = self.request.user.entry_publishable_status
if status is not None:
notifications.error(self.request, status, extra_tags="persistent")
return super().form_invalid(form)
entry.is_draft = False
entry.date_created = timezone.now()
entry.date_edited = None
notifications.info(self.request, gettext("the entry was successfully launched into stratosphere"))
else:
entry.date_edited = timezone.now()
return super().form_valid(form)
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
def get_queryset(self):
return Entry.objects_all.filter(author=self.request.user)
class CommentMixin(LoginRequiredMixin, SuccessMessageMixin):
model = Comment
fields = ("content",)
template_name = "dictionary/edit/comment_form.html"
def form_invalid(self, form):
for error in form.errors["content"]:
notifications.error(self.request, error)
return super().form_invalid(form)
class CommentCreate(CommentMixin, CreateView):
success_message = _("the comment was successfully launched into stratosphere")
entry = None
def dispatch(self, request, *args, **kwargs):
self.entry = get_object_or_404(Entry.objects_published, pk=self.kwargs.get("pk"))
if not (
request.user.has_perm("dictionary.can_comment") and self.entry.topic.is_ama and request.user.is_accessible
):
raise Http404
return super().dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.entry
return context
def form_valid(self, form):
comment = form.save(commit=False)
comment.author = self.request.user
comment.entry = self.entry
comment.save()
return super().form_valid(form)
class CommentUpdate(CommentMixin, UpdateView):
success_message = _("the comment has been updated")
def get_object(self, queryset=None):
return get_object_or_404(Comment, pk=self.kwargs.get(self.pk_url_kwarg), author=self.request.user)
def form_valid(self, form):
if self.request.POST.get("delete"):
self.object.delete()
notifications.success(self.request, gettext("the comment has been deleted"))
return redirect(self.object.entry.get_absolute_url())
if not self.request.user.is_accessible:
notifications.error(
self.request, gettext("you lack the permissions to edit this comment. you might as well delete it?")
)
return self.form_invalid(form)
comment = form.save(commit=False)
comment.date_edited = timezone.now()
return super().form_valid(form)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context["entry"] = self.object.entry
context["updating"] = True
return context
| 1.96875 | 2 |
sorting/quick_sort.py | allenyummy/SortingAlgo | 0 | 12770143 | <reponame>allenyummy/SortingAlgo
# encoding=utf-8
# Author: <NAME>
# Description: Quick Sort
from __future__ import print_function
from typing import List
def quick_sort_1(arr: List[int], left, right):
    def partition() -> int:
        # The source file is truncated at this point; the body below is a
        # plausible Lomuto-partition reconstruction, not the original code.
        pivot = arr[right]
        i = left - 1
        for j in range(left, right):
            if arr[j] <= pivot:
                i += 1
                arr[i], arr[j] = arr[j], arr[i]
        arr[i + 1], arr[right] = arr[right], arr[i + 1]
        return i + 1

    if left < right:
        p = partition()
        quick_sort_1(arr, left, p - 1)
        quick_sort_1(arr, p + 1, right) | 2.140625 | 2 |
slide/snippet/list_str.py | TomohikoK/PyCat | 0 | 12770144 | <gh_stars>0
from typing import List
list_of_string: List[str] = ['a', 'b', 'c']
list_of_int: List[int] = [1, 2, 3]
| 3 | 3 |
test/test_styles.py | mat-m/odfdo | 0 | 12770145 | <gh_stars>0
#!/usr/bin/env python
# Copyright 2018 <NAME>
# Copyright (c) 2009-2010 <NAME>, Itaapy, Pierlis, Talend.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# Authors (odfdo project): <EMAIL>
# The odfdo project is a derivative work of the lpod-python project:
# https://github.com/lpod/lpod-python
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
from unittest import TestCase, main
from odfdo.const import ODF_STYLES
from odfdo.document import Document
from odfdo.style import Style
class TestStyle(TestCase):
def setUp(self):
self.document = document = Document('samples/example.odt')
self.styles = document.get_part(ODF_STYLES)
def tearDown(self):
del self.styles
del self.document
def test_create_style(self):
style = Style('paragraph', 'style1')
self.assertIn(style.serialize(),
(('<style:style style:name="style1" '
'style:family="paragraph"/>'),
('<style:style style:family="paragraph" '
'style:name="style1"/>')))
def test_get_styles(self):
style_list = self.styles.get_styles()
self.assertEqual(len(style_list), 20)
def test_get_styles_paragraph(self):
style_list = self.styles.get_styles(family='paragraph')
self.assertEqual(len(style_list), 10)
def test_get_styles_master_page(self):
style_list = self.styles.get_styles(family='master-page')
self.assertEqual(len(style_list), 1)
def test_get_style_automatic(self):
style = self.styles.get_style('page-layout', 'Mpm1')
self.assertNotEqual(style, None)
def test_get_style_named(self):
style = self.styles.get_style('paragraph', 'Heading_20_1')
self.assertEqual(style.display_name, "Heading 1")
def test_get_style_display_name(self):
style = self.styles.get_style('paragraph', display_name="Text body")
self.assertEqual(style.name, "Text_20_body")
def test_insert_style(self):
styles = self.styles.clone
style = Style(
'paragraph',
name='style1',
area='text',
**{
'fo:color': '#0000ff',
'fo:background-color': '#ff0000'
})
context = styles.get_element('//office:styles')
context.append(style)
get1 = styles.get_style('paragraph', 'style1')
self.assertIn(
get1.serialize(),
(('<style:style style:name="style1" '
'style:family="paragraph">'
'<style:text-properties fo:background-color="#ff0000" '
'fo:color="#0000ff"/>'
'</style:style>'), '<style:style style:family="paragraph" '
'style:name="style1">'
'<style:text-properties '
'fo:background-color="#ff0000" '
'fo:color="#0000ff"/>'
'</style:style>'))
class TestInsertStyleCase(TestCase):
def setUp(self):
self.doc = Document('samples/example.odt')
def test_insert_common_style(self):
doc = self.doc
style = Style('paragraph', 'MyStyle')
doc.insert_style(style)
inserted_style = doc.get_style('paragraph', 'MyStyle')
self.assertEqual(style.serialize(), inserted_style.serialize())
def test_insert_default_style(self):
doc = self.doc
style = Style('paragraph', 'MyStyle')
doc.insert_style(style, default=True)
inserted_style = doc.get_style('paragraph')
expected = '<style:default-style style:family="paragraph"/>'
self.assertEqual(inserted_style.serialize(), expected)
def test_insert_automatic_style(self):
doc = self.doc
style = Style('paragraph')
doc.insert_style(style, automatic=True)
self.assertNotEqual(style.name, None)
def test_insert_with_error(self):
doc = self.doc
style = Style('paragraph', 'MyStyle')
self.assertRaises(
AttributeError,
doc.insert_style,
style=style,
automatic=True,
default=True)
def test_insert_master_page_style(self):
doc = self.doc
style = Style('master-page', 'MyPageStyle')
doc.insert_style(style)
inserted_style = doc.get_style('master-page', 'MyPageStyle')
self.assertEqual(style.serialize(), inserted_style.serialize())
if __name__ == '__main__':
main()
| 2.15625 | 2 |
setup.py | nvie/python-drainers | 1 | 12770146 | <reponame>nvie/python-drainers<gh_stars>1-10
#!/usr/bin/env python
import os
import codecs
try:
from setuptools import setup
except ImportError:
from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
import drainers as distmeta
if os.path.exists("README.rst"):
long_description = codecs.open('README.rst', "r", "utf-8").read()
else:
long_description = "See http://github.com/nvie/drainers/tree/master"
setup(
name="drainers",
version=distmeta.__version__,
description="Event-based draining of process output",
author=distmeta.__author__,
author_email=distmeta.__contact__,
url=distmeta.__homepage__,
platforms=["any"],
license="BSD",
packages=["drainers"],
zip_safe=False,
classifiers=[
#"Development Status :: 1 - Planning",
"Development Status :: 2 - Pre-Alpha",
#"Development Status :: 3 - Alpha",
#"Development Status :: 4 - Beta",
#"Development Status :: 5 - Production/Stable",
#"Development Status :: 6 - Mature",
#"Development Status :: 7 - Inactive",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: POSIX",
"Topic :: Software Development :: Libraries :: Python Modules",
],
long_description=long_description,
)
| 1.679688 | 2 |
Scripts/rfpy_ccp.py | wbm06/RfPy | 0 | 12770147 | #!/usr/bin/env python
# Copyright 2019 <NAME>
#
# This file is part of RfPy.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Import modules and functions
import numpy as np
import pickle
import stdb
from obspy.clients.fdsn import Client
from obspy.core import Stream, UTCDateTime
from rfpy import arguments, binning, plotting
from rfpy import CCPimage
from pathlib import Path
def main():
print()
print("############################################")
print("# __ #")
print("# _ __ / _|_ __ _ _ ___ ___ _ __ #")
print("# | '__| |_| '_ \| | | | / __/ __| '_ \ #")
print("# | | | _| |_) | |_| | | (_| (__| |_) | #")
print("# |_| |_| | .__/ \__, |___\___\___| .__/ #")
print("# |_| |___/_____| |_| #")
print("# #")
print("############################################")
print()
# Run Input Parser
args = arguments.get_ccp_arguments()
# Load Database
db = stdb.io.load_db(fname=args.indb)
# Construct station key loop
allkeys = db.keys()
# Extract key subset
if len(args.stkeys) > 0:
stkeys = []
for skey in args.stkeys:
stkeys.extend([s for s in allkeys if skey in s])
else:
stkeys = db.keys()
if args.load:
# Check if CCPimage object exists and whether overwrite has been set
load_file = Path('CCP_load.pkl')
if load_file.is_file() and not args.ovr:
ccpfile = open(load_file, "rb")
ccpimage = pickle.load(ccpfile)
ccpfile.close()
else:
print()
print("|-----------------------------------------------|")
print("| Loading data |")
print("|-----------------------------------------------|")
print("| Gridding: ")
print("| start = {0:5.1f},{1:6.1f}".format(
args.coord_start[0],args.coord_start[1]))
print("| end = {0:5.1f},{1:6.1f}".format(
args.coord_end[0],args.coord_end[1]))
print("| dz = {0} (km)".format(str(args.dz)))
print("| dx = {0} (km)".format(str(args.dx)))
print()
# Initialize CCPimage object
ccpimage = CCPimage(coord_start=args.coord_start,
coord_end=args.coord_end,
dz=args.dz, dx=args.dx)
# Loop over station keys
for stkey in list(stkeys):
# Extract station information from dictionary
sta = db[stkey]
# Define path to see if it exists
if args.phase in ['P', 'PP', 'allP']:
datapath = Path('P_DATA') / stkey
elif args.phase in ['S', 'SKS', 'allS']:
datapath = Path('S_DATA') / stkey
if not datapath.is_dir():
                print('Path to ' + str(datapath) + ' does not exist - continuing')
continue
# Temporary print locations
tlocs = sta.location
if len(tlocs) == 0:
tlocs = ['']
for il in range(0, len(tlocs)):
if len(tlocs[il]) == 0:
tlocs[il] = "--"
sta.location = tlocs
rfRstream = Stream()
datafiles = [x for x in datapath.iterdir() if x.is_dir()]
for folder in datafiles:
# Skip hidden folders
if folder.name.startswith('.'):
continue
# Load meta data
filename = folder / "Meta_Data.pkl"
if not filename.is_file():
continue
metafile = open(filename, 'rb')
meta = pickle.load(metafile)
metafile.close()
# Skip data not in list of phases
if meta.phase not in args.listphase:
continue
# QC Thresholding
if meta.snrh < args.snrh:
continue
if meta.snr < args.snr:
continue
if meta.cc < args.cc:
continue
# If everything passed, load the RF data
filename = folder / "RF_Data.pkl"
                    if filename.is_file():
                        with open(filename, "rb") as rffile:
                            rfdata = pickle.load(rffile)
                            rfRstream.append(rfdata[1])
if len(rfRstream) == 0:
continue
if args.no_outl:
t1 = 0.
t2 = 30.
varR = []
for i in range(len(rfRstream)):
taxis = rfRstream[i].stats.taxis
tselect = (taxis > t1) & (taxis < t2)
varR.append(np.var(rfRstream[i].data[tselect]))
varR = np.array(varR)
# Remove outliers wrt variance within time range
medvarR = np.median(varR)
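                    # 1.4826 * MAD is a robust, outlier-resistant estimate of the
                    # standard deviation (exact for normally distributed data)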
madvarR = 1.4826*np.median(np.abs(varR-medvarR))
robustR = np.abs((varR-medvarR)/madvarR)
outliersR = np.arange(len(rfRstream))[robustR > 2.5]
for i in outliersR[::-1]:
rfRstream.remove(rfRstream[i])
print("Station: {0:>2s}.{1:5s} - {2} traces loaded".format(
sta.network, sta.station, len(rfRstream)))
                if len(rfRstream) == 0:
continue
ccpimage.add_rfstream(rfRstream)
if len(ccpimage.radialRF) > 0:
ccpimage.save("CCP_load.pkl")
ccpimage.is_ready_for_prep = True
print()
print("CCPimage saved to 'CCP_load.pkl'")
else:
ccpimage.is_ready_for_prep = False
else:
pass
if args.prep:
prep_file = Path("CCP_prep.pkl")
if prep_file.is_file() and not args.ovr:
            with open(prep_file, 'rb') as ccpfile:
                ccpimage = pickle.load(ccpfile)
else:
load_file = Path('CCP_load.pkl')
if not load_file.is_file():
raise(Exception("No CCP_load.pkl file available - aborting"))
else:
print()
print("|-----------------------------------------------|")
print("| Preparing data before stacking |")
print("|-----------------------------------------------|")
print("| Frequencies: ")
print("| f1 = {0:4.2f} (Hz)".format(args.f1))
print("| f2ps = {0:4.2f} (Hz)".format(args.f2ps))
print("| f2pps = {0:4.2f} (Hz)".format(args.f2pps))
print("| f2pss = {0:4.2f} (Hz)".format(args.f2pss))
print("| Binning: ")
print("| nbaz = {0}".format(str(args.nbaz)))
print("| nslow = {0}".format(str(args.nslow)))
print()
ccpfile = open(load_file,"rb")
ccpimage = pickle.load(ccpfile)
ccpfile.close()
ccpimage.prep_data(f1=args.f1, f2ps=args.f2ps,
f2pps=args.f2pps, f2pss=args.f2pss,
nbaz=args.nbaz, nslow=args.nslow)
ccpimage.is_ready_for_prestack = True
ccpimage.save(prep_file)
print()
print("CCPimage saved to {0}".format(str(prep_file)))
else:
pass
if args.prestack:
prestack_file = Path("CCP_prestack.pkl")
if prestack_file.is_file() and not args.ovr:
            with open(prestack_file, 'rb') as ccpfile:
                ccpimage = pickle.load(ccpfile)
else:
prep_file = Path("CCP_prep.pkl")
if not prep_file.is_file():
raise(Exception("No CCP_prep.pkl file available - aborting"))
else:
print()
print("|-----------------------------------------------|")
print("| CCP pre-stacking each phase |")
print("|-----------------------------------------------|")
print()
                with open(prep_file, 'rb') as ccpfile:
                    ccpimage = pickle.load(ccpfile)
ccpimage.prestack()
ccpimage.save(prestack_file)
print()
print("CCPimage saved to {0}".format(str(prestack_file)))
else:
pass
if args.ccp:
ccp_file = Path("CCP_stack.pkl")
if ccp_file.is_file() and not args.ovr:
            with open(ccp_file, 'rb') as ccpfile:
                ccpimage = pickle.load(ccpfile)
else:
prestack_file = Path("CCP_prestack.pkl")
if not prestack_file.is_file():
raise(Exception("No CCP_prestack.pkl file available - aborting"))
else:
if args.linear:
print()
print("|-----------------------------------------------|")
print("| Linear CCP stack - all phases |")
print("|-----------------------------------------------|")
print()
elif args.pws:
print()
print("|-----------------------------------------------|")
print("| Phase-weighted CCP stack - all phases |")
print("|-----------------------------------------------|")
print()
                with open(prestack_file, 'rb') as ccpfile:
                    ccpimage = pickle.load(ccpfile)
ccpimage.ccp()
if args.linear:
if args.weights:
ccpimage.weights = args.weights
ccpimage.linear_stack(typ='ccp')
elif args.pws:
if args.weights:
ccpimage.weights = args.weights
ccpimage.phase_weighted_stack(typ='ccp')
ccpimage.save(ccp_file)
print()
print("CCPimage saved to {0}".format(str(ccp_file)))
if args.ccp_figure:
ccpimage.plot_ccp(save=args.save_figure, fmt=args.fmt,
vmin=-1.*args.cbound, vmax=args.cbound, title=args.title)
else:
pass
if args.gccp:
gccp_file = Path("GCCP_stack.pkl")
if gccp_file.is_file() and not args.ovr:
            with open(gccp_file, 'rb') as ccpfile:
                ccpimage = pickle.load(ccpfile)
else:
prestack_file = Path("CCP_prestack.pkl")
if not prestack_file.is_file():
raise(Exception("No CCP_prestack.pkl file available - aborting"))
else:
if args.linear:
print()
print("|-----------------------------------------------|")
print("| Linear GCCP stack - all phases |")
print("|-----------------------------------------------|")
print()
elif args.pws:
print()
print("|-----------------------------------------------|")
print("| Phase-weighted GCCP stack - all phases |")
print("|-----------------------------------------------|")
print()
                with open(prestack_file, 'rb') as ccpfile:
                    ccpimage = pickle.load(ccpfile)
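                # wlen is understood here as the lateral Gaussian smoothing length (km)
                # applied before stacking the GCCP image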
ccpimage.gccp(wlen=args.wlen)
if args.linear:
if args.weights:
ccpimage.weights = args.weights
ccpimage.linear_stack(typ='gccp')
elif args.pws:
if args.weights:
ccpimage.weights = args.weights
ccpimage.phase_weighted_stack(typ='gccp')
ccpimage.save(gccp_file)
print()
print("CCPimage saved to {0}".format(str(gccp_file)))
if args.ccp_figure:
ccpimage.plot_gccp(save=args.save_figure, fmt=args.fmt,
vmin=-1.*args.cbound, vmax=args.cbound, title=args.title)
else:
pass
if __name__ == "__main__":
# Run main program
main()
| 1.820313 | 2 |
gallery/models.py | otienotimothy/art-gallery | 0 | 12770148 | <gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from cloudinary.models import CloudinaryField
# Create your models here.
class Photo(models.Model):
image = CloudinaryField('image')
image_name = models.CharField(max_length=100)
image_description = models.TextField()
added_by = models.ForeignKey(User, on_delete=models.CASCADE)
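    # models.RESTRICT blocks deleting a Location or Category that photos still reference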
location = models.ForeignKey('Location', on_delete=models.RESTRICT)
category = models.ForeignKey('Category', on_delete=models.RESTRICT)
def __str__(self):
return self.image_name
class Location(models.Model):
photo_location = models.CharField(max_length=100)
def __str__(self):
return self.photo_location
class Category(models.Model):
photo_category = models.CharField(max_length=100)
def __str__(self):
return self.photo_category
| 2.3125 | 2 |
backend/webserver/db.py | Kevinwochan/Ultracast | 0 | 12770149 | from . import models
from . import schema
import re
import magic
import mimetypes
import boto3
from botocore.client import Config
from mongoengine import connect
from pydub import AudioSegment
import io
import hashlib
from base64 import urlsafe_b64encode
#MONGO_URI = f'mongodb://{MONGO_USERNAME}:{MONGO_PASSWORD}@{MONGO_IP}/{MONGO_DB}?authSource={MONGO_AUTH_DB}'
config = None
'''
Defaults
Modified when init_app() called
'''
REGION = 'sfo2'
STATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'
session = boto3.session.Session()
client = session.client('s3',
region_name=REGION,
endpoint_url=STATIC_FILE_BASE_URL,
aws_access_key_id='<KEY>',
aws_secret_access_key='<KEY>')
BUCKET = 'ultracast-files'
FILE_ACCESS = 'public-read'
def init_app(app):
    '''
    Init based off the app's config
    '''
    # Rebind the module-level defaults; without this global statement the
    # assignments below would only create locals and init_app would be a no-op
    global config, REGION, STATIC_FILE_BASE_URL, client, BUCKET, FILE_ACCESS
    config = app.config
    REGION = app.config["S3"]["REGION"]
    STATIC_FILE_BASE_URL = f'https://{REGION}.digitaloceanspaces.com'
    client = session.client('s3',
            region_name=REGION,
            endpoint_url=STATIC_FILE_BASE_URL,
            aws_access_key_id=app.config["S3"]["AWS_ACCESS_KEY"],
            aws_secret_access_key=app.config["S3"]["AWS_SECRET_ACCESS_KEY"])
    BUCKET = app.config["S3"]["BUCKET"]
    FILE_ACCESS = app.config["S3"]["FILE_ACCESS"]
def connect_mongo(app_config):
mongo_uri = "mongodb://{u}:{p}@{ip}/{db}?authSource={auth_db}".format(
u=app_config["MONGO_USERNAME"], p=app_config["MONGO_PASSWORD"],
ip=app_config["MONGO_IP"], db=app_config["MONGO_DB"], auth_db=app_config["MONGO_AUTH_DB"])
connect(host=mongo_uri)
# Digital Ocean Space (Static-Files)
class IllegalMimeException(Exception):
pass
def get_bucket_url():
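    # Rewrite the regional endpoint into the bucket's virtual-hosted-style URL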
return re.sub(r"^https://", f"https://{BUCKET}.", STATIC_FILE_BASE_URL)
def get_file_url(filename):
return get_bucket_url() + f"/{filename}"
def get_key_from_url(url):
return re.sub(get_bucket_url() + "/", "", url)
def get_key_from_binary_data(data, ext=""):
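    # Content-addressed key: identical payloads hash to the same object name,
    # so repeated uploads of the same bytes are deduplicated by add_file()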
return urlsafe_b64encode(hashlib.sha256(data).digest()).decode('UTF-8') + ext
def check_status(resp, ok_statuses, op):
if resp['ResponseMetadata']['HTTPStatusCode'] not in ok_statuses:
raise Exception(f"Error for operation [{op}] - Response: {resp}")
def file_exists(key):
try:
client.head_object(Bucket=BUCKET, Key=key)
return True
    except Exception:
        return False
def url_exists(url):
return file_exists(get_key_from_url(url))
def get_key(data, key=None, ext=""):
if key is None:
return get_key_from_binary_data(data, ext)
else:
return key
def check_mime(data, valid_mimes):
try:
mime_type = magic.from_buffer(data, mime=True)
    except Exception:
        raise IllegalMimeException("Could not interpret MIME type of payload")
if mime_type not in valid_mimes:
raise IllegalMimeException(f"MIME type {mime_type} not allowed")
return mime_type
def add_file(data, key=None, valid_mimes=[], override=False):
mime_type = check_mime(data, valid_mimes)
extension = mimetypes.guess_extension(mime_type)
key = get_key(data, key, extension)
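    # Reuse the existing object when the (content-derived) key is already stored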
if not override and file_exists(key):
return get_file_url(key)
resp = client.put_object(
Body=data,
Bucket=BUCKET,
Key=key,
ACL=FILE_ACCESS,
ContentType=mime_type)
check_status(resp, [200], 'Add File')
return get_file_url(key)
def remove_file(url, key=None):
if key is None:
resp = client.delete_object(Bucket=BUCKET, Key=get_key_from_url(url))
else:
resp = client.delete_object(Bucket=BUCKET, Key=key)
check_status(resp, [200, 204], 'Remove File')
def update_file(old_url, data, new_key=None, valid_mimes=[]):
if url_exists(old_url):
remove_file(old_url)
return add_file(data, new_key, valid_mimes)
def audio_file_duration_secs(data):
try:
audio = AudioSegment.from_file(io.BytesIO(data), format="mp3")
return int(round(audio.duration_seconds))
    except Exception:
        return -1
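# Minimal usage sketch (hypothetical `data` bytes from an upload handler):
#   url = add_file(data, valid_mimes=["audio/mpeg"])
#   secs = audio_file_duration_secs(data)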
| 1.976563 | 2 |
usaspending_api/search/v2/views/search.py | truthiswill/usaspending-api | 0 | 12770150 | import copy
from django.conf import settings
from django.db.models import Sum, Count, F
from rest_framework.response import Response
from rest_framework.views import APIView
from usaspending_api.awards.models_matviews import UniversalAwardView
from usaspending_api.awards.v2.filters.matview_filters import matview_search_filter
from usaspending_api.awards.v2.filters.sub_award import subaward_filter
from usaspending_api.awards.v2.filters.view_selector import spending_by_award_count
from usaspending_api.awards.v2.lookups.lookups import (contract_type_mapping, loan_type_mapping,
non_loan_assistance_type_mapping, grant_type_mapping,
contract_subaward_mapping, grant_subaward_mapping,
idv_type_mapping)
from usaspending_api.awards.v2.lookups.matview_lookups import (award_contracts_mapping, loan_award_mapping,
non_loan_assistance_award_mapping, award_idv_mapping)
from usaspending_api.common.api_versioning import api_transformations, API_TRANSFORM_FUNCTIONS
from usaspending_api.common.cache_decorator import cache_response
from usaspending_api.common.exceptions import InvalidParameterException, UnprocessableEntityException
from usaspending_api.core.validator.award_filter import AWARD_FILTER
from usaspending_api.core.validator.pagination import PAGINATION
from usaspending_api.core.validator.tinyshield import TinyShield
@api_transformations(api_version=settings.API_VERSION, function_list=API_TRANSFORM_FUNCTIONS)
class SpendingByAwardVisualizationViewSet(APIView):
"""
This route takes award filters and fields, and returns the fields of the filtered awards.
endpoint_doc: /advanced_award_search/spending_by_award.md
"""
@cache_response()
def post(self, request):
"""Return all awards matching the provided filters and limits"""
models = [
{'name': 'fields', 'key': 'fields', 'type': 'array', 'array_type': 'text', 'text_type': 'search', 'min': 1},
{'name': 'subawards', 'key': 'subawards', 'type': 'boolean', 'default': False}
]
models.extend(copy.deepcopy(AWARD_FILTER))
models.extend(copy.deepcopy(PAGINATION))
for m in models:
if m['name'] in ('award_type_codes', 'fields'):
m['optional'] = False
json_request = TinyShield(models).block(request.data)
fields = json_request["fields"]
filters = json_request.get("filters", {})
subawards = json_request["subawards"]
order = json_request["order"]
limit = json_request["limit"]
page = json_request["page"]
if "no intersection" in filters["award_type_codes"]:
# "Special case": there will never be results when the website provides this value
return Response({
"limit": limit,
"results": [],
"page_metadata": {"page": page, "hasNext": False},
})
sort = json_request.get("sort", fields[0])
if sort not in fields:
raise InvalidParameterException("Sort value '{}' not found in requested fields: {}".format(sort, fields))
subawards_values = list(contract_subaward_mapping.keys()) + list(grant_subaward_mapping.keys())
awards_values = list(award_contracts_mapping.keys()) + list(loan_award_mapping.keys()) + \
list(non_loan_assistance_award_mapping.keys()) + list(award_idv_mapping.keys())
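        # The doubled braces survive this first .format() so the message can be completed per branch below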
msg = "Sort value '{}' not found in {{}} mappings: {{}}".format(sort)
if not subawards and sort not in awards_values:
raise InvalidParameterException(msg.format("award", awards_values))
elif subawards and sort not in subawards_values:
raise InvalidParameterException(msg.format("subaward", subawards_values))
# build sql query filters
if subawards:
queryset = subaward_filter(filters)
values = {'subaward_number', 'piid', 'fain', 'award_type'}
for field in fields:
if contract_subaward_mapping.get(field):
values.add(contract_subaward_mapping.get(field))
if grant_subaward_mapping.get(field):
values.add(grant_subaward_mapping.get(field))
else:
queryset = matview_search_filter(filters, UniversalAwardView).values()
values = {'award_id', 'piid', 'fain', 'uri', 'type'}
for field in fields:
if award_contracts_mapping.get(field):
values.add(award_contracts_mapping.get(field))
if loan_award_mapping.get(field):
values.add(loan_award_mapping.get(field))
if non_loan_assistance_award_mapping.get(field):
values.add(non_loan_assistance_award_mapping.get(field))
if award_idv_mapping.get(field):
values.add(award_idv_mapping.get(field))
# Modify queryset to be ordered by requested "sort" in the request or default value(s)
if sort:
if subawards:
if set(filters["award_type_codes"]) <= set(contract_type_mapping): # Subaward contracts
sort_filters = [contract_subaward_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(grant_type_mapping): # Subaward grants
sort_filters = [grant_subaward_mapping[sort]]
else:
msg = 'Award Type codes limited for Subawards. Only contracts {} or grants {} are available'
msg = msg.format(list(contract_type_mapping.keys()), list(grant_type_mapping.keys()))
raise UnprocessableEntityException(msg)
else:
if set(filters["award_type_codes"]) <= set(contract_type_mapping): # contracts
sort_filters = [award_contracts_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(loan_type_mapping): # loans
sort_filters = [loan_award_mapping[sort]]
elif set(filters["award_type_codes"]) <= set(idv_type_mapping): # idvs
sort_filters = [award_idv_mapping[sort]]
else: # assistance data
sort_filters = [non_loan_assistance_award_mapping[sort]]
            # Explicitly set NULLS LAST in the ordering to encourage use of the indexes
if sort == "Award ID" and subawards:
if order == "desc":
queryset = queryset.order_by(
F('award__piid').desc(nulls_last=True),
F('award__fain').desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(
F('award__piid').asc(nulls_last=True),
F('award__fain').asc(nulls_last=True)).values(*list(values))
elif sort == "Award ID":
if order == "desc":
queryset = queryset.order_by(
F('piid').desc(nulls_last=True),
F('fain').desc(nulls_last=True),
F('uri').desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(
F('piid').asc(nulls_last=True),
F('fain').asc(nulls_last=True),
F('uri').asc(nulls_last=True)).values(*list(values))
elif order == "desc":
queryset = queryset.order_by(F(sort_filters[0]).desc(nulls_last=True)).values(*list(values))
else:
queryset = queryset.order_by(F(sort_filters[0]).asc(nulls_last=True)).values(*list(values))
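        # Fetch one extra row beyond the page so hasNext can be determined without a COUNT query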
limited_queryset = queryset[(page - 1) * limit:page * limit + 1] # lower limit : upper limit
has_next = len(limited_queryset) > limit
results = []
for award in limited_queryset[:limit]:
if subawards:
row = {"internal_id": award["subaward_number"]}
if award['award_type'] == 'procurement':
for field in fields:
row[field] = award.get(contract_subaward_mapping[field])
elif award['award_type'] == 'grant':
for field in fields:
row[field] = award.get(grant_subaward_mapping[field])
else:
row = {"internal_id": award["award_id"]}
if award['type'] in loan_type_mapping: # loans
for field in fields:
row[field] = award.get(loan_award_mapping.get(field))
elif award['type'] in non_loan_assistance_type_mapping: # assistance data
for field in fields:
row[field] = award.get(non_loan_assistance_award_mapping.get(field))
elif award['type'] in idv_type_mapping:
for field in fields:
row[field] = award.get(award_idv_mapping.get(field))
elif (award['type'] is None and award['piid']) or award['type'] in contract_type_mapping:
# IDV + contract
for field in fields:
row[field] = award.get(award_contracts_mapping.get(field))
if "Award ID" in fields:
for id_type in ["piid", "fain", "uri"]:
if award[id_type]:
row["Award ID"] = award[id_type]
break
results.append(row)
return Response({"limit": limit, "results": results, "page_metadata": {"page": page, "hasNext": has_next}})
@api_transformations(api_version=settings.API_VERSION, function_list=API_TRANSFORM_FUNCTIONS)
class SpendingByAwardCountVisualizationViewSet(APIView):
"""
This route takes award filters, and returns the number of awards in each award type (Contracts, Loans, Grants, etc.)
endpoint_doc: /advanced_award_search/spending_by_award_count.md
"""
@cache_response()
def post(self, request):
models = [{'name': 'subawards', 'key': 'subawards', 'type': 'boolean', 'default': False}]
models.extend(copy.deepcopy(AWARD_FILTER))
models.extend(copy.deepcopy(PAGINATION))
json_request = TinyShield(models).block(request.data)
filters = json_request.get("filters", None)
subawards = json_request["subawards"]
if filters is None:
raise InvalidParameterException("Missing required request parameters: 'filters'")
results = {
"contracts": 0, "idvs": 0, "grants": 0, "direct_payments": 0, "loans": 0, "other": 0
} if not subawards else {
"subcontracts": 0, "subgrants": 0
}
if "award_type_codes" in filters and "no intersection" in filters["award_type_codes"]:
# "Special case": there will never be results when the website provides this value
return Response({"results": results})
if subawards:
queryset = subaward_filter(filters)
else:
queryset, model = spending_by_award_count(filters)
if subawards:
queryset = queryset.values('award_type').annotate(category_count=Count('subaward_id'))
elif model == 'SummaryAwardView':
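            # The summary materialized view is pre-aggregated, so per-category counts are summed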
queryset = queryset.values('category').annotate(category_count=Sum('counts'))
else:
queryset = queryset.values('category').annotate(category_count=Count('category'))
categories = {
'contract': 'contracts',
'idv': 'idvs',
'grant': 'grants',
'direct payment': 'direct_payments',
'loans': 'loans',
'other': 'other'
} if not subawards else {'procurement': 'subcontracts', 'grant': 'subgrants'}
category_name = 'category' if not subawards else 'award_type'
# DB hit here
for award in queryset:
if award[category_name] is None:
result_key = 'other' if not subawards else 'subcontracts'
elif award[category_name] not in categories.keys():
result_key = 'other'
else:
result_key = categories[award[category_name]]
results[result_key] += award['category_count']
return Response({"results": results})
| 1.710938 | 2 |