hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ea53fd441fd38a9c7b9db7db2a21779711dfe5a5 | 195 | py | Python | xin-era/foundation/segment_tree/main.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | xin-era/foundation/segment_tree/main.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | xin-era/foundation/segment_tree/main.py | fan-weiwei/algo-prep | 56514630cfe58d451dd126897459cca81bf1b2af | [
"Apache-2.0"
] | null | null | null | # Goal:
#
# Find the sums of subsections of an array
# Change an element of the array and don't recalculate everything
# Placeholder entry point: the segment-tree range sums / point updates
# described in the header comments are not implemented yet; this only
# builds a sample array and prints a marker.
if __name__ == '__main__':
    arr = [1,2,3,4,5]
    print('here')
ea540d35be6aa8bb6a870342c44c751f5089211c | 2,658 | py | Python | Python/visualization.py | richieliuse/SegmentationCNN | 12aaeff53d01f7c2ddd1f27489283b3062bb1d4a | [
"MIT"
] | 54 | 2016-11-19T02:12:04.000Z | 2022-02-24T14:26:41.000Z | Python/visualization.py | richieliuse/SegmentationCNN | 12aaeff53d01f7c2ddd1f27489283b3062bb1d4a | [
"MIT"
] | 7 | 2019-05-01T10:51:36.000Z | 2022-02-10T04:24:54.000Z | Python/visualization.py | richieliuse/SegmentationCNN | 12aaeff53d01f7c2ddd1f27489283b3062bb1d4a | [
"MIT"
] | 13 | 2016-08-06T00:15:55.000Z | 2021-12-26T20:20:35.000Z | # encoding: utf-8
"""
Visualization functions for features and predictions.
Copyright 2016 Matthias Leimeister
"""
import numpy as np
from feature_extraction import load_raw_features
from evaluation import post_processing
import matplotlib.pyplot as plt
import pickle
def visualize_predictions():
    """
    Visualize predictions resulting from a pretrained CNN model
    on the test dataset.

    For every test track, plots the (rescaled) ground-truth labels
    together with the post-processed network predictions.
    """
    preds = np.load('../Data/predsTestTracks_100epochs_lr005.npy')
    train_features, train_labels, test_features, test_labels = load_raw_features('../Data/rawFeatures.pickle')
    data = np.load('../Data/testDataNormalized.npz')
    test_y = data['test_y']

    # load file lists and indices
    with open('../Data/fileListsAndIndex.pickle', 'rb') as f:
        train_files, train_idx, test_files, test_idx = pickle.load(f)

    for i in range(len(test_labels)):
        f = test_files[i]
        # Fixed: this was a Python 2 print statement (`print f`); the
        # parenthesised call is valid in both Python 2 and 3.
        print(f)

        # Rows of the normalized test data belonging to track i.
        idx = np.where(test_idx == i)[0]
        labels = test_y[idx]
        preds_track = np.squeeze(np.asarray(preds[idx]))
        preds_track = post_processing(preds_track)

        # Rescale both curves so they are visually separated in one plot.
        preds_track = 0.5 + 0.5 * preds_track
        labels *= 0.5

        plt.plot(labels)
        plt.plot(preds_track)
        plt.show()
def visualize_training_data():
    """
    Visualize log Mel beat spectra of the training dataset.

    For each training track, shows the feature matrix on top and the
    label curve below, with a shared x (time) axis.
    """
    (train_features, train_labels,
     test_features, test_labels) = load_raw_features('../Data/rawFeatures.pickle')

    for feat, lab in zip(train_features, train_labels):
        fig, (ax_feat, ax_lab) = plt.subplots(2, 1, sharex=True)
        ax_feat.imshow(feat)
        ax_lab.plot(lab)
        ax_feat.set_xlim([0, feat.shape[1]])
        ax_feat.set_ylim([0, 80])
        ax_lab.set_xlim([0, feat.shape[1]])
        ax_lab.set_ylim([0, 1])
        ax_feat.set_adjustable('box-forced')
        ax_lab.set_adjustable('box-forced')
        plt.show()
def visualize_test_data():
    """
    Visualize log Mel beat spectra of the test dataset.

    For each test track, shows the feature matrix on top and the
    label curve below, with a shared x (time) axis.
    """
    (train_features, train_labels,
     test_features, test_labels) = load_raw_features('../Data/rawFeatures.pickle')

    for feat, lab in zip(test_features, test_labels):
        fig, (ax_feat, ax_lab) = plt.subplots(2, 1, sharex=True)
        ax_feat.imshow(feat)
        ax_lab.plot(lab)
        ax_feat.set_xlim([0, feat.shape[1]])
        ax_feat.set_ylim([0, 80])
        ax_lab.set_xlim([0, feat.shape[1]])
        ax_lab.set_ylim([0, 1])
        ax_feat.set_adjustable('box-forced')
        ax_lab.set_adjustable('box-forced')
        plt.show()
# Entry point: only the prediction visualization runs by default;
# uncomment the other calls to inspect the raw feature matrices instead.
if __name__ == "__main__":
    visualize_predictions()
    # visualize_test_data()
    # visualize_training_data()
| 25.805825 | 110 | 0.648232 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 704 | 0.264861 |
ea546f15a82d98847008f1b61316d71d6a4f99fe | 7,468 | py | Python | python/edgeauth/token_builder.py | ian-ross/EdgeAuth | f00fcd5287dafbe350ac6ac97d7d073dec6278de | [
"Apache-2.0"
] | 3 | 2020-07-27T07:08:42.000Z | 2021-11-07T20:02:03.000Z | python/edgeauth/token_builder.py | ian-ross/EdgeAuth | f00fcd5287dafbe350ac6ac97d7d073dec6278de | [
"Apache-2.0"
] | 2 | 2021-04-12T18:34:38.000Z | 2021-04-29T15:17:11.000Z | python/edgeauth/token_builder.py | ian-ross/EdgeAuth | f00fcd5287dafbe350ac6ac97d7d073dec6278de | [
"Apache-2.0"
] | 7 | 2020-01-17T09:54:34.000Z | 2021-11-11T17:16:52.000Z | from time import time
from datetime import datetime
from .digest_tokens import DigestTokens
class TokenBuilder:
    """Builder for digest tokens used with the Phenix platform.

    Chain the ``with_*`` / ``for_*`` / ``expires_*`` configuration calls
    and finish with :meth:`build` to obtain the signed token.

    Note: the builder previously also declared ``application_id``,
    ``secret`` and a *mutable* ``token = {}`` as class attributes; the
    shared dict was a latent cross-instance bug, so all state is now
    instance-level only (set in ``__init__``).
    """

    def __init__(self):
        """Token builder helper class to create digest tokens that
        can be used with the Phenix platform.
        """
        self.application_id = None
        self.secret = None
        self.token = {}

    @staticmethod
    def _require_str(value, label):
        """Raise ``TypeError`` with the conventional message unless
        ``value`` is a ``str``.  Centralizes the validation that every
        setter previously duplicated inline."""
        if not isinstance(value, str):
            raise TypeError('{} must be a string'.format(label))

    def with_application_id(self, application_id):
        """The application ID used to sign the token. (required)

        Keyword arguments:
        application_id -- the application ID to sign the token
        """
        self._require_str(application_id, 'Application Id')
        self.application_id = application_id
        return self

    def with_secret(self, secret):
        """The secret used to sign the token. (required)

        Keyword arguments:
        secret -- the shared secret to sign the token
        """
        self._require_str(secret, 'Secret')
        self.secret = secret
        return self

    def with_uri(self, uri):
        """The backend URI. (optional)

        Keyword arguments:
        uri -- the backend URI
        """
        self._require_str(uri, 'URI')
        self.token['uri'] = uri
        return self

    def with_capability(self, capability):
        """Set a capability for the token, e.g. to publish a stream. (optional)

        Keyword arguments:
        capability -- the valid capability
        """
        self._require_str(capability, 'Capability')
        # Stored as a sorted, de-duplicated list so repeated calls are
        # idempotent and the token content is deterministic.
        capabilities = set(self.token.get('capabilities', []))
        capabilities.add(capability)
        self.token['capabilities'] = sorted(capabilities)
        return self

    def expires_in_seconds(self, seconds):
        """Expires the token in the given time.

        NOTE: Your time must be synced with the atomic clock for
        expiration time to work properly.

        Keyword arguments:
        seconds -- the time in seconds
        """
        if not isinstance(seconds, int):
            raise TypeError('Seconds must be an int')
        # Expiry is stored as epoch milliseconds.
        self.token['expires'] = int((datetime.now().timestamp() + seconds) * 1000.0)
        return self

    def expires_at(self, ex_datetime):
        """Expires the token at the given datetime.

        NOTE: Your time must be synced with the atomic clock for
        expiration time to work properly.

        Keyword arguments:
        ex_datetime -- the expiry instant as a datetime
        """
        if not isinstance(ex_datetime, datetime):
            raise TypeError('datetime must be a valid date')
        self.token['expires'] = int(ex_datetime.timestamp() * 1000.0)
        return self

    def for_authenticate_only(self):
        """Limit the token to authentication only. (optional)"""
        self.token['type'] = 'auth'
        return self

    def for_streaming_only(self):
        """Limit the token to streaming only. (optional)"""
        self.token['type'] = 'stream'
        return self

    def for_publishing_only(self):
        """Limit the token to publishing only. (optional)"""
        self.token['type'] = 'publish'
        return self

    def for_session(self, session_id):
        """Limit the token to the specified session ID. (optional)

        Keyword arguments:
        session_id -- the session id
        """
        self._require_str(session_id, 'Session Id')
        self.token['sessionId'] = session_id
        return self

    def for_remote_address(self, remote_address):
        """Limit the token to the specified remote address. (optional)

        Keyword arguments:
        remote_address -- the remote address
        """
        self._require_str(remote_address, 'Remote Address')
        self.token['remoteAddress'] = remote_address
        return self

    def for_origin_stream(self, origin_stream_id):
        """Limit the token to the specified origin stream ID. (optional)

        Keyword arguments:
        origin_stream_id -- the origin stream ID
        """
        self._require_str(origin_stream_id, 'Origin Stream Id')
        self.token['originStreamId'] = origin_stream_id
        return self

    def for_channel(self, channel_id):
        """Limit the token to the specified channel ID. (optional)

        Keyword arguments:
        channel_id -- the channel id
        """
        self._require_str(channel_id, 'Channel ID')
        self.for_tag('channelId:{}'.format(channel_id))
        return self

    def for_channel_alias(self, channel_alias):
        """Limit the token to the specified channel alias. (optional)

        Keyword arguments:
        channel_alias -- the channel alias
        """
        self._require_str(channel_alias, 'Channel Alias')
        self.for_tag('channelAlias:{}'.format(channel_alias))
        return self

    def for_room(self, room_id):
        """Limit the token to the specified room ID. (optional)

        Keyword arguments:
        room_id -- the room id
        """
        self._require_str(room_id, 'Room ID')
        self.for_tag('roomId:{}'.format(room_id))
        return self

    def for_room_alias(self, room_alias):
        """Limit the token to the specified room alias. (optional)

        Keyword arguments:
        room_alias -- the room alias
        """
        self._require_str(room_alias, 'Room Alias')
        self.for_tag('roomAlias:{}'.format(room_alias))
        return self

    def for_tag(self, tag):
        """Limit the token to the specified tag on the origin stream. (optional)

        Keyword arguments:
        tag -- the tag required on the origin stream
        """
        self._require_str(tag, 'Tag')
        self.token['requiredTag'] = tag
        return self

    def apply_tag(self, tag):
        """Apply the tag to the stream when it is setup. (optional)

        Keyword arguments:
        tag -- the tag added to the new stream
        """
        self._require_str(tag, 'Tag')
        # Previously stored as list(set(...)), whose order is
        # nondeterministic; sorted for a deterministic token payload,
        # matching how `capabilities` is stored.
        apply_tags = set(self.token.get('applyTags', []))
        apply_tags.add(tag)
        self.token['applyTags'] = sorted(apply_tags)
        return self

    def build(self):
        """Build and return the signed token.

        Raises ValueError when application_id or secret was not set.
        Validation now runs *before* constructing DigestTokens, so a
        misconfigured builder fails with the intended error message.
        """
        if not self.application_id:
            raise ValueError('application_id must be set using the '
                             '"with_application_id" method before calling "build"')
        if not self.secret:
            raise ValueError('secret must be set using the '
                             '"with_secret" method call before calling "build"')
        return DigestTokens().sign_and_encode(
            self.application_id, self.secret, self.token)

    def value(self):
        """Return the (unsigned) token dictionary built so far."""
        return self.token
ea55fb37f7d90d99122deeb02d480c900db16d68 | 343 | py | Python | myhood/migrations/0010_remove_neighborhood_occupants_count.py | kiptoo-rotich/neighborhood | 54974922dbd52e83ccfc6ab8c5cf5e3b258211fb | [
"MIT"
] | null | null | null | myhood/migrations/0010_remove_neighborhood_occupants_count.py | kiptoo-rotich/neighborhood | 54974922dbd52e83ccfc6ab8c5cf5e3b258211fb | [
"MIT"
] | null | null | null | myhood/migrations/0010_remove_neighborhood_occupants_count.py | kiptoo-rotich/neighborhood | 54974922dbd52e83ccfc6ab8c5cf5e3b258211fb | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-07-26 18:46
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes the obsolete ``occupants_count``
    field from the ``neighborhood`` model."""

    # Must run after the migration that added Business.created_on.
    dependencies = [
        ('myhood', '0009_business_created_on'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='neighborhood',
            name='occupants_count',
        ),
    ]
ea584960cfdef9e2884e44f505e9d436fd02c8f8 | 954 | py | Python | modules/blog.py | rdelfin/personal-site | cb89a9b9fe4d2c522e18d953c5a26e28219066e8 | [
"MIT"
] | null | null | null | modules/blog.py | rdelfin/personal-site | cb89a9b9fe4d2c522e18d953c5a26e28219066e8 | [
"MIT"
] | null | null | null | modules/blog.py | rdelfin/personal-site | cb89a9b9fe4d2c522e18d953c5a26e28219066e8 | [
"MIT"
] | null | null | null | from flask import Blueprint, Response, abort, render_template
from utils import blog as blog_utils
from utils import tags as tag_utils
bp = Blueprint("blog", __name__)
@bp.route("/", methods=["GET"])
def blog():
return blog_utils.respond_blog_list()
@bp.route("/post_<post_name>", methods=["GET"])
def blog_post(post_name: str) -> Response:
return blog_utils.respond_blog(post_name)
@bp.route("/tags", methods=["GET"])
def tags() -> Response:
tags = tag_utils.list_tags()
return render_template('blog_tags.html', tags=sorted(tags, key=lambda t: t.name))
@bp.route("/tag/<tag_name>", methods=["GET"])
def tag(tag_name: str) -> Response:
tag = tag_utils.get_tag(tag_name)
if not tag:
abort(404)
blogs = blog_utils.get_blogs_with_tag(tag_name)
sorted_blogs = sorted(blogs.items(), reverse=True, key=lambda t: t[1].creation_time)
return render_template("blog_tag_page.html", tag=tag, blogs=sorted_blogs)
| 28.909091 | 88 | 0.708595 | 0 | 0 | 0 | 0 | 773 | 0.810273 | 0 | 0 | 108 | 0.113208 |
ea588a3617cbc016fb5719414b03fb8f8e82c488 | 3,208 | py | Python | autolint/runners.py | fmontoto/autolint | 25430e5b654d69c14d80f66d1297dc7a7ade9f79 | [
"MIT"
] | null | null | null | autolint/runners.py | fmontoto/autolint | 25430e5b654d69c14d80f66d1297dc7a7ade9f79 | [
"MIT"
] | null | null | null | autolint/runners.py | fmontoto/autolint | 25430e5b654d69c14d80f66d1297dc7a7ade9f79 | [
"MIT"
] | null | null | null | import collections
import subprocess
class Runner(object):
    """Base class for linter runners.

    Runner classes are looked up by name in ``_runners``; any name that
    has not been registered resolves to ``ByFileRunner``.
    """

    # Registry of runner classes keyed by name.  The default factory is
    # lazy, so referencing ByFileRunner here (defined later in the
    # module) is safe.
    _runners = collections.defaultdict(lambda: ByFileRunner)

    def __init__(self):
        """Runner constructor (no state to initialise)."""
        pass

    @classmethod
    def new_runner(cls, name):
        """Return an instance of the runner registered under ``name``.

        :param name: name of a registered runner.
        :return: an instance of the specified runner, or the default one
                 when ``name`` is not registered.
        """
        runner_cls = cls._runners[name]
        return runner_cls()

    def run(self, *args, **kwargs):
        """Run the linter; concrete subclasses must override this."""
        cls_ = self.__class__
        raise NotImplementedError(
            "%s.%s must override run()." % (cls_.__module__, cls_.__name__))
class ByFileRunner(Runner):
    """Runner that invokes the linter once per file."""

    def __init__(self):
        super(ByFileRunner, self).__init__()

    def _execute(self, cmd, files, cb=None):
        """Run ``cmd`` on each file and collect the results.

        Blocking: there is no timeout, so each linter invocation is
        awaited until it exits.

        :param cmd: list of str, as accepted by Popen.  If any element
            contains the placeholder ``%file_path%`` it is substituted
            with the file path; otherwise the path is appended to the
            command.
        :param files: list of str, paths of the files to lint.
        :param cb: optional callable invoked with
            ``(returncode, stdout, stderr)`` after each run.
        :return: ordered dict mapping each file path to its
            ``(returncode, stdout, stderr)`` tuple.
        """
        results = collections.OrderedDict()
        # Decide once, before the loop, whether substitution is needed.
        substitute = any('%file_path%' in part for part in cmd)
        for path in files:
            if substitute:
                full_cmd = [part.replace('%file_path%', path) for part in cmd]
            else:
                full_cmd = list(cmd) + [path]
            proc = subprocess.Popen(full_cmd, stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            out, err = proc.communicate()
            outcome = (proc.returncode, out, err)
            if cb is not None:
                cb(*outcome)
            results[path] = outcome
        return results

    def run(self, linter_configuration, files, cb):
        """Run the linter described by ``linter_configuration``.

        :param linter_configuration: dict parsed from the autolint
            configuration file; must contain ``'cmd'`` and may contain
            ``'flags'``.
        :param files: iterable of file paths to lint in this run.
        :param cb: callable forwarded to :meth:`_execute`, invoked after
            every linter invocation.
        :return: see :meth:`_execute`.
        """
        cmd = [linter_configuration['cmd']]
        cmd.extend(linter_configuration.get('flags', ()))
        return self._execute(cmd, files, cb)
| 32.734694 | 80 | 0.569202 | 3,165 | 0.986596 | 0 | 0 | 305 | 0.095075 | 0 | 0 | 1,625 | 0.506546 |
ea5ac3ac50c918deeb7f8f4138b0c8ba4507a1ff | 138 | py | Python | app/recipe/models.py | shivam230697/recipe-api | 3b614a591577b75d48eb2d9d458ddd3abd81644c | [
"MIT"
] | null | null | null | app/recipe/models.py | shivam230697/recipe-api | 3b614a591577b75d48eb2d9d458ddd3abd81644c | [
"MIT"
] | null | null | null | app/recipe/models.py | shivam230697/recipe-api | 3b614a591577b75d48eb2d9d458ddd3abd81644c | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
class TestModel(models.Model):
    """Minimal model with a single integer column (default 0)."""
    test_field = models.IntegerField(default=0)
| 17.25 | 47 | 0.76087 | 78 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.188406 |
ea5bc5156458545268e929502e53f862acdfcdb2 | 590 | py | Python | 133. Clone Graph.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | 4 | 2020-11-17T05:24:24.000Z | 2021-06-14T21:01:45.000Z | 133. Clone Graph.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | 133. Clone Graph.py | Dharaneeshwar/Leetcode | cc3ed07f6ac5f4d6e3f60c57a94a06a8be2f5287 | [
"MIT"
] | null | null | null | """
# Definition for a Node.
class Node:
def __init__(self, val = 0, neighbors = None):
self.val = val
self.neighbors = neighbors if neighbors is not None else []
"""
class Solution:
    """Deep-copy a connected undirected graph (LeetCode 133).

    Bug fixed: the memo used to be a *class* attribute (``dairy``), so
    clones leaked between independent calls and instances — a second
    call on a different graph could return stale nodes.  The memo is now
    created fresh for every top-level ``cloneGraph`` call.
    """

    def cloneGraph(self, node: 'Node') -> 'Node':
        """Return a deep copy of the graph reachable from ``node``,
        or None for an empty graph."""
        if not node:
            return None
        return self._clone(node, {})

    def _clone(self, node, memo):
        """Recursively clone ``node``; ``memo`` maps original -> clone
        so cycles terminate and each node is copied exactly once."""
        if node in memo:
            return memo[node]
        copy = Node(node.val)
        memo[node] = copy
        for neighbor in node.neighbors:
            copy.neighbors.append(self._clone(neighbor, memo))
        return copy
ea5bdaa1291c21dac6e42524e3e0a66e0bdee80e | 1,580 | py | Python | pythonidbot/error/__init__.py | hexatester/pythonidbot | 39964a340dca90dd64e3cd45d0513d5ae0be3986 | [
"MIT"
] | 1 | 2021-02-01T15:19:25.000Z | 2021-02-01T15:19:25.000Z | bot/error/__init__.py | hexatester/ptb-skeleton | f6f8b3b0dd814e223a8650a70a6749b6f208a225 | [
"MIT"
] | null | null | null | bot/error/__init__.py | hexatester/ptb-skeleton | f6f8b3b0dd814e223a8650a70a6749b6f208a225 | [
"MIT"
] | null | null | null | import logging
from telegram.error import (
TelegramError,
Unauthorized,
BadRequest,
TimedOut,
ChatMigrated,
NetworkError,
)
from .badrequest import badrequest
from .chatmigrated import chatmigrated
from .networkerror import networkerror
from .telegramerror import telegramerror
from .timedout import timedout
from .unauthorized import unauthorized
logger = logging.getLogger(__name__)
def error(update, context):
    """Log Errors caused by Updates."""
    # Re-raise the stored error and dispatch it to the matching handler,
    # following the pattern from
    # https://github.com/python-telegram-bot/python-telegram-bot/wiki/Exception-Handling
    try:
        raise context.error
    except Unauthorized as err:
        # e.g. the bot was removed: drop the chat from conversation lists
        unauthorized(update, context, err)
    except BadRequest as err:
        # malformed request payloads
        badrequest(update, context, err)
    except TimedOut as err:
        # slow-connection / request-timeout problems
        timedout(update, context, err)
    except NetworkError as err:
        # any other transport-level failure
        networkerror(update, context, err)
    except ChatMigrated as err:
        # a group's chat_id changed; err.new_chat_id holds the new one
        chatmigrated(update, context, err)
    except TelegramError as err:
        # catch-all for remaining telegram errors; must stay last since
        # TelegramError is the parent of the classes above
        telegramerror(update, context, err)

    logger.warning('Update "%s" caused error "%s"', update, context.error)
# All error handlers this package registers on a dispatcher.
HANDLERS = [error]


def register_errors(dispatcher):
    """Attach every handler in HANDLERS to the given dispatcher.

    The previous emptiness guard was redundant: iterating an empty list
    is already a no-op.
    """
    for handler in HANDLERS:
        dispatcher.add_error_handler(handler)
| 29.811321 | 88 | 0.703797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 422 | 0.267089 |
ea5c2b0dc4b47d7f49ad8aff5279a05c1976c300 | 2,590 | py | Python | numpyro/examples/runge_kutta.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | 3 | 2020-08-25T14:31:08.000Z | 2020-08-26T02:23:08.000Z | numpyro/examples/runge_kutta.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | null | null | null | numpyro/examples/runge_kutta.py | ahmadsalim/numpyro | 015c80ddd24cf6bc89006fc3a70b424fecd09331 | [
"Apache-2.0"
] | 1 | 2020-09-11T10:08:27.000Z | 2020-09-11T10:08:27.000Z | import functools
from typing import Callable, TypeVar
import jax
import jax.numpy as jnp
def scan(f, s, as_):
    """Pure-Python analogue of a scan: thread a carry through ``f`` over
    ``as_`` and gather the per-step outputs.

    Note: the collected outputs are *concatenated* (not stacked), so
    each step's output is expected to be an array.
    """
    carry = s
    collected = []
    for item in as_:
        carry, emitted = f(carry, item)
        collected.append(emitted)
    return carry, jnp.concatenate(collected)
KwArg = TypeVar('KwArg')
@functools.partial(jax.jit, static_argnums=(0, 1, 2, 3, 4, 5, 6, 7))
def _runge_kutta_4(f: Callable[[float, jnp.ndarray, KwArg], jnp.ndarray],
                   step_size,
                   num_steps,
                   dampening_rate,
                   lyapunov_scale,
                   clip,
                   unconstrain_fn,
                   constrain_fn,
                   rng_key: jnp.ndarray,
                   y0: jnp.ndarray,
                   **kwargs: KwArg):
    """JIT-compiled fixed-step 4th-order Runge-Kutta integrator.

    The first eight arguments are static (see ``static_argnums``) and are
    bound positionally by ``runge_kutta_4`` below — keep the order in sync.
    Returns ``(trajectory, lyapunov_loss)``; the Lyapunov penalty is
    currently disabled (commented out), so the loss is always 0.0.
    """

    def step(t, y, **kwargs):
        # One classic RK4 update; `clip` bounds each intermediate slope.
        k1 = clip(step_size * f(t, y, **kwargs))
        k2 = clip(step_size * f(t + step_size / 2, y + k1 / 2, **kwargs))
        k3 = clip(step_size * f(t + step_size / 2, y + k2 / 2, **kwargs))
        k4 = clip(step_size * f(t + step_size, y + k3, **kwargs))
        dy = clip((k1 + 2 * k2 + 2 * k3 + k4) / 6)
        return y + dy

    k1, rng_key = jax.random.split(rng_key)

    # Noise-perturbed copies of the keyword arguments, built in
    # unconstrained space; only used by the disabled Lyapunov estimate.
    nkwargs = {}
    for kwa, kwv in kwargs.items():
        k1, rng_key = jax.random.split(rng_key)
        kwn = jax.random.normal(k1, jnp.shape(kwv)) * lyapunov_scale
        nkwargs[kwa] = constrain_fn(kwa, unconstrain_fn(kwa, kwv) + kwn)

    def body_fn(s, i):
        y, rng_key, lyapunov_loss = s
        t = i * step_size
        k1, rng_key = jax.random.split(rng_key)
        # noise = jax.random.normal(k1, jnp.shape(y)) * lyapunov_scale
        # ly_prev = constrain_fn('y', unconstrain_fn('y', y) + noise)
        # ly = step(t, ly_prev, **nkwargs)
        y_und = step(t, y, **kwargs)
        # Damped update: mixes the stop-gradient value with the
        # differentiable one — the forward value is unchanged, but
        # gradients through the step are scaled by `dampening_rate`.
        y = (1 - dampening_rate) * jax.lax.stop_gradient(y_und) + dampening_rate * y_und
        # ll = jnp.sum(jnp.abs(y - ly)) / jnp.sum(jnp.abs(noise))
        lyapunov_loss = 0.0  # lyapunov_loss + jnp.maximum(0.0, jnp.log(ll))
        return ((y, rng_key, lyapunov_loss), y)

    s = (y0, rng_key, jnp.array(0.))
    (_, _, lyapunov_loss), res = jax.lax.scan(body_fn, s, jnp.arange(num_steps))
    return res, lyapunov_loss
def runge_kutta_4(f: Callable[[float, jnp.ndarray], jnp.ndarray], step_size=0.1, num_steps=10, dampening_rate=0.9,
                  lyapunov_scale=1e-3,
                  clip=lambda x: x, unconstrain_fn=lambda k, v: v, constrain_fn=lambda k, v: v):
    """Return an RK4 integrator with the given configuration baked in.

    The returned callable takes ``(rng_key, y0, **kwargs)`` and returns
    ``(trajectory, lyapunov_loss)``.  The configuration is bound
    *positionally* because it maps onto the static_argnums of the jitted
    ``_runge_kutta_4`` — do not reorder these arguments.
    """
    return functools.partial(_runge_kutta_4, f, step_size, num_steps, dampening_rate,
                             lyapunov_scale, clip, unconstrain_fn, constrain_fn)
| 37.536232 | 114 | 0.563707 | 0 | 0 | 0 | 0 | 1,919 | 0.740927 | 0 | 0 | 268 | 0.103475 |
ea5ce034937183164e81a955f75fc682707059c6 | 131 | py | Python | python-opencv/blog2-pixel/demo7.py | meteor1993/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 83 | 2019-10-15T06:54:06.000Z | 2022-03-28T14:08:21.000Z | python-opencv/blog2-pixel/demo7.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 1 | 2020-04-16T08:13:19.000Z | 2020-07-14T01:52:46.000Z | python-opencv/blog2-pixel/demo7.py | wenxuefeng3930/python-learning | 4ee574c9360caf6e63bb6ee2ef31fa6a9918fa40 | [
"MIT"
] | 74 | 2019-11-02T08:10:36.000Z | 2022-02-19T12:23:36.000Z | import cv2 as cv
from matplotlib import pyplot as plt
img=cv.imread('maliao.jpg', cv.IMREAD_COLOR)
plt.imshow(img)
plt.show() | 21.833333 | 45 | 0.740458 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 12 | 0.091603 |
ea5d2e688d4dea54f8e149ad7683f67e025d7b0f | 1,713 | py | Python | editaveis/prototipos/protoLevenshtein.py | Ziul/tcc1 | 97dc2b9afcd6736aa8158066b95a698301629543 | [
"CC-BY-3.0"
] | null | null | null | editaveis/prototipos/protoLevenshtein.py | Ziul/tcc1 | 97dc2b9afcd6736aa8158066b95a698301629543 | [
"CC-BY-3.0"
] | 2 | 2015-11-21T02:30:20.000Z | 2015-11-21T02:30:35.000Z | editaveis/prototipos/protoLevenshtein.py | Ziul/tcc1 | 97dc2b9afcd6736aa8158066b95a698301629543 | [
"CC-BY-3.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Code to rank packages from a search in APT using Levenshtein
"""
from apt import Cache
from Levenshtein import ratio
from exact import Pack, _parser
from multiprocessing.pool import ThreadPool as Pool
_MAX_PEERS = 20
def Thread_Rank(k):
    """Score one cache entry against the query (worker for Pool.map).

    Reads the query string from the module-level ``_args`` (set in the
    ``__main__`` block).  ``k`` is an element of ``cache._set`` —
    presumably a package-name string, since it is passed to ``ratio``
    directly; TODO confirm against python-apt internals.
    """
    pack = _args[0]
    item = Pack()
    item.name = k
    item.ratio = ratio(pack, k)
    return item
def Rankilist(pack):
    """Rank every package in the APT cache by Levenshtein similarity to
    ``pack``.

    With ``_options.single`` set, scoring runs sequentially in this
    process; otherwise the work is spread over a thread pool of
    ``_MAX_PEERS`` workers.
    """
    cache = Cache()
    if _options.single:
        ranked = []
        for entry in cache:
            scored = Pack()
            scored.name = entry.name
            scored.ratio = ratio(pack, entry.name)
            ranked.append(scored)
        return ranked
    pool = Pool(processes=_MAX_PEERS)
    return pool.map(Thread_Rank, cache._set)
if __name__ == '__main__':
    (_options, _args) = _parser.parse_args()
    package_name = _args[0]

    # Common Debian package name decorations to also try.
    # Fixed typo: 'commom' -> 'common' (apt uses e.g. foo-common).
    suffixes = ['core', 'dev', 'common', 'devel']
    prefixes = ['lib']

    lista = Rankilist(package_name)

    if _options.suffix:
        for suffix in suffixes:
            matches = Rankilist('{}-{}'.format(package_name, suffix))
            lista.extend(matches)

    if _options.prefix:
        for prefix in prefixes:
            matches = Rankilist('{}{}'.format(prefix, package_name))
            lista.extend(matches)

    if _options.suffix and _options.prefix:
        for suffix in suffixes:
            for prefix in prefixes:
                matches = Rankilist(
                    '{}{}-{}'.format(prefix, package_name, suffix))
                lista.extend(matches)

    # ultimo = time.time()
    # De-duplicate and show the best matches first.
    lista = list(set(lista))
    lista = sorted(lista, reverse=True)

    for i in lista[:_options.amount]:
        # Fixed: was a Python 2 print statement; the parenthesised
        # form works in both Python 2 and 3.
        print(i)
| 27.190476 | 69 | 0.590776 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 181 | 0.105663 |
ea5d44be711e561c7884cf746acc4f8a140b0e15 | 4,991 | py | Python | inpystem/tools/matlab_interface.py | etienne-monier/inpystem | 7914008374d0eb32362c447052de1af2b1ea9fa9 | [
"MIT"
] | 2 | 2020-04-29T21:45:58.000Z | 2020-06-12T04:39:18.000Z | inpystem/tools/matlab_interface.py | etienne-monier/inpystem | 7914008374d0eb32362c447052de1af2b1ea9fa9 | [
"MIT"
] | 8 | 2021-04-20T19:19:52.000Z | 2022-03-12T00:12:00.000Z | inpystem/tools/matlab_interface.py | etienne-monier/inpystem | 7914008374d0eb32362c447052de1af2b1ea9fa9 | [
"MIT"
] | 1 | 2020-10-02T20:54:05.000Z | 2020-10-02T20:54:05.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This module defines an interface to run matlab codes from python.
"""
import os
import time
import sys
import logging
import pathlib
import numpy as np
import scipy.io as sio
_logger = logging.getLogger(__name__)
def matlab_interface(program, dataDico):
    """Interfaces a matlab code with python3.

    The function needs a **matlab program** to run and **input data**
    to be given to the matlab program.  The input data should be given
    in dictionary format where keys are the matlab variable names and
    values are the variable data.

    Arguments
    ---------
    program: str, Path object
        The program path.  Must have a ``.m`` extension.
    dataDico: dict
        The dico containing the data to give to the program.  The key
        ``outName`` is overwritten with the output .mat file path.

    Returns
    -------
    dict
        The data returned by the program (ndarray values are squeezed).

    Raises
    ------
    ValueError
        If ``program`` is not a ``.m`` file.
    OSError
        If the current platform is neither linux nor win32.

    Note
    ----
    A ``matlab`` command should be accessible in the command line PATH
    to make this code work.
    """
    # Validate before doing any side effect (logging, mkdir, file I/O).
    if isinstance(program, str):
        program = pathlib.Path(program)
    if program.suffix != '.m':
        raise ValueError(
            'The program is not a matlab file. Its extension is'
            ' {} instead of .m.'.format(program.suffix))
    program = program.resolve()

    _logger.info('Preparing matlab interface.')

    # Data-exchange directory next to the program; created if missing.
    prog_dir = program.parent
    data_dir = prog_dir / 'InOut'
    data_dir.mkdir(parents=True, exist_ok=True)

    # Unique-ish in/out file names so concurrent runs do not collide.
    date_str = time.strftime('%A-%d-%B-%Y-%Hh%Mm%Ss', time.localtime())
    in_name = data_dir / 'in_{}.mat'.format(id(date_str))
    out_name = data_dir / 'out_{}.mat'.format(id(date_str))

    # Build the platform-specific matlab command line first; bug fix:
    # the old code only *logged* an error on unsupported platforms and
    # then fell through to use an undefined `data` variable (NameError).
    if sys.platform.startswith('linux'):
        # `tail -n +11` strips the matlab startup banner.
        matlab_cmd = (
            "matlab -nodesktop -nosplash -nodisplay -sd"
            " '{}' -r 'load(\"{}\"); run {}; quit;' | tail -n +11"
            .format(prog_dir, in_name, program.name))
    elif sys.platform.startswith('win32'):
        # Convert to windows paths (the duplicate in_name conversion of
        # the original code was dropped).
        in_name = pathlib.PureWindowsPath(in_name)
        out_name = pathlib.PureWindowsPath(out_name)
        prog_dir = pathlib.PureWindowsPath(prog_dir)
        program = pathlib.PureWindowsPath(program)
        matlab_cmd = (
            "matlab -nodesktop -nosplash -sd"
            " '{}' -batch 'load(\"{}\"); run {}; quit;'"
            .format(prog_dir, in_name, program.name))
    else:
        raise OSError(
            "Sorry, we don't currently support Matlab interface for the "
            + sys.platform + " OS")

    # Tell the matlab program where to write its results.
    dataDico['outName'] = str(out_name)

    _logger.info('Saving Matlab data for interface.')
    sio.savemat(str(in_name), dataDico)

    # Run the code in matlab; blocks until matlab exits.
    _logger.info('Lanching matlab.')
    os.system(matlab_cmd)

    _logger.info('Loading Matlab results.')
    data = sio.loadmat(str(out_name))

    # Erase temp input/output data.
    _logger.info('Cleaning temporary files.')
    os.remove(str(in_name))
    os.remove(str(out_name))

    # All output numpy data are squeezed, just in case singleton
    # dimensions were introduced by the .mat round-trip.
    for key, value in data.items():
        if type(value) is np.ndarray:
            data[key] = np.squeeze(value)

    return data
| 29.532544 | 80 | 0.601082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,743 | 0.549589 |
ea5d9376e7e61ab783150c786ab9ecc5e0df5125 | 4,719 | py | Python | binho/commands/binho_adc.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 4 | 2021-03-11T12:40:27.000Z | 2022-02-01T10:08:20.000Z | binho/commands/binho_adc.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 1 | 2021-11-26T10:20:18.000Z | 2021-11-30T14:25:56.000Z | binho/commands/binho_adc.py | binhollc/binho-python-package | 21ea73b03755c4205e93525ed24b4becba23c93e | [
"BSD-3-Clause"
] | 2 | 2021-02-28T00:39:39.000Z | 2021-04-05T12:45:56.000Z | #!/usr/bin/env python3
from __future__ import print_function
import sys
import errno
import statistics
import serial
from binho.utils import log_silent, log_verbose, binhoArgumentParser
from binho.errors import DeviceNotFoundError, CapabilityError
def main():
    """CLI entry point: locate a Binho host adapter and sample its ADC.

    Parses -f/--format (voltage|raw), -s/--samples and -n/--iopin, reads the
    requested number of samples from the chosen ADC pin, prints each sample,
    then a min/mean/max/range summary.  Exits with ENODEV when no usable
    device can be opened.
    """
    # Set up a simple argument parser.
    parser = binhoArgumentParser(description="utility for reading from Binho host adapter's ADC")
    parser.add_argument(
        "-f",
        "--format",
        dest="format",
        type=str,
        default="voltage",
        choices=["voltage", "raw"],
        help="Format to output in.\nVoltage string, or raw fraction returned by the ADC.",
    )
    parser.add_argument(
        "-s", "--samples", dest="sample_count", type=int, default=1, help="The number of samples to read. (default: 1)",
    )
    parser.add_argument("-n", "--iopin", default=0, help="Use the given IO pin number for the ADC input")
    args = parser.parse_args()
    # Only emit progress messages when --verbose was given.
    log_function = log_verbose if args.verbose else log_silent
    try:
        log_function("Trying to find a Binho host adapter...")
        device = parser.find_specified_device()
        if device.inBootloaderMode:
            print(
                "{} found on {}, but it cannot be used now because it's in DFU mode".format(
                    device.productName, device.commPort
                )
            )
            sys.exit(errno.ENODEV)
        elif device.inDAPLinkMode:
            print(
                "{} found on {}, but it cannot be used now because it's in DAPlink mode".format(
                    device.productName, device.commPort
                )
            )
            print("Tip: Exit DAPLink mode using 'binho daplink -q' command")
            sys.exit(errno.ENODEV)
        else:
            log_function("{} found on {}. (Device ID: {})".format(device.productName, device.commPort, device.deviceID))
    except serial.SerialException:
        # Another process already holds the serial port open.
        print(
            "The target Binho host adapter was found, but failed to connect because another application already has an\
            open connection to it."
        )
        print("Please close the connection in the other application and try again.")
        sys.exit(errno.ENODEV)
    except DeviceNotFoundError:
        if args.serial:
            print(
                "No Binho host adapter found matching Device ID '{}'.".format(args.serial), file=sys.stderr,
            )
        else:
            print("No Binho host adapter found!", file=sys.stderr)
        sys.exit(errno.ENODEV)
    # if we fail before here, no connection to the device was opened yet.
    # however, if we fail after this point, we need to make sure we don't
    # leave the serial port open.
    try:
        adcPin = {}
        if args.iopin:
            # NOTE(review): --iopin defaults to the int 0 (falsy), so "-n 0"
            # cannot explicitly select pin 0 -- the default ADC pin is used
            # instead; confirm whether that is intended.
            if args.iopin.isnumeric():
                adcPin = "IO" + str(args.iopin)
            else:
                adcPin = args.iopin.upper()
        else:
            adcPin = device.adc.getDefaultADCPin()
        if args.sample_count == 0:
            raise CapabilityError("Cannot take 0 samples! Samples must be >= 1.")
        if args.sample_count > 1:
            log_function("Taking {} samples...".format(args.sample_count))
        else:
            log_function("Taking {} sample...".format(args.sample_count))
        log_function("")
        samples = []
        for x in range(args.sample_count):
            if args.format == "voltage":
                sample = device.adc.readInputVoltage(adcPin)
                log_function("[{}] ADC channel {} reads {} Volts".format(x + 1, adcPin, sample))
            else:
                sample = device.adc.readInputRaw(adcPin)
                log_function("[{}] ADC channel {} reads {}".format(x + 1, adcPin, sample))
            samples.append(sample)
        log_function("")
        # Summary statistics across all collected samples.
        if args.format == "voltage":
            log_function(
                "Stats: Min = {} V, Mean = {} V, Max = {} V, Range = {} V (n = {})".format(
                    min(samples),
                    statistics.mean(samples),
                    max(samples),
                    "%.3f" % (max(samples) - min(samples)),
                    len(samples),
                )
            )
        else:
            log_function(
                "Stats: Min = {}, Mean = {}, Max = {}, Range = {} (n = {})".format(
                    min(samples),
                    statistics.mean(samples),
                    max(samples),
                    "%.3f" % (max(samples) - min(samples)),
                    len(samples),
                )
            )
    finally:
        # close the connection to the host adapter
        device.close()
if __name__ == "__main__":
    main()
| 32.321918 | 120 | 0.546726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,467 | 0.310871 |
ea5e0c05fc5fd1bd126ce33ac9399b17f9572400 | 6,349 | py | Python | server/app/models.py | ju1115kr/hash-brown | 93c5e636404608c7cba889cc9f9e0f3d3d0723b2 | [
"Apache-2.0"
] | 4 | 2018-06-27T10:28:54.000Z | 2020-03-15T10:44:37.000Z | server/app/models.py | ju1115kr/hash-brown | 93c5e636404608c7cba889cc9f9e0f3d3d0723b2 | [
"Apache-2.0"
] | 9 | 2018-06-27T10:29:37.000Z | 2021-12-13T19:48:39.000Z | server/app/models.py | ju1115kr/hash-brown | 93c5e636404608c7cba889cc9f9e0f3d3d0723b2 | [
"Apache-2.0"
] | 2 | 2018-07-04T16:54:20.000Z | 2018-07-04T16:58:36.000Z | # -*- coding: utf-8 -*-
from flask import url_for, current_app, g
from werkzeug import secure_filename
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from . import db
from app.exceptions import ValidationError
from datetime import datetime
# Association table between users and news: one row per "star" a user gives
# a news item (many-to-many relationship with a timestamp payload).
class Star(db.Model):
    """A single user's star on a single news item."""
    __tablename__ = 'stars'
    # Composite primary key: at most one star per (user, news) pair.
    user_id = db.Column(db.Integer, db.ForeignKey('users.id'),
                        primary_key=True)
    news_id = db.Column(db.Integer, db.ForeignKey('news.id'),
                        primary_key=True)
    # When the star was given (UTC).
    timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(db.Model):
    """Application user account.

    Maps to the ``users`` table.  ``stars`` links to the Star association
    rows (news this user starred); ``news`` links to articles the user
    authored.  Passwords are stored only as werkzeug hashes.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)
    ethereum_id = db.Column(db.Text)
    realname = db.Column(db.Text)
    password_hash = db.Column(db.String(128))
    tier = db.Column(db.Integer)
    balance = db.Column(db.Integer)
    stars = db.relationship('Star',
                            foreign_keys=[Star.user_id],
                            backref=db.backref('user', lazy='joined'),
                            lazy='dynamic',
                            cascade='all, delete-orphan')
    news = db.relationship('News', backref='author', lazy='dynamic')
    def __init__(self, username, realname, password):
        self.username = username
        self.realname = realname
        self.password = password  # routed through the password.setter (hashed)
        self.balance = 0
        self.tier = 0
    def __repr__(self):
        return '<User %r[%r]>' % (self.username, self.realname)
    @property
    def password(self):
        # Block direct reads of the plaintext password.
        raise AttributeError('password is not a readable attribute')
    @property
    def is_authenticated(self):
        return True
    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)
    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)
    def generate_auth_token(self, expiration):
        """Return a signed token embedding this user's id, valid `expiration` seconds.

        BUG FIX: this was decorated @staticmethod while still taking `self`,
        so instance calls (user.generate_auth_token(3600)) bound the
        expiration to `self`.  The plain-method form also keeps the old
        User.generate_auth_token(user, expiration) call style working.
        """
        s = Serializer(current_app.config['SECRET_KEY'],
                       expires_in=expiration)
        return s.dumps({'id': self.id})
    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid token, or None for invalid/expired ones."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except Exception:  # was a bare except: don't swallow SystemExit/KeyboardInterrupt
            return None
        return User.query.get(data['id'])
    def to_json(self):  # JSON serialization routine
        json_user = {
            'id': self.id,
            'username': self.username,
            'realname': self.realname,
            'ethereum_id': self.ethereum_id,
            'tier': self.tier,
            'balance': self.balance,
            'stars': [ star.news_id for star in self.stars ],
            'starcount': self.stars.count(),
        }
        return json_user
    @staticmethod
    def from_json(json_user):  # JSON deserialization routine
        """Build a User from an API payload; raise ValidationError on missing fields."""
        user_id = json_user.get('id')
        user_pw = json_user.get('pw')
        user_name = json_user.get('name')
        if user_id is None or user_id == '':
            raise ValidationError('user does not have a id')
        elif user_pw is None or user_pw == '':
            raise ValidationError('user does not have a pw')
        elif user_name is None or user_name == '':
            raise ValidationError('user does not have a name')
        return User(username=user_id, realname=user_name, password=user_pw)
class News(db.Model):
    """News article authored by a User; may be a reply/refutation of another News."""
    __tablename__ = 'news'
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey('users.id'))
    field = db.Column(db.Text)
    title = db.Column(db.Text)
    context = db.Column(db.Text, nullable=False)
    # Lowercased, tag-stripped copy of `context` used for searching.
    parsed_context = db.Column(db.Text)
    created_at = db.Column(db.DateTime, index=True,
                           default=datetime.utcnow)
    # Self-referential link: replies/refutations point at their parent news.
    parent_id = db.Column(db.Integer, db.ForeignKey('news.id'))
    associated = db.relationship('News', lazy='dynamic')
    refutation = db.Column(db.Boolean)
    stars = db.relationship('Star',
                            foreign_keys=[Star.news_id],
                            backref=db.backref('news', lazy='joined'),
                            lazy='dynamic',
                            cascade='all, delete-orphan')
    def __init__(self, field, title, context, parsed_context, author=None):
        self.field = field
        self.title = title
        self.context = context
        self.parsed_context = parsed_context
        if author is not None:
            self.author_name = author.realname
            self.author = author
    def __repr__(self):
        return '<News [%r](%r):%r>' % (self.created_at, self.author_id, self.context)
    def to_json(self):  # JSON serialization routine
        json_news = {
            'id': self.id,
            'author': self.author.username,
            'author_name': self.author.realname,
            'field': self.field,
            'title': self. title,
            'context': self.context,
            'created_at': self.created_at,
            'parent_id': self.parent_id,
            'refutation': self.refutation,
            'stars': [ star.user_id for star in self.stars ],
            'starcount': self.stars.count(),
            'associated_reply': self.associated.count()
        }
        return json_news
    @staticmethod
    def from_json(json_news):  # JSON deserialization routine
        field = json_news.get('field')
        title = json_news.get('title')
        context = json_news.get('context')
        if field is None or field == '':
            raise ValidationError('news does not have a field')
        if title is None or title == '':
            raise ValidationError('news does not have a title')
        if context is None or context == '':
            raise ValidationError('news does not have a context')
        # Store a searchable plain-text version alongside the raw HTML.
        parsed_context = removeEscapeChar(context).lower()
        news = News(field=field, title=title, context=context, parsed_context=parsed_context)
        return news
def removeEscapeChar(context):  # strip front-end HTML markup from `context`
    """Return `context` with HTML tags removed and common entities decoded.

    NOTE(review): the entity names below ('&nbsp;', '&lt;', '&gt;', '&amp;',
    '&quot;') were reconstructed -- the checked-in copy had them already
    HTML-decoded, turning every replace() into a no-op (and stripping all
    spaces).  Confirm against the original repository.
    """
    import re
    text = re.sub("(<([^>]+)>)", "", context)  # drop every <...> tag
    text = text.replace('&nbsp;', "").replace('&lt;', "<").replace('&gt;', ">")\
        .replace('&amp;', "&").replace('&quot;', '"')
    return text
| 35.272222 | 93 | 0.599779 | 5,760 | 0.895662 | 0 | 0 | 2,045 | 0.317991 | 0 | 0 | 993 | 0.154408 |
ea5f3e26198b37258a038046ed6c084a792a8485 | 1,098 | py | Python | PlotGenGain_PathsOfSelection_TBV.py | janaobsteter/Genotype_CODES | 8adf70660ebff4dd106c666db02cdba8b8ce4f97 | [
"Apache-2.0"
] | 1 | 2021-10-07T18:55:03.000Z | 2021-10-07T18:55:03.000Z | PlotGenGain_PathsOfSelection_TBV.py | janaobsteter/Genotype_CODES | 8adf70660ebff4dd106c666db02cdba8b8ce4f97 | [
"Apache-2.0"
] | null | null | null | PlotGenGain_PathsOfSelection_TBV.py | janaobsteter/Genotype_CODES | 8adf70660ebff4dd106c666db02cdba8b8ce4f97 | [
"Apache-2.0"
] | 1 | 2017-04-13T09:07:41.000Z | 2017-04-13T09:07:41.000Z | import pandas as pd
import sys
import numpy as np
import matplotlib.pyplot as plt
T = pd.read_csv('GenTrends_cat.csv')
T.index = T.cat
T = T.drop('cat', axis=1)
tT = np.transpose(T)
tT.loc[:,'Cycle'] = [i.strip('_vars').strip('_mean') for i in list(tT.index)]
tT_mean = tT.ix[0::2,:]
tT_var = tT.ix[1::2,:]
cats = [i for i in ['pBM', 'pb','gpb','genTest', 'k', 'pripust1', 'pripust2', 'mladi'] if i in tT_mean.columns]
for cat in cats:
tT_meanP = tT_mean[[cat, 'Cycle']]
tT_varP = tT_var[[cat, 'Cycle']]
plt.plot(tT_meanP.Cycle, tT_meanP.loc[:,cat], label = cat)
plt.xlabel('Selected Generation')
plt.ylabel('Mean Generation TBV')
legend(loc='upper left')
plt.savefig('GenTrends_Mean_PathOfSel.pdf')
for cat in cats:
tT_meanP = tT_mean[[cat, 'Cycle']]
tT_varP = tT_var[[cat, 'Cycle']]
plt.plot(tT_varP.Cycle, tT_varP.loc[:,cat], label = cat)
plt.xlabel('Selected Generation')
plt.ylabel('Mean Generation TBV')
legend(loc='upper left')
plt.savefig('GenTrends_Var_PathOfSel.pdf')
print 'Created plots: GenTrends_Mean_' + cat + '.pdf and GenTrends_Var_' + cat + '.pdf'
| 28.894737 | 111 | 0.675774 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.324226 |
ea5f6a8299056823901669f8b2e31fd53aa748a1 | 1,094 | py | Python | web_info.py | B4t33n/web_info | a0f8d5be160c96e6e949140a7e3c8982bfce4ab5 | [
"Apache-2.0"
] | 1 | 2021-05-16T13:29:17.000Z | 2021-05-16T13:29:17.000Z | web_info.py | B4t33n/web_info | a0f8d5be160c96e6e949140a7e3c8982bfce4ab5 | [
"Apache-2.0"
] | null | null | null | web_info.py | B4t33n/web_info | a0f8d5be160c96e6e949140a7e3c8982bfce4ab5 | [
"Apache-2.0"
] | null | null | null | print("\033[92m")
import os
import urllib2
import sys
print("---------------------------------------------")
os.system("figlet web info")
print("----------------------------------------------")
print("\033[91m")
print("#############################################")
print(" Coded by B4t33n ")
print("#############################################")
print("\033[93m")
print(" INFORMATION ")
print(".............................................")
print("")
print(" FB PAGE : https://www.facebook.com/B4t33n/")
print(" CODER : B4t33n")
print(" VERSION : 1.0")
print(" TEAM : BWHH")
print(" EMAIL : gamilniyakikorbu@gmail.com")
print("\033[92m")
print(">>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<<<<<<<<<")
print("\033[91m")
print(" Enter CTRL+Z To Exit Tool ")
print("")
print("\033[94m")
url = raw_input(">>>>ENTER YOUR WEBSITE LINK :")
print("\033[95m")
print("")
url.rstrip ( )
print("")
header = urllib2.urlopen (url) .info ( )
print("")
print(str (header) ) | 23.782609 | 60 | 0.399452 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 732 | 0.669104 |
ea61b0a3ba0b11abd7ed94000c53187e3c4b4ffc | 109 | py | Python | viper_dev.py | safinsingh/viper | f7fa9182713c4f0fbb33c2e881f668b807fd3956 | [
"MIT"
] | null | null | null | viper_dev.py | safinsingh/viper | f7fa9182713c4f0fbb33c2e881f668b807fd3956 | [
"MIT"
] | null | null | null | viper_dev.py | safinsingh/viper | f7fa9182713c4f0fbb33c2e881f668b807fd3956 | [
"MIT"
] | null | null | null | from viper import *
import inspect
def GetSource(func):
    """Print and return the source code of *func* (via inspect.getsource).

    Returning the text (the original returned None) lets callers reuse it;
    existing callers that ignored the return value are unaffected.
    """
    lines = inspect.getsource(func)
    print(lines)
    return lines
ea631526ede5cfa72720ca402cc33b3e8ab33c52 | 925 | py | Python | cride/utils/models.py | jpablocardona/platzi-django-advance | 591fbfbb318ba235933544e585a6866ede829d63 | [
"MIT"
] | null | null | null | cride/utils/models.py | jpablocardona/platzi-django-advance | 591fbfbb318ba235933544e585a6866ede829d63 | [
"MIT"
] | 6 | 2020-06-06T01:34:33.000Z | 2022-03-12T00:21:34.000Z | cride/utils/models.py | jpablocardona/platzi-django-advance | 591fbfbb318ba235933544e585a6866ede829d63 | [
"MIT"
] | null | null | null | """ django models utilities"""
from django.db import models
class CRideModel(models.Model):
    """ Comparte Ride base model
    CRideModel acts as an abstract base class from which every
    other model in the project will inherit. This class provides
    every table with the following attributes:
    + created (Datetime): Store the datetime to the object was created
    + updated (Datetime): Store the last datetime to the object was modified
    """
    # Set once on INSERT (auto_now_add).
    created = models.DateTimeField(
        'created at',
        auto_now_add=True,
        help_text='Date time on which object was created'
    )
    # Refreshed on every save() (auto_now).
    modified = models.DateTimeField(
        'updated at',
        auto_now=True,
        help_text='Date time on which the object was last modified'
    )
    class Meta:
        """Meta options."""
        # Abstract: no table is created for this model; children inherit
        # the fields plus the default newest-first ordering below.
        abstract = True
        get_latest_by = 'created'
        ordering = ['-created', '-modified']
| 26.428571 | 80 | 0.646486 | 861 | 0.930811 | 0 | 0 | 0 | 0 | 0 | 0 | 560 | 0.605405 |
ea63c3548941af69f71d32ce6f27172728ac718b | 1,811 | py | Python | lambda/VisitorsDynamoDBClient.py | kyhau/hello-visitor | 6c0c6685c1c2a7226f00a87e8858d9e0a2f01699 | [
"Unlicense"
] | null | null | null | lambda/VisitorsDynamoDBClient.py | kyhau/hello-visitor | 6c0c6685c1c2a7226f00a87e8858d9e0a2f01699 | [
"Unlicense"
] | null | null | null | lambda/VisitorsDynamoDBClient.py | kyhau/hello-visitor | 6c0c6685c1c2a7226f00a87e8858d9e0a2f01699 | [
"Unlicense"
] | 1 | 2017-08-24T06:14:22.000Z | 2017-08-24T06:14:22.000Z | import boto3
import json
import uuid
from datetime import datetime
import logging
# Update the root logger to get messages at DEBUG and above
logging.getLogger().setLevel(logging.DEBUG)
logging.getLogger("botocore").setLevel(logging.CRITICAL)
logging.getLogger("boto3").setLevel(logging.CRITICAL)
logging.getLogger("urllib3").setLevel(logging.CRITICAL)
def handler(event, context):
    """Dispatch a CRUD-style operation against DynamoDB (plus echo/ping).

    Expected event keys:
      - operation: one of create/read/update/delete/list/echo/ping
      - tableName: required for the DynamoDB operations
      - payload: kwargs forwarded to the boto3 Table call (or echoed back)
      - timestampField: optional attribute name stamped with "now" on
        create/update
    `context` is the standard Lambda context object (unused).
    """
    logging.info("Received event: " + json.dumps(event, indent=2))
    operation = event["operation"]
    dynamo = None
    if "tableName" in event:
        dynamo = boto3.resource("dynamodb").Table(event["tableName"])
        if operation == "create":
            # Server-side primary key so clients never supply their own.
            event["payload"]["Item"]["UUID"] = str(uuid.uuid4())
        if "timestampField" in event:
            now = datetime.now().strftime("%Y-%m-%d,%H:%M")
            if operation == "create":
                event["payload"]["Item"][event["timestampField"]] = now
            elif operation == "update":
                event["payload"].update(
                    {"AttributeUpdates": {event["timestampField"]: {"Value": now}}})
    operations = {
        "create": lambda x: dynamo.put_item(**x),
        "read": lambda x: dynamo.get_item(**x),
        "update": lambda x: dynamo.update_item(**x),
        "delete": lambda x: dynamo.delete_item(**x),
        "list": lambda x: dynamo.scan(**x),
        "echo": lambda x: x,
        "ping": lambda x: "pong"
    }
    if operation not in operations:
        raise ValueError(f"Unrecognized operation {operation}")
    if dynamo is None and operation not in ("echo", "ping"):
        # BUG FIX: a DB operation without 'tableName' used to crash with a
        # NameError on the unbound `dynamo`; fail with a clear message instead.
        raise ValueError(f"Operation {operation} requires 'tableName'")
    return operations[operation](event.get("payload"))
| 35.509804 | 139 | 0.651022 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 693 | 0.382662 |
ea65ee07c7aea0580031cc966706784863780580 | 2,735 | py | Python | merge.py | marcelbrueckner/merge-intervals | d8f6329f12bfe6ec48180ba215f40214f476f654 | [
"MIT"
] | null | null | null | merge.py | marcelbrueckner/merge-intervals | d8f6329f12bfe6ec48180ba215f40214f476f654 | [
"MIT"
] | null | null | null | merge.py | marcelbrueckner/merge-intervals | d8f6329f12bfe6ec48180ba215f40214f476f654 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from argparse import ArgumentParser, ArgumentError
import re, sys
# Define a custom argument type `interval_int` to properly parse arguments
# https://docs.python.org/3/library/argparse.html#type
def interval_int(arg):
    """Parse one interval argument like "1,3" or "[ -2 , 5 ]".

    Raises argparse.ArgumentError for anything that does not look like a
    pair of integers; returns the pair as a sorted two-element list.
    """
    interval_re = re.compile(r"^\s*\[?\s*(-?\s*(\d\s*)+,\s*-?\s*(\d\s*)+)\]?\s*$")
    matched = interval_re.match(arg)
    if matched is None:
        raise ArgumentError(None, f"argument interval: invalid interval value: '{arg}'")
    # Strip internal whitespace, then split into the two endpoint strings.
    endpoints = matched.group(1).replace(" ", "").split(",")
    return sorted(int(part) for part in endpoints)
def merge(intervals):
    """Merge possibly-overlapping intervals into non-overlapping intervals.

    intervals: iterable of [lo, hi] pairs with lo <= hi.
    Returns a new list of merged [lo, hi] pairs sorted by lower bound.
    Touching intervals (e.g. [1,2] and [2,3]) are merged.

    BUG FIX: the original extended intervals in place, mutating the caller's
    inner lists; fresh lists are built here so the input is left untouched.
    """
    merged = []
    for lo, hi in sorted(intervals):
        if merged and lo <= merged[-1][1]:
            # Overlaps (or touches) the previous merged interval: extend it.
            merged[-1][1] = max(merged[-1][1], hi)
        else:
            merged.append([lo, hi])
    return merged
if __name__ == '__main__':
    # argparse has issues with parameters starting with a negative integer value,
    # thus a little workaround is required (by adding a space in front)
    # https://stackoverflow.com/questions/9025204/python-argparse-issue-with-optional-arguments-which-are-negative-numbers
    # BUG FIX: guard len(arg) > 1 so a bare "-" argument (conventionally
    # meaning stdin) no longer raises IndexError on arg[1].
    for i, arg in enumerate(sys.argv):
        if arg.startswith('-') and len(arg) > 1 and arg[1].isdigit():
            sys.argv[i] = ' ' + arg
    # Define and parse arguments
    parser = ArgumentParser(description='Merge probably overlapping intervals into non-overlapping intervals.')
    parser.add_argument('intervals', metavar='interval', type=interval_int, nargs='+',
                        help='list of intervals to merge (example: -1,3 3,9)')
    parser.add_argument('--verbose', action='store_true', help='Print merge intervals to stdout')
    args = parser.parse_args()
    # Merge intervals
    merged_intervals = merge(args.intervals)
    if args.verbose:
        print(merged_intervals)
| 36.959459 | 122 | 0.669835 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,317 | 0.481536 |
ea668e4c3f0055d09aecf153fc4a7af6e486eded | 388 | py | Python | factor_vae/types.py | kiwi0fruit/jats-semi-supervised-pytorch | 67e9bb85f09f8ef02e17e495784d1d9a71c3adec | [
"MIT"
] | null | null | null | factor_vae/types.py | kiwi0fruit/jats-semi-supervised-pytorch | 67e9bb85f09f8ef02e17e495784d1d9a71c3adec | [
"MIT"
] | null | null | null | factor_vae/types.py | kiwi0fruit/jats-semi-supervised-pytorch | 67e9bb85f09f8ef02e17e495784d1d9a71c3adec | [
"MIT"
] | null | null | null | from typing import Tuple
from abc import abstractmethod
from torch import Tensor
from torch.nn import Module
class BaseDiscriminator(Module):
    """Abstract discriminator over latent codes (FactorVAE-style).

    Subclasses implement ``forward_``, returning a pair of tensors
    (presumably per-class outputs -- confirm in concrete subclasses).
    """
    @abstractmethod
    def forward_(self, z: Tensor) -> Tuple[Tensor, Tensor]:
        raise NotImplementedError
    def forward(self, z: Tensor) -> Tuple[Tensor, Tensor]:  # pylint: disable=arguments-differ
        # nn.Module.__call__ dispatches here; delegate to the subclass hook.
        return self.forward_(z=z)
| 27.714286 | 94 | 0.729381 | 276 | 0.71134 | 0 | 0 | 109 | 0.280928 | 0 | 0 | 34 | 0.087629 |
ea6700a25cf2ed6741438c3d34e31d11883d00c5 | 1,484 | py | Python | day6/main.py | urosZoretic/adventofcode2021 | aa55b9ba9d07ef70ad2ecfd0b72b14329e7685ba | [
"Apache-2.0"
] | null | null | null | day6/main.py | urosZoretic/adventofcode2021 | aa55b9ba9d07ef70ad2ecfd0b72b14329e7685ba | [
"Apache-2.0"
] | null | null | null | day6/main.py | urosZoretic/adventofcode2021 | aa55b9ba9d07ef70ad2ecfd0b72b14329e7685ba | [
"Apache-2.0"
] | null | null | null | inputFile = "day6/day6_1_input.txt"
# https://adventofcode.com/2021/day/6
if __name__ == '__main__':
print("Lanternfish")
with open(inputFile, "r") as f:
fishArray = [int(num) for num in f.read().strip().split(",")]
# for part2... not needed to read array again from file
origFishArray = fishArray.copy()
# unsustainable solution for part2... to big array.. memory heavy
nbDays = 1
while nbDays <= 80:
for index in range(0, len(fishArray)):
if fishArray[index] == 0:
fishArray[index] = 6
fishArray.append(8)
continue
fishArray[index] -= 1
nbDays += 1
print("part1. Nb fish after 80 days: ", len(fishArray))
## part 2 --> fish counters
fishCounter = [0] * 9
for num in origFishArray:
fishCounter[num] += 1 # index represent number.. value represent nb fishes for that fish stage
nbDays = 1
while nbDays <= 256:
# each day shift array values to th left.
nbSpawn = 0
for i in range(0, len(fishCounter) - 1):
if i == 0: ## spawn for current day
nbSpawn = fishCounter[i]
fishCounter[i] = fishCounter[i + 1]
# spawn fishes for current day
fishCounter[8] = nbSpawn # spawn nb fishes
fishCounter[6] += nbSpawn # reset nbSpawn fished to state 6
nbDays +=1
print("part2. After 256 days. Efficient solution: ", sum(fishCounter)) | 31.574468 | 102 | 0.58558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 530 | 0.357143 |
ea68b376b16e104101a6c8ebb4b9a9fa6ab801a5 | 7,915 | py | Python | datasets/Voc_Dataset.py | DLsnowman/Deeplab-v3plus | f7001f669c91aac02ff4e9c39e53037ad863979d | [
"MIT"
] | 333 | 2018-10-31T03:19:26.000Z | 2022-03-15T07:31:13.000Z | datasets/Voc_Dataset.py | mqchen1993/Deeplab-v3plus | 886041c2099a069bf4b667b981b650be7931be73 | [
"MIT"
] | 19 | 2018-10-31T06:37:25.000Z | 2022-01-13T00:56:31.000Z | datasets/Voc_Dataset.py | mqchen1993/Deeplab-v3plus | 886041c2099a069bf4b667b981b650be7931be73 | [
"MIT"
] | 68 | 2018-10-31T03:51:25.000Z | 2022-03-30T04:58:55.000Z | # -*- coding: utf-8 -*-
# @Time : 2018/9/21 17:21
# @Author : HLin
# @Email : linhua2017@ia.ac.cn
# @File : Voc_Dataset.py
# @Software: PyCharm
import PIL
import random
import scipy.io
from PIL import Image, ImageOps, ImageFilter
import numpy as np
import cv2
import os
import torch
import torch.utils.data as data
import torchvision.transforms as ttransforms
class Voc_Dataset(data.Dataset):
    """Pascal VOC semantic-segmentation dataset (VOC2007 / VOC2012 / VOC2012-aug)."""
    def __init__(self,
                 root_path='/data/linhua/VOCdevkit',
                 dataset='voc2012_aug',
                 base_size=513,
                 crop_size=513,
                 is_training=True):
        """
        :param root_path: VOCdevkit root directory
        :param dataset: one of 'voc2007', 'voc2012', 'voc2012_aug'
        :param base_size: reference size for random rescaling during training
        :param crop_size: side length of the square crop fed to the network
        :param is_training: pick the training split + augmentations when True
        """
        self.dataset = dataset
        self.is_training = is_training
        self.base_size = base_size
        self.crop_size = crop_size
        if self.dataset == 'voc2007':
            self.data_path = os.path.join(root_path, "VOC2007")
            if is_training:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/trainval.txt")
            else:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/test.txt")
        elif self.dataset == 'voc2012':
            self.data_path = os.path.join(root_path, "VOC2012")
            if is_training:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/train.txt")
            else:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/val.txt")
        elif self.dataset == 'voc2012_aug':
            self.data_path = os.path.join(root_path, "VOC2012")
            if is_training:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/train_aug.txt")
            else:
                item_list_filepath = os.path.join(self.data_path, "ImageSets/Segmentation/val_aug.txt")
        else:
            # NOTE(review): raising a Warning as an exception is unusual but
            # kept for backward compatibility with callers that may catch it.
            raise Warning("dataset must be voc2007 or voc2012 or voc2012_aug")
        self.image_filepath = os.path.join(self.data_path, "JPEGImages")
        self.gt_filepath = os.path.join(self.data_path, "SegmentationClassAug")
        # BUG FIX: the split-file handle was leaked (open() inside a
        # comprehension) and the loop variable shadowed the builtin `id`.
        with open(item_list_filepath) as split_file:
            self.items = [line.strip() for line in split_file]
        self.classes = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow',
                        'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
                        'tvmonitor']
    def __getitem__(self, item):
        """Return (image_tensor, label_tensor, sample_id) for index `item`."""
        item_id = self.items[item]
        gt_image_path = os.path.join(self.gt_filepath, "{}.png".format(item_id))
        gt_image = Image.open(gt_image_path)
        image_path = os.path.join(self.image_filepath, "{}.jpg".format(item_id))
        image = Image.open(image_path).convert("RGB")
        if self.is_training:
            image, gt_image = self._train_sync_transform(image, gt_image)
        else:
            image, gt_image = self._val_sync_transform(image, gt_image)
        return image, gt_image, item_id
    def _train_sync_transform(self, img, mask):
        '''
        Synchronized training augmentation applied to image and mask.
        :param img: PIL input image
        :param mask: PIL input ground-truth mask
        :return: (tensor image, tensor mask)
        '''
        # random mirror
        if random.random() < 0.5:
            img = img.transpose(Image.FLIP_LEFT_RIGHT)
            mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
        crop_size = self.crop_size
        # random scale (short edge)
        short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
        w, h = img.size
        if h > w:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        else:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # pad crop (zero-pad so the random crop below always fits)
        if short_size < crop_size:
            padh = crop_size - oh if oh < crop_size else 0
            padw = crop_size - ow if ow < crop_size else 0
            img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
            mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=0)
        # random crop crop_size
        w, h = img.size
        x1 = random.randint(0, w - crop_size)
        y1 = random.randint(0, h - crop_size)
        img = img.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        mask = mask.crop((x1, y1, x1 + crop_size, y1 + crop_size))
        # gaussian blur as in PSP
        if random.random() < 0.5:
            img = img.filter(ImageFilter.GaussianBlur(
                radius=random.random()))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask
    def _val_sync_transform(self, img, mask):
        """Deterministic short-side resize + center crop used for validation."""
        outsize = self.crop_size
        short_size = outsize
        w, h = img.size
        if w > h:
            oh = short_size
            ow = int(1.0 * w * oh / h)
        else:
            ow = short_size
            oh = int(1.0 * h * ow / w)
        img = img.resize((ow, oh), Image.BILINEAR)
        mask = mask.resize((ow, oh), Image.NEAREST)
        # center crop
        w, h = img.size
        x1 = int(round((w - outsize) / 2.))
        y1 = int(round((h - outsize) / 2.))
        img = img.crop((x1, y1, x1 + outsize, y1 + outsize))
        mask = mask.crop((x1, y1, x1 + outsize, y1 + outsize))
        # final transform
        img, mask = self._img_transform(img), self._mask_transform(mask)
        return img, mask
    def _img_transform(self, image):
        # ToTensor + ImageNet mean/std normalization.
        image_transforms = ttransforms.Compose([
            ttransforms.ToTensor(),
            ttransforms.Normalize([.485, .456, .406], [.229, .224, .225]),
        ])
        image = image_transforms(image)
        return image
    def _mask_transform(self, gt_image):
        # Keep raw class ids as int32 labels (no scaling/normalization).
        target = np.array(gt_image).astype('int32')
        target = torch.from_numpy(target)
        return target
    def __len__(self):
        return len(self.items)
class VOCDataLoader():
    """Bundles train/valid DataLoaders (plus per-epoch batch counts) for VOC.

    `args` must provide: dataset, base_size, crop_size, batch_size,
    data_loader_workers, pin_memory.
    """
    def __init__(self, args):
        self.args = args
        train_set = Voc_Dataset(dataset=self.args.dataset,
                                base_size=self.args.base_size,
                                crop_size=self.args.crop_size,
                                is_training=True)
        val_set = Voc_Dataset(dataset=self.args.dataset,
                              base_size=self.args.base_size,
                              crop_size=self.args.crop_size,
                              is_training=False)
        self.train_loader = data.DataLoader(train_set,
                                            batch_size=self.args.batch_size,
                                            shuffle=True,
                                            num_workers=self.args.data_loader_workers,
                                            pin_memory=self.args.pin_memory,
                                            drop_last=True)
        self.valid_loader = data.DataLoader(val_set,
                                            batch_size=self.args.batch_size,
                                            shuffle=False,
                                            num_workers=self.args.data_loader_workers,
                                            pin_memory=self.args.pin_memory,
                                            drop_last=True)
        # Batches per epoch (rounded up by adding batch_size before the division).
        self.train_iterations = (len(train_set) + self.args.batch_size) // self.args.batch_size
        self.valid_iterations = (len(val_set) + self.args.batch_size) // self.args.batch_size
if __name__ == '__main__':
    # Ad-hoc smoke test: inspect a Berkeley SBD ground-truth .mat file and
    # a couple of numpy array shapes.  Paths are machine-specific.
    data=scipy.io.loadmat('/data/linhua/VOCdevkit/BSD/dataset/cls/2008_003846.mat')
    print(data['GTcls']["Segmentation"][0,0])
    print(np.array([[(1,2,3)]]).shape)
    print(np.array([[np.array(1), np.array(2), np.array(3)]]).shape)
ea6a9e7274c58c8850478174965f8dcd501745fd | 2,341 | py | Python | tool/grid.py | David-Loibl/gistemp | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | 1 | 2020-02-04T13:16:05.000Z | 2020-02-04T13:16:05.000Z | tool/grid.py | David-Loibl/gistemp4.0 | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | null | null | null | tool/grid.py | David-Loibl/gistemp4.0 | 4b96696243cbbb425c7b27fed35398e0fef9968d | [
"BSD-3-Clause"
] | null | null | null | #!/usr/local/bin/python3.4
#
# Avi Persin, Revision 2016-01-06
# grid.py
"""
grid YYYY-MM [v2-file]
Display gridded anomalies as SVG file.
"""
# Regular expression used to match/validate the "when" argument.
RE_WHEN = r'(\d{4})-(\d{2})'
def map(when, inp, out):
    """Take a cccgistemp subbox file in V2 mean format as *inp* and
    produce an SVG file on *out*."""
    import math
    import re

    out.write("""<svg
    xmlns="http://www.w3.org/2000/svg"
    xmlns:xlink="http://www.w3.org/1999/xlink"
    version="1.1">\n""")
    # Split "YYYY-MM" and convert the month to 0-based.
    year, month = re.match(RE_WHEN, when).groups()
    month = int(month) - 1
    assert 0 <= month < 12
    out.write("""<g transform='translate(180,90)'>\n""")
    radius_scale = 0.25
    for (lat, lon), anomaly in filter_month(inp, year, month):
        x, y = topixel(lat, lon)
        y = -y  # SVG y axis grows downward
        fill = 'red' if anomaly > 0 else 'blue'
        r = math.sqrt(abs(anomaly)) * radius_scale
        out.write("""<circle cx='%.1f' cy='%.1f' r='%.1f' fill='%s' />\n""" %
                  (x, y, r, fill))
    out.write('</g>\n')
    out.write("""</svg>\n""")
def topixel(lat, lon):
    u"""Return x,y coordinate. Plate Carr\xe9e projection."""
    # Equirectangular: longitude maps directly to x, latitude to y.
    return lon, lat
def filter_month(inp, year, month):
    """Yield each value from the v2.mean file for the year and month in
    question. Invalid values (marked with -9999 in the file) are
    ignored. Each value is yielded as ((lat,lon),v) where lat,lon are
    the latitude and longitude in degrees (positive is North and East).
    """
    offset = 16 + 5 * month  # fixed-width record: monthly values are 5 chars wide
    for record in inp:
        if record[12:16] != year:
            continue
        value = int(record[offset:offset + 5])
        if value == -9999:  # missing-data sentinel
            continue
        yield (float(record[:5]), float(record[5:11])), value
def usage(out):
    """Write the usage text (module docstring) to *out*; return exit status 2."""
    out.write("Usage:\n")
    out.write(__doc__)
    return 2
def main(argv=None):
    """Command-line driver: ``grid YYYY-MM [v2-file]`` (stdin when no file)."""
    import re
    import sys
    if argv is None:
        argv = sys.argv
    if len(argv) <= 1:
        return usage(sys.stderr)
    when = argv[1]
    if not re.match(RE_WHEN, when):
        return usage(sys.stderr)
    if len(argv) > 2:
        # NOTE(review): this handle is never closed; harmless for a
        # short-lived CLI but worth fixing if main() is reused as a library.
        f = open(argv[2])
    else:
        f = sys.stdin
    map(when, f, sys.stdout)
if __name__ == '__main__':
    main()
| 23.41 | 77 | 0.547202 | 0 | 0 | 578 | 0.246903 | 0 | 0 | 0 | 0 | 965 | 0.412217 |
ea6aca3a1ab763492e5a9e482d9cc0174a64d165 | 1,460 | py | Python | nlu_flow/retrieval/faq_answer_retrieval/inferencer.py | cheesama/nlflow | 5c504fc4bfc5aa0ca3892af7b01b2eb46f5edfbb | [
"Apache-2.0"
] | 1 | 2022-03-22T23:01:33.000Z | 2022-03-22T23:01:33.000Z | nlu_flow/retrieval/faq_answer_retrieval/inferencer.py | cheesama/nlu_flow | 5c504fc4bfc5aa0ca3892af7b01b2eb46f5edfbb | [
"Apache-2.0"
] | null | null | null | nlu_flow/retrieval/faq_answer_retrieval/inferencer.py | cheesama/nlu_flow | 5c504fc4bfc5aa0ca3892af7b01b2eb46f5edfbb | [
"Apache-2.0"
] | null | null | null | from fastapi import FastAPI
from transformers import ElectraModel, ElectraTokenizer
from koelectra_fine_tuner import KoelectraQAFineTuner
from nlu_flow.preprocessor.text_preprocessor import normalize
import torch
import faiss
import dill
app = FastAPI()
is_ready = False
#load chitchat_retrieval_model
model = None
model = KoelectraQAFineTuner()
model.load_state_dict(torch.load('./koelectra_chitchat_retrieval_model.modeldict', map_location=lambda storage, loc: storage))
#load tokenizer
MAX_LEN = 64
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-small-v2-discriminator")
#load index
index = faiss.read_index('chitchat_retrieval_index')
top_k = 1
#load response_dict
response_dict = {}
with open('./response_dict.dill', 'rb') as responseFile:
response_dict = dill.load(responseFile)
if model:
is_ready = True
#endpoints
@app.get("/")
async def health():
if is_ready:
output = {'code': 200}
else:
output = {'code': 500}
return output
@app.post("/chitchat_retrieval/search")
async def search_chitchat_answer(text: str):
    """Return the nearest chitchat answer for *text* from the faiss index.

    The query is normalized, tokenized, embedded by the fine-tuned model and
    looked up in the index; the matching entry of response_dict is returned.
    """
    text = normalize(text, with_space=True)
    tokens = tokenizer.encode(text, max_length=MAX_LEN, pad_to_max_length=True, truncation=True)
    feature = model.get_question_feature(torch.tensor(tokens).unsqueeze(0))
    distance, neighbour = index.search(feature, k=top_k)
    # Bug fix: the original returned undefined names (`name`, `confidence`,
    # 'domain_classifier_model.svc'), copy-pasted from another endpoint, and
    # raised NameError on every request.  Return the retrieved answer instead.
    answer_id = int(neighbour[0][0])
    return {
        'answer': response_dict.get(answer_id),
        'distance': float(distance[0][0]),
        'Retrieval': 'chitchat_retrieval_model',
    }
ea6ace3dbcf73350bee9a00aa55e54d97dc8b537 | 930 | py | Python | Processing_api.py | enpmo/first-personal-work | 23c64cd11405256af50e9b2e930f580b4d918e9b | [
"Apache-2.0"
] | null | null | null | Processing_api.py | enpmo/first-personal-work | 23c64cd11405256af50e9b2e930f580b4d918e9b | [
"Apache-2.0"
] | null | null | null | Processing_api.py | enpmo/first-personal-work | 23c64cd11405256af50e9b2e930f580b4d918e9b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[8]:
import requests
import re
import time
import json
def get_one_page(url):
    """Fetch *url* and return the body decoded as UTF-8, or None on a non-200 status."""
    # Request headers reconstructed from inspecting the API
    # (NOTE(review): "authoration" looks like a typo of "authorization",
    # but it is what the server is sent -- do not change without testing).
    headers = {
        "authoration": "apicode",
        "apicode": "2b33b36fb2564ecaaac6ae66226e995f",
    }
    response = requests.get(url, headers=headers)
    if response.status_code != 200:
        return None
    return response.content.decode(encoding='utf-8')
# Fetch the world-wide epidemic statistics payload and echo it for inspection.
url = 'https://api.yonyoucloud.com/apis/dst/ncov/wholeworld'
html = get_one_page(url)
print(html)
# In[15]:
import json
# json.dumps re-encodes the (already JSON) payload as a JSON string literal.
b = json.dumps(html)
f2 = open('json2.json', 'w',encoding='utf-8')
f2.write(b)
f2.close()
# In[14]:
import json
# Write the raw payload verbatim.
# NOTE(review): the double ".json.json" extension looks accidental -- confirm
# before renaming; downstream readers may expect this exact filename.
f = open('global_epidemic_statistics.json.json', 'w',encoding='utf-8')
f.write(html)
f.close()
# In[ ]:
| 17.54717 | 113 | 0.648387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 477 | 0.5 |
ea6d08e4b75d46d8c207e5f66fc96bf1e79be92d | 2,434 | py | Python | docs/index_functions.py | guyms/pyansys | 7a9182a7c44098d9b99a0d1eb2fd183b7256ac01 | [
"MIT"
] | null | null | null | docs/index_functions.py | guyms/pyansys | 7a9182a7c44098d9b99a0d1eb2fd183b7256ac01 | [
"MIT"
] | null | null | null | docs/index_functions.py | guyms/pyansys | 7a9182a7c44098d9b99a0d1eb2fd183b7256ac01 | [
"MIT"
] | null | null | null | #==============================================================================
# load a beam and write it
#==============================================================================
import pyansys
from pyansys import examples

# Sample *.cdb archive shipped with pyansys
filename = examples.hexarchivefile

# Read ansys archive file
archive = pyansys.Archive(filename)

# Print raw data from cdb (print() keeps this runnable under Python 3;
# the original used the Python 2 print statement)
for key in archive.raw:
    print("%s : %s" % (key, archive.raw[key]))

# Create a vtk unstructured grid from the raw data and plot it
archive.ParseFEM()
archive.uGrid.Plot()

# write this as a vtk xml file
archive.save_as_vtk('hex.vtu')

# Load the grid back from the file we just wrote
# (bug fix: previously loaded 'hex.vtk', which this script never creates)
import vtki
grid = vtki.LoadGrid('hex.vtu')
grid.Plot()
#==============================================================================
# load beam results
#==============================================================================
# Load the reader from pyansys
import pyansys
from pyansys import examples
# Sample result file and associated archive file
rstfile = examples.rstfile
hexarchivefile = examples.hexarchivefile
# Create result reader object by loading the result file
result = pyansys.ResultReader(rstfile)
# Get beam natural frequencies (one value per mode stored in the result file)
freqs = result.GetTimeValues()
# Get the node numbers in this result file
nnum = result.nnum
# Get the 1st bending mode shape. Nodes are ordered according to nnum.
disp = result.GetResult(0, True) # uses 0 based indexing
# Load CDB (necessary for display)
result.LoadArchive(hexarchivefile)
# Plot the displacement of Mode 0 in the x direction
result.PlotNodalResult(0, 'x', label='Displacement')
#==============================================================================
# Load KM
#==============================================================================
# Load the reader from pyansys
import pyansys
from pyansys import examples

filename = examples.fullfile

# Create result reader object and read in full file
fobj = pyansys.FullReader(filename)
fobj.LoadFullKM()

import numpy as np
from scipy.sparse import csc_matrix, linalg

# Assemble sparse stiffness (k) and mass (m) matrices from the triplet data
ndim = fobj.nref.size
k = csc_matrix((fobj.kdata, (fobj.krows, fobj.kcols)), shape=(ndim, ndim))
m = csc_matrix((fobj.mdata, (fobj.mrows, fobj.mcols)), shape=(ndim, ndim))

# Solve the generalized eigenproblem K v = w M v (shift-invert around sigma)
w, v = linalg.eigsh(k, k=20, M=m, sigma=10000)

# System natural frequencies: eigenvalues are angular frequencies squared
f = (np.real(w))**0.5/(2*np.pi)

print('First four natural frequencies')
for i in range(4):
    # print() instead of the Python 2 print statement, so the script runs on py3
    print('{:.3f} Hz'.format(f[i]))
| 26.747253 | 79 | 0.595727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,295 | 0.532046 |
ea6ff3d5fd97a9be844e9c0c6ad45a5997b57e35 | 5,401 | py | Python | PAST3/o.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | 1 | 2021-03-09T04:28:13.000Z | 2021-03-09T04:28:13.000Z | PAST3/o.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | PAST3/o.py | nishio/atcoder | 8db36537b5d8580745d5f98312162506ad7d7ab4 | [
"MIT"
] | null | null | null | # included from libs/mincostflow.py
"""
Min Cost Flow
"""
# derived: https://atcoder.jp/contests/practice2/submissions/16726003
from heapq import heappush, heappop
class MinCostFlow():
def __init__(self, n):
self.n = n
self.graph = [[] for _ in range(n)]
self.pos = []
def add_edge(self, fr, to, cap, cost):
#assert 0 <= fr < self.n
#assert 0 <= to < self.n
m = len(self.pos)
self.pos.append((fr, len(self.graph[fr])))
self.graph[fr].append([to, len(self.graph[to]), cap, cost])
self.graph[to].append([fr, len(self.graph[fr]) - 1, 0, -cost])
return m
def get_edge(self, idx):
#assert 0 <= idx < len(self.pos)
to, rev, cap, cost = self.graph[self.pos[idx][0]][self.pos[idx][1]]
_rev_to, _rev_rev, rev_cap, _rev_cost = self.graph[to][rev]
return self.pos[idx][0], to, cap + rev_cap, rev_cap, cost
def edges(self):
for i in range(len(self.pos)):
yield self.get_edge(i)
def dual_ref(self, s, t):
dist = [2**63 - 1] * self.n
dist[s] = 0
vis = [0] * self.n
self.pv = [-1] * self.n
self.pe = [-1] * self.n
queue = []
heappush(queue, (0, s))
while queue:
k, v = heappop(queue)
if vis[v]:
continue
vis[v] = True
if v == t:
break
for i in range(len(self.graph[v])):
to, _rev, cap, cost = self.graph[v][i]
if vis[to] or cap == 0:
continue
cost += self.dual[v] - self.dual[to]
if dist[to] - dist[v] > cost:
dist[to] = dist[v] + cost
self.pv[to] = v
self.pe[to] = i
heappush(queue, (dist[to], to))
if not vis[t]:
return False
for v in range(self.n):
if not vis[v]:
continue
self.dual[v] -= dist[t] - dist[v]
return True
def flow(self, s, t):
return self.flow_with_limit(s, t, 2**63 - 1)
def flow_with_limit(self, s, t, limit):
return self.slope_with_limit(s, t, limit)[-1]
def slope(self, s, t):
return self.slope_with_limit(s, t, 2**63 - 1)
def slope_with_limit(self, s, t, limit):
#assert 0 <= s < self.n
#assert 0 <= t < self.n
#assert s != t
flow = 0
cost = 0
prev_cost = -1
res = [(flow, cost)]
self.dual = [0] * self.n
while flow < limit:
if not self.dual_ref(s, t):
break
c = limit - flow
v = t
while v != s:
c = min(c, self.graph[self.pv[v]][self.pe[v]][2])
v = self.pv[v]
v = t
while v != s:
_to, rev, _cap, _ = self.graph[self.pv[v]][self.pe[v]]
self.graph[self.pv[v]][self.pe[v]][2] -= c
self.graph[v][rev][2] += c
v = self.pv[v]
d = -self.dual[s]
flow += c
cost += c * d
if prev_cost == d:
res.pop()
res.append((flow, cost))
prev_cost = cost
return res
# end of libs/mincostflow.py
# included from snippets/main.py
def debug(*x, msg=""):
import sys
print(msg, *x, file=sys.stderr)
def solve(N, M, AS, BS, RS):
global mcf
INF = 10 ** 5
mcf = MinCostFlow(N + 5)
start = N
goal = N + 1
round = N + 2
for i in range(3):
mcf.add_edge(start, round + i, M, 0)
for i in range(3):
for j in range(N):
r = AS[j] * (BS[j] ** (i + 1)) % RS[i]
mcf.add_edge(round + i, j, 1, INF - r)
for j in range(N):
cs = [AS[j] * (BS[j] ** (k + 1)) for k in range(3)]
cs.append(0)
for k in range(3):
c = cs[k] - cs[k-1]
mcf.add_edge(j, goal, 1, c)
return INF * (3 * M) - mcf.flow(start, goal)[-1]
def main():
# parse input
N, M = map(int, input().split())
AS = list(map(int, input().split()))
BS = list(map(int, input().split()))
RS = list(map(int, input().split()))
print(solve(N, M, AS, BS, RS))
# tests
T1 = """
2 1
3 2
3 3
100000 100000 100000
"""
TEST_T1 = """
>>> as_input(T1)
>>> main()
81
"""
T2 = """
4 2
2 4 3 3
4 2 3 3
100000 100000 100000
"""
TEST_T2 = """
>>> as_input(T2)
>>> main()
210
"""
T3 = """
20 19
3 2 3 4 3 3 2 3 2 2 3 3 4 3 2 4 4 3 3 4
2 3 4 2 4 3 3 2 4 2 4 3 3 2 3 4 4 4 2 2
3 4 5
"""
TEST_T3 = """
>>> as_input(T3)
>>> main()
-1417
"""
def _test():
import doctest
doctest.testmod()
g = globals()
for k in sorted(g):
if k.startswith("TEST_"):
print(k)
doctest.run_docstring_examples(g[k], g, name=k)
def as_input(s):
"use in test, use given string as input file"
import io
f = io.StringIO(s.strip())
g = globals()
g["input"] = lambda: bytes(f.readline(), "ascii")
g["read"] = lambda: bytes(f.read(), "ascii")
if __name__ == "__main__":
import sys
input = sys.stdin.buffer.readline
read = sys.stdin.buffer.read
sys.setrecursionlimit(10 ** 6)
if sys.argv[-1] == "-t":
print("testing")
_test()
sys.exit()
main()
sys.exit()
# end of snippets/main.py
| 24.004444 | 75 | 0.478245 | 3,135 | 0.580448 | 90 | 0.016664 | 0 | 0 | 0 | 0 | 780 | 0.144418 |
ea7173b82ff50e9cc6614bf2f1132029d50e0907 | 1,512 | py | Python | reo/migrations/0112_auto_20210713_0037.py | NREL/REopt_API | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 7 | 2022-01-29T12:10:10.000Z | 2022-03-28T13:45:20.000Z | reo/migrations/0112_auto_20210713_0037.py | NREL/reopt_api | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 12 | 2022-02-01T18:23:18.000Z | 2022-03-31T17:22:17.000Z | reo/migrations/0112_auto_20210713_0037.py | NREL/REopt_API | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 3 | 2022-02-08T19:44:40.000Z | 2022-03-12T11:05:36.000Z | # Generated by Django 3.1.12 on 2021-07-13 00:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('reo', '0111_auto_20210708_2144'),
]
operations = [
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_from_elec_grid_purchase',
new_name='year_one_CO2_emissions_from_elec_grid_purchase',
),
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_from_elec_grid_purchase_bau',
new_name='year_one_CO2_emissions_from_elec_grid_purchase_bau',
),
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_from_fuelburn',
new_name='year_one_CO2_emissions_from_fuelburn',
),
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_from_fuelburn_bau',
new_name='year_one_CO2_emissions_from_fuelburn_bau',
),
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_offset_from_elec_exports',
new_name='year_one_CO2_emissions_offset_from_elec_exports',
),
migrations.RenameField(
model_name='sitemodel',
old_name='year_one_emissions_offset_from_elec_exports_bau',
new_name='year_one_CO2_emissions_offset_from_elec_exports_bau',
),
]
| 34.363636 | 75 | 0.65873 | 1,426 | 0.943122 | 0 | 0 | 0 | 0 | 0 | 0 | 684 | 0.452381 |
ea7787caeef8cdf78dfed953bd54966edcd2ae6e | 150 | py | Python | deepstreampy/constants/call_state.py | sapid/deepstreampy-twisted | 78025141bb0ac3aadc248d68f9273bf8993fc3d4 | [
"MIT"
] | null | null | null | deepstreampy/constants/call_state.py | sapid/deepstreampy-twisted | 78025141bb0ac3aadc248d68f9273bf8993fc3d4 | [
"MIT"
] | null | null | null | deepstreampy/constants/call_state.py | sapid/deepstreampy-twisted | 78025141bb0ac3aadc248d68f9273bf8993fc3d4 | [
"MIT"
] | null | null | null | INITIAL = 'INITIAL'
# String constants naming call/connection states (each value mirrors its name;
# the INITIAL state is declared just above).
CONNECTING = 'CONNECTING'
ESTABLISHED = 'ESTABLISHED'
ACCEPTED = 'ACCEPTED'
DECLINED = 'DECLINED'
ENDED = 'ENDED'
ERROR = 'ERROR'
| 18.75 | 27 | 0.72 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.453333 |
ea778e1c5be8fff0511c1fa6bd9148f44fac726e | 901 | py | Python | data/ZLData.py | sharmavins23/Zhongli-Artifact-and-Weapon-Calcs | 3254ca25238604865f34ac7084c10202db5713e0 | [
"MIT"
] | null | null | null | data/ZLData.py | sharmavins23/Zhongli-Artifact-and-Weapon-Calcs | 3254ca25238604865f34ac7084c10202db5713e0 | [
"MIT"
] | null | null | null | data/ZLData.py | sharmavins23/Zhongli-Artifact-and-Weapon-Calcs | 3254ca25238604865f34ac7084c10202db5713e0 | [
"MIT"
] | null | null | null | # Static data class for character stats
class Zhongli:
    """Static stat sheet for the character Zhongli used by the calculators."""
    level = 90
    talentLevel = 8
    # Base stat values
    baseHP = 14695
    baseATK = 251
    baseCritRATE = 0.05  # 5% base crit rate
    baseCritDMG = 0.5    # 50% base crit damage
    # Ability MVs (presumably "motion value" damage multipliers) and frame counts
    class Normal: # Normal attack spear kick hop combo
        frames = 140
        #mv = 2.7090
        # Per-hit multipliers summed (replaces the rounded aggregate above)
        mv = 0.5653+0.5723+0.7087+0.7889+(4*0.1975)
        hits = 8
        rotations = (720 - 100 - 140) / 140 # Removing time for hold E and Q
        hpConv = 0.0139  # HP-scaling coefficient -- TODO confirm exact meaning
    class HoldE: # Hold E initial hit
        frames = 720
        mv = 1.3440
        hits = 2 # Damage from pillar pop-up and hold E shockwave
        hpConv = 0.0190
    class EResonate: # Pillar resonance
        frames = 120
        mv = 0.4480
        rotations = 6
        hpConv = 0.0190
    class Q: # I WILL HAVE ORDER
        frames = 140
        mv = 6.3956
        hpConv = 0.33
ea78ba9051ae04427d4f22dd53746203619b8ac9 | 369 | py | Python | app/__init__.py | abhishtagatya/pandubot | ec0f20b31fc61b5581753711d774213bbf70d438 | [
"MIT"
] | 1 | 2018-08-29T12:03:06.000Z | 2018-08-29T12:03:06.000Z | app/__init__.py | abhishtagatya/pandubot | ec0f20b31fc61b5581753711d774213bbf70d438 | [
"MIT"
] | null | null | null | app/__init__.py | abhishtagatya/pandubot | ec0f20b31fc61b5581753711d774213bbf70d438 | [
"MIT"
] | null | null | null | import os
import sys
from instance.config import DATABASE_URI
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_compress import Compress
# Single module-level Flask application instance.
app = Flask(__name__)
Compress(app)  # enable compressed responses via flask_compress
app.config['SQLALCHEMY_DATABASE_URI'] = DATABASE_URI
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # disable the modification-tracking event system
db = SQLAlchemy(app)
# Imported last so these modules can import `app`/`db` back from this package
# without a circular-import failure.
from app import bot
from app import web
| 20.5 | 52 | 0.821138 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.154472 |
ea79a3dbc9f8a91f2ad34706bc154dae9826035f | 302 | py | Python | docker/app/app/backend/apps/_archive/accounts_new/profiles/serializers.py | JTarball/tetherbox | cae7a4bb6e29da0f34670f8c8d228faf5f909f3a | [
"0BSD"
] | 1 | 2018-02-26T07:26:30.000Z | 2018-02-26T07:26:30.000Z | docker/app/app/backend/apps/_archive/accounts_new/profiles/serializers.py | JTarball/docker-django-polymer-starter-kit | b5250030b1646e29567c15d01ba4668c6ad535c9 | [
"0BSD"
] | null | null | null | docker/app/app/backend/apps/_archive/accounts_new/profiles/serializers.py | JTarball/docker-django-polymer-starter-kit | b5250030b1646e29567c15d01ba4668c6ad535c9 | [
"0BSD"
] | null | null | null | """
accounts.profile.serializers
============================
Serializers file for a basic Accounts App
"""
from rest_framework import serializers
from .models import AccountsUser
class AccountsUserSerializer(serializers.ModelSerializer):
    """DRF ModelSerializer over AccountsUser.

    NOTE(review): no `fields`/`exclude` is declared; recent DRF versions
    require one -- confirm against the pinned rest_framework version.
    """
    class Meta:
        model = AccountsUser
| 17.764706 | 58 | 0.672185 | 104 | 0.344371 | 0 | 0 | 0 | 0 | 0 | 0 | 121 | 0.400662 |
ea79a9740f615953bdb03c857f3a9afc44c42c3d | 20,551 | py | Python | src/ops.py | Elite-Volumetric-Capture-Sqad/DDRNet | 10340d1bff26c41e429562d8ff42fc15afd9e3d5 | [
"MIT"
] | 128 | 2018-07-28T13:02:43.000Z | 2022-03-03T11:59:49.000Z | src/ops.py | Elite-Volumetric-Capture-Sqad/DDRNet | 10340d1bff26c41e429562d8ff42fc15afd9e3d5 | [
"MIT"
] | 3 | 2018-08-31T15:07:26.000Z | 2019-04-10T13:12:57.000Z | src/ops.py | Elite-Volumetric-Capture-Sqad/DDRNet | 10340d1bff26c41e429562d8ff42fc15afd9e3d5 | [
"MIT"
] | 20 | 2018-08-21T12:32:18.000Z | 2022-02-08T07:58:01.000Z | import numpy as np
import sys
import tensorflow as tf
slim = tf.contrib.slim
def convertNHWC2NCHW(data, name):
    """Transpose a batch tensor from channels-last (NHWC) to channels-first (NCHW)."""
    return tf.transpose(data, perm=[0, 3, 1, 2], name=name)
def convertNCHW2NHWC(data, name):
    """Transpose a batch tensor from channels-first (NCHW) to channels-last (NHWC)."""
    return tf.transpose(data, perm=[0, 2, 3, 1], name=name)
def denormalize(batch_input, low_thres, up_thres, zero2one=False, rm_zeros=False, eps=10.0):
    """Map a normalized depth map back to metric depth.

    batch_input lies in [0, 1] when zero2one is True, otherwise in [-1, 1].
    When rm_zeros is True, depths within eps of low_thres are zeroed out
    (treated as holes/invalid pixels).
    """
    if zero2one:
        fraction = batch_input
    else:
        fraction = (batch_input + 1.0) / 2.0
    depth = fraction * (up_thres - low_thres) + low_thres
    if rm_zeros:
        near_floor = tf.less(depth, low_thres + eps, name='low_mask')
        depth = tf.where(near_floor, tf.zeros_like(depth), depth)
    return depth
def compute_normals(depth, config, conv=False, eps=1e-4):
    """Convert a normalized NHWC depth map into an NCHW normal map.

    :param depth: normalized depth batch, NHWC (denormalized here via
                  config.low_thres/up_thres before back-projection).
    :param config: must provide batch_size, low_thres and up_thres.
    :param conv: if True, box-blur each normal component with a fixed 3x3
                 averaging kernel and re-normalize (smoothed normals).
    :param eps: epsilon used by the L2 normalization.
    :return: normals, NCHW (B,3,H,W).
    """
    with tf.variable_scope("depth_to_normal"):
        # Camera intrinsics (fx, fy, cx, cy) divided by the 640x480 image
        # size -- hard-coded for one sensor; TODO confirm they match the data.
        intrinsics = tf.constant([[536.628 / 640.0, 536.606 / 480.0, 310.591 / 640.0, 234.759 / 480.0]])
        intrinsics = tf.tile(intrinsics, [config.batch_size, 1])
        depth_real = convertNHWC2NCHW(
            denormalize(depth, low_thres=config.low_thres, up_thres=config.up_thres), name='depth_NCHW')
        normals = depth_to_normals_tf(depth_real, intrinsics)
        if conv:
            kernel_size = 3
            stride = 1
            in_channels = normals.get_shape()[1]
            assert in_channels == 3, 'normals should have 3 channel instead of {}.'.format(in_channels)
            # Non-trainable 3x3 box filter (each weight 1/9), applied per channel.
            normal_filter = tf.get_variable("filter",
                                            [kernel_size, kernel_size, 1, 1],
                                            dtype=tf.float32,
                                            initializer=tf.constant_initializer(1.0/(kernel_size*kernel_size)),
                                            trainable=False)
            normals1, normals2, normals3 = tf.split(convertNCHW2NHWC(normals, 'normals_NHWC'), 3, axis=3)
            normals1 = tf.nn.conv2d(normals1, normal_filter,
                                    [1, stride, stride, 1], 'SAME', name='normal_conv_r')
            normals2 = tf.nn.conv2d(normals2, normal_filter,
                                    [1, stride, stride, 1], 'SAME', name='normal_conv_g')
            normals3 = tf.nn.conv2d(normals3, normal_filter,
                                    [1, stride, stride, 1], 'SAME', name='normal_conv_b')
            normals = tf.concat([normals1, normals2, normals3], 3)
            # Mark vectors whose blurred norm is ~0 and zero them after
            # re-normalization (avoids amplifying degenerate pixels).
            unused = tf.less(tf.norm(normals, axis=3), np.sqrt(eps))
            unused = tf.stack([unused]*3, axis=3)
            normals = tf.nn.l2_normalize(normals, 3, epsilon=eps, name='normalize_normals')
            normals = tf.where(unused, tf.zeros_like(normals), normals)
            normals = convertNHWC2NCHW(normals, name='normals_NCHW')
        return normals
def depth_to_normals_tf(depth, intrinsics, scope=None, eps=1e-4):
    """Estimate per-pixel surface normals from a metric depth map.

    Back-projects every pixel to a 3-D camera-space point using the relative
    intrinsics, then averages the two cross-product normals obtained from the
    4-neighborhood difference vectors.  The one-pixel border lost to the
    finite differences is zero-padded back.

    :param depth: real depth (B,1,H,W)
    :param intrinsics: (B,4) relative intrinsics (fx/W, fy/H, cx/W, cy/H)
    :param eps: epsilon used by the L2 normalizations.
    :return: normals (B,3,H,W)
    """
    with tf.name_scope(scope, 'depth_to_normals_tf', [depth, intrinsics]):
        H, W = depth.shape.as_list()[-2:]
        B = tf.shape(depth)[0]  # dynamic batch size (config.batch_size)
        depth = tf.reshape(depth, [B, H, W])
        # fx_rel = fx_abs / W, cx_real = cx_abs / W  (intrinsics stored relative)
        fx, fy, cx, cy = tf.split(tf.expand_dims(intrinsics, 2), 4, axis=1) # (B,1,1)
        inv_fx = tf.div(1.0, fx * W)
        inv_fy = tf.div(1.0, fy * H)
        cx = cx * W
        cy = cy * H
        # Pixel grid, tiled once per batch element.
        X, Y = tf.meshgrid(tf.range(W), tf.range(H))
        X = tf.cast(tf.tile(tf.expand_dims(X, axis=0), [B, 1, 1]), tf.float32) # (B,H,W)
        Y = tf.cast(tf.tile(tf.expand_dims(Y, axis=0), [B, 1, 1]), tf.float32)
        # Back-project to camera-space 3-D coordinates (pinhole model).
        x_cord = (X - cx) * inv_fx * depth
        y_cord = (Y - cy) * inv_fy * depth
        p = tf.stack([x_cord, y_cord, depth], axis=3, name='p_3d') # (B,H,W,3)
        # vector of p_3d in west, south, east, north direction
        p_ctr = p[:, 1:-1, 1:-1, :]
        vw = p_ctr - p[:, 1:-1, 2:, :]
        vs = p[:, 2:, 1:-1, :] - p_ctr
        ve = p_ctr - p[:, 1:-1, :-2, :]
        vn = p[:, :-2, 1:-1, :] - p_ctr
        # Two normal estimates from orthogonal neighbor pairs, then averaged.
        normal_1 = tf.cross(vs, vw, name='cross_1') # (B,H-2,W-2,3)
        normal_2 = tf.cross(vn, ve, name='cross_2')
        normal_1 = tf.nn.l2_normalize(normal_1, 3, epsilon=eps)
        normal_2 = tf.nn.l2_normalize(normal_2, 3, epsilon=eps)
        normal = normal_1 + normal_2
        # unused = tf.less(tf.norm(normal, axis=3), np.sqrt(eps))
        # unused = tf.stack([unused] * 3, axis=3)
        normal = tf.nn.l2_normalize(normal, 3, epsilon=eps, name='normal')
        # normal = tf.where(unused, tf.zeros_like(normal), normal)
        # Restore the one-pixel border removed by the finite differences.
        paddings = [[0, 0], [1, 1], [1, 1], [0, 0]]
        normal = tf.pad(normal, paddings) # (B,H,W,3)
        normal = convertNHWC2NCHW(normal, 'normal_NCHW')
        return normal
def instance_norm(input):
    """Instance normalization with learnable per-channel scale and shift.

    Statistics are computed per sample over the spatial axes (H, W).
    """
    with tf.variable_scope("instance_norm"):
        input = tf.identity(input)
        channels = input.get_shape()[3]
        shift = tf.get_variable("shift", [channels], dtype=tf.float32,
                                initializer=tf.zeros_initializer())
        scale = tf.get_variable("scale", [channels], dtype=tf.float32,
                                initializer=tf.random_normal_initializer(1.0, 0.02))
        mean, variance = tf.nn.moments(input, [1, 2], keep_dims=True)
        return tf.nn.batch_normalization(input, mean, variance, shift, scale,
                                         variance_epsilon=1e-5,
                                         name='instancenorm')
@slim.add_arg_scope
def lrelu(inputs, leak=0.2, scope="lrelu"):
    """Leaky ReLU expressed as a weighted sum of x and |x|.

    For tf > 1.4, tf.nn.leaky_relu() can be used instead.  Decorated with
    slim.add_arg_scope so it can be configured through an arg_scope.
    """
    with tf.variable_scope(scope):
        pos_w = 0.5 * (1 + leak)
        neg_w = 0.5 * (1 - leak)
        return pos_w * inputs + neg_w * abs(inputs)
def conv_bn_relu(batch_input, kernel_size, stride, out_channels=None):
    """Convolution followed by batch norm and ReLU.

    out_channels defaults to the input channel count.  Returns the activated
    output together with the convolution filter variable.
    """
    with tf.variable_scope("conv_bn_relu"):
        in_channels = batch_input.get_shape()[3]
        if not out_channels:
            out_channels = in_channels
        kernel = tf.get_variable("filter",
                                 [kernel_size, kernel_size, in_channels, out_channels],
                                 dtype=tf.float32,
                                 initializer=tf.random_normal_initializer(0, 0.02))
        conv_out = tf.nn.conv2d(batch_input, kernel, [1, stride, stride, 1], padding="SAME")
        activated = tf.nn.relu(batchnorm_u(conv_out))
        return activated, kernel
def resize_conv(x, out_ch, k_size, size_factor):
    """Nearest-neighbor upsample by size_factor, then a stride-1 conv_act."""
    _, in_h, in_w, _ = x.shape.as_list()
    upsampled = tf.image.resize_nearest_neighbor(x, [in_h * size_factor, in_w * size_factor])
    return conv_act(upsampled, out_ch, k_size, 1)
def resize_add_conv_u(input, size_factor, out_ch=None, k_size=3, axis=3, act=tf.nn.relu):
    """
    Bilinear Additive Upsampling. see:
    Wojna, Zbigniew, et al. "The Devil is in the Decoder." arXiv preprint arXiv:1707.05847 (2017).

    Upsamples `input` bilinearly by `size_factor`, sums groups of input
    channels down to `out_ch` output channels, then applies a k_size conv
    and the activation `act` (pass act=None to skip the activation).
    """
    with tf.variable_scope("resize_add_conv") as scp:
        _, in_height, in_width, in_ch = input.shape.as_list()
        if out_ch:
            assert in_ch % out_ch == 0, 'cannot add in_ch: {} to out_ch: {}'.format(in_ch, out_ch)
        else:
            out_ch, r = divmod(in_ch, (size_factor * size_factor))
            assert r == 0, 'in_ch: {} not divisible by size_factor^2'.format(in_ch)
        # Bug fix: `/` produced a float on Python 3 and broke the slice
        # expressions below; `//` keeps ch_split an int (divisibility is
        # already asserted above).
        ch_split = in_ch // out_ch
        # bilinear upsample
        resized = tf.image.resize_images(input, [in_height * size_factor, in_width * size_factor])
        stack_list = []
        for i in range(out_ch):
            # Sum each group of ch_split channels into one output channel.
            resized_split = resized[:, :, :, i * ch_split:(i + 1) * ch_split]
            stack_list.append(tf.reduce_sum(resized_split, axis=axis))
        stacked = tf.stack(stack_list, axis=axis)
        kernel = tf.get_variable("filter", [k_size, k_size, out_ch, out_ch], dtype=tf.float32,
                                 initializer=tf.random_normal_initializer(0, 0.02))
        conv = tf.nn.conv2d(stacked, kernel, [1, 1, 1, 1], padding="SAME")
        if act is not None:
            # Bug fix: tf.nn.relu was previously applied regardless of which
            # activation was passed in `act`.
            conv = act(conv)
        return conv
def conv_concat(input, skip, axis, conv=True):
    """Concatenate a skip connection onto `input`, optionally projecting the
    skip tensor to `input`'s channel count with a 3x3 conv_bn_relu first."""
    with tf.variable_scope("concat"):
        if conv:
            skip, _ = conv_bn_relu(skip, 3, 1, out_channels=input.shape[3])
        return tf.concat([input, skip], axis)
def resize_like(inputs, ref, method='NN'):
    """Resize `inputs` spatially to match `ref`.

    method is 'NN' (nearest neighbor) or 'BI' (bilinear); when the spatial
    sizes already match, `inputs` is returned unchanged.
    """
    iH, iW = inputs.shape[1], inputs.shape[2]
    rH, rW = ref.shape[1], ref.shape[2]
    if iH == rH and iW == rW:
        return inputs
    if method == 'NN':
        return tf.image.resize_nearest_neighbor(inputs, [rH.value, rW.value])
    if method == 'BI':
        return tf.image.resize_bilinear(inputs, [rH.value, rW.value])
    raise NotImplementedError('resize method not implemented yet.')
def residual_block(inputs, ch_out, stride=1, norm_fn=slim.batch_norm, outputs_collections=None, scope=None):
    """Pre-activation residual block.

    Normalize+ReLU first (or plain ReLU when norm_fn is falsy), then two 3x3
    convolutions; the un-activated input is added back as the shortcut.
    See resnet_model.py for a more detailed version.
    """
    with tf.variable_scope(scope, "residual_block") as scp:
        identity = tf.identity(inputs, name="shortcut")
        if norm_fn:
            activated = norm_fn(inputs, activation_fn=tf.nn.relu, scope="preact")
        else:
            activated = tf.nn.relu(inputs, name="preact")
        branch = slim.conv2d(activated, ch_out, [3, 3], stride=stride,
                             normalizer_fn=norm_fn, activation_fn=tf.nn.relu,
                             scope="conv1")
        branch = slim.conv2d(branch, ch_out, [3, 3], stride=stride,
                             normalizer_fn=None, activation_fn=None,
                             scope="conv2")
        return identity + branch
def rand_shift_depth(depths, low_th, up_th, seed=666):
    """Shift a list of depth maps by one common random offset.

    The offset range is chosen so the reference map (depths[1] if present,
    otherwise depths[0]) stays inside [low_th, up_th]; every shifted map is
    then clipped to that range.

    :param depths: list of depth maps, values in [low_th, up_th]
    :return: list of shifted depth maps
    """
    reference = depths[1] if len(depths) > 1 else depths[0]
    shift_lo = low_th - tf.reduce_min(reference)
    shift_hi = up_th - tf.reduce_max(reference)
    offset = tf.random_uniform([], minval=shift_lo, maxval=shift_hi, seed=seed, name='shift_val')
    return [tf.clip_by_value(d + offset, low_th, up_th) for d in depths]
def read_image_from_filename(filename, batch_size, num_threads=4, has_mask=True, has_abd=False,
                             aux_type="JPEG", depth_type=tf.uint16,
                             low_th=500.0, up_th=3000.0, diff_th=5.0,
                             output_height=256, output_width=256,
                             min_after_dequeue=128, use_shuffle_batch=False,
                             rand_crop=True, rand_scale=False, rand_depth_shift=False, rand_flip=True, rand_brightness=True,
                             scope=None):
    """
    :param filename: index csv file for training.
    :param batch_size: 16 or 32 recommended for Titan X.
    :param num_threads: 4 or 8.
    :param has_mask: single channel [0, 255]. offline mask obtained by threshold, instance segmentation or other methods.
    :param has_abd: offline albedo obtained by intrinsic decomposition methods, if False assume uniform albedo.
    :param aux_type: auxiliary(e.g. color) image file type.
    :param depth_type: data type of depth maps.
    :param low_th: limited lower bound of depth range.
    :param up_th: limited upper bound of depth range.
    :param diff_th: threshold to reject bad training pairs with large L1 diff.
    :param output_height: patch height.
    :param output_width: patch width.
    :param min_after_dequeue: see docs of tf.train.shuffle_batch.
    :param use_shuffle_batch: see docs of tf.train.shuffle_batch.
    :param rand_crop: random cropping patches for training, change cx, cy.
    :param rand_flip: random flipping patches, change cx, cy.
    :param rand_scale: random scaling, change fx, fy, cx, cy.
    :param rand_depth_shift: only shift depth value, no change in intrinsics.
    :param rand_brightness: augment color image.
    :param scope: visualize graphs in tensorboard.
    :return: depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch
    """
    with tf.variable_scope(scope, "image_producer"):
        # Load index csv file
        textReader = tf.TextLineReader()
        csv_path = tf.train.string_input_producer([filename], shuffle=True)
        _, csv_content = textReader.read(csv_path)
        if has_mask and has_abd:
            depth_raw_filename, depth_ref_filename, color_filename, mask_filename, albedo_filename = \
                tf.decode_csv(csv_content, [[""], [""], [""], [""], [""]])
        elif has_mask:
            depth_raw_filename, depth_ref_filename, color_filename, mask_filename = \
                tf.decode_csv(csv_content, [[""], [""], [""], [""]])
        else:
            depth_raw_filename, depth_ref_filename, color_filename = \
                tf.decode_csv(csv_content, [[""], [""], [""]])
        # Read and decode image data to tf.float32 tensor
        depth_raw_data = tf.read_file(depth_raw_filename)
        depth_ref_data = tf.read_file(depth_ref_filename)
        color_data = tf.read_file(color_filename)
        depth_raw_im = tf.image.decode_png(depth_raw_data, channels=1, dtype=depth_type)
        depth_ref_im = tf.image.decode_png(depth_ref_data, channels=1, dtype=depth_type)
        if has_mask:
            mask_data = tf.read_file(mask_filename)
            mask = tf.image.decode_png(mask_data, channels=1) / 255
            mask = tf.cast(mask, tf.float32)
        if has_abd:
            albedo_data = tf.read_file(albedo_filename)
            albedo_im = tf.image.decode_png(albedo_data, channels=1)
            albedo_im = tf.cast(albedo_im, tf.float32)
        if aux_type == "JPEG":
            color_im = tf.image.decode_jpeg(color_data, channels=1)
        elif aux_type == "PNG":
            color_im = tf.image.decode_png(color_data, channels=1)
        else:
            raise NotImplementedError("unsupport auxiliary image type for now!")
        depth_raw_im = tf.cast(depth_raw_im, tf.float32)
        depth_ref_im = tf.cast(depth_ref_im, tf.float32)
        color_im = tf.cast(color_im, tf.float32)
        # color_im = tf.image.resize_images(color_im, depth_raw_shape[:2], method=2) # return float Tensor
        # Concat all images in channel axis to randomly crop together
        if has_mask and has_abd:
            concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im, mask, albedo_im], axis=2)
            n_concat = 5
        elif has_mask:
            concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im, mask], axis=2)
            n_concat = 4
        else:
            concated_im = tf.concat([depth_raw_im, depth_ref_im, color_im], axis=2)
            n_concat = 3
        # Prepose rand_crop here to reduce unnecessary computation of subsequent data augmentations.
        if rand_crop:
            concated_im = tf.random_crop(concated_im, [output_height, output_width, n_concat])
            # concated_im = tf.image.crop_to_bounding_box(concated_im, 80, 250, output_height, output_width) # dbg
        else:
            concated_im = tf.image.resize_image_with_crop_or_pad(concated_im, output_height, output_width)
        if has_mask and has_abd:
            depth_raw_im, depth_ref_im, color_im, mask, albedo_im = tf.split(concated_im, n_concat, axis=2)
        elif has_mask:
            depth_raw_im, depth_ref_im, color_im, mask = tf.split(concated_im, n_concat, axis=2)
        else:
            depth_raw_im, depth_ref_im, color_im = tf.split(concated_im, 3, axis=2)
        # Filter bad inputs use diff_mean or mse
        n_holes = tf.count_nonzero(tf.less(depth_ref_im, tf.constant(50.0)), dtype=tf.float32)
        diff = tf.abs(tf.subtract(depth_raw_im, depth_ref_im, name='diff'))
        diff = tf.where(diff < up_th / 10, diff, tf.zeros_like(diff))
        diff_mean = tf.reduce_mean(diff, name='diff_mean')
        # mse = tf.reduce_mean(tf.square(diff), name='mse')
        enqueue_cond = tf.logical_and(tf.less(n_holes, output_height * output_width * 2 / 3),
                                      tf.less(diff_mean, diff_th))

        def zero_img():
            # Empty (0-row) batch: rejected patches contribute nothing to the queue.
            return tf.constant(0, shape=[0, output_height, output_width, n_concat])

        def one_img():
            # Data augmentation: rand_flip, rand_scale and rand_depth_shift on filtered patches.
            raw = tf.clip_by_value(depth_raw_im, low_th, up_th)
            ref = tf.clip_by_value(depth_ref_im, low_th, up_th)
            if rand_brightness:
                color = tf.image.random_brightness(color_im, 20)
            else:
                color = color_im
            if rand_depth_shift:
                raw, ref = rand_shift_depth([raw, ref], low_th, up_th)
            if has_mask and has_abd:
                # Bug fix: the original referenced `abd`, an undefined name
                # (NameError); the decoded albedo tensor is `albedo_im`.
                im = tf.concat([raw, ref, color, mask, albedo_im], axis=2)
            elif has_mask:
                im = tf.concat([raw, ref, color, mask], axis=2)
            else:
                im = tf.concat([raw, ref, color], axis=2)
            if rand_flip:
                im = tf.image.random_flip_left_right(im)
            if rand_scale:
                pass
            return tf.expand_dims(im, 0)

        concated_im = tf.cond(enqueue_cond, one_img, zero_img)
        ## Pass the 4D batch tensors to a batching op at the end of input data queue
        # shuffle_batch creates a shuffling queue with dequeue op and enqueue QueueRunner
        # min_after_dequeue defines how big a buffer we will randomly sample from
        # bigger means better shuffling but slower start up and more memory used.
        # capacity must be larger than min_after_dequeue and the amount larger
        # determines the maximum we will prefetch.
        # capacity = min_after_dequeue + (num_threads + small_safety_margin) * batch_size
        if use_shuffle_batch:
            capacity = min_after_dequeue + (num_threads + 1) * batch_size
            im_batch = tf.train.shuffle_batch(
                [concated_im],
                batch_size=batch_size,
                capacity=capacity,
                enqueue_many=True,
                num_threads=num_threads,
                min_after_dequeue=min_after_dequeue,
                allow_smaller_final_batch=True,
                name="shuffle_batch")
        else:
            im_batch = tf.train.batch(
                [concated_im],
                batch_size=batch_size,
                num_threads=num_threads,
                allow_smaller_final_batch=True,
                enqueue_many=True,
                name="batch")
    # Split concatenated data
    if has_mask and has_abd:
        depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch = tf.split(im_batch, n_concat, axis=3)
    elif has_mask:
        depth_raw_batch, depth_ref_batch, color_batch, mask_batch = tf.split(im_batch, n_concat, axis=3)
    else: # get mask only from ref(after clip, outliers are equal to low_th)
        depth_raw_batch, depth_ref_batch, color_batch = tf.split(im_batch, n_concat, axis=3)
        mask_batch = tf.cast(tf.not_equal(depth_ref_batch, low_th), tf.float32, name='mask_batch') # 0.0 or 1.0
    # Normalize depth and color maps
    with tf.name_scope('normalize'):
        thres_range = (up_th - low_th) / 2.0
        depth_raw_batch = (depth_raw_batch - low_th) / thres_range
        depth_raw_batch = tf.subtract(depth_raw_batch, 1.0, name='raw_batch') # [low,up]->[-1,1]
        depth_ref_batch = (depth_ref_batch - low_th) / thres_range
        depth_ref_batch = tf.subtract(depth_ref_batch, 1.0, name='ref_batch') # [low,up]->[-1,1]
        color_batch = color_batch * mask_batch / 127.0
        color_batch = tf.subtract(color_batch, 1.0, name='aux_batch') # [0,255]->[-1,1]
        if has_abd:
            albedo_batch = albedo_batch / 127.0 # offline estimated albedo from RGB, [0,255]->[0,2]
        else:
            albedo_batch = None
    # dbg: return and show last diff_mean in batch
    return depth_raw_batch, depth_ref_batch, color_batch, mask_batch, albedo_batch, diff_mean
| 46.920091 | 124 | 0.617586 | 0 | 0 | 0 | 0 | 363 | 0.017663 | 0 | 0 | 4,641 | 0.225828 |
ea7a9655fe05af9a10f1098bf7c17031501e094a | 829 | py | Python | tests/fractalmusic/test_fm_split.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | null | null | null | tests/fractalmusic/test_fm_split.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | 45 | 2020-02-24T19:37:00.000Z | 2021-04-06T16:13:56.000Z | tests/fractalmusic/test_fm_split.py | alexgorji/musurgia | 81d37afbf1ac70348002a93299db228b5ed4a591 | [
"MIT"
] | null | null | null | import os
from musicscore.musictree.treescoretimewise import TreeScoreTimewise
from musurgia.unittest import TestCase
from musurgia.fractaltree.fractalmusic import FractalMusic
path = str(os.path.abspath(__file__).split('.')[0])
class Test(TestCase):
    """Regression tests for FractalMusic.split, compared against stored XML references."""
    def setUp(self) -> None:
        self.fm = FractalMusic(tempo=60, quarter_duration=4)
    def _write_and_compare(self, suffix):
        # Render the fractal music into a score, write it next to this test
        # module and diff it against the reference file of the same name.
        score = TreeScoreTimewise()
        self.fm.get_score(score)
        xml_path = path + suffix
        score.write(xml_path)
        self.assertCompareFiles(xml_path)
    def test_1(self):
        # Plain split of node 1 into two parts.
        self.fm.split(1, 2)
        self._write_and_compare('_test_1.xml')
    def test_2(self):
        # Split, then silence the second part's chord.
        self.fm.split(1, 2)[1].chord.to_rest()
        self._write_and_compare('_test_2.xml')
| 27.633333 | 68 | 0.670688 | 594 | 0.716526 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.034982 |
ea7b1fb5e109895783f00bbbc89f3c2e5c72c142 | 96 | py | Python | docs/p3/setup.py | khchine5/atelier | 25a287595d35b14f7eaa8379636b7538e1c4314f | [
"BSD-2-Clause"
] | 1 | 2018-09-20T12:49:51.000Z | 2018-09-20T12:49:51.000Z | docs/p3/setup.py | khchine5/atelier | 25a287595d35b14f7eaa8379636b7538e1c4314f | [
"BSD-2-Clause"
] | 2 | 2016-01-12T20:07:51.000Z | 2016-01-13T22:55:19.000Z | docs/p3/setup.py | khchine5/atelier | 25a287595d35b14f7eaa8379636b7538e1c4314f | [
"BSD-2-Clause"
] | 2 | 2016-10-12T16:13:16.000Z | 2021-02-28T07:15:59.000Z | from setuptools import setup
if __name__ == '__main__':
setup(name='foo', version='1.0.0')
| 19.2 | 38 | 0.677083 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.229167 |
ea7b6fc02919fcb3d751ed35c29312f28ec2bea8 | 8,324 | py | Python | ParaMol/Objective_function/Properties/regularization.py | mnagaku/ParaMol | 13529f584e2d50076e038388ecbdd57af23c73b9 | [
"MIT"
] | 15 | 2021-03-08T17:56:36.000Z | 2022-03-30T01:46:29.000Z | ParaMol/Objective_function/Properties/regularization.py | mnagaku/ParaMol | 13529f584e2d50076e038388ecbdd57af23c73b9 | [
"MIT"
] | 7 | 2021-03-28T05:53:59.000Z | 2021-10-15T15:37:56.000Z | ParaMol/Objective_function/Properties/regularization.py | mnagaku/ParaMol | 13529f584e2d50076e038388ecbdd57af23c73b9 | [
"MIT"
] | 2 | 2021-04-30T06:42:09.000Z | 2021-06-21T05:49:29.000Z | # -*- coding: utf-8 -*-
"""
Description
-----------
This module defines the :obj:`ParaMol.Objective_function.Properties.regularization.Regularization` class, which is a ParaMol representation of the regularization property.
"""
import numpy as np
from .property import *
# -----------------------------------------------------------#
# #
# REGULARIZATION #
# #
# -----------------------------------------------------------#
class Regularization(Property):
    """
    ParaMol representation of the regularization property.

    Parameters
    ----------
    initial_parameters_values : list or np.array of floats
        List or np.array containing the initial parameters' values.
    prior_widths : list or np.array of floats
        List or np.array containing the prior width of each parameter.
    method : str
        Type of regularization. Options are 'L1', 'L2' or 'hyperbolic' ('hyperbolic' only for RESP calculations).
    weight : float
        Weight of this property in the objective function.
    scaling_factor : float
        Scaling factor of the regularization value.
    hyperbolic_beta : float
        Hyperbolic beta value. Only used if `method` is `hyperbolic`.

    Attributes
    ----------
    name : str
        'REGULARIZATION'
    systems : list of :obj:`ParaMol.System.system.ParaMolSystem`
        List of ParaMol Systems. Currently not used and it is set to None.
    units : str
        'ADIMENSIONAL'
    value : float
        Current value of this property.
    weight : float
        Weight of this property in the objective function.
    """

    def __init__(self, initial_parameters_values, prior_widths, method, weight=1.0, scaling_factor=1.0, hyperbolic_beta=0.01):
        self.name = "REGULARIZATION"
        self.systems = None
        self._regularization_type = method
        self._scaling_factor = scaling_factor
        self._hyperbolic_beta = hyperbolic_beta
        self._initial_parameters_values = initial_parameters_values
        self._prior_widths = prior_widths
        self.units = 'ADIMENSIONAL'
        self.value = None
        self.weight = weight

    # ------------------------------------------------------------ #
    #                                                              #
    #                          PUBLIC METHODS                      #
    #                                                              #
    # ------------------------------------------------------------ #
    def set_initial_parameters_values(self, initial_parameters_values):
        """
        Method that sets the initial parameters' values as a private attribute of this instance.

        Parameters
        ----------
        initial_parameters_values : list or np.array of floats
            List or np.array containing the initial parameters' values.

        Returns
        -------
        initial_parameters_values : list of floats
            List containing the initial parameters' values (private attribute).
        """
        self._initial_parameters_values = initial_parameters_values

        return self._initial_parameters_values

    def set_prior_widths(self, prior_widths):
        """
        Method that sets the prior widths of the variables as a private attribute of this instance.

        Parameters
        ----------
        prior_widths : list or np.array of floats
            List or np.array containing the prior width of each parameter.

        Returns
        -------
        prior_widths : list of floats
            List containing the prior width of each parameter (private attribute).
        """
        self._prior_widths = prior_widths

        return self._prior_widths

    def calculate_property(self, current_parameters, a=None, b=None):
        """
        Method that wraps private regularization methods in order to calculate the regularization term of the objective function.

        Parameters
        ----------
        current_parameters : list of floats
            Lists containing the optimizable values of the parameters.
        a : float, default=`None`
            a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
        b : float, default=`None`
            Hyperbolic beta parameter. If not `None`, instance attribute `self._hyperbolic_beta` is ignored.

        Returns
        -------
        float
            Regularization value.

        Raises
        ------
        NotImplementedError
            If `self._regularization_type` is not 'L1', 'L2' or 'hyperbolic'.
        """
        method = self._regularization_type.upper()
        if method == "L2":
            return self._l2_regularization(current_parameters, a)
        elif method == "L1":
            return self._l1_regularization(current_parameters, a)
        elif method == "HYPERBOLIC":
            return self._hyperbolic_regularization(current_parameters, a, b)
        # Typo fixed: previous message read "scheme not implement.".
        raise NotImplementedError("Regularization {} scheme not implemented.".format(self._regularization_type))

    # ------------------------------------------------------------ #
    #                                                              #
    #                          PRIVATE METHODS                     #
    #                                                              #
    # ------------------------------------------------------------ #
    def _scaled_differences(self, current_parameters):
        """
        Method that computes the prior-width-scaled deviations from the initial parameters.

        Parameters
        ----------
        current_parameters : list of floats
            Lists containing the optimizable values of the parameters.

        Returns
        -------
        np.array
            (param - param_0) / prior_width for every parameter.
        """
        return (np.asarray(current_parameters) - self._initial_parameters_values) / self._prior_widths

    def _l2_regularization(self, current_parameters, a=None):
        """
        Method that computes the value of the L2 regularization.

        Parameters
        ----------
        current_parameters : list of floats
            Lists containing the optimizable values of the parameters.
        a : float, default=`None`
            a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.

        Notes
        -----
        :math:`L2 = a(param-param_0)^2` where a is a scaling factor.

        Returns
        -------
        value : float
            Value of the regularization.
        """
        if a is None:
            a = self._scaling_factor

        self.value = a * np.sum(self._scaled_differences(current_parameters) ** 2)
        return self.value

    def _l1_regularization(self, current_parameters, a=None):
        """
        Method that computes the value of the L1 regularization.

        Parameters
        ----------
        current_parameters : list of floats
            Lists containing the optimizable values of the parameters.
        a : float, default=`None`
            a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.

        Notes
        -----
        :math:`L1 = a|param-param_0|` where a is a scaling factor.

        Returns
        -------
        value : float
            Value of the regularization.
        """
        if a is None:
            a = self._scaling_factor

        self.value = a * np.sum(np.abs(self._scaled_differences(current_parameters)))
        return self.value

    def _hyperbolic_regularization(self, current_parameters, a=None, b=None):
        """
        Method that computes the value of the hyperbolic regularization.

        Parameters
        ----------
        current_parameters : list of floats
            Lists containing the optimizable values of the parameters.
        a : float, default=`None`
            a parameter (scaling factor). If not `None`, instance attribute `self._scaling_factor` is ignored.
        b : float, default=`None`
            Hyperbolic beta parameter. If not `None`, instance attribute `self._hyperbolic_beta` is ignored.

        Notes
        -----
        :math:`hyperbolic = a\\sum_{m}^{N_{charges}} ((q_m^2 + b^2 )^{1/2} - b)`

        Note that, unlike L1/L2, this acts on the raw parameter values
        (no initial-value shift, no prior-width scaling).

        Returns
        -------
        value : float
            Value of the regularization.
        """
        if a is None:
            a = self._scaling_factor
        if b is None:
            b = self._hyperbolic_beta

        reg = np.sum(np.sqrt(np.asarray(current_parameters) ** 2 + b ** 2) - b)
        self.value = a * reg
        return self.value
| 36.349345 | 171 | 0.555142 | 7,732 | 0.92888 | 0 | 0 | 0 | 0 | 0 | 0 | 5,815 | 0.698582 |
ea7b9d12029f07974525dc659c2414d6e62953e4 | 643 | py | Python | conversion/octalToDecimal.py | slowy07/pythonApps | 22f9766291dbccd8185035745950c5ee4ebd6a3e | [
"MIT"
] | 10 | 2020-10-09T11:05:18.000Z | 2022-02-13T03:22:10.000Z | conversion/octalToDecimal.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | null | null | null | conversion/octalToDecimal.py | khairanabila/pythonApps | f90b8823f939b98f7bf1dea7ed35fe6e22e2f730 | [
"MIT"
] | 6 | 2020-11-26T12:49:43.000Z | 2022-03-06T06:46:43.000Z | def octalToDecimal(octString: str)->str:
octString = str(octString).strip()
if not octString:
raise ValueError("empty string was passed to function")
isNegative = octString[0] == "-"
if isNegative:
octString = octString[1:]
if not all(0 <= int(char) <= 7 for char in octString):
raise ValueError("non octal value was passed to function")
decimalNumber = 0
for char in octString:
decimalNumber = 8 * decimalNumber + int(char)
if isNegative:
decimalNumber = -decimalNumber
return decimalNumber
if __name__ == '__main__':
from doctest import testmod
testmod()
| 30.619048 | 66 | 0.656299 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 90 | 0.139969 |
ea80ed40b25b2af9be9e4742f1f9e34326e94328 | 879 | py | Python | newsXtract.py | selection-bias-www2018/NewsXtract | 6b66024fea912ed5f34a5ac2fe051d9abf8e5ee2 | [
"BSD-3-Clause"
] | 1 | 2019-10-24T10:04:59.000Z | 2019-10-24T10:04:59.000Z | newsXtract.py | selection-bias-www2018/selection-bias-code | 6b66024fea912ed5f34a5ac2fe051d9abf8e5ee2 | [
"BSD-3-Clause"
] | null | null | null | newsXtract.py | selection-bias-www2018/selection-bias-code | 6b66024fea912ed5f34a5ac2fe051d9abf8e5ee2 | [
"BSD-3-Clause"
] | 1 | 2021-05-04T12:51:23.000Z | 2021-05-04T12:51:23.000Z | import os,json
import requests
BASE_URL = 'http://epfl.elasticsearch.spinn3r.com/content*/_search'
BULK_SIZE = 100
SPINN3R_SECRET = os.environ['SPINN3R_SECRET']
HEADERS = {
'X-vendor': 'epfl',
'X-vendor-auth': SPINN3R_SECRET
}
query = {
"size": BULK_SIZE,
"query":{
"bool":{
"must":{
"match":{
"domain":"afp.com"
}
},
"filter":{
"range":{
"published":{
"gte":"18/02/2017",
"lte":"20/02/2017",
"format":"dd/MM/yyyy"
}
}
}
}
}
}
resp = requests.post(BASE_URL, headers=HEADERS, json=query)
resp_json = json.loads(resp.text)
titles = set()
for r in resp_json['hits']['hits']:
t = r['_source']['title']
if t not in titles:
print t
titles.add(t)
| 18.702128 | 68 | 0.482366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 260 | 0.295791 |
ea827b47a08c581fe5f57d1e383518173508f1a0 | 1,672 | py | Python | core/plugins/hibp.py | area55git/Gitmails | 813234909f7bc59ef13113c92d8a52d86c896338 | [
"MIT"
] | 140 | 2018-04-26T18:29:01.000Z | 2022-02-22T01:32:31.000Z | core/plugins/hibp.py | area55git/Gitmails | 813234909f7bc59ef13113c92d8a52d86c896338 | [
"MIT"
] | 4 | 2018-04-26T20:18:47.000Z | 2018-05-02T14:46:24.000Z | core/plugins/hibp.py | area55git/Gitmails | 813234909f7bc59ef13113c92d8a52d86c896338 | [
"MIT"
] | 23 | 2018-04-26T19:08:26.000Z | 2022-02-12T22:13:14.000Z | import time
import requests
from core.utils.parser import Parser
from core.utils.helpers import Helpers
from core.models.plugin import BasePlugin
class HIBP(BasePlugin):
def __init__(self, args):
self.args = args
self.base_url = "https://haveibeenpwned.com/api/v2/breachedaccount"
self.url_parameters = "truncateResponse=true&includeUnverified=true"
def execute(self, data):
Helpers.print_warning("Starting Have I Been Pwned plugin...", jumpline=True)
all_emails = Parser(self.args).all_unique_emails(data)
if all_emails:
self.check_all_emails(all_emails)
return True
return False
def check_authors(self, authors):
for author in authors:
time.sleep(2)
self.check_email(author.email)
def check_all_emails(self, emails):
for email in emails:
time.sleep(2)
self.check_email(email)
def check_email(self, email):
try:
url = "{}/{}?{}".format(self.base_url, email, self.url_parameters)
r = requests.get(url)
if r.status_code == 503:
Helpers.print_error("hibp: IP got in DDoS protection by CloudFare")
elif r.status_code == 429:
Helpers.print_error("hibp: Throttled by HIBP API")
elif r.text:
r = r.json()
print("\n{} leaks:".format(email))
for leak in r:
print("\t- {}".format(leak["Name"]))
return True
return False
except Exception as e:
Helpers.print_error(e)
return False
| 30.962963 | 84 | 0.586124 | 1,523 | 0.910885 | 0 | 0 | 0 | 0 | 0 | 0 | 247 | 0.147727 |
ea83c30edcb40ded19cfef34c991c3cc46a06772 | 2,099 | py | Python | app/purchases.py | thowell332/Mini-Amazon | 927c387d569aef00275b7d6ecaae891fc16025e9 | [
"MIT"
] | null | null | null | app/purchases.py | thowell332/Mini-Amazon | 927c387d569aef00275b7d6ecaae891fc16025e9 | [
"MIT"
] | null | null | null | app/purchases.py | thowell332/Mini-Amazon | 927c387d569aef00275b7d6ecaae891fc16025e9 | [
"MIT"
] | null | null | null | from re import S
from flask import render_template, redirect, url_for, flash, request
from flask_paginate import Pagination, get_page_parameter
from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms import SubmitField
from flask_babel import _, lazy_gettext as _l
from flask_login import current_user
from .models.purchase import Purchase, PurchaseSummary, PurchaseEntry
from .models.user import User
from flask import Blueprint
bp = Blueprint('purchases', __name__)
# Route to show a user's past purchases.
@bp.route('/purchases', methods=['GET', 'POST'])
def purchases():
# Set up pagination.
page = request.args.get(get_page_parameter(), type=int, default=1)
per_page = 10
start = (page - 1) * per_page
# If user is not logged in, redirect.
if not current_user.is_authenticated:
return redirect(url_for('users.login'))
seller_status = User.sellerStatus(current_user.id)
# Get list of past purchases.
past_purchases = Purchase._get_purchases(current_user.id)
pagination = Pagination(page=page, per_page=per_page, total=len(past_purchases), record_name='products')
return render_template('purchases.html', purchases=past_purchases[start: start + per_page], pagination=pagination, seller_status=seller_status)
# Route to show a specific order.
@bp.route('/individual-purchase<purchase_id>', methods=['GET', 'POST'])
def individual_purchase(purchase_id):
# If user is not logged in, redirect.
if not current_user.is_authenticated:
return redirect(url_for('users.login'))
seller_status = User.sellerStatus(current_user.id)
# Get all entries in the purchase and the total price.
purchase_entries = Purchase._get_individual_purchase(current_user.id, purchase_id)
total_price_paid = Purchase._get_total_purchase_cost(current_user.id, purchase_id)
total_price_paid = ('%.2f'%total_price_paid)
return render_template('individualPurchase.html', purchase_id=purchase_id, purchase_entries=purchase_entries, total_price_paid=total_price_paid, seller_status=seller_status) | 41.98 | 177 | 0.769414 | 0 | 0 | 0 | 0 | 1,525 | 0.726536 | 0 | 0 | 413 | 0.19676 |
ea84710a91e9ce1a19dd0a2f68da27eda2cd6074 | 835 | bzl | Python | ml_metadata/workspace.bzl | zijianjoy/ml-metadata | 2fe02bc234408c49c3df37c7c5b276a85990da7d | [
"Apache-2.0"
] | 458 | 2019-01-18T18:11:00.000Z | 2022-03-28T15:27:33.000Z | ml_metadata/workspace.bzl | DrStarkXavier/ml-metadata | 3434ebaf36db54a7e67dbb0793980a74ec0c5d50 | [
"Apache-2.0"
] | 108 | 2019-01-24T02:17:46.000Z | 2022-03-30T18:36:46.000Z | ml_metadata/workspace.bzl | DrStarkXavier/ml-metadata | 3434ebaf36db54a7e67dbb0793980a74ec0c5d50 | [
"Apache-2.0"
] | 128 | 2019-01-24T05:42:43.000Z | 2022-03-25T11:11:44.000Z | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ML METADATA Data Validation external dependencies that can be loaded in WORKSPACE files.
"""
load("//ml_metadata:mysql_configure.bzl", "mysql_configure")
def ml_metadata_workspace():
    """All ML Metadata external dependencies."""
    # Invokes the rule loaded from //ml_metadata:mysql_configure.bzl;
    # presumably configures the MySQL dependency for the build — confirm there.
    mysql_configure()
| 36.304348 | 91 | 0.758084 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 753 | 0.901796 |
ea87a3235f180eca6e23bc04d0cf3cf624a85395 | 8,072 | py | Python | python/py_src/sudachipy/command_line.py | sorami/sudachi.rs | 0e3462841f281472d149140481889a265bd12dd1 | [
"Apache-2.0"
] | 69 | 2019-11-23T14:24:44.000Z | 2021-03-07T11:59:34.000Z | python/py_src/sudachipy/command_line.py | sorami/sudachi.rs | 0e3462841f281472d149140481889a265bd12dd1 | [
"Apache-2.0"
] | 5 | 2019-11-23T12:37:16.000Z | 2020-08-20T07:07:48.000Z | python/py_src/sudachipy/command_line.py | hata6502/sudachi-wasm | 7afc1c59d81cf2bacd859087fdff40f238293c25 | [
"Apache-2.0"
] | 7 | 2019-11-27T13:42:03.000Z | 2020-10-29T05:46:54.000Z | # Copyright (c) 2019 Works Applications Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import fileinput
import logging
import os
import sys
from . import __version__
from . import Dictionary, SplitMode
def _set_default_subparser(self, name, args=None):
    """
    Insert *name* as the sub-command when the command line names none.

    copy and modify code from https://bitbucket.org/ruamel/std.argparse

    Parameters
    ----------
    name : str
        Default sub-command to inject (e.g. ``'tokenize'``).
    args : list of str, optional
        Argument list to patch in place; when ``None``, ``sys.argv`` is
        patched instead.
    """
    for arg in sys.argv[1:]:
        if arg in ['-h', '--help']:  # global help if no subparser
            break
    else:
        # Only reached when no help flag was seen (the for-loop finished
        # without `break`).  Bug fix: the insertion below previously ran
        # even after `break`, rewriting `-h` / `build -h` into
        # `tokenize ...` and showing the wrong help text.  Upstream
        # ruamel.std.argparse keeps this whole check inside the `else`.
        subparser_found = False
        for x in self._subparsers._actions:
            if not isinstance(x, argparse._SubParsersAction):
                continue
            for sp_name in x._name_parser_map.keys():
                if sp_name in sys.argv[1:]:
                    subparser_found = True
        if not subparser_found:
            # insert default in first position, this implies no
            # global options without a sub_parsers specified
            if args is None:
                sys.argv.insert(1, name)
            else:
                args.insert(0, name)


argparse.ArgumentParser.set_default_subparser = _set_default_subparser
def run(tokenizer, mode, input_, output, logger, print_all, enable_dump):
    """Tokenize every line from *input_* and write one morpheme per line.

    Each output line is tab-separated: surface, part-of-speech (comma
    joined) and normalized form; with *print_all* also dictionary form,
    reading form, dictionary id, synonym group ids and an ``(OOV)`` marker
    for out-of-vocabulary morphemes.  ``EOS`` terminates every sentence.
    """
    for raw_line in input_:
        sentence = raw_line.rstrip('\n')
        # The logger is only handed over when -d (debug dump) was requested
        # (the current tokenizer may ignore it).
        dump_logger = logger if enable_dump else None
        for morpheme in tokenizer.tokenize(sentence, mode, dump_logger):
            fields = [
                morpheme.surface(),
                ",".join(morpheme.part_of_speech()),
                morpheme.normalized_form(),
            ]
            if print_all:
                synonym_ids = ','.join(str(gid) for gid in morpheme.synonym_group_ids())
                fields += [
                    morpheme.dictionary_form(),
                    morpheme.reading_form(),
                    str(morpheme.dictionary_id()),
                    '[{}]'.format(synonym_ids),
                ]
                if morpheme.is_oov():
                    fields.append("(OOV)")
            output.write("\t".join(fields))
            output.write("\n")
        output.write("EOS\n")
def _input_files_checker(args, print_usage):
    """Validate input paths; print usage and exit(1) on the first missing one."""
    for path in args.in_files:
        if os.path.exists(path):
            continue
        print_usage()
        print("{}: error: {} doesn't exist".format(__name__, path), file=sys.stderr)
        exit(1)
def _command_tokenize(args, print_usage):
    """Handler for the ``tokenize`` sub-command: tokenize the input files."""
    if args.version:
        print_version()
        return

    _input_files_checker(args, print_usage)

    # Map the -m flag onto a split mode; anything but A/B means C.
    split_modes = {"A": SplitMode.A, "B": SplitMode.B}
    mode = split_modes.get(args.mode, SplitMode.C)

    output = open(args.fpath_out, "w", encoding="utf-8") if args.fpath_out else sys.stdout

    # Debug dumps go to stdout through a dedicated, non-propagating logger.
    stdout_logger = logging.getLogger(__name__)
    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.DEBUG)
    stdout_logger.addHandler(handler)
    stdout_logger.setLevel(logging.DEBUG)
    stdout_logger.propagate = False

    try:
        dictionary = Dictionary(config_path=args.fpath_setting,
                                dict_type=args.system_dict_type)
        tokenizer_obj = dictionary.create()
        input_ = fileinput.input(args.in_files,
                                 openhook=fileinput.hook_encoded("utf-8"))
        run(tokenizer_obj, mode, input_, output,
            stdout_logger, args.a, args.d)
    finally:
        # Only close what we opened ourselves; never close sys.stdout.
        if args.fpath_out:
            output.close()
def _command_build(args, print_usage):
    # Handler for the `build` sub-command (system dictionary compilation).
    # Stub: always raises until the builder is ported (see message).
    raise NotImplementedError(
        "Build dictionary feature is not yet implemented. Please use sudachipy<0.6.")
def _command_user_build(args, print_usage):
    # Handler for the `ubuild` sub-command (user dictionary compilation).
    # Stub: always raises until the builder is ported (see message).
    raise NotImplementedError(
        "Build dictionary feature is not yet implemented. Please use sudachipy<0.6.")
def print_version():
    """Print the installed sudachipy version line to stdout."""
    version_line = 'sudachipy {}'.format(__version__)
    print(version_line)
def main():
    """Build the sudachipy CLI parser and dispatch to the chosen sub-command."""

    def add_tokenize_parser(subparsers):
        # root, tokenizer parser (the default sub-command)
        parser_tk = subparsers.add_parser(
            'tokenize', help='(default) see `tokenize -h`', description='Tokenize Text')
        parser_tk.add_argument("-r", dest="fpath_setting",
                               metavar="file", help="the setting file in JSON format")
        parser_tk.add_argument(
            "-m", dest="mode", choices=["A", "B", "C"], default="C", help="the mode of splitting")
        parser_tk.add_argument("-o", dest="fpath_out",
                               metavar="file", help="the output file")
        parser_tk.add_argument("-s", dest="system_dict_type", metavar='string',
                               choices=["small", "core", "full"],
                               help="sudachidict type")
        parser_tk.add_argument("-a", action="store_true",
                               help="print all of the fields")
        parser_tk.add_argument("-d", action="store_true",
                               help="print the debug information")
        parser_tk.add_argument("-v", "--version", action="store_true",
                               dest="version", help="print sudachipy version")
        parser_tk.add_argument("in_files", metavar="file",
                               nargs=argparse.ZERO_OR_MORE, help='text written in utf-8')
        parser_tk.set_defaults(handler=_command_tokenize,
                               print_usage=parser_tk.print_usage)

    def add_build_parser(subparsers):
        # build dictionary parser
        parser_bd = subparsers.add_parser(
            'build', help='see `build -h`', description='Build Sudachi Dictionary')
        parser_bd.add_argument('-o', dest='out_file', metavar='file', default='system.dic',
                               help='output file (default: system.dic)')
        parser_bd.add_argument('-d', dest='description', default='', metavar='string',
                               required=False,
                               help='description comment to be embedded on dictionary')
        required_named_bd = parser_bd.add_argument_group(
            'required named arguments')
        required_named_bd.add_argument('-m', dest='matrix_file', metavar='file', required=True,
                                       help='connection matrix file with MeCab\'s matrix.def format')
        parser_bd.add_argument("in_files", metavar="file", nargs=argparse.ONE_OR_MORE,
                               help='source files with CSV format (one of more)')
        parser_bd.set_defaults(handler=_command_build,
                               print_usage=parser_bd.print_usage)

    def add_user_build_parser(subparsers):
        # build user-dictionary parser
        parser_ubd = subparsers.add_parser(
            'ubuild', help='see `ubuild -h`', description='Build User Dictionary')
        parser_ubd.add_argument('-d', dest='description', default='', metavar='string',
                                required=False,
                                help='description comment to be embedded on dictionary')
        parser_ubd.add_argument('-o', dest='out_file', metavar='file', default='user.dic',
                                help='output file (default: user.dic)')
        parser_ubd.add_argument('-s', dest='system_dic', metavar='file', required=False,
                                help='system dictionary path (default: system core dictionary path)')
        parser_ubd.add_argument("in_files", metavar="file", nargs=argparse.ONE_OR_MORE,
                                help='source files with CSV format (one or more)')
        parser_ubd.set_defaults(handler=_command_user_build,
                                print_usage=parser_ubd.print_usage)

    parser = argparse.ArgumentParser(
        description="Japanese Morphological Analyzer")
    subparsers = parser.add_subparsers(description='')
    add_tokenize_parser(subparsers)
    add_build_parser(subparsers)
    add_user_build_parser(subparsers)

    # Fall back to `tokenize` when no sub-command was named on the command line.
    parser.set_default_subparser('tokenize')

    args = parser.parse_args()
    if hasattr(args, 'handler'):
        args.handler(args, args.print_usage)
    else:
        parser.print_help()
if __name__ == '__main__':
    # Console entry point when the module is executed directly.
    main()
| 38.807692 | 115 | 0.622646 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,400 | 0.297324 |
ea891fd6d6e8d2f7b5c477a52da9998bb0a91863 | 4,487 | py | Python | behave/formatter/json.py | stackedsax/behave | 0cdd1d7e0ebcb43e08d7c5d0b25f62d7ffbfeb5f | [
"BSD-2-Clause"
] | null | null | null | behave/formatter/json.py | stackedsax/behave | 0cdd1d7e0ebcb43e08d7c5d0b25f62d7ffbfeb5f | [
"BSD-2-Clause"
] | null | null | null | behave/formatter/json.py | stackedsax/behave | 0cdd1d7e0ebcb43e08d7c5d0b25f62d7ffbfeb5f | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
try:
import json
except ImportError:
import simplejson as json
from behave.formatter.base import Formatter
class JSONFormatter(Formatter):
    """Behave formatter that builds one Gherkin-shaped dict per feature and
    dumps the whole run as a single JSON object in close().

    Callback order matters: behave invokes feature()/background()/
    scenario()/step()/match()/result() in document order, and this class
    tracks the "current" feature/element/step through mutable state
    (``_gherkin_object``, ``_step_index``).
    """
    name = 'json'
    description = 'JSON dump of test run'
    # Extra keyword arguments forwarded to json.dumps() in close();
    # subclasses may override (e.g. to pretty-print).
    dumps_kwargs = {}
    def __init__(self, stream, config):
        super(JSONFormatter, self).__init__(stream, config)
        # Dict for the feature currently being built.
        self._gherkin_object = None
        # Index of the step the next match()/result() callback refers to.
        self._step_index = 0
        # Completed feature dicts; serialized in close().
        self._features = []
    def uri(self, uri):
        # The feature file URI is not recorded in the JSON output.
        pass
    def feature(self, feature):
        # Start a fresh feature dict; elements are added lazily by
        # _add_feature_element().
        # NOTE(review): feature.location is stored as-is; json.dumps in
        # close() must be able to serialize it — confirm.
        self._gherkin_object = {
            'keyword': feature.keyword,
            'tags': list(feature.tags),
            'description': feature.description,
            'location': feature.location,
        }
    def background(self, background):
        self._add_feature_element({
            'keyword': background.keyword,
            'location': background.location,
            'steps': [],
        })
        self._step_index = 0
        # -- ADD BACKGROUND STEPS: Support *.feature file regeneration.
        for step_ in background.steps:
            self.step(step_)
    def scenario(self, scenario):
        # New scenario element; step bookkeeping restarts at 0.
        self._add_feature_element({
            'keyword': scenario.keyword,
            'name': scenario.name,
            'tags': scenario.tags,
            'location': scenario.location,
            'steps': [],
        })
        self._step_index = 0
    def scenario_outline(self, scenario_outline):
        # Like scenario(), but with an 'examples' list filled by examples().
        self._add_feature_element({
            'keyword': scenario_outline.keyword,
            'name': scenario_outline.name,
            'tags': scenario_outline.tags,
            'location': scenario_outline.location,
            'steps': [],
            'examples': [],
        })
        self._step_index = 0
    @classmethod
    def make_table(cls, table):
        # Convert a behave table into plain lists so it is JSON-friendly.
        table_data = {
            'headings': table.headings,
            'rows': [ list(row) for row in table.rows ]
        }
        return table_data
    def examples(self, examples):
        e = {
            'keyword': examples.keyword,
            'name': examples.name,
            'location': examples.location,
        }
        if examples.table:
            e['table'] = self.make_table(examples.table)
        # Appends to the 'examples' list created by scenario_outline().
        element = self._feature_element()
        element['examples'].append(e)
    def step(self, step):
        s = {
            'keyword': step.keyword,
            'step_type': step.step_type,
            'name': step.name,
            'location': step.location,
        }
        # Optional multi-line text and data table attached to the step.
        if step.text:
            s['text'] = step.text
        if step.table:
            s['table'] = self.make_table(step.table)
        element = self._feature_element()
        element['steps'].append(s)
    def match(self, match):
        # Record the matched step implementation and its parsed arguments.
        args = []
        for argument in match.arguments:
            arg = {
                'original': argument.original,
                'value': argument.value,
            }
            if argument.name:
                arg['name'] = argument.name
            args.append(arg)
        # The local dict deliberately shadows the `match` parameter once it
        # is no longer needed.
        match = {
            'location': match.location,
            'arguments': args,
        }
        # _step_index pairs this match with the step recorded at the same
        # position in the current element's 'steps' list.
        steps = self._feature_element()['steps']
        steps[self._step_index]['match'] = match
    def result(self, result):
        # Attach the outcome to the current step, then advance the index so
        # the next match()/result() targets the following step.
        steps = self._feature_element()['steps']
        steps[self._step_index]['result'] = {
            'status': result.status,
            'duration': result.duration,
        }
        self._step_index += 1
    def embedding(self, mime_type, data):
        # Base64-encode binary attachments onto the most recent step.
        # NOTE(review): on Python 3 b64encode returns bytes, so
        # .replace('\n', '') with str arguments would raise TypeError —
        # this looks Python-2 only; confirm.
        step = self._feature_element()['steps'][-1]
        step['embeddings'].append({
            'mime_type': mime_type,
            'data': base64.b64encode(data).replace('\n', ''),
        })
    def eof(self):
        # End of a feature file: archive the finished feature dict
        # (only when a stream is configured).
        if not self.stream:
            return
        self._features.append(self._gherkin_object)
    def close(self):
        # Serialize the whole run in one go; dumps_kwargs controls layout.
        obj = {'features': self._features}
        self.stream.write(json.dumps(obj, **self.dumps_kwargs))
    def _add_feature_element(self, element):
        # Lazily create the 'elements' list on the current feature and
        # append the new background/scenario/outline dict.
        if 'elements' not in self._gherkin_object:
            self._gherkin_object['elements'] = []
        self._gherkin_object['elements'].append(element)
    def _feature_element(self):
        # The element currently being populated is always the last one.
        return self._gherkin_object['elements'][-1]
class PrettyJSONFormatter(JSONFormatter):
    # Same JSON output, but human readable: indented and with sorted keys
    # (dumps_kwargs is forwarded to json.dumps when the run is serialized).
    name = 'json-pretty'
    description = 'JSON dump of test run (human readable)'
    dumps_kwargs = { 'indent': 2, 'sort_keys': True }
| 27.869565 | 71 | 0.556274 | 4,287 | 0.955427 | 0 | 0 | 199 | 0.04435 | 0 | 0 | 634 | 0.141297 |
ea898ea65cb361916f48efa47b1354040802b387 | 54,368 | py | Python | MUNIT/networks.py | NoaBrazilay/DeepLearningProject | 5c44d21069de1fc5fa2687c4121286670be3d773 | [
"MIT"
] | 2 | 2021-09-03T11:44:31.000Z | 2021-09-22T11:51:47.000Z | MUNIT/networks.py | NoaBrazilay/MISSGAN | 5c44d21069de1fc5fa2687c4121286670be3d773 | [
"MIT"
] | null | null | null | MUNIT/networks.py | NoaBrazilay/MISSGAN | 5c44d21069de1fc5fa2687c4121286670be3d773 | [
"MIT"
] | 1 | 2020-10-20T08:06:50.000Z | 2020-10-20T08:06:50.000Z | """
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from torch import nn
from torch.autograd import Variable
import torch
import torch.nn.functional as F
import utils
import functools
try:
from itertools import izip as zip
except ImportError: # will be 3.x series
pass
from torch.nn import init
##################################################################################
# Discriminator
##################################################################################
# Defines the PatchGAN discriminator with the specified arguments.
class PatchDis(nn.Module):
    """PatchGAN discriminator with several independent towers.

    Each tower maps an image to a patch-wise real/fake score map.
    ``lsgan`` heads emit raw scores; every other GAN type gets a final
    sigmoid.  Unlike ``MsImageDis``, every tower sees the same
    full-resolution input (no downsampling between scales).
    """
    def __init__(self, input_dim, params):
        super(PatchDis, self).__init__()
        self.n_layer = params['patch_n_layer']
        self.dim = params['dim']
        self.norm = params['norm']
        self.activ = params['activ']
        self.num_scales = params['num_scales']
        self.gan_type = params['gan_type']
        self.pad_type = params['pad_type']
        self.use_sigmoid = not (self.gan_type == 'lsgan')
        self.input_dim = input_dim
        self.cnns = nn.ModuleList([self._make_net() for _ in range(self.num_scales)])
    def _make_net(self):
        """Build one tower: stride-2 convs, a stride-1 conv, 1-channel head."""
        kernel, pad = 4, 1
        dim = self.dim
        layers = [nn.Conv2d(self.input_dim, self.dim, kernel_size=kernel, stride=2, padding=pad),
                  nn.LeakyReLU(0.2, True)]
        for _ in range(self.n_layer - 1):
            layers.append(Conv2dBlock(dim, dim * 2, kernel, 2, pad,
                                      norm=self.norm, activation=self.activ, pad_type='zero'))
            dim *= 2
        layers.append(Conv2dBlock(dim, dim * 2, kernel, 1, pad,
                                  norm=self.norm, activation=self.activ, pad_type='zero'))
        layers.append(nn.Conv2d(dim * 2, 1, kernel_size=kernel, stride=1, padding=pad))
        if self.use_sigmoid:
            layers.append(nn.Sigmoid())
        return nn.Sequential(*layers)
    def forward(self, x):
        """Return the list of per-tower patch score maps for ``x``."""
        return [model(x) for model in self.cnns]
    def calc_dis_loss(self, input_fake, input_real):
        """Discriminator update loss: fake patches -> 0, real patches -> 1."""
        fake_outs = self.forward(input_fake)
        real_outs = self.forward(input_real)
        loss = 0
        for out_fake, out_real in zip(fake_outs, real_outs):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out_fake - 0) ** 2) + torch.mean((out_real - 1) ** 2)
            elif self.gan_type == 'nsgan':
                zeros = Variable(torch.zeros_like(out_fake.data).cuda(), requires_grad=False)
                ones = Variable(torch.ones_like(out_real.data).cuda(), requires_grad=False)
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out_fake), zeros) +
                                   F.binary_cross_entropy(F.sigmoid(out_real), ones))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
    def calc_gen_loss(self, input_fake):
        """Generator adversarial loss: fake patches pushed towards 1."""
        loss = 0
        for out_fake in self.forward(input_fake):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out_fake - 1) ** 2)  # LSGAN
            elif self.gan_type == 'nsgan':
                ones = Variable(torch.ones_like(out_fake.data).cuda(), requires_grad=False)
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out_fake), ones))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
class MsImageDis(nn.Module):
    """Multi-scale image discriminator.

    One PatchGAN-style tower per scale; between scales the input is
    average-pooled to half resolution, so each tower judges the image at
    a coarser level of detail.
    """
    def __init__(self, input_dim, params):
        super(MsImageDis, self).__init__()
        self.n_layer = params['n_layer']
        self.gan_type = params['gan_type']
        self.dim = params['dim']
        self.norm = params['norm']
        self.activ = params['activ']
        self.num_scales = params['num_scales']
        self.pad_type = params['pad_type']
        self.input_dim = input_dim
        # Halves the spatial resolution between consecutive scales.
        self.downsample = nn.AvgPool2d(3, stride=2, padding=[1, 1], count_include_pad=False)
        self.cnns = nn.ModuleList([self._make_net() for _ in range(self.num_scales)])
    def _make_net(self):
        """Build one tower of stride-2 convs ending in a 1-channel score map."""
        dim = self.dim
        layers = [Conv2dBlock(self.input_dim, dim, 4, 2, 1,
                              norm='none', activation=self.activ, pad_type=self.pad_type)]
        for _ in range(self.n_layer - 1):
            layers.append(Conv2dBlock(dim, dim * 2, 4, 2, 1,
                                      norm=self.norm, activation=self.activ, pad_type=self.pad_type))
            dim *= 2
        layers.append(nn.Conv2d(dim, 1, 1, 1, 0))
        return nn.Sequential(*layers)
    def forward(self, x):
        """Score ``x`` at every scale; the input is pooled after each tower."""
        outputs = []
        for model in self.cnns:
            outputs.append(model(x))
            x = self.downsample(x)
        return outputs
    def calc_dis_loss(self, input_fake, input_real):
        """Discriminator update loss: fake patches -> 0, real patches -> 1."""
        fake_outs = self.forward(input_fake)
        real_outs = self.forward(input_real)
        loss = 0
        for out_fake, out_real in zip(fake_outs, real_outs):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out_fake - 0) ** 2) + torch.mean((out_real - 1) ** 2)
            elif self.gan_type == 'nsgan':
                zeros = Variable(torch.zeros_like(out_fake.data).cuda(), requires_grad=False)
                ones = Variable(torch.ones_like(out_real.data).cuda(), requires_grad=False)
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out_fake), zeros) +
                                   F.binary_cross_entropy(F.sigmoid(out_real), ones))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
    def calc_gen_loss(self, input_fake):
        """Generator adversarial loss: fake patches pushed towards 1."""
        loss = 0
        for out_fake in self.forward(input_fake):
            if self.gan_type == 'lsgan':
                loss += torch.mean((out_fake - 1) ** 2)  # LSGAN
            elif self.gan_type == 'nsgan':
                ones = Variable(torch.ones_like(out_fake.data).cuda(), requires_grad=False)
                loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out_fake), ones))
            else:
                assert 0, "Unsupported GAN type: {}".format(self.gan_type)
        return loss
##################################################################################
# Generator
##################################################################################
class AdaINGen(nn.Module):
    """AdaIN auto-encoder: style encoder + content encoder + AdaIN decoder.

    A small MLP maps the style code to the (bias, weight) parameters of
    every AdaptiveInstanceNorm2d layer inside the decoder.
    """
    def __init__(self, input_dim, params):
        super(AdaINGen, self).__init__()
        dim = params['dim']
        style_dim = params['style_dim']
        n_downsample = params['n_downsample']
        n_res = params['n_res']
        activ = params['activ']
        pad_type = params['pad_type']
        mlp_dim = params['mlp_dim']
        mlp_n_blk = params['mlp_n_blk']
        # style encoder (fixed 4 downsampling steps)
        self.enc_style = StyleEncoder(4, input_dim, dim, style_dim, norm='none', activ=activ, pad_type=pad_type)
        # content encoder and AdaIN decoder
        self.enc_content = ContentEncoder(n_downsample, n_res, input_dim, dim, 'in', activ, pad_type=pad_type)
        self.dec = Decoder(n_downsample, n_res, self.enc_content.output_dim, input_dim, res_norm='adain', activ=activ, pad_type=pad_type)
        # MLP producing all AdaIN parameters from the style code
        self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, mlp_n_blk, norm='none', activ=activ)

    def forward(self, images):
        """Auto-encode: reconstruct the input from its own content and style codes."""
        content, style_fake = self.encode(images)
        return self.decode(content, style_fake)

    def encode(self, images):
        """Split an image into its (content, style) codes."""
        return self.enc_content(images), self.enc_style(images)

    def decode(self, content, style):
        """Render an image from a content code, conditioned on a style code via AdaIN."""
        self.assign_adain_params(self.mlp(style), self.dec)
        return self.dec(content)

    def assign_adain_params(self, adain_params, model):
        """Distribute the flat AdaIN parameter vector over model's AdaIN layers, in module order."""
        remaining = adain_params
        for module in model.modules():
            if module.__class__.__name__ == "AdaptiveInstanceNorm2d":
                n = module.num_features
                module.bias = remaining[:, :n].contiguous().view(-1)
                module.weight = remaining[:, n:2 * n].contiguous().view(-1)
                if remaining.size(1) > 2 * n:
                    remaining = remaining[:, 2 * n:]

    def get_num_adain_params(self, model):
        """Count how many AdaIN parameters (bias + weight per feature) model needs."""
        return sum(2 * m.num_features for m in model.modules()
                   if m.__class__.__name__ == "AdaptiveInstanceNorm2d")
class AdaINGanilla(nn.Module):
    # ------------ AdaIN with GANILLA generator ------------ #
    # AdaIN auto-encoder using the GANILLA generator architecture.
    # Generator architecture taken from:
    # https://github.com/giddyyupp/ganilla
    # ------------------------------------------------------- #
    def __init__(self, input_dim, params):
        """Build style/content encoders, FPN-style decoder, and the AdaIN MLP from a params dict."""
        super(AdaINGanilla, self).__init__()
        dim = params['dim']
        style_dim = params['style_dim']
        n_downsample = params['n_downsample']
        n_res = params['n_res']
        activ = params['activ']
        pad_type = params['pad_type']
        mlp_dim = params['mlp_dim']
        ganilla_ngf = params['ganilla_ngf']
        ganilla_block_nf = params['ganilla_block_nf']
        ganilla_layer_nb = params['ganilla_layer_nb']
        use_dropout = params['use_dropout']
        output_dim = params['output_dim']
        use_style_enc_simple = params['use_style_enc_simple']
        # Style encoder: either the simple MUNIT StyleEncoder or a GANILLA-based one.
        if use_style_enc_simple:
            self.enc_style = StyleEncoder(4, input_dim, dim, style_dim, norm='none', activ=activ, pad_type=pad_type)
        else:
            self.enc_style = GanillaStyleEncoder(input_dim, style_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb,
                                                 use_dropout, norm = 'none', pad_type =pad_type)
        # Ganilla Content Encoder (returns 4 feature maps for the FPN decoder)
        self.enc_content = GanillaContentEncoder(input_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb,
                                                 use_dropout, norm = 'in', pad_type =pad_type)
        # Channel widths of each encoder stage's last conv, used as the decoder's skip sizes.
        sk_sizes = [self.enc_content.layer1[ganilla_layer_nb[0] - 1].conv2.out_channels,
                    self.enc_content.layer2[ganilla_layer_nb[1] - 1].conv2.out_channels,
                    self.enc_content.layer3[ganilla_layer_nb[2] - 1].conv2.out_channels,
                    self.enc_content.layer4[ganilla_layer_nb[3] - 1].conv2.out_channels]
        self.dec = GanillaDecoder(output_dim, *sk_sizes, res_norm='adain', activ=activ, pad_type=pad_type)
        #self.dec = GanillaDecoder2(n_res,output_dim, *sk_sizes, res_norm='adain', activ=activ, pad_type=pad_type)
        # MLP to generate AdaIN parameters (n_blk hard-coded to 3 here, unlike AdaINGen)
        self.mlp = MLP(style_dim, self.get_num_adain_params(self.dec), mlp_dim, 3, norm='none', activ=activ)
        # input_dim, output_dim, dim, n_blk, norm = 'none', activ = 'relu'
    def forward(self, images):
        """Auto-encode: reconstruct an image from its own content and style codes."""
        content, style_fake = self.encode(images)
        images_recon = self.decode(content, style_fake)
        return images_recon
    def encode(self, images):
        """Encode an image into (content, style) codes; content is a list of 4 feature maps."""
        style_fake = self.enc_style(images)
        content = self.enc_content(images)
        return content, style_fake
    def decode(self, content, style):
        """Decode content conditioned on style: style -> AdaIN params via MLP, then run the decoder."""
        adain_params = self.mlp(style)
        self.assign_adain_params(adain_params, self.dec)
        images = self.dec(content)
        return images
    def assign_adain_params(self, adain_params, model):
        """Slice the flat AdaIN parameter vector into (bias, weight) for each AdaIN layer, in module order."""
        for m in model.modules():
            if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
                mean = adain_params[:, :m.num_features]
                std = adain_params[:, m.num_features:2*m.num_features]
                m.bias = mean.contiguous().view(-1)
                m.weight = std.contiguous().view(-1)
                # advance to the parameters of the next AdaIN layer
                if adain_params.size(1) > 2*m.num_features:
                    adain_params = adain_params[:, 2*m.num_features:]
    def get_num_adain_params(self, model):
        """Return the total number of AdaIN parameters (bias + weight per feature) in model."""
        num_adain_params = 0
        for m in model.modules():
            if m.__class__.__name__ == "AdaptiveInstanceNorm2d":
                num_adain_params += 2*m.num_features
        return num_adain_params
class VAEGen(nn.Module):
# VAE architecture
def __init__(self, input_dim, params):
super(VAEGen, self).__init__()
dim = params['dim']
n_downsample = params['n_downsample']
n_res = params['n_res']
activ = params['activ']
pad_type = params['pad_type']
# content encoder
self.enc = ContentEncoder(n_downsample, n_res, input_dim, dim, 'in', activ, pad_type=pad_type)
self.dec = Decoder(n_downsample, n_res, self.enc.output_dim, input_dim, res_norm='in', activ=activ, pad_type=pad_type)
def forward(self, images):
# This is a reduced VAE implementation where we assume the outputs are multivariate Gaussian distribution with mean = hiddens and std_dev = all ones.
hiddens = self.encode(images)
if self.training == True:
noise = Variable(torch.randn(hiddens.size()).cuda(hiddens.data.get_device()))
images_recon = self.decode(hiddens + noise)
else:
images_recon = self.decode(hiddens)
return images_recon, hiddens
def encode(self, images):
hiddens = self.enc(images)
noise = Variable(torch.randn(hiddens.size()).cuda(hiddens.data.get_device()))
return hiddens, noise
def decode(self, hiddens):
images = self.dec(hiddens)
return images
##################################################################################
# Encoder and Decoders
##################################################################################
class StyleEncoder(nn.Module):
    """Downsampling CNN mapping an image to a style code of size `style_dim`.

    7x7 stem, two width-doubling stride-2 convs, (n_downsample - 2) more
    stride-2 convs at constant width, then global average pooling and a
    1x1 conv to the style dimension.
    """
    def __init__(self, n_downsample, input_dim, dim, style_dim, norm, activ, pad_type):
        super(StyleEncoder, self).__init__()
        layers = [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)]
        for _ in range(2):
            layers.append(Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type))
            dim *= 2
        layers.extend(Conv2dBlock(dim, dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type)
                      for _ in range(n_downsample - 2))
        layers.append(nn.AdaptiveAvgPool2d(1))  # global average pooling
        layers.append(nn.Conv2d(dim, style_dim, 1, 1, 0))
        self.model = nn.Sequential(*layers)
        self.output_dim = dim

    def forward(self, x):
        return self.model(x)
class GanillaStyleEncoder(nn.Module):
    """GANILLA-style residual encoder that emits a style code.

    A 7x7 stem (layer0), four residual stages (layer1..layer4; stage 1 keeps
    resolution, the rest downsample by 2), then global average pooling and a
    1x1 conv to `style_dim`.
    """
    def __init__(self, input_dim, style_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb, use_dropout, norm, pad_type):
        super(GanillaStyleEncoder, self).__init__()
        self.layer0 = FirstBlock_Ganilla(input_dim, ganilla_ngf, norm=norm, pad_type=pad_type)
        # residual stages; attribute names layer1..layer4 kept for checkpoint compatibility
        in_ch = ganilla_ngf
        for idx, (out_ch, depth) in enumerate(zip(ganilla_block_nf, ganilla_layer_nb), start=1):
            stage = self._make_layer_ganilla(BasicBlock_Ganilla, in_ch, out_ch, depth,
                                             use_dropout, norm, stride=1 if idx == 1 else 2)
            setattr(self, 'layer%d' % idx, stage)
            in_ch = out_ch
        self.pool_layer = nn.AdaptiveAvgPool2d(1)  # global average pooling
        self.fc_style = nn.Conv2d(ganilla_block_nf[3], style_dim, 1, 1, 0)

    def _make_layer_ganilla(self, block, inplanes, planes, blocks, use_dropout, norm, stride=1):
        """Stack `blocks` residual blocks; only the first may change stride/width."""
        layers = []
        for k, s in enumerate([stride] + [1] * (blocks - 1)):
            layers.append(block(inplanes if k == 0 else planes, planes, use_dropout,
                                stride=s, norm=norm, pad_type='reflect'))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Ganilla encoder trunk
        feat = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            feat = stage(feat)
        # global pooling + 1x1 conv -> style code
        return self.fc_style(self.pool_layer(feat))
class GanillaContentEncoder(nn.Module):
    """GANILLA-style residual encoder returning all four stage outputs.

    Same trunk as GanillaStyleEncoder but no pooling head: forward returns
    [x1, x2, x3, x4], one feature map per residual stage, for use as FPN
    skip connections by the decoder.
    """
    def __init__(self, input_dim, ganilla_ngf, ganilla_block_nf, ganilla_layer_nb, use_dropout, norm, pad_type):
        super(GanillaContentEncoder, self).__init__()
        self.layer0 = FirstBlock_Ganilla(input_dim, ganilla_ngf, norm=norm, pad_type=pad_type)
        # residual stages; attribute names layer1..layer4 kept for checkpoint compatibility
        in_ch = ganilla_ngf
        for idx, (out_ch, depth) in enumerate(zip(ganilla_block_nf, ganilla_layer_nb), start=1):
            stage = self._make_layer_ganilla(BasicBlock_Ganilla, in_ch, out_ch, depth,
                                             use_dropout, norm, stride=1 if idx == 1 else 2)
            setattr(self, 'layer%d' % idx, stage)
            in_ch = out_ch

    def _make_layer_ganilla(self, block, inplanes, planes, blocks, use_dropout, norm, stride=1):
        """Stack `blocks` residual blocks; only the first may change stride/width."""
        layers = []
        for k, s in enumerate([stride] + [1] * (blocks - 1)):
            layers.append(block(inplanes if k == 0 else planes, planes, use_dropout,
                                stride=s, norm=norm, pad_type='reflect'))
        return nn.Sequential(*layers)

    def forward(self, x):
        # Ganilla encoder trunk; collect every stage output for the FPN decoder
        feats = []
        out = self.layer0(x)
        for stage in (self.layer1, self.layer2, self.layer3, self.layer4):
            out = stage(out)
            feats.append(out)
        return feats
class ContentEncoder(nn.Module):
    """Content encoder: 7x7 stem, n_downsample stride-2 convs, then n_res residual blocks.

    `output_dim` records the final channel width (dim * 2**n_downsample) for
    the matching Decoder.
    """
    def __init__(self, n_downsample, n_res, input_dim, dim, norm, activ, pad_type):
        super(ContentEncoder, self).__init__()
        # Conv2dBlock signature: input_dim, output_dim, kernel_size, stride, padding=0, norm, activation, pad_type
        layers = [Conv2dBlock(input_dim, dim, 7, 1, 3, norm=norm, activation=activ, pad_type=pad_type)]
        # downsampling blocks: halve resolution, double width
        for _ in range(n_downsample):
            layers.append(Conv2dBlock(dim, 2 * dim, 4, 2, 1, norm=norm, activation=activ, pad_type=pad_type))
            dim *= 2
        # residual blocks at constant width
        layers.append(ResBlocks(n_res, dim, norm=norm, activation=activ, pad_type=pad_type))
        self.model = nn.Sequential(*layers)
        self.output_dim = dim

    def forward(self, x):
        return self.model(x)
class GanillaDecoder(nn.Module):
    """FPN-style decoder merging the four GanillaContentEncoder feature maps top-down.

    Each stage: 1x1 lateral conv to `feature_size` channels, nearest-neighbor
    2x upsample, a 3x3 Conv2dBlock (AdaIN-capable via `res_norm`), and an
    elementwise add with the next lateral feature.  A final tanh conv maps to
    `output_dim` image channels.
    """
    def __init__(self, output_dim, C2_size, C3_size, C4_size, C5_size, res_norm='none', activ='lrelu', pad_type='reflect', feature_size=128):
        super(GanillaDecoder, self).__init__()
        kw_adain = 3
        pdw_adain = 1
        # upsample C5 to get P5 from the FPN paper
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm=res_norm, activation=activ,
                                pad_type=pad_type)
        # add P5 elementwise to C4
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm=res_norm, activation=activ,
                                pad_type=pad_type)
        # add P4 elementwise to C3
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm=res_norm, activation=activ,
                                pad_type=pad_type)
        self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P2_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.rp4 = nn.ReflectionPad2d(1)
        self.P2_2 = nn.Conv2d(int(feature_size), int(feature_size / 2), kernel_size=3, stride=1, padding=0)
        # FIX: padding was `output_dim`, which is only correct by coincidence for
        # RGB (output_dim == 3); a 7x7 'same' conv needs padding 3 regardless.
        self.final = Conv2dBlock(int(feature_size / 2), output_dim, 7, 1, padding=3, norm='none', activation='tanh',
                                 pad_type=pad_type)

    def forward(self, inputs):
        """inputs: [C2, C3, C4, C5] encoder features, shallow to deep."""
        C2, C3, C4, C5 = inputs
        # top-down pathway with lateral connections
        P5_x = self.P5_1(C5)
        P5_upsampled_x = self.P5_upsampled(P5_x)
        P5_adain_x = self.P5_2(P5_upsampled_x)
        P4_x = self.P4_1(C4)
        P4_x = P5_adain_x + P4_x
        P4_upsampled_x = self.P4_upsampled(P4_x)
        P4_adain_x = self.P4_2(P4_upsampled_x)
        P3_x = self.P3_1(C3)
        P3_x = P3_x + P4_adain_x
        P3_upsampled_x = self.P3_upsampled(P3_x)
        P3_adain_x = self.P3_2(P3_upsampled_x)
        P2_x = self.P2_1(C2)
        P2_x = P2_x + P3_adain_x
        P2_upsampled_x = self.P2_upsampled(P2_x)
        # reflection pad + 3x3 conv halving the channel count, then tanh output conv
        P2_x = self.rp4(P2_upsampled_x)
        P2_x = self.P2_2(P2_x)
        out = self.final(P2_x)
        return out
class GanillaDecoder2(nn.Module):
    """Variant of GanillaDecoder with AdaIN residual blocks on C5 and no per-stage conv blocks.

    The P5_2/P4_2/P3_2 blocks are constructed but bypassed in forward (their
    calls are commented out in the original); they are kept registered so
    existing checkpoints still load.
    """
    def __init__(self, n_res, output_dim, C2_size, C3_size, C4_size, C5_size, res_norm='none', activ='lrelu', pad_type='reflect', feature_size=128):
        super(GanillaDecoder2, self).__init__()
        kw_adain = 3
        pdw_adain = 1
        # AdaIN residual blocks applied to the deepest feature map before the FPN path
        res_blocks = [ResBlocks(n_res, C5_size, res_norm, activ, pad_type=pad_type)]
        self.res_block_model = nn.Sequential(*res_blocks)
        # upsample C5 to get P5 from the FPN paper
        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm='ln', activation=activ,
                                pad_type=pad_type)
        # add P5 elementwise to C4
        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm='ln', activation=activ,
                                pad_type=pad_type)
        # add P4 elementwise to C3
        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = Conv2dBlock(feature_size, feature_size, kw_adain, 1, pdw_adain, norm='ln', activation=activ,
                                pad_type=pad_type)
        self.P2_1 = nn.Conv2d(C2_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P2_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.rp4 = nn.ReflectionPad2d(1)
        self.P2_2 = nn.Conv2d(int(feature_size), int(feature_size / 2), kernel_size=3, stride=1, padding=0)
        # FIX: padding was `output_dim`, which is only correct by coincidence for
        # RGB (output_dim == 3); a 7x7 'same' conv needs padding 3 regardless.
        self.final = Conv2dBlock(int(feature_size / 2), output_dim, 7, 1, padding=3, norm='none', activation='tanh',
                                 pad_type=pad_type)

    def forward(self, inputs):
        """inputs: [C2, C3, C4, C5] encoder features, shallow to deep."""
        C2, C3, C4, C5 = inputs
        res_x = self.res_block_model(C5)
        # top-down pathway; the per-stage conv blocks are intentionally skipped
        P5_x = self.P5_1(res_x)
        P5_upsampled_x = self.P5_upsampled(P5_x)
        #P5_adain_x = self.P5_2(P5_upsampled_x)
        P4_x = self.P4_1(C4)
        P4_x = P5_upsampled_x + P4_x
        P4_upsampled_x = self.P4_upsampled(P4_x)
        #P4_adain_x = self.P4_2(P4_upsampled_x)
        P3_x = self.P3_1(C3)
        P3_x = P3_x + P4_upsampled_x
        P3_upsampled_x = self.P3_upsampled(P3_x)
        #P3_adain_x = self.P3_2(P3_upsampled_x)
        P2_x = self.P2_1(C2)
        P2_x = P2_x + P3_upsampled_x
        P2_upsampled_x = self.P2_upsampled(P2_x)
        P2_x = self.rp4(P2_upsampled_x)
        P2_x = self.P2_2(P2_x)
        out = self.final(P2_x)
        return out
class Decoder(nn.Module):
    """Decoder: residual blocks (AdaIN by default) then upsample+conv stages and a tanh output conv."""
    def __init__(self, n_upsample, n_res, dim, output_dim, res_norm='adain', activ='relu', pad_type='zero'):
        super(Decoder, self).__init__()
        # residual blocks at full depth
        layers = [ResBlocks(n_res, dim, res_norm, activ, pad_type=pad_type)]
        # upsampling stages: 2x nearest upsample, then 5x5 conv halving the width (LayerNorm)
        for _ in range(n_upsample):
            layers.append(nn.Upsample(scale_factor=2))
            layers.append(Conv2dBlock(dim, dim // 2, 5, 1, 2, norm='ln', activation=activ, pad_type=pad_type))
            dim //= 2
        # final conv to image space; padding comes from pad_type (reflection recommended)
        layers.append(Conv2dBlock(dim, output_dim, 7, 1, 3, norm='none', activation='tanh', pad_type=pad_type))
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        return self.model(x)
##################################################################################
# Sequential Models
##################################################################################
class ResBlocks(nn.Module):
    """A sequential chain of `num_blocks` identical residual blocks."""
    def __init__(self, num_blocks, dim, norm='in', activation='relu', pad_type='zero'):
        super(ResBlocks, self).__init__()
        blocks = [ResBlock(dim, norm=norm, activation=activation, pad_type=pad_type)
                  for _ in range(num_blocks)]
        self.model = nn.Sequential(*blocks)

    def forward(self, x):
        return self.model(x)
class MLP(nn.Module):
    """Fully connected network: input -> (n_blk - 1) hidden layers of width `dim` -> output.

    The final layer has no normalization or activation.
    """
    def __init__(self, input_dim, output_dim, dim, n_blk, norm='none', activ='relu'):
        super(MLP, self).__init__()
        layers = [LinearBlock(input_dim, dim, norm=norm, activation=activ)]
        layers += [LinearBlock(dim, dim, norm=norm, activation=activ) for _ in range(n_blk - 2)]
        layers.append(LinearBlock(dim, output_dim, norm='none', activation='none'))  # no output activation
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # flatten everything but the batch dimension
        return self.model(x.view(x.size(0), -1))
##################################################################################
# Basic Blocks
##################################################################################
class ResBlock(nn.Module):
    """Two 3x3 conv blocks with an identity skip connection.

    The second conv has no activation so the sum stays unsquashed.
    """
    def __init__(self, dim, norm='in', activation='relu', pad_type='zero'):
        super(ResBlock, self).__init__()
        self.model = nn.Sequential(
            Conv2dBlock(dim, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type),
            Conv2dBlock(dim, dim, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type),
        )

    def forward(self, x):
        return x + self.model(x)
class BasicBlock_Ganilla(nn.Module):
    # GANILLA residual block: two padded convs, a (possibly 1x1-conv) shortcut,
    # and a 3x3 "final" conv applied to the concatenation of the conv path and
    # the shortcut (so the merge is concat+conv, not a plain add).
    # inputs should be input_dim, output_dim, kernel_size, stride, padding = 0, norm = 'none', activation = 'relu', pad_type = 'zero'
    def __init__(self, input_dim, output_dim, use_dropout, kernel_size=3, stride=1, padding = 1, norm = 'none', pad_type='reflect'):
        super(BasicBlock_Ganilla, self).__init__()
        self.expansion = 1
        # initialize padding (one shared padding module reused before each conv)
        if pad_type == 'reflect':
            # Pads the input tensor using the reflection of the input boundary
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # initialize normalization
        # NOTE(review): a SINGLE norm module instance is created here and then
        # aliased into bn1, bn2, the shortcut, and final_conv below.  For 'bn'
        # this means all of those positions share one set of affine parameters
        # and running statistics — likely unintended for BatchNorm; confirm.
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            # self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        self.rp1 = self.pad
        self.conv1 = nn.Conv2d(input_dim, output_dim, kernel_size=kernel_size, stride=stride, padding=0, bias=False)
        # NOTE(review): bn1/bn2 are assigned but forward() applies self.norm
        # directly, so these aliases are effectively unused.
        self.bn1 = self.norm
        self.use_dropout = use_dropout
        if use_dropout:
            self.dropout = nn.Dropout(0.5)
        self.rp2 = self.pad
        self.conv2 = nn.Conv2d(output_dim, output_dim, kernel_size=kernel_size, stride=1, padding=0, bias=False)
        self.bn2 = self.norm
        self.out_planes = output_dim
        # shortcut path: identity unless the spatial size or width changes,
        # in which case a strided 1x1 conv (plus norm, when present) is used
        self.shortcut = nn.Sequential()
        if stride != 1 or input_dim != output_dim:
            # self.shortcut = nn.Sequential(
            #    nn.Conv2d(input_dim, output_dim, kernel_size=1, stride=stride, bias=False),
            #    self.norm
            # )
            shortcut_layers = [nn.Conv2d(input_dim, output_dim, kernel_size=1, stride=stride, bias=False)]
            if self.norm:
                shortcut_layers += [self.norm]
            self.shortcut = nn.Sequential(*shortcut_layers)
            # self.final_conv = nn.Sequential(
            #    self.pad,
            #    nn.Conv2d(self.expansion * output_dim * 2, self.expansion * output_dim, kernel_size=3, stride=1,
            #              padding=0, bias=False),
            #    self.norm
            # )
            # final conv fuses the concatenated (conv path, shortcut) back to output_dim
            final_conv_layers = [self.pad]
            final_conv_layers += [nn.Conv2d(self.expansion * output_dim * 2, self.expansion * output_dim, kernel_size=3, stride=1,
                              padding=0, bias=False)]
            if self.norm:
                final_conv_layers += [self.norm]
            self.final_conv = nn.Sequential(*final_conv_layers)
        else:
            # self.final_conv = nn.Sequential(
            #    self.pad,
            #    nn.Conv2d(output_dim * 2, output_dim, kernel_size=3, stride=1, padding=0, bias=False),
            #    self.norm
            # )
            final_conv_layers = [self.pad]
            final_conv_layers += [nn.Conv2d(output_dim * 2, output_dim, kernel_size=3, stride=1, padding=0, bias=False)]
            if self.norm:
                final_conv_layers += [self.norm]
            self.final_conv = nn.Sequential(*final_conv_layers)
    def forward(self, x):
        """pad->conv1->norm->relu->(dropout)->pad->conv2->norm, concat with shortcut, fuse, relu."""
        out = self.conv1(self.rp1(x))
        if self.norm:
            out = self.norm(out)
        out = F.relu(out)
        if self.use_dropout:
            out = self.dropout(out)
        out = self.conv2(self.rp2(out))
        if self.norm:
            out = self.norm(out)
        inputt = self.shortcut(x)
        # concat+conv merge instead of the usual residual add
        catted = torch.cat((out, inputt), 1)
        out = self.final_conv(catted)
        out = F.relu(out)
        return out
class FirstBlock_Ganilla(nn.Module):
    """GANILLA stem: pad -> 7x7 conv -> (norm) -> ReLU -> pad -> 3x3 max-pool stride 2.

    input_dim = input_nc, output_dim = ngf (number of generator filters in the
    first layer).
    """
    def __init__(self, input_dim, output_dim, padding=1, norm='none', pad_type='reflect'):
        super(FirstBlock_Ganilla, self).__init__()
        self.expansion = 1
        # initialize padding (class, instantiated per use below)
        if pad_type == 'reflect':
            # Pads the input tensor using the reflection of the input boundary
            self.pad = nn.ReflectionPad2d
        elif pad_type == 'replicate':
            self.pad = nn.ReplicationPad2d
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # initialize normalization
        norm_dim = output_dim
        if norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            # self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        elif norm == 'none' or norm == 'sn':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # FIX: the pad width was `input_dim` (the channel count), which equals
        # the required 3 only for RGB inputs; a 7x7 'same' conv needs pad 3.
        self.pad1 = self.pad(3)
        self.conv1 = nn.Conv2d(input_dim, output_dim, kernel_size=7, stride=1, padding=0, bias=True)
        # self.norm = nn.InstanceNorm2d(output_dim)
        self.relu = nn.ReLU(inplace=True)
        self.pad2 = self.pad(padding)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0)

    def forward(self, x):
        x = self.pad1(x)
        x = self.conv1(x)
        if self.norm:
            x = self.norm(x)
        x = self.relu(x)
        x = self.pad2(x)
        out = self.maxpool(x)
        return out
class Conv2dBlock(nn.Module):
    """Pad -> conv -> (norm) -> (activation) building block.

    norm='sn' applies spectral normalization to the convolution itself and
    leaves self.norm as None.
    """
    def __init__(self, input_dim ,output_dim, kernel_size, stride,
                 padding=0, norm='none', activation='relu', pad_type='zero'):
        super(Conv2dBlock, self).__init__()
        self.use_bias = True
        # padding layer, chosen by name
        pad_layers = {'reflect': nn.ReflectionPad2d,
                      'replicate': nn.ReplicationPad2d,
                      'zero': nn.ZeroPad2d}
        assert pad_type in pad_layers, "Unsupported padding type: {}".format(pad_type)
        self.pad = pad_layers[pad_type](padding)
        # normalization layer
        norm_dim = output_dim
        if norm == 'none' or norm == 'sn':
            self.norm = None
        elif norm == 'bn':
            self.norm = nn.BatchNorm2d(norm_dim)
        elif norm == 'in':
            #self.norm = nn.InstanceNorm2d(norm_dim, track_running_stats=True)
            self.norm = nn.InstanceNorm2d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        elif norm == 'adain':
            self.norm = AdaptiveInstanceNorm2d(norm_dim)
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation, chosen by name (factories defer construction)
        act_factories = {'relu': lambda: nn.ReLU(inplace=True),
                         'lrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
                         'prelu': nn.PReLU,
                         'selu': lambda: nn.SELU(inplace=True),
                         'tanh': nn.Tanh,
                         'none': lambda: None}
        assert activation in act_factories, "Unsupported activation: {}".format(activation)
        self.activation = act_factories[activation]()
        # convolution; padding is handled by self.pad, never by the conv itself
        if norm == 'sn':
            self.conv = SpectralNorm(nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias))
        else:
            self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)

    def forward(self, x):
        x = self.conv(self.pad(x))
        if self.norm:
            x = self.norm(x)
        if self.activation:
            x = self.activation(x)
        return x
class LinearBlock(nn.Module):
    """Linear layer -> (norm) -> (activation); norm='sn' wraps the layer in SpectralNorm."""
    def __init__(self, input_dim, output_dim, norm='none', activation='relu'):
        super(LinearBlock, self).__init__()
        use_bias = True
        # fully connected layer ('sn' applies spectral normalization to it)
        if norm == 'sn':
            self.fc = SpectralNorm(nn.Linear(input_dim, output_dim, bias=use_bias))
        else:
            self.fc = nn.Linear(input_dim, output_dim, bias=use_bias)
        # normalization layer
        norm_dim = output_dim
        if norm == 'none' or norm == 'sn':
            self.norm = None
        elif norm == 'bn':
            self.norm = nn.BatchNorm1d(norm_dim)
        elif norm == 'in':
            self.norm = nn.InstanceNorm1d(norm_dim)
        elif norm == 'ln':
            self.norm = LayerNorm(norm_dim)
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # activation, chosen by name (factories defer construction)
        act_factories = {'relu': lambda: nn.ReLU(inplace=True),
                         'lrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
                         'prelu': nn.PReLU,
                         'selu': lambda: nn.SELU(inplace=True),
                         'tanh': nn.Tanh,
                         'none': lambda: None}
        assert activation in act_factories, "Unsupported activation: {}".format(activation)
        self.activation = act_factories[activation]()

    def forward(self, x):
        out = self.fc(x)
        if self.norm:
            out = self.norm(out)
        if self.activation:
            out = self.activation(out)
        return out
class BigGUnetDecoder(nn.Module):
    """BigGAN-style discriminator trunk (stack of BigGDBlocks + global-sum-pool + linear).

    FIXES relative to the original:
      - `super().__init__()` was never called, so no nn.Module machinery was set up;
      - `self.SN_eps` was read by the `functools.partial` calls before being assigned;
      - `self.arch` and `self.init` were referenced but never defined
        (replaced by an explicit downsample schedule and `D_init`);
      - the constructor passed `which_conv` to BigGDBlock even though its
        original signature does not accept it (no longer passed);
      - `forward` used `self.embed`, which is commented out in __init__
        (projection term now applied only when an embedding and labels exist).
    """
    def __init__(self, D_init='ortho', output_dim=1, D_activation=nn.ReLU(inplace=False), D_wide=True, SN_eps=1e-12):
        super(BigGUnetDecoder, self).__init__()
        # Epsilon for spectral norm — must be set BEFORE the partials below read it.
        self.SN_eps = SN_eps
        # Weight-init scheme, used by init_weights().
        self.init = D_init
        # Use Wide D as in BigGAN
        self.D_wide = D_wide
        # Activation
        self.activation = D_activation
        # Network architecture
        ch = 64
        self.resolution = [128, 64, 32, 16, 8, 4]
        self.out_channels = [item * ch for item in [1, 2, 4, 8, 16, 16]]
        self.in_channels = [3] + [ch * item for item in [1, 2, 4, 8, 16]]
        # Downsample after every block except the last (BigGAN D convention).
        # NOTE(review): the original read self.arch['downsample'], which was
        # never defined; this schedule is the assumed intent — confirm.
        self.downsample_cfg = [True] * (len(self.out_channels) - 1) + [False]
        self.which_conv = functools.partial(SNConv2d,
                                            kernel_size=3, padding=1,
                                            num_svs=1, num_itrs=1,
                                            eps=self.SN_eps)
        self.which_linear = functools.partial(SNLinear,
                                              num_svs=1, num_itrs=1,
                                              eps=self.SN_eps)
        self.which_embedding = functools.partial(SNEmbedding,
                                                 num_svs=1, num_itrs=1,
                                                 eps=self.SN_eps)
        blocks = []
        for index in range(len(self.out_channels)):
            blocks += [[BigGDBlock(in_channels=self.in_channels[index],
                                   out_channels=self.out_channels[index],
                                   wide=self.D_wide,
                                   activation=self.activation,
                                   preactivation=(index > 0),
                                   downsample=(nn.AvgPool2d(2) if self.downsample_cfg[index] else None))]]
        # Turn blocks into a ModuleList so everything is properly registered.
        self.blocks = nn.ModuleList([nn.ModuleList(block) for block in blocks])
        # larger if we're e.g. turning this into a VAE with an inference output
        self.linear = self.which_linear(self.out_channels[-1], output_dim)
        # Embedding for projection discrimination
        # self.embed = self.which_embedding(self.n_classes, self.out_channels[-1])
        self.init_weights()
        # self.optim = optim.Adam(params=self.parameters(), lr=self.lr,
        #                        betas=(self.B1, self.B2), weight_decay=0, eps=self.adam_eps)

    # Initialize all conv/linear/embedding weights per the chosen scheme.
    def init_weights(self):
        self.param_count = 0
        for module in self.modules():
            if (isinstance(module, nn.Conv2d)
                    or isinstance(module, nn.Linear)
                    or isinstance(module, nn.Embedding)):
                if self.init == 'ortho':
                    init.orthogonal_(module.weight)
                elif self.init == 'N02':
                    init.normal_(module.weight, 0, 0.02)
                elif self.init in ['glorot', 'xavier']:
                    init.xavier_uniform_(module.weight)
                else:
                    print('Init style not recognized...')
                self.param_count += sum([p.data.nelement() for p in module.parameters()])
        print('Param count for G''s initialized parameters: %d' % self.param_count)

    def forward(self, x, y=None):
        """Return the (optionally class-projected) discriminator score for x."""
        # Stick x into h for cleaner for loops without flow control
        h = x
        for index, blocklist in enumerate(self.blocks):
            for block in blocklist:
                h = block(h)
        # Apply global sum pooling as in SN-GAN
        h = torch.sum(self.activation(h), [2, 3])
        # Class-unconditional output
        out = self.linear(h)
        # Projection discrimination: only when labels and an embedding exist
        # (self.embed is commented out in __init__, so guard against it).
        if y is not None and hasattr(self, 'embed'):
            out = out + torch.sum(self.embed(y) * h, 1, keepdim=True)
        return out
# Residual block for the discriminator
class BigGDBlock(nn.Module):
    """Residual block for the BigGAN discriminator.

    conv1 -> activation -> conv2 (+ optional downsample), added to a learnable
    1x1-conv shortcut when the shape changes.  `preactivation` selects the
    ResNet-v2 (pre-act) vs post-act ordering of the shortcut path.
    """
    def __init__(self, in_channels, out_channels, wide=True,
                 preactivation=False, activation=None, downsample=None, which_conv=None):
        super(BigGDBlock, self).__init__()
        self.in_channels, self.out_channels = in_channels, out_channels
        # If using wide D (as in SA-GAN and BigGAN), change the channel pattern
        self.hidden_channels = self.out_channels if wide else self.in_channels
        # GENERALIZED: callers may inject their own (spectrally normalized)
        # conv factory; default preserves the old hard-coded SNConv2d.
        self.which_conv = which_conv if which_conv is not None else SNConv2d
        self.preactivation = preactivation
        self.activation = activation
        self.downsample = downsample
        # Conv layers
        self.conv1 = self.which_conv(self.in_channels, self.hidden_channels)
        self.conv2 = self.which_conv(self.hidden_channels, self.out_channels)
        # 1x1 shortcut conv only when the shape changes
        self.learnable_sc = True if (in_channels != out_channels) or downsample else False
        if self.learnable_sc:
            self.conv_sc = self.which_conv(in_channels, out_channels,
                                           kernel_size=1, padding=0)

    def shortcut(self, x):
        """Shortcut path; conv/downsample order depends on preactivation mode."""
        if self.preactivation:
            if self.learnable_sc:
                x = self.conv_sc(x)
            if self.downsample:
                x = self.downsample(x)
        else:
            if self.downsample:
                x = self.downsample(x)
            if self.learnable_sc:
                x = self.conv_sc(x)
        return x

    def forward(self, x):
        if self.preactivation:
            # h = self.activation(x) # NOT TODAY SATAN
            # Andy's note: This line *must* be an out-of-place ReLU or it
            # will negatively affect the shortcut connection.
            h = F.relu(x)
        else:
            h = x
        h = self.conv1(h)
        h = self.conv2(self.activation(h))
        if self.downsample:
            h = self.downsample(h)
        return h + self.shortcut(x)
##################################################################################
# VGG network definition
##################################################################################
class Vgg16(nn.Module):
def __init__(self):
super(Vgg16, self).__init__()
self.conv1_1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1)
self.conv1_2 = nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1)
self.conv2_1 = nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1)
self.conv2_2 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
self.conv3_1 = nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1)
self.conv3_2 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv3_3 = nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1)
self.conv4_1 = nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1)
self.conv4_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv4_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_1 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_2 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
self.conv5_3 = nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1)
def forward(self, X):
h = F.relu(self.conv1_1(X), inplace=True)
h = F.relu(self.conv1_2(h), inplace=True)
# relu1_2 = h
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv2_1(h), inplace=True)
h = F.relu(self.conv2_2(h), inplace=True)
# relu2_2 = h
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv3_1(h), inplace=True)
h = F.relu(self.conv3_2(h), inplace=True)
h = F.relu(self.conv3_3(h), inplace=True)
# relu3_3 = h
h = F.max_pool2d(h, kernel_size=2, stride=2)
h = F.relu(self.conv4_1(h), inplace=True)
h = F.relu(self.conv4_2(h), inplace=True)
h = F.relu(self.conv4_3(h), inplace=True)
# relu4_3 = h
h = F.relu(self.conv5_1(h), inplace=True)
h = F.relu(self.conv5_2(h), inplace=True)
h = F.relu(self.conv5_3(h), inplace=True)
relu5_3 = h
return relu5_3
# return [relu1_2, relu2_2, relu3_3, relu4_3]
##################################################################################
# Normalization layers
##################################################################################
class AdaptiveInstanceNorm2d(nn.Module):
    """Adaptive instance normalization (AdaIN).

    `weight` and `bias` stay None and must be assigned externally (e.g. by a
    style MLP) before calling forward. Based on the batch_norm call below
    they act per folded (sample, channel) slot, so they presumably have
    length b*c — TODO confirm against the assigning code, which is outside
    this file chunk.
    """
    def __init__(self, num_features, eps=1e-5, momentum=0.1):
        super(AdaptiveInstanceNorm2d, self).__init__()
        self.num_features = num_features
        self.eps = eps
        self.momentum = momentum
        # weight and bias are dynamically assigned
        self.weight = None
        self.bias = None
        # just dummy buffers, not used (repeated below only to satisfy the
        # F.batch_norm signature)
        self.register_buffer('running_mean', torch.zeros(num_features))
        self.register_buffer('running_var', torch.ones(num_features))
    def forward(self, x):
        assert self.weight is not None and self.bias is not None, "Please assign weight and bias before calling AdaIN!"
        b, c = x.size(0), x.size(1)
        running_mean = self.running_mean.repeat(b)
        running_var = self.running_var.repeat(b)
        # Instance-norm trick: fold the batch into the channel axis and run
        # batch_norm in *training* mode, so statistics are computed per
        # (sample, channel) slice rather than per channel across the batch.
        # Apply instance norm
        x_reshaped = x.contiguous().view(1, b * c, *x.size()[2:])
        out = F.batch_norm(
            x_reshaped, running_mean, running_var, self.weight, self.bias,
            True, self.momentum, self.eps)
        # Restore the original (b, c, H, W) layout
        return out.view(b, c, *x.size()[2:])
    def __repr__(self):
        return self.__class__.__name__ + '(' + str(self.num_features) + ')'
class LayerNorm(nn.Module):
    """Per-sample layer normalization over all non-batch dimensions.

    Normalizes each sample by the mean and (unbiased) standard deviation of
    all of its elements, then optionally applies a per-channel affine
    transform (gamma initialized uniformly in [0, 1), beta at zero).
    """

    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        # Broadcast shape for per-sample statistics: (-1, 1, 1, ...)
        stat_shape = [-1] + [1] * (x.dim() - 1)
        if x.size(0) == 1:
            # Single-sample fast path (faster in old pytorch, per original)
            flat = x.view(-1)
            mean = flat.mean().view(*stat_shape)
            std = flat.std().view(*stat_shape)
        else:
            per_sample = x.view(x.size(0), -1)
            mean = per_sample.mean(1).view(*stat_shape)
            std = per_sample.std(1).view(*stat_shape)
        normed = (x - mean) / (std + self.eps)
        if self.affine:
            # Affine parameters broadcast along the channel axis
            param_shape = [1, -1] + [1] * (x.dim() - 2)
            normed = normed * self.gamma.view(*param_shape) + self.beta.view(*param_shape)
        return normed
def l2normalize(v, eps=1e-12):
    """Scale tensor *v* to unit L2 norm; eps guards against division by zero."""
    denom = v.norm() + eps
    return v / denom
class SpectralNorm(nn.Module):
    """
    Based on the paper "Spectral Normalization for Generative Adversarial Networks" by Takeru Miyato, Toshiki Kataoka, Masanori Koyama, Yuichi Yoshida
    and the Pytorch implementation https://github.com/christiancosgrove/pytorch-spectral-normalization-gan

    Wraps `module` and, on every forward pass, rescales its parameter `name`
    by a power-iteration estimate of its largest singular value, bounding the
    layer's spectral norm to ~1.
    """
    def __init__(self, module, name='weight', power_iterations=1):
        super(SpectralNorm, self).__init__()
        self.module = module
        self.name = name
        self.power_iterations = power_iterations
        # Convert `weight` into (weight_bar, u, v) once; skip if already done
        if not self._made_params():
            self._make_params()
    def _update_u_v(self):
        # Power iteration on W viewed as a (height x rest) matrix:
        #   v <- normalize(W^T u), u <- normalize(W v), sigma ~= u^T W v
        u = getattr(self.module, self.name + "_u")
        v = getattr(self.module, self.name + "_v")
        w = getattr(self.module, self.name + "_bar")
        height = w.data.shape[0]
        for _ in range(self.power_iterations):
            # .data assignments keep the iteration out of the autograd graph
            v.data = l2normalize(torch.mv(torch.t(w.view(height,-1).data), u.data))
            u.data = l2normalize(torch.mv(w.view(height,-1).data, v.data))
        # sigma = torch.dot(u.data, torch.mv(w.view(height,-1).data, v.data))
        sigma = u.dot(w.view(height, -1).mv(v))
        # Re-expose the normalized weight under the original attribute name,
        # keeping the division differentiable w.r.t. w_bar
        setattr(self.module, self.name, w / sigma.expand_as(w))
    def _made_params(self):
        # True if the wrapped module was already converted by _make_params
        try:
            u = getattr(self.module, self.name + "_u")
            v = getattr(self.module, self.name + "_v")
            w = getattr(self.module, self.name + "_bar")
            return True
        except AttributeError:
            return False
    def _make_params(self):
        # Replace the original parameter with `<name>_bar` and register the
        # (non-trainable) left/right singular vector estimates u and v
        w = getattr(self.module, self.name)
        height = w.data.shape[0]
        width = w.view(height, -1).data.shape[1]
        u = nn.Parameter(w.data.new(height).normal_(0, 1), requires_grad=False)
        v = nn.Parameter(w.data.new(width).normal_(0, 1), requires_grad=False)
        u.data = l2normalize(u.data)
        v.data = l2normalize(v.data)
        w_bar = nn.Parameter(w.data)
        del self.module._parameters[self.name]
        self.module.register_parameter(self.name + "_u", u)
        self.module.register_parameter(self.name + "_v", v)
        self.module.register_parameter(self.name + "_bar", w_bar)
    def forward(self, *args):
        # Refresh the normalized weight, then delegate to the wrapped module
        self._update_u_v()
        return self.module.forward(*args)
# Spectral normalization base class based on BigGan implementation
class SN(object):
    """Spectral-normalization mixin (BigGAN style).

    Intended to be combined with an nn.Module subclass that provides
    `self.weight`, `self.training` and `register_buffer` (see SNConv2d).
    Maintains `num_svs` singular-vector estimates via power iteration and
    exposes the normalized weight through `W_()`.
    """
    def __init__(self, num_svs, num_itrs, num_outputs, transpose=False, eps=1e-12):
        # Number of power iterations per step
        self.num_itrs = num_itrs
        # Number of singular values
        self.num_svs = num_svs
        # Transposed?
        self.transpose = transpose
        # Epsilon value for avoiding divide-by-0
        self.eps = eps
        # Register a singular vector for each sv
        for i in range(self.num_svs):
            self.register_buffer('u%d' % i, torch.randn(1, num_outputs))
            self.register_buffer('sv%d' % i, torch.ones(1))
    # Singular vectors (u side)
    @property
    def u(self):
        return [getattr(self, 'u%d' % i) for i in range(self.num_svs)]
    # Singular values;
    # note that these buffers are just for logging and are not used in training.
    @property
    def sv(self):
        return [getattr(self, 'sv%d' % i) for i in range(self.num_svs)]
    # Compute the spectrally-normalized weight
    def W_(self):
        W_mat = self.weight.view(self.weight.size(0), -1)
        if self.transpose:
            W_mat = W_mat.t()
        # Apply num_itrs power iterations
        # NOTE(review): relies on a module-level `utils.power_iteration`
        # helper that is not defined in this part of the file — confirm the
        # `utils` import exists elsewhere.
        for _ in range(self.num_itrs):
            svs, us, vs = utils.power_iteration(W_mat, self.u, update=self.training, eps=self.eps)
        # Update the svs
        if self.training:
            with torch.no_grad(): # Make sure to do this in a no_grad() context or you'll get memory leaks!
                for i, sv in enumerate(svs):
                    self.sv[i][:] = sv
        # Normalize by the largest estimated singular value
        return self.weight / svs[0]
##################################################################################
# Convolution layers
##################################################################################
# 2D Conv layer with spectral norm
class SNConv2d(nn.Conv2d, SN):
    """nn.Conv2d whose weight is spectrally normalized on every forward pass."""
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True,
                 num_svs=1, num_itrs=1, eps=1e-12):
        nn.Conv2d.__init__(self, in_channels, out_channels, kernel_size, stride,
                           padding, dilation, groups, bias)
        SN.__init__(self, num_svs, num_itrs, out_channels, eps=eps)
    def forward(self, x):
        # Convolve with the normalized weight W_() instead of raw self.weight
        return F.conv2d(x, self.W_(), self.bias, self.stride,
                        self.padding, self.dilation, self.groups)
ea8d3c9ed97f275534ba12a7a2adf9b3f80643b1 | 24,084 | py | Python | nyc_bike_flow.py | AngeloManzatto/NYCBikeFlow | cd7f936c4d4627e4a90e17d416fb1f628b2445c6 | [
"MIT"
] | 1 | 2020-09-09T01:36:57.000Z | 2020-09-09T01:36:57.000Z | nyc_bike_flow.py | AngeloManzatto/NYCBikeFlow | cd7f936c4d4627e4a90e17d416fb1f628b2445c6 | [
"MIT"
] | null | null | null | nyc_bike_flow.py | AngeloManzatto/NYCBikeFlow | cd7f936c4d4627e4a90e17d416fb1f628b2445c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 5 14:01:56 2019
@author: Angelo Antonio Manzatto
This implementation use ST-ResNet for inflow / outflow bike prediction on the city of NY
Article: https://arxiv.org/pdf/1610.00081.pdf
References and credits:
Junbo Zhang, Yu Zheng, Dekang Qi. Deep Spatio-Temporal Residual Networks for Citywide Crowd Flows Prediction. In AAAI 2017.
The dataset can be download checking the information on the following link:
https://github.com/lucktroy/DeepST/tree/master/data/BikeNYC
"""
##################################################################################
# Libraries
##################################################################################
import os
import math
from datetime import datetime
from datetime import timedelta
import numpy as np
import h5py
import matplotlib.pyplot as plt
import matplotlib.cm
import seaborn as sns
sns.set()
import keras.backend as K
from keras.models import Model
from keras.layers import Input, Dense, Reshape, Activation, Add, LeakyReLU
from keras.layers import Conv2D , BatchNormalization, Lambda, concatenate
from keras.callbacks import ModelCheckpoint, CSVLogger, EarlyStopping
from keras.optimizers import Adam
from keras.engine.topology import Layer
np.random.seed(42) # My nickname Recruta42
############################################################################################
# Load Dataset
############################################################################################

# Paths to the BikeNYC HDF5 dump and the background map used for plotting
dataset_folder = 'dataset'
dataset_file = os.path.join(dataset_folder,'NYC14_M16x8_T60_NewEnd.h5')

images_folder = 'images'
nyc_map = plt.imread(os.path.join(images_folder,'nyc.jpg'))

# Plot New York Map
f, ax = plt.subplots(figsize=(8,8))
ax.imshow(nyc_map)
# Load dataset file read-only. The explicit 'r' mode avoids h5py's historic
# default ('a'), which opens the file for writing and can fail or lock it.
f = h5py.File(dataset_file, 'r')

data = f['data'][()]        # (samples, 2, 16, 8) in/out flow maps
timestamps = f['date'][()]  # byte strings in 'YYYYMMDDHH' format (hours 01..24)
# Convert data from [batch x flow matrices x map height x map width] to [batch x map height x map width x flow matrices]
data = np.transpose(data, (0, 2, 3, 1))

# Plot some samples from dataset
n_samples = 5
for i in range(n_samples):

    # define the size of images
    f, (ax1, ax2) = plt.subplots(1, 2)
    f.set_figwidth(12)
    f.set_figheight(8)

    # randomly select a sample
    idx = np.random.randint(0, len(data))

    inflow = data[idx][:,:,0] #input flow is the first matrix
    outflow = data[idx][:,:,1] #output flow is the second matrix

    # BUGFIX: the dataset encodes hours as slots 01..24, and
    # datetime.strptime(..., '%Y%m%d%H') raises ValueError for hour '24',
    # so the original crashed whenever such a sample was drawn. Parse the
    # fields manually and shift the slot down by one (same convention as
    # str_to_date below).
    ts = timestamps[idx].decode("utf-8")
    date = datetime(int(ts[:4]), int(ts[4:6]), int(ts[6:8]), int(ts[8:]) - 1)

    hmax1 = sns.heatmap(inflow, cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax1)
    hmax1.imshow(nyc_map,aspect = hmax1.get_aspect(),extent = hmax1.get_xlim() + hmax1.get_ylim(), zorder = 1)
    ax1.set_title('In Flow: {0}'.format(date))

    hmax2 = sns.heatmap(outflow, cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax2)
    hmax2.imshow(nyc_map,aspect = hmax2.get_aspect(),extent = hmax2.get_xlim() + hmax2.get_ylim(), zorder = 1)
    ax2.set_title('Out Flow: {0}'.format(date))
############################################################################################
# Pre-Process Dataset
############################################################################################

# Convert timestamps from ASCII format to string
formated_timestamps = []
for ts in timestamps:
    formated_timestamps.append(ts.decode("utf-8"))

# Scale in flow and out flow values on the map matrices to a range between [-1,1]
min_value = data.min()
max_value = data.max()

print("Minimum values: {0} , Maximum value: {1}".format(min_value,max_value))

# Min-max scale to [0, 1], then shift to [-1, 1] (matches the model's tanh output)
data_scaled = 1. * (data - min_value) / (max_value - min_value)
data_scaled = 2. * data_scaled - 1.

print("Minimum scaled values: {0} , Maximum scaled value: {1}".format(data_scaled.min(),data_scaled.max()))
############################################################################################
# Create Train / Target data
############################################################################################
'''
Minimum granularity will be 1 hour
To create the input for our model we need to aggregate the inflow and outflow matrices according
to three interval of times defined in the article as: closeness, period and trend.
For this project:
* Closeness is a difference in 1 hour period between two matrices
* Period is a difference is 24 hours period between two matrices
* Trend is a difference is 7 days period between two matrices
This means that for example, for a data (16 x 8 x 2) inflow/outflow matrices collected
at time stamp: 2014 08 07 01:00:00 we will have to do the following transformations:
Input closeness = len closeness stack of consecutive matrices distant between closeness interval.
Ex: Len = 3 and interval = 1 hour - stack [2014 08 07 01:00:00, 2014 08 07 02:00:00 , 2014 08 07 03:00:00] matrices
Input period = len period stack of consecutive matrices distant between period interval.
Ex: Len = 4 and interval = 24 hours - stack [2014 08 07 01:00:00, 2014 08 08 01:00:00 , 2014 08 09 01:00:00, 2014 08 10 01:00:00] matrices
Input trend = len trend stack of consecutive matrices distant between trend interval.
Ex: Len = 4 and interval = 168 hours - stack [2014 08 07 01:00:00, 2014 08 14 01:00:00 , 2014 08 21 01:00:00, 2014 08 28 01:00:00] matrices
This is an important information and the dataset should have little or almost NO disconnected interval between two
inflow / outflow matrices meaning that we should avoid missing hours.
'''
# Simple function that receives a string in format YmdH and convert to a datetime object
def str_to_date(timestamp):
    """Convert a 'YYYYMMDDHH' timestamp string into a datetime.

    The dataset encodes hours as slot numbers 01..24 rather than 00..23
    (so strptime('%H') cannot be used directly); the hour field is shifted
    down by one.
    """
    fields = (timestamp[:4], timestamp[4:6], timestamp[6:8], timestamp[8:])
    year, month, day, slot = (int(v) for v in fields)
    return datetime(year, month, day, slot - 1)
# Convert timestamp to a one hot encoded vector taking into account week day and if it is a weekday or not
def one_hot_day_week(timestamp):
    """Encode a 'YYYYMMDDHH' timestamp as an 8-element feature vector.

    Slots 0-6 one-hot encode the weekday (Monday=0 ... Sunday=6) and slot 7
    is a weekday flag: 1 on Monday-Friday, 0 on Saturday/Sunday.
    """
    # Parse the timestamp inline (hour slots are 01..24, hence the -1)
    day = datetime(int(timestamp[:4]), int(timestamp[4:6]),
                   int(timestamp[6:8]), int(timestamp[8:]) - 1)
    weekday = day.weekday()

    encoded = np.zeros((8))
    encoded[weekday] = 1
    encoded[7] = 0 if weekday >= 5 else 1
    return encoded
# Time-horizon configuration: closeness = adjacent hours, period = same hour
# on previous days, trend = same hour in previous weeks.
closeness_interval = 1 # distance between hours
period_interval = 24 * closeness_interval # number of time intervals in one day
trend_interval = 7 * period_interval

closeness_len = 3 # recent time (closeness)
period_len = 4 # near history (period)
trend_len = 4 # distant history (trend)

# Hour offsets looked up for each horizon, e.g. closeness -> [1, 2, 3] hours back
closeness_range = [x * closeness_interval for x in range(1,closeness_len+1)]
period_range = [x * period_interval for x in range(1,period_len + 1)]
trend_range = [x * trend_interval for x in range(1,trend_len+1)]

# Build a dictionary of time stamps. This will ease our work to convert between timestamps to indices to get
# the in/out flow matrices.
ts_dict = {}
ts_list = []
for i, ts in enumerate(formated_timestamps):
    converted_time = str_to_date(ts)
    # Add converted time from string to a list for iteration and for a dictionary for search purposes
    ts_list.append(str_to_date(ts))
    ts_dict[converted_time] = i

# Create X, y data
X_Closeness, X_Period, X_Trend, X_External, Y , Y_timestamp = [],[],[],[],[],[]

# Create the datasets for closeness, period and trend.
# Since we have future predictions as output we need to build the dataset based on the latest trend period as starting point
starting_period = trend_interval * trend_len
# For every target timestamp, stack the flow maps at the closeness / period /
# trend offsets along the channel axis; samples with any missing history
# frame are skipped.
for i in range(starting_period, len(formated_timestamps)):

    # Starting period
    date = str_to_date(formated_timestamps[i])

    check_dates = []

    # Get all dates in the closeness / period / trend intervals near the target
    for c in closeness_range:
        check_dates.append(date - timedelta(hours=c))
    for p in period_range:
        check_dates.append(date - timedelta(hours=p))
    for t in trend_range:
        check_dates.append(date - timedelta(hours=t))

    # Check if all those selected dates exist in our timestamp dictionary and if not go to the next iteration
    break_flag = False
    for check_date in check_dates:
        if check_date not in ts_dict:
            print("Date frame missing!: {0} ".format(formated_timestamps[i]))
            break_flag = True
    if break_flag:
        continue

    # Parse again to create the dataset, stacking the time range for closeness, period and trend
    # X Closeness
    xc = []
    for c in closeness_range:
        xc.append(data_scaled[ts_dict[date - timedelta(hours=c)]])
    xc = np.concatenate(xc,axis=-1)

    # X Period
    xp = []
    for p in period_range:
        xp.append(data_scaled[ts_dict[date - timedelta(hours=p)]])
    xp = np.concatenate(xp,axis=-1)

    # X Trend
    xt = []
    for t in trend_range:
        xt.append(data_scaled[ts_dict[date - timedelta(hours=t)]])
    xt = np.concatenate(xt,axis=-1)

    # Target
    y = data_scaled[ts_dict[date]]

    # Add each created set to the final datasets
    X_Closeness.append(xc)
    X_Period.append(xp)
    X_Trend.append(xt)
    X_External.append(one_hot_day_week(formated_timestamps[i]))
    Y.append(y)
    Y_timestamp.append(formated_timestamps[i])

X_Closeness = np.asarray(X_Closeness)
X_Period = np.asarray(X_Period)
X_Trend = np.asarray(X_Trend)
X_External = np.asarray(X_External)
Y = np.asarray(Y)

print("X_Closeness shape: ", X_Closeness.shape)
print("X_Period shape: ", X_Period.shape)
print("X_Trend shape: ", X_Trend.shape)
print("X_External shape: ", X_External.shape)
print( "Y shape:", Y.shape)
############################################################################################
# Split dataset into Train / Test
############################################################################################

# Hold out the last `days_test` days (hourly samples) as the test set
days_test = 10
n_test = 24 * days_test

# Split dataset into training / test sets
XC_train, XP_train, XT_train,XE_train, Y_train = X_Closeness[:-n_test], X_Period[:-n_test], X_Trend[:-n_test],X_External[:-n_test], Y[:-n_test]
XC_test, XP_test, XT_test, XE_test, Y_test = X_Closeness[-n_test:], X_Period[-n_test:], X_Trend[-n_test:],X_External[-n_test:], Y[-n_test:]

# Time stamp split so we can track the period
timestamp_train, timestamp_test = Y_timestamp[:-n_test], Y_timestamp[-n_test:]

# Concatenate closeness , period and trend (plus external features) into the
# 4-input list expected by the model
X_train = [XC_train,XP_train,XT_train,XE_train]
X_test = [XC_test,XP_test,XT_test,XE_test]

print("X Train size: ", len(X_train))
print("X Test size: ", len(X_test))
############################################################################################
# Spatial Temporal Residual Network
############################################################################################
############################################################################################
# ResNet Identity Block
############################################################################################
def identity_block(inputs, filters, block_id):
    """Pre-activation residual unit: (BN -> ReLU -> 3x3 conv) twice, plus skip.

    Spatial size and depth are preserved, so `filters` must equal the input
    depth for the final Add to be valid. `block_id` uniquifies layer names.
    """
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_1')(inputs)
    x = Activation('relu', name='block_' + block_id + '_identity_relu_1')(x)
    x = Conv2D(filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_1')(x)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
    x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
    x = Conv2D(filters, kernel_size=(3,3), strides=(1,1), padding='same', kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_2')(x)
    # Residual connection
    x = Add(name='block_' + block_id + '_add')([inputs,x])
    return x
############################################################################################
# ResNet bottleNeck block
############################################################################################
def bottleneck_block(inputs,kernel_size, filters, block_id):
    """ResNet bottleneck unit: 1x1 reduce -> kxk conv -> 1x1 expand, plus skip.

    `filters` is a 3-tuple (f1, f2, f3); f3 must equal the input depth for the
    Add to be valid. Convs are bias-free since each is followed by BN.
    """
    f1, f2, f3 = filters
    x = Conv2D(f1, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_1')(inputs)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_1')(x)
    x = Activation('relu', name='block_' + block_id + '_identity_relu_1')(x)
    x = Conv2D(f2, kernel_size = kernel_size, padding='same', use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_2')(x)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
    x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
    x = Conv2D(f3, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_3')(x)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_3')(x)
    # Residual connection, then the final activation (post-activation style)
    x = Add(name='block_' + block_id + '_add')([x, inputs])
    x = Activation('relu', name='block_' + block_id + '_identity_relu_3')(x)
    return x
############################################################################################
# ResNetXt group block
############################################################################################
def grouped_block(inputs, filters, cardinality, block_id):
    """ResNeXt-style grouped convolution.

    Splits the channel axis into `cardinality` equal groups, applies an
    independent 3x3 conv to each group and concatenates the results.
    `filters` must be divisible by `cardinality`.
    """
    assert not filters % cardinality

    convolution_groups = []
    n_convs = filters // cardinality

    for j in range(cardinality):
        # BUGFIX: bind the loop variables as default arguments. A plain
        # closure captures `j` by reference, so if Keras re-invokes the
        # Lambda function after the loop finishes (model re-call, graph
        # re-trace, deserialization) every group would slice the channels of
        # the *last* group.
        group = Lambda(lambda z, j=j, n=n_convs: z[:, :, :, j * n:j * n + n])(inputs)
        convolution_groups.append(Conv2D(n_convs, kernel_size=(3, 3), strides=(1,1) , padding='same')(group))

    x = concatenate(convolution_groups, name='block_Xt' + block_id + '_concatenate')
    return x
############################################################################################
# ResNet bottleNeck block
############################################################################################
def resnetXt_block(inputs, filters, cardinality, block_id):
    """ResNeXt bottleneck unit: 1x1 reduce -> grouped 3x3 -> 1x1 expand + skip.

    `filters` is (f1, f2, f3); f3 must match the input depth for the Add.
    NOTE(review): layer names mix '_xt_' and '_identity_' prefixes — looks
    unintentional, but renaming would break loading existing weights by name,
    so it is left as-is.
    """
    f1, f2, f3 = filters
    x = Conv2D(f1, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_xt_conv2d_1')(inputs)
    x = BatchNormalization(name='block_' + block_id + '_xt_batch_1')(x)
    x = LeakyReLU(name='block_' + block_id + '_identity_leakyrelu_1')(x)
    # Grouped 3x3 convolutions (the ResNeXt "cardinality" dimension)
    x = grouped_block(x, f2, cardinality, block_id)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_2')(x)
    x = Activation('relu',name='block_' + block_id + '_identity_relu_2')(x)
    x = Conv2D(f3, kernel_size=(1,1), use_bias=False, kernel_initializer='he_normal', name='block_' + block_id + '_identity_conv2d_3')(x)
    x = BatchNormalization(name='block_' + block_id + '_identity_batch_3')(x)
    # Residual connection, then the final activation
    x = Add(name='block_' + block_id + '_add')([x, inputs])
    x = LeakyReLU(name='block_' + block_id + '_identity_leakyrelu_relu_3')(x)
    return x
############################################################################################
# Fusion Block
############################################################################################
class FusionLayer(Layer):
    """Element-wise learnable scaling layer.

    Learns one weight per (height, width, channel) position and multiplies
    the input by it; used to weight the closeness / period / trend branches
    before they are summed (parametric fusion from the ST-ResNet paper).
    """
    def __init__(self, **kwargs):
        super(FusionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        # One trainable weight per non-batch position of the input
        self.kernel = self.add_weight(name='kernel',
                                      shape=(input_shape[1:]),
                                      initializer='uniform',
                                      trainable=True)
        super(FusionLayer, self).build(input_shape)  # Be sure to call this at the end

    def call(self, x, mask=None):
        return x * self.kernel

    def compute_output_shape(self, input_shape):
        # BUGFIX: Keras 2 queries `compute_output_shape`; the original only
        # defined the Keras 1 name below. Element-wise scaling preserves shape.
        return input_shape

    # Keras 1 compatibility alias
    def get_output_shape_for(self, input_shape):
        return input_shape
############################################################################################
# ST-ResNet version 1
############################################################################################
def STResNet_v1(c_conf=(32, 32, 2, 3),
                p_conf=(32, 32, 2, 3),
                t_conf=(32, 32, 2, 3),
                output_shape = (32, 32, 2),
                res_units=3,
                external_dim = None):
    """Build the ST-ResNet model (Deep Spatio-Temporal Residual Networks).

    Three residual CNN branches process the closeness / period / trend input
    stacks; their outputs are combined with learnable element-wise weights
    (FusionLayer), optionally fused with an embedding of the external
    features, and squashed with tanh to match the [-1, 1] scaled targets.

    Args:
        c_conf, p_conf, t_conf: (height, width, n_flows, seq_len) for the
            closeness, period and trend branches.
        output_shape: (height, width, n_flows) of the predicted flow maps.
        res_units: number of residual units per temporal branch.
        external_dim: length of the external feature vector; None or 0
            disables the external branch.

    Returns:
        An uncompiled keras Model with 3 (or 4) inputs.
    """
    height, width, n_flows = output_shape

    main_inputs = []

    # Each branch receives its flow maps stacked along the channel axis
    Input_c = Input(shape=(c_conf[0], c_conf[1], c_conf[2] * c_conf[3]), name='input_c')
    Input_p = Input(shape=(p_conf[0], p_conf[1], p_conf[2] * p_conf[3]), name='input_p')
    Input_t = Input(shape=(t_conf[0], t_conf[1], t_conf[2] * t_conf[3]), name='input_t')

    main_inputs.append(Input_c)
    main_inputs.append(Input_p)
    main_inputs.append(Input_t)

    # Input convolutions lift each temporal stack to 64 feature maps
    x_c = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_c')(Input_c)
    x_p = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_p')(Input_p)
    x_t = Conv2D(64, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_input_t')(Input_t)

    for i in range(res_units):
        x_c = identity_block(x_c, 64, block_id= str(i) +'_c')
        x_p = identity_block(x_p, 64, block_id= str(i) +'_p')
        x_t = identity_block(x_t, 64, block_id= str(i) +'_t')

    # Project each branch back to the flow-map depth.
    # BUGFIX: the original used Conv2D(1, ...) here, producing a single
    # channel that cannot match the (height, width, n_flows) target nor the
    # external branch output when n_flows > 1.
    x_c = Conv2D(n_flows, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output_c')(x_c)
    x_p = Conv2D(n_flows, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output__p')(x_p)
    x_t = Conv2D(n_flows, kernel_size=(3,3),strides=(1,1), padding="same", name= 'conv_output__t')(x_t)

    # Fusion Layers: learnable element-wise weights per branch
    x_c = FusionLayer()(x_c)
    x_p = FusionLayer()(x_p)
    x_t = FusionLayer()(x_t)

    fusion = Add(name='temporal_fusion')([x_c,x_p,x_t])

    #########################################################################
    # External Block
    #########################################################################
    if external_dim != None and external_dim > 0:

        # Embed the external features and reshape them into a flow-map tensor
        external_input = Input(shape=(external_dim,), name='external_input')
        main_inputs.append(external_input)
        embedding = Dense(10, name='external_dense_1')(external_input)
        embedding = Activation('relu')(embedding)
        # BUGFIX: the original multiplied by an undefined `channels` variable
        # (NameError); the dense layer must emit exactly height*width*n_flows
        # units to be reshaped below.
        embedding = Dense(height * width * n_flows)(embedding)
        embedding = Activation('relu')(embedding)
        external_output = Reshape((height, width, n_flows ) ,name='external_output')(embedding)

        # Fuse with external output
        fusion = Add(name='external_fusion')([fusion,external_output])

    final_output = Activation('tanh', name='Tanh')(fusion)

    model = Model(inputs=main_inputs,outputs=final_output)

    return model
############################################################################################
# Training pipeline
############################################################################################
# Metric for our model
def rmse(y_true, y_pred):
    """Root-mean-squared error over symbolic keras tensors."""
    mean_squared = K.mean(K.square(y_pred - y_true))
    return mean_squared ** 0.5
# Hyperparameters
epochs = 500
batch_size = 32
learning_rate = 0.0002

# callbacks
model_path = 'saved_models'

# File where the best model will be saved during checkpoint
model_file = os.path.join(model_path,'nyc_bike_flow.h5')

# Early stop to avoid overfitting our model
early_stopping = EarlyStopping(monitor='val_rmse', patience=5, mode='min')

# Check point for saving the best model
check_pointer = ModelCheckpoint(model_file, monitor='val_rmse', mode='min',verbose=1, save_best_only=True)

# Heatmap parameters
map_height = 16
map_width = 8
n_flows = 2

c_conf=(map_height, map_width, n_flows, closeness_len) # closeness
p_conf=(map_height, map_width, n_flows, period_len) # period
t_conf=(map_height, map_width, n_flows, trend_len) # trend
output_shape=(map_height, map_width, n_flows)
external_dim = 8

# Create ST-ResNet Model.
# BUGFIX: dropped the unsupported `unit_type='v2'` keyword — STResNet_v1
# does not declare that parameter, so the original call raised a TypeError.
model = STResNet_v1(c_conf,p_conf,t_conf, output_shape, res_units=3, external_dim = external_dim)

# Create Optimizer
optimizer = Adam(lr=learning_rate)

model.compile(optimizer, loss='mse' , metrics=[rmse])
model.summary()

# Train the model (10% of the training set is held out for validation,
# which drives both callbacks above via val_rmse)
history = model.fit(X_train, Y_train,
                    epochs=epochs,
                    batch_size=batch_size,
                    validation_split=0.1,
                    callbacks=[check_pointer,early_stopping],
                    verbose=1)
############################################################################################
# Predict
############################################################################################

# If we want to test on a pre trained model use the following line
model.load_weights(os.path.join(model_path,'bikenyc-0.0020.h5'), by_name=False)

n_samples = 3
for i in range(n_samples):

    f, (ax1, ax2, ax3,ax4) = plt.subplots(1, 4)
    f.set_figwidth(14)
    f.set_figheight(6)

    # randomly select a sample from the test split
    idx = np.random.randint(0, len(X_test[0]))

    # Add single dimension to each input to simulate batch
    X = [X_test[0][idx][np.newaxis,...],X_test[1][idx][np.newaxis,...],X_test[2][idx][np.newaxis,...],X_test[3][idx][np.newaxis,...]]
    y_true = Y_test[idx]

    # Predict values using our trained model
    y_pred = model.predict(X)
    y_pred = np.squeeze(y_pred)

    # BUGFIX: the original had a dangling `date =` statement (SyntaxError)
    # and the titles indexed the full `timestamps` array with a test-split
    # index; label plots with the timestamp of this test sample instead.
    date = timestamp_test[idx]

    hmax1 = sns.heatmap(y_true[:,:,0], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax1)
    hmax1.imshow(nyc_map,aspect = hmax1.get_aspect(),extent = hmax1.get_xlim() + hmax1.get_ylim(), zorder = 1)
    ax1.set_title('True In Flow: {0}'.format(date))

    hmax2 = sns.heatmap(y_pred[:,:,0], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax2)
    hmax2.imshow(nyc_map,aspect = hmax2.get_aspect(),extent = hmax2.get_xlim() + hmax2.get_ylim(), zorder = 1)
    ax2.set_title('Pred In Flow: {0}'.format(date))

    hmax3 = sns.heatmap(y_true[:,:,1], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax3)
    hmax3.imshow(nyc_map,aspect = hmax3.get_aspect(),extent = hmax3.get_xlim() + hmax3.get_ylim(), zorder = 1)
    ax3.set_title('True Out Flow: {0}'.format(date))

    hmax4 = sns.heatmap(y_pred[:,:,1], cmap = matplotlib.cm.winter, alpha = 0.3, annot = False,zorder = 2, ax=ax4)
    hmax4.imshow(nyc_map,aspect = hmax4.get_aspect(),extent = hmax4.get_xlim() + hmax4.get_ylim(), zorder = 1)
    ax4.set_title('Pred Out Flow: {0}'.format(date))
############################################################################################
# Evaluate
############################################################################################

# This information was provided in the original article and file!
'''
For NYC Bike data, there are 81 available grid-based areas, each of
which includes at least ONE bike station. Therefore, we modify the final
RMSE by multiplying the following factor (i.e., factor).
'''
nb_area = 81
m_factor = math.sqrt(1. * map_height * map_width / nb_area)

# The "real" RMSE undoes the [-1, 1] min-max scaling (hence the
# * (max_value - min_value) / 2.) and applies the area factor above.
score = model.evaluate(X_train, Y_train, batch_size=Y_train.shape[0] // 48, verbose=0)
print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
      (score[0], score[1], score[1] * (max_value - min_value) / 2. * m_factor))

score = model.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
      (score[0], score[1], score[1] * (max_value - min_value) / 2. * m_factor))
ea8e6944b6396b7bf5801d8ee489a2d3e3266596 | 802 | py | Python | scripts/reduce.py | inlgmeeting/inlgmeeting.github.io | 5af7273eaa04407afc894374d11cbc8587fd343b | [
"MIT"
] | null | null | null | scripts/reduce.py | inlgmeeting/inlgmeeting.github.io | 5af7273eaa04407afc894374d11cbc8587fd343b | [
"MIT"
] | null | null | null | scripts/reduce.py | inlgmeeting/inlgmeeting.github.io | 5af7273eaa04407afc894374d11cbc8587fd343b | [
"MIT"
] | 1 | 2022-03-08T11:22:31.000Z | 2022-03-08T11:22:31.000Z | import argparse
import csv
import json
import sklearn.manifold
import torch
def parse_arguments(argv=None):
    """Parse command-line arguments for the embedding-reduction script.

    Args:
        argv: optional list of argument strings; defaults to sys.argv[1:]
            (None), preserving the original command-line behavior while
            making the function testable.

    Returns:
        argparse.Namespace with `papers` and `embeddings` attributes.
    """
    parser = argparse.ArgumentParser(description="MiniConf Portal Command Line")
    # The previous `default=False` values were dead code: argparse ignores
    # defaults for required positional arguments.
    parser.add_argument("papers", help="paper file")
    parser.add_argument("embeddings", help="embeddings file to shrink")
    return parser.parse_args(argv)
if __name__ == "__main__":
    args = parse_arguments()

    # Load the precomputed paper embeddings and project them to 2-D with t-SNE
    emb = torch.load(args.embeddings)
    out = sklearn.manifold.TSNE(n_components=2).fit_transform(emb.cpu().numpy())

    d = []
    with open(args.papers, "r") as f:
        abstracts = list(csv.DictReader(f))

    # Emit one {"id", "pos"} record per paper, aligned with the embeddings by
    # row order (assumes the CSV rows match the embedding rows — TODO confirm
    # against the embedding-generation script)
    for i, row in enumerate(abstracts):
        d.append({"id": row["UID"], "pos": out[i].tolist()})
    print(json.dumps(d))
ea8fe32f856495dc2c83b201fa1c4de92c36e8f3 | 1,964 | py | Python | .vim/bundle-deactivated/python-mode/pylibs/ropemode/environment.py | chrislaskey/.dot-files | 44be2364f2824a2c70300f7c0f2963a592c7083e | [
"MIT"
] | null | null | null | .vim/bundle-deactivated/python-mode/pylibs/ropemode/environment.py | chrislaskey/.dot-files | 44be2364f2824a2c70300f7c0f2963a592c7083e | [
"MIT"
] | null | null | null | .vim/bundle-deactivated/python-mode/pylibs/ropemode/environment.py | chrislaskey/.dot-files | 44be2364f2824a2c70300f7c0f2963a592c7083e | [
"MIT"
class Environment(object):
    """Editor-integration interface for ropemode.

    Every method here is a placeholder that does nothing and returns None;
    a concrete subclass is expected to bind these operations to a host
    editor.  Only the two private completion helpers carry real logic.
    """

    # --- user interaction -------------------------------------------------
    def ask(self, prompt, default=None, starting=None):
        """Prompt for a free-form string (stub)."""

    def ask_values(self, prompt, values, default=None, starting=None):
        """Prompt for one of a fixed set of values (stub)."""

    def ask_directory(self, prompt, default=None, starting=None):
        """Prompt for a directory path (stub)."""

    def ask_completion(self, prompt, values, starting=None):
        """Prompt with completion over *values* (stub)."""

    def message(self, message):
        """Show *message* to the user (stub)."""

    def yes_or_no(self, prompt):
        """Ask a yes/no question (stub)."""

    def y_or_n(self, prompt):
        """Ask a single-keystroke y/n question (stub)."""

    def get(self, name, default=None):
        """Read a configuration variable (stub)."""

    # --- buffer state -----------------------------------------------------
    def get_offset(self):
        """Return the caret offset in the current buffer (stub)."""

    def get_text(self):
        """Return the full text of the current buffer (stub)."""

    def get_region(self):
        """Return the current selection (stub)."""

    def filename(self):
        """Return the current buffer's file name (stub)."""

    def is_modified(self):
        """Report whether the buffer has unsaved changes (stub)."""

    # --- buffer editing ---------------------------------------------------
    def goto_line(self, lineno):
        """Move the caret to *lineno* (stub)."""

    def insert_line(self, line, lineno):
        """Insert *line* at *lineno* (stub)."""

    def insert(self, text):
        """Insert *text* at the caret (stub)."""

    def delete(self, start, end):
        """Delete text between *start* and *end* (stub)."""

    # --- file management --------------------------------------------------
    def filenames(self):
        """Return the names of open files (stub)."""

    def save_files(self, filenames):
        """Save the listed files (stub)."""

    def reload_files(self, filenames, moves=None):
        """Reload the listed files, optionally applying moves (stub)."""

    def find_file(self, filename, readonly=False, other=False):
        """Open *filename* in the editor (stub)."""

    # --- misc UI ----------------------------------------------------------
    def create_progress(self, name):
        """Create a progress reporter labelled *name* (stub)."""

    def current_word(self):
        """Return the word under the caret (stub)."""

    def push_mark(self):
        """Record the current caret position (stub)."""

    def pop_mark(self):
        """Return to the most recently pushed position (stub)."""

    def prefix_value(self, prefix):
        """Interpret a command prefix argument (stub)."""

    def show_occurrences(self, locations):
        """Display a list of found occurrences (stub)."""

    def show_doc(self, docs, altview=False):
        """Display documentation text (stub)."""

    def preview_changes(self, diffs):
        """Preview pending changes (stub)."""

    # --- command registration ---------------------------------------------
    def local_command(self, name, callback, key=None, prefix=False):
        """Register a buffer-local command (stub)."""

    def global_command(self, name, callback, key=None, prefix=False):
        """Register a global command (stub)."""

    def add_hook(self, name, callback, hook):
        """Attach *callback* to an editor hook (stub)."""

    @staticmethod
    def _completion_text(proposal):
        """Return the proposal name with any ':'-delimited suffix removed."""
        head, _sep, _tail = proposal.name.partition(':')
        return head.strip()

    def _completion_data(self, proposal):
        """By default, completion data is just the completion text."""
        return self._completion_text(proposal)
ea8ffc8379c8866cb56248edf88ff78b6a856f02 | 1,838 | py | Python | Breast_cancer_prediction1.py | HagerBesar/Breast_cancer_prediction1 | f391a37f8064cabefdf9c416f2dbb40e3bd0e98a | [
"MIT"
] | 1 | 2021-03-23T15:03:39.000Z | 2021-03-23T15:03:39.000Z | Breast_cancer_prediction1.py | HagerBesar/Breast_cancer_prediction1 | f391a37f8064cabefdf9c416f2dbb40e3bd0e98a | [
"MIT"
] | null | null | null | Breast_cancer_prediction1.py | HagerBesar/Breast_cancer_prediction1 | f391a37f8064cabefdf9c416f2dbb40e3bd0e98a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
####################################<<<<Breast_cancer_prediction>>>>>>####################################
# In[ ]:
#part(1)--By:Manar Moeanse
# In[1]:
# Exploratory analysis + regression fit on the breast cancer CSV.
# (Exported from a Jupyter notebook; bare expressions such as `DB` were
# notebook cell outputs and have no effect when run as a script.)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# In[2]:
# Load the dataset; expects Breast_cancer_data.csv in the working directory.
DB = pd.read_csv('Breast_cancer_data.csv')
DB
# In[3]:
DB.head(5)
# In[4]:
DB.describe()
# In[ ]:
DB.info()
# In[ ]:
#part(2)--By:Mariam Mamdoh
# In[5]:
# Split rows by the binary diagnosis label (0 = unaffected, 1 = affected).
uneff = DB[DB.diagnosis == 0]
eff = DB[DB.diagnosis == 1]
len(uneff)
# In[ ]:
len(eff)
# In[6]:
# Report the class balance as percentages of the whole dataset.
uneffected = (len(uneff)/len(DB)) *100
print('people are uneffected = ', uneffected , '% .')
effected = (len(eff)/len(DB)) *100
print('people are effected = ', effected , '% .')
# In[ ]:
#part(3)--By:Hemat Shawky.
# In[7]:
# Scatter plots of selected feature pairs.
plt.scatter(DB['diagnosis'],DB['mean_area'])
# In[8]:
plt.scatter(DB['mean_area'],DB['mean_texture'])
# In[9]:
plt.scatter(DB['mean_radius'],DB['mean_perimeter'])
# In[10]:
import seaborn as sns
sns.pairplot(data=DB)
# In[ ]:
#part(4)--By:Hager Mohamed.
# In[11]:
# Features = everything except the label; target = diagnosis.
# NOTE(review): positional axis argument to drop() is deprecated in newer
# pandas -- confirm the pinned pandas version accepts it.
x = DB.drop('diagnosis', 1)
y = DB['diagnosis']
x
# In[12]:
# 80/20 train/test split (no fixed random_state, so results vary per run).
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2)
print(x_train.shape,x_test.shape)
print(y_train.shape,y_test.shape)
# In[13]:
# NOTE(review): LinearRegression is fit on a binary label here; a classifier
# (e.g. LogisticRegression) would normally be used -- kept as written.
from sklearn.linear_model import LinearRegression
model = LinearRegression ()
# In[14]:
model.fit(x_train,y_train)
# In[15]:
pred =model.predict(x_test)
# In[16]:
from sklearn.metrics import mean_squared_error
# In[17]:
# RMSE of the predictions on the held-out split.
error=np.sqrt(mean_squared_error(y_pred=pred,y_true=y_test))
print(error)
# In[18]:
# R^2 score on the held-out split.
print(model.score(x_test,y_test))
"""" BY:
1-Manar Moeanse.
2-Mariam Mamdoh.
3-Hemat Shawky.
4-Hager Mohamed.
| 10.268156 | 106 | 0.600653 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 646 | 0.351469 |
ea9175a6523f472b33f34201baced746430025fa | 290 | py | Python | regphot/pyprofit.py | raphaelshirley/regphot | 05b382e5360099dadd7599187b5f25c1fcbd5a61 | [
"MIT"
] | null | null | null | regphot/pyprofit.py | raphaelshirley/regphot | 05b382e5360099dadd7599187b5f25c1fcbd5a61 | [
"MIT"
] | null | null | null | regphot/pyprofit.py | raphaelshirley/regphot | 05b382e5360099dadd7599187b5f25c1fcbd5a61 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 1 17:13:54 2017
Should have similar functions as galfit and allow a model object to use functions to
calculate chisq and optimse using standard optimisation.
@author: rs548
"""
import pyprofit
def optimise():
| 18.125 | 85 | 0.713793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 248 | 0.855172 |
ea9198eb50dfdf75e05b7e2f2c6b062c8ea681fb | 6,061 | py | Python | chrome/common/extensions/docs/server2/branch_utility_test.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/branch_utility_test.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/branch_utility_test.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from branch_utility import BranchUtility
from fake_url_fetcher import FakeUrlFetcher
from object_store_creator import ObjectStoreCreator
class BranchUtilityTest(unittest.TestCase):
  """Unit tests for BranchUtility using canned JSON fixtures and a fake
  URL fetcher (no network access).

  Uses assertEqual throughout: assertEquals is a deprecated alias and was
  removed in Python 3.12.
  """
  def setUp(self):
    # Build the utility against the two fixture files and an in-memory
    # object store so tests are hermetic.
    self._branch_util = BranchUtility(
        os.path.join('branch_utility', 'first.json'),
        os.path.join('branch_utility', 'second.json'),
        FakeUrlFetcher(os.path.join(sys.path[0], 'test_data')),
        ObjectStoreCreator.ForTest())

  def testSplitChannelNameFromPath(self):
    # A recognized channel prefix is split off; anything else yields None.
    self.assertEqual(('stable', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'stable/extensions/stuff.html'))
    self.assertEqual(('dev', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'dev/extensions/stuff.html'))
    self.assertEqual(('beta', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'beta/extensions/stuff.html'))
    self.assertEqual(('trunk', 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'trunk/extensions/stuff.html'))
    self.assertEqual((None, 'extensions/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'extensions/stuff.html'))
    self.assertEqual((None, 'apps/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'apps/stuff.html'))
    # A channel name that is not the leading path component is not split.
    self.assertEqual((None, 'extensions/dev/stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'extensions/dev/stuff.html'))
    self.assertEqual((None, 'stuff.html'),
                     self._branch_util.SplitChannelNameFromPath(
                     'stuff.html'))

  def testNewestChannel(self):
    # Order of the input tuple must not matter; trunk > dev > beta > stable.
    self.assertEqual('trunk',
        self._branch_util.NewestChannel(('trunk', 'dev', 'beta', 'stable')))
    self.assertEqual('trunk',
        self._branch_util.NewestChannel(('stable', 'beta', 'dev', 'trunk')))
    self.assertEqual('dev',
        self._branch_util.NewestChannel(('stable', 'beta', 'dev')))
    self.assertEqual('dev',
        self._branch_util.NewestChannel(('dev', 'beta', 'stable')))
    self.assertEqual('beta',
        self._branch_util.NewestChannel(('beta', 'stable')))
    self.assertEqual('beta',
        self._branch_util.NewestChannel(('stable', 'beta')))
    self.assertEqual('stable', self._branch_util.NewestChannel(('stable',)))
    self.assertEqual('beta', self._branch_util.NewestChannel(('beta',)))
    self.assertEqual('dev', self._branch_util.NewestChannel(('dev',)))
    self.assertEqual('trunk', self._branch_util.NewestChannel(('trunk',)))

  def testGetChannelInfo(self):
    # Channel/branch/version triples come from the JSON fixtures.
    self.assertEqual('trunk',
        self._branch_util.GetChannelInfo('trunk').channel)
    self.assertEqual('trunk',
        self._branch_util.GetChannelInfo('trunk').branch)
    self.assertEqual('trunk',
        self._branch_util.GetChannelInfo('trunk').version)
    self.assertEqual('dev',
        self._branch_util.GetChannelInfo('dev').channel)
    self.assertEqual(1500,
        self._branch_util.GetChannelInfo('dev').branch)
    self.assertEqual(28,
        self._branch_util.GetChannelInfo('dev').version)
    self.assertEqual('beta',
        self._branch_util.GetChannelInfo('beta').channel)
    self.assertEqual(1453,
        self._branch_util.GetChannelInfo('beta').branch)
    self.assertEqual(27,
        self._branch_util.GetChannelInfo('beta').version)
    self.assertEqual('stable',
        self._branch_util.GetChannelInfo('stable').channel)
    self.assertEqual(1410,
        self._branch_util.GetChannelInfo('stable').branch)
    self.assertEqual(26,
        self._branch_util.GetChannelInfo('stable').version)

  def testGetLatestVersionNumber(self):
    self.assertEqual(28, self._branch_util.GetLatestVersionNumber())

  def testGetBranchForVersion(self):
    # Version -> branch mapping for historical releases (fixture data).
    self.assertEqual(1453,
        self._branch_util.GetBranchForVersion(27))
    self.assertEqual(1410,
        self._branch_util.GetBranchForVersion(26))
    self.assertEqual(1364,
        self._branch_util.GetBranchForVersion(25))
    self.assertEqual(1312,
        self._branch_util.GetBranchForVersion(24))
    self.assertEqual(1271,
        self._branch_util.GetBranchForVersion(23))
    self.assertEqual(1229,
        self._branch_util.GetBranchForVersion(22))
    self.assertEqual(1180,
        self._branch_util.GetBranchForVersion(21))
    self.assertEqual(1132,
        self._branch_util.GetBranchForVersion(20))
    self.assertEqual(1084,
        self._branch_util.GetBranchForVersion(19))
    self.assertEqual(1025,
        self._branch_util.GetBranchForVersion(18))
    self.assertEqual(963,
        self._branch_util.GetBranchForVersion(17))
    self.assertEqual(696,
        self._branch_util.GetBranchForVersion(11))
    self.assertEqual(396,
        self._branch_util.GetBranchForVersion(5))

  def testGetChannelForVersion(self):
    # Any version at or below stable maps to 'stable'; unknown future
    # versions map to None.
    self.assertEqual('trunk',
        self._branch_util.GetChannelForVersion('trunk'))
    self.assertEqual('dev',
        self._branch_util.GetChannelForVersion(28))
    self.assertEqual('beta',
        self._branch_util.GetChannelForVersion(27))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(26))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(22))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(18))
    self.assertEqual('stable',
        self._branch_util.GetChannelForVersion(14))
    self.assertEqual(None,
        self._branch_util.GetChannelForVersion(30))
    self.assertEqual(None,
        self._branch_util.GetChannelForVersion(42))
# Allow running this test module directly from the command line.
if __name__ == '__main__':
  unittest.main()
| 41.513699 | 77 | 0.683881 | 5,648 | 0.931859 | 0 | 0 | 0 | 0 | 0 | 0 | 1,033 | 0.170434 |
ea931348dc1add94d3976c115411349771183b34 | 6,818 | py | Python | src/mlServiceAPI.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | src/mlServiceAPI.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | src/mlServiceAPI.py | juliangruendner/ketos_brain_api | 6ec7e01a0996abb03dba090d832a5e1020df4180 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_restful_swagger_2 import Api
from resources.userResource import UserListResource, UserResource, UserLoginResource
from resources.imageResource import ImageListResource, ImageResource
from resources.environmentResource import EnvironmentListResource, EnvironmentResource, UserEnvironmentListResource
from resources.featureResource import FeatureListResource, FeatureResource, UserFeatureListResource
from resources.featureSetResource import FeatureSetListResource, FeatureSetResource, UserFeatureSetListResource, FeatureSetFeatureListResource
from resources.mlModelResource import MLModelListResource, MLModelResource, UserMLModelListResource, MLModelPredicitionResource
from resources.mlModelResource import MLModelExportResource, MLModelImportResource, MLModelImportSuitableEnvironmentResource, MLModelImportSuitableFeatureSetResource
from resources.dataResource import DataListResource, DataResource
from resources.resourceConfigResource import ResourceConfig
from resources.annotationResource import AnnotationTaskListResource, AnnotationTaskResource, UserAnnotationTaskListResource, AnnotationTaskEntryListResource, AnnotationTaskResultListResource, AnnotatorResource
from resources.annotationResource import AnnotationTaskScaleEntryListResource, AnnotationTaskAnnotatorListResource, AnnotationResultListResource, AnnotatorResultListResource, EntriesForAnnotatorResource, AnnotationTaskScaleEntry
from resources.predictionOutcomeResource import ModelPredictionOutcomeListResource, PredictionOutcomeListResource, PredictionOutcomeResource
from resources.atlasCohortResource import AtlasCohortResource
from rdb.rdb import connect_to_db, create_all, create_admin_user, create_default_images, create_default_features
from flask_cors import CORS
import json
import logging
import logging.config
# Configure logging from the JSON config file before anything else runs.
logging.config.dictConfig(json.load(open("logging_config.json", "r")))
# Flask application with CORS enabled for cross-origin portal clients.
app = Flask(__name__)
CORS(app)
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
api = Api(app,
          add_api_spec_resource=True, api_version='0.0', api_spec_url='/api/swagger', schemes=["http"], #, "https", {"securitySchemes": {"basicAuth": {"type": "http"}}}],
          security=[{"basicAuth": []}], security_definitions={"basicAuth": {"type": "basic"}}) # Wrap the Api and add /api/swagger endpoint
# Initialize the database and seed default records (admin user, images,
# features) at import time.
connect_to_db(app)
create_all()
create_admin_user()
create_default_images()
create_default_features()
# --- user endpoints ---
api.add_resource(UserListResource, '/users', endpoint='users')
api.add_resource(UserLoginResource, '/users/login', endpoint='user_login')
api.add_resource(UserResource, '/users/<int:user_id>', endpoint='user')
api.add_resource(UserEnvironmentListResource, '/users/<int:user_id>/environments', endpoint='environments_for_user')
api.add_resource(UserMLModelListResource, '/users/<int:user_id>/models', endpoint='models_for_user')
api.add_resource(UserFeatureListResource, '/users/<int:user_id>/features', endpoint='features_for_user')
api.add_resource(UserFeatureSetListResource, '/users/<int:user_id>/featuresets', endpoint='feature_sets_for_user')
# --- environment endpoints ---
api.add_resource(EnvironmentListResource, '/environments', endpoint='environments')
api.add_resource(EnvironmentResource, '/environments/<int:env_id>', endpoint='environment')
# --- model endpoints (CRUD, import/export, prediction) ---
api.add_resource(MLModelListResource, '/models', endpoint='models')
api.add_resource(MLModelResource, '/models/<int:model_id>', endpoint='model')
api.add_resource(MLModelExportResource, '/models/<int:model_id>/export', endpoint='model_export')
api.add_resource(MLModelImportResource, '/models/import', endpoint='model_import')
api.add_resource(MLModelImportSuitableEnvironmentResource, '/models/import/suitable-environments', endpoint='model_import_suitable_environments')
api.add_resource(MLModelImportSuitableFeatureSetResource, '/models/import/suitable-feature-sets', endpoint='model_import_suitable_feature_sets')
api.add_resource(MLModelPredicitionResource, '/models/<int:model_id>/prediction', endpoint='model_prediction')
# --- image and data endpoints ---
api.add_resource(ImageListResource, '/images', endpoint='images')
api.add_resource(ImageResource, '/images/<int:image_id>', endpoint='image')
api.add_resource(DataListResource, '/data', endpoint='datalist')
api.add_resource(DataResource, '/data/<datarequest_id>', endpoint='data')
# --- feature and feature-set endpoints ---
api.add_resource(FeatureListResource, '/features', endpoint='features')
api.add_resource(FeatureResource, '/features/<int:feature_id>', endpoint='feature')
api.add_resource(FeatureSetListResource, '/featuresets', endpoint='feature_sets')
api.add_resource(FeatureSetResource, '/featuresets/<int:feature_set_id>', endpoint='feature_set')
api.add_resource(FeatureSetFeatureListResource, '/featuresets/<int:feature_set_id>/features', endpoint='feature_set_features')
api.add_resource(ResourceConfig, '/resources_config', endpoint='resources_config')
# --- annotation task / annotator endpoints ---
api.add_resource(AnnotationTaskListResource, '/annotation_tasks', endpoint='annotation_tasks')
api.add_resource(AnnotationTaskResource, '/annotation_tasks/<int:task_id>', endpoint='annotation_task')
api.add_resource(UserAnnotationTaskListResource, '/users/<int:user_id>/annotation_tasks', endpoint='annotation_tasks_for_user')
api.add_resource(AnnotationTaskEntryListResource, '/annotation_tasks/<int:task_id>/entries', endpoint='entries_for_annotation_task')
api.add_resource(AnnotationTaskScaleEntryListResource, '/annotation_tasks/<int:task_id>/scale_entries', endpoint='scale_entries_for_annotation_task')
api.add_resource(AnnotationTaskAnnotatorListResource, '/annotation_tasks/<int:task_id>/annotators', endpoint='annotators_for_annotation_task')
api.add_resource(AnnotationResultListResource, '/annotation_tasks/results', endpoint='annotation_tasks_results')
api.add_resource(AnnotatorResultListResource, '/annotators/<int:annotator_id>/results', endpoint='results_for_annotator')
api.add_resource(AnnotationTaskResultListResource, '/annotation_tasks/<int:task_id>/results', endpoint='results_for_annotation_task')
api.add_resource(EntriesForAnnotatorResource, '/annotators/<string:token>/entries', endpoint='entries_for_annotators')
api.add_resource(AnnotatorResource, '/annotators/<string:token>', endpoint='annotator')
api.add_resource(AnnotationTaskScaleEntry, '/annotation_tasks/<int:task_id>/scale_entries/<int:scale_entry_id>', endpoint='scale_entry')
# --- cohort and prediction-outcome endpoints ---
api.add_resource(AtlasCohortResource, '/atlas/cohorts/<int:cohort_id>/patients', endpoint='patients_for_atlas_cohort')
api.add_resource(ModelPredictionOutcomeListResource, '/models/<int:model_id>/outcomes', endpoint='prediction_outcomes')
api.add_resource(PredictionOutcomeListResource, '/models/prediction/outcomes', endpoint='model_prediction_outcome')
api.add_resource(PredictionOutcomeResource, '/models/outcomes/<int:pred_outcome_id>', endpoint='prediction_outcome')
# Development entry point; binds on all interfaces.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)
| 80.211765 | 228 | 0.839689 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,235 | 0.327809 |
ea93a4863758bc25492b7cc4b0d277a031dd4e69 | 427 | py | Python | Python/Tutorial - 3/check.py | JC2295/FCC_Tutorial_Projects | 990e1221b2177acb9e4db0264adab518620404a0 | [
"MIT"
] | null | null | null | Python/Tutorial - 3/check.py | JC2295/FCC_Tutorial_Projects | 990e1221b2177acb9e4db0264adab518620404a0 | [
"MIT"
] | null | null | null | Python/Tutorial - 3/check.py | JC2295/FCC_Tutorial_Projects | 990e1221b2177acb9e4db0264adab518620404a0 | [
"MIT"
# Read a number from the user and report its sign and parity.
x = float(input("Enter Number: "))
is_even = (x % 2) == 0
if is_even and x > 0:
    print("The number you entered is positive and even.")
elif is_even and x < 0:
    print("The number you entered is negative and even.")
elif not is_even and x > 0:
    print("The number you entered is positive and odd.")
elif not is_even and x < 0:
    print("The number you entered is negative and odd.")
else:
    # Reached when x == 0 (neither > 0 nor < 0).
    print("Please enter a non zero number.")
ea9492335887a907444003adbfacda8abe09f88a | 18,763 | py | Python | solver.py | itrosen/hall-solver | 70ca5364b6c16bf62b7faa69ac30d14f972d7320 | [
"MIT"
] | null | null | null | solver.py | itrosen/hall-solver | 70ca5364b6c16bf62b7faa69ac30d14f972d7320 | [
"MIT"
] | null | null | null | solver.py | itrosen/hall-solver | 70ca5364b6c16bf62b7faa69ac30d14f972d7320 | [
"MIT"
] | null | null | null | """
Created on Dec 16 2021
@author: Ilan Rosen
Poisson equation solver for the Hall effect.
Includes classes for Hall bars, Hall bars in a nonlocal geometry, and Corbino disks.
The Hall bar class has built-in methods for longitudinal and Hall 4-probe resistance measurements.
Plotting functions assume coordinates are in microns, but the Poisson equation is scale-invariant.
"""
import time
import math
import numpy as np
import scipy.sparse as sp # import sparse matrix library
import matplotlib.pyplot as plt
from scipy.sparse.linalg import spsolve
# import the file where the differentiation matrix operators are defined
from diff_matrices import Diff_mat_1D, Diff_mat_2D
class hallbar():
    """The class for a Hall bar device

    Source is the left terminal, drain is the right terminal; solve()
    normalizes the potential to 1 at the source and 0 at the drain.

    Args:
    Lx : length in x direction
    Ly : length in y direction
    Nx : number of points in grid along x
    Ny : number of points in grid along y
    """
    def __init__(self, Lx, Ly, Nx = 301, Ny = 201):
        # Initiate with no contacts
        self.contacts = []
        # Define coordinate variables
        self.Nx = Nx
        self.Ny = Ny
        self.Lx = Lx
        self.Ly = Ly
        self.x = np.linspace(0,self.Lx,self.Nx)
        self.y = np.linspace(0,self.Ly,self.Ny)
        self.dx = self.x[1] - self.x[0]    # grid spacing along x direction
        self.dy = self.y[1] - self.y[0]    # grid spacing along y direction
        self.X,self.Y = np.meshgrid(self.x,self.y)    # 2D meshgrid
        # 1D indexing
        self.Xu = self.X.ravel()    # Unravel 2D meshgrid to 1D array
        self.Yu = self.Y.ravel()
        # Search for boundary indices (positions in the unraveled 1D arrays)
        start_time = time.time()
        self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0]))    # Left boundary
        self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1]))    # Right boundary
        self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0]))    # Bottom boundary
        self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1]))    # Top boundary
        self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1])))    # outer boundaries 1D unravel indices
        self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1]))    # outer boundary
        print("Boundary search time = %1.4s" % (time.time()-start_time))
        # Load finite difference matrix operators
        self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
        # Initiate empty solution matrix. Stays 0 until solve() runs; the
        # measurement and plotting methods use u == 0 as a "not solved" sentinel.
        self.u = 0
    def solve(self, lmbda):
        """Assemble and solve the Poisson problem for the potential.

        Args:
            lmbda : sigma_xy / sigma_xx. Must be finite.
        Side effect: fills self.u with the potential on the (Nx, Ny) grid,
        with phi = 1 on the left (source) and phi = 0 on the right (drain).
        """
        self.lmbda = lmbda
        # Construct system matrix without boundary conditions
        start_time = time.time()
        I_sp = sp.eye(self.Nx*self.Ny).tocsr()
        L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
        # Boundary operators
        BD = I_sp    # Dirichlet boundary operator
        BNx = self.Dx_2d / (2 * self.dx)    # Neumann boundary operator for x component
        BNy = self.Dy_2d / (2 * self.dy)    # Neumann boundary operator for y component
        # DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
        L_sys[self.ind_unravel_L,:] = BD[self.ind_unravel_L,:]    # Boundaries at the left layer
        L_sys[self.ind_unravel_R,:] = BD[self.ind_unravel_R,:]    # Boundaries at the right edges
        # NO CURRENT THROUGH TOP/BOTTOM EDGES: j_y = 0 is expressed as
        # dV/dy - lmbda * dV/dx = 0 along those rows.
        L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:]    # Boundaries at the top layer
        L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:]    # Boundaries at the bottom layer
        # Source function (right hand side vector)
        g = np.zeros(self.Nx*self.Ny)
        # Insert boundary values at the boundary points
        g[self.ind_unravel_L] = 1    # Dirichlet boundary condition at source
        g[self.ind_unravel_R] = 0    # Dirichlet boundary condition at drain
        g[self.ind_unravel_T] = 0    # No current through top
        g[self.ind_unravel_B] = 0    # No current through bottom
        print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
        start_time = time.time()
        # Sparse direct solve; reshape back to the (Nx, Ny) grid layout.
        self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
        print("spsolve() time = %1.6s" % (time.time()-start_time))
    def voltage_measurement(self, x1, x2, side='top'):
        """Return the potential difference V_A - V_B between two probe points.

        Args:
            x1 : x coordinate of point V_A
            x2 : x coordinate of point V_B
            side : 'top', 'bottom', or 'hall'. 'hall' compares the bottom
                edge at x1 with the top edge at x2.
        Raises:
            Exception : if solve() has not run or a point is out of bounds.
        """
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
            raise Exception('Points out of bounds')
        if side=='top':
            ya = self.Ny-1
            yb = self.Ny-1
        elif side=='bottom':
            ya = 0
            yb = 0
        elif side=='hall':
            ya = 0
            yb = self.Ny-1
        else:
            raise Exception('Side must be top or bottom')
        # Find nearest index value to input coordinates
        xa = np.searchsorted(self.x, x1, side='left')
        xb = np.searchsorted(self.x, x2, side='left')
        return self.u[xa, ya] - self.u[xb, yb]
    def plot_potential(self):
        """Filled contour plot of the solved potential (normalized units)."""
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        fig = plt.figure(figsize = [8,5])
        plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
        cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
        plt.xlabel(r'x ($\mu$m)');
        plt.ylabel(r'y ($\mu$m)');
        plt.show()
    def plot_resistance(self):
        """Plot the local apparent rho_xx along the top and bottom edges.

        25812 ohm is h/e^2 (von Klitzing constant), so resistances are in
        ohms assuming the potential is normalized to a conductance-quantum
        current scale; the dashed line marks the bulk value h/e^2 / lmbda.
        """
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        # Finite difference of edge potential, scaled to a resistivity.
        r_top = (self.u[0:-1, -1] - self.u[1:, -1]) * 25812 * self.Ly / self.dx
        r_bottom = (self.u[0:-1, 0] - self.u[1:, 0]) * 25812 * self.Ly / self.dx
        rxx = 25812 / self.lmbda
        fig = plt.figure(figsize = [8,5])
        plt.plot(self.x[0:-1] - self.dx, r_top, 'r', label='top')
        plt.plot(self.x[0:-1] - self.dx, r_bottom, 'b', label='bottom')
        plt.hlines(rxx, self.x[0], self.x[-1], linestyle='dashed', color='grey', label=r'$\rho_{xx}$')
        plt.xlabel(r'x ($\mu$m)');
        plt.ylabel(r'$\rho_{xx}$ $(\Omega)$');
        plt.legend()
        plt.ylim([0, 12000]);
        plt.show()
    def add_contact(self, contact):
        """Register a contact on this device; raises if it lies beyond Lx."""
        if contact.x1 > self.Lx or contact.x2 > self.Lx:
            raise Exception('Contact out of bounds')
        self.contacts.append(contact)
    def measure_contact_voltageonly(self, contact):
        """Return the apparent resistivity from a voltage-only measurement.

        Voltage is averaged over grid points within contact.width of each tap.
        THIS FUNCTION DOES NOT CHECK THE CURRENT: it assumes the two-terminal
        resistance is h/e^2, which in general is wrong -- prefer
        measure_contact() when conductivities are known.
        """
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        if contact.side=='top':
            y = self.Ny-1
        elif contact.side=='bottom':
            y = 0
        else:
            raise Exception('Side must be top or bottom')
        # Average voltage A
        A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
        A_voltage = self.u[A_indices, y].mean()
        # Average voltage B (comment in original said "A"; it is tap B)
        B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
        B_voltage = self.u[B_indices, y].mean()
        # voltage difference
        v = A_voltage - B_voltage
        # length between contacts
        dx = np.abs(contact.x1 - contact.x2)
        # return apparent resistivity (25812 ohm = h/e^2)
        return 25812 * v * self.Ly / dx
    def measure_all_contacts_voltageonly(self):
        """Apply measure_contact_voltageonly() to every registered contact."""
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        result = []
        for contact in self.contacts:
            result.append(self.measure_contact_voltageonly(contact))
        return result
    def measure_contact(self, contact, sxx, sxy):
        '''
        Voltage is averaged across voltage tap
        This method checks the current and outputs resistivity.
        Args:
            contact : contact instance
            sxx : longitudinal conductivity (units e2/h)
            sxy : Hall conductivity (units e2/h); sxy/sxx should match self.lmbda
        Returns: measured resistivity (Hall resistance when contact.side == 'hall')
        '''
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        if contact.side=='top':
            ya = self.Ny-1
            yb = self.Ny-1
        elif contact.side=='bottom':
            ya = 0
            yb = 0
        elif contact.side=='hall':
            # Tap A on the bottom edge, tap B on the top edge.
            ya = 0
            yb = self.Ny-1
        else:
            raise Exception('Side must be top or bottom')
        # Average voltage A over grid points within the tap width
        A_indices = np.where(np.abs(self.x - contact.x1) < contact.width)[0]
        A_voltage = self.u[A_indices, ya].mean()
        # Average voltage B
        B_indices = np.where(np.abs(self.x - contact.x2) < contact.width)[0]
        B_voltage = self.u[B_indices, yb].mean()
        # voltage difference
        v = A_voltage - B_voltage
        # length between contacts
        dx = np.abs(contact.x1 - contact.x2)
        i = self.measure_current(sxx, sxy)
        # return apparent resistivity; Hall measurements need no geometric factor
        if contact.side=='hall':
            return v / i
        else:
            return v / i * self.Ly / dx
    def measure_all_contacts(self, sxx, sxy):
        """Apply measure_contact() to every registered contact."""
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        result = []
        for contact in self.contacts:
            result.append(self.measure_contact(contact, sxx, sxy))
        return result
    def measure_current(self, sxx, sxy):
        '''
        ARGS : sxx and sxy : longitudinal and Hall conductivity. units e2/h
        Returns : current moving through device (SI units)
        '''
        # choose place to measure: halfway across Hallbar
        ind_x = int(self.Nx/2)
        # calculate electric field using E = -\nabla V
        # x electric field, using second order central finite difference
        E_x = 0.5 * (self.u[ind_x - 1, :] - self.u[ind_x + 1, :]) / self.dx
        # y electric field, need forward/backward differences for edges
        Dy_1d, D2y_1d = Diff_mat_1D(self.Ny)
        E_y = - 0.5 * Dy_1d.dot(self.u[ind_x, :]) / self.dy
        # calculate x current using j = sigma E; integrate over the cross
        # section and convert to SI units (25812 ohm = h/e^2)
        current = np.sum(sxx * E_x + sxy * E_y) * self.dy / 25812
        return current
class contact():
    """A pair of voltage probes attached to a Hall bar edge.

    Args:
        x1 : coordinate location of V_A
        x2 : coordinate location of V_B
        side : 'top', 'bottom', or 'hall' -- which side of the Hall bar to measure
        width : width of each voltage tap in microns
    """
    def __init__(self, x1, x2, side='top', width=6):
        # Pure data holder: store the probe geometry verbatim; all
        # measurement logic lives in the device classes.
        self.x1, self.x2 = x1, x2
        self.side = side
        self.width = width
class nonlocal_hb():
    """A Hall bar in a nonlocal measurement geometry.

    The current source and drain are finite segments on the bottom edge of
    the device; all other stretches of the boundary carry no current.

    Args:
        Lx : length in x direction
        Ly : length in y direction
        Nx : number of points in grid along x
        Ny : number of points in grid along y
        settings : optional dict of contact positions with keys
            'source_x1', 'source_x2', 'drain_x1', 'drain_x2'
    """
    def __init__(self, Lx, Ly, Nx = 301, Ny = 201, settings = None):
        # Fix for the mutable-default-argument pitfall: the previous
        # signature used settings = {}, a single dict shared across calls.
        # None is backward compatible (callers passing a dict are unaffected).
        if settings is None:
            settings = {}
        # Initiate with no contacts
        self.contacts = []
        # Define coordinate variables
        self.Nx = Nx
        self.Ny = Ny
        self.Lx = Lx
        self.Ly = Ly
        self.x = np.linspace(0,self.Lx,self.Nx)
        self.y = np.linspace(0,self.Ly,self.Ny)
        self.dx = self.x[1] - self.x[0]    # grid spacing along x direction
        self.dy = self.y[1] - self.y[0]    # grid spacing along y direction
        self.X,self.Y = np.meshgrid(self.x,self.y)    # 2D meshgrid
        # 1D indexing
        self.Xu = self.X.ravel()    # Unravel 2D meshgrid to 1D array
        self.Yu = self.Y.ravel()
        # Nonlocal contact positions; defaults put the source on
        # [Lx/4, Lx/3] and the drain on [2Lx/3, 3Lx/4].
        self.source_x1 = settings.get("source_x1", Lx/4)
        self.source_x2 = settings.get("source_x2", Lx/3)
        self.drain_x1 = settings.get("drain_x1", 2*Lx/3)
        self.drain_x2 = settings.get("drain_x2", 3*Lx/4)
        # Search for boundary indices
        start_time = time.time()
        self.ind_unravel_L = np.squeeze(np.where(self.Xu==self.x[0]))    # Left boundary
        self.ind_unravel_R = np.squeeze(np.where(self.Xu==self.x[self.Nx-1]))    # Right boundary
        self.ind_unravel_B = np.squeeze(np.where(self.Yu==self.y[0]))    # Bottom boundary
        self.ind_unravel_T = np.squeeze(np.where(self.Yu==self.y[self.Ny-1]))    # Top boundary
        self.ind_boundary_unravel = np.squeeze(np.where((self.Xu==self.x[0]) | (self.Xu==self.x[self.Nx-1]) | (self.Yu==self.y[0]) | (self.Yu==self.y[self.Ny-1])))    # outer boundaries 1D unravel indices
        self.ind_boundary = np.where((self.X==self.x[0]) | (self.X==self.x[self.Nx-1]) | (self.Y==self.y[0]) | (self.Y==self.y[self.Ny-1]))    # outer boundary
        self.ind_unravel_source = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.source_x1) & (self.Xu <= self.source_x2) ))    # Source
        self.ind_unravel_drain = np.squeeze(np.where( (self.Yu==self.y[0]) & (self.Xu >= self.drain_x1) & (self.Xu <= self.drain_x2) ))    # Drain
        print("Boundary search time = %1.4s" % (time.time()-start_time))
        # Load finite difference matrix operators
        self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
        # Initiate empty solution matrix; u == 0 is the "not solved" sentinel.
        self.u = 0
    def solve(self, lmbda):
        '''Construct the matrix problem and solve the Poisson equation.

        Args:
            lmbda : sigma_xy / sigma_xx. Must be finite.
        Side effect: fills self.u with the electric potential, normalized
        to 1 at the source segment and 0 at the drain segment.
        '''
        self.lmbda = lmbda
        # Construct system matrix without boundary conditions
        start_time = time.time()
        I_sp = sp.eye(self.Nx*self.Ny).tocsr()
        L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
        # Boundary operators
        BD = I_sp    # Dirichlet boundary operator
        BNx = self.Dx_2d / (2 * self.dx)    # Neumann boundary operator for x component
        BNy = self.Dy_2d / (2 * self.dy)    # Neumann boundary operator for y component
        # NO CURRENT THROUGH TOP/BOTTOM EDGES
        L_sys[self.ind_unravel_T,:] = BNy[self.ind_unravel_T,:] - lmbda * BNx[self.ind_unravel_T,:]    # Boundaries at the top layer
        L_sys[self.ind_unravel_B,:] = BNy[self.ind_unravel_B,:] - lmbda * BNx[self.ind_unravel_B,:]    # Boundaries at the bottom layer
        # NO CURRENT THROUGH LEFT/RIGHT EDGES (normal is x here, so the
        # Hall term couples to the y derivative with opposite sign)
        L_sys[self.ind_unravel_L,:] = BNx[self.ind_unravel_L,:] + lmbda * BNy[self.ind_unravel_L,:]
        L_sys[self.ind_unravel_R,:] = BNx[self.ind_unravel_R,:] + lmbda * BNy[self.ind_unravel_R,:]
        # REPLACE WITH DIRICHLET BOUNDARY CONDITIONS FOR SOURCE/DRAIN
        L_sys[self.ind_unravel_source,:] = BD[self.ind_unravel_source,:]
        L_sys[self.ind_unravel_drain,:] = BD[self.ind_unravel_drain,:]
        # Source function (right hand side vector)
        g = np.zeros(self.Nx*self.Ny)
        # No current boundary conditions
        g[self.ind_unravel_L] = 0
        g[self.ind_unravel_R] = 0
        g[self.ind_unravel_T] = 0
        g[self.ind_unravel_B] = 0
        # Dirichlet value at the source segment (drain stays at 0)
        g[self.ind_unravel_source] = 1
        print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
        start_time = time.time()
        self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
        print("spsolve() time = %1.6s" % (time.time()-start_time))
    def voltage_measurement(self, x1, x2, side='top'):
        """Return V_A - V_B between two probe points on one edge.

        Args:
            x1 : x coordinate of point V_A
            x2 : x coordinate of point V_B
            side : 'top' or 'bottom' -- which edge of the Hall bar to probe
        Raises:
            Exception : if solve() has not run or a point is out of bounds.
        """
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        if x1 > self.Lx or x1 < 0 or x2 > self.Lx or x2 < 0:
            raise Exception('Points out of bounds')
        if side=='top':
            y = self.Ny-1
        elif side=='bottom':
            y = 0
        else:
            raise Exception('Side must be top or bottom')
        # Find nearest index value to input coordinates
        xa = np.searchsorted(self.x, x1, side='left')
        xb = np.searchsorted(self.x, x2, side='left')
        return self.u[xa, y] - self.u[xb, y]
    def plot_potential(self):
        """Pseudocolor plot of the solved potential (normalized to [0, 1])."""
        if np.all(self.u==0):
            raise Exception('System has not been solved')
        fig = plt.figure(figsize = [8,5])
        plt.pcolormesh(self.X, self.Y, self.u.T, cmap='inferno', vmin=0, vmax=1)
        cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
        plt.xlabel(r'x ($\mu$m)');
        plt.ylabel(r'y ($\mu$m)');
        plt.show()
class corbino():
"""The class for a Corbino disk
Args:
ro : outer radius
ri : inner radius
Nx : number of points in grid along x
Ny : number of points in grid along y
"""
def __init__(self, ro, ri, Nx = 301, Ny = 201):
# Initiate with no contacts
self.contacts = []
# Define coordinate variables
self.Nx = Nx
self.Ny = Ny
self.ro = ro
self.ri = ri
self.x = np.linspace(-self.ro, self.ro, self.Nx)
self.y = np.linspace(-self.ro, self.ro, self.Ny)
self.dx = self.x[1] - self.x[0] # grid spacing along x direction
self.dy = self.y[1] - self.y[0] # grid spacing along y direction
self.X,self.Y = np.meshgrid(self.x,self.y) # 2D meshgrid
# 1D indexing
self.Xu = self.X.ravel() # Unravel 2D meshgrid to 1D array
self.Yu = self.Y.ravel()
# Search for boundary indices
start_time = time.time()
self.ind_unravel_outer = np.squeeze(np.where(self.Xu**2 + self.Yu**2 >= self.ro**2)) # outer boundary
self.ind_unravel_inner = np.squeeze(np.where(self.Xu**2 + self.Yu**2 <= self.ri**2)) # inner boundary
self.ind_boundary_unravel = np.squeeze(np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2))) # boundary 1D unravel indices
self.ind_boundary = np.where((self.Xu**2 + self.Yu**2 >= self.ro**2) | (self.Xu**2 + self.Yu**2 <= self.ri**2)) # boundary
print("Boundary search time = %1.4s" % (time.time()-start_time))
# Load finite difference matrix operators
self.Dx_2d, self.Dy_2d, self.D2x_2d, self.D2y_2d = Diff_mat_2D(self.Nx,self.Ny)
# Initiate empty solution matrix
self.u = 0
def solve(self, lmbda):
# constructs matrix problem and solves Poisson equation
# Args: lmbda : sigma_xy / sigma_xx. Must be finite
# Returns: self.u : electric potential
self.lmbda = lmbda
# Construct system matrix without boundary conditions
start_time = time.time()
I_sp = sp.eye(self.Nx*self.Ny).tocsr()
L_sys = self.D2x_2d/self.dx**2 + self.D2y_2d/self.dy**2
# Boundary operators
BD = I_sp # Dirichlet boundary operator
# DIRICHLET BOUNDARY CONDITIONS FOR CONTACTS
L_sys[self.ind_boundary_unravel,:] = BD[self.ind_boundary_unravel,:]
# Source function (right hand side vector)
g = np.zeros(self.Nx*self.Ny)
# Insert boundary values at the boundary points
g[self.ind_unravel_outer] = 1 # Dirichlet boundary condition at source
g[self.ind_unravel_inner] = 0 # Dirichlet boundary condition at drain
print("System matrix and right hand vector computation time = %1.6s" % (time.time()-start_time))
start_time = time.time()
self.u = spsolve(L_sys,g).reshape(self.Ny,self.Nx).T
print("spsolve() time = %1.6s" % (time.time()-start_time))
def plot_potential(self):
if np.all(self.u==0):
raise Exception('System has not been solved')
fig = plt.figure(figsize = [8,5])
plt.contourf(self.x,self.y,self.u.T,41,cmap = 'inferno')
cbar = plt.colorbar(ticks = np.arange(0, 1.01, 0.2), label = r'$\phi / \phi_s$')
plt.xlabel(r'x ($\mu$m)');
plt.ylabel(r'y ($\mu$m)');
plt.show() | 33.807207 | 196 | 0.666525 | 18,058 | 0.962426 | 0 | 0 | 0 | 0 | 0 | 0 | 7,292 | 0.388637 |
ea94f186e674605c91c929e6c34cf14e026507ea | 2,078 | py | Python | tools/python/boutiques/util/utils.py | glatard/boutiques | a75de1b2fcf96e0eb196974ee2376b014972e847 | [
"MIT"
] | 2 | 2016-11-01T15:08:37.000Z | 2018-09-07T20:56:43.000Z | tools/python/boutiques/util/utils.py | glatard/boutiques | a75de1b2fcf96e0eb196974ee2376b014972e847 | [
"MIT"
] | null | null | null | tools/python/boutiques/util/utils.py | glatard/boutiques | a75de1b2fcf96e0eb196974ee2376b014972e847 | [
"MIT"
] | 1 | 2018-03-20T15:51:00.000Z | 2018-03-20T15:51:00.000Z | import os
import simplejson as json
from boutiques.logger import raise_error
# Parses absolute path into filename
def extractFileName(path):
# Helps OS path handle case where "/" is at the end of path
if path is None:
return None
elif path[:-1] == '/':
return os.path.basename(path[:-1]) + "/"
else:
return os.path.basename(path)
class LoadError(Exception):
pass
# Helper function that loads the JSON object coming from either a string,
# a local file or a file pulled from Zenodo
def loadJson(userInput, verbose=False):
# Check for JSON file (local or from Zenodo)
json_file = None
if os.path.isfile(userInput):
json_file = userInput
elif userInput.split(".")[0].lower() == "zenodo":
from boutiques.puller import Puller
puller = Puller([userInput], verbose)
json_file = puller.pull()[0]
if json_file is not None:
with open(json_file, 'r') as f:
return json.loads(f.read())
# JSON file not found, so try to parse JSON object
e = ("Cannot parse input {}: file not found, "
"invalid Zenodo ID, or invalid JSON object").format(userInput)
if userInput.isdigit():
raise_error(LoadError, e)
try:
return json.loads(userInput)
except ValueError:
raise_error(LoadError, e)
# Helper function that takes a conditional path template key as input,
# and outputs a formatted string that isolates variables/values from
# operators, parentheses, and python keywords with a space.
# ex: "(opt1>2)" becomes " ( opt1 > 2 ) "
# "(opt1<=10.1)" becomes " ( opt1 <= 10.1 ) "
def conditionalExpFormat(s):
cleanedExpression = ""
idx = 0
while idx < len(s):
c = s[idx]
if c in ['=', '!', '<', '>']:
cleanedExpression += " {0}{1}".format(
c, "=" if s[idx+1] == "=" else " ")
idx += 1
elif c in ['(', ')']:
cleanedExpression += " {0} ".format(c)
else:
cleanedExpression += c
idx += 1
return cleanedExpression
| 31.484848 | 73 | 0.606833 | 36 | 0.017324 | 0 | 0 | 0 | 0 | 0 | 0 | 737 | 0.354668 |
ea95124323bddaa7f489eebee0d325f17c311f5a | 214 | py | Python | python/exercism/word_count.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 7 | 2016-01-03T19:42:07.000Z | 2018-10-23T14:03:12.000Z | python/exercism/word_count.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | null | null | null | python/exercism/word_count.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 1 | 2018-03-09T08:52:01.000Z | 2018-03-09T08:52:01.000Z | import re
def word_count(s):
d = {}
s = re.sub('[^0-9a-zA-Z]+', ' ', s.lower()).split()
for word in s:
if word in d:
d[word] += 1
else:
d[word] = 1
return d
| 17.833333 | 55 | 0.411215 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.084112 |
ea966da9c7d51e718e4e2994743bb27ac7739401 | 1,913 | py | Python | src/core/forms.py | artinnok/billing | 11a100462e898bf059f89fe77ebb405c2da2806f | [
"MIT"
] | null | null | null | src/core/forms.py | artinnok/billing | 11a100462e898bf059f89fe77ebb405c2da2806f | [
"MIT"
] | null | null | null | src/core/forms.py | artinnok/billing | 11a100462e898bf059f89fe77ebb405c2da2806f | [
"MIT"
] | null | null | null | from django import forms
from core.models import Profile
def get_sender_choices():
return list(Profile.objects.all().values_list('pk', 'inn'))
class TransactionForm(forms.Form):
sender = forms.ChoiceField(
label='Отправитель',
help_text='Выберите ИНН отправителя',
choices=get_sender_choices,
)
receiver_list = forms.CharField(
label='Список получателей',
help_text='Укажите ИНН получателей через запятую',
)
amount = forms.DecimalField(
max_digits=12,
decimal_places=2,
label='Сумма перевода',
help_text='С точностью до 2 знаков',
)
def clean_receiver_list(self):
try:
receiver_list = self.cleaned_data['receiver_list'].split(',')
receiver_list = set([item.strip() for item in receiver_list if item])
rel_receiver_list = set(Profile.objects.filter(inn__in=receiver_list).values_list('inn', flat=True))
subtract = receiver_list - rel_receiver_list
if subtract == set():
return receiver_list
raise forms.ValidationError(
message='Users with this INN {} not found'.format(sorted(list(subtract))),
code='some_users_not_found',
)
except KeyError:
pass
def clean(self):
try:
profile = Profile.objects.get(pk=self.cleaned_data['sender'])
if profile.balance < self.cleaned_data['amount']:
raise forms.ValidationError(
message='Not enough funds',
code='not_enough_funds',
)
return self.cleaned_data
except Profile.DoesNotExist:
raise forms.ValidationError(
message='Profile not found',
code='profile_not_found',
)
except KeyError:
pass
| 28.132353 | 112 | 0.588082 | 1,874 | 0.924519 | 0 | 0 | 0 | 0 | 0 | 0 | 431 | 0.21263 |
ea96d847da2a30f013707da8c63074bc8c9579c8 | 19,736 | py | Python | fabfile.py | CCMS-UCSD/ProteoSAFe_Workflow_Deployment | 2ce1e68d0c153b76316ccd2b6956d1faa1ad6217 | [
"CC0-1.0"
] | 1 | 2021-04-12T18:40:34.000Z | 2021-04-12T18:40:34.000Z | fabfile.py | CCMS-UCSD/ProteoSAFe_Workflow_Deployment | 2ce1e68d0c153b76316ccd2b6956d1faa1ad6217 | [
"CC0-1.0"
] | 28 | 2019-09-05T20:41:22.000Z | 2021-06-23T18:10:06.000Z | fabfile.py | CCMS-UCSD/CCMSDeployments | 2ce1e68d0c153b76316ccd2b6956d1faa1ad6217 | [
"CC0-1.0"
] | null | null | null | from fabric2 import Connection
from fabric2 import task
from fabric2 import config
import os
import time
from xml.etree import ElementTree as ET
import uuid
import glob
import json
import urllib.parse
import io
workflow_components = ['input.xml', 'binding.xml', 'flow.xml', 'result.xml', 'tool.xml']
@task
def release_text(c, workflow_name):
base_dir = '.'
tools = read_all_tools('..')
dependencies = output_tool_dependencies(workflow_name, base_dir)
makefile = read_makefile(base_dir)
readme = 'README.md'
previous_readme_lines = []
if os.path.isfile(readme):
with open(readme) as f:
for previous_readme_line in f:
previous_readme_lines.append(previous_readme_line)
if "CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED" in previous_readme_line:
previous_readme_lines = []
version = makefile["WORKFLOW_VERSION"]
name = makefile.get("WORKFLOW_LABEL")
if name:
name = name[1:-1]
else:
name = workflow_name
description = makefile.get("WORKFLOW_DESCRIPTION")
update_text = "Last updated: {}.".format(makefile['LAST_UPDATED'])
dependency_text = []
seen = {}
for (dependency, dependency_version) in dependencies:
status = "N/V"
if dependency not in seen or (dependency in seen and seen[dependency] != dependency_version):
if dependency in tools:
local_version, workflow = tools[dependency]
if dependency_version == local_version:
status = "({})".format(dependency_version)
else:
status = "({}, latest is {})".format(dependency_version, local_version)
dependency_text.append("* {} {}".format(dependency, status))
else:
dependency_text.append("* {} (untracked)".format(dependency))
seen[dependency] = dependency_version
with open(readme, 'w') as w:
w.write('## {}\n\n'.format(name))
w.write('#### Version: {}\n\n'.format(version))
if description:
w.write('#### Description: \n{}\n\n'.format(description[1:-1]))
if len(dependency_text) > 0:
w.write('#### Dependencies: \n{}\n\n'.format("\n".join(dependency_text)))
w.write('_{}_\n\n'.format(update_text))
w.write('<data id=CCMS_DEPLOYMENTS_HEADER_BREAK_ELEMENT_CAUTION_ANYTHING_ABOVE_WILL_BE_AUTOGENERATED />\n\n')
for previous_readme_line in previous_readme_lines:
w.write(previous_readme_line)
@task
def read_branch(c, workflow_name):
    """Return the currently checked-out git branch of the given workflow
    directory, or None when it is master/main or a detached HEAD.
    """
    output = io.StringIO()
    with output:
        c.local('cd {} && git branch | grep \*'.format(workflow_name), out_stream = output)
        current = output.getvalue().replace('\n','').replace('* ','')
    # mainline checkouts are reported as "no branch" (None)
    for mainline_marker in ('HEAD detached', 'master', 'main'):
        if mainline_marker in current:
            return None
    return current
def read_makefile(workflow_name):
    """Parse ``<workflow_name>/Makefile`` into a dict of KEY=VALUE pairs.

    Lines without an '=' are ignored; values may themselves contain '='.
    A synthetic 'LAST_UPDATED' entry holds the Makefile's mtime as text.
    """
    makefile_path = os.path.join(workflow_name, 'Makefile')
    entries = {}
    with open(makefile_path) as makefile:
        for raw_line in makefile:
            key, separator, value = raw_line.rstrip().partition('=')
            if separator:
                entries[key] = value
    entries['LAST_UPDATED'] = time.ctime(os.path.getmtime(makefile_path))
    return entries
@task
def update_workflow_from_makefile(c, workflow_name, subcomponents):
    """Deploy a workflow using the metadata declared in <workflow_name>/Makefile.

    Fix: the label/description keys were previously misspelled as
    "WORKLFLOW_LABEL"/"WORKLFLOW_DESCRIPTION" (the Makefile and the rest of
    this file use "WORKFLOW_LABEL"/"WORKFLOW_DESCRIPTION"), so the declared
    label and description were silently dropped (always None).
    """
    params = read_makefile(workflow_name)
    # The final positional argument (workflow_name) is update_all's base_dir:
    # the workflow's own directory is where its XML and tools live.
    update_all(c, params["WORKFLOW_VERSION"], params.get("WORKFLOW_NAME"), params.get("TOOL_FOLDER_NAME"), params.get("WORKFLOW_LABEL"), params.get("WORKFLOW_DESCRIPTION"), workflow_name, subcomponents=subcomponents)
@task
def update_all(c, workflow_version, workflow_name=None, tool_name=None, workflow_label=None, workflow_description=None, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Deploy a workflow's XML descriptors and/or its tool binaries.

    Args:
        workflow_version : required version string; on a non-production host
            a non-mainline git branch name is appended as "+<branch>"
        workflow_name : when given, the workflow XML is deployed and the
            resulting ProteoSAFe URLs are printed
        tool_name : when given, the tool folder is uploaded as well
        force_update_string : 'yes' also updates the active default version
    """
    production = "production" in c
    if workflow_version == None:
        exit("A workflow cannot be deployed without a version.")
    branch_name = read_branch(c, base_dir)
    if branch_name and not production:
        # tag non-mainline deployments with their branch name
        workflow_version = '{}+{}'.format(workflow_version, branch_name.replace(' ','_'))
    if workflow_name:
        update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=base_dir, subcomponents=subcomponents, force_update_string=force_update_string)
    if tool_name:
        update_tools(c, tool_name, workflow_version, base_dir)
    if workflow_name:
        # print clickable ProteoSAFe URLs for the deployed version(s)
        server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
        workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow":workflow_name.upper(), "workflow_version":workflow_version}))
        print("SUCCESS:\n\n{} updated at with version:\n\n{}\n\n".format(workflow_name, workflow_url))
        if force_update_string == 'yes':
            server_url_base = "https://{}/ProteoSAFe/index.jsp?params=".format(c.host)
            workflow_url = server_url_base + urllib.parse.quote(json.dumps({"workflow":workflow_name.upper()}))
            print("And default version :\n\n{}\n\n".format(workflow_url))
@task
def read_workflows_from_yml(c):
    """Return (workflow_name, subcomponents) pairs from the host config.

    Each entry under c["workflows"] is either a plain workflow name (deploy
    every standard XML component) or a mapping {workflow_name: [components]}.
    """
    pairs = []
    if "workflows" not in c:
        exit("Deploy all only works if a list of workflows to deploy is specified.")
    for entry in c["workflows"]:
        name = None
        components = workflow_components
        if isinstance(entry, dict):
            # mapping form: the (single) key/value pair supplies both fields
            for name, components in entry.items():
                pass
        else:
            name = entry
        pairs.append((name, components))
    return pairs
def read_all_tools(base_dir = '.'):
    """Scan sibling directories for tool submodules.

    Returns {TOOL_FOLDER_NAME: (WORKFLOW_VERSION, submodule_path)} for every
    directory under base_dir that has a parseable Makefile declaring both
    keys; directories without one are skipped.
    """
    all_tools = {}
    all_submodules = glob.glob(os.path.join(base_dir, '*'))
    for submodule in all_submodules:
        # this deployment repo itself is not a tool
        if 'CCMSDeployments' not in submodule and os.path.isdir(submodule):
            try:
                submodule_params = read_makefile(submodule)
                tool_name = submodule_params.get("TOOL_FOLDER_NAME")
                version = submodule_params["WORKFLOW_VERSION"]
                if tool_name:
                    all_tools[tool_name] = (version, submodule)
            # Previously a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit. Only skip real errors
            # (missing Makefile, missing WORKFLOW_VERSION, bad encoding).
            except Exception:
                pass
    return all_tools
@task
def deploy_all(c):
    """Deploy every workflow listed in the host's YAML configuration."""
    for name, components in read_workflows_from_yml(c):
        update_workflow_from_makefile(c, name, components)
@task
def read_dependencies(c, workflow_name, rewrite_string = 'no', base_dir = '.'):
    """Print the tool dependencies of a workflow and their version status.

    Pass rewrite_string='yes' to also rewrite tool.xml to the latest
    locally-tracked tool versions.
    """
    known_tools = read_all_tools('..')
    should_rewrite = rewrite_string == 'yes'
    output_updates(c, workflow_name, tool_name = None, base_dir = base_dir, tools = known_tools, seen = {}, rewrite = should_rewrite)
    print('')
@task
def is_on_server(c, tool_name, tool_version):
    """Return True when tool_name/tool_version already exists on the host."""
    remote_path = os.path.join(c["paths"]["tools"], tool_name, tool_version)
    run_as = c["production"]["workflow_user"] if "production" in c else None
    if run_as:
        result = c.sudo("test -e {}".format(remote_path), user=run_as, pty=True)
    else:
        result = c.run("test -e {}".format(remote_path))
    # `test -e` exits 0 (falsy return_code) when the path exists
    return not result.return_code
def output_updates(c, workflow_name = None, tool_name = None, base_dir = '.', tools = None, seen = {}, rewrite = False):
    """Report (and optionally apply) version updates for a workflow's tools.

    For each dependency in the workflow's tool.xml, prints whether the
    pinned version matches the latest locally-tracked version and whether
    that version is already deployed on the server. With rewrite=True, only
    changed/deployed entries are printed and tool.xml is rewritten to the
    latest tracked versions.

    NOTE(review): `seen = {}` is a mutable default argument shared across
    calls; current callers always pass their own dict, but it should be
    `seen=None` with `seen = {} if seen is None else seen`. `tool_name`
    is accepted but never used here.
    """
    updates = {}
    if workflow_name:
        dependencies = output_tool_dependencies(workflow_name, base_dir)
        outputs = []
        for (dependency, version) in dependencies:
            status = "N/V"
            # only report each (dependency, version) pair once
            if dependency not in seen or (dependency in seen and seen[dependency] != version):
                update = False
                deployed = False
                if dependency in tools:
                    local_version, workflow = tools[dependency]
                    if version == local_version:
                        status = "{}".format(version)
                    else:
                        update = True
                        updates[dependency] = local_version
                        status = "{}->{}".format(version, local_version)
                    if version and is_on_server(c, dependency, local_version):
                        deployed = True
                    deployed_str = " (deployed)" if deployed else " (needs deployment)"
                    # if rewrite:
                    #     if not deployed:
                    #         update_workflow_from_makefile(c, workflow, workflow_components, True)
                    #         status += " (updated)"
                    #     else:
                    #         status += " (already deployed)"
                    # else:
                    #     status += deployed_str
                    status += deployed_str
                    outputs.append((update or deployed,"\t{} {}".format(dependency, status)))
                else:
                    outputs.append((update or deployed,"\t{} untracked".format(dependency)))
                seen[dependency] = version
        if not rewrite:
            print('\nDepenencies for {}:'.format(workflow_name))
            for output in outputs:
                print(output[1])
        else:
            print('\nUpdated depenencies for {}:'.format(workflow_name))
            for output in outputs:
                # output[0] marks entries that changed or need deployment
                if output[0]:
                    print(output[1])
            rewrite_tool_w_new_dependencies(workflow_name, updates, base_dir = base_dir)
def output_tool_dependencies(workflow_name, base_dir = '.'):
    """Parse ``<base_dir>/<workflow_name>/tool.xml`` and list its tool deps.

    Returns [(tool_name, tool_version), ...] for every pathSet whose base is
    an external tool (i.e. not rooted at $base). The last path segment is
    treated as the version; a base with a single segment gets version "NV".
    """
    tool_xml = os.path.join(base_dir, workflow_name, 'tool.xml')
    root = ET.parse(tool_xml).getroot()
    found = []
    for path_set in root.findall('pathSet'):
        base = path_set.attrib['base']
        if '$base' in base:
            continue
        segments = base.split('/')
        if len(segments) >= 2:
            found.append(('/'.join(segments[:-1]), segments[-1]))
        else:
            found.append((segments[0], "NV"))
    return found
def rewrite_tool_w_new_dependencies(workflow_name, updates, rewrite = False, base_dir = '.'):
    """Rewrite ``<base_dir>/<workflow_name>/tool.xml`` with new tool versions.

    Args:
        workflow_name : directory containing the tool.xml to rewrite
        updates : {tool_name: new_version}; falsy versions are ignored
        rewrite : unused, kept for backward compatibility with callers
        base_dir : parent directory of the workflow

    Only writes the file back when at least one pathSet changed.
    (Cleanup: removed the unused `dependencies` local and flattened the
    nesting with a guard clause.)
    """
    changes_made = False
    local = os.path.join(base_dir, workflow_name, 'tool.xml')
    tree = ET.parse(local)
    root = tree.getroot()
    for path in root.findall('pathSet'):
        if '$base' in path.get('base'):
            continue
        split_full_path = path.get('base').split('/')
        tool_name = split_full_path[0]
        if tool_name in updates and updates[tool_name]:
            changes_made = True
            # base is <tool>/<version>[/<subpath>]; swap in the new version
            if len(split_full_path[2:]) == 0:
                path.set('base', os.path.join(tool_name, updates[tool_name]))
            else:
                path.set('base', os.path.join(tool_name, updates[tool_name], '/'.join(split_full_path[2:])))
    if changes_made:
        tree.write(local)
@task
def generate_manifest(c):
    """Print name, type flag, version, and last-update time for every
    workflow listed in the host configuration.
    """
    for entry_name, _subcomponents in read_workflows_from_yml(c):
        makefile_params = read_makefile(entry_name)
        if "WORKFLOW_NAME" not in makefile_params:
            flag = " (Tool only)"
        elif "TOOL_FOLDER_NAME" not in makefile_params:
            flag = " (Workflow only)"
        else:
            flag = ""
        print('{}{}, version: {}, last updated: {}'.format(entry_name,flag,makefile_params['WORKFLOW_VERSION'],makefile_params['LAST_UPDATED']))
@task
def update_workflow_xml(c, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, base_dir=".", subcomponents=None, force_update_string='yes'):
    """Rewrite the workflow XML components locally, validate them, and push
    them to the server: always into the explicit version directory, and also
    into the active default version when force_update_string == 'yes'.
    """
    if not subcomponents:
        subcomponents = workflow_components
    force_update = force_update_string == 'yes'
    production = "production" in c
    production_user = c["production"]["workflow_user"] if production else None
    # stage the rewritten XML in a unique local temp directory
    local_temp_path = os.path.join("/tmp/{}_{}_{}".format(workflow_name, workflow_version, str(uuid.uuid4())))
    c.local("mkdir -p {}".format(local_temp_path))
    for component in subcomponents:
        rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path)
    #Performing Workflow Files Validation
    try:
        validate_workflow_xml(local_temp_path)
    # Deliberately non-fatal: deployment proceeds even if validation blows up.
    # (Previously a bare `except:`, which would also trap KeyboardInterrupt.)
    except Exception:
        print("Validation Failed in Exception")
    base_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions")
    versioned_workflow_path = os.path.join(c["paths"]["workflows"], workflow_name, "versions", workflow_version)
    if production_user:
        c.sudo("mkdir -p {}".format(base_workflow_path), user=production_user, pty=True)
        c.sudo("mkdir -p {}".format(versioned_workflow_path), user=production_user, pty=True)
    else:
        c.run("mkdir -p {}".format(base_workflow_path))
        c.run("mkdir -p {}".format(versioned_workflow_path))
    for component in subcomponents:
        # print(component)
        if force_update:
            update_workflow_component(c, local_temp_path, workflow_name, component, production_user=production_user) #Adding to active default version
        update_workflow_component(c, local_temp_path, workflow_name, component, workflow_version=workflow_version, production_user=production_user) #Explicitly adding versioned
    if not production_user:
        # no privileged user available: open up permissions instead
        c.run("chmod 777 {}".format(versioned_workflow_path))
        c.run("chmod -R 777 {}".format(versioned_workflow_path))
        for xml_filename in workflow_components:
            c.run("chmod 777 {}".format(os.path.join(c["paths"]["workflows"], workflow_name, xml_filename)))
#Uploading the actual tools to the server
@task
def update_tools(c, workflow_name, workflow_version, base_dir="."):
    """Upload <base_dir>/tools/<workflow_name> into the server's versioned
    tool directory, creating the destination first.
    """
    run_as = c["production"]["tool_user"] if "production" in c else None
    remote_tool_path = os.path.join(c["paths"]["tools"], workflow_name, workflow_version)
    if run_as:
        c.sudo("mkdir -p {}".format(remote_tool_path), user=run_as, pty=True)
    else:
        c.run("mkdir -p {}".format(remote_tool_path))
    update_folder(c, os.path.join(base_dir, 'tools', workflow_name), remote_tool_path, production_user=run_as)
    if not run_as:
        # no privileged user available: open up permissions instead
        c.run("chmod 777 {}".format(remote_tool_path))
        c.run("chmod -R 777 {}".format(remote_tool_path))
#Utility Functions
def rewrite_workflow_component(component, base_dir, workflow_name, tool_name, workflow_version, workflow_label, workflow_description, local_temp_path):
    """Rewrite one workflow XML component with deployment-specific values and
    write the result into local_temp_path.

    input.xml/result.xml get the workflow id/version; input.xml additionally
    gets its workflow-id/label text replaced and (when a description is
    given) a generated "Workflow Description" block prepended; flow.xml gets
    its name set; tool.xml gets $base path roots replaced with
    <tool_name>/<workflow_version>.
    """
    local = os.path.join(base_dir, workflow_name, component)
    temp = os.path.join(local_temp_path,component)
    tree = ET.parse(local)
    root = tree.getroot()
    if component in ['input.xml','result.xml']:
        root.set('id', workflow_name)
        root.set('version', workflow_version)
    if component in ['input.xml']:
        for path in root.findall('workflow-id'):
            path.text = workflow_name.upper()
        for path in root.findall('workflow-label'):
            if workflow_label:
                path.text = workflow_label
        if workflow_description is not None:
            # Build the ProteoSAFe input-form block that renders the
            # description banner at the top of the submission page.
            description_block = ET.Element("block")
            root.insert(0, description_block)
            description_block.attrib["label"] = "Workflow Description"
            description_row = ET.SubElement(description_block, "row")
            description_cell = ET.SubElement(description_row, "cell")
            description_label = ET.SubElement(description_cell, "label")
            description_label.attrib["prefix"] = "false"
            description_content = ET.SubElement(description_label, "content")
            description_content.text = '<div style="5px;padding:1px; border:2px;margin-left:8%;margin-right:8%;text-align:left">\
            <br><strong>{}</strong> \
            <hr style="margin-top:5px;margin-bottom:5px"> \
            {} \
            <hr style="margin-top:5px;margin-bottom:5px"> \
            <small>Workflow version {} </small> \
            </div>'.format(workflow_label if workflow_label else workflow_name.upper(), workflow_description, workflow_version)
    elif component in ['flow.xml']:
        root.set('name', workflow_name)
    elif component in ['tool.xml']:
        for path in root.findall('pathSet'):
            if '$base' in path.get('base'):
                if tool_name:
                    path.set('base',path.get('base').replace('$base',os.path.join(tool_name,workflow_version)))
                else:
                    exit("Cannot rewrite tool.xml without specifying tool name.")
    tree.write(temp)
def validate_workflow_xml(local_temp_path):
    """Run the workflow validator over the staged flow/binding/tool XML files
    and print any errors it reports."""
    # imported lazily so the deploy tasks still load even when the
    # validator package is not importable at module import time
    import workflow_validator
    flow_xml = os.path.join(local_temp_path, "flow.xml")
    binding_xml = os.path.join(local_temp_path, "binding.xml")
    tool_xml = os.path.join(local_temp_path, "tool.xml")
    workflow = workflow_validator.Workflow(flow_xml, binding_xml, tool_xml)
    workflow.validate()
    print(workflow.printerrors())
#TODO: Validate that the xml is also a valid workflow
def update_workflow_component(c, local_temp_path, workflow_filename, component, workflow_version=None, production_user=None):
    """Copy one staged workflow XML component to the server: into the active
    (default) workflow directory, or into that version's subdirectory when
    workflow_version is given."""
    staged = os.path.join(local_temp_path, component)
    workflow_root = os.path.join(c["paths"]["workflows"], workflow_filename)
    if workflow_version:
        destination = os.path.join(workflow_root, "versions", workflow_version, component)
    else:
        destination = os.path.join(workflow_root, component)
    update_file(c, staged, destination, production_user=production_user)
#Update File
def update_file(c, local_path, final_path, production_user = None):
    """Upload a single file to final_path on the server.

    With a production_user, the file is staged under /tmp and copied into
    place with sudo; otherwise it is put directly, retrying without
    preserve_mode when the permission-preserving put fails.
    """
    if production_user:
        remote_temp_path = os.path.join("/tmp/{}_{}".format(local_path.replace("/", "_"), str(uuid.uuid4())))
        c.put(local_path, remote_temp_path, preserve_mode=True)
        c.sudo('cp {} {}'.format(remote_temp_path, final_path), user=production_user, pty=True)
        # safety check: only delete staging files that really live in /tmp
        if os.path.split(os.path.normpath(remote_temp_path))[0] == '/tmp':
            c.run('rm {}'.format(remote_temp_path))
    else:
        try:
            c.put(local_path, final_path, preserve_mode=True)
        # Previously a bare `except:` (which would also trap
        # KeyboardInterrupt); only fall back on real errors.
        except Exception:
            c.put(local_path, final_path, preserve_mode=False)
#TODO: update this to work with rsync
def update_folder(c, local_path, final_path, production_user = None):
    """Upload a whole directory tree to final_path on the server.

    Strategy: tar the local folder (following symlinks, -h), upload the
    tarball, untar it into a unique remote temp directory, then rsync it
    into place (via sudo when a production_user is given). Temp artifacts
    are removed only if they really live under /tmp.
    """
    #Tar up local folder and upload to temporary space on server and untar
    local_temp_path = os.path.join("/tmp/{}_{}.tar".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    # NOTE(review): local tar runs via os.system with interpolated paths;
    # paths containing spaces or shell metacharacters would break this.
    cmd = "tar -C {} -chf {} .".format(local_path, local_temp_path)
    # print(cmd)
    os.system(cmd)
    remote_temp_tar_path = os.path.join("/tmp/{}_{}.tar".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    c.put(local_temp_path, remote_temp_tar_path, preserve_mode=True)
    remote_temp_path = os.path.join("/tmp/{}_{}".format(local_path.replace("/", "_"), str(uuid.uuid4())))
    c.run("mkdir {}".format(remote_temp_path))
    c.run("tar -C {} -xf {}".format(remote_temp_path, remote_temp_tar_path))
    if production_user:
        c.sudo('rsync -rlptD {}/ {}'.format(remote_temp_path, final_path), user=production_user, pty=True)
    else:
        c.run('rsync -rlptD {}/ {}'.format(remote_temp_path, final_path))
    # clean up remote staging artifacts (guarded to /tmp paths only)
    if os.path.split(os.path.normpath(remote_temp_path))[0] == '/tmp':
        c.run('rm -rf {}'.format(remote_temp_path))
    if os.path.split(os.path.normpath(remote_temp_tar_path))[0] == '/tmp':
        c.run('rm {}'.format(remote_temp_tar_path))
| 42.443011 | 218 | 0.641214 | 0 | 0 | 0 | 0 | 9,339 | 0.473196 | 0 | 0 | 3,575 | 0.181141 |
ea983ff474c1e9c14d38adcbbefd09dbeedc005c | 5,529 | py | Python | tests/test_volume.py | mathieuboudreau/electropy | 586f93b076448f39255727ff36afe50edb6255bc | [
"MIT"
] | 5 | 2019-04-06T02:40:34.000Z | 2020-09-09T20:31:56.000Z | tests/test_volume.py | mathieuboudreau/electropy | 586f93b076448f39255727ff36afe50edb6255bc | [
"MIT"
] | 6 | 2019-04-06T02:40:36.000Z | 2021-03-03T17:46:07.000Z | tests/test_volume.py | mathieuboudreau/electropy | 586f93b076448f39255727ff36afe50edb6255bc | [
"MIT"
] | 1 | 2020-04-10T19:22:17.000Z | 2020-04-10T19:22:17.000Z | import unittest
from electropy.charge import Charge
import numpy as np
from electropy import volume
class VolumeTest(unittest.TestCase):
    """Checks that volume.potential/volume.field agree, point by point, with
    Charge.potential/Charge.field — for a single charge, for superposition
    of two charges, and for per-component ('x'/'y'/'z') field extraction.

    Grid convention used throughout: range [-10, 10] with step h=1, so grid
    index i maps to coordinate -10 + i (e.g. index 4 -> -6).
    """
    def setUp(self):
        # two charge locations and a 7 nC magnitude shared by all tests
        self.position_1 = [0, 0, 0]
        self.position_2 = [-2, 4, 1]
        self.charge = 7e-9
    def tearDown(self):
        pass
    # Potential function volume tests
    def test_potential_volume_at_point_equal_class_potential(self):
        charge = Charge(self.position_1, self.charge)
        potential_volume = volume.potential(
            [charge],
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
        )
        # Point = [-6, -6, -6]
        potential_at_point = potential_volume[4][4][4]
        expected_potential = charge.potential([-6, -6, -6])
        np.testing.assert_equal(potential_at_point, expected_potential)
    def test_two_charge_potential_volume_eq_sum_of_class_potential(self):
        # opposite charges: the volume must equal the superposition (sum)
        charges = [Charge(self.position_1, self.charge)]
        charges.append(Charge(self.position_2, -self.charge))
        potential_volume = volume.potential(
            charges,
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
        )
        # Point = [-6, -5, -3]
        potential_at_point = potential_volume[4][5][7]
        expected_potential = np.add(
            charges[0].potential([-6, -5, -3]),
            charges[1].potential([-6, -5, -3]),
        )
        np.testing.assert_equal(potential_at_point, expected_potential)
    # Field function volume tests
    def test_field_volume_at_point_equal_class_field(self):
        charge = Charge(self.position_1, self.charge)
        field_volume = volume.field(
            [charge],
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
        )
        # Point = [-10, -6, -3]
        field_at_point = field_volume[0][4][7]
        expected_field = charge.field([-10, -6, -3])
        np.testing.assert_equal(field_at_point, expected_field)
    def test_two_charge_field_volume_eq_sum_of_class_field(self):
        charges = [Charge(self.position_1, self.charge)]
        charges.append(Charge(self.position_2, -self.charge))
        field_volume = volume.field(
            charges,
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
        )
        # Point = [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3]), charges[1].field([-6, -5, -3])
        )
        np.testing.assert_equal(field_at_point, expected_field)
    def test_charge_field_volume_x_components_eq_sum_of_class_field_x(self):
        charges = [Charge(self.position_1, self.charge)]
        charges.append(Charge(self.position_2, -self.charge))
        field_volume = volume.field(
            charges,
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
            component="x",
        )
        # Point = [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3], component="x"),
            charges[1].field([-6, -5, -3], component="x"),
        )
        np.testing.assert_equal(field_at_point, expected_field)
    def test_charge_field_volume_y_components_eq_sum_of_class_field_y(self):
        charges = [Charge(self.position_1, self.charge)]
        charges.append(Charge(self.position_2, -self.charge))
        field_volume = volume.field(
            charges,
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
            component="y",
        )
        # Point = [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3], component="y"),
            charges[1].field([-6, -5, -3], component="y"),
        )
        np.testing.assert_equal(field_at_point, expected_field)
    def test_charge_field_volume_z_components_eq_sum_of_class_field_z(self):
        charges = [Charge(self.position_1, self.charge)]
        charges.append(Charge(self.position_2, -self.charge))
        field_volume = volume.field(
            charges,
            x_range=[-10, 10],
            y_range=[-10, 10],
            z_range=[-10, 10],
            h=1,
            component="z",
        )
        # Point = [-6, -5, -3]
        field_at_point = field_volume[4][5][7]
        expected_field = np.add(
            charges[0].field([-6, -5, -3], component="z"),
            charges[1].field([-6, -5, -3], component="z"),
        )
        np.testing.assert_equal(field_at_point, expected_field)
    def test_field_returns_singleton_dim_for_single_slice(self):
        # a degenerate y range [1, 1] must keep a singleton axis, not drop it
        charge = Charge(self.position_1, self.charge)
        field_volume = volume.field(
            [charge],
            x_range=[-10, 10],
            y_range=[1, 1],
            z_range=[-10, 10],
            h=0.1,
        )
        expected_shape = (201, 1, 201)
        actual_shape = field_volume.shape
        np.testing.assert_equal(actual_shape, expected_shape)
    def test__arange_almost_equals_numpy_arange(self):
        # volume._arange is endpoint-inclusive, like np.arange(start, stop + h, h)
        actual = volume._arange(-10, 10, 0.1)  # Mine is rounder anyways =)
        expected = np.arange(-10, 10 + 0.1, 0.1)
        np.testing.assert_almost_equal(actual, expected)
| 28.06599 | 76 | 0.558691 | 5,425 | 0.98119 | 0 | 0 | 0 | 0 | 0 | 0 | 272 | 0.049195 |
ea99720b3ead320ff8f07f3efbaae5fb0d1681fb | 94 | py | Python | aiida_icl/__init__.py | chrisjsewell/aiida-cx1scheduler | fd9a1723f65d3858aee678bf035a7b63a03c5885 | [
"MIT"
] | null | null | null | aiida_icl/__init__.py | chrisjsewell/aiida-cx1scheduler | fd9a1723f65d3858aee678bf035a7b63a03c5885 | [
"MIT"
] | null | null | null | aiida_icl/__init__.py | chrisjsewell/aiida-cx1scheduler | fd9a1723f65d3858aee678bf035a7b63a03c5885 | [
"MIT"
] | 2 | 2020-07-01T07:43:13.000Z | 2020-07-07T20:00:02.000Z | """
AiiDA Plugin Template
Adapt this template for your own needs.
"""
__version__ = '0.3.4'
| 11.75 | 39 | 0.691489 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 77 | 0.819149 |
ea9a826f91fe349d070b4bf03eccad1b591d9ea9 | 794 | py | Python | opt/resource/test_out.py | cosee-concourse/mysql-resource | 765f9f5eae1026b024b1148c10a5605dad77f1ca | [
"MIT"
] | 2 | 2018-11-15T16:42:14.000Z | 2020-01-30T22:32:04.000Z | opt/resource/test_out.py | cosee-concourse/mysql-resource | 765f9f5eae1026b024b1148c10a5605dad77f1ca | [
"MIT"
] | null | null | null | opt/resource/test_out.py | cosee-concourse/mysql-resource | 765f9f5eae1026b024b1148c10a5605dad77f1ca | [
"MIT"
] | null | null | null | import unittest
from concourse_common import testutil
import out
class TestOut(unittest.TestCase):
def test_invalid_json(self):
testutil.put_stdin(
"""
{
"source": {
"user": "user",
"password": "password",
"host": "hostname"
},
"params": {
}
}
""")
self.assertEqual(out.execute('/'), -1)
def test_params_required_json(self):
testutil.put_stdin(
"""
{
"source": {
"user": "user",
"password": "password",
"host": "hostname"
}
}
""")
self.assertEqual(out.execute('/'), -1)
| 20.358974 | 46 | 0.399244 | 723 | 0.910579 | 0 | 0 | 0 | 0 | 0 | 0 | 441 | 0.555416 |
ea9d55f992bddeb052a30ca5d5fa389dffc4128f | 2,877 | py | Python | auctionCrawler/poxy.py | wd18535470628/PythonCraw | 9be3519da1219ad4ffc0d26cc97ceabcb0a7c06b | [
"Apache-2.0"
] | null | null | null | auctionCrawler/poxy.py | wd18535470628/PythonCraw | 9be3519da1219ad4ffc0d26cc97ceabcb0a7c06b | [
"Apache-2.0"
] | null | null | null | auctionCrawler/poxy.py | wd18535470628/PythonCraw | 9be3519da1219ad4ffc0d26cc97ceabcb0a7c06b | [
"Apache-2.0"
] | null | null | null | #-*- coding=utf-8 -*-
import urllib2, time, datetime
from lxml import etree
import sqlite3,time
class getProxy():
def __init__(self):
self.user_agent = "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"
self.header = {"User-Agent": self.user_agent}
self.dbname="proxy.db"
self.now = time.strftime("%Y-%m-%d")
def getContent(self, num):
nn_url = "http://www.xicidaili.com/nn/" + str(num)
#国内高匿
req = urllib2.Request(nn_url, headers=self.header)
resp = urllib2.urlopen(req, timeout=10)
content = resp.read()
et = etree.HTML(content)
result_even = et.xpath('//tr[@class=""]')
result_odd = et.xpath('//tr[@class="odd"]')
#因为网页源码中class 分开了奇偶两个class,所以使用lxml最方便的方式就是分开获取。
#刚开始我使用一个方式获取,因而出现很多不对称的情况,估计是网站会经常修改源码,怕被其他爬虫的抓到
#使用上面的方法可以不管网页怎么改,都可以抓到ip 和port
for i in result_even:
t1 = i.xpath("./td/text()")[:2]
print "IP:%s\tPort:%s" % (t1[0], t1[1])
if self.isAlive(t1[0], t1[1]):
self.insert_db(self.now,t1[0],t1[1])
for i in result_odd:
t2 = i.xpath("./td/text()")[:2]
print "IP:%s\tPort:%s" % (t2[0], t2[1])
if self.isAlive(t2[0], t2[1]):
self.insert_db(self.now,t2[0],t2[1])
def insert_db(self,date,ip,port):
dbname=self.dbname
try:
conn=sqlite3.connect(dbname)
except:
print "Error to open database%" %self.dbname
create_tb='''
CREATE TABLE IF NOT EXISTS PROXY
(DATE TEXT,
IP TEXT,
PORT TEXT
);
'''
conn.execute(create_tb)
insert_db_cmd='''
INSERT INTO PROXY (DATE,IP,PORT) VALUES ('%s','%s','%s');
''' %(date,ip,port)
conn.execute(insert_db_cmd)
conn.commit()
conn.close()
def loop(self,page):
for i in range(1,page):
self.getContent(i)
#查看爬到的代理IP是否还能用
def isAlive(self,ip,port):
proxy={'http':ip+':'+port}
print proxy
#使用这个方式是全局方法。
proxy_support=urllib2.ProxyHandler(proxy)
opener=urllib2.build_opener(proxy_support)
urllib2.install_opener(opener)
#使用代理访问腾讯官网,进行验证代理是否有效
test_url="http://www.qq.com"
req=urllib2.Request(test_url,headers=self.header)
try:
#timeout 设置为10,如果你不能忍受你的代理延时超过10,就修改timeout的数字
resp=urllib2.urlopen(req,timeout=10)
if resp.code==200:
print "work"
return True
else:
print "not work"
return False
except :
print "Not work"
return False
if __name__ == "__main__":
now = datetime.datetime.now()
print "Start at %s" % now
obj=getProxy()
obj.loop(5) | 30.284211 | 91 | 0.54814 | 3,008 | 0.930405 | 0 | 0 | 0 | 0 | 0 | 0 | 1,133 | 0.350448 |
ea9e52a902c07d07faf47b9e8450b5190bcaf693 | 214 | py | Python | Agents/utils/readMonFiles.py | mbay-SAG/cumulocity-thinedge-example | e0fa9a52fab16ad791093e0ab3d4383c653a10fd | [
"Apache-2.0"
] | 1 | 2021-03-12T12:22:46.000Z | 2021-03-12T12:22:46.000Z | Agents/utils/readMonFiles.py | mbay-SAG/cumulocity-thinedge-example | e0fa9a52fab16ad791093e0ab3d4383c653a10fd | [
"Apache-2.0"
] | 2 | 2020-11-20T16:58:35.000Z | 2020-11-25T15:37:06.000Z | Agents/utils/readMonFiles.py | SoftwareAG/cumulocity-thinedge-example | 6311a7f2e3d6515b85af5de24758f799e389aaed | [
"Apache-2.0"
] | null | null | null | import sys
def content(name):
try:
with open('../apama-mqtt-connect/monitors/' + str(name) + '.mon', 'r') as file:
data = file.read()
return data
except:
return []
| 19.454545 | 87 | 0.509346 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 42 | 0.196262 |
ea9e964b93453f94a10dfd91125416960a5c91b6 | 1,046 | py | Python | tests/test_search.py | capellaspace/console-client | bad631f207043231630c9b8c0893a1b4382d4061 | [
"MIT"
] | 23 | 2021-07-28T19:32:25.000Z | 2022-03-19T07:57:36.000Z | tests/test_search.py | capellaspace/console-client | bad631f207043231630c9b8c0893a1b4382d4061 | [
"MIT"
] | 6 | 2021-07-16T22:31:56.000Z | 2022-03-11T21:09:40.000Z | tests/test_search.py | capellaspace/console-client | bad631f207043231630c9b8c0893a1b4382d4061 | [
"MIT"
] | 1 | 2022-01-05T18:38:46.000Z | 2022-01-05T18:38:46.000Z | #!/usr/bin/env python
import pytest
from .test_data import get_search_test_cases, search_catalog_get_stac_ids
from capella_console_client import client
from capella_console_client.validate import _validate_uuid
from capella_console_client.search import _paginated_search
@pytest.mark.parametrize("search_args,expected", get_search_test_cases())
def test_search(search_args, expected, search_client):
search_client.search(**search_args)
assert client._paginated_search.call_args[0][1] == expected
def test_validate_uuid_raises():
with pytest.raises(ValueError):
_validate_uuid("123")
def test_paginated_search_single_page(single_page_search_client):
results = _paginated_search(single_page_search_client._sesh, payload={"limit": 1})
assert len(results) == 1
assert results[0] == search_catalog_get_stac_ids()["features"][0]
def test_paginated_search_multi_page(multi_page_search_client):
results = _paginated_search(multi_page_search_client._sesh, payload={"limit": 10})
assert len(results) == 10
| 33.741935 | 86 | 0.799235 | 0 | 0 | 0 | 0 | 232 | 0.221797 | 0 | 0 | 72 | 0.068834 |
eaa18ae73ed1cc5860673f523cf535ff57268751 | 6,514 | py | Python | lib/score_functions/mahalanobis_score.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | lib/score_functions/mahalanobis_score.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | lib/score_functions/mahalanobis_score.py | alabrashJr/Maha-Odd | cce4bab1f30589cf3d52636fe511c0269058679e | [
"MIT"
] | null | null | null | # Copyright (C) 2021. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
import numpy as np
from sklearn.decomposition import PCA
import torch
from torch.nn import Module
from lib.score_functions import register_score
class AbstractMahalanobisScore(Module):
def __init__(self, dim):
super(AbstractMahalanobisScore, self).__init__()
self.dim = dim
self.register_buffer(
'covariance_matrix',
torch.eye(dim, dtype=torch.float)
)
def __call__(self, features):
raise NotImplementedError
def update(self, train_feats, train_labels):
raise NotImplementedError
def _check_scores(self, scores):
if scores.dim() == 0:
return scores.view(-1)
return scores
def update_inv_convmat(self, centered_feats):
self.covariance_matrix.zero_()
for feat in centered_feats:
self.covariance_matrix += feat.view(-1, 1) @ feat.view(-1, 1).transpose(0, 1)
self.covariance_matrix = self.covariance_matrix / centered_feats.shape[0]
self.covariance_matrix = self.covariance_matrix.inverse()
@register_score('mahalanobis')
class MahalanobisScore(AbstractMahalanobisScore):
def __init__(self, dim, num_labels):
super(MahalanobisScore, self).__init__(dim)
self.num_labels = num_labels
self.register_buffer(
'means',
torch.zeros(self.num_labels, dim, dtype=torch.float)
)
def _get_min_dist(self, r):
dist = r @ self.covariance_matrix @ r.transpose(2, 3)
dist = dist.squeeze()
min_dist, min_idx = dist.min(-1)
return min_dist, min_idx
def _get_centered_vectors(self, features):
r = features.unsqueeze(1) - self.means.unsqueeze(0)
r = r.unsqueeze(2)
return r
def __call__(self, features):
r = self._get_centered_vectors(features)
min_dist, _ = self._get_min_dist(r)
return self._check_scores(min_dist)
def update_means(self, train_feats, train_labels):
self.means.zero_()
label_cnt = torch.zeros(self.num_labels, 1, device=train_feats.device)
for label, feat in zip(train_labels, train_feats):
self.means[label] += feat
label_cnt[label] += 1
self.means.div_(label_cnt)
def center_feats(self, train_feats, train_labels):
centered_feats = torch.zeros_like(train_feats)
for idx, (label, feat) in enumerate(zip(train_labels, train_feats)):
centered_feats[idx] = feat - self.means[label]
return centered_feats
def update(self, train_feats, train_labels):
self.update_means(train_feats, train_labels)
centered_feats = self.center_feats(train_feats, train_labels)
self.update_inv_convmat(centered_feats)
@register_score('euclidean')
class EuclideanDistanceScore(MahalanobisScore):
def update(self, train_feats, train_labels):
self.update_means(train_feats, train_labels)
@register_score('mahalanobis-pca')
class MahalanobisPCAScore(MahalanobisScore):
def __init__(self, dim, num_labels, start_elem):
super(MahalanobisPCAScore, self).__init__(dim, num_labels)
self.start_elem = start_elem
self.pca = PCA(n_components=dim).fit(np.random.randn(dim, dim))
def update_pca(self, centered_feats):
centered_feats = centered_feats.cpu().numpy()
self.pca = PCA(n_components=self.dim).fit(centered_feats)
def update(self, train_feats, train_labels):
self.update_means(train_feats, train_labels)
centered_feats = self.center_feats(train_feats, train_labels)
self.update_pca(centered_feats)
def __call__(self, features):
r = self._get_centered_vectors(features)
min_dist, min_idx = self._get_min_dist(r)
r_centered = r.squeeze(2)[torch.arange(len(min_idx)), min_idx]
r_components = self.pca.transform(r_centered.cpu().numpy())
scores = np.power(r_components[:, self.start_elem:], 2) / \
self.pca.explained_variance_[self.start_elem:].reshape(1, -1)
scores = torch.from_numpy(scores).sum(-1)
return self._check_scores(scores)
@register_score('marginal-mahalanobis')
class MarginalMahalanobisScore(AbstractMahalanobisScore):
def __init__(self, dim):
super(MarginalMahalanobisScore, self).__init__(dim)
self.register_buffer(
'mean',
torch.zeros(dim, dtype=torch.float)
)
def __call__(self, features):
r = features - self.mean
r = r.unsqueeze(1)
dist = r @ self.covariance_matrix @ r.transpose(1, 2)
return self._check_scores(dist.squeeze())
def center_feats(self, train_feats):
centered_feats = torch.zeros_like(train_feats)
for idx, feat in enumerate(train_feats):
centered_feats[idx] = feat - self.mean
return centered_feats
def update(self, train_feats, train_labels):
self.mean = train_feats.mean(dim=0)
centered_feats = self.center_feats(train_feats)
self.update_inv_convmat(centered_feats)
@register_score('marginal-mahalanobis-pca')
class MarginalMahalanobisPCAScore(MarginalMahalanobisScore):
def __init__(self, dim, start_elem):
super(MarginalMahalanobisPCAScore, self).__init__(dim)
self.start_elem = start_elem
self.pca = PCA(n_components=dim).fit(np.random.randn(dim, dim))
def __call__(self, features):
r = features - self.mean
r_components = self.pca.transform(r.cpu().numpy())
scores = np.power(r_components[:, self.start_elem:], 2) / \
self.pca.explained_variance_[self.start_elem:].reshape(1, -1)
ood_scores = torch.from_numpy(scores).sum(-1)
return self._check_scores(ood_scores)
def update_pca(self, centered_feats):
centered_feats = centered_feats.cpu().numpy()
self.pca = PCA(n_components=self.dim).fit(centered_feats)
def update(self, train_feats, train_labels):
self.mean = train_feats.mean(dim=0)
centered_feats = self.center_feats(train_feats)
self.update_pca(centered_feats)
| 37.436782 | 89 | 0.682069 | 5,759 | 0.884096 | 0 | 0 | 5,029 | 0.772029 | 0 | 0 | 525 | 0.080596 |
575b35db9c979401cf63c36c72e33a04e3269d4a | 69 | py | Python | win/devkit/other/pymel/extras/completion/py/maya/app/sceneAssembly/__init__.py | leegoonz/Maya-devkit | b81fe799b58e854e4ef16435426d60446e975871 | [
"ADSL"
] | 21 | 2015-04-27T05:01:36.000Z | 2021-11-22T13:45:14.000Z | python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/sceneAssembly/__init__.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | null | null | null | python/maya/site-packages/pymel-1.0.5/extras/completion/py/maya/app/sceneAssembly/__init__.py | 0xb1dd1e/PipelineConstructionSet | 621349da1b6d1437e95d0c9e48ee9f36d59f19fd | [
"BSD-3-Clause"
] | 9 | 2018-06-02T09:18:49.000Z | 2021-12-20T09:24:35.000Z | from . import adskPrepareRender
import maya.cmds as cmd
import maya
| 13.8 | 31 | 0.811594 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
575bf6573208aa2419b03e74d851335d108eb00a | 503 | py | Python | ask_user_data.py | sukarita/basics-phyton | 8e05d3b19fc0dcbf2a4c16473aa7fbe45e5eb27e | [
"MIT"
] | null | null | null | ask_user_data.py | sukarita/basics-phyton | 8e05d3b19fc0dcbf2a4c16473aa7fbe45e5eb27e | [
"MIT"
] | null | null | null | ask_user_data.py | sukarita/basics-phyton | 8e05d3b19fc0dcbf2a4c16473aa7fbe45e5eb27e | [
"MIT"
] | null | null | null | #Ask user for name
name = input("What is your name?: ")
#Ask user for the age
age = input("How old are you? ")
#Ask user for city
city = input("What city do you live in? ")
#Ask user what they enjoy
hobbies = input("What are your hobbies?, What do you love doing? ")
#Create output text using placeholders to concatenate data
string = "Your name is {} and you are {} years old. You live in {} and you love {}"
output = string.format(name, age, city, hobbies)
#Print output to screen
print(output)
| 25.15 | 83 | 0.697813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 356 | 0.707753 |
575d46348486aea3cf156ef36a9337580b89061b | 2,568 | py | Python | skexplain/main/PermutationImportance/multiprocessing_utils.py | monte-flora/scikit-explain | d93ca4c77d1d47e613479ae36cc055ffaafea88c | [
"MIT"
] | null | null | null | skexplain/main/PermutationImportance/multiprocessing_utils.py | monte-flora/scikit-explain | d93ca4c77d1d47e613479ae36cc055ffaafea88c | [
"MIT"
] | 7 | 2022-03-06T01:51:33.000Z | 2022-03-28T13:06:34.000Z | skexplain/main/PermutationImportance/multiprocessing_utils.py | monte-flora/scikit-explain | d93ca4c77d1d47e613479ae36cc055ffaafea88c | [
"MIT"
] | null | null | null | """These are utilities designed for carefully handling communication between
processes while multithreading.
The code for ``pool_imap_unordered`` is copied nearly wholesale from GrantJ's
`Stack Overflow answer here
<https://stackoverflow.com/questions/5318936/python-multiprocessing-pool-lazy-iteration?noredirect=1&lq=1>`_.
It allows for a lazy imap over an iterable and the return of very large objects
"""
from multiprocessing import Process, Queue, cpu_count
try:
from Queue import Full as QueueFull
from Queue import Empty as QueueEmpty
except ImportError: # python3
from queue import Full as QueueFull
from queue import Empty as QueueEmpty
__all__ = ["pool_imap_unordered"]
def worker(func, recvq, sendq):
for args in iter(recvq.get, None):
# The args are training_data, scoring_data, var_idx
# Thus, we want to return the var_idx and then
# send those args to the abstract runner.
result = (args[-1], func(*args))
sendq.put(result)
def pool_imap_unordered(func, iterable, procs=cpu_count()):
"""Lazily imaps in an unordered manner over an iterable in parallel as a
generator
:Author: Grant Jenks <https://stackoverflow.com/users/232571/grantj>
:param func: function to perform on each iterable
:param iterable: iterable which has items to map over
:param procs: number of workers in the pool. Defaults to the cpu count
:yields: the results of the mapping
"""
# Create queues for sending/receiving items from iterable.
sendq = Queue(procs)
recvq = Queue()
# Start worker processes.
for rpt in range(procs):
Process(target=worker, args=(func, sendq, recvq)).start()
# Iterate iterable and communicate with worker processes.
send_len = 0
recv_len = 0
itr = iter(iterable)
try:
value = next(itr)
while True:
try:
sendq.put(value, True, 0.1)
send_len += 1
value = next(itr)
except QueueFull:
while True:
try:
result = recvq.get(False)
recv_len += 1
yield result
except QueueEmpty:
break
except StopIteration:
pass
# Collect all remaining results.
while recv_len < send_len:
result = recvq.get()
recv_len += 1
yield result
# Terminate worker processes.
for rpt in range(procs):
sendq.put(None)
| 29.181818 | 109 | 0.633178 | 0 | 0 | 1,547 | 0.602414 | 0 | 0 | 0 | 0 | 1,177 | 0.458333 |
575d9a80ceb32000695149a7edeb2329ae95f171 | 4,786 | py | Python | mi/dataset/parser/test/test_flntu_x_mmp_cds.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 1 | 2015-05-10T01:08:44.000Z | 2015-05-10T01:08:44.000Z | mi/dataset/parser/test/test_flntu_x_mmp_cds.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 33 | 2017-04-25T19:53:45.000Z | 2022-03-18T17:42:18.000Z | mi/dataset/parser/test/test_flntu_x_mmp_cds.py | petercable/mi-dataset | d3c1607ea31af85fbba5719a31d4a60bf39f8dd3 | [
"BSD-2-Clause"
] | 31 | 2015-03-04T01:01:09.000Z | 2020-10-28T14:42:12.000Z | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_flcdrpf_ckl_mmp_cds
@file marine-integrations/mi/dataset/parser/test/test_flcdrpf_ckl_mmp_cds.py
@author Mark Worden
@brief Test code for a flcdrpf_ckl_mmp_cds data parser
"""
import os
from nose.plugins.attrib import attr
from mi.core.exceptions import SampleException
from mi.core.log import get_logger
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.flntu_x.mmp_cds.resource import RESOURCE_PATH
from mi.dataset.parser.mmp_cds_base import MmpCdsParser
from mi.dataset.test.test_parser import ParserUnitTestCase
log = get_logger()
@attr('UNIT', group='mi')
class FlntuXMmpCdsParserUnitTestCase(ParserUnitTestCase):
"""
flntu_x_mmp_cds Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self.config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.flntu_x_mmp_cds',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'FlntuXMmpCdsParserDataParticle'
}
def test_simple(self):
"""
This test reads in a small number of particles and verifies the result of one of the particles.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
particles = parser.get_records(6)
# this yml file only has particle 0 in it
self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)
# this yml file only has particle 1 in it
self.assert_particles(particles[1:2], 'second.yml', RESOURCE_PATH)
# this yml file only has particle 5 in it
self.assert_particles(particles[5:6], 'good.yml', RESOURCE_PATH)
def test_get_many(self):
"""
This test exercises retrieving 20 particles, verifying the 20th particle, then retrieves 30 particles
and verifies the 30th particle.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
particles = parser.get_records(20)
# Should end up with 20 particles
self.assertTrue(len(particles) == 20)
# this yml file only has particle 0 in it
self.assert_particles(particles[0:1], 'first.yml', RESOURCE_PATH)
# this yml file only has particle 19 in it
self.assert_particles(particles[19:20], 'get_many_one.yml', RESOURCE_PATH)
particles = parser.get_records(30)
# Should end up with 30 particles
self.assertTrue(len(particles) == 30)
# this yml file only has particle 29 in it
self.assert_particles(particles[29:30], 'get_many_two.yml', RESOURCE_PATH)
def test_long_stream(self):
"""
This test exercises retrieve approximately 200 particles.
"""
# Using two concatenated msgpack files to simulate two chunks to get more particles.
with open(os.path.join(RESOURCE_PATH, 'flntu_concat.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
# Attempt to retrieve 200 particles, but we will retrieve less
particles = parser.get_records(200)
# Should end up with 172 particles
self.assertTrue(len(particles) == 184)
def test_bad_data_one(self):
"""
This test verifies that a SampleException is raised when msgpack data is malformed.
"""
with open(os.path.join(RESOURCE_PATH, 'flntu_1_20131124T005004_458-BAD.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
parser.get_records(1)
self.assertEqual(len(self.exception_callback_value), 1)
self.assert_(isinstance(self.exception_callback_value[0], SampleException))
def test_bad_data_two(self):
"""
This test verifies that a SampleException is raised when an entire msgpack buffer is not msgpack.
"""
with open(os.path.join(RESOURCE_PATH, 'not-msg-pack.mpk'), 'rb') as stream_handle:
parser = MmpCdsParser(self.config, stream_handle, self.exception_callback)
parser.get_records(1)
self.assertTrue(len(self.exception_callback_value) >= 1)
self.assert_(isinstance(self.exception_callback_value[0], SampleException))
| 36.815385 | 110 | 0.661513 | 4,092 | 0.854994 | 0 | 0 | 4,119 | 0.860635 | 0 | 0 | 1,725 | 0.360426 |
575dbd1d29e5f8459314be7e87d2a432414925fc | 6,602 | py | Python | lib/python3.6/site-packages/pkginfo/commandline.py | backcountryinfosec/iocparser | 99d6b2cf8506c7731ea2fcb1b75689eac2da7ced | [
"MIT"
] | 4 | 2018-07-27T05:48:13.000Z | 2020-05-12T06:54:55.000Z | lib/python3.6/site-packages/pkginfo/commandline.py | backcountryinfosec/iocparser | 99d6b2cf8506c7731ea2fcb1b75689eac2da7ced | [
"MIT"
] | 4 | 2020-07-26T02:10:42.000Z | 2021-03-31T18:48:58.000Z | lib/python3.6/site-packages/pkginfo/commandline.py | backcountryinfosec/iocparser | 99d6b2cf8506c7731ea2fcb1b75689eac2da7ced | [
"MIT"
] | 2 | 2020-02-20T20:59:54.000Z | 2022-03-07T06:28:51.000Z | """Print the metadata for one or more Python package distributions.
Usage: %prog [options] path+
Each 'path' entry can be one of the following:
o a source distribution: in this case, 'path' should point to an existing
archive file (.tar.gz, .tar.bz2, or .zip) as generated by 'setup.py sdist'.
o a binary distribution: in this case, 'path' should point to an existing
archive file (.egg)
o a "develop" checkout: in ths case, 'path' should point to a directory
intialized via 'setup.py develop' (under setuptools).
o an installed package: in this case, 'path' should be the importable name
of the package.
"""
try:
from configparser import ConfigParser
except ImportError: # pragma: NO COVER
from ConfigParser import ConfigParser
from csv import writer
import optparse
import os
import sys
from .utils import get_metadata
def _parse_options(args=None):
parser = optparse.OptionParser(usage=__doc__)
parser.add_option("-m", "--metadata-version", default=None,
help="Override metadata version")
parser.add_option("-f", "--field", dest="fields", action="append",
help="Specify an output field (repeatable)",
)
parser.add_option("-d", "--download-url-prefix",
dest="download_url_prefix",
help="Download URL prefix",
)
parser.add_option("--simple", dest="output", action="store_const",
const='simple', default='simple',
help="Output as simple key-value pairs",
)
parser.add_option("-s", "--skip", dest="skip", action="store_true",
default=True,
help="Skip missing values in simple output",
)
parser.add_option("-S", "--no-skip", dest="skip", action="store_false",
help="Don't skip missing values in simple output",
)
parser.add_option("--single", dest="output", action="store_const",
const='single',
help="Output delimited values",
)
parser.add_option("--item-delim", dest="item_delim", action="store",
default=';',
help="Delimiter for fields in single-line output",
)
parser.add_option("--sequence-delim", dest="sequence_delim",
action="store", default=',',
help="Delimiter for multi-valued fields",
)
parser.add_option("--csv", dest="output", action="store_const",
const='csv',
help="Output as CSV",
)
parser.add_option("--ini", dest="output", action="store_const",
const='ini',
help="Output as INI",
)
options, args = parser.parse_args(args)
if len(args)==0:
parser.error("Pass one or more files or directories as arguments.")
else:
return options, args
class Base(object):
_fields = None
def __init__(self, options):
if options.fields:
self._fields = options.fields
def finish(self): # pragma: NO COVER
pass
class Simple(Base):
def __init__(self, options):
super(Simple, self).__init__(options)
self._skip = options.skip
def __call__(self, meta):
for field in self._fields or list(meta):
value = getattr(meta, field)
if (not self._skip) or (value is not None and value!=()):
print("%s: %s" % (field, value))
class SingleLine(Base):
_fields = None
def __init__(self, options):
super(SingleLine, self).__init__(options)
self._item_delim = options.item_delim
self._sequence_delim = options.sequence_delim
def __call__(self, meta):
if self._fields is None:
self._fields = list(meta)
values = []
for field in self._fields:
value = getattr(meta, field)
if isinstance(value, (tuple, list)):
value = self._sequence_delim.join(value)
else:
value = str(value)
values.append(value)
print(self._item_delim.join(values))
class CSV(Base):
_writer = None
def __init__(self, options):
super(CSV, self).__init__(options)
self._sequence_delim = options.sequence_delim
def __call__(self, meta):
if self._fields is None:
self._fields = list(meta) # first dist wins
fields = self._fields
if self._writer is None:
self._writer = writer(sys.stdout)
self._writer.writerow(fields)
values = []
for field in fields:
value = getattr(meta, field)
if isinstance(value, (tuple, list)):
value = self._sequence_delim.join(value)
else:
value = str(value)
values.append(value)
self._writer.writerow(values)
class INI(Base):
_fields = None
def __init__(self, options):
super(INI, self).__init__(options)
self._parser = ConfigParser()
def __call__(self, meta):
name = meta.name
version = meta.version
section = '%s-%s' % (name, version)
if self._parser.has_section(section):
raise ValueError('Duplicate distribution: %s' % section)
self._parser.add_section(section)
for field in self._fields or list(meta):
value = getattr(meta, field)
if isinstance(value, (tuple, list)):
value = '\n\t'.join(value)
self._parser.set(section, field, value)
def finish(self):
self._parser.write(sys.stdout) # pragma: NO COVER
_FORMATTERS = {
'simple': Simple,
'single': SingleLine,
'csv': CSV,
'ini': INI,
}
def main(args=None):
"""Entry point for pkginfo tool
"""
options, paths = _parse_options(args)
format = getattr(options, 'output', 'simple')
formatter = _FORMATTERS[format](options)
for path in paths:
meta = get_metadata(path, options.metadata_version)
if meta is None:
continue
if options.download_url_prefix:
if meta.download_url is None:
filename = os.path.basename(path)
meta.download_url = '%s/%s' % (options.download_url_prefix,
filename)
formatter(meta)
formatter.finish()
| 32.204878 | 77 | 0.567404 | 2,737 | 0.414571 | 0 | 0 | 0 | 0 | 0 | 0 | 1,622 | 0.245683 |
575f3985ac6999b8a0ba401f38fcd2e12e1655c9 | 2,109 | py | Python | app_jumanji/migrations/0007_resume.py | arifgafizov/jumanji_v2 | e06ec7556544abef66b35cace5a0456f6021bcca | [
"MIT"
] | 1 | 2021-12-02T11:23:39.000Z | 2021-12-02T11:23:39.000Z | app_jumanji/migrations/0007_resume.py | arifgafizov/jumanji_v2 | e06ec7556544abef66b35cace5a0456f6021bcca | [
"MIT"
] | null | null | null | app_jumanji/migrations/0007_resume.py | arifgafizov/jumanji_v2 | e06ec7556544abef66b35cace5a0456f6021bcca | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-08-16 18:03
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('app_jumanji', '0006_auto_20200815_2218'),
]
operations = [
migrations.CreateModel(
name='Resume',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100)),
('status', models.CharField(choices=[('not_in_search', 'Не ищу работу'),
('consideration', 'Рассматриваю предложения'), ('in_search', 'Ищу работу')], max_length=100)),
('salary', models.FloatField()),
('specialty', models.CharField(choices=[('frontend', 'Фронтенд'), ('backend', 'Бэкенд'),
('gamedev', 'Геймдев'), ('devops', 'Девопс'), ('design', 'Дизайн'), ('products', 'Продукты'),
('management', 'Менеджмент'), ('testing', 'Тестирование')], max_length=100)),
('grade', models.CharField(choices=[('intern', 'intern'), ('junior', 'junior'), ('middle', 'middle'),
('senior', 'senior'), ('lead', 'lead')], max_length=100)),
('education', models.CharField(choices=[('missing', 'Отсутствует'), ('secondary', 'Среднее'),
('vocational', 'Средне-специальное'), ('incomplete_higher', 'Неполное высшее'),
('higher', 'Высшее')], max_length=100)),
('experience', models.CharField(max_length=500)),
('portfolio', models.CharField(max_length=500)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='resume', to=settings.AUTH_USER_MODEL)),
],
),
]
| 54.076923 | 141 | 0.556188 | 2,111 | 0.929956 | 0 | 0 | 0 | 0 | 0 | 0 | 809 | 0.356388 |
575f61b15fc74259a459d89fa6848538684d3cf4 | 1,135 | py | Python | molecule/resources/tests_err/test_err.py | fletort/rpi_noobs_recovery | ee81e148bf730ec110bbfda1e46b3ce0820ebcca | [
"MIT"
] | null | null | null | molecule/resources/tests_err/test_err.py | fletort/rpi_noobs_recovery | ee81e148bf730ec110bbfda1e46b3ce0820ebcca | [
"MIT"
] | null | null | null | molecule/resources/tests_err/test_err.py | fletort/rpi_noobs_recovery | ee81e148bf730ec110bbfda1e46b3ce0820ebcca | [
"MIT"
] | null | null | null | import os
import pytest
import json
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.fixture()
def waited_failed_task_name(host):
all_variables = host.ansible.get_variables()
return all_variables['waited_failed_task_name']
@pytest.fixture()
def waited_failed_result_msg(host):
all_variables = host.ansible.get_variables()
return all_variables['waited_failed_result_msg']
@pytest.fixture()
def failure_info(host):
all_variables = host.ansible.get_variables()
json_file_path = "{}/failure_{}.json".format(
os.environ['MOLECULE_EPHEMERAL_DIRECTORY'],
all_variables['inventory_hostname']
)
with open(json_file_path) as json_file:
data = json.load(json_file)
return data
def test_failed_task_name(host, failure_info, waited_failed_task_name):
assert failure_info['task_name'] == waited_failed_task_name
def test_failed_message(host, failure_info, waited_failed_result_msg):
assert failure_info['return']['msg'] == waited_failed_result_msg
| 27.682927 | 71 | 0.765639 | 0 | 0 | 0 | 0 | 646 | 0.569163 | 0 | 0 | 175 | 0.154185 |
575fbd0bfed909e1050ca5a148b9e03632bf0387 | 54,744 | py | Python | sed_vis/visualization.py | TUT-ARG/sed_vis | 41ae82ce4191caf1c3d9cdd985c53b135d7550a0 | [
"MIT"
] | 79 | 2016-03-07T01:35:55.000Z | 2022-03-31T02:52:08.000Z | sed_vis/sed_vis/visualization.py | jim-schwoebel/sound_event_detection | 24cd385d23da6382ab92c37587d9478e4324cda0 | [
"ECL-2.0",
"Apache-2.0"
] | 6 | 2017-10-11T09:41:29.000Z | 2021-10-15T23:55:34.000Z | sed_vis/visualization.py | TUT-ARG/sed_vis | 41ae82ce4191caf1c3d9cdd985c53b135d7550a0 | [
"MIT"
] | 27 | 2016-05-21T15:59:52.000Z | 2021-12-03T07:06:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Visualization
==================
This is module contains a simple visualizer to show event lists along with the audio.
The visualizer can show multiple event lists for the same reference audio allowing the
comparison of the reference and estimated event lists.
.. image:: visualization.png
.. autosummary::
:toctree: generated/
EventListVisualizer
EventListVisualizer.show
"""
from __future__ import print_function, absolute_import
from sed_vis.util import AudioPlayer, AudioThread
import dcase_util
import numpy
import math
import time
import scipy.fftpack
import scipy.signal
from numpy.lib.stride_tricks import as_strided
from sys import platform as _platform
import matplotlib
if _platform == "darwin":
# MAC OS X
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.patches as patches
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.widgets import Button, SpanSelector
from matplotlib.patches import Rectangle
class EventListVisualizer(object):
"""Event List visualizer.
Examples
--------
>>> # Load audio signal first
>>> audio_container = dcase_util.containers.AudioContainer().load('data/audio.wav')
>>> # Load event lists
>>> reference_event_list = dcase_util.containers.MetaDataContainer().load('data/reference.txt')
>>> estimated_event_list = dcase_util.containers.MetaDataContainer().load('data/estimated.txt')
>>> event_lists = {'reference': reference_event_list, 'estimated': estimated_event_list}
>>> # Visualize the data
>>> vis = sed_vis.visualization.EventListVisualizer(event_lists=event_lists,audio_signal=audio_container.data,sampling_rate=audio_container.fs)
>>> vis.show()
"""
    def __init__(self, *args, **kwargs):
        """Constructor

        Parameters
        ----------
        event_lists : dict of event lists
            Dict of event lists
        event_list_order : list
            Order of event list, if None alphabetical order used
            (Default value=None)
        active_events : list
            List of active sound event classes, if None all used.
            (Default value=None)
        audio_signal : np.ndarray
            Audio signal
        sampling_rate : int
            Sampling rate [0:96000]
        mode: str
            Signal visualization mode ['spectrogram', 'time_domain']
            (Default value = 'spectrogram')
        spec_hop_size : int
            Spectrogram calculation hop length in samples
            (Default value=256)
        spec_win_size: int ,
            Spectrogram calculation window length in samples
            (Default value=1024)
        spec_fft_size: int
            FFT length
            (Default value=1024)
        spec_cmap : str
            Color map used for spectrogram, see examples: http://matplotlib.org/examples/color/colormaps_reference.html
            (Default value='magma')
        spec_interpolation : str
            Matrix interpolation method for spectrogram (e.g. nearest, bilinear, bicubic, quadric, gaussian)
            (Default value='nearest')
        event_roll_cmap : str
            Color map used for the event roll, see examples: http://matplotlib.org/examples/color/colormaps_reference.html
            (Default value='rainbow')
        minimum_event_length : float > 0.0
            Minimum event length in seconds, shorten than given are filtered out from the output.
            (Default value=None)
        minimum_event_gap : float > 0.0
            Minimum allowed gap between events in seconds from same event label class.
            (Default value=None)
        color : color hex
            Main color code used in highlighting things
        use_blit : bool
            Use blit
            (Default value=False)
        publication_mode : bool
            Strip visual elements, can be used to prepare figures for publications.
            (Default value=False)
        show_selector : bool
            Show highlight selector
            (Default value=True)
        labels: dict
            Text labels overrides
        button_color : dict
            Button color overrides

        Returns
        -------
        Nothing

        """

        # Event lists: merge all given lists to collect the union of event labels.
        if kwargs.get('event_lists', []):
            self._event_lists = kwargs.get('event_lists', [])
            if kwargs.get('event_list_order') is None:
                # No explicit order given -> alphabetical order of list names.
                self._event_list_order = sorted(self._event_lists.keys())
            else:
                self._event_list_order = kwargs.get('event_list_order')
            events = dcase_util.containers.MetaDataContainer()
            for event_list_label in self._event_lists:
                events += self._event_lists[event_list_label]
            # Reverse sort so labels render top-to-bottom alphabetically on the roll.
            self.event_labels = sorted(events.unique_event_labels, reverse=True)
            self.event_label_count = events.event_label_count
            if kwargs.get('active_events') is None:
                self.active_events = self.event_labels
            else:
                self.active_events = sorted(kwargs.get('active_events'), reverse=True)
            # Post-process: drop too-short events, merge events across small gaps.
            for name in self._event_lists:
                self._event_lists[name] = self._event_lists[name].process_events(
                    minimum_event_length=kwargs.get('minimum_event_length'),
                    minimum_event_gap=kwargs.get('minimum_event_gap')
                )
        else:
            self._event_lists = None

        # Audio: peak-normalize the signal and wrap it into a player.
        if kwargs.get('audio_signal') is not None and kwargs.get('sampling_rate') is not None:
            audio_signal = kwargs.get('audio_signal') / numpy.max(numpy.abs(kwargs.get('audio_signal')))
            self.audio = AudioPlayer(
                signal=audio_signal,
                sampling_rate=kwargs.get('sampling_rate')
            )

        # Visualization mode; anything unrecognized falls back to spectrogram.
        if kwargs.get('mode') not in ['spectrogram', 'time_domain']:
            self.mode = 'spectrogram'
        else:
            self.mode = kwargs.get('mode')

        self.auto_play = kwargs.get('auto_play', False)

        # Spectrogram parameters
        self.spec_hop_size = kwargs.get('spec_hop_size', 256)
        self.spec_win_size = kwargs.get('spec_win_size', 1024)
        self.spec_fft_size = kwargs.get('spec_fft_size', 1024)
        self.spec_cmap = kwargs.get('spec_cmap', 'magma')
        self.spec_interpolation = kwargs.get('spec_interpolation', 'nearest')

        # Colors and text labels (caller overrides are merged on top of defaults).
        self.color = kwargs.get('color', '#339933')
        self.button_color = {
            'off': 'grey',
            'on': 'red'
        }
        self.button_color.update(kwargs.get('button_color',{}))
        self.labels = {
            'close': 'Close',
            'play': 'Play',
            'stop': 'Stop',
            'quit': 'Quit',
            'selection': 'Selection',
            'waveform': 'Waveform',
            'spectrogram': 'Spectrogram',
            'verification': 'Verification',
            'verification_info': '',
            'info': '',
        }
        self.labels.update(kwargs.get('labels',{}))
        self.indicator_line_color = self.color

        # Initialize members
        self.fig = None
        # Panels
        self.ax1 = None
        self.ax2 = None
        self.ax3 = None
        self.D = None
        self.x = None
        self.timedomain_locations = None
        self.begin_time = None
        self.end_time = None
        self.playback_offset = 0
        # Play indicators (cursor rectangles + their FuncAnimation drivers)
        self.animation_event_roll_panel = None
        self.animation_selector_panel = None
        self.animation_highlight_panel = None
        self.event_panel_indicator_line = None
        self.selector_panel_indicator_line = None
        self.highlight_panel_indicator_line = None
        self.slider_time = None
        # Buttons
        self.buttons = {
            'play': True,
            'pause': False,
            'stop': True,
            'close': True,
            'quit': False,
            'verification': False,
        }
        self.buttons.update(kwargs.get('buttons',{}))
        self.button_play = None
        self.button_pause = None
        self.button_stop = None
        self.button_close = None

        self.use_blit = kwargs.get('use_blit', False)
        self.publication_mode = kwargs.get('publication_mode', False)
        self.show_selector = kwargs.get('show_selector', True)

        # Default layout parameters; panel *_loc / *_height values are rows in
        # a 100-row subplot2grid layout (see generate_GUI).
        self.panel_title_font_size = 14
        self.legend_font_size = 12
        self.event_roll_label_font_size = 14
        self.event_roll_time_font_size = 10
        self.waveform_selector_point_hop = kwargs.get('waveform_selector_point_hop', 1000)
        self.waveform_highlight_point_hop = 100
        self.waveform_highlight_color = self.color
        self.selector_panel_height = 10
        self.highlight_panel_height = 25
        self.event_roll_panel_height = 50
        self.selector_panel_loc = 0
        self.highlight_panel_loc = 17
        self.event_roll_panel_loc = 45
        self.event_roll_item_opacity = 0.5
        self.fig_shape = (14, 6)
        self._quit = False

        # Publication mode: denser sampling, reversed colormap, neutral colors.
        if self.publication_mode:
            self.panel_title_font_size = 14
            self.legend_font_size = 16
            self.event_roll_time_font_size = 12
            self.spec_cmap = 'magma_r'
            self.spec_interpolation = 'bicubic'
            if not self.waveform_selector_point_hop:
                self.waveform_selector_point_hop = 5000
            self.waveform_highlight_point_hop = 500
            self.waveform_highlight_color = 'black'

        # Layout variants depending on selector visibility / event list presence.
        if self.show_selector:
            if self.mode == 'time_domain':
                self.fig_shape = (30, 4)
            elif self.mode == 'spectrogram':
                self.fig_shape = (20, 5)
            if self._event_lists:
                if self.event_label_count == 1:
                    self.selector_panel_height = 10
                    self.highlight_panel_height = 33
                    self.event_roll_panel_height = 33
                    self.selector_panel_loc = 0
                    self.highlight_panel_loc = 17
                    self.event_roll_panel_loc = 53
                    self.event_roll_time_font_size = 16
                else:
                    self.selector_panel_height = 10
                    self.highlight_panel_height = 15
                    self.event_roll_panel_height = 60
                    self.selector_panel_loc = 0
                    self.highlight_panel_loc = 17
                    self.event_roll_panel_loc = 35
            else:
                # No event lists: the event roll panel is collapsed.
                self.selector_panel_height = 30
                self.highlight_panel_height = 66
                self.event_roll_panel_height = 0
                self.selector_panel_loc = 0
                self.highlight_panel_loc = 37
                self.event_roll_panel_loc = 0
                self.event_roll_time_font_size = 16
        else:
            if self.mode == 'time_domain':
                self.fig_shape = (30, 4)
            elif self.mode == 'spectrogram':
                self.fig_shape = (20, 4)
            self.selector_panel_height = 0
            self.highlight_panel_height = 15
            self.event_roll_panel_height = 75
            self.selector_panel_loc = 0
            self.highlight_panel_loc = 0
            self.event_roll_panel_loc = 17
            self.event_roll_item_opacity = 1.0

        # NOTE(review): cm.get_cmap is deprecated in matplotlib >= 3.7;
        # matplotlib.colormaps[name] is the replacement — confirm target version.
        self.label_colormap = cm.get_cmap(name=kwargs.get('event_roll_cmap','rainbow'))
    def generate_GUI(self):
        """Generates the visualizer GUI.

        Builds the matplotlib figure: optional selector panel (full waveform
        overview), the highlight panel (spectrogram or waveform of the current
        selection), the event roll panel, and the control buttons/widgets.

        Parameters
        ----------
        Nothing

        Returns
        -------
        Nothing

        """
        self.fig = plt.figure(figsize=self.fig_shape)

        # Selector panel
        # ====================================
        # Full-signal waveform overview used to select a time span to zoom into.
        if self.show_selector:
            self.ax1 = plt.subplot2grid(shape=(100, 1), loc=(self.selector_panel_loc, 0), rowspan=self.selector_panel_height, colspan=1)
            self.timedomain_locations = numpy.arange(0, self.audio.signal.shape[0])
            # Downsample by waveform_selector_point_hop to keep drawing fast.
            self.ax1.fill_between(
                self.timedomain_locations[::self.waveform_selector_point_hop],
                self.audio.signal[::self.waveform_selector_point_hop],
                -self.audio.signal[::self.waveform_selector_point_hop],
                color='0.5'
            )
            plt.yticks([])
            plt.axis('tight')
            self.ax1.set_xlim(self.timedomain_locations[0], self.timedomain_locations[-1])
            self.ax1.set_ylim(-1, 1)
            self.time_ticks(
                locations=self.timedomain_locations,
                n_ticks=10,
                sampling_rate=self.audio.fs
            )
            self.ax1.yaxis.grid(False, which='major')
            self.ax1.yaxis.grid(False, which='minor')
            self.ax1.xaxis.grid(True, which='major')
            self.ax1.xaxis.grid(True, which='minor')
            self.ax1.yaxis.set_label_position("right")
            plt.title(self.labels['selection'], fontsize=self.panel_title_font_size)

        # Highlight panel
        # ====================================
        # Spectrogram or waveform view of the currently selected span.
        self.ax2 = plt.subplot2grid(shape=(100, 1), loc=(self.highlight_panel_loc, 0), rowspan=self.highlight_panel_height, colspan=1)
        self.x = numpy.arange(0, self.audio.duration_samples)
        self.begin_time = self.x[0] / float(self.audio.fs)
        self.end_time = self.x[-1] / float(self.audio.fs)
        if self.mode == 'spectrogram':
            self.D = self.get_spectrogram(
                audio=self.audio.signal,
                n_fft=self.spec_fft_size,
                win_length=self.spec_win_size,
                hop_length=self.spec_hop_size
            )
            self.plot_spectrogram(
                self.D,
                sampling_rate=self.audio.fs,
                interpolation=self.spec_interpolation,
                cmap=self.spec_cmap
            )
            if not self.publication_mode:
                self.ax2.yaxis.grid(False, which='major')
                self.ax2.yaxis.grid(False, which='minor')
                self.ax2.xaxis.grid(False, which='major')
                self.ax2.xaxis.grid(False, which='minor')
                plt.ylabel(self.labels['spectrogram'], fontsize=self.panel_title_font_size)
            else:
                self.ax2.get_yaxis().set_visible(False)
        elif self.mode == 'time_domain':
            self.ax2.fill_between(
                self.x[::self.waveform_highlight_point_hop], self.audio.signal[::self.waveform_highlight_point_hop], -self.audio.signal[::self.waveform_highlight_point_hop],
                color=self.waveform_highlight_color
            )
            self.ax2.set_ylim(-1, 1)
            self.ax2.set_xlim(self.x[0], self.x[-1])
            segment_begin = self.x[0] / float(self.audio.fs)
            segment_end = self.x[-1] / float(self.audio.fs)
            locs = numpy.arange(segment_begin, segment_end)
            plt.xlim([locs[0], locs[-1]])
            self.time_ticks(locations=locs, n_ticks=20)
            # NOTE(review): ticks are generated above and then cleared here —
            # looks intentional (grid lines remain without labels), but confirm.
            plt.yticks([])
            plt.xticks([])
            self.ax2.yaxis.grid(False, which='major')
            self.ax2.yaxis.grid(False, which='minor')
            self.ax2.xaxis.grid(True, which='major')
            self.ax2.xaxis.grid(True, which='minor')
            if not self.publication_mode:
                plt.ylabel(self.labels['waveform'], fontsize=self.panel_title_font_size)
            # NOTE(review): timedomain_locations is only set when show_selector
            # is True — time_domain mode without selector would fail here.
            self.ax2.set_xlim(self.timedomain_locations[0], self.timedomain_locations[-1])
        self.ax2.yaxis.set_label_position("right")

        # Event roll panel
        # ====================================
        if self._event_lists:
            event_list_count = len(self._event_lists)
            self.begin_time = 0
            self.end_time = self.audio.duration_seconds
            if event_list_count == 1:
                # Single list: color events per label.
                norm = colors.Normalize(
                    vmin=0,
                    vmax=self.event_label_count
                )
                self.ax3 = plt.subplot2grid(
                    shape=(100, 1),
                    loc=(self.event_roll_panel_loc, 0),
                    rowspan=self.event_roll_panel_height+10,
                    colspan=1
                )
            else:
                # Multiple lists: color events per list so they can be compared.
                norm = colors.Normalize(
                    vmin=0,
                    vmax=event_list_count
                )
                self.ax3 = plt.subplot2grid(
                    shape=(100, 1),
                    loc=(self.event_roll_panel_loc, 0),
                    rowspan=self.event_roll_panel_height,
                    colspan=1
                )
            m = cm.ScalarMappable(norm=norm, cmap=self.label_colormap)
            line_margin = 0.1
            y = 0
            # Each label row is split into one sub-row per event list.
            annotation_height = (1.0-line_margin*2)/event_list_count
            for label in self.active_events:
                for event_list_id, event_list_label in enumerate(self._event_list_order):
                    offset = (len(self._event_list_order)-1-event_list_id) * annotation_height
                    event_y = y - 0.5 + line_margin + offset
                    # grid line
                    line = plt.Rectangle(
                        (0, y-0.5),
                        height=0.001,
                        width=self.end_time,
                        edgecolor='black',
                        facecolor='black'
                    )
                    plt.gca().add_patch(line)
                    for event in self._event_lists[event_list_label]:
                        if event['event_label'] == label:
                            event_length = event['offset'] - event['onset']
                            if 'probability' in event:
                                # Probability is mapped to rectangle opacity.
                                if event_list_count == 1:
                                    color = m.to_rgba(x=y + offset, alpha=event['probability'])
                                else:
                                    color = m.to_rgba(x=event_list_id, alpha=event['probability'])
                                rectangle = plt.Rectangle(
                                    (event['onset'], event_y),
                                    height=annotation_height,
                                    width=event_length,
                                    edgecolor='black',
                                    facecolor=color,
                                    linewidth=0,
                                    picker=5
                                )
                            else:
                                if event_list_count == 1:
                                    color = m.to_rgba(x=y + offset)
                                else:
                                    color = m.to_rgba(x=event_list_id)
                                rectangle = plt.Rectangle(
                                    (event['onset'], event_y),
                                    height=annotation_height,
                                    width=event_length,
                                    edgecolor='black',
                                    facecolor=color,
                                    linewidth=0,
                                    alpha=self.event_roll_item_opacity,
                                    picker=5
                                )
                            # picker=5 makes the rectangle clickable (on_pick).
                            plt.gca().add_patch(rectangle)
                y += 1
            # grid line
            line = plt.Rectangle((0, y - 0.5),
                                 height=0.001,
                                 width=self.end_time,
                                 edgecolor='black',
                                 facecolor='black')
            plt.gca().add_patch(line)

            # Axis
            plt.axis([0, self.audio.duration_seconds, -0.5, len(self.active_events) + 0.5])
            locs = numpy.arange(0, self.audio.duration_seconds, 0.00001)
            plt.xlim([locs[0], locs[-1]])
            plt.axis('tight')
            # X axis
            self.ax3.xaxis.grid(True, which='major')
            self.ax3.xaxis.grid(True, which='minor')
            plt.tick_params(axis='x', which='major', labelsize=self.event_roll_time_font_size)
            # Y axis
            plt.yticks(
                numpy.arange(len(self.active_events)),
                self.active_events,
                fontsize=self.event_roll_label_font_size
            )
            plt.ylabel('Event Roll', fontsize=self.panel_title_font_size)
            self.ax3.yaxis.set_label_position('right')
            self.ax3.yaxis.grid(False, which='major')
            self.ax3.yaxis.grid(False, which='minor')
            # Set event list legends panel
            self.ax3.set_xlim(self.begin_time, self.end_time)
            if event_list_count > 1:
                # One colored swatch + label per event list, laid out along the bottom.
                span = 0
                for event_list_id, event_list_label in enumerate(self._event_list_order):
                    ax_legend_color = plt.axes([0.225+span, 0.02, 0.02, 0.02])
                    Button(
                        ax_legend_color,
                        '',
                        color=m.to_rgba(event_list_id),
                        hovercolor=m.to_rgba(event_list_id)
                    )
                    ax_legend_label = plt.axes([0.225+0.025+span, 0.02, 0.10, 0.04])
                    ax_legend_label.axis('off')
                    ax_legend_label.text(0, 0, event_list_label, fontsize=self.legend_font_size)
                    span += 0.15

        # Span selector on the overview panel drives on_select (zooming).
        if self.show_selector:
            self.slider_time = SpanSelector(
                ax=self.ax1,
                onselect=self.on_select,
                minspan=None,
                direction='horizontal',
                span_stays=True,
                useblit=self.use_blit,
                onmove_callback=None,
                rectprops=dict(alpha=0.15, facecolor=self.color)
            )

        if not self.publication_mode:
            ax_legend_label = plt.axes([0.92, 0.02, 0.10, 0.04])
            ax_legend_label.axis('off')
            ax_legend_label.text(0, 0, 'sed_vis', fontsize=16)

            # Buttons
            # ====================================
            ax_play = plt.axes([0.125, 0.93, 0.07, 0.04])
            ax_stop = plt.axes([0.205, 0.93, 0.07, 0.04])
            ax_close = plt.axes([0.92, 0.93, 0.07, 0.04])
            self.button_play = Button(
                ax_play,
                self.labels['play'],
                color=self.button_color['off'],
                hovercolor=self.button_color['on']
            )
            self.button_stop = Button(
                ax_stop,
                self.labels['stop'],
                color=self.button_color['off'],
                hovercolor=self.button_color['on']
            )
            self.button_close = Button(
                ax_close,
                self.labels['close'],
                color=self.button_color['off'],
                hovercolor=self.button_color['on']
            )
            self.button_play.on_clicked(self.on_play)
            self.button_stop.on_clicked(self.on_stop)
            self.button_close.on_clicked(self.on_close_window)
            # Clicking event rectangles plays the clicked segment.
            self.fig.canvas.mpl_connect('pick_event', self.on_pick)
        else:
            # Publication mode: no widgets, just tighten the margins.
            plt.subplots_adjust(left=0.12, bottom=0.05, right=.97, top=0.95, wspace=0, hspace=0)

        if self.auto_play:
            self.on_play(None)
def show(self):
"""Shows the visualizer.
Parameters
----------
Nothing
Returns
-------
Nothing
"""
self.generate_GUI()
plt.show()
def save(self, filename=None):
if filename:
self.generate_GUI()
plt.savefig(filename, bbox_inches='tight')
    def on_close_window(self, event):
        """Close-button callback: stop playback, release the player, close figure."""
        if self.audio.playing:
            self.audio.stop()
        # Drop the player so its audio resources can be released.
        self.audio = None
        plt.close(self.fig)
def on_pick(self, event):
if isinstance(event.artist, Rectangle):
if self.audio.playing:
self.audio.stop() # Stop current playback
try:
self.event_panel_indicator_line.set_visible(False)
self.event_panel_indicator_line.remove()
except:
pass
if self.animation_event_roll_panel is not None:
self.animation_event_roll_panel._stop()
self.animation_event_roll_panel = None
try:
self.selector_panel_indicator_line.set_visible(False)
self.selector_panel_indicator_line.remove()
except:
pass
if self.animation_selector_panel is not None:
self.animation_selector_panel._stop()
self.animation_selector_panel = None
try:
self.highlight_panel_indicator_line.set_visible(False)
self.highlight_panel_indicator_line.remove()
except:
pass
if self.animation_highlight_panel is not None:
self.animation_highlight_panel._stop()
self.animation_highlight_panel = None
self.fig.canvas.draw()
time.sleep(0.25) # Wait until playback has stopped
self.playback_offset = event.artist.get_x()
self.audio.play(
offset=event.artist.get_x(),
duration=event.artist.get_width()
)
# Set up play indicators animations
self.animation_event_roll_panel = animation.FuncAnimation(
self.fig,
self.event_roll_panel_play_indicator_update,
init_func=self.event_roll_panel_play_indicator_init,
interval=10,
blit=self.use_blit,
repeat=False
)
self.animation_selector_panel = animation.FuncAnimation(
self.fig,
self.selector_panel_play_indicator_update,
init_func=self.selector_panel_play_indicator_init,
interval=10,
blit=self.use_blit,
repeat=False
)
self.animation_highlight_panel = animation.FuncAnimation(
self.fig,
self.highlight_panel_play_indicator_update,
init_func=self.highlight_panel_play_indicator_init,
interval=10,
blit=self.use_blit,
repeat=False
)
self.fig.canvas.draw()
    def on_select(self, x_min, x_max):
        """SpanSelector callback: zoom the highlight/event panels to a span.

        `x_min`/`x_max` are in samples (selector panel axis). A span narrower
        than 10 samples is treated as a click and resets the zoom to the
        whole signal.
        """
        x_min = int(x_min)
        x_max = int(x_max)
        if math.fabs(x_min-x_max) < 10:
            # Reset highlight
            self.begin_time = self.x[0] / float(self.audio.fs)
            self.end_time = self.x[-1] / float(self.audio.fs)
            if self.ax3:
                self.ax3.set_xlim(self.begin_time, self.end_time)
            # Set signal highlight panel
            if self.mode == 'spectrogram':
                self.ax2.set_xlim(0, self.D.shape[1])
            elif self.mode == 'time_domain':
                self.ax2.set_xlim(self.timedomain_locations[0], self.timedomain_locations[-1])
            # Hide the persistent selection rectangle.
            self.slider_time.stay_rect.set_visible(False)
        else:
            # Set annotation panel (event roll x-axis is in seconds).
            self.begin_time = float(x_min) / self.audio.fs
            self.end_time = float(x_max) / self.audio.fs
            if self.ax3:
                self.ax3.set_xlim(self.begin_time, self.end_time)
            # Set signal highlight panel
            if self.mode == 'spectrogram':
                # Spectrogram x-axis is in frames; convert samples -> frames.
                spec_min = int(x_min / float(self.spec_hop_size))
                spec_max = int(x_max / float(self.spec_hop_size))
                self.ax2.set_xlim(spec_min, spec_max)
            elif self.mode == 'time_domain':
                index_min, index_max = numpy.searchsorted(self.x, (x_min, x_max))
                index_max = min(len(self.x) - 1, index_max)
                this_x = self.timedomain_locations[index_min:index_max]
                self.ax2.set_xlim(this_x[0], this_x[-1])
            self.slider_time.stay_rect.set_visible(True)
        self.fig.canvas.draw()
def on_play(self, event):
if self.audio.playing:
self.audio.stop() # Stop current playback
try:
self.event_panel_indicator_line.set_visible(False)
self.event_panel_indicator_line.remove()
except:
pass
if self.animation_event_roll_panel is not None:
self.animation_event_roll_panel._stop()
self.animation_event_roll_panel = None
try:
self.selector_panel_indicator_line.set_visible(False)
self.selector_panel_indicator_line.remove()
except:
pass
if self.animation_selector_panel is not None:
self.animation_selector_panel._stop()
self.animation_selector_panel = None
try:
self.highlight_panel_indicator_line.set_visible(False)
self.highlight_panel_indicator_line.remove()
except:
pass
if self.animation_highlight_panel is not None:
self.animation_highlight_panel._stop()
self.animation_highlight_panel = None
self.fig.canvas.draw()
time.sleep(0.25) # Wait until playback has stopped
self.audio.play(
offset=self.begin_time,
duration=self.end_time-self.begin_time
)
self.button_play.color = self.button_color['on']
self.button_play.hovercolor = self.button_color['on']
self.button_stop.color = self.button_color['off']
self.button_stop.hovercolor = self.button_color['off']
self.playback_offset = self.begin_time
self.animation_event_roll_panel = animation.FuncAnimation(
self.fig,
self.event_roll_panel_play_indicator_update,
init_func=self.event_roll_panel_play_indicator_init,
interval=50,
blit=self.use_blit,
repeat=False
)
self.animation_selector_panel = animation.FuncAnimation(
self.fig,
self.selector_panel_play_indicator_update,
init_func=self.selector_panel_play_indicator_init,
interval=50,
blit=self.use_blit,
repeat=False
)
self.animation_highlight_panel = animation.FuncAnimation(
self.fig,
self.highlight_panel_play_indicator_update,
init_func=self.highlight_panel_play_indicator_init,
interval=50,
blit=self.use_blit,
repeat=False
)
self.fig.canvas.draw()
def on_pause(self, event):
self.audio.pause()
self.button_play.color = self.button_color['off']
self.button_play.hovercolor = self.button_color['off']
self.button_stop.color = self.button_color['off']
self.button_stop.hovercolor = self.button_color['off']
self.fig.canvas.draw()
def on_stop(self, event):
self.audio.stop()
self.button_play.color = self.button_color['off']
self.button_play.hovercolor = self.button_color['off']
self.button_stop.color = self.button_color['off']
self.button_stop.hovercolor = self.button_color['off']
self.fig.canvas.draw()
    def on_quit(self, event):
        """Quit-button callback: flag the session as quit, then close the window."""
        self._quit = True
        self.on_close_window(event=event)
    @property
    def quit(self):
        # True once the user quit via on_quit; polled by external drivers.
        return self._quit
def event_roll_panel_play_indicator_init(self):
indicator_width = (self.end_time-self.begin_time) / 1000
if indicator_width > 0.5:
indicator_width = 0.5
self.event_panel_indicator_line = patches.Rectangle(
(self.playback_offset + self.audio.get_time(), -0.5),
height=self.event_label_count,
width=indicator_width,
edgecolor=self.indicator_line_color,
facecolor=self.indicator_line_color,
alpha=0.8
)
self.ax3.add_patch(self.event_panel_indicator_line)
return self.event_panel_indicator_line,
    def event_roll_panel_play_indicator_update(self, i):
        """Animation callback: advance or hide the event roll playback cursor."""
        if self.audio.playing:
            # Cursor position = playback start offset + elapsed time (seconds).
            self.event_panel_indicator_line.set_x(self.playback_offset + self.audio.get_time())
        else:
            # Playback ended: hide the cursor and stop this animation.
            self.event_panel_indicator_line.set_visible(False)
            if self.animation_event_roll_panel is not None:
                self.animation_event_roll_panel.event_source.stop()
                self.animation_event_roll_panel = None
            self.fig.canvas.draw()
        return self.event_panel_indicator_line,
    def selector_panel_play_indicator_init(self):
        """Animation init: create the playback cursor on the selector panel."""
        # Selector panel y-axis spans [-1, 1]; the cursor is placed at x=0 and
        # repositioned on the first update call.
        self.selector_panel_indicator_line = patches.Rectangle(
            (0, -1),
            height=2,
            width=0.5,
            edgecolor=self.indicator_line_color,
            facecolor=self.indicator_line_color,
            alpha=0.8
        )
        self.ax1.add_patch(self.selector_panel_indicator_line)
        return self.selector_panel_indicator_line,
    def selector_panel_play_indicator_update(self, i):
        """Animation callback: advance or hide the selector panel cursor."""
        if self.audio.playing:
            # Selector panel x-axis is in samples; convert seconds -> samples.
            self.selector_panel_indicator_line.set_x((self.playback_offset + self.audio.get_time())*self.audio.fs)
        else:
            self.selector_panel_indicator_line.set_visible(False)
            if self.animation_selector_panel is not None:
                self.animation_selector_panel.event_source.stop()
                self.animation_selector_panel = None
        return self.selector_panel_indicator_line,
def highlight_panel_play_indicator_init(self):
indicator_width = 0.5
if self.mode == 'spectrogram':
indicator_height = self.spec_fft_size
indicator_y = 0
elif self.mode == 'time_domain':
indicator_height = 2
indicator_y = -1
else:
indicator_height = 2
indicator_y = -1
self.highlight_panel_indicator_line = patches.Rectangle(
(0, indicator_y),
height=indicator_height,
width=indicator_width,
edgecolor=self.indicator_line_color,
facecolor=self.indicator_line_color,
alpha=0.8
)
self.ax2.add_patch(self.highlight_panel_indicator_line)
return self.highlight_panel_indicator_line,
    def highlight_panel_play_indicator_update(self, i):
        """Animation callback: advance or hide the highlight panel cursor."""
        if self.audio.playing:
            if self.mode == 'spectrogram':
                # Spectrogram x-axis is in frames: seconds -> samples -> frames.
                self.highlight_panel_indicator_line.set_x(
                    (self.playback_offset + self.audio.get_time()) * self.audio.fs / float(self.spec_hop_size)
                )
            elif self.mode == 'time_domain':
                # Time-domain x-axis is in samples.
                self.highlight_panel_indicator_line.set_x(
                    (self.playback_offset + self.audio.get_time()) * self.audio.fs
                )
        else:
            self.highlight_panel_indicator_line.set_visible(False)
            if self.animation_highlight_panel is not None:
                self.animation_highlight_panel.event_source.stop()
                self.animation_highlight_panel = None
        return self.highlight_panel_indicator_line,
def time_ticks(self, locations, n_ticks=10, sampling_rate=44100):
times = self.samples_to_time(locations, sampling_rate=sampling_rate)
positions = numpy.linspace(0, len(locations)-1, n_ticks, endpoint=True).astype(int)
locations = locations[positions]
times = times[positions]
times = ['{:0.2f}s'.format(t) for t in times]
return plt.xticks(locations, times)
@staticmethod
def time_to_samples(time, sampling_rate=44100):
return (numpy.atleast_1d(time) * sampling_rate).astype(int)
@staticmethod
def samples_to_time(samples, sampling_rate=44100):
return numpy.atleast_1d(samples) / float(sampling_rate)
@staticmethod
def get_spectrogram(audio, n_fft=256, win_length=1024, hop_length=1024):
fft_window = scipy.signal.hann(win_length, sym=False).reshape((-1, 1))
audio = numpy.pad(array=audio,
pad_width=int(n_fft // 2),
mode='reflect')
n_frames = 1 + int((len(audio) - n_fft) / hop_length)
y_frames = as_strided(x=audio,
shape=(n_fft, n_frames),
strides=(audio.itemsize, int(hop_length * audio.itemsize)))
S = numpy.empty((int(1 + n_fft // 2), y_frames.shape[1]), dtype=numpy.complex64, order='F')
max_memory_block = 2**8 * 2**10
n_columns = int(max_memory_block / (S.shape[0] * S.itemsize))
for bl_s in range(0, S.shape[1], n_columns):
bl_t = min(bl_s + n_columns, S.shape[1])
# RFFT and Conjugate here to match phase from DPWE code
S[:, bl_s:bl_t] = scipy.fftpack.fft(fft_window * y_frames[:, bl_s:bl_t], axis=0)[:S.shape[0]].conj()
magnitude = numpy.abs(S) ** 2
ref = numpy.max(magnitude)
amin=1e-10
top_db = 80.0
log_spec = 10.0 * numpy.log10(numpy.maximum(amin, magnitude))
log_spec -= 10.0 * numpy.log10(numpy.maximum(amin, ref))
log_spec = numpy.maximum(log_spec, log_spec.max() - top_db)
return log_spec
    @staticmethod
    def plot_spectrogram(data, sampling_rate=44100, n_yticks=5, interpolation='nearest', cmap='magma'):
        """Draw a spectrogram matrix into the current axes and return the image.

        `data` is (frequency_bins, frames), drawn with origin='lower'. The
        x-axis ticks are cleared; n_yticks frequency labels are placed on the
        y-axis.
        """
        axes = plt.imshow(data, aspect='auto', origin='lower', interpolation=interpolation, cmap=plt.get_cmap(cmap))
        # X axis
        plt.xticks([])
        # Y axis
        positions = numpy.linspace(0, data.shape[0]-1, n_yticks, endpoint=True).astype(int)
        # Linear bin index -> frequency in Hz (Nyquist at the top row).
        values = numpy.linspace(0, 0.5 * sampling_rate, data.shape[0], endpoint=True).astype(int)
        # NOTE(review): t_log/t_inv build a log-like remapping of bin indices
        # for the tick labels while the image itself stays linear — presumably
        # intentional (log-style frequency labels); confirm the mapping.
        t_log = (data.shape[0] * (1 - numpy.logspace(-numpy.log2(data.shape[0]), 0, data.shape[0], base=2, endpoint=True))[::-1]).astype(int)
        t_inv = numpy.arange(len(t_log))
        for i in range(len(t_log)-1):
            t_inv[t_log[i]:t_log[i+1]] = i
        plt.yticks(positions, values[t_inv[positions]])
        return axes
class EventListVerifier(EventListVisualizer):
    def __init__(self, *args, **kwargs):
        """Constructor

        Extends EventListVisualizer with a verification answer panel.

        Extra parameters
        ----------------
        verification_values : list of str
            Labels for the verification answer buttons
            (Default value=['A', 'B', 'C'])
        verification_button_colors : list of dict
            Per-answer 'off'/'on' button color pairs, one dict per answer
        button_color : dict
            'off'/'on' colors for the transport buttons
        """
        super(EventListVerifier, self).__init__(*args, **kwargs)

        # Selected answer (index and value); None until the user answers.
        self.verification_answer_id = None
        self.verification_answer_value = None

        self.verification_values = kwargs.get('verification_values', [
            'A',
            'B',
            'C'
        ])
        # One off/on color pair per verification answer button.
        self.verification_button_colors = kwargs.get('verification_button_colors',[
            {
                'off': '#AAAAAA',
                'on':'#b32400',
            },
            {
                'off': '#AAAAAA',
                'on': '#996600',
            },
            {
                'off': '#AAAAAA',
                'on': '#009933',
            },
        ])
        self.button_color = kwargs.get('button_color', {
            'off': '#AAAAAA',
            'on': '#666666'
        })

        # Verification answer widgets, keyed per answer (filled by the GUI).
        self.button_verification = {}
        self.button_verification_axis = {}

        if self.buttons['verification']:
            # Verification panel
            # ====================================
            # Shift the standard panels down to make room for the answer row.
            self.selector_panel_loc = 20
            self.highlight_panel_loc = 37
            self.event_roll_panel_loc = 75
            self.selector_panel_height = 10
            self.highlight_panel_height = 35
            self.event_roll_panel_height = 20
def generate_GUI(self):
"""Generates the visualizer GUI."""
self.fig = plt.figure(figsize=self.fig_shape)
# Selector panel
# ====================================
if self.show_selector:
self.ax1 = plt.subplot2grid(
shape=(100, 1),
loc=(self.selector_panel_loc, 0),
rowspan=self.selector_panel_height,
colspan=1
)
self.timedomain_locations = numpy.arange(0, self.audio.signal.shape[0])
self.ax1.fill_between(
self.timedomain_locations[::self.waveform_selector_point_hop],
self.audio.signal[::self.waveform_selector_point_hop],
-self.audio.signal[::self.waveform_selector_point_hop],
color='0.5'
)
plt.yticks([])
plt.axis('tight')
self.ax1.set_xlim(self.timedomain_locations[0], self.timedomain_locations[-1])
self.ax1.set_ylim(-1, 1)
self.time_ticks(
locations=self.timedomain_locations,
n_ticks=10,
sampling_rate=self.audio.fs
)
self.ax1.yaxis.grid(False, which='major')
self.ax1.yaxis.grid(False, which='minor')
self.ax1.xaxis.grid(True, which='major')
self.ax1.xaxis.grid(True, which='minor')
self.ax1.yaxis.set_label_position("right")
plt.title(self.labels['selection'], fontsize=self.panel_title_font_size)
# Highlight panel
# ====================================
self.ax2 = plt.subplot2grid(
shape=(100, 1),
loc=(self.highlight_panel_loc, 0),
rowspan=self.highlight_panel_height,
colspan=1
)
self.x = numpy.arange(0, self.audio.duration_samples)
self.begin_time = float(self.x[0]) / self.audio.fs
self.end_time = float(self.x[-1]) / self.audio.fs
if self.mode == 'spectrogram':
self.D = self.get_spectrogram(
audio=self.audio.signal,
n_fft=self.spec_fft_size,
win_length=self.spec_win_size,
hop_length=self.spec_hop_size
)
self.plot_spectrogram(
self.D,
sampling_rate=self.audio.fs,
interpolation=self.spec_interpolation,
cmap=self.spec_cmap
)
if not self.publication_mode:
self.ax2.yaxis.grid(False, which='major')
self.ax2.yaxis.grid(False, which='minor')
self.ax2.xaxis.grid(False, which='major')
self.ax2.xaxis.grid(False, which='minor')
plt.ylabel(self.labels['spectrogram'], fontsize=self.panel_title_font_size)
else:
self.ax2.get_yaxis().set_visible(False)
elif self.mode == 'time_domain':
self.ax2.fill_between(
self.x[::self.waveform_highlight_point_hop],
self.audio.signal[::self.waveform_highlight_point_hop],
-self.audio.signal[::self.waveform_highlight_point_hop],
color=self.waveform_highlight_color
)
self.ax2.set_ylim(-1, 1)
self.ax2.set_xlim(self.x[0], self.x[-1])
segment_begin = self.time_to_samples(time=0, sampling_rate=self.audio.fs)
segment_end = self.time_to_samples(time=self.audio.duration_seconds, sampling_rate=self.audio.fs)
locs = numpy.arange(segment_begin, segment_end)
plt.xlim([locs[0], locs[-1]])
self.time_ticks(locations=locs, n_ticks=20)
plt.yticks([])
plt.xticks([])
self.ax2.yaxis.grid(False, which='major')
self.ax2.yaxis.grid(False, which='minor')
self.ax2.xaxis.grid(True, which='major')
self.ax2.xaxis.grid(True, which='minor')
if not self.publication_mode:
plt.ylabel(self.labels['waveform'], fontsize=self.panel_title_font_size)
self.ax2.yaxis.set_label_position("right")
plt.axis('tight')
# Event roll panel
# ====================================
if self._event_lists:
event_list_count = len(self._event_lists)
self.begin_time = 0
self.end_time = self.audio.duration_seconds
if event_list_count == 1:
norm = colors.Normalize(
vmin=0,
vmax=self.event_label_count
)
self.ax3 = plt.subplot2grid(
shape=(100, 1),
loc=(self.event_roll_panel_loc, 0),
rowspan=self.event_roll_panel_height+10,
colspan=1
)
else:
norm = colors.Normalize(
vmin=0,
vmax=event_list_count
)
self.ax3 = plt.subplot2grid(
shape=(100, 1),
loc=(self.event_roll_panel_loc, 0),
rowspan=self.event_roll_panel_height,
colspan=1
)
m = cm.ScalarMappable(
norm=norm,
cmap=self.label_colormap
)
line_margin = 0.1
y = 0
annotation_height = (1.0-line_margin*2)/event_list_count
for label in self.active_events:
for event_list_id, event_list_label in enumerate(self._event_list_order):
offset = (len(self._event_list_order)-1-event_list_id) * annotation_height
event_y = y - 0.5 + line_margin + offset
# grid line
line = plt.Rectangle(
(0, y-0.5),
height=0.001,
width=self.end_time,
edgecolor='black',
facecolor='black'
)
plt.gca().add_patch(line)
for event in self._event_lists[event_list_label]:
if event['event_label'] == label:
event_length = event['offset'] - event['onset']
if event_list_count == 1:
color = m.to_rgba(y + offset)
else:
color = m.to_rgba(event_list_id)
rectangle = plt.Rectangle(
(event['onset'], event_y),
height=annotation_height,
width=event_length,
edgecolor='black',
facecolor=color,
linewidth=0,
alpha=self.event_roll_item_opacity,
picker=5
)
plt.gca().add_patch(rectangle)
y += 1
# Grid line
line = plt.Rectangle(
(0, y - 0.5),
height=0.001,
width=self.end_time,
edgecolor='black',
facecolor='black'
)
plt.gca().add_patch(line)
# Axis
plt.axis([0, self.audio.duration_seconds, -0.5, len(self.active_events) + 0.5])
locs = numpy.arange(0, self.audio.duration_seconds, 0.00001)
plt.xlim([locs[0], locs[-1]])
plt.axis('tight')
# X axis
self.ax3.xaxis.grid(True, which='major')
self.ax3.xaxis.grid(True, which='minor')
plt.tick_params(
axis='x',
which='major',
labelsize=self.event_roll_time_font_size
)
# Y axis
plt.yticks(
numpy.arange(len(self.active_events)),
self.active_events,
fontsize=self.event_roll_label_font_size
)
plt.ylabel('Event Roll', fontsize=self.panel_title_font_size)
self.ax3.yaxis.set_label_position('right')
self.ax3.yaxis.grid(False, which='major')
self.ax3.yaxis.grid(False, which='minor')
# Set event list legends panel
self.ax3.set_xlim(self.begin_time, self.end_time)
if event_list_count > 1:
span = 0
for event_list_id, event_list_label in enumerate(self._event_list_order):
ax_legend_color = plt.axes([0.225+span, 0.02, 0.02, 0.02])
Button(
ax_legend_color,
'',
color=m.to_rgba(event_list_id),
hovercolor=m.to_rgba(event_list_id)
)
ax_legend_label = plt.axes([0.225+0.025+span, 0.02, 0.10, 0.04])
ax_legend_label.axis('off')
ax_legend_label.text(0, 0, event_list_label, fontsize=self.legend_font_size)
span += 0.15
if self.show_selector:
self.slider_time = SpanSelector(
ax=self.ax1,
onselect=self.on_select,
minspan=None,
direction='horizontal',
span_stays=True,
useblit=self.use_blit,
onmove_callback=None,
rectprops=dict(alpha=0.15, facecolor=self.color)
)
if not self.publication_mode:
ax_legend_label = plt.axes([0.92, 0.02, 0.10, 0.04])
ax_legend_label.axis('off')
ax_legend_label.text(0, 0, 'sed_vis', fontsize=16)
# Buttons
# ====================================
if self.buttons['play']:
ax_play = plt.axes([0.125, 0.93, 0.07, 0.04])
self.button_play = Button(
ax_play,
self.labels['play'],
color=self.button_color['off'],
hovercolor=self.button_color['on']
)
self.button_play.on_clicked(self.on_play)
if self.buttons['stop']:
ax_stop = plt.axes([0.205, 0.93, 0.07, 0.04])
self.button_stop = Button(
ax_stop,
self.labels['stop'],
color=self.button_color['off'],
hovercolor=self.button_color['on']
)
self.button_stop.on_clicked(self.on_stop)
if self.buttons['close']:
ax_close = plt.axes([0.92, 0.93, 0.07, 0.04])
self.button_close = Button(
ax_close,
self.labels['close'],
color=self.button_color['off'],
hovercolor=self.button_color['on']
)
self.button_close.on_clicked(self.on_close_window)
if self.buttons['quit']:
ax_quit = plt.axes([0.78, 0.93, 0.07, 0.04])
self.button_quit = Button(
ax_quit,
self.labels['quit'],
color=self.button_color['off'],
hovercolor=self.button_color['on']
)
self.button_quit.on_clicked(self.on_quit)
if self.buttons['verification']:
start_x = 0.125 + 0.2 #25
end_x = 0.125+0.775 -0.2 #1 - start_x
width = (end_x - start_x)
spacing = width / float(len(self.verification_values))
# Verification text
plt.axes([0.125, 0.88, 0.775, 0.04], frameon=False)
plt.xticks([])
plt.yticks([])
plt.text(
0.5, 0.5,
self.labels['verification'],
fontsize=14,
horizontalalignment='center',
verticalalignment='center'
)
# Verification-info text
plt.axes([0.00, 0.80, 0.125 + 0.18, 0.08], frameon=False)
plt.xticks([])
plt.yticks([])
plt.text(
1, 0.5,
self.labels['verification_info'],
fontsize=18,
horizontalalignment='right',
verticalalignment='center'
)
for i, label in enumerate(self.verification_values):
self.button_verification_axis[i] = plt.axes([start_x+(i*spacing), 0.80, 0.12, 0.08])
self.button_verification[i] = Button(
self.button_verification_axis[i],
label,
color=self.verification_button_colors[i]['off'],
hovercolor=self.verification_button_colors[i]['off']
)
self.button_verification[i].label.set_fontsize(12)
self.button_verification[i].on_clicked(self.on_verification)
# Info text
plt.axes([start_x, 0.02, width, 0.04], frameon=False)
plt.xticks([])
plt.yticks([])
plt.text(0.5, 0.5,
self.labels['info'],
fontsize=12,
horizontalalignment='center',
verticalalignment='center'
)
self.fig.canvas.mpl_connect('pick_event', self.on_pick)
else:
plt.subplots_adjust(
left=0.12,
bottom=0.05,
right=.97,
top=0.95,
wspace=0,
hspace=0
)
if self.auto_play:
self.on_play(None)
def on_verification(self, event):
for i in self.button_verification:
if event.inaxes == self.button_verification_axis[i]:
self.button_verification[i].color = self.verification_button_colors[i]['on']
self.button_verification[i].hovercolor = self.verification_button_colors[i]['on']
self.verification_answer_id =i
self.verification_answer_value = self.verification_values[i]
else:
self.button_verification[i].color = self.verification_button_colors[i]['off']
self.button_verification[i].hovercolor = self.verification_button_colors[i]['off']
def get_answer(self):
return self.verification_answer_id, self.verification_answer_value
| 35.295938 | 173 | 0.536643 | 53,651 | 0.980034 | 0 | 0 | 2,545 | 0.046489 | 0 | 0 | 7,262 | 0.132654 |
576046bf13a9a9a373df939e68641affef31278c | 498 | py | Python | myApp/views.py | geniyong/oauth_practice | c62b461a59299a28538472d9955c57c86c61cad3 | [
"MIT"
] | null | null | null | myApp/views.py | geniyong/oauth_practice | c62b461a59299a28538472d9955c57c86c61cad3 | [
"MIT"
] | null | null | null | myApp/views.py | geniyong/oauth_practice | c62b461a59299a28538472d9955c57c86c61cad3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.shortcuts import render,redirect, render_to_response
from .models import *
from django.views.generic import TemplateView, ListView, DetailView, CreateView, UpdateView, DeleteView
from django.urls import reverse
from django.http import HttpResponseRedirect
def loginView(request):
    """Render the login page; GET and POST currently get the same response."""
    method = request.method
    if method == "GET":
        pass  # placeholder: nothing special for plain page loads yet
    elif method == 'POST':
        pass  # placeholder: credential handling not implemented yet
    return render(request, 'myApp/login.html')
| 26.210526 | 103 | 0.757028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 54 | 0.108434 |
57605a45872280f1b96cf87232eb6af9882ebf02 | 1,131 | py | Python | console/info.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | console/info.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | console/info.py | alexeysp11/sdc-console-python | 237ec357b0883d0d033cd0f5609ac8a4f064f86f | [
"MIT"
] | null | null | null | class Info:
"""
Allows to print out information about the application.
"""
def commands():
print('''Main modules:
imu | Inertial Measurement Unit (GPS, gyro, accelerometer)
gps | GPS
gyro | Gyroscope
accel | Accelerometer
fuzzy | Fuzzy Controller
nn | Neural Network
test | Unit-tests for each module
exit | Exit
''')
def help():
print('''SDC CONSOLE APP
SDC stands for Self-Driving Cars.
So this app allows you to simulate some modules of SDC within a console.
All modules can be called by:
sdc module --mode
All information modules can be called by:
sdc -commands
sdc -help
''')
def imu():
print('''sdc imu:
--p | IMU (position)
--v | IMU (velocity)
--a | IMU (acceleration)
''')
def gps():
print('''sdc gps:
--p | GPS (position)
--v | GPS (velocity)
--a | GPS (acceleration)
''')
| 23.5625 | 74 | 0.482759 | 1,130 | 0.999116 | 0 | 0 | 0 | 0 | 0 | 0 | 970 | 0.857648 |
5762429b1b1dd01cf1b9389178f64cf1b092cca8 | 2,095 | py | Python | apps/establishment_system/search_indexes.py | camilortte/RecomendadorUD | ebf9ee4482c4093d4751a27c90f56637a9c692a4 | [
"MIT"
] | 4 | 2015-01-29T17:17:26.000Z | 2021-03-03T08:17:03.000Z | apps/establishment_system/search_indexes.py | camilortte/RecomendadorUD | ebf9ee4482c4093d4751a27c90f56637a9c692a4 | [
"MIT"
] | null | null | null | apps/establishment_system/search_indexes.py | camilortte/RecomendadorUD | ebf9ee4482c4093d4751a27c90f56637a9c692a4 | [
"MIT"
] | 1 | 2015-09-22T08:35:26.000Z | 2015-09-22T08:35:26.000Z | # -*- encoding: utf-8 -*-
"""
search_indexex.py: Creacion de los indices de busqueda.
@author Camilo Ramírez
@contact camilolinchis@gmail.com
camilortte@hotmail.com
@camilortte on Twitter
@copyright Copyright 2014-2015, RecomendadorUD
@license GPL
@date 2014-10-10
@satus Pre-Alpha
@version= 0..215
"""
#import datetime
from haystack import indexes
from .models import Establecimiento, Categoria
class EstablecimientoIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True)
nombre = indexes.EdgeNgramField(model_attr='nombre')
#nombre_auto = indexes.EdgeNgramField(model_attr='nombre')
email = indexes.EdgeNgramField(model_attr='email')
web_page = indexes.EdgeNgramField(model_attr='web_page')
address= indexes.EdgeNgramField(model_attr='address')
sub_categorias = indexes.EdgeNgramField(model_attr='sub_categorias')
# content_auto = indexes.EdgeNgramField(model_attr='nombre')
def get_model(self):
return Establecimiento
def index_queryset(self, using=None):
# using select_related here should avoid an extra query for getting
# the manufacturer when indexing
return self.get_model().objects.all().select_related('sub_categorias')
# def search(self):
# if hasattr(self,'cleaned_data') and self.cleaned_data['q']:
# self.cleaned_data['q']=self.cleaned_data['q'].encode('translit/one/ascii', 'replace')
# sqs = super(RtSearchForm, self).search()
# return sqs
class CategoriaIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
nombre_categoria= indexes.EdgeNgramField(model_attr='nombre')
def get_model(self):
return Categoria
def index_queryset(self, using=None):
"""Used when the entire index for model is updated."""
#return self.get_model().objects.filter(pub_date__lte=datetime.datetime.now())
return self.get_model().objects.all() | 35.508475 | 99 | 0.690215 | 1,597 | 0.761927 | 0 | 0 | 0 | 0 | 0 | 0 | 1,083 | 0.516698 |
576265ad1cf9770159b84c4c099a658ff418fcd7 | 1,968 | py | Python | utils.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | null | null | null | utils.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | null | null | null | utils.py | jeffasante/captcha | 7e97d3ed0caf253efd400acfa9f7a754e314c93b | [
"MIT"
] | 1 | 2022-01-02T10:51:37.000Z | 2022-01-02T10:51:37.000Z | ''' Handling the data io '''
from torchvision import transforms, datasets
import numpy as np
import zipfile
from io import open
import glob
from PIL import Image, ImageOps
import os
import string
# Read data
def extractZipFiles(zip_file, extract_to):
    """Unpack every member of the archive at *zip_file* into *extract_to*."""
    with zipfile.ZipFile(zip_file, mode='r') as archive:
        for member in archive.namelist():
            archive.extract(member, extract_to)
    print('done')
# Glob pattern for the captcha image set; each file name (minus ".png")
# is also the image's label.
data_dir = 'data/captcha_images_v2/*.png'
def findFiles(path):
    """Return the list of filesystem paths matching the glob pattern *path*."""
    return list(glob.iglob(path))
# find letter inde from targets_flat
def letterToIndex(letter):
    """Return the index of *letter* in the global alphabet, or -1 if absent.

    NOTE(review): ``all_letters`` is never defined in this file, so calling
    this raises NameError as written. Confirm where the alphabet is meant to
    come from (e.g. something like ``string.ascii_lowercase + string.digits``
    covering the captcha label character set).
    """
    return all_letters.find(letter)
# print(letterToIndex('l'))
# index to letter
# Reverse lookup: alphabet index -> character.
# NOTE(review): ``all_letters`` is not defined in this file; these module-level
# statements will raise NameError at import time unless it is provided
# elsewhere — confirm.
indexToLetter = {letterToIndex(i):i for i in all_letters}
# Image paths and their labels (file name without the ".png" extension).
data = [img for img in findFiles(data_dir)]
targets = [os.path.basename(x)[:-4] for x in glob.glob(data_dir)]
# abcde -> [a, b, c, d, e]
pre_targets_flat = [[c for c in x] for x in targets]
# Per-image label sequences encoded as alphabet indices (shape: n_images x label_len).
encoded_targets = np.array([[letterToIndex(c) for c in x] for x in pre_targets_flat])
# All label characters flattened into one list, and the set of distinct ones.
targets_flat = [char for word in pre_targets_flat for char in word]
unique_letters = set(char for word in targets for char in word)
class CaptchaDataset(Dataset):  # NOTE(review): `Dataset` (torch.utils.data.Dataset?) is not imported in this file — confirm
    """
    Args:
        data (string): Path to the file with all the images.
        target (string): Path to the file with annotations.
        transform (callable, optional): Optional transform to be applied
            on a sample.
    """
    def __init__(self, data, target=None, transform=None):
        # data: sequence of image file paths; target: per-image encoded labels.
        self.data = data
        self.target = target
        self.transform = transform
    def __getitem__(self, index):
        # read image
        x = Image.open(self.data[index]).convert('RGB')
        y = self.target[index]
        # resize, turn to 0,1
        if self.transform:
            x = self.transform(x)
            # NOTE(review): `torch` is not imported at module level — confirm;
            # also note the untransformed path returns y unconverted below.
            return x, torch.tensor(y, dtype=torch.long)
        return x, y
    def __len__(self):
        return len(self.data)
| 24.296296 | 85 | 0.643801 | 838 | 0.425813 | 0 | 0 | 0 | 0 | 0 | 0 | 510 | 0.259146 |
5766b267912e78674e78fd7d2805ec7078a5c543 | 4,072 | py | Python | tests/test_metaregistry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_metaregistry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | tests/test_metaregistry.py | kkaris/bioregistry | e8cdaf8e8c5670873ce10a5a67d7850b76e5eff7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Tests for the metaregistry."""
import unittest
import bioregistry
from bioregistry.export.rdf_export import metaresource_to_rdf_str
from bioregistry.schema import Registry
class TestMetaregistry(unittest.TestCase):
    """Tests for the metaregistry."""
    def test_minimum_metadata(self):
        """Test the metaregistry entries have a minimum amount of data."""
        # Every registry entry must be a Registry model and carry the core
        # descriptive fields; provider/resolver entries additionally need
        # URL templates with the appropriate substitution placeholders.
        for metaprefix, registry_pydantic in bioregistry.read_metaregistry().items():
            self.assertIsInstance(registry_pydantic, Registry)
            data = registry_pydantic.dict()
            with self.subTest(metaprefix=metaprefix):
                self.assertIn('name', data)
                self.assertIn('homepage', data)
                self.assertIn('example', data)
                self.assertIn('description', data)
                # When a registry is a provider, it means it
                # provides for its entries
                self.assertIn('provider', data)
                if data['provider']:
                    # $1 is the placeholder for the entry's identifier.
                    self.assertIn('provider_url', data)
                    self.assertIn('$1', data['provider_url'])
                # When a registry is a resolver, it means it
                # can resolve entries (prefixes) + identifiers
                self.assertIn('resolver', data)
                if data['resolver']:
                    # $1 = prefix, $2 = identifier.
                    self.assertIn('resolver_url', data)
                    self.assertIn('$1', data['resolver_url'])
                    self.assertIn('$2', data['resolver_url'])
                # Guard against typos / schema drift: no unexpected keys.
                invalid_keys = set(data).difference({
                    'prefix', 'name', 'homepage', 'download',
                    'provider', 'resolver', 'description', 'provider_url',
                    'example', 'resolver_url', 'contact',
                })
                self.assertEqual(set(), invalid_keys, msg='invalid metadata')
    def test_get_registry(self):
        """Test getting a registry."""
        # Unknown metaprefixes return None from every accessor.
        self.assertIsNone(bioregistry.get_registry('nope'))
        self.assertIsNone(bioregistry.get_registry_name('nope'))
        self.assertIsNone(bioregistry.get_registry_homepage('nope'))
        self.assertIsNone(bioregistry.get_registry_url('nope', ...))
        self.assertIsNone(bioregistry.get_registry_example('nope'))
        self.assertIsNone(bioregistry.get_registry_description('nope'))
        self.assertIsNone(bioregistry.get_registry_url('n2t', ...))  # no provider available for N2T registry
        # A known metaprefix round-trips through the accessor functions.
        metaprefix = 'uniprot'
        registry = bioregistry.get_registry(metaprefix)
        self.assertIsInstance(registry, Registry)
        self.assertEqual(metaprefix, registry.prefix)
        self.assertEqual(registry.description, bioregistry.get_registry_description(metaprefix))
        homepage = 'https://www.uniprot.org/database/'
        self.assertEqual(homepage, registry.homepage)
        self.assertEqual(homepage, bioregistry.get_registry_homepage(metaprefix))
        name = 'UniProt Cross-ref database'
        self.assertEqual(name, registry.name)
        self.assertEqual(name, bioregistry.get_registry_name(metaprefix))
        example = '0174'
        self.assertEqual(example, registry.example)
        self.assertEqual(example, bioregistry.get_registry_example(metaprefix))
        url = bioregistry.get_registry_url(metaprefix, example)
        self.assertEqual('https://www.uniprot.org/database/DB-0174', url)
    def test_resolver(self):
        """Test generating resolver URLs."""
        # Can't resolve since nope isn't a valid registry
        self.assertIsNone(bioregistry.get_registry_resolve_url('nope', 'chebi', '1234'))
        # Can't resolve since GO isn't a resolver
        self.assertIsNone(bioregistry.get_registry_resolve_url('go', 'chebi', '1234'))
        url = bioregistry.get_registry_resolve_url('bioregistry', 'chebi', '1234')
        self.assertEqual('https://bioregistry.io/chebi:1234', url)
    def test_get_rdf(self):
        """Test conversion to RDF."""
        s = metaresource_to_rdf_str('uniprot')
        self.assertIsInstance(s, str)
| 43.319149 | 109 | 0.638507 | 3,867 | 0.949656 | 0 | 0 | 0 | 0 | 0 | 0 | 1,098 | 0.269646 |
57697b1541d35be66a3cab37aa617e895a535842 | 341 | py | Python | ex038.py | honeyhugh/PythonCurso | e5b8efe04e100ea0b0c0aacde1caf7ae52489f40 | [
"MIT"
] | null | null | null | ex038.py | honeyhugh/PythonCurso | e5b8efe04e100ea0b0c0aacde1caf7ae52489f40 | [
"MIT"
] | null | null | null | ex038.py | honeyhugh/PythonCurso | e5b8efe04e100ea0b0c0aacde1caf7ae52489f40 | [
"MIT"
] | null | null | null | print('Analisador de números')
print('=-=' * 15)
n1 = int(input('Digite o primeiro número: '))
n2 = int(input('Digite o segundo número: '))
if n1 > n2:
print('O número {} é maior que o número {}'.format(n1, n2))
elif n2 > n1:
print('O número {} é maior que o número {}'.format(n2, n1))
else:
print('Os dois valores são iguais.')
| 31 | 63 | 0.624633 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 196 | 0.558405 |
576acce6fb8aee36735ff671a49a541abb4f2890 | 750 | py | Python | setup.py | ksachdeva/symbulate | 409188680c599622140ff6c984c95703173472e8 | [
"MIT"
] | 25 | 2017-04-04T01:55:22.000Z | 2022-03-28T17:57:49.000Z | setup.py | ksachdeva/symbulate | 409188680c599622140ff6c984c95703173472e8 | [
"MIT"
] | 67 | 2017-06-27T23:32:29.000Z | 2022-01-15T19:57:28.000Z | setup.py | ksachdeva/symbulate | 409188680c599622140ff6c984c95703173472e8 | [
"MIT"
] | 21 | 2017-04-04T01:55:22.000Z | 2022-01-11T20:03:52.000Z | from setuptools import setup, find_packages
# Package metadata is gathered in one mapping and handed to setuptools in a
# single call; behaviour is identical to passing the keywords directly.
PACKAGE_METADATA = dict(
    name="symbulate",
    version="0.5.5",
    description="A symbolic algebra for specifying simulations.",
    url="https://github.com/dlsun/symbulate",
    author="Dennis Sun",
    author_email="dsun09@calpoly.edu",
    license="GPLv3",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Education',
        'Topic :: Scientific/Engineering :: Mathematics',
        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
        'Programming Language :: Python :: 3',
    ],
    keywords='probability simulation',
    packages=find_packages(),
    install_requires=[
        'numpy',
        'scipy',
        'matplotlib'
    ],
)
setup(**PACKAGE_METADATA)
| 22.058824 | 75 | 0.612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 407 | 0.542667 |
576b51feddb0ad573026e5dec3bf36423cf565ad | 833 | py | Python | snakemake/scripts/pipeline/gc.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | snakemake/scripts/pipeline/gc.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | snakemake/scripts/pipeline/gc.py | BDI-pathogens/ShiverCovid | a032a7a786288b28994eae51215e7851f7571018 | [
"MIT"
] | null | null | null | from __future__ import print_function
import gzip
import os
import sys
from decimal import Decimal
def calculate_gc(inpath):
inf = gzip.open(inpath) if inpath.endswith('.gz') else open(inpath)
ttl_bases = 0
gc_bases = 0
for i, l in enumerate(inf):
if i % 4 == 1:
s = l.strip().upper()
ttl_bases += len(s)
gc_bases += (s.count('G') + s.count('C'))
return gc_bases, ttl_bases
if __name__ == '__main__':
if not len(sys.argv) > 1 or not os.path.isfile(sys.argv[1]):
sys.stderr.write('Usage: gc.py <fastq[.gz] file with no blank lines>\n')
sys.exit(1)
gc, ttl = calculate_gc(sys.argv[1])
# The original pipeline returns 12 decimal places, so round this for consistency
calc = round(Decimal(gc / float(ttl)), 12)
print(gc, ttl, calc)
| 28.724138 | 84 | 0.623049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 155 | 0.186074 |
576cac074c13eb095065c6d2030dffe84a3c2e7c | 725 | py | Python | blind_automation/event/blocker.py | RaphiOriginal/blindAutomation | 47f087be0ef33983cfc372abe09760c9a64f1849 | [
"MIT"
] | 1 | 2020-08-20T19:43:14.000Z | 2020-08-20T19:43:14.000Z | blind_automation/event/blocker.py | RaphiOriginal/blindAutomation | 47f087be0ef33983cfc372abe09760c9a64f1849 | [
"MIT"
] | null | null | null | blind_automation/event/blocker.py | RaphiOriginal/blindAutomation | 47f087be0ef33983cfc372abe09760c9a64f1849 | [
"MIT"
] | null | null | null | from typing import Optional, TypeVar
from .event import EventBlocker
T = TypeVar('T')
class Blocker(EventBlocker):
    """Event blocker that toggles a flag and remembers tasks seen meanwhile.

    Tasks are stored in arrival order; :attr:`last` pops them back newest
    first so the latest blocked task can be replayed after unblocking.
    """

    def __init__(self):
        self.__block = False
        self.__block_list: [T] = []

    def block(self):
        """Raise the blocking flag."""
        self.__block = True

    def unblock(self):
        """Lower the blocking flag."""
        self.__block = False

    def update(self, task: T):
        """Record *task* for later retrieval via :attr:`last`."""
        self.__block_list.append(task)

    @property
    def last(self) -> Optional[T]:
        """Pop and return the most recently recorded task, or None if empty."""
        return self.__block_list.pop() if self.__block_list else None

    @property
    def blocking(self) -> bool:
        """Whether events are currently being held back."""
        return self.__block

    def __repr__(self):
        return 'Blocker: {blocking: %s, blocked: %s}' % (self.blocking, self.__block_list)
| 21.323529 | 90 | 0.609655 | 634 | 0.874483 | 0 | 0 | 215 | 0.296552 | 0 | 0 | 41 | 0.056552 |
576cb2eea467a88f13b66f007bd906188c23f5fc | 4,239 | py | Python | lib/systems/chlorophyll_c2.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/chlorophyll_c2.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | lib/systems/chlorophyll_c2.py | pulsar-chem/BPModule | f8e64e04fdb01947708f098e833600c459c2ff0e | [
"BSD-3-Clause"
] | null | null | null | import pulsar as psr
def load_ref_system():
    """ Returns chlorophyll_c2 as found in the IQMol fragment library.
        All credit to https://github.com/nutjunkie/IQmol
    """
    # XYZ-style records: element symbol followed by Cartesian coordinates
    # (presumably Angstroms, per the IQMol fragment convention — confirm).
    return psr.make_system("""
      C         -2.51105        2.48309       -0.00367
      C         -1.01315        4.06218        0.09798
      C          0.16582        4.71018        0.03445
      C          1.49771        4.10555       -0.16166
      C          3.11156        2.64885       -0.24011
      C          3.76129        1.46796       -0.14139
      C          3.09159        0.16424       -0.04831
      C          1.53984       -1.29933       -0.10261
      C         -0.91181       -1.59853       -0.35248
      C         -2.45607       -0.05240       -0.35298
      C         -3.10620        1.27291       -0.16460
      N          1.75017        2.78429       -0.23961
      N          1.76493        0.00621       -0.16383
      N         -1.13225       -0.25365       -0.35470
      C         -3.20202        3.66639        0.28881
      C          2.68521        4.82968       -0.22081
      C          3.68747       -1.09005        0.15938
      C         -2.24542        4.67945        0.36245
      C         -2.41991        6.13752        0.71407
      C         -3.56473        6.73948        1.06129
      C          3.72098        3.91621       -0.29248
      N         -1.17823        2.72953       -0.07212
      Mg         0.26793        1.27490       -0.35377
      C          0.34760       -2.11926       -0.19964
      C          2.64511       -1.99016        0.09433
      C          2.31747       -3.38932        0.17127
      O          3.13845       -4.27117        0.35919
      C          0.80633       -3.58955       -0.01558
      O         -0.47000       -3.54277        1.99390
      C          0.19951       -4.22772        1.23456
      O          0.33390       -5.55184        1.51538
      C         -2.14828       -2.27838       -0.46597
      C         -2.34627       -3.76961       -0.59495
      C         -3.52338       -4.42976       -0.61516
      C         -3.58867       -5.90449       -0.76226
      O         -2.56916       -6.57072       -0.87406
      O         -4.79233       -6.51526       -0.77059
      C         -3.12715       -1.27243       -0.46107
      C          0.83624       -6.50254        0.65250
      C          5.12089       -1.42417        0.42504
      C         -4.62172       -1.44902       -0.55794
      C         -4.68427        3.81267        0.53984
      C          2.86086        6.32939       -0.22433
      C          6.20198        3.61878       -0.78465
      C          5.15458        4.35926       -0.39637
      H          0.16297        5.77206        0.17065
      H          4.82596        1.44735       -0.07176
      H         -4.17158        1.24062       -0.09244
      H         -1.55035        6.77752        0.72933
      H         -4.51438        6.24702        1.11665
      H         -3.54680        7.79385        1.31901
      H          0.60711       -4.17663       -0.93384
      H         -1.47776       -4.37959       -0.70310
      H         -4.46318       -3.92775       -0.52160
      H         -4.86588       -7.47619       -0.86197
      H          0.78374       -7.49457        1.14518
      H          1.89335       -6.30524        0.40486
      H          0.22776       -6.53794       -0.27557
      H          5.75783       -0.51879        0.44869
      H          5.50014       -2.12026       -0.34842
      H          5.18953       -1.91331        1.42119
      H         -5.18859       -0.50460       -0.59248
      H         -4.99451       -2.00986        0.32226
      H         -4.86783       -1.97442       -1.50531
      H         -5.27115        2.88458        0.45319
      H         -5.12053        4.53158       -0.18161
      H         -4.83661        4.15056        1.58664
      H          1.91301        6.89359       -0.19061
      H          3.38213        6.63177       -1.15809
      H          3.47321        6.63697        0.64765
      H          7.18160        4.08465       -0.84048
      H          6.13795        2.59183       -1.09149
      H          5.36680        5.40361       -0.19182
        """)
| 52.333333 | 70 | 0.362114 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 4,161 | 0.981599 |
5771a8c9a11c0b65fd2a5981efe83e45f9b456e5 | 14,669 | py | Python | fth.py | anonymous-sys19/fth | cbde3030f43d048970e1471dc9c4fafba50fedb8 | [
"Apache-2.0"
] | 2 | 2021-02-01T18:31:19.000Z | 2021-05-28T19:14:51.000Z | fth.py | anonymous-sys19/fth | cbde3030f43d048970e1471dc9c4fafba50fedb8 | [
"Apache-2.0"
] | null | null | null | fth.py | anonymous-sys19/fth | cbde3030f43d048970e1471dc9c4fafba50fedb8 | [
"Apache-2.0"
] | null | null | null | import os
import time
import sys
import random
user_pass = ('''admin
admi
admin
universo
html
veggeta
Admin
bados
free-fire
royale
clang
free
fire
anonimo
anonimous
anoni
bills
anonymous
Aanonimous
pass
password
wordlist
kali
linux
kali-linux
start
Hacker
parrot
ubuntu
blacken
redhat
deepin
lubuntu
depin
gogeta
hacker
tor
2000
error
2001
2002
1999
root
home
space
2003
2004
2005
red-tor
redtor
enero
2006
2007
2008
home
2009
2010
2020
goku
febrero
user
usuario
xmr7
marzo
administrador
abril
mayo
junio
administrativo
2011
homme
2013
2012
security
2014
wine
seguridad
2015
2016
2017
2018
2019
hack
black
hackblack
julio
anonsurf
decsec
agosto
metasploit
supersu
super
user-root
septiembre
octubre
october
novienbre
juan
adrian
diciembre
cuarenta
curentena
1234
4321
0000
docker
python
aaaa
dead
deat
muerte
sudo
sudosu
sudo su
we are hacker
2222
1010
wearehacker
123456
1111
12345
mexico
peru
amor
123
vida
love
loveyou
you
live
5678
scan
56789
mylife
estudio
mrhacker
mr hacker
jhom
jhon
fores
benjamin
mr-rebot
mr robot
mr-roboth
mr roboth
roboth
scryp
1010
tool
nombre
anom''')
def colores ():
    """Switch terminal text colour by printing one random ANSI escape code."""
    ansi_codes = ['\033[1;36m','\033[1;31m','\033[1;34m','\033[1;30m','\033[1;37m','\033[1;35m','\033[1;32m','\033[1;33m']
    print(random.choice(ansi_codes))
# Built-in username wordlist for the brute-force module: one candidate per
# line inside the triple-quoted literal (consumers split it on newlines).
user_user = ('''admin
admi
admin
Admin
anonimo
anonimous
anoni
anonymous
benjamin
mr-rebot
mr robot
mr-roboth
mr roboth
roboth
Aanonimous
pass
password
wordlist
kali
linux
kali-linux
start
Hacker
parrot
ubuntu
redhat
deepin
depin
gogeta
hacker
tor
2000
2001
2002
1999
root
home
space
2003
2004
2005
red-tor
redtor
enero
2006
2007
2008
2009
2010
2020
goku
febrero
user
usuario
marzo
administrador
abril
mayo
junio
administrativo
2011
2013
2012
2014
2015
2016
2017
2018
2019
hack
black
hackblack
julio
anonsurf
agosto
metasploit
supersu
super
user-root
septiembre
octubre
october
novienbre
juan
diciembre
cuarenta
curentena
1234
4321
0000
docker
aaaa
dead
deat
muerte
sudo
sudosu
sudo su
we are hacker
2222
1010
wearehacker
123456
1111
12345
mexico
peru
amor
123
vida
love
loveyou
you
live
5678
scan
56789
mylife
estudio
mrhacker
mr hacker
jhom
jhon
fores
scryp
1010
tool
anom
''')
#print (user)
def slowprint(s):
    """Emit *s* (newline-terminated) one character at a time, ~14 ms apart,
    for a typewriter effect."""
    for ch in s + '\n':
        print(ch, end='', flush=True)
        time.sleep(1./70)
def fth ():
    """Draw the tool's banner: random colour plus one random ASCII-art logo,
    typed out slowly via slowprint()."""
    colores()
    listalinuxs = ['''
  ██████    █████    █████      
 ███░░███  ░░███    ░░███       
░███ ░░░  ███████   ░███████   
███████   ░░░███░    ░███░░███  
░░░███░     ░███      ░███ ░███  
  ░███      ░███ ███  ░███ ░███  
  █████ ██  ░░█████ ██ ████ █████
 ░░░░░ ░░    ░░░░░ ░░ ░░░░ ░░░░░ 
''','''
'########::::::'########::::::'##::::'##:
 ##.....:::::::... ##..::::::: ##:::: ##:
 ##::::::::::::::: ##::::::::: ##:::: ##:
 ######::::::::::: ##::::::::: #########:
 ##...:::::::::::: ##::::::::: ##.... ##:
 ##:::::::'###:::: ##::::'###: ##:::: ##:
 ##::::::: ###:::: ##:::: ###: ##:::: ##:
..::::::::...:::::..:::::...::..:::::..::
''','''
███████╗████████╗██╗  ██╗
██╔════╝╚══██╔══╝██║  ██║
█████╗     ██║   ███████║
██╔══╝     ██║   ██╔══██║
██║██╗     ██║██╗██║  ██║
╚═╝╚═╝     ╚═╝╚═╝╚═╝  ╚═╝
''','''
O))       O))        O))
O)    O)) O))        O)) O)
O)O) O)O)O) O)O))    O)) O))   O)) O))    O)) O))O)) O))
O))   O)) O)   O)    O))O))   O))  O))O))  O))    O) O))
O))   O))  O)) O))   O))O))   O))  O))O))  O))     O)
O))   O)) O)   O))   O))O))   O))  O))O))  O))  O)  O))
O))   O))  O)) O))O)))O))O))) O))  O))O))  O)) O))  O))
''','''
.%%%%%%..%%%%%%..%%..%%..%%......%%%%%%..%%..%%..%%..%%..%%..%%.
.%%........%%....%%..%%..%%........%%....%%%.%%..%%..%%...%%%%..
.%%%%......%%....%%%%%%..%%........%%....%%.%%%..%%..%%....%%...
.%%........%%....%%..%%..%%........%%....%%..%%..%%..%%...%%%%..
.%%........%%....%%..%%..%%%%%%..%%%%%%..%%..%%...%%%%...%%..%%.
................................................................
''','''
 @@@@@@@@ @@@@@@@ @@@  @@@ @@@       @@@ @@@  @@@ @@@  @@@ @@@  @@@
 @@@@@@@@ @@@@@@@ @@@  @@@ @@@       @@@ @@@@ @@@ @@@  @@@ @@@  @@@
 @@!        @@!   @@!  @@@ @@!       @@! @@!@!@@@ @@!  @@@ @@!  !@@
 !@!        !@!   !@!  @!@ !@!       !@! !@!!@!@! !@!  @!@ !@!  @!!
 @!!!:!     @!!   @!@!@!@! @!@!@!@!@ !!@ @!@ !!@! @!@  !@! !@@!@!
 !!!!!:     !!!   !!!@!!!! !!!@!@!!! !!! !@!  !!! !@!  !!!  @!!!
 !!:        !!:   !!:  !!! !!:       !!: !!:  !!! !!:  !!! !: :!!
 :!:        :!:   :!:  !:! :!:       :!: :!:  !:! :!:  !:! :!:  !:!
  ::         ::    ::  ::: ::  ::::   :: ::   ::  ::::: ::  ::  :::
  :          :     :   : : : :: : :  :   ::    ::  : :  :  :   :  ::
''']
    # Pick one logo at random (same pattern as colores()).
    indice=random.randrange(len(listalinuxs))
    listalinux=listalinuxs[indice]
    slowprint(listalinux)
fth()
def clear ():
 # Wipe the terminal by shelling out to the system ``clear`` command.
 os.system('clear')
def help ():
 """Print the tool's Spanish help text describing every menu command.
 NOTE(review): this shadows the ``help`` builtin for the rest of the module.
 """
 print ('\n ------Menu_de ayuda_------\n')
 ayuda = '''
Creacion de usuario = use user [ no son util ]
Creacion de password = use pass [ no son util ]\n
create_new_pass [Crea una nueva lista de deccionario manual]\n\n
create_new_user [Crea una nueva lista de diccionario manual ]\n
create pass list [crea la lista de diccionario creada por el usuario]\n
create user list [crea la lista de diccionario creada por el usuario]\n
clear [LIMPIA LA PANTALLA ]\n
use force_brute [con este comando podra hacer fuerza bruta a ftp,ssh.etc]
open msfconsole [podras tambien ejecutar metasploit desde la misma scryp]
\n
use hacker_git [como instancia con solo este comando se instalara una tools]
creada por el usuario disponible en github
\n
create automated payload [ crearas un payload ya automatizado [windows/android][1]]
\n
Tambien podras llamar cualquier herramienta del sistema..
escribiendo el nombre de la herramienta
Ejemplo [ nmap 127.0.0.1 ] etc
\n\n
------Nota------
\n\n
Para ahorrarte trabajo el mismo creador del scrypt a creado un diccionario
con posibles password y user puedes crearlos y darles usos en el apartado
de contraseña y usuario --------------- tambien el creador de contraseña
te generara y podras guardar cada [user y pass] en la lista de diccionarios
del scrypt para darle uso en otra ocacion --------------------------------\n\n
Tambien tendra el privilejio de editar sus diccionarios desde el directorio
de la herramienta.. ya que automaticamente el scrypt le heredara privilegios
--------------------------------super-user----------------------------------
 '''
 print (ayuda)
def create_new_Pass ():
 """Open nano so the user can author a brand-new password wordlist."""
 print('Creando .txt ....')
 time.sleep(2)
 os.system ('nano New_Pass.txt')
 print('Creado con exito ....')
def create_new_user ():
 """Open nano so the user can author a brand-new user-name wordlist."""
 print('Creando .txt ....\n')
 time.sleep(2)
 os.system ('nano New_user.txt')
 print('Creado con exito ....')
def password():
    """Append the module-level *user_pass* value to the user_pass.txt wordlist.

    FIX: the file handle was opened and never closed, leaking the descriptor
    and risking unflushed data; a ``with`` block now guarantees the close.
    """
    with open('user_pass.txt', 'a') as diccionaripass:
        diccionaripass.write(user_pass)
def usuarios():
    """Append the module-level *user_user* value to the user_user.txt wordlist.

    FIX: the file handle was opened and never closed, leaking the descriptor
    and risking unflushed data; a ``with`` block now guarantees the close.
    """
    with open('user_user.txt', 'a') as diccionariuser:
        diccionariuser.write(user_user)
def menu ():
 """Apply the colour scheme and print the top-level welcome menu."""
 colores()
 Menu_de_inicio = ('''
[Escribe [help] para ver modo de uso ]
\n
Generar diccionario\n
Crear nuevo diccionario\n
Editar diccionario existente
 ''')
 print('\n -----------------------------------welcome------------------------------\n')
 print(Menu_de_inicio)
# Show the menu once at start-up.
menu()
def chmod ():
 # WARNING(security): makes every file under the current directory
 # world-writable (chmod 777 -R *) so the generated wordlists can be
 # edited by any user.
 os.system('chmod 777 -R *')
def creacion ():
 """Main interactive loop: read a command and dispatch to dictionary
 creation/editing, brute-force module, metasploit, payload builder, etc.
 NOTE(review): the function "loops" by calling itself recursively, so a
 long session keeps growing the call stack.
 """
 chmod()
 usr_pas = input('menu $ ')
 # --- edit the user-name dictionaries --------------------------------
 if usr_pas == 'use user' :
  print('''\n\n
para agregar tus propias palabras\n
debes crear generar el diccionario user-pas \n\n
espera
\n\n''')
  print ('\nSi ya lo isiste antes preciona [n]\n\n')
  dic = input('[y/n] $ ')
  if dic == 'y' :
   # Regenerate both dictionaries before offering to edit them.
   usuarios()
   password()
   print('Listo quieres agregar palabras de [use](pass/user) escribe ')
   pausr = input('[pass/user] $ ')
   if pausr == 'pass' :
    os.system ('nano user_pass.txt')
    creacion()
   elif pausr == 'user' :
    os.system('nano user_user.txt')
    creacion()
   elif pausr == 'create_new pass' :
    os.system('nano New_Pass.txt')
    creacion()
   elif pausr == 'create_new user' :
    os.system('nano New_user.txt')
    creacion()
   else:
    print('ERROR OPCION INVALIDA ... ')
  elif dic == 'n' :
   pausr = input('[pass/user] $ ')
   if pausr == 'pass' :
    os.system ('nano user_pass.txt')
    creacion()
   elif pausr == 'user' :
    os.system('nano user_user.txt')
    creacion()
   elif pausr == 'create_new pass' :
    os.system('nano New_Pass.txt')
    creacion()
   elif pausr == 'create_new user' :
    os.system('nano New_user.txt')
    creacion()
   elif pausr == 'clear' :
    clear()
    creacion()
   else:
    print('ERROR OPCION INVALIDA ... ')
 # --- edit the password dictionaries (mirrors 'use user') ------------
 elif usr_pas == 'use pass' :
  print('''\n\n
para agregar tus propias palabras\n
debes crear generar el diccionario user-pas \n\n
espera
\n\n''')
  print ('\nSi ya lo isiste antes preciona [n]\n\n')
  dic = input('[y/n] $ ')
  if dic == 'y' :
   usuarios()
   password()
   print('Listo quieres agregar palabras de [use](pass/user) escribe \n\n')
   pausr = input('[pass/user] $ ')
   if pausr == 'pass' :
    os.system ('nano user_pass.txt')
    creacion()
   elif pausr == 'user' :
    os.system('nano user_user.txt')
    creacion()
   elif pausr == 'create_new pass' :
    os.system('nano New_Pass.txt')
    creacion()
   elif pausr == 'create_new user' :
    os.system('nano New_user.txt')
    creacion()
   elif pausr == 'help' :
    help()
    creacion()
   elif pausr == 'exit' :
    print ('Saludes un gusto adios')
    os.system('exit')
   elif pausr == 'clear' :
    clear()
    creacion()
   else:
    print('ERROR OPCION INVALIDA ... ')
  elif dic == 'n' :
   pausr = input('[pass/user] $ ')
   if pausr == 'pass' :
    os.system ('nano user_pass.txt')
    creacion()
   elif pausr == 'user' :
    os.system('nano user_user.txt')
    creacion()
   elif pausr == 'create_new pass' :
    os.system('nano New_Pass.txt')
    creacion()
   elif pausr == 'create_new user' :
    os.system('nano New_user.txt')
    creacion()
   elif pausr == 'help' :
    help()
    creacion()
   elif pausr == 'exit' :
    print ('Saludes un gusto adios')
    os.system('exit')
   elif pausr == 'clear' :
    clear()
    creacion()
   else:
    print('ERROR OPCION INVALIDA ... ')
 elif usr_pas == 'help' :
  clear()
  colores()
  help()
  colores()
  creacion()
 # --- clone the author's helper tool from GitHub ---------------------
 elif usr_pas == 'use hacker_git' :
  colores()
  print (' Instalando ')
  time.sleep(1)
  print('Instalacion..escribe la ruta a guardar la herramienta .. \n')
  instalacion = input('$rute $ ')
  os.system ('git clone https://github.com/anonymous-sys19/Hacker.git')
  os.system ("mv Hacker " + instalacion)
  print('\n listo se instalo con exito\n')
  creacion()
 elif usr_pas == 'create_new_pass' :
  create_new_Pass()
 elif usr_pas == 'create_new_user' :
  create_new_user()
 elif usr_pas == 'exit' :
  clear()
  colores()
  print ('\n--------Saludes un gusto adios-----\n\n')
  # NOTE(review): os.system('exit') spawns a shell and exits *that* shell;
  # it does not terminate this script.
  os.system('exit')
 elif usr_pas == 'clear' :
  clear()
  colores()
  creacion()
 # --- launch the bundled brute-force module --------------------------
 elif usr_pas == 'use force_brute' :
  colores()
  print ('..........')
  time.sleep(0.50)
  print (' ..........' )
  time.sleep(0.50)
  print ('-----abriendo modulo-----')
  colores()
  time.sleep(0.50)
  print ('..........')
  time.sleep(0.50)
  print ('..........')
  time.sleep(0.50)
  clear()
  os.system('python3 modulos/mod_force.py')
  colores()
  creacion()
 elif usr_pas == 'create user list' :
  usuarios()
  print('Creado con exito ....')
  time.sleep(2)
  chmod()
  clear()
  creacion()
 elif usr_pas == 'create pass list' :
  password()
  print('Creado con exito .... ')
  time.sleep(2)
  chmod()
  clear()
  creacion()
 elif usr_pas == 'open msfconsole' :
  print('pleace wait ... execute msfconsole \n\n')
  os.system('msfconsole')
  creacion()
 elif usr_pas == 'create automated payload' :
  os.system('bash modulos/modsh/modulo_payload.sh')
  colores()
  creacion()
 # NOTE(review): this condition is always true, so any unrecognised input
 # falls through here and is executed verbatim as a shell command (command
 # injection by design); the else branch below is unreachable.
 elif usr_pas == usr_pas :
  os.system(usr_pas)
  colores()
  creacion()
 else:
  clear()
  print('\n\n error ')
  creacion()
# Start the interactive loop as soon as the script runs.
creacion()
5771c942a9c694ad105aa8ab9cf5971afb8d6de1 | 5,637 | py | Python | AN-24_Nizhneangarsk/data/google_earth.py | paulross/pprune-calc | ebed46e7e029bc7a3067f50ad180a1df236052cf | [
"MIT"
] | 1 | 2020-10-24T15:46:18.000Z | 2020-10-24T15:46:18.000Z | AN-24_Nizhneangarsk/data/google_earth.py | paulross/pprune-calc | ebed46e7e029bc7a3067f50ad180a1df236052cf | [
"MIT"
] | null | null | null | AN-24_Nizhneangarsk/data/google_earth.py | paulross/pprune-calc | ebed46e7e029bc7a3067f50ad180a1df236052cf | [
"MIT"
] | null | null | null | import itertools
import math
import pprint
import sys
import typing
import map_funcs
# Pixel measurements taken off a Google Earth capture of the airport.
# Each entry records the image geometry, its metres-per-pixel scale, the
# datum point used as the origin, and named landmark points in pixels.
GOOGLE_EARTH_AIRPORT_IMAGES = {
    'GoogleEarth_AirportCamera_C.jpg' : {
        'path': 'video_images/GoogleEarth_AirportCamera_C.jpg',
        'width': 4800,
        'height': 3011,
        # Originally measured on the 100m legend as 181 px
        # 'm_per_px': 100 / (4786 - 4605),
        # Corrected to 185 to give runway length of 1650.5 m
        'm_per_px': 100 / (4786 - 4601),
        'datum': 'runway_23_start',
        'measurements': {
            # 'datum_1': 'runway_23_end',
            'runway_23_start': map_funcs.Point(3217, 204),
            'runway_23_end': map_funcs.Point((1310 + 1356) / 2, (2589 + 2625) / 2),
            'perimeter_fence': map_funcs.Point(967, 2788),
            'red_building': map_funcs.Point(914, 2827),
            'helicopter': map_funcs.Point(2630, 1236),
            'camera_B': map_funcs.Point(2890, 1103),
            'buildings_apron_edge': map_funcs.Point(2213, 1780),
            # The next three are from camera B frame 850
            # Dark smudge on right
            'right_dark_grass': map_funcs.Point(2742, 1137),
            # Pale smudge on right where tarmac meets grass
            'right_light_grass': map_funcs.Point(2755, 1154),
            # Pale smudge on left where tarmac taxiway meets grass
            # 'left_light_grass': map_funcs.Point(2492, 1488),
            # Bright roofed house
            'bright_roofed_house': map_funcs.Point(1067, 2243),
        }
    },
}
# Estimated absolute positional error (metres) of any single point taken
# in isolation.
ABSOLUTE_POSITION_ERROR_M = 10.0
# Two points separated by exactly this baseline are assumed to carry
# ABSOLUTE_POSITION_ERROR_M of relative error; closer pairs carry
# proportionally less, farther pairs proportionally more.
RELATIVE_POSITION_ERROR_BASELINE_M = 1000.0
RELATIVE_BEARING_ERROR_DEG = 0.5


def relative_position_error(distance_between: float) -> float:
    """Estimate the relative position error (metres) of two points that are
    *distance_between* metres apart.

    It is extremely unlikely that two points close together are both badly
    wrong, so the estimate scales linearly with their separation.
    """
    scaled = ABSOLUTE_POSITION_ERROR_M * distance_between
    return scaled / RELATIVE_POSITION_ERROR_BASELINE_M
# Runway length in metres, derived from the two measured thresholds and the
# image scale.
RUNWAY_LENGTH_M = map_funcs.distance(
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px'],
)
# Runway heading in degrees, taken from the 23 start threshold towards the end.
RUNWAY_HEADING_DEG = map_funcs.bearing(
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_start'],
    GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['measurements']['runway_23_end'],
)
def measurements_relative_to_runway() -> typing.Dict[str, map_funcs.Point]:
    """Return every measured point as (x, y) metres in the runway frame.

    The datum point becomes the origin, the runway heading defines the
    axis, and pixel coordinates are scaled by the image's m_per_px.
    """
    image = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']
    origin = image['measurements'][image['datum']]
    scale = image['m_per_px']
    result: typing.Dict[str, map_funcs.Point] = {}
    for name, pt in image['measurements'].items():
        rotated = map_funcs.translate_rotate(pt, RUNWAY_HEADING_DEG, origin)
        result[name] = map_funcs.Point(scale * rotated.x, scale * rotated.y)
    return result
def bearings_from_camera_b() -> typing.Dict[str, typing.Tuple[float, float, float]]:
    """Bearing from camera B to every other measured point.

    Returns a dict mapping point name -> (bearing, bearing_min, bearing_max)
    in degrees, where min/max bound the bearing given the absolute position
    error of the points (converted to pixels via the image scale).

    FIX: the return annotation previously claimed ``Dict[str, float]``
    although each value is a 3-tuple.
    """
    image = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']
    camera_b = image['measurements']['camera_B']
    m_per_px = image['m_per_px']
    ret: typing.Dict[str, typing.Tuple[float, float, float]] = {}
    for k, v in image['measurements'].items():
        if k != 'camera_B':
            b = map_funcs.bearing(camera_b, v)
            b_min, b_max = map_funcs.bearing_min_max(camera_b, v, ABSOLUTE_POSITION_ERROR_M / m_per_px)
            ret[k] = b, b_min, b_max
    return ret
def main() -> int:
    """Print sanity checks (scale, runway length and heading), runway-frame
    coordinates, and camera-B bearings with their error bounds."""
    # Check scale and runway length
    m_per_px = GOOGLE_EARTH_AIRPORT_IMAGES['GoogleEarth_AirportCamera_C.jpg']['m_per_px']
    print(f'GoogleEarth_AirportCamera_C.jpg scale {m_per_px:0.4f} (m/pixel)')
    print(f'GoogleEarth_AirportCamera_C.jpg runway length {RUNWAY_LENGTH_M:.1f} (m)')
    print(f'GoogleEarth_AirportCamera_C.jpg runway heading {RUNWAY_HEADING_DEG:.2f} (degrees)')
    measurements = measurements_relative_to_runway()
    print('X-Y Relative')
    for k in measurements:
        print(f'{k:24} : x={measurements[k].x:8.1f} y={measurements[k].y:8.1f}')
    bearings = bearings_from_camera_b()
    print('Bearings')
    for k in bearings:
        # print(f'{k:24} : {bearings[k]:8.1f}')
        # Each entry is (bearing, min bound, max bound) in degrees.
        b, b_min, b_max = bearings[k]
        # print(f'{k:24} : {bearings[k]}')
        print(f'{k:24} : {b:8.2f} ± {b_max - b:.2f}/{b_min - b:.2f}')
    # Pairwise bearing separations (nominal and extreme bounds) between
    # selected landmark pairs.
    for a, b in itertools.combinations(('red_building', 'helicopter', 'buildings_apron_edge'), 2):
        ba, ba_min, ba_max = bearings[a]
        bb, bb_min, bb_max = bearings[b]
        print(a, '<->', b)
        print(f'{ba - bb:4.2f} {ba_max - bb_min:4.2f} {ba_min - bb_max:4.2f}')
    return 0
if __name__ == '__main__':
    sys.exit(main())
5772fd07bf52569383712e22c1c25c0c240b9a7f | 4,527 | py | Python | mongo_queue/queue.py | shunyeka/mongo_queue | df20894e96bfb93c6b710cd5b3d604498fb69576 | [
"MIT"
] | 6 | 2020-08-05T13:08:34.000Z | 2022-02-10T10:01:03.000Z | mongo_queue/queue.py | shunyeka/mongo_queue | df20894e96bfb93c6b710cd5b3d604498fb69576 | [
"MIT"
] | null | null | null | mongo_queue/queue.py | shunyeka/mongo_queue | df20894e96bfb93c6b710cd5b3d604498fb69576 | [
"MIT"
] | 4 | 2020-08-05T13:08:48.000Z | 2022-03-29T17:13:18.000Z | import pymongo
from datetime import datetime, timedelta
from mongo_queue.job import Job
from uuid import uuid4
from pymongo import errors
DEFAULT_INSERT = {
"attempts": 0,
"locked_by": None,
"locked_at": None,
"last_error": None
}
class Queue:
def __init__(self, collection, consumer_id, timeout=300, max_attempts=3):
"""
"""
self.collection = collection
self.consumer_id = consumer_id
self.timeout = timeout
self.max_attempts = max_attempts
self.ensure_indexes()
def ensure_indexes(self):
"""
"locked_by": None,
"locked_at": None,
"channel": channel,
"attempts"
"""
next_index = pymongo.IndexModel([("locked_by", pymongo.ASCENDING), ("locked_at", pymongo.ASCENDING),
("channel", pymongo.ASCENDING),
("attempts", pymongo.ASCENDING)], name="next_index")
update_index = pymongo.IndexModel([("_id", pymongo.ASCENDING),
("locked_by", pymongo.ASCENDING)], name="update_index")
unique_index = pymongo.IndexModel([("job_id", pymongo.ASCENDING),
("channel", pymongo.ASCENDING)], name="unique_index", unique=True)
self.collection.create_indexes([next_index, update_index, unique_index])
def close(self):
"""Close the in memory queue connection.
"""
self.collection.connection.close()
def clear(self):
"""Clear the queue.
"""
return self.collection.drop()
def size(self):
"""Total size of the queue
"""
return self.collection.count_documents(filter={})
def repair(self):
"""Clear out stale locks.
Increments per job attempt counter.
"""
self.collection.find_one_and_update(
filter={
"locked_by": {"$ne": None},
"locked_at": {
"$lt": datetime.now() - timedelta(self.timeout)}},
update={
"$set": {"locked_by": None, "locked_at": None},
"$inc": {"attempts": 1}}
)
def drop_max_attempts(self):
"""
"""
self.collection.update_many( filter={},
update={"attempts": {"$gte": self.max_attempts}},
remove=True)
def put(self, payload, priority=0, channel="default", job_id=None):
"""Place a job into the queue
"""
job = dict(DEFAULT_INSERT)
job['priority'] = priority
job['payload'] = payload
job['channel'] = channel
job['job_id'] = job_id or str(uuid4())
try:
return self.collection.insert_one(job)
except errors.DuplicateKeyError as e:
return False
def next(self, channel="default"):
return self._wrap_one(self.collection.find_one_and_update(
filter={"locked_by": None,
"locked_at": None,
"channel": channel,
"attempts": {"$lt": self.max_attempts}},
update={"$set": {"locked_by": self.consumer_id,
"locked_at": datetime.now()}},
sort=[('priority', pymongo.DESCENDING)],
))
def _jobs(self):
return self.collection.find(
query={"locked_by": None,
"locked_at": None,
"attempts": {"$lt": self.max_attempts}},
sort=[('priority', pymongo.DESCENDING)],
)
def _wrap_one(self, data):
return data and Job(self, data) or None
def stats(self):
"""Get statistics on the queue.
Use sparingly requires a collection lock.
"""
js = """function queue_stat(){
return db.eval(
function(){
var a = db.%(collection)s.count(
{'locked_by': null,
'attempts': {$lt: %(max_attempts)i}});
var l = db.%(collection)s.count({'locked_by': /.*/});
var e = db.%(collection)s.count(
{'attempts': {$gte: %(max_attempts)i}});
var t = db.%(collection)s.count();
return [a, l, e, t];
})}""" % {
"collection": self.collection.name,
"max_attempts": self.max_attempts}
return dict(zip(
["available", "locked", "errors", "total"],
self.collection.database.eval(js)))
| 33.533333 | 109 | 0.522863 | 4,276 | 0.944555 | 0 | 0 | 0 | 0 | 0 | 0 | 1,474 | 0.325602 |
5773699f898326e720821addb402b1324a622597 | 776 | py | Python | CursoGuanabara/ex71_aula15_guanabara.py | cirino/python | 6c45b5305aebeeeebb7ffef335700e41cc0b6b3b | [
"MIT"
] | 1 | 2018-05-06T01:25:28.000Z | 2018-05-06T01:25:28.000Z | CursoGuanabara/ex71_aula15_guanabara.py | cirino/python | 6c45b5305aebeeeebb7ffef335700e41cc0b6b3b | [
"MIT"
] | 1 | 2019-02-10T18:46:37.000Z | 2019-02-12T21:17:50.000Z | CursoGuanabara/ex71_aula15_guanabara.py | cirino/python | 6c45b5305aebeeeebb7ffef335700e41cc0b6b3b | [
"MIT"
] | null | null | null | print('''
Exercício 71 da aula 15 de Python
Curso do Guanabara
Day 24 Code Python - 23/05/2018
''')
print('{:^30}'.format('BANCO DO CIRINO'))
print('=' * 30)
n = int(input('Qual o valor para sacar? R$ '))
total = n
nota = 50 # começar de cima para baixo na estrutura
qtdNota = 0
while True:
if total >= nota:
total -= nota
qtdNota += 1
else:
if qtdNota > 0:
print(f'Total de {qtdNota} notas de {nota}')
if nota == 50:
nota = 20
elif nota == 20:
nota = 10
elif nota == 10:
nota = 5
elif nota == 5:
nota = 1
qtdNota = 0
if total == 0:
break
print('=' * 30)
print('{:^30}'.format('FIM'))
print('=' * 30)
| 18.47619 | 56 | 0.487113 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.335476 |
5773c7f1c83fb78f2af7b935f1a55fb941e08abf | 1,823 | py | Python | models/face_completion.py | MartinKondor/MachineLearning | 2deb257796cb63bcc4d13594484e266a93ab7a83 | [
"MIT"
] | null | null | null | models/face_completion.py | MartinKondor/MachineLearning | 2deb257796cb63bcc4d13594484e266a93ab7a83 | [
"MIT"
] | null | null | null | models/face_completion.py | MartinKondor/MachineLearning | 2deb257796cb63bcc4d13594484e266a93ab7a83 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils.validation import check_random_state
from sklearn.datasets import fetch_olivetti_faces
from sklearn.externals import joblib
rng = check_random_state(21)
dataset = fetch_olivetti_faces()
X = dataset.images.reshape(dataset.images.shape[0], -1)
train = X[dataset.target < 30]
test = X[dataset.target >= 30]
n_faces = 3
face_ids = rng.randint(test.shape[0], size=(n_faces,))
test = test[face_ids, :]
n_pixels = X.shape[1]
# Upper half of the faces
X_train = train[:, :(n_pixels + 1) // 2]
# Lower half of the faces
y_train = train[:, n_pixels // 2:]
X_test = test[:, :(n_pixels + 1) // 2]
y_test = test[:, n_pixels // 2:]
n_rows = 2
imshape = (64, 64,)
def test_model(y_pred, model_name):
    """Plot the true test faces (top row) against the model-completed faces
    (bottom row).

    Relies on the module-level ``X_test``, ``y_test``, ``n_faces``,
    ``n_rows`` and ``imshape`` defined above.

    FIX: subplot positions were built as ``int('{}{}{}'.format(rows, cols,
    index))``, which silently breaks as soon as any of the three values has
    more than one digit; the three-argument form of ``plt.subplot`` is the
    correct, general equivalent.
    """
    plt.figure(figsize=(1.7 * n_faces, 4))
    plt.suptitle('Face completion with ' + model_name, size=12)
    # Top row: upper half stitched to the real lower half.
    for i in range(n_faces):
        plt.subplot(n_rows, n_faces, i + 1)
        plt.axis('off')
        plt.imshow(np.hstack((X_test[i], y_test[i])).reshape(imshape),
                   cmap=plt.cm.gray, interpolation='nearest')
    # Bottom row: upper half stitched to the predicted lower half.
    for i in range(n_faces):
        plt.subplot(n_rows, n_faces, n_faces + i + 1)
        plt.axis('off')
        plt.imshow(np.hstack((X_test[i], y_pred[i])).reshape(imshape),
                   cmap=plt.cm.gray, interpolation='nearest')
# Load each pre-trained completion model and plot its predictions for the
# sampled test faces.
# NOTE(review): ``sklearn.externals.joblib`` was removed from modern
# scikit-learn; new code should import the standalone ``joblib`` package.
test_model(joblib.load('../trained_models/nn_face_completion.pkl').predict(X_test), 'Face completion with a Neural Network')
test_model(joblib.load('../trained_models/knn_face_completion.pkl').predict(X_test), 'Face completion with a k-Nearest Neighbors')
test_model(joblib.load('../trained_models/dt_face_completion.pkl').predict(X_test), 'Face completion with a Decision Tree')
plt.show()
57742842c8a7ea0601406b324c435a1136e7bdad | 1,815 | py | Python | tests/utility.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | 1 | 2021-05-06T19:45:54.000Z | 2021-05-06T19:45:54.000Z | tests/utility.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | tests/utility.py | dpazel/music_rep | 2f9de9b98b13df98f1a0a2120b84714725ce527e | [
"MIT"
] | null | null | null | TONES = list('CDEFGAB')
def build_offset_list(scale):
iter_scale = iter(scale)
first = next(iter_scale)
base = first.tonal_offset
last_diff = 0
tonal_offsets = []
for dt in iter_scale:
diff = dt.tonal_offset - base
if diff < 0:
diff += 12
# This is the difference in which we are interested
delta = diff - last_diff
# Typically on the last letter, we return to the small value again, and need to
# normalize mode 12
if delta < 0:
delta += 12
tonal_offsets.append(delta)
last_diff = diff
return tonal_offsets
def build_incremental_intervals(scale):
    """Return the interval from the scale's first tone up to each tone.

    Starts with a perfect unison; the octave (partition) is bumped whenever
    the letter sequence wraps past 'B' back towards 'C'.
    """
    from tonalmodel.diatonic_pitch import DiatonicPitch
    from tonalmodel.interval import Interval
    tones = iter(scale)
    first = next(tones)
    octave = 4
    previous_letter = TONES.index(first.diatonic_letter)
    previous_pitch = DiatonicPitch(octave, first)
    result = [Interval.parse('P:1')]
    for tone in tones:
        letter = TONES.index(tone.diatonic_letter)
        if letter < previous_letter:
            octave += 1
        previous_letter = letter
        pitch = DiatonicPitch(octave, tone)
        result.append(Interval.create_interval(previous_pitch, pitch))
        previous_pitch = pitch
    return result
def build_letter_offset_list(scale):
    """Return, for each consecutive pair of scale tones, the forward
    distance (0..6) between their letters in the C..B cycle."""
    tones = iter(scale)
    previous = TONES.index(next(tones).diatonic_letter)
    offsets = []
    for tone in tones:
        position = TONES.index(tone.diatonic_letter)
        step = position - previous
        offsets.append(step if step >= 0 else step + 7)
        previous = position
    return offsets
def get_symbol(dt):
    """Return the diatonic symbol (e.g. 'C#') of tone *dt*."""
    return getattr(dt, 'diatonic_symbol')
| 29.274194 | 88 | 0.648485 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 164 | 0.090358 |
577497e84fee800ed831d409f33393bf3915e7c8 | 3,447 | py | Python | backend/api/serializers/information_serializer.py | ferdn4ndo/infotrem | 4728c5fe8385dcc0a1c75068429fa20e2afbf6f2 | [
"MIT"
] | null | null | null | backend/api/serializers/information_serializer.py | ferdn4ndo/infotrem | 4728c5fe8385dcc0a1c75068429fa20e2afbf6f2 | [
"MIT"
] | 1 | 2020-06-21T18:38:14.000Z | 2020-06-21T21:57:09.000Z | backend/api/serializers/information_serializer.py | ferdn4ndo/infotrem | 4728c5fe8385dcc0a1c75068429fa20e2afbf6f2 | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.db.models import Sum
from rest_framework import serializers
from api.models.information_model import Information
from api.models.information_effect_model import InformationEffect
from api.models.information_vote_model import InformationVote
from api.serializers.user_serializer import UserSerializer
class InformationEffectSerializer(serializers.ModelSerializer):
    """Serializer for a single field-level effect of an Information record."""
    # Write-only link to the parent Information row.
    information_id = serializers.CharField(required=True, write_only=True)
    # Optional and may be blank — presumably the field's value before the
    # change (confirm against the model).
    old_value = serializers.CharField(required=False, allow_blank=True)
    class Meta:
        model = InformationEffect
        fields = ['information_id', 'field_name', 'old_value', 'new_value']
class InformationSerializer(serializers.ModelSerializer):
    """Serializer for Information records: nested users, field effects and
    vote aggregates."""
    author = UserSerializer()
    created_by = UserSerializer()
    updated_by = UserSerializer()
    effects = InformationEffectSerializer(many=True)
    votes_up = serializers.SerializerMethodField('get_votes_up')
    votes_down = serializers.SerializerMethodField('get_votes_down')
    votes_sum = serializers.SerializerMethodField('get_votes_sum')

    @staticmethod
    def get_votes_up(obj):
        """Number of positive votes on *obj*.

        FIX/perf: uses ``.count()`` (a database COUNT) instead of
        ``len(...)``, which materialised every matching row just to count
        them. The returned value is identical.
        """
        return InformationVote.objects.filter(information=obj, value__gt=0).count()

    @staticmethod
    def get_votes_down(obj):
        """Number of negative votes on *obj*, counted in the database."""
        return InformationVote.objects.filter(information=obj, value__lt=0).count()

    @staticmethod
    def get_votes_sum(obj):
        # NOTE(review): aggregate yields None when there are no votes; kept
        # as-is in case API clients expect null rather than 0.
        return InformationVote.objects.filter(information=obj).aggregate(Sum('value'))['value__sum']

    class Meta:
        model = Information
        fields = [
            'id',
            'author',
            'content',
            'effects',
            'status',
            'references',
            'created_by',
            'created_at',
            'updated_by',
            'updated_at',
            'votes_up',
            'votes_down',
            'votes_sum',
        ]

    def create(self, validated_data):
        """Create the Information row plus one InformationEffect per nested
        effect payload, and return the new Information."""
        effects_data = validated_data.pop('effects')
        information = Information.objects.create(**validated_data)
        for effect_data in effects_data:
            InformationEffect.objects.create(information=information, **effect_data)
        return information
class InformationVoteSerializer(serializers.ModelSerializer):
    """Serializer used to record a user's vote on an Information record."""
    information = InformationSerializer(write_only=True)

    class Meta:
        model = InformationVote
        fields = ['information', 'value']

    @staticmethod
    def get_value_for_user(information: Information, user: User):
        """Return *user*'s vote value on *information*, or 0 when absent."""
        try:
            existing = InformationVote.objects.get(information=information, voter=user)
        except InformationVote.DoesNotExist:
            return 0
        return existing.value

    @staticmethod
    def check_vote_value(vote_value):
        """Raise a ValidationError when *vote_value* lies outside the
        allowed [negative, positive] range."""
        max_value = InformationVote.POSITIVE_VOTE_VALUE
        min_value = InformationVote.NEGATIVE_VOTE_VALUE
        if vote_value > max_value or vote_value < min_value:
            raise serializers.ValidationError(
                'Vote value may not be greater than {} and lower than {}'.format(max_value, min_value)
            )

    def create(self, validated_data):
        """Validate the vote value, then insert the vote."""
        self.check_vote_value(validated_data['value'])
        return InformationVote.objects.create(**validated_data)

    def update(self, instance, validated_data):
        """Validate the new value, then overwrite and persist the vote."""
        new_value = validated_data['value']
        self.check_vote_value(new_value)
        instance.value = new_value
        instance.save()
        return instance
5774e1717dedb11588476f4f78df3c3d0a8e219f | 3,493 | py | Python | insightface/face_model.py | dniku/insightface | 763060b2608aaa9e77103ac6aaa393878501dee3 | [
"MIT"
] | null | null | null | insightface/face_model.py | dniku/insightface | 763060b2608aaa9e77103ac6aaa393878501dee3 | [
"MIT"
] | null | null | null | insightface/face_model.py | dniku/insightface | 763060b2608aaa9e77103ac6aaa393878501dee3 | [
"MIT"
] | null | null | null | import os
import cv2
import mxnet as mx
import numpy as np
from . import face_preprocess
from .mtcnn_detector import MtcnnDetector
def get_model(ctx, image_size, model_str, layer):
    """Load a pretrained MXNet checkpoint described by "<prefix>,<epoch>"
    and return a Module truncated at *layer*'s output, bound for a single
    3-channel image of *image_size*."""
    parts = model_str.split(',')
    assert len(parts) == 2
    prefix = parts[0]
    epoch = int(parts[1])
    print('loading', prefix, epoch)
    sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
    sym = sym.get_internals()[layer + '_output']
    module = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
    module.bind(data_shapes=[('data', (1, 3, image_size[0], image_size[1]))])
    module.set_params(arg_params, aux_params)
    return module
class FaceModel:
    """Face pipeline: MTCNN detection/alignment plus MXNet modules for
    feature embeddings and gender/age estimation."""
    def __init__(self, image_size, model, ga_model, det, threshold, gpu):
        """
        :param image_size: "w,h" string, e.g. "112,112".
        :param model: "prefix,epoch" checkpoint spec for the embedding net ('' skips loading).
        :param ga_model: "prefix,epoch" checkpoint spec for the gender/age net ('' skips loading).
        :param det: 0 selects the normal MTCNN cascade thresholds, any other value a much looser set.
        :param threshold: stored on the instance; not referenced again inside this class.
        :param gpu: GPU index used as the MXNet context.
        """
        self.det = det
        ctx = mx.gpu(gpu)
        _vec = image_size.split(',')
        assert len(_vec) == 2
        image_size = (int(_vec[0]), int(_vec[1]))
        self.model = None
        self.ga_model = None
        # Empty strings skip loading the corresponding network.
        if len(model) > 0:
            self.model = get_model(ctx, image_size, model, 'fc1')
        if len(ga_model) > 0:
            self.ga_model = get_model(ctx, image_size, ga_model, 'fc1')
        self.threshold = threshold
        self.det_minsize = 50
        self.det_threshold = [0.6, 0.7, 0.8]
        # self.det_factor = 0.9
        self.image_size = image_size
        # MTCNN weights ship alongside this module in ``mtcnn-model``.
        mtcnn_path = os.path.join(os.path.dirname(__file__), 'mtcnn-model')
        if det == 0:
            detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark=True,
                                     threshold=self.det_threshold)
        else:
            # Looser cascade thresholds for the alternative detection mode.
            detector = MtcnnDetector(model_folder=mtcnn_path, ctx=ctx, num_worker=1, accurate_landmark=True,
                                     threshold=[0.0, 0.0, 0.2])
        self.detector = detector
    def get_input(self, face_img):
        """Detect a face in *face_img* and return it aligned as a CHW RGB
        array, or None when no face is found.
        NOTE(review): assumes BGR input (cv2 convention) — it is converted
        to RGB before the transpose; confirm at the call sites.
        """
        ret = self.detector.detect_face(face_img, det_type=self.det)
        if ret is None:
            return None
        bbox, points = ret
        if bbox.shape[0] == 0:
            return None
        # Keep only the first detection: its box and its 5 landmark points.
        bbox = bbox[0, 0:4]
        points = points[0, :].reshape((2, 5)).T
        # print(bbox)
        # print(points)
        nimg = face_preprocess.preprocess(face_img, bbox, points, image_size='112,112')
        nimg = cv2.cvtColor(nimg, cv2.COLOR_BGR2RGB)
        # HWC -> CHW, the layout the MXNet modules expect.
        aligned = np.transpose(nimg, (2, 0, 1))
        return aligned
    def get_feature(self, aligned):
        """Run the embedding net on one aligned face (CHW) or a batch
        (NCHW) and return L2-normalised embedding rows."""
        assert aligned.ndim in (3, 4)
        # Promote a single face to a batch of one.
        input_blob = np.expand_dims(aligned, axis=0) if aligned.ndim == 3 else aligned
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.model.forward(db, is_train=False)
        embedding = self.model.get_outputs()[0].asnumpy()
        # Normalise each row to unit length (for cosine-style comparison).
        embedding /= np.linalg.norm(embedding, axis=1, keepdims=True)
        return embedding
    def get_ga(self, aligned):
        """Run the gender/age net on one aligned face and return
        (gender, age), with gender the argmax of the first two outputs.
        NOTE(review): age is decoded from outputs 2..201 as 100 two-way
        votes whose argmax values are summed — confirm against the model's
        training scheme.
        """
        input_blob = np.expand_dims(aligned, axis=0)
        data = mx.nd.array(input_blob)
        db = mx.io.DataBatch(data=(data,))
        self.ga_model.forward(db, is_train=False)
        ret = self.ga_model.get_outputs()[0].asnumpy()
        g = ret[:, 0:2].flatten()
        gender = np.argmax(g)
        a = ret[:, 2:202].reshape((100, 2))
        a = np.argmax(a, axis=1)
        age = int(sum(a))
        return gender, age
577604174c837cf71abd588c86d1070a895c9358 | 316 | py | Python | src/newspaperkk.py | harakiriboy/Python-Final-exam- | 11a08453fe4dd547d09c97baeda57f3899b1e17a | [
"MIT"
] | null | null | null | src/newspaperkk.py | harakiriboy/Python-Final-exam- | 11a08453fe4dd547d09c97baeda57f3899b1e17a | [
"MIT"
] | null | null | null | src/newspaperkk.py | harakiriboy/Python-Final-exam- | 11a08453fe4dd547d09c97baeda57f3899b1e17a | [
"MIT"
] | null | null | null | import newspaper
from newspaper import Article
def getarticle(url):
    """Download and parse the article at *url* and return its full text.

    Returns the fallback string "this website is not available" when the
    fetch or parse fails.
    """
    article = Article(url)
    try:
        article.download()
        article.parse()
        return article.text
    # FIX: was a bare ``except:``, which also swallows KeyboardInterrupt and
    # SystemExit; narrowed to Exception. The unused ``articleurl``/``alltext``
    # locals are gone as well.
    except Exception:
        return "this website is not available"
| 18.588235 | 47 | 0.607595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.098101 |
577688ba8ec27db2c2499b1d093cb54fc4a94c4c | 71 | py | Python | Server/app/schema/utils/__init__.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | 4 | 2018-06-18T06:50:12.000Z | 2018-11-15T00:08:24.000Z | Server/app/schema/utils/__init__.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | Server/app/schema/utils/__init__.py | Team-SeeTo/SeeTo-Backend | 19990cd6f4895e773eaa504f7b7a07ddbb5856e5 | [
"Apache-2.0"
] | null | null | null | from .activity_logger import idea_activity_logger, todo_activity_logger | 71 | 71 | 0.915493 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
57778b9212cbd84692109c1076197d5f351d5c74 | 1,302 | py | Python | h2o-py/tests/testdir_misc/pyunit_pubdev_7506_model_download_with_cv.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 6,098 | 2015-05-22T02:46:12.000Z | 2022-03-31T16:54:51.000Z | h2o-py/tests/testdir_misc/pyunit_pubdev_7506_model_download_with_cv.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,517 | 2015-05-23T02:10:54.000Z | 2022-03-30T17:03:39.000Z | h2o-py/tests/testdir_misc/pyunit_pubdev_7506_model_download_with_cv.py | vishalbelsare/h2o-3 | 9322fb0f4c0e2358449e339a434f607d524c69fa | [
"Apache-2.0"
] | 2,199 | 2015-05-22T04:09:55.000Z | 2022-03-28T22:20:45.000Z | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
import h2o
import os
from h2o.estimators.gbm import H2OGradientBoostingEstimator
from tests import pyunit_utils
def model_download_with_cv():
    """Train a CV-enabled GBM, download it together with its cross-validation
    holdout predictions, re-upload the artifact after wiping the cluster, and
    verify both the model and the holdout frame survive the round trip.
    """
    # Build a small binomial GBM that keeps its cross-validation predictions.
    frame = h2o.import_file(pyunit_utils.locate("smalldata/prostate/prostate.csv"))
    frame["CAPSULE"] = frame["CAPSULE"].asfactor()
    gbm = H2OGradientBoostingEstimator(nfolds=2, keep_cross_validation_predictions=True)
    gbm.train(x=["AGE", "RACE", "PSA", "DCAPS"], y="CAPSULE", training_frame=frame)
    # Export the model (including CV predictions) to disk.
    out_dir = pyunit_utils.locate("results")
    artifact = h2o.download_model(gbm, path=out_dir, export_cross_validation_predictions=True)
    assert os.path.isfile(artifact), "Expected model artifact {0} to exist, but it does not.".format(artifact)
    # Wipe the in-memory cluster state, then restore from the artifact alone.
    h2o.remove_all()
    reloaded = h2o.upload_model(artifact)
    assert isinstance(reloaded, H2OGradientBoostingEstimator), \
        "Expected H2OGradientBoostingEstimator, but got {0}".format(reloaded)
    holdout_id = gbm.cross_validation_holdout_predictions().frame_id
    assert h2o.get_frame(holdout_id) is not None
# Run as a standalone pyunit test when executed directly; plain call otherwise.
if __name__ == "__main__":
    pyunit_utils.standalone_test(model_download_with_cv)
else:
    model_download_with_cv()
| 38.294118 | 114 | 0.771889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 256 | 0.196621 |
5777cb4aa061f3c293f389812b1b6779768ca886 | 890 | py | Python | CNN/VGGNET/vgg16_features.py | reddyprasade/Deep-Learning
| 35fea69af72f94f6ad62a0f308de7bd515c27e7a
| [
"MIT"
] | 15 | 2020-01-23T12:01:22.000Z | 2022-03-29T21:07:41.000Z | CNN/VGGNET/vgg16_features.py | reddyprasade/Deep-Learning
| 35fea69af72f94f6ad62a0f308de7bd515c27e7a
| [
"MIT"
] | null | null | null | CNN/VGGNET/vgg16_features.py | reddyprasade/Deep-Learning
| 35fea69af72f94f6ad62a0f308de7bd515c27e7a
| [
"MIT"
] | 10 | 2020-02-12T02:52:04.000Z | 2021-07-04T07:38:39.000Z | import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras import models
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
import numpy as np
import cv2
# Prebuilt VGG16 with weights pre-trained on ImageNet; include_top=True keeps
# the fully-connected classifier layers so every named layer is available.
base_model = VGG16(weights='imagenet', include_top=True)
print (base_model)
# List index, name and output shape of each layer — useful when choosing the
# layer to extract features from.
for i, layer in enumerate(base_model.layers):
    print (i, layer.name, layer.output_shape)
# Build a truncated feature-extractor model that stops at 'block4_pool'.
model = models.Model(inputs=base_model.input,
                     outputs=base_model.get_layer('block4_pool').output)
# Load one image and preprocess it the way VGG16 expects: resize to 224x224,
# add a batch axis, then apply VGG16's preprocess_input transform.
img_path = 'cat.jpg'
img = image.load_img(img_path, target_size=(224, 224))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
# Run the truncated network to obtain the block4_pool feature maps.
features = model.predict(x)
print(features)
57785e3742249e6e69fdb5606027bac6121de3f7 | 3,262 | py | Python | evaluate.py | m4ln/HIWI_classification | f872c3da03bf999aeddd870eeed34332c8a9471a | [
"MIT"
] | 1 | 2020-09-07T10:02:07.000Z | 2020-09-07T10:02:07.000Z | evaluate.py | m4ln/HIWI_classification | f872c3da03bf999aeddd870eeed34332c8a9471a | [
"MIT"
] | 1 | 2020-09-16T14:26:01.000Z | 2020-09-16T14:26:01.000Z | evaluate.py | m4ln/HIWI_classification | f872c3da03bf999aeddd870eeed34332c8a9471a | [
"MIT"
] | 1 | 2020-09-07T11:29:47.000Z | 2020-09-07T11:29:47.000Z | """
call in shell: python evaluate.py --dir <rootdir/experiment/> --epoch <epoch to>
e.g. in shell: python evaluate.py --dir Runs/se_resnet_trained_final/ --epoch 149
loops over all folds and calculates + stores the accuracies in a file in the root folder of the experiment
you might change the model in line 45 from resnet to se_resnet (see comment)
"""
import torch
from torch.utils.data import Dataset, DataLoader
#import matplotlib.pyplot as plt
#import seaborn as sns; sns.set()
import numpy as np
import os
from os.path import join
import argparse
import Training_custom.load_dataset
from senet.baseline import resnet20
from senet.se_resnet import se_resnet20
# Command-line interface: --dir points at the experiment root containing the
# fold_<n> sub-directories, --epoch selects which checkpoint to evaluate.
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dir', type=str, metavar='', required=True, help='Directory of the x_folds.')
parser.add_argument('-e', '--epoch', type=int, metavar='', required=True, help='from which epoch should the model be loaded?')
args = parser.parse_args()
working_dir = os.getcwd()
# Absolute path of the experiment root (all fold_<n> folders live here).
rootpath = join(working_dir, args.dir)
def evaluate(fold_i):
    """Evaluate the checkpointed model of fold *fold_i* on the test set and
    return a formatted "folder/accuracy" string."""
    # Path to one checkpoint of this fold.
    CHKPT = f"{args.dir}/fold_{fold_i}/checkpoints/train_chkpt_{args.epoch}.tar"
    # The file train_chkpt_<epoch>.tar is a dictionary that is a snapshot of
    # the training state at that epoch.  Only the keys "train_loss",
    # "val_loss" and "model_state_dict" really matter here; train and val
    # loss are 1D torch tensors holding the mean loss per epoch (index).
    train_status = torch.load(CHKPT, map_location='cpu')
    #print(train_status)
    # Restore the model from the checkpoint's state dict.
    model = resnet20(num_classes=4) #resnet20(num_classes=4) or alternatively: se_resnet20(num_classes=4, reduction=16)
    model.load_state_dict(train_status['model_state_dict'])
    model.eval()
    test_data = Training_custom.load_dataset.imagewise_dataset(datadir = '/home/vbarth/HIWI/classificationDataValentin/mixed_cropped/test')
    #dataloader = DataLoader(test_data, batch_size=16,
    #                        shuffle=False, num_workers=0)
    acc=0 #running count of correct predictions (normalized at the end)
    i = 0 #sample counter, only used for periodic progress prints
    for x, y in test_data: #iterate over testset
        x = x.unsqueeze(0) #add the missing batch dimension to get a 4d tensor
        y_pred = model(x).squeeze()
        # argmax over class scores; ind is the predicted class index
        pred, ind = torch.max(y_pred, 0)
        if y.item() == ind.item():
            acc = acc + 1 #add one when the prediction was right else add nothing
        i = i +1 ##print every 3000th sample
        if i % 3000 == 0:
            print("Sample: ", i, "\n y_pred: ",y_pred, "\n pred: ", pred, "\n ind: ", ind, "\n y: ", y.item())
    # Definition of accuracy: correct predictions / number of test samples.
    acc = acc/len(test_data)
    #print("Accuracy: ", acc )
    return f"folder: {fold_i}, accuracy: {acc} \n"
if __name__ == "__main__":
    # One sub-folder per fold is expected under the experiment root.
    n_files = (len([name for name in os.listdir(rootpath)]))
    #print(n_files)
    accs = []
    # Folds are numbered starting at 1 (fold_1, fold_2, ...).
    for fold in range(n_files):
        print(f"Processing folder number {fold}")
        acc_str = evaluate(fold+1)
        accs.append(acc_str)
    # Persist all per-fold accuracies to one file in the experiment root.
    with open(join(rootpath, "accuracies"), 'w') as f:
        for string in accs:
            f.write(string)
| 34.336842 | 139 | 0.656039 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,628 | 0.49908 |
5778b44bc575b3aed34007cd02dacc145c82ec6d | 18,413 | py | Python | Allura/allura/webhooks.py | brondsem/allura | 37f4f4a68e71c7f1b8d72f4eee4c0b3d08c1a906 | [
"Apache-2.0"
] | null | null | null | Allura/allura/webhooks.py | brondsem/allura | 37f4f4a68e71c7f1b8d72f4eee4c0b3d08c1a906 | [
"Apache-2.0"
] | null | null | null | Allura/allura/webhooks.py | brondsem/allura | 37f4f4a68e71c7f1b8d72f4eee4c0b3d08c1a906 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import json
import hmac
import hashlib
import time
import socket
import ssl
from bson import ObjectId
from tg import expose, validate, redirect, flash, config
from tg.decorators import with_trailing_slash, without_trailing_slash
from tg import tmpl_context as c
from tg import response, request
from formencode import validators as fev, schema, Invalid
from ming.odm import session
from webob import exc
from pymongo.errors import DuplicateKeyError
from paste.deploy.converters import asint, aslist
from allura.app import AdminControllerMixin
from allura.controllers import BaseController
from allura.lib import helpers as h
import requests
from allura.lib import validators as v
from allura.lib.decorators import require_post, task
from allura.lib.utils import DateJSONEncoder
from allura import model as M
import six
from six.moves import map
log = logging.getLogger(__name__)
class WebhookValidator(fev.FancyValidator):
    """FormEncode validator that resolves a value (a Webhook instance, an
    ObjectId, or an ObjectId string) to an existing Webhook belonging to the
    configured sender type and app, raising Invalid otherwise."""

    def __init__(self, sender, app, **kw):
        self.app = app
        self.sender = sender
        super(WebhookValidator, self).__init__(**kw)

    def _lookup(self, value):
        # Accept a Webhook directly, an ObjectId, or anything coercible to an
        # ObjectId; return None when no lookup succeeds.
        if isinstance(value, M.Webhook):
            return value
        if isinstance(value, ObjectId):
            return M.Webhook.query.get(_id=value)
        try:
            return M.Webhook.query.get(_id=ObjectId(value))
        except Exception:
            return None

    def _to_python(self, value, state):
        hook = self._lookup(value)
        # The hook must exist and be scoped to this sender type and app.
        if hook and hook.type == self.sender.type \
                and hook.app_config_id == self.app.config._id:
            return hook
        raise Invalid('Invalid webhook', value, state)
class WebhookCreateForm(schema.Schema):
    # The target URL is mandatory and must be well-formed; the shared secret
    # is optional (a random one is generated when it is left empty).
    url = fev.URL(not_empty=True)
    secret = v.UnicodeString()
class WebhookEditForm(WebhookCreateForm):
    """Create-form fields plus a 'webhook' field that resolves to the
    existing Webhook being edited (scoped to the sender type and app)."""

    def __init__(self, sender, app):
        super(WebhookEditForm, self).__init__()
        self.add_field('webhook', WebhookValidator(
            sender=sender, app=app, not_empty=True))
class WebhookControllerMeta(type):
    # Metaclass that attaches @validate decorators to the 'create' and 'edit'
    # post handlers at controller-instantiation time, because the validators
    # need the concrete sender/app which are only known at that point.
    def __call__(cls, sender, app, *args, **kw):
        """Decorate post handlers with a validator that references
        the appropriate webhook sender for this controller.
        """
        if hasattr(cls, 'create'):
            cls.create = validate(
                cls.create_form(),
                error_handler=getattr(cls.index, '__func__', cls.index),
            )(cls.create)
        if hasattr(cls, 'edit'):
            cls.edit = validate(
                cls.edit_form(sender, app),
                error_handler=getattr(cls._default, '__func__', cls._default),
            )(cls.edit)
        return type.__call__(cls, sender, app, *args, **kw)
class WebhookController(six.with_metaclass(WebhookControllerMeta, BaseController, AdminControllerMixin)):
    """Admin-UI controller for creating, editing and deleting the webhooks
    of one sender type on one app."""

    create_form = WebhookCreateForm
    edit_form = WebhookEditForm

    def __init__(self, sender, app):
        super(WebhookController, self).__init__()
        self.sender = sender()
        self.app = app

    def gen_secret(self):
        # Random shared secret used to sign payloads when the user supplies none.
        return h.cryptographic_nonce(20)

    def update_webhook(self, wh, url, secret=None):
        """Set url/secret on *wh* and flush; raise Invalid on a duplicate hook."""
        if not secret:
            secret = self.gen_secret()
        wh.hook_url = url
        wh.secret = secret
        try:
            session(wh).flush(wh)
        except DuplicateKeyError:
            # A hook of this type/app/url already exists: discard the
            # in-memory change and surface a form-level error.
            session(wh).expunge(wh)
            msg = '_the_form: "{}" webhook already exists for {} {}'.format(
                wh.type, self.app.config.options.mount_label, url)
            raise Invalid(msg, None, None)

    @with_trailing_slash
    @expose('jinja:allura:templates/webhooks/create_form.html')
    def index(self, **kw):
        # Create-form page; also serves as the error handler for create().
        if not c.form_values and kw:
            # Executes if update_webhook raises an error
            c.form_values = {'url': kw.get('url'),
                             'secret': kw.get('secret')}
        return {'sender': self.sender,
                'action': 'create',
                'form': self.create_form()}

    @expose('jinja:allura:templates/webhooks/create_form.html')  # needed when we "return self.index(...)"
    @require_post()
    # @validate set dynamically in WebhookControllerMeta
    def create(self, url, secret):
        # Refuse to create when the per-project/app webhook limit is reached.
        if self.sender.enforce_limit(self.app):
            webhook = M.Webhook(
                type=self.sender.type,
                app_config_id=self.app.config._id)
            try:
                self.update_webhook(webhook, url, secret)
            except Invalid as e:
                # trigger error_handler directly
                c.form_errors['_the_form'] = e
                return self.index(url=url, secret=secret)
            M.AuditLog.log('add webhook %s %s %s',
                           webhook.type, webhook.hook_url,
                           webhook.app_config.url())
            flash('Created successfully', 'ok')
        else:
            flash('You have exceeded the maximum number of webhooks '
                  'you are allowed to create for this project/app', 'error')
        redirect(self.app.admin_url + 'webhooks')

    @expose('jinja:allura:templates/webhooks/create_form.html')  # needed when we "return self._default(...)"
    @require_post()
    # @validate set dynamically in WebhookControllerMeta
    def edit(self, webhook, url, secret):
        old_url = webhook.hook_url
        old_secret = webhook.secret
        try:
            self.update_webhook(webhook, url, secret)
        except Invalid as e:
            # trigger error_handler directly
            c.form_errors['_the_form'] = e
            return self._default(webhook=webhook, url=url, secret=secret)
        M.AuditLog.log('edit webhook %s\n%s => %s\n%s',
                       webhook.type, old_url, url,
                       'secret changed' if old_secret != secret else '')
        flash('Edited successfully', 'ok')
        redirect(self.app.admin_url + 'webhooks')

    @expose('json:')
    @require_post()
    def delete(self, webhook, **kw):
        # Resolve the webhook id through the edit form's validator so only
        # hooks belonging to this sender/app can be deleted.
        form = self.edit_form(self.sender, self.app)
        try:
            wh = form.fields['webhook'].to_python(webhook)
        except Invalid:
            raise exc.HTTPNotFound()
        wh.delete()
        M.AuditLog.log('delete webhook %s %s %s',
                       wh.type, wh.hook_url, wh.app_config.url())
        return {'status': 'ok'}

    @without_trailing_slash
    @expose('jinja:allura:templates/webhooks/create_form.html')
    def _default(self, webhook, **kw):
        # Edit page for a single webhook addressed by its ObjectId string;
        # also serves as the error handler for edit().
        form = self.edit_form(self.sender, self.app)
        try:
            wh = form.fields['webhook'].to_python(webhook)
        except Invalid:
            raise exc.HTTPNotFound()
        c.form_values = {'url': kw.get('url') or wh.hook_url,
                         'secret': kw.get('secret') or wh.secret,
                         'webhook': six.text_type(wh._id)}
        return {'sender': self.sender,
                'action': 'edit',
                'form': form}
class WebhookRestController(BaseController):
    """REST/JSON counterpart of WebhookController: create, read, update and
    delete webhooks of one sender type on one app."""

    def __init__(self, sender, app):
        super(WebhookRestController, self).__init__()
        self.sender = sender()
        self.app = app
        self.create_form = WebhookController.create_form
        self.edit_form = WebhookController.edit_form

    def _error(self, e):
        # Normalize a validation error into something JSON-serializable:
        # a {field: message} dict when available, else a plain message string.
        error = getattr(e, 'error_dict', None)
        if error:
            _error = {}
            for k, val in six.iteritems(error):
                _error[k] = six.text_type(val)
            return _error
        error = getattr(e, 'msg', None)
        if not error:
            error = getattr(e, 'message', '')
        return error

    def update_webhook(self, wh, url, secret=None):
        # Delegate to the HTML controller so both interfaces share the same
        # validation, secret generation and duplicate handling.
        controller = WebhookController(self.sender.__class__, self.app)
        controller.update_webhook(wh, url, secret)

    @expose('json:')
    @require_post()
    def index(self, **kw):
        # POST /: create a new webhook; 201 + JSON on success, 400 on error.
        response.content_type = str('application/json')
        try:
            params = {'secret': kw.pop('secret', ''),
                      'url': kw.pop('url', None)}
            valid = self.create_form().to_python(params)
        except Exception as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        if self.sender.enforce_limit(self.app):
            webhook = M.Webhook(
                type=self.sender.type,
                app_config_id=self.app.config._id)
            try:
                self.update_webhook(webhook, valid['url'], valid['secret'])
            except Invalid as e:
                response.status_int = 400
                return {'result': 'error', 'error': self._error(e)}
            M.AuditLog.log('add webhook %s %s %s',
                           webhook.type, webhook.hook_url,
                           webhook.app_config.url())
            response.status_int = 201
            # refetch updated values (e.g. mod_date)
            session(webhook).expunge(webhook)
            webhook = M.Webhook.query.get(_id=webhook._id)
            return webhook.__json__()
        else:
            # Limit reached: report the configured max and current usage.
            limits = {
                'max': M.Webhook.max_hooks(
                    self.sender.type,
                    self.app.config.tool_name),
                'used': M.Webhook.query.find({
                    'type': self.sender.type,
                    'app_config_id': self.app.config._id,
                }).count(),
            }
            resp = {
                'result': 'error',
                'error': 'You have exceeded the maximum number of webhooks '
                         'you are allowed to create for this project/app',
                'limits': limits,
            }
            response.status_int = 400
            return resp

    @expose('json:')
    def _default(self, webhook, **kw):
        # Dispatch on HTTP method for /<webhook-id>: POST=edit, DELETE=delete,
        # anything else returns the JSON representation.
        form = self.edit_form(self.sender, self.app)
        try:
            wh = form.fields['webhook'].to_python(webhook)
        except Invalid:
            raise exc.HTTPNotFound()
        if request.method == 'POST':
            return self._edit(wh, form, **kw)
        elif request.method == 'DELETE':
            return self._delete(wh)
        else:
            return wh.__json__()

    def _edit(self, webhook, form, **kw):
        # Fields not supplied in the request keep their previous values.
        old_secret = webhook.secret
        old_url = webhook.hook_url
        try:
            params = {'secret': kw.pop('secret', old_secret),
                      'url': kw.pop('url', old_url),
                      'webhook': six.text_type(webhook._id)}
            valid = form.to_python(params)
        except Exception as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        try:
            self.update_webhook(webhook, valid['url'], valid['secret'])
        except Invalid as e:
            response.status_int = 400
            return {'result': 'error', 'error': self._error(e)}
        M.AuditLog.log(
            'edit webhook %s\n%s => %s\n%s',
            webhook.type, old_url, valid['url'],
            'secret changed' if old_secret != valid['secret'] else '')
        # refetch updated values (e.g. mod_date)
        session(webhook).expunge(webhook)
        webhook = M.Webhook.query.get(_id=webhook._id)
        return webhook.__json__()

    def _delete(self, webhook):
        webhook.delete()
        M.AuditLog.log(
            'delete webhook %s %s %s',
            webhook.type,
            webhook.hook_url,
            webhook.app_config.url())
        return {'result': 'ok'}
class SendWebhookHelper(object):
    """Delivers one webhook payload: serializes it to JSON, signs it, POSTs
    it, and retries failed deliveries on a configured backoff schedule."""

    def __init__(self, webhook, payload):
        self.webhook = webhook
        self.payload = payload

    @property
    def timeout(self):
        # Per-request timeout in seconds (config key 'webhook.timeout').
        return asint(config.get('webhook.timeout', 30))

    @property
    def retries(self):
        # Backoff schedule in seconds between retry attempts
        # (config key 'webhook.retry').
        t = aslist(config.get('webhook.retry', [60, 120, 240]))
        return list(map(int, t))

    def sign(self, json_payload):
        # HMAC-SHA1 over the JSON body, keyed with the webhook's shared
        # secret; sent to the receiver as the X-Allura-Signature header.
        signature = hmac.new(
            self.webhook.secret.encode('utf-8'),
            json_payload.encode('utf-8'),
            hashlib.sha1)
        return 'sha1=' + signature.hexdigest()

    def log_msg(self, msg, response=None):
        # Build a log line identifying the hook; append response details
        # (status/body/headers) when a response object is supplied.
        message = '{}: {} {} {}'.format(
            msg,
            self.webhook.type,
            self.webhook.hook_url,
            self.webhook.app_config.url())
        if response is not None:
            message = '{} {} {} {}'.format(
                message,
                response.status_code,
                response.text,
                response.headers)
        return message

    def send(self):
        """Serialize, sign and POST the payload, retrying per self.retries."""
        json_payload = json.dumps(self.payload, cls=DateJSONEncoder)
        signature = self.sign(json_payload)
        headers = {'content-type': 'application/json',
                   'User-Agent': 'Allura Webhook (https://allura.apache.org/)',
                   'X-Allura-Signature': signature}
        ok = self._send(self.webhook.hook_url, json_payload, headers)
        if not ok:
            log.info('Retrying webhook in: %s', self.retries)
            for t in self.retries:
                log.info('Retrying webhook in %s seconds', t)
                time.sleep(t)
                ok = self._send(self.webhook.hook_url, json_payload, headers)
                if ok:
                    return

    def _send(self, url, data, headers):
        # Returns True on a 2xx response, False on any error or non-2xx.
        try:
            r = requests.post(
                url,
                data=data,
                headers=headers,
                timeout=self.timeout)
        except (requests.exceptions.RequestException,
                socket.timeout,
                ssl.SSLError):
            log.exception(self.log_msg('Webhook send error'))
            return False
        if r.status_code >= 200 and r.status_code < 300:
            log.info(self.log_msg('Webhook successfully sent'))
            return True
        else:
            log.error(self.log_msg('Webhook send error', response=r))
            return False
@task()
def send_webhook(webhook_id, payload):
    # Background task entry point: load the webhook and deliver the payload.
    webhook = M.Webhook.query.get(_id=webhook_id)
    SendWebhookHelper(webhook, payload).send()
class WebhookSender(object):
    """Base class for webhook senders.

    Subclasses are required to implement :meth:`get_payload()` and set
    :attr:`type` and :attr:`triggered_by`.
    """

    type = None
    triggered_by = []
    controller = WebhookController
    api_controller = WebhookRestController

    def get_payload(self, **kw):
        """Return a dict with webhook payload"""
        raise NotImplementedError('get_payload')

    def send(self, params_or_list):
        """Post a task that will send webhook payload

        :param params_or_list: dict with keyword parameters to be passed to
        :meth:`get_payload` or a list of such dicts. If it's a list for each
        element appropriate payload will be submitted, but limit will be
        enforced only once for each webhook.
        """
        if not isinstance(params_or_list, list):
            params_or_list = [params_or_list]
        webhooks = M.Webhook.query.find(dict(
            app_config_id=c.app.config._id,
            type=self.type,
        )).all()
        if webhooks:
            payloads = [self.get_payload(**params)
                        for params in params_or_list]
            for webhook in webhooks:
                if webhook.enforce_limit():
                    webhook.update_limit()
                    for payload in payloads:
                        send_webhook.post(webhook._id, payload)
                else:
                    # Logger.warn is a deprecated alias of Logger.warning;
                    # use the canonical name.
                    log.warning('Webhook fires too often: %s. Skipping', webhook)

    def enforce_limit(self, app):
        '''
        Checks if limit of webhooks created for given project/app is reached.
        Returns False if limit is reached, True otherwise.
        '''
        count = M.Webhook.query.find(dict(
            app_config_id=app.config._id,
            type=self.type,
        )).count()
        limit = M.Webhook.max_hooks(self.type, app.config.tool_name)
        return count < limit
class RepoPushWebhookSender(WebhookSender):
    """Webhook sender fired on pushes to git/hg/svn repositories."""

    type = 'repo-push'
    triggered_by = ['git', 'hg', 'svn']

    def _convert_id(self, _id):
        # Ids containing ':' (SVN-style "<uuid>:<rev>") are exposed as
        # "r<rev>"; ids without ':' pass through unchanged.
        if ':' not in _id:
            return _id
        return 'r' + _id.rsplit(':', 1)[1]

    def _before(self, repo, commit_ids):
        # Branch head before the push: the last parent of the final entry in
        # commit_ids (a merge commit lists the pre-merge head last).
        if not commit_ids:
            return ''
        parents = repo.commit(commit_ids[-1]).parent_ids
        if not parents:
            return ''
        return self._convert_id(parents[-1])

    def _after(self, commit_ids):
        # Branch head after the push: the first entry in commit_ids.
        return self._convert_id(commit_ids[0]) if commit_ids else ''

    def get_payload(self, commit_ids, **kw):
        """Build the JSON-serializable payload for a push of *commit_ids*."""
        app = kw.get('app') or c.app
        commits = []
        for ci in commit_ids:
            info = app.repo.commit(ci).webhook_info
            info['id'] = self._convert_id(info['id'])
            commits.append(info)
        payload = {
            'size': len(commits),
            'commits': commits,
            'before': self._before(app.repo, commit_ids),
            'after': self._after(commit_ids),
            'repository': {
                'name': app.config.options.mount_label,
                'full_name': app.url,
                'url': h.absurl(app.url),
            },
        }
        if kw.get('ref'):
            payload['ref'] = kw['ref']
        return payload
| 36.174853 | 109 | 0.576875 | 16,393 | 0.890295 | 0 | 0 | 6,073 | 0.329821 | 0 | 0 | 3,962 | 0.215174 |
5779139c5edd9a302a13fd80a4c26bb0aae39343 | 11,139 | py | Python | monitor/monitor_v6_diagnostic.py | nlourie/vent-flowmeter | 7aa6e3814a7bf8a7da4d81eb80da69ceb5e8da80 | [
"MIT"
] | 2 | 2020-04-13T19:22:45.000Z | 2020-04-14T17:17:12.000Z | monitor/monitor_v6_diagnostic.py | nlourie/vent-flowmeter | 7aa6e3814a7bf8a7da4d81eb80da69ceb5e8da80 | [
"MIT"
] | null | null | null | monitor/monitor_v6_diagnostic.py | nlourie/vent-flowmeter | 7aa6e3814a7bf8a7da4d81eb80da69ceb5e8da80 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Apr 7 08:38:28 2020
pyqt realtime plot tutorial
source: https://www.learnpyqt.com/courses/graphics-plotting/plotting-pyqtgraph/
@author: nlourie
"""
from PyQt5 import QtWidgets, QtCore,uic
from pyqtgraph import PlotWidget, plot,QtGui
import pyqtgraph as pg
import sys # We need sys so that we can pass argv to QApplication
import os
from datetime import datetime
import numpy as np
from scipy import signal
import board
import busio
import adafruit_lps35hw
import time
from scipy import interpolate
#import monitor_utils as mu
# Initialize the i2c bus
i2c = busio.I2C(board.SCL, board.SDA)
# Using the adafruit_lps35hw class to read in the pressure sensor
# note the address must be in decimal.
# allowed addresses are:
# 92 (0x5c - if you put jumper from SDO to Gnd)
# 93 (0x5d - default)
p2 = adafruit_lps35hw.LPS35HW(i2c, address = 92)
p1 = adafruit_lps35hw.LPS35HW(i2c, address = 93)
# Sample both sensors at 75 Hz (the fastest rate this driver exposes).
p1.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
p2.data_rate = adafruit_lps35hw.DataRate.RATE_75_HZ
# Conversion factor from millibar to centimeters of water.
mbar2cmh20 = 1.01972
# Now read out the pressure difference between the sensors
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print('')
print('Now zero the pressure:')
# Tare both sensors so subsequent readings are relative to ambient pressure.
# Not sure why sometimes I have to do this twice??
p1.zero_pressure()
p1.zero_pressure()
time.sleep(1)
p2.zero_pressure()
p2.zero_pressure()
time.sleep(1)
print('p1_0 = ',p1.pressure,' mbar')
print('p1_0 = ',p1.pressure*mbar2cmh20,' cmH20')
print('p2_0 = ',p2.pressure,' mbar')
print('p2_0 = ',p2.pressure*mbar2cmh20,' cmH20')
print()
def breath_detect_coarse(flow, fs, plotflag=False):
    """Detect the peaks (breaths) of a flow signal.

    Parameters
    ----------
    flow : array_like
        Flow signal (L/s).
    fs : float
        Sampling frequency (Hz).
    plotflag : bool, optional
        Kept for interface compatibility; plotting is not implemented here.

    Returns
    -------
    numpy.ndarray
        Indices into *flow* where breath peaks were found.

    Notes
    -----
    Originally written in MATLAB by Chinh Nguyen, PhD
    (c.nguyen@neura.edu.au), ver 1.0, updated 12 Nov 2015.
    Converted to Python by Nate Lourie, PhD (nlourie@mit.edu), April 2020.
    """
    # Peak criteria, scaled by the sampling rate where appropriate:
    minpeakwidth = fs * 0.3        # a breath peak must be at least 0.3 s wide
    peakdistance = fs * 1.5        # successive breaths at least 1.5 s apart
    minPeak = 0.05                 # flow threshold (L/s)
    minpeakprominence = 0.05
    peak_index, _ = signal.find_peaks(flow,
                                      height=minPeak,
                                      distance=peakdistance,
                                      prominence=minpeakprominence,
                                      width=minpeakwidth)
    print('found peaks at index = ', peak_index)
    return peak_index
class MainWindow(QtWidgets.QMainWindow):
    """Main Qt window of the respiratory monitor: four stacked pyqtgraph
    plots (pressures, flow, raw volume, drift-corrected volume) refreshed by
    a periodic timer that reads the two pressure sensors."""

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        self.setWindowTitle("Standalone Respiratory Monitor")
        # Four vertically stacked plot widgets.
        self.graph0 = pg.PlotWidget()
        self.graph1 = pg.PlotWidget()
        self.graph2 = pg.PlotWidget()
        self.graph3 = pg.PlotWidget()
        layout = QtWidgets.QVBoxLayout()
        layout.addWidget(self.graph0)
        layout.addWidget(self.graph1)
        layout.addWidget(self.graph2)
        layout.addWidget(self.graph3)
        widget = QtWidgets.QWidget()
        widget.setLayout(layout)
        # make the window with a graph widget
        #self.graph1 = pg.PlotWidget()
        self.setCentralWidget(widget)
        # set the plot properties
        self.graph1.setBackground('k')
        self.graph0.showGrid(x = True, y = True)
        self.graph1.showGrid(x=True,y=True)
        self.graph2.showGrid(x = True, y = True)
        self.graph3.showGrid(x = True, y = True)
        # Set the label properties with valid CSS commands -- https://groups.google.com/forum/#!topic/pyqtgraph/jS1Ju8R6PXk
        labelStyle = {'color': '#FFF', 'font-size': '12pt'}
        self.graph0.setLabel('left','P','cmH20',**labelStyle)
        self.graph1.setLabel('left','Flow','L/s',**labelStyle)
        self.graph3.setLabel('bottom', 'Time', 's', **labelStyle)
        #self.graph2.setLabel('left', 'V raw','L',**labelStyle)
        self.graph3.setLabel('left','V corr','L',**labelStyle)
        # change the plot range
        #self.graph0.setYRange(-30,30,padding = 0.1)
        #self.graph1.setYRange(-2,2,padding = 0.1)
        #self.graph3.setYRange(-0.5,1.5,padding = 0.1)
        #self.graph3.setYRange(200,200,padding = 0.1)
        # Rolling data buffers (sample index, wall-clock time, elapsed time,
        # differential pressure, per-sensor pressure, flow, volume).
        # NOTE(review): self.x and self.dt are assigned twice here — the
        # duplicates are redundant but harmless; kept as-is.
        self.x = [0]
        self.t = [datetime.utcnow().timestamp()]
        self.dt = [0]
        self.x = [0]
        self.dt = [0]
        #self.y = [honeywell_v2f(chan.voltage)]
        self.dp = [(p1.pressure - p2.pressure)*mbar2cmh20]
        self.p1 = [(p1.pressure)*mbar2cmh20]
        self.p2 = [(p2.pressure)*mbar2cmh20]
        self.flow = [0]
        self.vol = [0]
        print('P1 = ',p1.pressure,' cmH20')
        print('P2 = ',p2.pressure,' cmH20')
        # plot data: x, y values
        # make a QPen object to hold the marker properties
        pen = pg.mkPen(color = 'y',width = 1)
        pen2 = pg.mkPen(color = 'b',width = 2)
        self.data_line01 = self.graph0.plot(self.dt,self.p1,pen = pen)
        self.data_line02 = self.graph0.plot(self.dt,self.p2,pen = pen2)
        self.data_line1 = self.graph1.plot(self.dt, self.flow,pen = pen)
        # graph2: raw volume plus the fitted drift curve
        self.data_line21 = self.graph2.plot(self.dt,self.flow,pen = pen)
        self.data_line22 = self.graph2.plot(self.dt,self.flow,pen = pen)
        # graph3: drift-corrected volume
        self.data_line3 = self.graph3.plot(self.dt,self.vol,pen = pen)
        self.calibrating = False
        """
        # Slower timer
        self.t_cal = 100
        self.cal_timer = QtCore.QTimer()
        self.cal_timer.setInterval(self.t_cal)
        self.cal_timer.timeout.connect(self.update_cal)
        self.cal_timer.start()
        """
        # Main acquisition/refresh timer.
        self.t_update = 10 #update time of timer in ms
        self.timer = QtCore.QTimer()
        self.timer.setInterval(self.t_update)
        self.timer.timeout.connect(self.update_plot_data)
        self.timer.start()
        self.drift_model = [0,datetime.utcnow().timestamp()/1000*self.t_update]
        self.i_valleys = []
        self.time_to_show = 30 #s, width of the rolling display window

    def update_plot_data(self):
        # This is what happens every timer loop: sample both sensors, append
        # to the rolling buffers, recompute volume and its drift correction,
        # and refresh all plot curves.
        if self.dt[-1] >= self.time_to_show:
            # Buffers are full: drop the oldest sample from each series.
            self.x = self.x[1:] # Remove the first element
            #self.y = self.y[1:] # remove the first element
            self.dp = self.dp[1:]
            self.t = self.t[1:] # remove the first element
            self.dt= self.dt[1:]
            self.p1 = self.p1[1:]
            self.p2 = self.p2[1:]
            self.vol = self.vol[1:]
            self.flow = self.flow[1:]
        self.x.append(self.x[-1] + 1) # add a new value 1 higher than the last
        self.t.append(datetime.utcnow().timestamp())
        self.dt = [(ti - self.t[0]) for ti in self.t]
        # Differential pressure across the flow element, in cmH2O.
        # NOTE(review): this value is plotted directly as "flow" without a
        # pressure-to-flow calibration — presumably a proxy; confirm.
        dp_cmh20 = ((p1.pressure - p2.pressure))*mbar2cmh20
        self.dp.append(dp_cmh20)
        self.flow.append(dp_cmh20)
        self.p1.append(p1.pressure*mbar2cmh20)
        self.p2.append(p2.pressure*mbar2cmh20)
        # remove any linear trend in the volume data since it's just nonsense.
        # THis should zero it out okay if there's no noticeable "dips"
        self.vol = signal.detrend(np.cumsum(self.flow))
        # Instantaneous sample rate from the last two timestamps.
        self.fs = 1/(self.t[-1] - self.t[-2])
        print('Sample Freq = ',self.fs)
        # Valleys of the volume curve = peaks of its negated, mean-subtracted
        # version; these mark the start of each breath.
        negative_mean_subtracted_volume = [-1*(v-np.mean(self.vol)) for v in self.vol]
        i_valleys = breath_detect_coarse(negative_mean_subtracted_volume,fs = self.fs,plotflag = False)
        self.i_valleys = i_valleys
        #print('i_valleys = ',self.i_valleys)
        #print('datatype of i_valleys = ',type(self.i_valleys))
        if len(self.i_valleys) >= 2:
            t = np.array(self.t)
            vol = np.array(self.vol)
            dt = np.array(self.dt)
            print('found peaks at dt = ',dt[self.i_valleys])
            #self.drift_model = np.polyfit(t[self.i_valleys],vol[self.i_valleys],1)
            #self.v_drift = np.polyval(self.drift_model,t)
            #self.vol_corr = vol - self.v_drift
            #self.data_line22.setData(self.dt,self.v_drift)
            # Piecewise-linear drift estimate: interpolate between valleys,
            # and extrapolate linearly before the first and after the last.
            self.drift_model = interpolate.interp1d(t[i_valleys],vol[i_valleys],kind = 'linear')
            v_drift_within_spline = self.drift_model(t[i_valleys[0]:i_valleys[-1]])
            v_drift = np.zeros(len(t))
            v_drift[0:self.i_valleys[1]] = np.polyval(np.polyfit(t[i_valleys[0:1]],vol[self.i_valleys[0:1]],1),t[0:self.i_valleys[1]],)
            v_drift[self.i_valleys[0]:self.i_valleys[-1]] = v_drift_within_spline
            v_drift[self.i_valleys[-1]:] = np.polyval(np.polyfit(t[self.i_valleys[-2:]],vol[self.i_valleys[-2:]],1),t[self.i_valleys[-1]:])
            self.v_drift = v_drift
            self.vol_corr = vol - v_drift
            self.data_line22.setData(self.dt,self.v_drift)
        else:
            # Not enough breaths detected yet to fit a drift model.
            self.vol_corr = self.vol
        self.data_line01.setData(self.dt,self.p1)
        self.data_line02.setData(self.dt,self.p2)
        self.data_line1.setData(self.dt,self.flow) #update the data
        self.data_line21.setData(self.dt,self.vol)
        self.data_line3.setData(self.dt,self.vol_corr)

    """
    def update_cal(self) :
        print ('len dt = ',len(self.dt))
        if len(self.dt) > 50:
            # try to run the monitor utils functions
            fs = 1000/self.t_update
            i_peaks,i_valleys,i_infl_points,vol_last_peak,flow,self.vol_corr,self.vol_offset,time,vol,drift_model = mu.get_processed_flow(np.array(self.t),np.array(self.y),fs,SmoothingParam = 0,smoothflag=True,plotflag = False)
            if len(i_peaks) > 2:
                self.drift_model = drift_model
                print('updating calibration')
                self.calibrating = True
                self.data_line2.setData(self.dt,vol)
                self.data_line5.setData(self.dt,np.polyval(self.drift_model,time))
                self.data_line3.setData(self.dt,vol - np.polyval(self.drift_model,time))
                print('drift model = ',self.drift_model)
    """
def main():
    # Standard Qt bootstrap: create the application, show the main window,
    # and hand control to the Qt event loop until the window closes.
    app = QtWidgets.QApplication(sys.argv)
    main = MainWindow()
    main.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
| 34.700935 | 227 | 0.589281 | 7,785 | 0.698896 | 0 | 0 | 0 | 0 | 0 | 0 | 4,313 | 0.387198 |
577b488a4c6bb95a24f07ede098c7e6465673424 | 380 | py | Python | src/quotes_crawlspider/quotes/items.py | azzamsa/learn-scrapy | 8d408763ff0be21fe2e0933b91b4ad1fe082b3ec | [
"MIT"
] | null | null | null | src/quotes_crawlspider/quotes/items.py | azzamsa/learn-scrapy | 8d408763ff0be21fe2e0933b91b4ad1fe082b3ec | [
"MIT"
] | null | null | null | src/quotes_crawlspider/quotes/items.py | azzamsa/learn-scrapy | 8d408763ff0be21fe2e0933b91b4ad1fe082b3ec | [
"MIT"
] | null | null | null | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
class QuotesItem(scrapy.Item):
    """Container for one scraped quote: author metadata, text and tags."""
    # define the fields for your item here like:
    author_name = scrapy.Field()
    author_location = scrapy.Field()
    author_date = scrapy.Field()
    text = scrapy.Field()
    tags = scrapy.Field()
| 23.75 | 53 | 0.705263 | 234 | 0.615789 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.442105 |