| max_stars_repo_path (string, length 3–269) | max_stars_repo_name (string, length 4–119) | max_stars_count (int64, 0–191k) | id (string, length 1–7) | content (string, length 6–1.05M) | score (float64, 0.23–5.13) | int_score (int64, 0–5) |
|---|---|---|---|---|---|---|
FLAME.py
|
commanderchewbacca/Newsfilter
| 0
|
12775451
|
"""
FLAME - Fuzzy clustering by Local Approximation of MEmbership
"""
from __future__ import print_function
import numpy as np
from scipy import sparse
from scipy.sparse import csr_matrix, lil_matrix
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.preprocessing import normalize
from math import sqrt
# __author__ = "<NAME>"
'''
NOTE: Credit for this implementation goes to Matthew Billson: https://github.com/yclicc/FLAME-python
'''
class FLAME(BaseEstimator, ClusterMixin):
def __init__(self, metric="euclidean", cluster_neighbors=5, iteration_neighbors=5, max_iter=np.inf, eps=1e-10, thd=-2, verbose=0):
self.metric = metric
self.cluster_neighbors = cluster_neighbors
self.iteration_neighbors = iteration_neighbors
self.max_iter = max_iter
self.eps = eps
self.thd = thd
self.verbose = verbose
def _get_nearest(self, distances, n_neighbors, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
# Do an introsort on each row of the distances matrix to put the nth smallest distance in the nth position and all
# smaller elements before it. Then keep only the first n+1 elements (including the element itself which will have
# distance 0 from itself and is removed later).
nearest_np = np.argpartition(distances, n_neighbors, axis=1)
nearest_np = nearest_np[:, :n_neighbors + 1]
# Find the largest distance of the kth closest points.
largest_distance = distances[sample_range, nearest_np[sample_range, -1]]
# Make two arrays of sets the first containing only the n nearest other elements to each element not
# including the element itself and the second containing the same plus any other elements tied for nth nearest
# again excluding the element itself (though if there are k other elements all 0 distance away other problems
# will result).
nearest = []
nearest_with_ties = []
for i in range(n_samples):
ties_for_largest_distance = np.where(distances[i] == largest_distance[i])
            nearest.append(set(nearest_np[i, :].tolist()))
            nearest[-1].remove(i)
ties_for_largest_distance = set(ties_for_largest_distance[0].tolist())
ties_for_largest_distance.discard(i)
nearest_with_ties.append(nearest[i] | ties_for_largest_distance)
return nearest, nearest_with_ties
def _get_densities(self, distances, nearest, n_samples):
# Make a numpy arange for iteration purposes.
sample_range = np.arange(n_samples)[:, None]
nearest_np = np.array([list(s) for s in nearest])
n_shortest_distances = distances[sample_range, nearest_np]
local_distance_sums = n_shortest_distances.sum(axis=1)
largest_local_sum = local_distance_sums.max(axis=0)
densities = np.asarray(largest_local_sum / local_distance_sums)
return densities
def _get_supports(self, densities, nearest_with_ties, n_samples):
density_sum = densities.sum()
density_mean = density_sum / n_samples
density_sum2 = (densities * densities).sum()
thd = density_mean + self.thd * sqrt(density_sum2 / n_samples - density_mean * density_mean)
csos = []
outliers = []
remaining = []
for i in range(n_samples):
if densities[i] < thd:
outliers.append(i)
elif densities[i] > densities[list(nearest_with_ties[i])].max():
csos.append(i)
else:
remaining.append(i)
return csos, outliers, remaining
def _get_weights(self, distances, nearest_with_ties, fixed, n_samples):
nearest_with_ties = [sorted(list(s)) for s in nearest_with_ties]
weights = lil_matrix((n_samples, n_samples))
for i in range(n_samples):
if i in fixed:
weights[i, i] = 1
else:
for j in nearest_with_ties[i]:
weights[i, j] = distances[i, j]
if self.verbose: print("Assigned weights {0}.".format(i))
weights = weights.tocsr()
weights = normalize(weights, norm='l1', axis=1, copy=False)
return weights
def _get_starting_membership(self, csos, outliers, fixed, n_samples):
M = len(csos) + 1
starting_membership = np.zeros(shape=(n_samples, M))
general_row = np.ndarray(shape=(1, M))
general_row.fill(1. / M)
for i in range(n_samples):
if i not in fixed:
starting_membership[i, :] = general_row
for index, value in enumerate(csos):
starting_membership[value, index] = 1
for i in outliers:
starting_membership[i, -1] = 1
return starting_membership
def _flame(self, X):
"""
        Pass a NumPy or pandas array of data as X. As metric, pass any string accepted by
        sklearn.metrics.pairwise.pairwise_distances, or a callable on pairs of members of X.
        FLAME is computed with n_neighbors until max_iter or convergence up to eps.
        thd is the outlier threshold: any element whose density is below
        mean(density) + thd * std(density) is treated as an outlier.
"""
if sparse.issparse(X) and self.metric not in {"precomputed", "cityblock", "cosine", "euclidean", "l1", "l2",
"manhattan"} and not callable(self.metric):
raise TypeError("The metric {0} does not support sparse data.".format(self.metric))
# Convert pandas objects to numpy arrays.
if 'pandas' in str(X.__class__):
X = X.values
X = check_array(X, accept_sparse="csr", dtype=None)
# Get the number of samples. We use this a lot.
n_samples, _ = X.shape
distances = pairwise_distances(X, metric=self.metric)
nearest, nearest_with_ties = self._get_nearest(distances, self.cluster_neighbors, n_samples)
if self.verbose: print("Got distances and nearest.")
densities = self._get_densities(distances, nearest, n_samples)
if self.verbose: print("Got densities.")
csos, outliers, remaining = self._get_supports(densities, nearest_with_ties, n_samples)
if self.verbose: print("Got suppports.")
if self.verbose: print("There are {0} clusters and {1} outliers.".format(len(csos), len(outliers)))
fixed = set(csos) | set(outliers)
_, nearest_with_ties_for_iteration = self._get_nearest(distances, self.iteration_neighbors, n_samples)
weights = self._get_weights(distances, nearest_with_ties_for_iteration, fixed, n_samples)
if self.verbose: print("Got weights.")
membership_proba = self._get_starting_membership(csos, outliers, fixed, n_samples)
if self.verbose: print("Got starting memberships.")
i = 0
while i < self.max_iter:
lastMembership = membership_proba.copy()
membership_proba = weights.dot(membership_proba)
delta = np.absolute(membership_proba - lastMembership).max()
i += 1
if self.verbose: print("Done iteration {0}.".format(i))
if delta < self.eps:
break
num_clusters = membership_proba.shape[1] - 1
# Get cluster assignment.
pred = np.argmax(membership_proba, axis=1)
# Replace predictions of the outlier group with -1.
pred[pred == num_clusters] = -1
return membership_proba, pred, csos, outliers, densities
def fit(self, X):
self.membership_proba_, self.labels_, self.csos_, self.outliers_, self.densities_ = \
self._flame(X)
return self
def fit_predict(self, X, y=None):
y = self.fit(X).labels_
return y
def fit_predict_proba(self, X, y=None):
y = self.fit(X).membership_proba_
return y
if __name__ == "__main__":
X = np.array(
[[0, 0, 0], [1.1, 0, 0], [0, 0.8, 0], [0, 0, 1.3], [10, 10, 10], [11.1, 10, 10], [10, 10.8, 10], [10, 11, 12]])
print(X)
model = FLAME(cluster_neighbors=3, iteration_neighbors=3,verbose=1)
membership = model.fit_predict(X)
    print(membership)
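    # The same estimator also exposes soft memberships via fit_predict_proba; a brief
    # follow-on sketch (the last column corresponds to the outlier group):
    proba = model.fit_predict_proba(X)
    print(proba.shape)  # (n_samples, number_of_clusters + 1)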
| 2.75
| 3
|
Leetcode/27#_Remove Element_06170601.py
|
yanjiyue/sac
| 0
|
12775452
|
#Given an array nums and a value val, remove all instances of that value in-place and return the new length.
#Do not allocate extra space for another array, you must do this by modifying the input array in-place with O(1) extra memory.
#The order of elements can be changed. It doesn't matter what you leave beyond the new length.
#Example 1:
#Given nums = [3,2,2,3], val = 3,
#Your function should return length = 2, with the first two elements of nums being 2.
#It doesn't matter what you leave beyond the returned length.
#Example 2:
#Given nums = [0,1,2,2,3,0,4,2], val = 2,
#Your function should return length = 5, with the first five elements of nums containing 0, 1, 3, 0, and 4.
#Note that the order of those five elements can be arbitrary.
#It doesn't matter what values are set beyond the returned length.
#Clarification:
#Confused why the returned value is an integer but your answer is an array?
#Note that the input array is passed in by reference, which means modification to the input array will be known to the caller as well.
#Internally you can think of this:
#// nums is passed in by reference. (i.e., without making a copy)
#int len = removeElement(nums, val);
#// any modification to nums in your function would be known by the caller.
#// using the length returned by your function, it prints the first len elements.
#for (int i = 0; i < len; i++) {
# print(nums[i]);
#}
from typing import List

class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        m = 0
        for n in range(len(nums)):
            if nums[n] != val:
                nums[m] = nums[n]
                m += 1
        return m
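# A minimal usage sketch (not part of the original solution): the array is compacted
# in place and the returned length bounds the valid prefix.
if __name__ == "__main__":
    nums = [0, 1, 2, 2, 3, 0, 4, 2]
    k = Solution().removeElement(nums, 2)
    print(k, nums[:k])  # 5 [0, 1, 3, 0, 4]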
| 4.125
| 4
|
python_examples/__init__.py
|
gdahlm/python_examples
| 0
|
12775453
|
<reponame>gdahlm/python_examples<filename>python_examples/__init__.py
"""
python_examples
===============
Provides coding examples covering the following topics:
1. Array Sequences
2. Stacks, Queues, Deques
3. Linked Lists
4. Recursion
5. Trees
6. Searching and Sorting
7. Graph Algorithms
Documentation
-------------
Code examples are indicated by three greater-than signs::
>>> foo(bar)
... # doctest: +SKIP
Available submodules
---------------------
data_structures: Basic Arrays, Stacks, Queues, and Deques
recursion: Recursion
TODO: Linked Lists
TODO: Trees
TODO: Searching and Sorting
TODO: Graph Algorithms
Utilities
---------
test
Run unittests
__version__
version string
"""
from __future__ import division, print_function
__version_info__ = (0, 1, 0)
__version__ = '.'.join(map(str, __version_info__))
if __name__ == "__main__": # pragma: no cover
pass
| 3
| 3
|
stanford_battery/protocol/run_session.py
|
thomasbazeille/public_protocols
| 3
|
12775454
|
import json
import os
import subprocess
import time
subid = raw_input('Enter subject id (i.e. s999): ')
training = raw_input('Enter 0 for training, 1 for main tasks: ')
if training == '1':
run_file = 'scanner_tasks_order1'
else:
run_file = 'practice_tasks'
taskset = raw_input('Enter task group (1, 2 or 3): ')
if taskset == '1':
tasks = ['stop_signal','attention_network_task','twobytwo']
elif taskset == '2':
tasks = ['motor_selective_stop_signal', 'stroop', 'discount_fixed']
elif taskset == '3':
tasks = ['dot_pattern_expectancy', 'columbia_card_task_hot',
'ward_and_allport']
else:
    raise ValueError('Invalid task group')
print('\n'.join(tasks))
json.dump(tasks, open('temp_tasklist.json','w'))
for task in tasks:
print('***************************************************************')
if os.name == 'posix':
subprocess.call("expfactory --run --folder {0} --battery expfactory-battery/ "
"--experiments {1} --subid {2} &".format(run_file, task, subid), shell=True)
else:
subprocess.call("start expfactory --run --folder {0} --battery expfactory-battery/ "
"--experiments {1} --subid {2}".format(run_file, task, subid), shell=True)
time.sleep(1)
| 2.3125
| 2
|
api/views.py
|
rbs2rbs/simalirade_spotify
| 0
|
12775455
|
<gh_stars>0
from django.shortcuts import render
from django.http import HttpResponseNotFound
from rest_framework.response import Response
import json
from api.funcoes import Funcoes,Comp
from rest_framework.views import APIView
from django.core import serializers
class MusicaView(APIView):
def post(self, request):
try:
funcoes = Funcoes()
musica = json.loads(request.body)['musica']
musica = musica.split('/')
saida = funcoes.get_musica(music_id = musica[-1][0:22])
request.session['musica'] = saida
return Response(saida)
except:
return HttpResponseNotFound("error")
class TopView(APIView):
def post(self, request):
funcoes = Funcoes()
top = json.loads(request.body)['top']
saida = funcoes.get_playlist_audio_features('renan_bispo',top)
top_null = []
for i in saida:
if not i is None:
top_null.append(i)
request.session['top'] = top_null
return Response(top_null)
class CompView(APIView):
def get(self,request):
comp = Comp(request.session['top'],request.session['musica'])
for key in list(request.session.keys()):
try:
del request.session[key]
except:
pass
saida = comp.comp()
funcoes = Funcoes()
saida['musica'] = funcoes.get_musica(music_id = saida['parecida'])
return Response(saida)
| 2.1875
| 2
|
lackey/KeyCodes.py
|
Inobitec/lackey
| 599
|
12775456
|
<reponame>Inobitec/lackey
class Button():
LEFT = 0
CENTER = 1
RIGHT = 2
class Key():
""" Key codes for InputEmulation.Keyboard object.
Can be entered directly or concatenated with an existing string, e.g. ``type(Key.TAB)`` """
ENTER = "{ENTER}"
ESC = "{ESC}"
BACKSPACE = "{BACKSPACE}"
DELETE = "{DELETE}"
F1 = "{F1}"
F2 = "{F2}"
F3 = "{F3}"
F4 = "{F4}"
F5 = "{F5}"
F6 = "{F6}"
F7 = "{F7}"
F8 = "{F8}"
F9 = "{F9}"
F10 = "{F10}"
F11 = "{F11}"
F12 = "{F12}"
F13 = "{F13}"
F14 = "{F14}"
F15 = "{F15}"
F16 = "{F16}"
HOME = "{HOME}"
END = "{END}"
LEFT = "{LEFT}"
RIGHT = "{RIGHT}"
DOWN = "{DOWN}"
UP = "{UP}"
PAGE_DOWN = "{PAGE_DOWN}"
PAGE_UP = "{PAGE_UP}"
TAB = "{TAB}"
CAPS_LOCK = "{CAPS_LOCK}"
NUM_LOCK = "{NUM_LOCK}"
SCROLL_LOCK = "{SCROLL_LOCK}"
INSERT = "{INSERT}"
SPACE = "{SPACE}"
PRINTSCREEN = "{PRINTSCREEN}"
ALT = "{ALT}"
CMD = "{CMD}"
CTRL = "{CTRL}"
META = "{META}"
SHIFT = "{SHIFT}"
WIN = "{WIN}"
PAUSE = "{PAUSE}"
NUM0 = "{NUM0}"
NUM1 = "{NUM1}"
NUM2 = "{NUM2}"
NUM3 = "{NUM3}"
NUM4 = "{NUM4}"
NUM5 = "{NUM5}"
NUM6 = "{NUM6}"
NUM7 = "{NUM7}"
NUM8 = "{NUM8}"
NUM9 = "{NUM9}"
SEPARATOR = "{SEPARATOR}"
ADD = "{ADD}"
MINUS = "{MINUS}"
MULTIPLY = "{MULTIPLY}"
DIVIDE = "{DIVIDE}"
class KeyModifier():
""" Can be used with type() to modify another key, e.g. ``type(Key.DELETE, Key.CTRL+Key.ALT)`` """
CTRL = "{CTRL}"
SHIFT = "{SHIFT}"
ALT = "{ALT}"
META = "{META}"
CMD = "{CMD}"
WIN = "{WIN}"
| 3.09375
| 3
|
PyOpenGL-3.0.2/OpenGL/GL/ARB/sync.py
|
frederica07/Dragon_Programming_Process
| 0
|
12775457
|
'''OpenGL extension ARB.sync
This module customises the behaviour of the
OpenGL.raw.GL.ARB.sync to provide a more
Python-friendly API
Overview (from the spec)
This extension introduces the concept of "sync objects". Sync
objects are a synchronization primitive - a representation of events
whose completion status can be tested or waited upon. One specific
type of sync object, the "fence sync object", is supported in this
extension, and additional types can easily be added in the future.
Fence sync objects have corresponding fences, which are inserted
into the OpenGL command stream at the time the sync object is
created. A sync object can be queried for a given condition. The
only condition supported for fence sync objects is completion of the
corresponding fence command. Fence completion allows applications to
request a partial Finish, wherein all commands prior to the fence
will be forced to complete before control is returned to the calling
process.
These new mechanisms allow for synchronization between the host CPU
and the GPU, which may be accessing the same resources (typically
memory), as well as between multiple GL contexts bound to multiple
threads in the host CPU.
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/sync.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.sync import *
### END AUTOGENERATED SECTION
from OpenGL.constants import GLint
from OpenGL.arrays import GLintArray
from OpenGL.lazywrapper import lazy
def glGetSync( sync, pname, bufSize=1,length=None,values=None ):
"""Wrapper around glGetSynciv that auto-allocates buffers
sync -- the GLsync struct pointer (see glGetSynciv)
pname -- constant to retrieve (see glGetSynciv)
bufSize -- defaults to 1, maximum number of items to retrieve,
currently all constants are defined to return a single
value
length -- None or a GLint() instance (ONLY!), must be a byref()
capable object with a .value attribute which retrieves the
set value
values -- None or an array object, if None, will be a default
return-array-type of length bufSize
returns values[:length.value], i.e. an array with the values set
by the call, currently always a single-value array.
"""
if values is None:
values = GLintArray.zeros( (bufSize,) )
if length is None:
length = GLint()
glGetSynciv( sync, pname, bufSize, length, values )
written = length.value
return values[:written]
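# A hedged usage sketch (assumes an active GL context and ARB_sync support;
# glFenceSync and the GL_SYNC_* / GL_SIGNALED constants come from the same extension):
#   sync = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0)
#   status = glGetSync(sync, GL_SYNC_STATUS)  # e.g. [GL_SIGNALED] once the fence completes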
| 1.734375
| 2
|
osf/files/addons-figshare-local.py
|
sifulan-access-federation/helm-charts
| 0
|
12775458
|
import os
CLIENT_ID = os.environ['FIGSHARE_CLIENT_ID']
CLIENT_SECRET = os.environ['FIGSHARE_CLIENT_SECRET']
| 1.21875
| 1
|
nunchuk_tester.py
|
mlegere1323/WiiNunchukMouse
| 1
|
12775459
|
<filename>nunchuk_tester.py
import nunchuk
from pymouse import PyMouse
delta = 1
pressed = 1
no_press = 0
m = PyMouse()
x_dim, y_dim = m.screen_size()
while True:
#[joy_x, joy_y, accel_x, accel_y, accel_z, c_button, z_button]...TODO {shaken}
reading = nunchuk.get_input()
curr_joy_x = reading[0]
curr_joy_y = reading[1]
#print(reading)
curr_x, curr_y = m.position()
curr_z_state = reading[6]# > 215 and < 40 oscillating for "shake" signature
curr_c_state = reading[5]
if curr_z_state == pressed:
m.press(curr_x, curr_y)
if curr_z_state == no_press:
m.release(curr_x, curr_y)
if curr_c_state == no_press:
if curr_joy_x != 0 or curr_joy_y != 0:
m.move(curr_x + delta * curr_joy_x, curr_y - delta * curr_joy_y)
else:
curr_accel_x = reading[2]
curr_accel_y = reading[3]
m.move(curr_x + delta * curr_accel_x, curr_y - delta * curr_accel_y)
| 2.625
| 3
|
ooobuild/csslo/security/__init__.py
|
Amourspirit/ooo_uno_tmpl
| 0
|
12775460
|
# coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from ...lo.security.access_control_exception import AccessControlException as AccessControlException
from ...lo.security.access_controller import AccessController as AccessController
from ...lo.security.all_permission import AllPermission as AllPermission
from ...lo.security.cert_alt_name_entry import CertAltNameEntry as CertAltNameEntry
from ...lo.security.certificate_characters import CertificateCharacters as CertificateCharacters
from ...lo.security.certificate_container import CertificateContainer as CertificateContainer
from ...lo.security.certificate_container_status import CertificateContainerStatus as CertificateContainerStatus
from ...lo.security.certificate_exception import CertificateException as CertificateException
from ...lo.security.certificate_kind import CertificateKind as CertificateKind
from ...lo.security.certificate_validity import CertificateValidity as CertificateValidity
from ...lo.security.cryptography_exception import CryptographyException as CryptographyException
from ...lo.security.document_digital_signatures import DocumentDigitalSignatures as DocumentDigitalSignatures
from ...lo.security.document_signature_information import DocumentSignatureInformation as DocumentSignatureInformation
from ...lo.security.encryption_exception import EncryptionException as EncryptionException
from ...lo.security.ext_alt_name_type import ExtAltNameType as ExtAltNameType
from ...lo.security.key_exception import KeyException as KeyException
from ...lo.security.key_usage import KeyUsage as KeyUsage
from ...lo.security.no_password_exception import NoPasswordException as NoPasswordException
from ...lo.security.policy import Policy as Policy
from ...lo.security.runtime_permission import RuntimePermission as RuntimePermission
from ...lo.security.security_infrastructure_exception import SecurityInfrastructureException as SecurityInfrastructureException
from ...lo.security.signature_exception import SignatureException as SignatureException
from ...lo.security.x_access_control_context import XAccessControlContext as XAccessControlContext
from ...lo.security.x_access_controller import XAccessController as XAccessController
from ...lo.security.x_action import XAction as XAction
from ...lo.security.x_certificate import XCertificate as XCertificate
from ...lo.security.x_certificate_container import XCertificateContainer as XCertificateContainer
from ...lo.security.x_certificate_extension import XCertificateExtension as XCertificateExtension
from ...lo.security.x_document_digital_signatures import XDocumentDigitalSignatures as XDocumentDigitalSignatures
from ...lo.security.x_policy import XPolicy as XPolicy
from ...lo.security.x_san_extension import XSanExtension as XSanExtension
| 1.367188
| 1
|
cottonformation/res/ram.py
|
gitter-badger/cottonformation-project
| 5
|
12775461
|
# -*- coding: utf-8 -*-
"""
This module
"""
import attr
import typing
from ..core.model import (
Property, Resource, Tag, GetAtt, TypeHint, TypeCheck,
)
from ..core.constant import AttrMeta
#--- Property declaration ---
#--- Resource declaration ---
@attr.s
class ResourceShare(Resource):
"""
AWS Object Type = "AWS::RAM::ResourceShare"
Resource Document: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html
Property Document:
- ``rp_Name``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name
- ``p_AllowExternalPrincipals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals
- ``p_PermissionArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns
- ``p_Principals``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals
- ``p_ResourceArns``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns
- ``p_Tags``: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags
"""
AWS_OBJECT_TYPE = "AWS::RAM::ResourceShare"
rp_Name: TypeHint.intrinsic_str = attr.ib(
default=None,
validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type),
metadata={AttrMeta.PROPERTY_NAME: "Name"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-name"""
p_AllowExternalPrincipals: bool = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.instance_of(bool)),
metadata={AttrMeta.PROPERTY_NAME: "AllowExternalPrincipals"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-allowexternalprincipals"""
p_PermissionArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "PermissionArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-permissionarns"""
p_Principals: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Principals"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-principals"""
p_ResourceArns: typing.List[TypeHint.intrinsic_str] = attr.ib(
default=None,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(TypeCheck.intrinsic_str_type), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "ResourceArns"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-resourcearns"""
p_Tags: typing.List[typing.Union[Tag, dict]] = attr.ib(
default=None,
converter=Tag.from_list,
validator=attr.validators.optional(attr.validators.deep_iterable(member_validator=attr.validators.instance_of(Tag), iterable_validator=attr.validators.instance_of(list))),
metadata={AttrMeta.PROPERTY_NAME: "Tags"},
)
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#cfn-ram-resourceshare-tags"""
@property
def rv_Arn(self) -> GetAtt:
"""Doc: http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ram-resourceshare.html#aws-resource-ram-resourceshare-return-values"""
return GetAtt(resource=self, attr_name="Arn")
| 2.1875
| 2
|
bin/system/logging_setup.py
|
fdkz/sensed
| 1
|
12775462
|
<filename>bin/system/logging_setup.py
import os
import sys
import time
import math
import logging
import logging.handlers
_console_logger = None
def start_logging_system(log_folder, log_filename="log.txt"):
"""
"""
# patch Handler.handleError to reraise exceptions. don't know what magic
# happens in the default handler, but the combination of asyncore and
# logging module loses my traceback info! a nightmare to debug..
def logging_handleError(self, record):
if logging.raiseExceptions: raise
logging.Handler.handleError = logging_handleError
logformat = MiscFormatter()
log_globalfile = logging.handlers.TimedRotatingFileHandler(os.path.join(log_folder, log_filename), "midnight", utc=True) # backupCount=7
log_globalfile.setFormatter(logformat)
    # configure the root logger to output everything to both the global log file and the console
rootlogger = logging.getLogger()
rootlogger.addHandler(log_globalfile)
global _console_logger
_console_logger = logging.StreamHandler()
_console_logger.setFormatter(logformat)
rootlogger.addHandler(_console_logger)
rootlogger.setLevel(logging.NOTSET)
# This line has to exist, because sometimes we could get the following error after redirecting all of stdout
# to the logging module and the error would never appear:
# The process cannot access the file because it is being used by another process
rootlogger.info("(logger output test)")
# route all raw print statements through the logging system. add an ERROR prefix
# to encourage use of the logging module.
sys.stdout = StdLogger()
sys.stderr = sys.stdout
def remove_console_logger():
rootlogger = logging.getLogger()
# _console_logger.disabled = True also works?
rootlogger.removeHandler(_console_logger)
#
# ---------------------------------------------------------------------------
#
# create a new Formatter class for the logging module.
class MiscFormatter(logging.Formatter):
"""
purpose:
instead of this:
18-01-2010 18:40:42,235 INFO startup ok
18-01-2010 18:40:42,235 DEBUG count: 4 init: True
18-01-2010 18:40:42,235 WARNING object not found!
we'll get this:
18-01-2010 16:40:42.235Z startup ok
18-01-2010 16:40:42.235Z DEBUG count: 4 init: True
18-01-2010 16:40:42.235Z WARNING object not found!
"""
def __init__(self):
logging.Formatter.__init__(self)
def formatTime(self, record, datefmt=None):
""" remove the comma and return '18-01-2010 18:40:42.235Z' utc time """
if datefmt:
return time.strftime(datefmt)
else:
msecs = min(999, record.msecs) # for some reason, record.msecs can be 1000, and that screws with the %03 formatting.
return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(record.created)) + ".%03.fZ" % msecs
def format(self, record):
# skip the INFO text on every line, but show DEBUG and ERROR and others.
if record.levelno == logging.INFO:
self._fmt = "%(asctime)s %(name)s %(message)s"
else:
self._fmt = "%(asctime)s %(name)s %(levelname)s %(message)s"
return logging.Formatter.format(self, record)
# last line of unsolicited stdout defence.
# catch stdout and redirect to log.
class StdLogger:
def __init__(self):
self.isatty = sys.__stdout__.isatty()
def write(self, txt):
logging.info("STDOUT " + txt.rstrip())
def flush(self):
pass
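# A minimal usage sketch (the log folder path is hypothetical; start_logging_system,
# MiscFormatter and StdLogger are defined above):
#   start_logging_system("/tmp/logs")
#   logging.getLogger("app").info("startup ok")  # formatted by MiscFormatter
#   print("hello")  # routed through StdLogger and logged with an "STDOUT" prefix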
| 2.40625
| 2
|
Blender Export/objc-export-2.5/objc_blend_2.5.6/export_objc.py
|
JavaZava/iOS-OpenGLES-Stuff
| 199
|
12775463
|
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import os
import time
import shutil
import bpy
import mathutils
def write_objc(filepath,context):
out = open(filepath, 'w')
current_scene = bpy.context.scene
objs = current_scene.objects
#i know there has to be an easier way to do this, but i'm too lazy to look it up
for next_obj in objs:
if next_obj.type == 'MESH':
mesh = next_obj
print("Writing Object")
for i in current_scene.objects:
i.select = False #deselect all objects
mesh.select = True
current_scene.objects.active = mesh #set the mesh object to current
bpy.ops.object.mode_set(mode='EDIT') #Operators
bpy.ops.mesh.select_all(action='SELECT')#select all the face/vertex/edge
bpy.ops.mesh.quads_convert_to_tris() #Operators
current_scene.update()
bpy.ops.object.mode_set(mode='OBJECT') # set it in object
mesh = mesh.data
objectname = mesh.name
basename = objectname.capitalize()
out.write('#import "OpenGLCommon.h"\n\n\n')
if len(mesh.uv_textures) > 0:
out.write('static const TexturedVertexData3D %sVertexData[] = {\n' % basename)
#for face in uv: #loop through the faces
uv_layer = mesh.active_uv_texture
for face in mesh.faces:
faceUV = uv_layer.data[face.index]
i=0
for index in face.vertices:
if len(face.vertices) == 3:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('/*t:*/{%f, %f}' % ( faceUV.uv[i][0], faceUV.uv[i][1] ) )
out.write('},\n')
i+=1
out.write('};\n\n')
elif len(mesh.vertex_colors) > 0:
out.write('static const ColoredVertexData3D %sVertexData[] = {\n' % basename)
color_layer = mesh.active_vertex_color
for face in mesh.faces:
if len(face.vertices) == 3:
faceC = color_layer.data[face.index]
i=0
for index in face.vertices:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f}, ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('/*c:*/{%f, %f, %f, %f}' % ( faceC.color1[i], faceC.color2[i], faceC.color3[i], faceC.color4[i]) )
out.write('},\n')
i+=1
out.write('};\n\n')
else:
out.write('static const VertexData3D %sVertexData[] = {\n' % basename)
for face in mesh.faces:
if len(face.vertices) == 3:
for index in face.vertices:
vert = mesh.vertices[index]
out.write('\t{/*v:*/{%f, %f, %f}, ' % (vert.co.x, vert.co.y, vert.co.z) )
out.write('/*n:*/{%f, %f, %f} ' % (vert.normal.x, vert.normal.y, vert.normal.z))
out.write('},\n')
out.write('};\n\n')
#if editmode: Window.EditMode(1)
out.write('#define k%sNumberOfVertices\t%i\n' % (basename, len(mesh.faces) * 3) )
out.write('// Drawing Code:\n')
out.write('// glEnableClientState(GL_VERTEX_ARRAY);\n')
if len(mesh.uv_textures) > 0:
out.write('// glEnableClientState(GL_TEXTURE_COORD_ARRAY);\n')
elif len(mesh.vertex_colors) > 0:
out.write('// glEnableClientState(GL_COLOR_ARRAY);\n')
out.write('// glEnable(GL_COLOR_MATERIAL)\n')
out.write('// glEnableClientState(GL_NORMAL_ARRAY);\n')
out.write('// glVertexPointer(3, GL_FLOAT, sizeof(')
if len(mesh.uv_textures) > 0:
out.write('TexturedVertexData3D')
elif len(mesh.vertex_colors) > 0:
out.write('ColoredVertexData3D')
else:
out.write('VertexData3D')
out.write('), &%sVertexData[0].vertex);\n' % basename)
out.write('// glNormalPointer(GL_FLOAT, sizeof(')
if len(mesh.uv_textures) > 0:
out.write('TexturedVertexData3D')
elif len(mesh.vertex_colors) > 0:
out.write('ColoredVertexData3D')
else:
out.write('VertexData3D')
out.write('), &%sVertexData[0].normal);\n' % basename)
if len(mesh.uv_textures) > 0:
out.write('// glTexCoordPointer(2, GL_FLOAT, sizeof(TexturedVertexData3D), &%sVertexData[0].texCoord);\n' % basename)
elif len(mesh.vertex_colors) > 0:
out.write('// glColorPointer(4, GL_FLOAT, sizeof(ColoredVertexData3D), &%sVertexData[0].color);\n' % basename)
out.write('// glDrawArrays(GL_TRIANGLES, 0, k%sNumberOfVertices);\n' % basename)
out.write('// glDisableClientState(GL_VERTEX_ARRAY);\n')
if len(mesh.uv_textures) > 0:
out.write('// glDisableClientState(GL_TEXTURE_COORD_ARRAY);\n')
elif len(mesh.vertex_colors) > 0:
out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n')
out.write('// glDisable(GL_COLOR_MATERIAL);\n')
out.write('// glDisableClientState(GL_NORMAL_ARRAY);\n\n\n')
out.close()
def save(operator, context, filepath="",
use_triangles=False,
use_edges=True,
use_normals=False,
use_hq_normals=False,
use_uvs=True,
use_materials=True,
copy_images=False,
use_modifiers=True,
use_rotate_x90=True,
use_blen_objects=True,
group_by_object=False,
group_by_material=False,
keep_vertex_order=False,
use_vertex_groups=False,
use_nurbs=True,
use_selection=True,
use_all_scenes=False,
use_animation=False,
):
write_objc(filepath,context)
return {'FINISHED'}
| 2.265625
| 2
|
pgist.py
|
plship/pgist
| 0
|
12775464
|
<gh_stars>0
#!/usr/bin/env python
"""A Python command-line wrapper with github3.py library to access GitHub Gist.
"""
import os
import sys
if sys.version_info.major < 3:
reload(sys)
sys.setdefaultencoding('utf-8')
import uuid
from functools import wraps, update_wrapper
from getpass import getpass, getuser
import click
import requests
from github3 import authorize, login
from github3.models import GitHubError
try:
from urlparse import urlparse
except ImportError:
# py3k
from urllib.parse import urlparse
def auth_check(func):
"""Decorator to note which object methods require authorization"""
@wraps(func)
def check_wrapper(self, *args, **kwargs):
"""A wrapper to check if a token exists"""
if not kwargs.get('anonymous'):
try:
with open(os.path.expanduser('~/.pgist'), 'r') as tkf:
self.token = tkf.readline()
self.github = login(token=self.token)
except IOError:
                raise SystemExit('Please use `pgist --login` to authenticate ' \
                    'gist on this computer')
try:
return func(self, *args, **kwargs)
except GitHubError as ghe:
if ghe.code in (401, 403):
raise SystemExit('Your current gist authorize is bad, ' \
'please use `pgist --login` to authenticate it again!')
raise SystemExit(ghe + '\nPlease report this bug to the author!')
return check_wrapper
def token_request():
"""Request app token from GitHub to operate gists"""
try:
prompt = raw_input
except NameError:
prompt = input
user = prompt('GitHub username(default is {0}): '.format(getuser())) \
or getuser()
password = ''
while not password:
password = getpass('GitHub password for {0}: '.format(user))
# Ensure github tokens have a unique description
note = 'the unique pgist -> {0}'.format(uuid.uuid4())
note_url = 'https://github.com/douglarek/pgist'
scopes = ['gist']
try:
auth = authorize(user, password, scopes, note, note_url)
except GitHubError as ghe:
if 'two-factor' in str(ghe):
raise SystemExit('GitHub 2-factor auth is not supported ' \
'temporarily o(>_<)o, please disable it to use pgist !')
        raise SystemExit('Gist authorization failed, please check your username '\
'or password!')
with open(os.path.expanduser('~/.pgist'), 'w') as tkf:
tkf.write(auth.token)
click.echo('Done ...')
def url_shorten(long_url):
"""Shorten a long url with git.io service"""
req = requests.post('http://git.io', data={'url' : long_url})
return req.headers['location']
def upload_files(files):
"""Build up uploaded or updated files' structure"""
_upload_files = {}
for obj in files:
content = obj.readlines()
if not content:
continue
_upload_files[obj.name.split('/')[-1]] = {'content' : \
''.join(content)}
obj.close()
del obj
if not _upload_files:
raise SystemExit('All of your files are empty, WTF?')
return _upload_files
def find_gist_by_id(github, _id):
"""Find a gist by _id"""
dest = None
for gist in github.iter_gists():
if _id == gist.id or _id == gist.html_url:
dest = gist
break
if dest is None:
raise SystemExit('The gist ID or URL is not found, is it right?')
return dest
def get_id(_id):
"""Convert _id(ID or URL) to ID"""
result = urlparse(_id)
if result.path:
return result.path.split('/')[-1]
raise SystemExit('Your url(id): {0} is invalid !'.format(_id))
class Gist(object):
"""Define known gist operations"""
def __init__(self):
self.token, self.github = None, None
@auth_check
def list_gists(self, _all=False):
"""List all gists or public only ones"""
click.echo('List of {0} gists: \n'.format(['public','all'][_all]))
if _all:
for gist in self.github.iter_gists():
click.echo('{0}{1}'.format(gist.html_url.ljust(50),
[g.name for g in gist.iter_files()][0]))
else:
for gist in self.github.iter_gists():
if gist.is_public():
click.echo('{0}{1}'.format(gist.html_url.ljust(50),
[g.name for g in gist.iter_files()][0]))
@auth_check
def create_gist(self,
description=None,
files=(),
public=True,
anonymous=False,
short_url=False):
"""Create public, private or anonymous gists"""
if description is None:
description = ''
if anonymous:
from github3 import create_gist
gist = create_gist(description, upload_files(files))
else:
gist = self.github.create_gist(description, upload_files(files), \
public)
click.echo(url_shorten(gist.html_url) if short_url else gist.html_url)
@auth_check
def update_gist(self,
_id,
description=None,
files=()):
"""Update a gist"""
if description is None:
description = ''
dest = find_gist_by_id(self.github, get_id( _id))
if dest.edit(description, upload_files(files)):
click.echo('<{0}> has been updated successfully.'.format(dest.html_url))
@auth_check
def delete_gist(self, _id):
"""Delete a gist by _id"""
dest = find_gist_by_id(self.github, get_id(_id))
if dest.delete():
click.echo('<{0}> has been deleted successfully.'.format(dest.html_url))
@auth_check
def fork_gist(self, _id):
"""Fork a gist by ID or URL"""
try:
new = self.github.gist(get_id(_id)).fork()
except AttributeError:
raise SystemExit('The gist {0} is not found !'.format(_id))
if new is None:
raise SystemExit('Enha, maybe you are forking yourself ?')
click.echo('{0} is forked to {1}'.format(_id, new.html_url))
def compatcallback(f):
if getattr(click, '__version__', '0.0') >= '2.0':
return f
return update_wrapper(lambda ctx, value: f(ctx, None, value), f)
@compatcallback
def print_help(ctx, param, value):
"""A callback func, when type `-h`, show help"""
if not value:
return
click.echo(ctx.get_help())
ctx.exit()
@click.command()
@click.option('-l', 'list_', is_flag=True, help='List public gists, with `-A` list all ones')
@click.option('-A', 'all_', is_flag=True)
@click.option('-s', 'shorten', is_flag=True, help='Shorten the gist URL using git.io')
@click.option('-u', 'update', metavar='URL/ID', help='Update an existing gist')
@click.option('-d', 'desc', metavar='DESCRIPTION', help='Adds a description to your gist')
@click.option('-D', 'delete', metavar='URL/ID', help='Detele an existing gist')
@click.option('-f', 'fork', metavar='URL/ID', help='Fork an existing gist')
@click.option('-p', 'private', is_flag=True, help='Makes your gist private')
@click.option('-a', 'anonymous', is_flag=True, help='Create an anonymous gist')
@click.option('--login', 'login', is_flag=True, help='Authenticate gist on this computer')
@click.option('-h', is_flag=True, callback=print_help, expose_value=False, is_eager=True)
@click.argument('files', nargs=-1, required=False, type=click.File())
@click.pass_context
def cli(ctx, files, list_, all_, shorten, update, desc, delete, fork, private, anonymous, login):
"""A Python command-line wrapper with github3.py library to access GitHub gists"""
gist = Gist()
if list_:
gist.list_gists(_all=all_)
elif update and files:
gist.update_gist(update, desc, files)
elif delete:
gist.delete_gist(delete)
elif files:
gist.create_gist(desc, files, [True, False][private], anonymous=anonymous, short_url=shorten)
elif login:
token_request()
else:
click.echo(ctx.get_help())
if __name__ == '__main__':
cli()
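# Example invocations of the CLI defined above (file names are placeholders;
# run `pgist --login` once before authenticated operations):
#   pgist -l                         # list public gists (-A for all)
#   pgist -d "some notes" a.py b.py  # create a public gist from two files
#   pgist -D <URL/ID>                # delete an existing gist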
| 2.84375
| 3
|
TFTutorialStanford1.py
|
AdityaPrasadMishra/TensorflowPractice
| 0
|
12775465
|
<filename>TFTutorialStanford1.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 9 18:49:49 2017
@author: aditya
"""
import tensorflow as tf
a = tf.constant(2)
b = tf.constant(3)
x = tf.add(a, b)
# This is to activate tensorboard.
with tf.Session() as sess:
writer = tf.summary.FileWriter('./graphs', sess.graph)
print(sess.run(x,feed_dict=None, options=None, run_metadata=None))
writer.close()
aa = tf.constant([2,2],name="a")
bb = tf.constant([3,6],name="b")
xx = tf.add(aa, bb,name="add")
# This is to activate tensorboard.
with tf.Session() as sess:
writer = tf.summary.FileWriter('./graphs', sess.graph)
# tf.Session.run(fetches, feed_dict=None, options=None, run_metadata=None)
print(sess.run(xx,feed_dict=None, options=None, run_metadata=None))
writer.close()
# tf.constant(value, dtype=None, shape=None, name='Const', verify_shape=False)
# constant of 1d tensor (vector)
a = tf.constant([2, 2], name="vector")
# constant of 2x2 tensor (matrix)
b = tf.constant([[0, 1], [2, 3]], name="b")
#tf.zeros(shape, dtype=tf.float32, name=None)
# create a tensor of shape and all elements are zeros
tf.zeros([2, 3], tf.int32)
tf.zeros_like(a)
tf.ones([2, 3], tf.int32)
#==> [[1, 1, 1], [1, 1, 1]]
#tf.fill(dims, value, name=None)
tf.fill([2, 3], 8)
#==> [[8, 8, 8], [8, 8, 8]]
tf.linspace(10.0, 13.0, 4, name="linspace")
#==> [10.0 11.0 12.0 13.0]
# 'start' is 3, 'limit' is 18, 'delta' is 3
tf.range(3, 18, 3)
#==> [3, 6, 9, 12, 15]
# 'start' is 3, 'limit' is 1, 'delta' is -0.5
tf.range(3, 1, -0.5)
#==> [3, 2.5, 2, 1.5]
# 'limit' is 5
tf.range(5)
#==> [0, 1, 2, 3, 4]
#tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
#tf.truncated_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None,
#name=None)
#tf.random_uniform(shape, minval=0, maxval=None, dtype=tf.float32, seed=None,
#name=None)
#tf.random_shuffle(value, seed=None, name=None)
#tf.random_crop(value, size, seed=None, name=None)
#tf.multinomial(logits, num_samples, seed=None, name=None)
#tf.random_gamma(shape, alpha, beta=None, dtype=tf.float32, seed=None, name=None)
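# Example of the random ops listed above, in the same TF 1.x graph-and-session
# style as the rest of this file:
norm = tf.random_normal([2, 3], mean=0.0, stddev=1.0, seed=0)
with tf.Session() as sess:
    print(sess.run(norm))  # a 2x3 sample drawn from N(0, 1)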
| 3.03125
| 3
|
apps/equipment/migrations/0002_checkout_user.py
|
Ralst0n/inspection-manager
| 1
|
12775466
|
<filename>apps/equipment/migrations/0002_checkout_user.py
# Generated by Django 2.0.2 on 2018-08-22 15:38
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('inspectors', '0001_initial'),
('equipment', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='checkout',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inspectors.Inspector'),
),
]
| 1.296875
| 1
|
ammf/utils/demo_utils.py
|
bangquanxie/ASM3D
| 1
|
12775467
|
import copy
import numpy as np
import tensorflow as tf
from ammf.utils.wavedata.tools.obj_detection import obj_utils
from ammf.utils.wavedata.tools.obj_detection import evaluation
from ammf.core import anchor_projector
from ammf.core import box_3d_encoder
COLOUR_SCHEME_PREDICTIONS = {
"Easy GT": (255, 255, 0), # Yellow
"Medium GT": (255, 128, 0), # Orange
"Hard GT": (255, 0, 0), # Red
"Prediction": (50, 255, 50), # Green
}
def get_gts_based_on_difficulty(dataset, img_idx):
"""Returns lists of ground-truth based on difficulty.
"""
# Get all ground truth labels
all_gt_objs = obj_utils.read_labels(dataset.label_dir, img_idx)
# Filter to dataset classes
gt_objs = dataset.kitti_utils.filter_labels(all_gt_objs)
# Filter objects to desired difficulty
easy_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=0)
medium_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=1)
hard_gt_objs = dataset.kitti_utils.filter_labels(
copy.deepcopy(gt_objs), difficulty=2)
for gt_obj in easy_gt_objs:
gt_obj.type = 'Easy GT'
for gt_obj in medium_gt_objs:
gt_obj.type = 'Medium GT'
for gt_obj in hard_gt_objs:
gt_obj.type = 'Hard GT'
return easy_gt_objs, medium_gt_objs, hard_gt_objs, all_gt_objs
def get_max_ious_3d(all_gt_boxes_3d, pred_boxes_3d):
"""Helper function to calculate 3D IoU for the given predictions.
Args:
all_gt_boxes_3d: A list of the same ground-truth boxes in box_3d
format.
pred_boxes_3d: A list of predictions in box_3d format.
"""
# Only calculate ious if there are predictions
if pred_boxes_3d:
# Convert to iou format
gt_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
all_gt_boxes_3d)
pred_objs_iou_fmt = box_3d_encoder.box_3d_to_3d_iou_format(
pred_boxes_3d)
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
for gt_obj_idx in range(len(all_gt_boxes_3d)):
gt_obj_iou_fmt = gt_objs_iou_fmt[gt_obj_idx]
ious_3d = evaluation.three_d_iou(gt_obj_iou_fmt,
pred_objs_iou_fmt)
max_ious_3d[gt_obj_idx] = np.amax(ious_3d)
else:
# No detections, all ious = 0
max_ious_3d = np.zeros(len(all_gt_boxes_3d))
return max_ious_3d
def tf_project_to_image_space(anchors, calib_p2, image_shape, img_idx):
"""Helper function to convert data to tensors and project
to image space using the tf projection function.
"""
anchors_tensor = tf.convert_to_tensor(anchors, tf.float32)
calib_p2_tensor = tf.convert_to_tensor(calib_p2, tf.float32)
image_shape_tensor = tf.convert_to_tensor(image_shape, tf.float32)
projected_boxes_tensor, _ = \
anchor_projector.tf_project_to_image_space(
anchors_tensor,
calib_p2_tensor,
image_shape_tensor)
sess = tf.Session()
with sess.as_default():
projected_boxes = projected_boxes_tensor.eval()
return projected_boxes
| 2.171875
| 2
|
classroom_tools/student_repositories/create_protected_branch_from_master.py
|
TestOrgJustAymeric/ClassroomTools
| 0
|
12775468
|
import argparse
import pprint
from colorama import Fore
from classroom_tools import github_utils
parser = argparse.ArgumentParser(
'Create a protected branch to freeze assignment submissions using the latest commit on master')
parser.add_argument(
'--token',
required=True,
help='GitHub personal access token with repo permissions'
)
parser.add_argument(
'--org_name',
required=True,
help='GitHub organization name'
)
parser.add_argument(
'--repo_filter',
required=True,
help='Prefix to filter repositories for a given assignment or exercise'
)
parser.add_argument(
'--branch',
required=True,
help='Name of protected branch'
)
def create_or_update_ref(repo, branch_name):
master_branch = repo.get_branch('master')
try:
branch = repo.get_branch(branch_name)
if branch.protected:
branch.remove_protection()
ref = repo.get_git_ref(f'heads/{branch_name}')
ref.edit(sha=master_branch.commit.sha, force=True)
except:
repo.create_git_ref(f'refs/heads/{branch_name}', sha=master_branch.commit.sha)
def add_push_restrictions(repo, branch_name):
branch = repo.get_branch(branch_name)
branch.edit_protection(
user_push_restrictions=['']
)
def main(args):
print('\n\n' + 'Creating protected branches'.center(80, '='))
args = parser.parse_args(args)
print('Args:\n' + ''.join(f'\t{k}: {v}\n' for k, v in vars(args).items()))
github_utils.verify_token(args.token)
repositories = github_utils.get_students_repositories(
token=args.token,
org_name=args.org_name,
repo_filter=args.repo_filter
)
num_fail = 0
for repo in repositories:
try:
create_or_update_ref(repo=repo, branch_name=args.branch)
add_push_restrictions(repo=repo, branch_name=args.branch)
print(f'{Fore.GREEN}Repo: {repo.full_name}')
except Exception as e:
print(f'{Fore.RED}Repo: {repo.full_name}')
pprint.pprint(vars(repo))
print(f'{Fore.RED}{e}')
num_fail += 1
print('\nSummary:')
print(f'\tTotal number of repositories: {len(repositories)}')
print(f'\tTotal number failed: {num_fail}')
if num_fail > 0:
raise Exception(f'{Fore.RED}Couldn\'t create protected branches')
if __name__ == '__main__':
import sys
main(sys.argv[1:])
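# Example invocation (organization, filter prefix and branch names are hypothetical):
#   python create_protected_branch_from_master.py --token $GITHUB_TOKEN \
#       --org_name my-classroom --repo_filter assignment1- --branch submission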
| 2.703125
| 3
|
note/meiduo34/mall/apps/areas/views.py
|
gaosong666/taobao
| 0
|
12775469
|
from django.shortcuts import render
# Create your views here.
# from django.shortcuts import render
# from rest_framework import mixins
# from rest_framework import status
# from rest_framework.decorators import action
# from rest_framework.permissions import IsAuthenticated
# from rest_framework.response import Response
from rest_framework.viewsets import ReadOnlyModelViewSet, GenericViewSet
from areas.serializers import AreaSerializer, SubAreaSerializer, AddressSerializer, AddressTitleSerializer
from .models import Area
# from .serializers import AreaSerializer, SubAreaSerializer, AddressSerializer, AddressTitleSerializer
from rest_framework_extensions.cache.mixins import CacheResponseMixin
# Create your views here.
class AreasViewSet(CacheResponseMixin, ReadOnlyModelViewSet):
"""
    Administrative division (area) information
"""
    pagination_class = None  # area data is not paginated
def get_queryset(self):
"""
        Provide the queryset
"""
if self.action == 'list':
return Area.objects.filter(parent=None)
else:
return Area.objects.all()
def get_serializer_class(self):
"""
        Provide the serializer class
"""
if self.action == 'list':
return AreaSerializer
else:
return SubAreaSerializer
# class AddressViewSet(mixins.ListModelMixin,mixins.CreateModelMixin,mixins.UpdateModelMixin,GenericViewSet):
# """
# Create and update user addresses
# list GET: /users/addresses/
# create POST: /users/addresses/
# destroy DELETE: /users/addresses/
# action PUT: /users/addresses/pk/status/
# action PUT: /users/addresses/pk/title/
# """
#
# Specify the serializer
# serializer_class = AddressSerializer
# Require an authenticated user
# permission_classes = [IsAuthenticated]
# Addresses can be soft-deleted, so we need to filter them
# def get_queryset(self):
# return self.request.user.addresses.filter(is_deleted=False)
#
# def create(self, request, *args, **kwargs):
# """
# Save user address data
# """
# count = request.user.addresses.count()
# if count >= 20:
# return Response({'message': 'The number of saved addresses has reached the limit'}, status=status.HTTP_400_BAD_REQUEST)
#
# return super().create(request,*args,**kwargs)
#
# def list(self, request, *args, **kwargs):
# """
# Get the user's address list
# """
# # Get all addresses
# queryset = self.get_queryset()
# # Create the serializer
# serializer = self.get_serializer(queryset, many=True)
# user = self.request.user
# # Respond
# return Response({
# 'user_id': user.id,
# 'default_address_id': user.default_address_id,
# 'limit': 20,
# 'addresses': serializer.data,
# })
#
# def destroy(self, request, *args, **kwargs):
# """
# Handle deletion
# """
# address = self.get_object()
#
# # Perform a logical (soft) delete
# address.is_deleted = True
# address.save()
#
# return Response(status=status.HTTP_204_NO_CONTENT)
#
#
# @action(methods=['put'], detail=True)
# def title(self, request, pk=None, address_id=None):
# """
# Update the title
# """
# address = self.get_object()
# serializer = AddressTitleSerializer(instance=address, data=request.data)
# serializer.is_valid(raise_exception=True)
# serializer.save()
# return Response(serializer.data)
#
# @action(methods=['put'], detail=True)
# def status(self, request, pk=None, address_id=None):
# """
# Set the default address
# """
# address = self.get_object()
# request.user.default_address = address
# request.user.save()
# return Response({'message': 'OK'}, status=status.HTTP_200_OK)
| 2.03125
| 2
|
maddpg/common/env_utils_test.py
|
iminders/maddpg
| 1
|
12775470
|
<reponame>iminders/maddpg
# -*- coding:utf-8 -*-
import sys
import pytest
from maddpg.arguments import parse_experiment_args
from maddpg.common.env_utils import get_shapes, make_env, uniform_action
from maddpg.common.tf_utils import set_global_seeds
class TestEnvUtils:
def setup(self):
args = parse_experiment_args()
self.env = make_env(args=args, id=0)
set_global_seeds(0)
def test_get_shapes(self):
act_shapes = get_shapes(self.env.action_space)
from gym import spaces
assert isinstance(self.env.action_space[0], spaces.Discrete)
assert [5, 5, 5] == act_shapes
obs_shapes = get_shapes(self.env.observation_space)
assert [4, 4, 4] == obs_shapes
def test_uniform_action(self):
actions = uniform_action(self.env.action_space)
actions = [a.tolist() for a in actions]
assert [0.0976270078546495, 0.43037873274483895, 0.20552675214328775,
0.08976636599379373, -0.15269040132219058] == actions[0]
assert [0.29178822613331223, -0.12482557747461498, 0.7835460015641595,
0.9273255210020586, -0.2331169623484446] == actions[1]
assert [0.5834500761653292, 0.05778983950580896, 0.13608912218786462,
0.8511932765853221, -0.8579278836042261] == actions[2]
if __name__ == "__main__":
sys.exit(pytest.main([__file__]))
| 2.25
| 2
|
elastalert_modules/tst_ist_tz_enhancement.py
|
nareshbalajia/elastalert
| 0
|
12775471
|
from datetime import datetime
from pytz import timezone
from elastalert.enhancements import BaseEnhancement
from elastalert.util import ts_to_dt, pretty_ts, elastalert_logger
"""
This class converts the incoming UTC timestamp to India Standard Time and Taiwan Standard Time
"""
class ConvertTzInfo(BaseEnhancement):
# The enhancement is run against every match
# The match is passed to the process function where it can be modified in any way
# ElastAlert will do this for each enhancement linked to a rule
def process(self, match):
elastalert_logger.info("Received UTC Time %s" % (match['@timestamp']))
utc_ts = match['@timestamp']
if not isinstance(utc_ts, datetime):
utc_ts = ts_to_dt(utc_ts)
taipei_tz = timezone('Asia/Taipei')
india_tz = timezone('Asia/Kolkata')
ist_tz = utc_ts.astimezone(india_tz)
tst_tz = utc_ts.astimezone(taipei_tz)
ist_tz_str = pretty_ts(ist_tz, False)
tst_tz_str = pretty_ts(tst_tz, False)
tz_str = ist_tz_str + " Or " + tst_tz_str
match['@timestamp'] = tz_str
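# A minimal illustration (hypothetical match dict; in practice ElastAlert constructs
# the enhancement with the rule and calls process() on every match):
#   match = {'@timestamp': '2021-01-01T10:00:00Z'}
#   ConvertTzInfo(rule={}).process(match)
#   print(match['@timestamp'])  # combined IST/TST string, e.g. "<IST time> Or <TST time>"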
| 2.875
| 3
|
ovspy/client.py
|
kamaboko123/ovspy
| 2
|
12775472
|
import ipaddress
import socket
import json
from . import ovsdb_query
from .bridge import OvsBridge
from .port import OvsPort
from datetime import datetime, timedelta
from . import ovspy_error
import sys
import time
class OvsClient:
SEND_DEBUG = False
RECV_DEBUG = False
def __init__(self, ovsdb_port, ovsdb_ip="127.0.0.1", timeout=5, buffer_size=4096):
self._ovsdb_ip = ipaddress.ip_address(ovsdb_ip)
self._ovsdb_port = int(ovsdb_port)
self._query_timeout = timeout
self._buffer_size = buffer_size
def _send(self, query):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((str(self._ovsdb_ip), self._ovsdb_port))
if self.SEND_DEBUG:
sys.stderr.write("[SEND] %s\n" % json.dumps(query).encode())
s.send(json.dumps(query).encode())
#s.shutdown(socket.SHUT_RDWR)
buf = bytes()
timeout = datetime.now() + timedelta(seconds=self._query_timeout)
while True:
if datetime.now() >= timeout:
raise ovspy_error.TransactionError("Timeout")
buf += s.recv(self._buffer_size)
try:
query_result = json.loads(buf.decode())
#echo method
#https://tools.ietf.org/html/rfc7047
if "method" in query_result.keys() and query_result["method"] == "echo":
echo_reply= {
"method": "echo",
"params": query_result["params"],
"id": query_result["id"]
}
                    s.send(json.dumps(echo_reply).encode())
buf = bytes()
continue
else:
break
except json.JSONDecodeError:
pass
s.close()
if self.RECV_DEBUG:
sys.stderr.write("[RECV] %s\n" % query_result)
self._check_error(query_result)
return query_result
@staticmethod
def _check_error(query_result_json):
if "result" in query_result_json.keys():
for item in query_result_json["result"]:
if "error" in item.keys():
raise ovspy_error.TransactionError("[QueryError] %s" % item["details"])
elif len(query_result_json["error"]) != 0:
raise ovspy_error.TransactionError("[QueryError] %s" % query_result_json["error"])
#get Open_vSwitch table
def get_ovs_raw(self):
query = ovsdb_query.Generator.get_ovs()
result = self._send(query)
return result
#get id of Open_vSwitch entry from Open_vSwitch table
def get_uuid(self):
return self.get_ovs_raw()["result"][0]["rows"][0]["_uuid"][1]
def get_bridge_raw(self, bridge_id=None):
query = ovsdb_query.Generator.get_bridges()
result = self._send(query)
if bridge_id is None:
return result["result"][0]["rows"]
else:
for br in result["result"][0]["rows"]:
if br['_uuid'][1] == bridge_id:
return br
return None
def get_bridges(self):
bridges = self.get_bridge_raw()
ret = []
for br in bridges:
_br = OvsBridge(br['_uuid'][1])
_br.set_client(self)
ret.append(_br)
return ret
def find_bridge(self, bridge_name):
for br in self.get_bridges():
if br.get_name() == bridge_name:
return br
return None
def find_port(self, port_name):
for p in self.get_port_raw():
if p["name"] == port_name:
return p
return None
def get_port_raw(self, port_id=None):
if port_id is None:
query = ovsdb_query.Generator.get_ports()
result = self._send(query)
return result["result"][0]["rows"]
else:
query = ovsdb_query.Generator.get_port(port_id)
result = self._send(query)
for p in result["result"][0]["rows"]:
if p['_uuid'][1] == port_id:
return p
return None
def add_port_to_bridge(self, bridge, port_name, vlan=None):
bridge_raw = bridge.get_raw()
if bridge_raw is None:
raise ovspy_error.NotFound("bridge is not found")
if self.find_port(port_name) is not None:
raise ovspy_error.Duplicate("port is already exist")
#print(bridge.get_raw())
exist_ports = []
for p in bridge.get_ports():
exist_ports.append(p.get_uuid())
query = ovsdb_query.Generator.add_port(bridge.get_uuid(), exist_ports, port_name, vlan=vlan)
self._send(query)
def del_port_from_bridge(self, bridge, port_name):
target_port = bridge.find_port(port_name)
exist_ports = []
for p in bridge.get_ports():
exist_ports.append(p.get_uuid())
exist_ports = list(set(exist_ports))
        if target_port is None:
            raise ovspy_error.NotFound("Specified port (%s) does not exist in bridge (%s)." % (port_name, bridge.get_name()))
        if target_port.get_uuid() not in exist_ports:
            raise ovspy_error.NotFound("Specified port (%s) does not exist in bridge (%s)." % (port_name, bridge.get_name()))
query = ovsdb_query.Generator.del_port(bridge.get_uuid(), exist_ports, target_port.get_uuid())
self._send(query)
def add_bridge(self, bridge_name):
exist_bridges = []
for br in self.get_bridges():
if bridge_name == br.get_name():
raise ovspy_error.Duplicate("Bridge(%s) is already exist." % bridge_name)
exist_bridges.append(br.get_uuid())
exist_bridges = list(set(exist_bridges))
query = ovsdb_query.Generator.add_bridge(self.get_uuid(), bridge_name, exist_bridges)
self._send(query)
def del_bridge(self, bridge_name):
target_bridge = self.find_bridge(bridge_name)
exist_bridges = []
for br in self.get_bridges():
exist_bridges.append(br.get_uuid())
        if target_bridge is None:
            raise ovspy_error.NotFound("Bridge (%s) does not exist." % bridge_name)
        if target_bridge.get_uuid() not in exist_bridges:
            raise ovspy_error.NotFound("Bridge (%s) does not exist." % bridge_name)
query = ovsdb_query.Generator.del_bridge(self.get_uuid(), exist_bridges, target_bridge.get_uuid())
self._send(query)
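# Illustrative usage sketch (not part of the library): assumes ovsdb-server is reachable over
# TCP on localhost:6640; the bridge and port names are examples only.
if __name__ == "__main__":
    client = OvsClient(6640)
    client.add_bridge("br-example")
    bridge = client.find_bridge("br-example")
    if bridge is not None:
        client.add_port_to_bridge(bridge, "port-example", vlan=100)
    for br in client.get_bridges():
        print(br.get_name())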
| 2.515625
| 3
|
gaphor/diagram/tests/test_group.py
|
MartinIIOT/gaphor
| 0
|
12775473
|
<filename>gaphor/diagram/tests/test_group.py
from gaphor.core.modeling import Diagram, Element
from gaphor.diagram.group import can_group, group, ungroup
def test_group_diagram(element_factory):
diagram = element_factory.create(Diagram)
parent = element_factory.create(Element)
assert group(parent, diagram)
assert diagram.element is parent
def test_ungroup_diagram(element_factory):
diagram = element_factory.create(Diagram)
parent = element_factory.create(Element)
diagram.element = parent
assert ungroup(parent, diagram)
assert diagram.element is None
def test_do_not_ungroup_diagram_from_wrong_parent(element_factory):
diagram = element_factory.create(Diagram)
parent = element_factory.create(Element)
wrong_parent = element_factory.create(Element)
diagram.element = parent
assert not ungroup(wrong_parent, diagram)
assert diagram.element is parent
def test_can_group_with_type(element_factory):
parent = element_factory.create(Element)
assert can_group(parent, Diagram)
def test_can_group_with_instance(element_factory):
diagram = element_factory.create(Diagram)
parent = element_factory.create(Element)
assert can_group(parent, diagram)
| 2.3125
| 2
|
apps/projects/views.py
|
Ralst0n/inspection-manager
| 1
|
12775474
|
<gh_stars>1-10
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Max, Sum
from django.http import JsonResponse
from django.shortcuts import render
from django.views import generic
from datetime import timedelta
from rest_framework import generics, viewsets
from rest_framework.renderers import JSONRenderer, TemplateHTMLRenderer
from .models import Project
from .serializers import ProjectSerializer
from apps.invoices.models import Invoice
# Create your views here.
@login_required
def all_projects(request):
groups = []
for group in request.user.groups.all():
groups.append(str(group))
project_list = Project.objects.all().filter(office__in=groups)
return render(request, 'all_project_list.html',
{
'project_list': project_list,
})
class ProjectSearchView(LoginRequiredMixin, generic.ListView):
model = Project
template_name = 'all_project_list.html'
def get_queryset(self):
office = self.request.user.profile.office
return Project.objects.filter(office=office)
class ProjectsListView(LoginRequiredMixin, generic.ListView):
model = Project
def get_queryset(self):
"""
filter which projects show in list view
"""
office = self.request.user.profile.office
groups = []
for group in self.request.user.groups.all():
groups.append(str(group))
return Project.objects.filter(office=office).filter(completed__exact=False)
class ProjectsDetailView(LoginRequiredMixin, generic.DetailView):
model = Project
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
# self.object gives you access to the object the detail page is for.
context['invoices'] = Invoice.objects.filter(project=self.object, status__gte=1).order_by('-end_date')
return context
def get_object(self):
_object = Project.objects.get(pk=self.kwargs['pk'])
if _object.office != self.request.user.profile.office:
return {}
else:
return _object
class ProjectViewSet(viewsets.ModelViewSet):
"""
API endpoint that allows projects to be viewed or edited.
"""
queryset = Project.objects.all().order_by('prudent_number')
serializer_class = ProjectSerializer
class ProjectDetail(generics.RetrieveUpdateAPIView):
queryset = Project.objects.all()
render_classes = (JSONRenderer,)
def get_info(request):
'''Get the information to auto populate the estimate number and start date for an invoice'''
prudent_number = request.POST.get("prudent_number")
project = Project.objects.get(prudent_number=prudent_number)
# if there are no previous invoices, tell js there's no previous data.
if project.invoice_set.count() == 0:
start_date = project.start_date.strftime("%Y-%m-%d")
data = {
"estimate_number": 1,
"start_date": start_date
}
else:
latest_invoice = project.invoice_set.latest('end_date')
estimate_number = latest_invoice.estimate_number + 1
start_date = latest_invoice.end_date + timedelta(days=1)
start_date = start_date.strftime("%Y-%m-%d")
data = {
"previous": True,
"estimate_number": estimate_number,
"start_date": start_date
}
return JsonResponse(data)
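# Illustrative sketch only (not wired into the app): builds the kind of POST the invoice form's
# JavaScript is expected to send to get_info; the URL and prudent_number are hypothetical, and a
# matching Project must exist in the database for the call to succeed.
def _example_get_info_call():
    from django.test import RequestFactory
    request = RequestFactory().post("/projects/get-info/", {"prudent_number": "P-0001"})
    return get_info(request)  # JsonResponse carrying estimate_number and start_date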
| 2.046875
| 2
|
navbar.py
|
jchudb93/website
| 0
|
12775475
|
<filename>navbar.py<gh_stars>0
import dash_bootstrap_components as dbc
import dash_html_components as html
def Navbar():
links = dbc.Row(
[
dbc.DropdownMenu(
className = "nav-links-dd",
color="link",
label="Dataset",
children=[
dbc.DropdownMenuItem("Data", href="/dataset"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Insights", href="/interactive-graph"),
],
),
dbc.DropdownMenu(
className = "nav-links-dd",
color="link",
label="Projections",
children=[
dbc.DropdownMenuItem("Case Predictions", href="/projections"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Policy Evaluation", href="/policies"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Ventilator Allocation", href="/ventilator_allocation"),
],
),
dbc.DropdownMenu(
className = "nav-links-dd",
color="link",
label="Risk Calculators",
children=[
dbc.DropdownMenuItem("Mortality Risk Calculator", href="/mortality_calculator"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Infection Risk Calculator", href="/infection_calculator")
]
),
dbc.DropdownMenu(
className = "nav-links-dd",
color="link",
label="About Us",
children=[
dbc.DropdownMenuItem("The Team", href="/team"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Collaborators", href="/collaborators"),
dbc.DropdownMenuItem(divider=True),
dbc.DropdownMenuItem("Contact Us", href="/contact"),
],
),
dbc.Button(
"In the Press",
className = "nav-links-dd",
color="link", href="/press",
),
],
id="navbar-links",
style={"position":"static"},
no_gutters=True,
className="ml-auto",
)
navbar = dbc.Navbar(
[
html.A(
dbc.Row(
[
dbc.Col(html.Img(src="assets/images/logo_black.png", height="60px")),
],
align="center",
),
href="/home",
),
dbc.NavbarToggler(id="navbar-toggler"),
dbc.Collapse(links, id="navbar-collapse", navbar=True),
],
id="navbar",
color="black",
dark=True,
)
return navbar
| 2.375
| 2
|
epochbot/constants.py
|
jaloo555/solana-easy-py
| 4
|
12775476
|
REFRESH_TIME = 15
LOAD_TIME = 10
| 1.015625
| 1
|
src/ydata_quality/utils/modelling.py
|
poga/ydata-quality
| 242
|
12775477
|
"""
Utilities based on building baseline machine learning models.
"""
from typing import Union, Optional
from pandas import DataFrame, Series
from numpy import mean, tile, empty, std, square, sqrt, log as nplog, reciprocal
from scipy.stats import boxcox, normaltest, mode
from sklearn.compose import ColumnTransformer
from sklearn.exceptions import ConvergenceWarning, DataConversionWarning
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.metrics import mean_squared_error, roc_auc_score
from sklearn.mixture import GaussianMixture
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import (FunctionTransformer, OneHotEncoder,
RobustScaler, StandardScaler,
label_binarize)
from sklearn.utils._testing import ignore_warnings
from .auxiliary import infer_dtypes
from .enum import PredictionTask
BASELINE_CLASSIFIER = Pipeline([
('imputer', SimpleImputer()),
('classifier', LogisticRegression())
])
BASELINE_REGRESSION = Pipeline([
('imputer', SimpleImputer()),
('classifier', LinearRegression())
])
NUMERIC_TRANSFORMER = Pipeline([
('imputer', SimpleImputer()),
('scaler', StandardScaler())])
CATEGORICAL_TRANSFORMER = Pipeline([
('imputer', SimpleImputer(strategy='most_frequent')),
('encoder', OneHotEncoder(handle_unknown='ignore'))])
ORDINAL_TRANSFORMER = None # Not implemented
def get_prediction_task(df: DataFrame, label: str):
"Heuristics to infer prediction task (classification/regression)."
return 'classification' if len(set(df[label])) == 2 else 'regression'
@ignore_warnings(category=ConvergenceWarning)
def baseline_predictions(df: DataFrame, label: str, task='classification'):
"Train a baseline model and predict for a test set"
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline model
model = BASELINE_CLASSIFIER if task == 'classification' else BASELINE_REGRESSION
# 2. Train overall model
x_orig, y_orig = df.drop(label, axis=1), label_binarize(df[label], classes=list(set(df[label])))
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 3. Predict
if task == 'regression':
y_pred = model.predict(x_test.select_dtypes('number'))
elif task == 'classification':
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 4. Return both the predictions and x_test, y_test to analyze the performances
return y_pred, x_test, y_test
@ignore_warnings(category=DataConversionWarning)
def baseline_performance(df: DataFrame, label: str,
task: PredictionTask = PredictionTask.CLASSIFICATION,
adjusted_metric: bool = False):
"""Train a baseline model, predict for a test set and return the performance.
Args:
- df (DataFrame): original dataset
- label (str): name of target feature column
- task (PredictionTask): classification, regression
- adjusted_metric (bool): if True, return metric as percentage of max achievable performance
"""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, _, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance
if adjusted_metric:
perf = adjusted_performance(y_test, y_pred, task=task, metric=metric)
else:
perf = metric(y_test, y_pred)
return perf
def adjusted_performance(y_true, y_pred, task: PredictionTask, metric: callable):
"""Calculates the adjusted metric as ratio of real to maximum performance.
Returns the percentage to the best achievable performance starting from a baseline.
"""
task = PredictionTask(task)
y_default = mean(y_true) if task == PredictionTask.CLASSIFICATION else mode(y_true).mode[0] # define the value
y_base = tile(y_default, (len(y_true), 1)) # create an array with default value
best_perf = metric(y_true, y_true)
base_perf = metric(y_true, y_base)
real_perf = metric(y_true, y_pred)
return (real_perf - base_perf) / (best_perf - base_perf)
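# Worked example of the formula above (illustrative numbers only): with ROC AUC the best
# achievable score is 1.0 and a constant baseline scores 0.5, so a model scoring 0.8 attains
# (0.8 - 0.5) / (1.0 - 0.5) = 0.6 of the achievable improvement over the baseline.
def _adjusted_metric_example() -> float:
    best_perf, base_perf, real_perf = 1.0, 0.5, 0.8
    return (real_perf - base_perf) / (best_perf - base_perf)  # == 0.6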
@ignore_warnings(category=DataConversionWarning)
def performance_per_feature_values(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance achieved per each value of a groupby feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performances per feature value
uniques = set(x_test[feature])
results = {}
for value in uniques: # for each category
y_pred_cat = y_pred[x_test[feature] == value]
y_true_cat = y_test[x_test[feature] == value]
try:
results[value] = metric(y_true_cat, y_pred_cat)
except ValueError as exc:
results[value] = f'[ERROR] Failed performance metric with message: {exc}'
return results
def performance_per_missing_value(df: DataFrame, feature: str, label: str, task='classification'):
"""Performance difference between valued and missing values in feature."""
# 0. Infer the prediction task
task = get_prediction_task(df=df, label=label)
# 1. Define the baseline performance metric
metric = roc_auc_score if task == 'classification' else mean_squared_error
# 2. Get the baseline predictions
y_pred, x_test, y_test = baseline_predictions(df=df, label=label, task=task)
# 3. Get the performance per valued vs missing feature
missing_mask = x_test[feature].isna()
results = {}
results['missing'] = metric(y_test[missing_mask], y_pred[missing_mask])
results['valued'] = metric(y_test[~missing_mask], y_pred[~missing_mask])
return results
@ignore_warnings(category=ConvergenceWarning)
def predict_missingness(df: DataFrame, feature: str):
"Train a baseline model to predict the missingness of a feature value."
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
target = f'is_missing_{feature}'
# 1. Define the baseline model
model = BASELINE_CLASSIFIER
# 2. Create the new target
df[target] = df[feature].isna()
# 3. Train overall model
x_orig, y_orig = df.drop([feature, target], axis=1), df[target]
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=42)
model.fit(x_train.select_dtypes('number'), y_train)
# 4. Predict
y_pred = model.predict_proba(x_test.select_dtypes('number'))[:, 1]
# 5. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def standard_transform(df, dtypes, skip: Optional[list] = None, robust=False):
"""Applies standard transformation to the dataset (imputation, centering and scaling), returns transformed data
and the fitted transformer.
Numerical data is imputed with mean, centered and scaled by 4 standard deviations.
Categorical data is imputed with mode. Encoding is not performed in this stage to preserve the same columns.
If robust is passed as True, will truncate numerical data before computing statistics.
[1]From 1997 <NAME>; Martinez, <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
skip = [] if skip is None else skip
numerical_features = [key for key, value in dtypes.items() if value == 'numerical' and key not in skip]
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key not in skip]
assert len(numerical_features + categorical_features +
skip) == len(df.columns), 'the union of dtypes keys with skip should be the same as the df columns'
if robust:
numeric_transformer = Pipeline([
('imputer', SimpleImputer()),
('scaler', RobustScaler(quantile_range=(5.0, 95.0)))])
else:
numeric_transformer = NUMERIC_TRANSFORMER
preprocessor = ColumnTransformer(
transformers=[ # Numerical vars are scaled by 4sd so that most of the data are fit in the [-1, 1] range
('num', Pipeline(numeric_transformer.steps + \
[('divby4', FunctionTransformer(lambda x: x / 4))]), numerical_features),
('cat', Pipeline([('impute', SimpleImputer(strategy='most_frequent'))]), categorical_features)],
remainder='passthrough')
new_column_order = numerical_features + categorical_features + skip
tdf = DataFrame(preprocessor.fit_transform(df), index=df.index, columns=new_column_order)
return tdf, preprocessor
def performance_one_vs_rest(df: DataFrame, label_feat: str, _class: str, dtypes=None):
"""Train a classifier to predict a class in binary fashion against all other classes.
A normalized dataframe should be passed for best results"""
# 0. Preprocessing
df = df.copy() # avoid altering the original DataFrame
# 1. Define the baseline model
if not dtypes:
dtypes = infer_dtypes(df)
categorical_features = [key for key, value in dtypes.items() if value == 'categorical' and key != label_feat]
preprocessor = ColumnTransformer(
transformers=[('cat', CATEGORICAL_TRANSFORMER, categorical_features)]) # OHE categorical variables
model = Pipeline([('preprocessing', preprocessor), ('classifier', LogisticRegression())])
# 2. Train overall model
x_orig, y_orig = df.drop(label_feat, axis=1), label_binarize(df[label_feat], classes=[_class]).squeeze()
x_train, x_test, y_train, y_test = train_test_split(x_orig, y_orig, test_size=0.3, random_state=24)
model.fit(x_train, y_train)
# 3. Predict
y_pred = model.predict_proba(x_test)[:, 1]
# 4. Return the area under the roc curve
return roc_auc_score(y_test, y_pred)
def center_of_mass_statistic(column: Series, col_dtype: str) -> Union[float, int, str]:
"Returns a center of mass statistic of a column based on its dtype."
return column.mean() if col_dtype == 'numerical' else column.mode()[0] # only first mode
def estimate_centroid(df: DataFrame, dtypes: dict = None):
"""Makes a centroid estimation for a given dataframe.
Will use provided dtypes or infer in order to use best statistic columnwise"""
if dtypes:
if not all((col in dtypes for col in df.columns)):
            dtypes.update(infer_dtypes(df, skip=list(dtypes.keys())))  # update in place; dict.update returns None
else:
dtypes = infer_dtypes(df)
centroid = Series(df.iloc[0])
for col in centroid.index:
centroid[col] = center_of_mass_statistic(df[col], dtypes[col])
return centroid
def heom(x_df: DataFrame, y_df, dtypes):
"""Implements the Heterogeneous Euclidean-Overlap Metric between a sample x and a reference y.
The data is assumed to already be preprocessed (normalized and imputed).
[1]From 1997 <NAME>; <NAME>. -
Improved Heterogeneous Distance Functions https://arxiv.org/pdf/cs/9701101.pdf
"""
distances = DataFrame(empty(x_df.shape), index=x_df.index, columns=x_df.columns)
distance_funcs = {'categorical': lambda x, y: 0 if x == y else 1,
'numerical': lambda x, y: abs(x - y)} # Here we are assuming the data to be previously scaled
for col_idx, column in enumerate(distances.columns):
distances[column] = x_df[column].apply(distance_funcs[dtypes[column]], args=[y_df[col_idx]])
return distances
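# Illustrative sketch (not part of the library): HEOM distances of two records against a
# reference point, assuming the numerical column has already been scaled as described above.
def _heom_example() -> DataFrame:
    sample = DataFrame({"age": [0.2, 0.5], "colour": ["red", "blue"]})
    reference = [0.3, "red"]  # same column order as the sample
    dtypes = {"age": "numerical", "colour": "categorical"}
    return heom(sample, reference, dtypes)  # per-row distances: (~0.1, 0) and (0.2, 1)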
def estimate_sd(sample: DataFrame, reference=None, dtypes=None):
"""Estimates the standard deviation of a sample of records.
A reference can be passed in order to avoid new computation of mean or to use distances to another reference point.
The reference is expected as a (1, N) array where N is the number of columns in the sample.
Returns:
std_dev: the standard deviation of the distance vectors of the sample to the reference point
std_distances: the distances of the sample points to the reference point scaled by std_dev
"""
if dtypes: # Ensure dtypes are compatible with sample
if not all((col in dtypes for col in sample.columns)):
            dtypes.update(infer_dtypes(sample, skip=list(dtypes.keys())))  # update in place; dict.update returns None
else:
dtypes = infer_dtypes(sample)
if reference is None:
reference = estimate_centroid(sample, dtypes)
else:
assert len(reference) == len(
sample.columns), "The provided reference point does not have the same dimension as the sample records"
distances = heom(x_df=sample, y_df=reference, dtypes=dtypes)
euclidean_distances = (distances.apply(square).sum(axis=1) / len(sample.columns)).apply(sqrt)
std_dev = std(euclidean_distances)
std_distances = euclidean_distances / std_dev
return std_dev, std_distances
def gmm_clustering(data, n_gaussians):
"""Produces a GMM model with n_gaussians to cluster provided data."""
gmm_ = GaussianMixture(n_components=n_gaussians).fit(data)
return gmm_.predict(data), gmm_.aic(data)
def normality_test(data, suite='full', p_th=5e-3):
"""Performs a normality test on the data. Null hypothesis, data comes from normal distribution.
A transformations taken from a suite is applied to the data before each run of the normal test.
The first transformation in the suite that passes the normalcy test is returned
Returns:
result: True if any transformation led to a positive normal test, False otherwise
test: The first test in the suite to lead to positive normal test"""
transforms = {None: lambda x: x,
'inverse': reciprocal,
'square root': sqrt,
'log': nplog,
'Box Cox': boxcox}
if suite == 'full':
suite = transforms.keys()
else:
        suite = [suite] if isinstance(suite, str) else list(suite)
for transform in suite:
try:
transformed_data = transforms[transform](data)
_, p_stat = normaltest(transformed_data, nan_policy='raise')
except (AttributeError, TypeError, ZeroDivisionError, ValueError):
continue
if p_stat > p_th:
return True, transform, p_stat
return False, None, None
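# Illustrative usage sketch: a log-normal sample is expected to fail the raw test but pass after
# one of the candidate transforms (typically 'log'); the data here is synthetic.
def _normality_test_example():
    from numpy.random import default_rng
    data = default_rng(0).lognormal(mean=0.0, sigma=0.5, size=500)
    return normality_test(data, suite='full')  # (passed, transform, p_value)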
| 2.703125
| 3
|
project_generator/exporters/gccarm.py
|
sg-/project_generator
| 0
|
12775478
|
<gh_stars>0
# Copyright 2014-2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os.path import basename, relpath, join, normpath
from .exporter import Exporter
from ..targets import Targets
class MakefileGccArmExporter(Exporter):
optimization_options = ['O0', 'O1', 'O2', 'O3', 'Os']
def list_files(self, data, attribute, rel_path):
""" Creates a list of all files based on the attribute. """
file_list = []
for groups in data[attribute]:
try:
for k, v in groups.items():
for file in v:
file_list.append(join(rel_path, normpath(file)))
except:
continue
data[attribute] = file_list
def libraries(self, key, value, data):
""" Add defined GCC libraries. """
for option in value:
if key == "libraries":
data['source_files_lib'].append(option)
def compiler_options(self, key, value, data):
""" Compiler flags """
for option in value:
if key == "compiler_options":
data['compiler_options'].append(option)
def linker_options(self, key, value, data):
""" Linker flags """
for option in value:
if key == "linker_options":
data['linker_options'].append(option)
def optimization(self, key, value, data):
""" Optimization setting. """
for option in value:
if option in self.optimization_options:
data['optimization_level'] = option
def cc_standard(self, key, value, data):
""" C++ Standard """
if key == "cc_standard":
data['cc_standard'] = value
def c_standard(self, key, value, data):
""" C Standard """
if key == "c_standard":
data['c_standard'] = value
def parse_specific_options(self, data):
""" Parse all uvision specific setttings. """
data['compiler_options'] = []
for dic in data['misc']:
for k, v in dic.items():
self.libraries(k, v, data)
self.compiler_options(k, v, data)
self.optimization(k, v, data)
self.cc_standard(k, v, data)
self.c_standard(k, v, data)
data['linker_options'] = []
for dic in data['misc']:
for k, v in dic.items():
self.linker_options(k, v, data)
def fix_paths(self, data, name, env_settings):
# get relative path and fix all paths within a project
fixed_paths = []
for path in data['include_paths']:
fixed_paths.append(join(data['output_dir']['rel_path'], normpath(path)))
data['include_paths'] = fixed_paths
fixed_paths = []
for path in data['source_files_lib']:
fixed_paths.append(join(data['output_dir']['rel_path'], normpath(path)))
data['source_files_lib'] = fixed_paths
fixed_paths = []
for path in data['source_files_obj']:
fixed_paths.append(join(data['output_dir']['rel_path'], normpath(path)))
data['source_files_obj'] = fixed_paths
fixed_paths = []
for path in data['source_paths']:
fixed_paths.append(join(data['output_dir']['rel_path'], normpath(path)))
data['source_paths'] = fixed_paths
if data['linker_file']:
data['linker_file'] = join(data['output_dir']['rel_path'], normpath(data['linker_file']))
def generate(self, data, env_settings):
""" Processes misc options specific for GCC ARM, and run generator. """
self.process_data_for_makefile(data, env_settings, "make_gcc_arm")
project_path, makefile = self.gen_file('makefile_gcc.tmpl', data, 'Makefile', data['output_dir']['path'])
return project_path, [makefile]
def process_data_for_makefile(self, data, env_settings, name):
self.fix_paths(data, name, env_settings)
self.list_files(data, 'source_files_c', data['output_dir']['rel_path'])
self.list_files(data, 'source_files_cpp', data['output_dir']['rel_path'])
self.list_files(data, 'source_files_s', data['output_dir']['rel_path'])
self.parse_specific_options(data)
data['toolchain'] = 'arm-none-eabi-'
data['toolchain_bin_path'] = env_settings.get_env_settings('gcc')
target = Targets(env_settings.get_env_settings('definitions'))
if target.get_mcu_core(data['target']):
data['core'] = target.get_mcu_core(data['target'])[0]
else:
raise RuntimeError(
"Target: %s not found, Please add them to https://github.com/0xc0170/project_generator_definitions" % data['target'].lower())
# gcc arm is funny about cortex-m4f.
if data['core'] == 'cortex-m4f':
data['core'] = 'cortex-m4'
# change cortex-m0+ to cortex-m0plus
if data['core'] == 'cortex-m0+':
data['core'] = 'cortex-m0plus'
# set default values
if 'optimization_level' not in data:
data['optimization_level'] = self.optimization_options[0]
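# Illustrative only: the shape of the 'misc' records that parse_specific_options consumes;
# the option values shown are arbitrary examples, not defaults of the exporter.
EXAMPLE_MISC = [
    {'libraries': ['m', 'nosys']},
    {'compiler_options': ['-ffunction-sections', '-fdata-sections']},
    {'linker_options': ['-Wl,--gc-sections']},
    {'optimization': ['O2']},
    {'cc_standard': 'c++11'},
    {'c_standard': 'c99'},
]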
| 2.015625
| 2
|
openmdao.lib/src/openmdao/lib/optproblems/api.py
|
swryan/OpenMDAO-Framework
| 0
|
12775479
|
from sellar import SellarProblem, SellarProblemWithDeriv
from branin import BraninProblem
from scalable import UnitScalableProblem
| 1.195313
| 1
|
iutest/core/gotocode.py
|
mgland/iutest
| 10
|
12775480
|
# Copyright 2019-2020 by <NAME>, MGLAND animation studio. All rights reserved.
# This file is part of IUTest, and is released under the "MIT License Agreement".
# Please see the LICENSE file that should have been included as part of this package.
import logging
import os
from iutest.core import appsettings
from iutest.core import constants
from iutest.qt import QtCore, Signal
logger = logging.getLogger(__name__)
class CodeLineVisitor(QtCore.QObject):
_editorSetting = None
errorIssued = Signal(str)
@classmethod
def initEditorSetting(cls):
cls._editorSetting = appsettings.get().simpleConfigStrValue(
constants.CONFIG_KEY_CODE_EDITOR, constants.CONFIG_KEY_CODE_EDITOR_DEFAULT
)
@classmethod
def config(cls):
if not cls._editorSetting:
cls.initEditorSetting()
return cls._editorSetting
def __init__(self, parent=None):
QtCore.QObject.__init__(self, parent=parent)
self._lastCmd = None
self._process = QtCore.QProcess(self)
self._process.error.connect(self._onGoToCodeError)
self._process.readyReadStandardError.connect(self._onReadyReadStandardError)
@staticmethod
def _goToCmd(template, filePath, lineNumber):
cmd = template.replace(constants.CODE_FILE_VAR, filePath)
return cmd.replace(constants.CODE_LINE_VAR, str(lineNumber))
def goTo(self, filePath, lineNumber=0):
if not os.path.isfile(filePath):
logger.warning("%s is not a valid file.", filePath)
self._lastCmd = self._goToCmd(self.config(), filePath, lineNumber)
logger.debug(self._lastCmd)
self._process.start(self._lastCmd)
def _onGoToCodeError(self, err):
msg = "<font color=red><b>Error: </b></font>"
if err == self._process.FailedToStart:
            msg = (
                msg
                + "Failed to launch the program: it was either missing or you lack sufficient permissions.<br><br>"
            )
            msg = (
                msg
                + "You might need to change the goToCode setting in the Preferences dialog, e.g.<br>specify the full path to the program."
            )
elif err == self._process.Crashed:
msg = msg + "The program to browse the code has crashed."
elif err == self._process.Timedout:
msg = msg + "The last goToCodeProcess.waitFor...() function timed out."
elif err == self._process.WriteError:
msg = (
msg
+ "An error occurred when attempting to write to the goToCode process."
)
elif err == self._process.ReadError:
            msg = (
                msg
                + "An error occurred when attempting to read from the goToCode process."
            )
else:
msg = msg + "An unknown error occurred when attempting to go to the code."
msg = msg + "<hr><font color=red><b>Failed Command:</b></font><br>{}".format(
self._lastCmd
)
self.errorIssued.emit(msg)
def _onReadyReadStandardError(self):
logger.error(self.readAllStandardError())
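# Illustrative sketch (not part of IUTest): builds an editor command from the placeholder
# constants above; the 'code --goto' command line is just an example editor invocation.
def _exampleGoToCmd(filePath, lineNumber):
    template = "code --goto {0}:{1}".format(constants.CODE_FILE_VAR, constants.CODE_LINE_VAR)
    return CodeLineVisitor._goToCmd(template, filePath, lineNumber)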
| 1.601563
| 2
|
visualization/TextArt/generate_text.py
|
Leterax/Visualization
| 28
|
12775481
|
from pathlib import Path
import numpy as np
from PIL import ImageFont
from scipy.ndimage import convolve
from scipy.spatial import cKDTree
resource_dir = (Path(__file__) / "../resources").absolute()
class Particle:
def __init__(self, x, y, color, ball_size=1):
self.pos = np.array([x, y]).astype(float)
self.vel = np.zeros(2)
self.acc = np.zeros(2)
self.target = self.pos
self.radius = ball_size
self.max_speed = 10
self.max_force = 0.6
self.color = np.array(color, dtype=np.uint8)
def update(self):
self.pos += self.vel
self.vel += self.acc
self.acc *= 0
def arrive(self):
# calculate the distance
dist = np.linalg.norm(self.target - self.pos)
# normalize it
desired = (self.target - self.pos) / dist
# if we are less than 100px away from our target, start to slow down
if dist < 100:
speed = dist / 100 * self.max_speed
else:
# otherwise go at full speed
speed = self.max_speed
# set the magnitude of our desired vector
desired *= speed
steer = desired - self.vel
steer_mag = np.linalg.norm(steer)
if steer_mag > self.max_force:
steer = steer / steer_mag * self.max_force
return steer
def render_text_perimeter_balls(
txt, pos=(0, 0), scale=16, color=(235, 64, 52), ball_size=4.5
):
# place particles on the text outline without overlapping them.
font = ImageFont.truetype(
(resource_dir / "VCR_OSD_MONO_1.001.ttf").as_posix(), scale
)
a = font.getmask(txt)
out = np.empty(a.size)
for y in range(a.size[0]):
for x in range(a.size[1]):
out[y, x] = a.getpixel((y, x))
out = out / 255
out = np.where(out > 0, 1, 0)
out = np.rot90(out)
kernel = np.array([[1, 1, 1], [1, 0, 1], [1, 1, 1]])
out = convolve(out, kernel, mode="constant")
outline = np.where(out == 5, 1, 0)
indices = np.transpose(outline.nonzero()) + np.array(pos)
particles = []
for xy in indices:
particles.append(Particle(xy[1], xy[0], color, ball_size))
quadTree = cKDTree([p.pos for p in particles])
# loop over particles. remove all touching particles
to_remove = set()
for particle in particles:
if particle in to_remove:
continue
colliding_particles = [
particles[i]
for i in quadTree.query_ball_point(particle.pos, particle.radius * 2)
]
for p in colliding_particles:
if p != particle:
to_remove.add(p)
for particle in to_remove:
particles.remove(particle)
out = np.array([p.pos for p in particles])
# out = out/np.linalg.norm(out)
return out
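# Illustrative helper (not in the original script): advances particles toward their assigned
# targets using the arrive() steering above. It assumes each particle's target has been set
# away from its current position; particles already at their target are skipped, since
# arrive() would divide by zero there.
def step_particles(particles, steps=1):
    for _ in range(steps):
        for p in particles:
            if np.array_equal(p.pos, p.target):
                continue
            p.acc = p.acc + p.arrive()
            p.update()
    return particles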
if __name__ == "__main__":
# generate the particles with their target position
render_text_perimeter_balls("Hey!", scale=300, pos=(75, 250), color=(226, 53, 31))
render_text_perimeter_balls(
"#show-your-projects",
scale=70,
pos=(10, 150),
color=(231, 201, 49),
ball_size=2,
)
| 2.625
| 3
|
aggregables/sequences/suffix_trees/suffix_trees/test/test_simple.py
|
nevesnunes/aggregables
| 106
|
12775482
|
<filename>aggregables/sequences/suffix_trees/suffix_trees/test/test_simple.py
from suffix_trees import STree
def test_lcs():
a = ["abeceda", "abecednik", "abeabecedabeabeced",
"abecedaaaa", "aaabbbeeecceeeddaaaaabeceda"]
st = STree.STree(a)
assert st.lcs() == "abeced", "LCS test"
def test_missing():
text = "name language w en url http w namelanguage en url http"
stree = STree.STree(text)
assert stree.find("law") == -1
assert stree.find("ptth") == -1
assert stree.find("name language w en url http w namelanguage en url httpp") == -1
def test_find():
st = STree.STree("abcdefghab")
assert st.find("abc") == 0
assert st.find_all("ab") == {0, 8}
| 2.953125
| 3
|
polkadot_prometheus_exporter/_blockchain.py
|
w3f-community/polkadot-prometheus-exporter
| 8
|
12775483
|
<reponame>w3f-community/polkadot-prometheus-exporter
# Copyright (C) 2019 MixBytes, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND (express or implied).
from polkadot_prometheus_exporter._rpc import get_block_num
class BlockCache:
"""
Simple code which caches blocks.
"""
def __init__(self, rpc, size=256):
self._rpc = rpc
self.size = size
# hash
self._cache = dict()
def get(self, block_hash):
block_hash = block_hash.lower()
if block_hash not in self._cache:
block = self._rpc.request('chain_getBlock', [block_hash])['result']
if block is None:
# not caching negative results
return None
# the simplest cleanup algorithm with amortized constant time complexity
if len(self._cache) >= self.size * 2:
ordered_by_block_num = sorted(self._cache.items(), key=lambda i: get_block_num(i[1]), reverse=True)
self._cache = dict(ordered_by_block_num[:self.size])
self._cache[block_hash] = block
return self._cache[block_hash]
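# Illustrative usage sketch: exercises the cache with a stub RPC object; the nested block
# structure below is a stand-in, not a faithful chain_getBlock response.
class _StubRpc:
    def request(self, method, params):
        return {'result': {'block': {'header': {'number': '0x1'}}}}

def _block_cache_example():
    cache = BlockCache(_StubRpc(), size=2)
    first = cache.get('0xABC')
    second = cache.get('0xabc')  # same hash, different case: served from the cache
    return first is second  # True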
| 2.5625
| 3
|
mysite/blog/models.py
|
janusnic/dj-blog
| 0
|
12775484
|
<filename>mysite/blog/models.py<gh_stars>0
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
class Category( models.Model ):
name = models.CharField( max_length= 50, unique= True )
slug = models.SlugField( max_length= 50, unique= True )
def __str__(self):
return self.name
class Post( models.Model ):
author = models.ForeignKey( User )
title = models.CharField( max_length= 100 )
slug = models.SlugField( max_length= 100, unique= True )
content = models.TextField()
date_added = models.DateTimeField( default= timezone.now )
categories = models.ManyToManyField( Category )
def __str__(self):
return self.title
class Meta:
ordering = ( '-date_added', )
| 2.359375
| 2
|
Traidoo/settings/third_party/jwt.py
|
stanwood/traidoo-api
| 3
|
12775485
|
<filename>Traidoo/settings/third_party/jwt.py
import datetime
SIMPLE_JWT = {
"ACCESS_TOKEN_LIFETIME": datetime.timedelta(minutes=15),
"REFRESH_TOKEN_LIFETIME": datetime.timedelta(days=1),
"ROTATE_REFRESH_TOKENS": False,
}
| 1.5625
| 2
|
chrome/common/extensions/docs/server2/object_store_creator_test.py
|
codenote/chromium-test
| 0
|
12775486
|
<gh_stars>0
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from test_object_store import TestObjectStore
from object_store_creator import ObjectStoreCreator
class _FooClass(object):
def __init__(self): pass
class ObjectStoreCreatorTest(unittest.TestCase):
def setUp(self):
self.creator = ObjectStoreCreator(_FooClass, store_type=TestObjectStore)
def testVanilla(self):
store = self.creator.Create()
self.assertEqual('_FooClass', store.namespace)
def testWithVersion(self):
store = self.creator.Create(version=42)
self.assertEqual('_FooClass/42', store.namespace)
def testWithCategory(self):
store = self.creator.Create(category='cat')
self.assertEqual('_FooClass/cat', store.namespace)
def testWithVersionAndCategory(self):
store = self.creator.Create(version=43, category='mat')
self.assertEqual('_FooClass/mat/43', store.namespace)
  def testIllegalInput(self):
self.assertRaises(AssertionError, self.creator.Create, category='5')
self.assertRaises(AssertionError, self.creator.Create, category='forty2')
self.assertRaises(AssertionError, self.creator.Create, version='twenty')
self.assertRaises(AssertionError, self.creator.Create, version='7a')
def testFactoryWithBranch(self):
store = ObjectStoreCreator.Factory().Create(
_FooClass, store_type=TestObjectStore).Create()
self.assertEqual('_FooClass', store.namespace)
store = ObjectStoreCreator.Factory(branch='dev').Create(
_FooClass, store_type=TestObjectStore).Create()
self.assertEqual('_FooClass@dev', store.namespace)
if __name__ == '__main__':
unittest.main()
| 2.375
| 2
|
examples/location.py
|
timgates42/glass.py
| 0
|
12775487
|
# Import glass library
import glass
# Import app configs
import configs
app = glass.Application(
name="Hello",
client_id=configs.CLIENT_ID,
client_secret=configs.CLIENT_SECRET
)
@app.subscriptions.login
def login(user):
profile = user.profile()
print "user : %s" % profile.get("given_name")
user.timeline.post(text="Hello %s!" % profile.get("given_name"))
@app.subscriptions.location
def change_location(user):
# Get last known location
location = user.location()
# Post card with location infos
user.timeline.post(text="You move to (Lat: %s, Long: %s) (Accuracy: %s meters)" % (
location.get('latitude'),
location.get('longitude'),
location.get('accuracy')
))
if __name__ == '__main__':
app.run(port=8080)
| 2.71875
| 3
|
pywhale/app/server.py
|
stefan2200/pywhale
| 1
|
12775488
|
import os.path
from flask import Flask, render_template, jsonify, request
from pywhale.whale import PyWhale
curr_file = os.path.abspath(os.path.dirname(__file__))
app_path = os.path.join(curr_file)
static_path = os.path.join(curr_file, 'static')
template_path = os.path.join(curr_file, 'templates')
app = Flask("PyWhale", root_path=app_path, template_folder=template_path)
@app.route('/', methods=['GET'])
def main(): # pragma: no cover
return render_template("app.html")
@app.route('/api/process', methods=['POST'])
def process(): # pragma: no cover
whale = PyWhale.process(request.form.get("body"))
return jsonify(whale)
def start(host="127.0.0.1", port=3333):
app.run(host=host, port=port)
| 2.34375
| 2
|
examples/Inverse design of Splitter/splitter_opt_2D_TE_topology_minfeaturesize.py
|
tshwang0928/Lumopt
| 0
|
12775489
|
<reponame>tshwang0928/Lumopt<filename>examples/Inverse design of Splitter/splitter_opt_2D_TE_topology_minfeaturesize.py
######## IMPORTS ########
# General purpose imports
import numpy as np
import os
import sys
import scipy as sp
# Optimization specific imports
from lumopt import CONFIG
from lumopt.geometries.topology import TopologyOptimization2D
from lumopt.utilities.load_lumerical_scripts import load_from_lsf
from lumopt.figures_of_merit.modematch import ModeMatch
from lumopt.optimization import Optimization
from lumopt.optimizers.generic_optimizers import ScipyOptimizers
from lumopt.utilities.wavelengths import Wavelengths
cur_path = os.path.dirname(os.path.abspath(__file__))
######## RUNS TOPOLOGY OPTIMIZATION OF A 2D STRUCTURE ########
def runSim(params, eps_min, eps_max, x_pos, y_pos, filter_R, working_dir):
######## DEFINE A 2D TOPOLOGY OPTIMIZATION REGION ########
geometry = TopologyOptimization2D(params=params, eps_min=eps_min, eps_max=eps_max, x=x_pos, y=y_pos, z=0, filter_R=filter_R, min_feature_size=filter_R)
######## DEFINE FIGURE OF MERIT ########
# The base simulation script defines a field monitor named 'fom' at the point where we want to modematch to the fundamental TE mode
fom = ModeMatch(monitor_name = 'fom', mode_number = 'Fundamental TE mode', direction = 'Forward', norm_p = 2, target_fom = 0.5)
######## DEFINE OPTIMIZATION ALGORITHM ########
optimizer = ScipyOptimizers(max_iter=60, method='L-BFGS-B', scaling_factor=1, pgtol=1e-6, ftol=1e-5, scale_initial_gradient_to=0.25)
######## LOAD TEMPLATE SCRIPT AND SUBSTITUTE PARAMETERS ########
script = load_from_lsf(os.path.join(cur_path, 'splitter_base_2D_TE_topology.lsf'))
## Here, we substitute the size of the optimization region to properly scale the simulation domain
size_x = max(x_pos) - min(x_pos)
script = script.replace('opt_size_x=3.5e-6','opt_size_x={:1.6g}'.format(size_x))
size_y = max(y_pos) - min(y_pos)
script = script.replace('opt_size_y=3.5e-6','opt_size_y={:1.6g}'.format(2*size_y)) #< Factor 2 is because of symmetry in y-direction
######## SETTING UP THE OPTIMIZER ########
wavelengths = Wavelengths(start = 1450e-9, stop = 1650e-9, points = 11)
opt = Optimization(base_script=script, wavelengths = wavelengths, fom=fom, geometry=geometry, optimizer=optimizer, use_deps=False, hide_fdtd_cad=True, plot_history=False, store_all_simulations=False)
opt.continuation_max_iter = 40 #< How many iterations per binarization step (default is 20)
######## RUN THE OPTIMIZER ########
opt.run(working_dir = working_dir)
if __name__ == '__main__':
size_x = 3000 #< Length of the device (in nm). Longer devices typically lead to better performance
delta_x = 20 #< Size of a pixel along x-axis (in nm)
    size_y = 1800           #< Since we use symmetry, this is only half the extent along the y-axis (in nm)
delta_y = 20 #< Size of a pixel along y-axis (in nm)
filter_R = 150 #< Radius of the smoothing filter which removes small features and sharp corners (in nm)
eps_max = 2.8**2 #< Effective permittivity for a Silicon waveguide with a thickness of 220nm
eps_min = 1.44**2 #< Permittivity of the SiO2 cladding
x_points=int(size_x/delta_x)+1
y_points=int(size_y/delta_y)+1
x_pos = np.linspace(-size_x/2,size_x/2,x_points)*1e-9
y_pos = np.linspace(0,size_y,y_points)*1e-9
## Set initial conditions
initial_cond = 0.5*np.ones((x_points,y_points)) #< Start with the domain filled with (eps_max+eps_min)/2
## Alternative initial conditions
#initial_cond = None #< Use the structure 'initial_guess' as defined in the project file
#initial_cond = np.ones((x_points,y_points)) #< Start with the domain filled with eps_max
#initial_cond = np.zeros((x_points,y_points)) #< Start with the domain filled with eps_min
working_dir = os.path.join(cur_path, 'splitter_2D_TE_topo_x{:04d}_y{:04d}_f{:04d}'.format(size_x,size_y,int(filter_R)))
runSim(initial_cond, eps_min, eps_max, x_pos, y_pos, filter_R*1e-9, working_dir)
| 2.796875
| 3
|
programs/OLD/cluster3.py
|
CORDEA/analysis_of_1000genomes-data
| 0
|
12775490
|
#!/usr/bin/env python
infileList = []
keyList = []
cList = (
"GBR",
"FIN",
"CHS",
"PUR",
"CLM",
"IBS",
"CEU",
"YRI",
"CHB",
"JPT",
"LWK",
"ASW",
"MXL",
"TSI",
)
for i in range(17,23):
infileList.append("proc_input.chr" + str(i) + ".vcf")
keyList.append("cluster_chr" + str(i))
infileList.append("proc_input.chrX.vcf")
keyList.append("cluster_chrX")
for l in range(len(infileList)):
files = open(infileList[l], "r")
outFile = open("output/" + keyList[l], "w")
logFile = open("output/cluster.log", "w")
line = files.readline()
count = 0
cDict = {}
while line:
if not count == 0:
tmp = line.split(",")
for k in range(len(tmp)):
if not k == 0:
value = tmp[k].split(":")
if count == 1:
cDict[cList[k - 1]] = value
logFile.write("cList: " + str(cList[k-1]))
for j in range(len(cDict[cList[k - 1]])):
cDict[cList[k - 1]][j] = int(cDict[cList[k - 1]][j])
else:
for j in range(len(value)):
if not j == 3:
cDict[cList[k - 1]][j] += int(value[j])
count += 1
line = files.readline()
print(str(count) + " end")
logFile.write("cDict: " + str(cDict))
for k, list in cDict.items():
sum = 0
for v in list:
sum += int(v)
logFile.write("sum: " + str(sum))
for v in range(len(list)):
if not v == len(list) - 1:
outFile.write(str(round((float(list[v]) / float(sum)) * 100, 2)) + ",")
else:
outFile.write(str(round((float(list[v]) / float(sum)) * 100, 2)) + "\n")
print(infileList[l] + " success")
files.close()
outFile.close()
logFile.close()
| 2.078125
| 2
|
Tests/image_tests/renderpasses/graphs/ForwardRendering.py
|
wsqjny/Falcor
| 1,615
|
12775491
|
from falcor import *
def render_graph_ForwardRendering():
loadRenderPassLibrary("DepthPass.dll")
loadRenderPassLibrary("ForwardLightingPass.dll")
loadRenderPassLibrary("BlitPass.dll")
testForwardRendering = RenderGraph("ForwardRenderer")
DepthPass = createPass("DepthPass", {'depthFormat': ResourceFormat.D32Float})
testForwardRendering.addPass(DepthPass, "DepthPass")
SkyBox = createPass("SkyBox")
testForwardRendering.addPass(SkyBox, "SkyBox")
ForwardLightingPass = createPass("ForwardLightingPass", {'sampleCount': 1, 'enableSuperSampling': False})
testForwardRendering.addPass(ForwardLightingPass, "ForwardLightingPass")
BlitPass = createPass("BlitPass", {'filter': SamplerFilter.Linear})
testForwardRendering.addPass(BlitPass, "BlitPass")
testForwardRendering.addEdge("ForwardLightingPass.color", "BlitPass.src")
testForwardRendering.addEdge("DepthPass.depth", "ForwardLightingPass.depth")
testForwardRendering.addEdge("DepthPass.depth", "SkyBox.depth")
testForwardRendering.addEdge("SkyBox.target", "ForwardLightingPass.color")
testForwardRendering.markOutput("BlitPass.dst")
testForwardRendering.markOutput("ForwardLightingPass.motionVecs")
return testForwardRendering
ForwardRendering = render_graph_ForwardRendering()
try: m.addGraph(ForwardRendering)
except NameError: pass
| 2.140625
| 2
|
natch/decorators/eq.py
|
ertgl/natch
| 2
|
12775492
|
from natch.core import Decoration
from natch.rules import Eq
eq = Decoration.make_rule_decorator(Eq)
| 1.25
| 1
|
my_app.py
|
RaMantik2/test
| 0
|
12775493
|
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, QPushButton, QVBoxLayout, QHBoxLayout
from PyQt5.QtCore import Qt
| 1.414063
| 1
|
train/loader.py
|
dhesin/team_sf_lidar
| 0
|
12775494
|
<filename>train/loader.py
import sys
sys.path.append('../')
import os
import numpy as np
import glob
import argparse
import csv
import sys
import pickle
import pandas as pd
import globals
from common.csv_utils import foreach_dirset
from random import randrange
from collections import defaultdict
from encoder import generate_label, get_label_bounds, generate_camera_label
import cv2
from cv_bridge import CvBridge
def usage():
print('Loads training data with ground truths and generate training batches')
print('Usage: python loader.py --input_csv_file [csv file of data folders]')
def data_number_of_batches_per_epoch(data, BATCH_SIZE):
size = len(data)
return int(size / BATCH_SIZE) + (1 if size % BATCH_SIZE != 0 else 0)
#
# rotate images/labels randomly
#
def data_random_rotate(image, label, obj_center, rz, obj_size, img_width, img_height, use_regression):
label_dim = globals.NUM_CLASSES
if use_regression:
label_dim = globals.NUM_CLASSES + globals.NUM_REGRESSION_OUTPUTS
else:
        rz = 0.  # remove after regression is added for the camera model
# get bounding box of object in 2D
(upper_left_x, upper_left_y), (lower_right_x, lower_right_y) = \
get_label_bounds(obj_center[0], obj_center[1], obj_center[2], rz, obj_size[0], obj_size[1], obj_size[2])
# do not rotate if object rolls partially to right/left of the image
# get another random number
rotate_by = randrange(0, img_width)
while upper_left_x+rotate_by <= img_width <= lower_right_x+rotate_by:
rotate_by = randrange(0, img_width)
#print "rotate_by: " + str(rotate_by)
label_reshaped = np.reshape(label, (img_height, img_width, label_dim))
rotated_label = np.roll(label_reshaped, rotate_by, axis=1)
rotated_flatten_label = np.reshape(rotated_label, (img_height*img_width, label_dim))
rotated_img = np.roll(image, rotate_by, axis=1)
# copy back rotated parts to original images/label
np.copyto(image, rotated_img)
np.copyto(label, rotated_flatten_label)
#
# rotate data in a given batch
#
def batch_random_rotate(indicies, images, labels, tx, ty, tz, rz, obsl, obsw, obsh, img_width, img_height, use_regression):
img_ind = 0
for ind in indicies:
obj_center = [tx[ind], ty[ind], tz[ind]]
obj_size = [obsl[ind], obsw[ind], obsh[ind]]
data_random_rotate(images[img_ind], labels[img_ind], obj_center, rz[ind], obj_size, img_width, img_height, use_regression)
img_ind += 1
def generate_index_list(indicies_list, randomize, num_batches, batch_size):
if randomize:
np.random.shuffle(indicies_list)
indicies = indicies_list
if len(indicies_list) < num_batches * batch_size:
# add records from entire set to fill remaining space in batch
indicies_list_rem = np.arange(len(indicies_list))
if randomize:
np.random.shuffle(indicies_list_rem)
rem = num_batches * batch_size - len(indicies_list)
indicies = np.concatenate((indicies_list, indicies_list_rem[0:rem]))
return indicies
#
# read in images/ground truths batch by batch
#
def data_generator_train(obs_centroids_and_rotation, obs_size, pickle_dir_and_prefix,
BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS, NUM_CLASSES,
data_source, camera_model=None, cache=None, randomize=True, augment=True, use_regression=True):
tx = obs_centroids_and_rotation[0]
ty = obs_centroids_and_rotation[1]
tz = obs_centroids_and_rotation[2]
rx = obs_centroids_and_rotation[3]
ry = obs_centroids_and_rotation[4]
rz = obs_centroids_and_rotation[5]
obsl = obs_size[0]
obsw = obs_size[1]
obsh = obs_size[2]
if data_source == "lidar":
images = np.ndarray(shape=(BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS), dtype=float)
obj_labels = np.ndarray(shape=(BATCH_SIZE, IMG_HEIGHT*IMG_WIDTH, NUM_CLASSES+globals.NUM_REGRESSION_OUTPUTS), dtype=np.uint8)
elif data_source == "camera":
images = np.ndarray(shape=(BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS), dtype=float)
obj_labels = np.ndarray(shape=(BATCH_SIZE, IMG_HEIGHT*IMG_WIDTH, NUM_CLASSES), dtype=np.uint8)
else:
print "invalid data source type"
exit(1)
num_batches = data_number_of_batches_per_epoch(pickle_dir_and_prefix, BATCH_SIZE)
indicies_list = np.arange(len(tx))
is_cache_avail = False
if cache is not None:
is_cache_avail = cache['data'] is not None and cache['labels'] is not None
if not is_cache_avail:
cache['data'] = np.ndarray(shape=(len(pickle_dir_and_prefix), IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS), dtype=float)
if data_source == "lidar":
cache['labels'] = np.ndarray(shape=(len(tx), IMG_HEIGHT*IMG_WIDTH, NUM_CLASSES+globals.NUM_REGRESSION_OUTPUTS), dtype=np.uint8)
elif data_source == "camera":
cache['labels'] = np.ndarray(shape=(len(tx), IMG_HEIGHT*IMG_WIDTH, NUM_CLASSES), dtype=np.uint8)
while 1:
indicies = generate_index_list(indicies_list, randomize, num_batches, BATCH_SIZE)
for batch in range(num_batches):
batch_indicies = indicies[batch * BATCH_SIZE:batch * BATCH_SIZE + BATCH_SIZE]
if not is_cache_avail:
load_data(batch_indicies, images, pickle_dir_and_prefix, data_source, NUM_CHANNELS)
load_label_data(batch_indicies, images, obj_labels, tx, ty, tz, rx, ry, rz, obsl, obsw, obsh,
(IMG_HEIGHT, IMG_WIDTH, NUM_CLASSES),
data_source, camera_model)
if cache is not None:
# save to cache
i = 0
for ind in batch_indicies:
np.copyto(cache['data'][ind], images[i])
np.copyto(cache['labels'][ind], obj_labels[i])
i += 1
else:
# copy from cache
i = 0
for ind in batch_indicies:
np.copyto(images[i], cache['data'][ind])
np.copyto(obj_labels[i], cache['labels'][ind])
i += 1
if augment:
batch_random_rotate(batch_indicies, images, obj_labels, tx, ty, tz, rz, obsl, obsw, obsh, IMG_WIDTH, IMG_HEIGHT, use_regression)
yield (images, obj_labels)
def data_generator_predict(pickle_dir_and_prefix, BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS, NUM_CLASSES):
images = np.ndarray(shape=(BATCH_SIZE, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS), dtype=float)
num_batches = data_number_of_batches_per_epoch(pickle_dir_and_prefix, BATCH_SIZE)
indicies_list = np.arange(len(pickle_dir_and_prefix))
while 1:
indicies = generate_index_list(indicies_list, True, num_batches, BATCH_SIZE)
for batch in range(num_batches):
batch_indicies = indicies[batch * BATCH_SIZE:batch * BATCH_SIZE + BATCH_SIZE]
load_lidar_data(batch_indicies, images, pickle_dir_and_prefix)
yield images
def load_lidar_image_data(path):
f = open(path, 'rb')
pickle_data = pickle.load(f)
img_arr = np.asarray(pickle_data, dtype='float32')
f.close()
return img_arr
def load_lidar_data(indicies, images, pickle_dir_and_prefix):
batch_index = 0
for ind in indicies:
fname = pickle_dir_and_prefix[ind] + "_distance_float.lidar.p"
img_arr = load_lidar_image_data(fname)
np.copyto(images[batch_index, :, :, 0], img_arr)
fname = pickle_dir_and_prefix[ind] + "_height_float.lidar.p"
img_arr = load_lidar_image_data(fname)
np.copyto(images[batch_index, :, :, 1], img_arr)
fname = pickle_dir_and_prefix[ind] + "_intensity_float.lidar.p"
img_arr = load_lidar_image_data(fname)
np.copyto(images[batch_index, :, :, 2], img_arr)
batch_index += 1
def load_camera_data(indicies, images, pickle_dir_and_prefix, num_channels):
batch_index = 0
#read_mode = cv2.IMREAD_UNCHANGED
if num_channels == 3:
read_mode = cv2.IMREAD_COLOR
elif num_channels == 1:
read_mode = cv2.IMREAD_GRAYSCALE
for ind in indicies:
fname = pickle_dir_and_prefix[ind] + "_image.png"
img = cv2.imread(fname, read_mode)
#cv2.imwrite(str(ind)+"_grayimg.png", img)
img_arr = np.expand_dims(np.asarray(img, dtype='float64'), 2)
#img_arr = img_arr/255.0 - 0.5
np.copyto(images[batch_index], img_arr)
batch_index += 1
def load_data(indicies, images, pickle_dir_and_prefix, data_source, num_channels):
if data_source == "lidar":
load_lidar_data(indicies, images, pickle_dir_and_prefix)
elif data_source == "camera":
load_camera_data(indicies, images, pickle_dir_and_prefix, num_channels)
else:
print "invalid data source"
exit(1)
def load_lidar_label_data(indicies, images, obj_labels, tx, ty, tz, rx, ry, rz, obsl, obsw, obsh, shape):
batch_index = 0
for ind in indicies:
label = generate_label(tx[ind], ty[ind], tz[ind], rx[ind], ry[ind], rz[ind],
obsl[ind], obsw[ind], obsh[ind], shape, image=images[batch_index,:,:,:2])
# label = np.ones(shape=(IMG_HEIGHT, IMG_WIDTH),dtype=np.dtype('u2'))
np.copyto(obj_labels[batch_index], np.uint8(label))
batch_index += 1
def load_camera_label_data(indicies, obj_labels, tx, ty, tz, obsl, obsw, obsh,
shape, camera_model):
batch_index = 0
for ind in indicies:
label, _, _, _, _, _ = generate_camera_label(tx[ind], ty[ind], tz[ind], obsl[ind], obsw[ind], obsh[ind], shape, camera_model)
np.copyto(obj_labels[batch_index], np.uint8(label))
batch_index += 1
def load_label_data(indicies, images, obj_labels, tx, ty, tz, rx, ry, rz, obsl, obsw, obsh, shape,
data_source, camera_model=None):
if data_source == "lidar":
load_lidar_label_data(indicies, images, obj_labels, tx, ty, tz, rx, ry, rz, obsl, obsw, obsh, shape)
elif data_source == "camera":
load_camera_label_data(indicies, obj_labels, tx, ty, tz, obsl, obsw, obsh,
shape, camera_model)
else:
print "invalid data source"
exit(1)
def filter_camera_data_and_gt(camera_model, data, camera_bounds):
centroid = data[0]
size = data[2]
tx = centroid[0]
ty = centroid[1]
tz = centroid[2]
obsl = size[0]
obsw = size[1]
obsh = size[2]
index = 0
total_removed = 0
for i in range(len(data[1])):
centroid_2d = camera_model.project_lidar_points_to_camera_2d([
[tx[index], ty[index], tz[index], 1.0]
])[0]
if not(camera_bounds[0][0] < centroid_2d[0] < camera_bounds[0][1] and
camera_bounds[1][0] < centroid_2d[1] < camera_bounds[1][1]):
del tx[index]
del ty[index]
del tz[index]
del obsl[index]
del obsw[index]
del obsh[index]
del data[1][index]
total_removed += 1
else:
index += 1
assert len(tx) == len(ty) and len(tz) == len(tx)
assert len(obsl) == len(tx) and len(obsw) == len(tx) and len(obsh) == len(tx)
assert len(data[1]) == len(tx)
print "camera data {} out of {} removed".format(total_removed, len(data[1]))
def get_data(csv_sources, parent_dir, data_source):
txl = []
tyl = []
tzl = []
rxl = []
ryl = []
rzl = []
obsl = []
obsw = []
obsh = []
pickle_dir_and_prefix = []
def process(dirset):
# load timestamps
if data_source == 'lidar':
timestamp_truth_fname = dirset.dir+"/lidar_timestamps.csv"
elif data_source == 'camera':
timestamp_truth_fname = dirset.dir+"/camera_timestamps.csv"
else:
print "invalid data source type"
assert(0)
with open(timestamp_truth_fname) as csvfile:
readCSV = csv.DictReader(csvfile, delimiter=',')
for row in readCSV:
ts = row['timestamp']
pickle_dir_and_prefix.append(file_prefix_for_timestamp(dirset.dir, data_source, ts))
txl.append(1.0)
tyl.append(1.0)
tzl.append(1.0)
rxl.append(1.0)
ryl.append(1.0)
rzl.append(1.0)
obsl.append(1.0)
obsw.append(1.0)
obsh.append(1.0)
foreach_dirset(csv_sources, parent_dir, process)
obs_centroid_and_rotation = [txl, tyl, tzl, rxl, ryl, rzl]
obs_size = [obsl, obsw, obsh]
return obs_centroid_and_rotation, pickle_dir_and_prefix, obs_size
#
# read input csv file to get the list of directories
#
def get_data_and_ground_truth(csv_sources, parent_dir, data_source):
txl = []
tyl = []
tzl = []
rxl = []
ryl = []
rzl = []
obsl = []
obsw = []
obsh = []
pickle_dir_and_prefix = []
def process(dirset):
if data_source == 'lidar':
timestamp_truth_fname = dirset.dir+"/obs_poses_interp_transform.csv"
elif data_source == 'camera':
timestamp_truth_fname = dirset.dir+"/obs_poses_camera.csv"
else:
print "invalid data source type"
assert(0)
with open(timestamp_truth_fname) as csvfile_2:
readCSV_2 = csv.DictReader(csvfile_2, delimiter=',')
for row2 in readCSV_2:
ts = row2['timestamp']
tx = row2['tx']
ty = row2['ty']
tz = row2['tz']
rx = row2['rx']
ry = row2['ry']
rz = row2['rz']
pickle_dir_prefix = file_prefix_for_timestamp(dirset.dir, data_source, ts)
pickle_dir_and_prefix.append(pickle_dir_prefix)
txl.append(float(tx))
tyl.append(float(ty))
tzl.append(float(tz))
rxl.append(float(rx))
ryl.append(float(ry))
rzl.append(float(rz))
obsl.append(float(dirset.mdr['l']))
obsw.append(float(dirset.mdr['w']))
obsh.append(float(dirset.mdr['h']))
foreach_dirset(csv_sources, parent_dir, process)
obs_centroid_and_rotation = [txl, tyl, tzl, rxl, ryl, rzl]
obs_size = [obsl, obsw, obsh]
return obs_centroid_and_rotation, pickle_dir_and_prefix, obs_size
def file_prefix_for_timestamp(parent_dir, data_source, ts=None):
if data_source == "lidar":
return parent_dir + "/lidar_360/" + (ts if ts is not None else '')
elif data_source == "camera":
return parent_dir + "/camera/" + (ts if ts is not None else '')
# ***** main loop *****
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Load training data and ground truths")
parser.add_argument("input_csv_file", type=str, default="data_folders.csv", help="data folder .csv")
parser.add_argument("--dir_prefix", type=str, default="", help="absolute path to folders")
args = parser.parse_args()
input_csv_file = args.input_csv_file
dir_prefix = args.dir_prefix
    try:
        f = open(input_csv_file)
        f.close()
    except IOError:
        print('Unable to read file: %s' % input_csv_file)
        sys.exit()
# determine list of data sources and ground truths to load
    # the data source argument is assumed to be "lidar" here; pass "camera" for camera runs
    obs_centroids, pickle_dir_and_prefix, obs_size = get_data_and_ground_truth(
        input_csv_file, dir_prefix, "lidar")
# generate data in batches
generator = data_generator_train(obs_centroids, obs_size, pickle_dir_and_prefix,
globals.BATCH_SIZE, globals.IMG_HEIGHT, globals.IMG_WIDTH, globals.NUM_CHANNELS,
globals.NUM_CLASSES, randomize=True, use_regression=False)
images, obj_labels = next(generator)
#print car pixels
print("car pixels: ", len(np.where(obj_labels[:, :, 1] == 1)[1]))
print("non-car pixels: ", len(np.where(obj_labels[:, :, 1] == 0)[1]))
| 2.75
| 3
|
liquid/filters/shopify.py
|
victorbnl/liquidpy
| 0
|
12775495
|
"""Provides shopify filters"""
from .manager import FilterManager
# pylint: disable=invalid-name
shopify_filter_manager = FilterManager()
# TODO: color filters
# https://shopify.dev/api/liquid/filters/color-filters
# TODO: font filters
# https://shopify.dev/api/liquid/filters/font-filters
# TODO: html filters
# https://shopify.dev/api/liquid/filters/html-filters
# TODO: media filters
# https://shopify.dev/api/liquid/filters/media-filters
# TODO: metafield filters
# https://shopify.dev/api/liquid/filters/metafield-filters
# TODO: money filters
# https://shopify.dev/api/liquid/filters/money-filters
# TODO: string filters
# https://shopify.dev/api/liquid/filters/string-filters
# TODO: url filters
# https://shopify.dev/api/liquid/filters/url-filters
# TODO: additional filters
# https://shopify.dev/api/liquid/filters/additional-filters
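# Example of how a filter could be attached to this manager; a minimal sketch
# only: the register-decorator usage and the "handleize" filter below are
# assumptions, not part of the implemented API surface yet.
#
# @shopify_filter_manager.register
# def handleize(value):
#     """Lower-case a string and join its words with hyphens."""
#     return "-".join(str(value).lower().split())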
| 1.554688
| 2
|
app/__init__.py
|
MRichardN/Questioner-api
| 0
|
12775496
|
"""
Main file.
"""
import datetime
#Third party import
from flask import Flask
from flask_jwt_extended import JWTManager
#Local import
from instance.config import app_config
from app.api.v1.views.user_views import version1 as usersBlueprint
from app.api.v1.views.meetup_views import version1 as meetupsBlueprint
from app.api.v1.views.question_views import version1 as questionsBlueprint
def create_app(config_name):
""" Create app."""
app = Flask(__name__, instance_relative_config=True)
app.config.from_object(app_config[config_name])
app.config.from_pyfile('config.py')
# register Version1 blueprints
app.register_blueprint(usersBlueprint)
app.register_blueprint(meetupsBlueprint)
app.register_blueprint(questionsBlueprint)
# JWT
app.config['JWT_BLACKLIST_ENABLED'] = True
app.config['JWT_BLACKLIST_TOKEN_CHECKS'] = ['access', 'refresh']
app.config['JWT_SECRET_KEY'] = '<KEY>'
    # flask_jwt_extended reads JWT_ACCESS_TOKEN_EXPIRES (JWT_EXPIRATION_DELTA belongs to flask-jwt)
    app.config['JWT_ACCESS_TOKEN_EXPIRES'] = datetime.timedelta(days=1)
jwt = JWTManager(app)
@jwt.token_in_blacklist_loader
def check_blacklisted(decrypted_token):
from app.api.v1.models.token_model import RevokedTokenModel
jti = decrypted_token['jti']
return RevokedTokenModel().inBlacklist(jti)
return app
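# Minimal usage sketch; the config name "development" is an assumption about
# the keys defined in app_config.
if __name__ == '__main__':
    create_app('development').run(debug=True)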
| 2.09375
| 2
|
examples/games/pong/pong.py
|
zormit/miniworldmaker
| 9
|
12775497
|
from miniworldmaker import *
class PongBoard(PixelBoard):
def on_setup(self):
self.add_background((100, 0, 0, 255))
self.player1 = Paddle((10, 130), width=10, height=80, thickness=0)
self.player2 = Paddle((780, 280), width=10, height=80, thickness=0)
self.ball = Ball((395, 295))
self.physics_property.damping = 1
self.left = Line((0, 0), (0, 600), 5)
self.top = Line((0, 0), (800, 0), 5)
self.right = Line((795, 600), (795, 0), thickness=10)
self.bottom = Line((800, 595), (0, 595), 5)
self.points_left = NumberToken((100, 100), 0, 100)
self.points_left.size = (200, 200)
self.points_right = NumberToken((600, 100), 0, 100)
self.points_right.size = (200, 200)
def on_key_pressed_w(self):
self.player1.move_in_direction("up")
def on_key_pressed_s(self):
self.player1.move_in_direction("down")
def on_key_pressed_u(self):
self.player2.move_in_direction("up")
def on_key_pressed_j(self):
self.player2.move_in_direction("down")
class Line(Line):
def setup_physics(self):
self.physics.mass = 1
self.physics.elasticity = 1
class Paddle(Rectangle):
def setup(self):
self.size = (10, 80)
self.costume.is_rotatable = False
def setup_physics(self):
self.physics.stable = True
self.physics.can_move = True
self.physics.mass = "inf"
self.physics.friction = 0
self.physics.gravity = False
self.physics.elasticity = 1
self.physics.shape_type = "rect"
class Ball(Circle):
def on_setup(self):
self.direction = 30
self.physics.impulse_in_direction(300)
def setup_physics(self):
self.physics.mass = 1
self.physics.elasticity = 1
self.physics.friction = 0
self.physics.shape_type = "circle"
self.physics.gravity = False
self.physics.stable = False
def on_touching_line(self, line, collision):
if line == self.board.left:
self.board.points_right.inc()
if line == self.board.right:
self.board.points_left.inc()
board = PongBoard(800, 600)
board.show(fullscreen=False)
| 3.265625
| 3
|
network/attention.py
|
Coldog2333/DGMN-pytorch
| 0
|
12775498
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from config_file import *
class Identity_TransformerBlock(nn.Module):
def __init__(self):
super(Identity_TransformerBlock, self).__init__()
def forward(self, Q, K, V, episilon=1e-8):
# assert (Q == K and Q == V and K == V)
return Q
class TransformerBlock(nn.Module):
def __init__(self, input_size, is_layer_norm=False):
super(TransformerBlock, self).__init__()
self.is_layer_norm = is_layer_norm
if is_layer_norm:
self.layer_norm1 = nn.LayerNorm(normalized_shape=input_size)
self.layer_norm2 = nn.LayerNorm(normalized_shape=input_size)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(input_size, input_size)
self.linear2 = nn.Linear(input_size, input_size)
self.init_weights()
def init_weights(self):
init.xavier_normal_(self.linear1.weight)
init.xavier_normal_(self.linear2.weight)
init.constant_(self.linear1.bias, 0.0)
init.constant_(self.linear2.bias, 0.0)
        if self.is_layer_norm:
            init.constant_(self.layer_norm1.weight, 1.)
            init.constant_(self.layer_norm1.bias, 0.)
            init.constant_(self.layer_norm2.weight, 1.)
            init.constant_(self.layer_norm2.bias, 0.)
def FFN(self, X):
return self.linear2(self.relu(self.linear1(X)))
def forward(self, Q, K, V, attention_mask=None, episilon=1e-8):
"""
:param Q: (batch_size, max_r_words, embedding_dim)
:param K: (batch_size, max_u_words, embedding_dim)
:param V: (batch_size, max_u_words, embedding_dim)
:return: output: (batch_size, max_r_words, embedding_dim) same size as Q
"""
        attention_mask = torch.zeros(size=(Q.size(0), Q.size(1), K.size(1))) if attention_mask is None else attention_mask
        attention_mask = attention_mask.to(Q.device)  # place the mask on the same device as the inputs
        dk = torch.Tensor([max(1.0, Q.size(-1))]).to(Q.device)
Q_K = Q.bmm(K.permute(0, 2, 1)) / (torch.sqrt(dk) + episilon)
Q_K = Q_K + attention_mask # mask some scores
# (batch_size, max_r_words, max_u_words)
Q_K_score = F.softmax(Q_K, dim=-1)
V_att = Q_K_score.bmm(V)
if self.is_layer_norm:
# (batch_size, max_r_words, embedding_dim)
X = self.layer_norm1(Q + V_att)
output = self.layer_norm2(self.FFN(X) + X)
else:
X = Q + V_att
output = self.FFN(X) + X
return output
class AttentionBlock(nn.Module):
"refer: DGMN codes provided by Zhao"
def __init__(self, args):
self.args = args
super(AttentionBlock, self).__init__()
self.layernorm = nn.LayerNorm(normalized_shape=(args.emb_size))
self.layernorm_ffn = nn.LayerNorm(normalized_shape=(args.emb_size))
self.ffn = nn.Sequential(
nn.Linear(args.emb_size, args.emb_size, bias=True),
nn.ReLU(),
nn.Linear(args.emb_size, args.emb_size, bias=True)
)
self.init_weight()
def init_weight(self):
init.constant_(self.layernorm.weight, 1.)
init.constant_(self.layernorm.bias, 0.)
init.constant_(self.layernorm_ffn.weight, 1.)
init.constant_(self.layernorm_ffn.bias, 0.)
init.xavier_uniform_(self.ffn[0].weight)
init.xavier_uniform_(self.ffn[2].weight)
def attention_dot(self, queries, keys, query_masks, key_masks, episilon=1e-8):
"""
:param queries:
:param keys:
:param query_masks: (B, L_q)
:param key_masks: (B, L_k) e.g. [[1,1,1,0],[1,1,1,1]]
:param episilon:
:return:
"""
sim = torch.einsum('bik,bjk->bij', queries, keys) # [B, L_q, L_k]
scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
sim = sim / (torch.sqrt(scale) + episilon)
# Key Masking
masks = key_masks.unsqueeze(1).repeat(1, queries.shape[1], 1) # (B, L_q, L_k)
paddings = (torch.ones_like(sim) * (-2 ** 32 + 1)).to(self.args.device)
sim = torch.where(masks == 0, paddings, sim) # (B, L_q, L_k)
# Activation
sim = torch.softmax(sim, dim=-1)
# Query Masking
sim = sim * query_masks.unsqueeze(-1)
outputs = torch.einsum('bij,bjk->bik', sim, keys)
return outputs
def feedforward(self, inputs):
outputs = self.ffn(inputs)
outputs = outputs + inputs
outputs = self.layernorm_ffn(outputs)
return outputs
def forward(self, queries, keys, query_masks, key_masks, residual=True, epsilon=1e-8):
outputs = self.attention_dot(queries, keys, query_masks, key_masks, epsilon)
if residual:
outputs = self.layernorm(outputs + queries)
else:
outputs = self.layernorm(outputs)
outputs = self.feedforward(outputs)
return outputs
class NNSubmulti(nn.Module):
def __init__(self, args):
self.args = args
super(NNSubmulti, self).__init__()
self.linear_ff_sim = nn.Sequential(
nn.Linear(in_features=args.emb_size * 2, out_features=100, bias=True),
nn.Tanh(),
nn.Linear(in_features=100, out_features=1, bias=False)
)
self.linear_last = nn.Linear(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)
self.init_weight()
def init_weight(self):
init.xavier_uniform_(self.linear_ff_sim[0].weight)
init.xavier_uniform_(self.linear_ff_sim[2].weight)
init.xavier_uniform_(self.linear_last.weight)
def ff_sim(self, queries, keys):
T_q = queries.shape[1]
T_k = keys.shape[1]
expand_queries = queries.unsqueeze(2).repeat(1, 1, T_k, 1)
expand_keys = keys.unsqueeze(1).repeat(1, T_q, 1, 1)
# TODO: add a vector >> ref: source codes of Xueliang Zhao
features = torch.cat([expand_queries, expand_keys], dim=-1)
outputs = self.linear_ff_sim(features)
outputs = outputs.view(-1, T_q, T_k)
return outputs
def attention_fc(self, queries, keys, query_masks, key_masks):
sim = self.ff_sim(queries, keys) # [B, L_q, L_k]
# Key Masking
masks = key_masks.unsqueeze(1).repeat(1, queries.shape[1], 1) # (B, L_q, L_k)
paddings = torch.ones_like(sim) * (-2 ** 32 + 1)
sim = torch.where(masks == 0, paddings, sim) # (B, L_q, L_k)
# Activation
sim = torch.softmax(sim, dim=-1) # (B, L_q, L_k)
# Query Masking
sim = sim * query_masks.unsqueeze(-1)
# Weighted sum
outputs = torch.einsum('bij,bjk->bik', sim, keys) # (B, T_q, C)
return outputs
def forward(self, queries, keys, query_masks, key_masks):
        keys_attn = self.attention_fc(keys, queries, key_masks, query_masks)  # TODO: check whether queries and keys are swapped here
feature_mul = keys_attn * keys
feature_sub = (keys_attn - keys) * (keys_attn - keys)
feature_last = torch.cat([feature_mul, feature_sub], dim=-1)
feature_last = torch.relu(self.linear_last(feature_last))
return feature_last
class HierarchicalNNSubmulti(nn.Module):
def __init__(self, args):
self.args = args
super(HierarchicalNNSubmulti, self).__init__()
self.linear_last = nn.Linear(in_features=args.emb_size * 2, out_features=args.emb_size, bias=True)
self.init_weight()
def init_weight(self):
init.xavier_uniform_(self.linear_last.weight)
def hierarchical_attention(self, queries, keys, query_masks, key_masks):
L_q = queries.shape[1]
N = keys.shape[1]
sim1 = torch.einsum('bik,bnjk->binj', queries, keys) # [B, L_q, N, L_k]
# scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
# scale = torch.sqrt(scale)
# sim1 = sim1 / scale
masks = key_masks.unsqueeze(1).repeat(1, L_q, 1, 1) # [B, L_q, N, L_k]
paddings = torch.ones_like(sim1) * (-2 ** 32 + 1)
sim1 = torch.where(masks == 0, paddings, sim1) # [B, L_q, N, L_k]
sim1 = torch.softmax(sim1, dim=-1)
masks = query_masks.unsqueeze(2).repeat(1, 1, N)
sim1 = sim1 * masks.unsqueeze(-1) # [B, L_q, N, L_k]
outputs1 = torch.einsum('binj,bnjk->bink', sim1, keys)
sim2 = torch.einsum('bik,bink->bin', queries, outputs1) # [B, L_k, N]
# # Scale
# scale = torch.Tensor([max(1.0, queries.size(-1))]).to(self.args.device)
# scale = torch.sqrt(scale)
# sim2 = sim2 / scale
masks = torch.sign(torch.sum(key_masks, dim=-1)) # [B, N]
masks = masks.unsqueeze(1).repeat(1, L_q, 1) # [B, L_q, N]
paddings = torch.ones_like(sim2) * (-2 ** 32 + 1)
sim2 = torch.where(masks == 0, paddings, sim2)
sim2 = torch.softmax(sim2, dim=-1)
sim2 = sim2 * query_masks.unsqueeze(-1)
outputs2 = torch.einsum('bin,bink->bik', sim2, outputs1)
return outputs2
def forward(self, queries, keys, query_masks, key_masks):
        keys_attn = self.hierarchical_attention(keys, queries, key_masks, query_masks)  # TODO: check whether queries and keys are swapped here
feature_mul = keys_attn * keys
feature_sub = (keys_attn - keys) * (keys_attn - keys)
feature_last = torch.cat([feature_mul, feature_sub], dim=-1)
feature_last = torch.relu(self.linear_last(feature_last))
return feature_last
class FusionBlock(nn.Module):
def __init__(self, input_size, is_layer_norm=False):
super(FusionBlock, self).__init__()
self.is_layer_norm = is_layer_norm
if is_layer_norm:
self.layer_norm1 = nn.LayerNorm(normalized_shape=input_size)
self.layer_norm2 = nn.LayerNorm(normalized_shape=input_size)
self.relu = nn.ReLU()
self.linear1 = nn.Linear(input_size, input_size)
self.linear2 = nn.Linear(input_size, input_size)
self.init_weights()
def init_weights(self):
init.xavier_normal_(self.linear1.weight)
init.xavier_normal_(self.linear2.weight)
init.constant_(self.linear1.bias, 0.0)
init.constant_(self.linear2.bias, 0.0)
        if self.is_layer_norm:
            init.constant_(self.layer_norm1.weight, 1.)
            init.constant_(self.layer_norm1.bias, 0.)
            init.constant_(self.layer_norm2.weight, 1.)
            init.constant_(self.layer_norm2.bias, 0.)
def FFN(self, X):
return self.linear2(self.relu(self.linear1(X)))
def forward(self, Q, K, V, attention_mask=None, episilon=1e-8, output_score=False):
"""
:param Q: (batch size, n_turn, max_u_words, embedding_dim)
:param K: (batch size, n_doc, max_d_words, embedding_dim)
:param V: (batch size, n_doc, max_d_words, embedding_dim)
:param episilon:
:return: output: (batch size, n_turn, n_doc, max_u_words, embedding_dim)
"""
attention_mask = torch.zeros(size=(Q.size(0), Q.size(1), K.size(1), Q.size(2), K.size(2))) if attention_mask is None else attention_mask
        attention_mask = attention_mask.to(Q.device)  # place the mask on the same device as the inputs
batch_size, n_turn, max_u_words, embedding_dim = Q.shape
batch_size, n_doc, max_d_words, embedding_dim = K.shape
        dk = torch.Tensor([max(1.0, Q.size(-1))]).to(Q.device)
Q_K = torch.einsum('btue,bdpe->btdup', Q, K) / (torch.sqrt(dk) + episilon)
Q_K = Q_K + attention_mask
Q_K_score = F.softmax(Q_K, dim=-1)
V_att = torch.einsum('btdup,bdpe->btdue', Q_K_score, V)
Q_repeat = Q.view(batch_size, n_turn, 1, max_u_words, embedding_dim).repeat(1, 1, n_doc, 1, 1)
X = Q_repeat + V_att
if self.is_layer_norm:
X = self.layer_norm1(X)
output = self.layer_norm2(self.FFN(X) + X)
else:
output = self.FFN(X) + X
if output_score:
return output, Q_K_score
else:
return output
class MLP_Attention(nn.Module):
def __init__(self, input_size, hidden_size):
super(MLP_Attention, self).__init__()
self.linear_X = nn.Linear(input_size, hidden_size, bias=True)
self.linear_ref = nn.Linear(input_size, hidden_size, bias=True)
self.v = nn.Linear(hidden_size, out_features=1)
def init_weight(self):
init.xavier_normal_(self.linear_X.weight)
init.xavier_normal_(self.linear_ref.weight)
init.xavier_normal_(self.v.weight)
        init.constant_(self.linear_X.bias, 0.0)
        init.constant_(self.linear_ref.bias, 0.0)
init.constant_(self.v.bias, 0.0)
def forward(self, X, ref):
batch_size, n_X, _ = X.shape
_, n_ref, _ = ref.shape
stacking_X = self.linear_X(X).view(batch_size, n_X, 1, -1).repeat(1, 1, n_ref, 1)
stacking_ref = self.linear_ref(ref).view(batch_size, 1, n_ref, -1).repeat(1, n_X, 1, 1)
out = self.v(torch.tanh(stacking_X + stacking_ref)).squeeze()
attention_scores = torch.softmax(out, dim=1)
weighted_X = torch.einsum('bxe,bxr->bre', X, attention_scores)
return weighted_X
if __name__ == '__main__':
mlp_attention = MLP_Attention(300, 128)
X = torch.rand(16, 25, 300)
ref = torch.rand(16, 25, 300)
out = mlp_attention(X, ref)
print(out.shape)
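    # Minimal additional smoke test: self-attention with Q == K == V through
    # TransformerBlock (illustrative sketch only; shapes follow the docstring above).
    transformer = TransformerBlock(input_size=300, is_layer_norm=True)
    att_out = transformer(X, X, X)
    print(att_out.shape)  # expected: torch.Size([16, 25, 300])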
| 2.75
| 3
|
apps/member/pipeline.py
|
TransparentHealth/smh-organization
| 3
|
12775499
|
from django.shortcuts import reverse
from apps.notifications.models import Notification
def connection_notifications(backend, user, response, *args, **kwargs):
if backend.name in ['sharemyhealth']:
# Dismiss the notification prompting the user to connect
notifications = Notification.objects.filter(
notify_id=user.id,
actor_id=user.id,
actions__contains=f'''"url": "{reverse('social:begin', args=[backend.name])}"''',
)
for notification in notifications:
notification.dismissed = True
notification.save()
# Dismiss any notifications related to this backend
action_url = reverse('social:disconnect', args=[backend.name])
notifications = Notification.objects.filter(
notify_id=user.id,
actor_id=user.id,
actions__contains=f'''"url": "{action_url}"''',
)
for notification in notifications:
notification.dismissed = True
notification.save()
# Create a notification that the user connected to the backend
Notification.objects.create(
notify=user,
actor=user,
actions=[{'url': action_url, 'text': 'Disconnect'}],
message='You connected to <b>HIXNY</b>',
)
def disconnection_notifications(backend, user, *args, **kwargs):
if backend.name in ['sharemyhealth']:
# Dismiss any notifications related to this backend
action_url = reverse('social:disconnect', args=[backend.name])
notifications = Notification.objects.filter(
notify_id=user.id,
actor_id=user.id,
actions__contains=f'''"url": "{action_url}"''',
)
for notification in notifications:
notification.dismissed = True
notification.save()
| 2.296875
| 2
|
evaluation/eval_shapenet_seg.py
|
vistart/PointCNN
| 1
|
12775500
|
#!/usr/bin/python3
"""Calculate IoU of part segmentation task."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import argparse
import data_utils
import numpy as np
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--folder_gt', '-g', help='Path to ground truth folder', required=True)
parser.add_argument('--folder_pred', '-p', help='Path to prediction folder', required=True)
parser.add_argument('--folder_data', '-d', help='Path to point cloud data folder')
parser.add_argument('--part_avg', '-a', action='store_true', help='Use part level average')
args = parser.parse_args()
print(args)
category_id_to_name = {
2691156: 'Airplane',
2773838: 'Bag',
2954340: 'Cap',
2958343: 'Car',
3001627: 'Chair',
3261776: 'Earphone',
3467517: 'Guitar',
3624134: 'Knife',
3636649: 'Lamp',
3642806: 'Laptop',
3790512: 'Motorbike',
3797390: 'Mug',
3948459: 'Pistol',
4099429: 'Rocket',
4225987: 'Skateboard',
4379243: 'Table'}
categories = sorted(os.listdir(args.folder_gt))
label_min = sys.maxsize
for category in categories:
category_folder_gt = os.path.join(args.folder_gt, category)
filenames = sorted(os.listdir(category_folder_gt))
for filename in filenames:
filepath_gt = os.path.join(category_folder_gt, filename)
label_gt = np.loadtxt(filepath_gt).astype(np.int32)
label_min = min(label_min, np.amin(label_gt))
IoU = 0.0
total_num = 0
for category in categories:
category_folder_gt = os.path.join(args.folder_gt, category)
category_folder_pred = os.path.join(args.folder_pred, category)
if args.folder_data:
category_folder_data = os.path.join(args.folder_data, category)
category_folder_err = os.path.join(args.folder_pred+'_err_ply', category)
IoU_category = 0.0
filenames = sorted(os.listdir(category_folder_gt))
for filename in filenames:
filepath_gt = os.path.join(category_folder_gt, filename)
filepath_pred = os.path.join(category_folder_pred, filename)
label_gt = np.loadtxt(filepath_gt).astype(np.int32) - label_min
label_pred = np.loadtxt(filepath_pred).astype(np.int32)
if args.folder_data:
filepath_data = os.path.join(category_folder_data, filename[:-3]+'pts')
filepath_err = os.path.join(category_folder_err, filename[:-3] + 'ply')
coordinates = [[float(value) for value in xyz.split(' ')]
for xyz in open(filepath_data, 'r') if len(xyz.split(' ')) == 3]
assert (label_gt.shape[0] == len(coordinates))
data_utils.save_ply_property(np.array(coordinates), (label_gt == label_pred), 6, filepath_err)
if args.part_avg:
label_max = np.amax(label_gt)
IoU_part = 0.0
for label_idx in range(label_max+1):
locations_gt = (label_gt == label_idx)
locations_pred = (label_pred == label_idx)
I_locations = np.logical_and(locations_gt, locations_pred)
U_locations = np.logical_or(locations_gt, locations_pred)
I = np.sum(I_locations) + np.finfo(np.float32).eps
U = np.sum(U_locations) + np.finfo(np.float32).eps
IoU_part = IoU_part + I/U
IoU_sample = IoU_part / (label_max+1)
else:
label_correct_locations = (label_gt == label_pred)
IoU_sample = np.sum(label_correct_locations) / label_gt.size
IoU_category = IoU_category + IoU_sample
IoU = IoU + IoU_category
IoU_category = IoU_category / len(filenames)
if category.isdigit():
print("IoU of %s: " % (category_id_to_name[int(category)]), IoU_category)
else:
print("IoU of %s: " % category, IoU_category)
total_num = total_num + len(filenames)
IoU = IoU / total_num
print("IoU: ", IoU)
if __name__ == '__main__':
main()
| 2.265625
| 2
|
generateDataSet.py
|
zclyne/2048Game
| 1
|
12775501
|
GAME_SIZE = 4
SCORE_TO_WIN = 2048
from game2048.game import Game
from game2048.agents import ExpectiMaxAgent
# save the dataset
f_256 = open("dataset_256.txt", "w")
f_512 = open("dataset_512.txt", "w")
f_1024 = open("dataset_1024.txt", "w")
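# Each recorded sample is written as 17 lines: the 16 board cells in row-major
# order followed by the ExpectiMaxAgent's chosen direction.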
for i in range(30000):
print("i = ", i)
game = Game(size=GAME_SIZE)
agent = ExpectiMaxAgent(game=game)
while True:
direction = agent.step()
        if game.end:
            break
        maxNum = 0
        for row in range(4):
            for col in range(4):
                if game.board[row, col] > maxNum:
                    maxNum = game.board[row, col]
        if maxNum == 2048:  # the board reached 2048: finish this game and start the next one
            break
        if maxNum <= 256:
            for row in range(4):
                for col in range(4):
                    print(game.board[row, col], file=f_256)
            print(direction, file=f_256)
        elif maxNum == 512:
            for row in range(4):
                for col in range(4):
                    print(game.board[row, col], file=f_512)
            print(direction, file=f_512)
        elif maxNum == 1024:
            for row in range(4):
                for col in range(4):
                    print(game.board[row, col], file=f_1024)
            print(direction, file=f_1024)
        game.move(direction)
| 3.421875
| 3
|
scripts/py/bsg_ast_wire_reg_decl_opt_inplace.py
|
developandplay/bsg_sv2v
| 16
|
12775502
|
'''
bsg_ast_wire_reg_decl_opt_inplace.py
This optimization pass takes all the wires and regs defined in a module and
consolidates them into a WireList or RegList, respectively. WireList and
RegList are new pyverilog AST nodes that represent comma-separated collections
of wire and reg declarations.
'''
import logging
from pyverilog.vparser.ast import *
# ast_wire_reg_decl_opt_inplace( node )
#
# This optimization pass takes all the wires and regs in the module definition
# and consolidates them into WireLists and RegLists. This makes the codegen
# print out the wire and reg declarations as comma-separated collections of
# wires and regs, making the output netlist much cleaner.
#
def ast_wire_reg_decl_opt_inplace( node ):
# Find modules definitions
if type(node) == ModuleDef:
ports = list() ;# List of all port declarations (input and output statements)
wires = list() ;# List of all wire datatype declarations
regs = list() ;# List of all reg datatype declarations
asts = list() ;# All other ast inside the module (everything else)
# Split up all items into lists of ports, wires, regs and other asts
for item in node.items:
if type(item) == Decl:
assert len(item.list) == 1
if type(item.list[0]) == Output or type(item.list[0]) == Input:
ports.append(item.list[0])
elif type(item.list[0]) == Wire:
wires.append(item.list[0])
elif type(item.list[0]) == Reg:
regs.append(item.list[0])
else:
asts.append(item.list[0])
else:
asts.append(item)
# Group wires based on width and sign
wire_groups = []
top_index = 0
while top_index < len(wires):
ref_wire = wires[top_index]
group = [ref_wire]
bot_index = top_index + 1
while bot_index < len(wires):
if ref_wire.signed == wires[bot_index].signed and ref_wire.width == wires[bot_index].width:
group.append(wires.pop(bot_index))
else:
bot_index += 1
wire_groups.append(group)
top_index += 1
# Create a WireList for each group of wires
wire_lists = []
for group in wire_groups:
wire_lists.append( WireList( [w.name for w in group], group[0].width, group[0].signed ) )
# Group regs based on width and sign
reg_groups = []
top_index = 0
while top_index < len(regs):
ref_reg = regs[top_index]
group = [ref_reg]
bot_index = top_index + 1
while bot_index < len(regs):
if ref_reg.signed == regs[bot_index].signed and ref_reg.width == regs[bot_index].width:
group.append(regs.pop(bot_index))
else:
bot_index += 1
reg_groups.append(group)
top_index += 1
# Create a RegList for each group of regs
reg_lists = []
for group in reg_groups:
reg_lists.append( RegList( [w.name for w in group], group[0].width, group[0].signed ) )
# Reconstruct the new items for the module
node.items = [Decl([p]) for p in ports if p] \
+ [Decl([w]) for w in wire_lists if w] \
+ [Decl([r]) for r in reg_lists if r] \
+ [a for a in asts if a]
    # Recursively walk down all other nodes
else:
for c in node.children():
ast_wire_reg_decl_opt_inplace(c)
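# Minimal usage sketch (assumes the bsg_sv2v copy of pyverilog, which defines
# the WireList and RegList AST nodes, is importable; 'design.v' is a placeholder):
if __name__ == '__main__':
    from pyverilog.vparser.parser import parse
    ast, _directives = parse(['design.v'])
    ast_wire_reg_decl_opt_inplace(ast)
    ast.show()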
| 2.765625
| 3
|
InventorySystem/wood/apps.py
|
guyueming/PythonWeb
| 0
|
12775503
|
from django.apps import AppConfig
class WoodConfig(AppConfig):
default_auto_field = 'django.db.models.BigAutoField'
name = 'wood'
| 1.390625
| 1
|
rules_approval.py
|
benjaminkrenn/abcvoting
| 0
|
12775504
|
# Implementations of approval-based multi-winner voting rules
from __future__ import print_function
import math
import sys
from itertools import combinations
try:
from gmpy2 import mpq as Fraction
except ImportError:
from fractions import Fraction
from rules_approval_ilp import compute_monroe_ilp, compute_thiele_methods_ilp,\
compute_optphragmen_ilp, compute_minimaxav_ilp
from committees import sort_committees,\
enough_approved_candidates,\
print_committees
import score_functions as sf
########################################################################
MWRULES = {
"av": "Approval Voting",
"sav": "Satisfaction Approval Voting",
"pav-ilp": "Proportional Approval Voting (PAV) via ILP",
"pav-noilp": "Proportional Approval Voting (PAV) via branch-and-bound",
"seqpav": "Sequential Proportional Approval Voting (seq-PAV)",
"revseqpav": "Reverse Sequential Prop. Approval Voting (revseq-PAV)",
"slav-ilp": "Sainte-Lague Approval Voting (SLAV) via ILP",
"slav-noilp": "Sainte-Lague Approval Voting (SLAV) via branch-and-bound",
"seqslav": "Sequential Sainte-Lague Approval Voting (seq-SLAV)",
"phrag": "Phragmen's sequential rule (seq-Phragmen)",
"optphrag": "Phragmen's optimization rule (opt-Phragmen)",
"monroe-ilp": "Monroe's rule via ILP",
"monroe-noilp": "Monroe's rule via flow algorithm",
"greedy-monroe": "Greedy Monroe rule",
"cc-ilp": "Chamberlin-Courant (CC) via ILP",
"cc-noilp": "Chamberlin-Courant (CC) via branch-and-bound",
"seqcc": "Sequential Chamberlin-Courant (seq-CC)",
"revseqcc": "Reverse Sequential Chamberlin-Courant (revseq-CC)",
"minimaxav-noilp": "Minimax Approval Voting via brute-force",
"minimaxav-ilp": "Minimax Approval Voting via ILP",
"rule-x": "Rule X",
"phragmen-enestroem": "Phragmen's first method / Enestroeom’s method",
}
def compute_rule(name, profile, committeesize, resolute=False):
"""Returns the list of winning committees according to the named rule"""
if name == "seqpav":
return compute_seqpav(profile, committeesize, resolute=resolute)
elif name == "revseqpav":
return compute_revseqpav(profile, committeesize, resolute=resolute)
elif name == "av":
return compute_av(profile, committeesize, resolute=resolute)
elif name == "sav":
return compute_sav(profile, committeesize, resolute=resolute)
elif name == "pav-ilp":
return compute_pav(profile, committeesize,
ilp=True, resolute=resolute)
elif name == "pav-noilp":
return compute_pav(profile, committeesize,
ilp=False, resolute=resolute)
elif name == "seqslav":
return compute_seqslav(profile, committeesize, resolute=resolute)
elif name == "slav-ilp":
return compute_slav(profile, committeesize,
ilp=True, resolute=resolute)
elif name == "slav-noilp":
return compute_slav(profile, committeesize,
ilp=False, resolute=resolute)
elif name == "phrag":
return compute_seqphragmen(profile, committeesize, resolute=resolute)
elif name == "monroe-ilp":
return compute_monroe(profile, committeesize,
ilp=True, resolute=resolute)
elif name == "monroe-noilp":
return compute_monroe(profile, committeesize,
ilp=False, resolute=resolute)
elif name == "greedy-monroe":
return compute_greedy_monroe(profile, committeesize)
elif name == "cc-ilp":
return compute_cc(profile, committeesize,
ilp=True, resolute=resolute)
elif name == "cc-noilp":
return compute_cc(profile, committeesize,
ilp=False, resolute=resolute)
if name == "seqcc":
return compute_seqcc(profile, committeesize, resolute=resolute)
elif name == "revseqcc":
return compute_revseqcc(profile, committeesize, resolute=resolute)
elif name == "minimaxav-noilp":
return compute_minimaxav(profile, committeesize,
ilp=False, resolute=resolute)
elif name == "minimaxav-ilp":
return compute_minimaxav(profile, committeesize,
ilp=True, resolute=resolute)
elif name == "optphrag":
return compute_optphragmen_ilp(profile, committeesize,
resolute=resolute)
elif name == "rule-x":
return compute_rule_x(profile, committeesize, resolute=resolute)
elif name == "phragmen-enestroem":
return compute_phragmen_enestroem(profile, committeesize,
resolute=resolute)
else:
raise NotImplementedError("voting method " + str(name)
+ " not known")
def allrules(profile, committeesize, ilp=True, include_resolute=False):
"""Prints the winning committees for all implemented rules"""
for rule in list(MWRULES.keys()):
if not ilp and "-ilp" in rule:
continue
print(MWRULES[rule] + ":")
com = compute_rule(rule, profile, committeesize)
print_committees(com)
if include_resolute:
print(MWRULES[rule] + " (with tie-breaking):")
com = compute_rule(rule, profile, committeesize, resolute=True)
print_committees(com)
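# Example usage; a sketch only: the Profile constructor and add_preferences
# call are assumptions about the accompanying preferences module.
#
#     from preferences import Profile
#     profile = Profile(num_cand=4)
#     profile.add_preferences([[0, 1], [0, 1], [1, 2], [3]])
#     print_committees(compute_rule("seqpav", profile, committeesize=2))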
########################################################################
# computes arbitrary Thiele methods via branch-and-bound
def compute_thiele_methods_branchandbound(profile, committeesize,
scorefct_str, resolute=False):
enough_approved_candidates(profile, committeesize)
scorefct = sf.get_scorefct(scorefct_str, committeesize)
best_committees = []
init_com = compute_seq_thiele_resolute(profile, committeesize,
scorefct_str)
best_score = sf.thiele_score(profile, init_com[0], scorefct_str)
part_coms = [[]]
while part_coms:
part_com = part_coms.pop(0)
# potential committee, check if at least as good
# as previous best committee
if len(part_com) == committeesize:
score = sf.thiele_score(profile, part_com, scorefct_str)
if score == best_score:
best_committees.append(part_com)
elif score > best_score:
best_committees = [part_com]
best_score = score
else:
if len(part_com) > 0:
largest_cand = part_com[-1]
else:
largest_cand = -1
missing = committeesize - len(part_com)
marg_util_cand = sf.additional_thiele_scores(profile, part_com,
scorefct)
upper_bound = (
sum(sorted(marg_util_cand[largest_cand + 1:])[-missing:])
+ sf.thiele_score(profile, part_com, scorefct_str))
if upper_bound >= best_score:
for c in range(largest_cand + 1,
profile.num_cand - missing + 1):
part_coms.insert(0, part_com + [c])
committees = sort_committees(best_committees)
if resolute:
return [committees[0]]
else:
return committees
# Sequential PAV
def compute_seqpav(profile, committeesize, resolute=False):
"""Returns the list of winning committees according sequential PAV"""
if resolute:
return compute_seq_thiele_resolute(profile, committeesize, 'pav')
else:
return compute_seq_thiele_methods(profile, committeesize, 'pav')
# Sequential SLAV
def compute_seqslav(profile, committeesize, resolute=False):
"""Returns the list of winning committees according sequential SLAV"""
if resolute:
return compute_seq_thiele_resolute(profile, committeesize, 'slav')
else:
return compute_seq_thiele_methods(profile, committeesize, 'slav')
# Reverse Sequential PAV
def compute_revseqpav(profile, committeesize, resolute=False):
if resolute:
return compute_revseq_thiele_methods_resolute(profile,
committeesize, 'pav')
else:
return compute_revseq_thiele_methods(profile, committeesize, 'pav')
# Sequential Chamberlin-Courant
def compute_seqcc(profile, committeesize, resolute=False):
"""Returns the list of winning committees according to sequential CC"""
if resolute:
return compute_seq_thiele_resolute(profile, committeesize, 'cc')
else:
return compute_seq_thiele_methods(profile, committeesize, 'cc')
# Reverse Sequential Chamberlin-Courant
def compute_revseqcc(profile, committeesize, resolute=False):
if resolute:
return compute_revseq_thiele_methods_resolute(profile, committeesize,
'cc')
else:
return compute_revseq_thiele_methods(profile, committeesize, 'cc')
# Satisfaction Approval Voting (SAV)
def compute_sav(profile, committeesize, resolute=False):
return compute_av(profile, committeesize, resolute, sav=True)
# Approval Voting (AV)
def compute_av(profile, committeesize, resolute=False, sav=False):
"""Returns the list of winning committees according to Approval Voting"""
enough_approved_candidates(profile, committeesize)
appr_scores = [0] * profile.num_cand
for pref in profile.preferences:
for cand in pref.approved:
if sav:
# Satisfaction Approval Voting
appr_scores[cand] += Fraction(pref.weight, len(pref.approved))
else:
# (Classic) Approval Voting
appr_scores[cand] += pref.weight
# smallest score to be in the committee
cutoff = sorted(appr_scores)[-committeesize]
certain_cand = [c for c in range(profile.num_cand)
if appr_scores[c] > cutoff]
possible_cand = [c for c in range(profile.num_cand)
if appr_scores[c] == cutoff]
missing = committeesize - len(certain_cand)
if resolute:
return sort_committees([(certain_cand + possible_cand[:missing])])
else:
return sort_committees([(certain_cand + list(selection))
for selection
in combinations(possible_cand, missing)])
# Sequential Thiele methods (non-resolute)
def compute_seq_thiele_methods(profile, committeesize, scorefct_str):
enough_approved_candidates(profile, committeesize)
scorefct = sf.get_scorefct(scorefct_str, committeesize)
comm_scores = {(): 0}
# build committees starting with the empty set
for _ in range(0, committeesize):
comm_scores_next = {}
for committee, score in comm_scores.items():
# marginal utility gained by adding candidate to the committee
additional_score_cand = sf.additional_thiele_scores(
profile, committee, scorefct)
for c in range(profile.num_cand):
if additional_score_cand[c] >= max(additional_score_cand):
next_comm = tuple(sorted(committee + (c,)))
comm_scores_next[next_comm] = (comm_scores[committee]
+ additional_score_cand[c])
# remove suboptimal committees
comm_scores = {}
cutoff = max(comm_scores_next.values())
for com, score in comm_scores_next.items():
if score >= cutoff:
comm_scores[com] = score
return sort_committees(list(comm_scores.keys()))
# Sequential Thiele methods (resolute)
def compute_seq_thiele_resolute(profile, committeesize, scorefct_str):
enough_approved_candidates(profile, committeesize)
scorefct = sf.get_scorefct(scorefct_str, committeesize)
committee = []
# build committees starting with the empty set
for _ in range(0, committeesize):
additional_score_cand = sf.additional_thiele_scores(
profile, committee, scorefct)
next_cand = additional_score_cand.index(max(additional_score_cand))
committee.append(next_cand)
return [sorted(committee)]
# required for computing Reverse Sequential Thiele methods
def __least_relevant_cands(profile, comm, utilityfct):
# marginal utility gained by adding candidate to the committee
marg_util_cand = [0] * profile.num_cand
for pref in profile.preferences:
for c in pref.approved:
satisfaction = len(pref.approved.intersection(comm))
marg_util_cand[c] += pref.weight * utilityfct(satisfaction)
for c in range(profile.num_cand):
if c not in comm:
# do not choose candidates that already have been removed
marg_util_cand[c] = max(marg_util_cand) + 1
# find smallest elements in marg_util_cand and return indices
return ([cand for cand in range(profile.num_cand)
if marg_util_cand[cand] == min(marg_util_cand)],
min(marg_util_cand))
# Reverse Sequential Thiele methods without resolute
def compute_revseq_thiele_methods(profile, committeesize, scorefct_str):
enough_approved_candidates(profile, committeesize)
scorefct = sf.get_scorefct(scorefct_str, committeesize)
allcandcomm = tuple(range(profile.num_cand))
comm_scores = {allcandcomm: sf.thiele_score(profile, allcandcomm,
scorefct_str)}
for _ in range(0, profile.num_cand - committeesize):
comm_scores_next = {}
for committee, score in comm_scores.items():
cands_to_remove, score_reduction = \
__least_relevant_cands(profile, committee, scorefct)
for c in cands_to_remove:
next_comm = tuple(set(committee) - set([c]))
comm_scores_next[next_comm] = score - score_reduction
# remove suboptimal committees
comm_scores = {}
cutoff = max(comm_scores_next.values())
for com, score in comm_scores_next.items():
if score >= cutoff:
comm_scores[com] = score
return sort_committees(list(comm_scores.keys()))
# Reverse Sequential Thiele methods with resolute
def compute_revseq_thiele_methods_resolute(profile, committeesize,
scorefct_str):
enough_approved_candidates(profile, committeesize)
scorefct = sf.get_scorefct(scorefct_str, committeesize)
committee = set(range(profile.num_cand))
for _ in range(0, profile.num_cand - committeesize):
cands_to_remove, _ = __least_relevant_cands(profile, committee,
scorefct)
committee.remove(cands_to_remove[0])
return [sorted(list(committee))]
# Phragmen's Sequential Rule
def compute_seqphragmen(profile, committeesize, resolute=False):
"""Returns the list of winning committees
according to sequential Phragmen"""
enough_approved_candidates(profile, committeesize)
load = {v: 0 for v in profile.preferences}
comm_loads = {(): load}
approvers_weight = {}
for c in range(profile.num_cand):
approvers_weight[c] = sum(v.weight
for v in profile.preferences
if c in v.approved)
# build committees starting with the empty set
for _ in range(0, committeesize):
comm_loads_next = {}
for committee, load in comm_loads.items():
approvers_load = {}
for c in range(profile.num_cand):
approvers_load[c] = sum(v.weight * load[v]
for v in profile.preferences
if c in v.approved)
new_maxload = [Fraction(approvers_load[c] + 1, approvers_weight[c])
if approvers_weight[c] > 0 else committeesize + 1
for c in range(profile.num_cand)]
for c in range(profile.num_cand):
if c in committee:
new_maxload[c] = sys.maxsize
for c in range(profile.num_cand):
if new_maxload[c] <= min(new_maxload):
new_load = {}
for v in profile.preferences:
if c in v.approved:
new_load[v] = new_maxload[c]
else:
new_load[v] = load[v]
comm_loads_next[tuple(sorted(committee + (c,)))] = new_load
# remove suboptimal committees
comm_loads = {}
cutoff = min([max(load.values()) for load in comm_loads_next.values()])
for com, load in comm_loads_next.items():
if max(load.values()) <= cutoff:
comm_loads[com] = load
if resolute:
committees = sort_committees(list(comm_loads.keys()))
comm = tuple(committees[0])
comm_loads = {comm: comm_loads[comm]}
committees = sort_committees(list(comm_loads.keys()))
if resolute:
return [committees[0]]
else:
return committees
# Minimax Approval Voting
def compute_minimaxav(profile, committeesize, ilp=True, resolute=False):
"""Returns the list of winning committees according to Minimax AV"""
if ilp:
return compute_minimaxav_ilp(profile, committeesize, resolute)
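    # Hamming distance between two approval sets over the given candidate list,
    # e.g. hamming({0, 1}, {1, 2}, range(3)) == 2.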
def hamming(a, b, elements):
diffs = 0
for x in elements:
if (x in a and x not in b) or (x in b and x not in a):
diffs += 1
return diffs
def mavscore(committee, profile):
score = 0
for vote in profile.preferences:
hamdistance = hamming(vote.approved, committee,
list(range(profile.num_cand)))
if hamdistance > score:
score = hamdistance
return score
enough_approved_candidates(profile, committeesize)
opt_committees = []
opt_mavscore = profile.num_cand + 1
for comm in combinations(list(range(profile.num_cand)), committeesize):
score = mavscore(comm, profile)
if score < opt_mavscore:
opt_committees = [comm]
opt_mavscore = score
elif mavscore(comm, profile) == opt_mavscore:
opt_committees.append(comm)
opt_committees = sort_committees(opt_committees)
if resolute:
return [opt_committees[0]]
else:
return sort_committees(opt_committees)
# Proportional Approval Voting
def compute_pav(profile, committeesize, ilp=True, resolute=False):
"""Returns the list of winning committees according to Proportional AV"""
if ilp:
return compute_thiele_methods_ilp(profile, committeesize,
'pav', resolute)
else:
return compute_thiele_methods_branchandbound(profile, committeesize,
'pav', resolute)
# Sainte-Lague Approval Voting
def compute_slav(profile, committeesize, ilp=True, resolute=False):
"""Returns the list of winning committees according to Proportional AV"""
if ilp:
return compute_thiele_methods_ilp(profile, committeesize,
'slav', resolute)
else:
return compute_thiele_methods_branchandbound(profile, committeesize,
'slav', resolute)
# Chamberlin-Courant
def compute_cc(profile, committeesize, ilp=True, resolute=False):
"""Returns the list of winning committees
    according to Chamberlin-Courant"""
if ilp:
return compute_thiele_methods_ilp(profile, committeesize,
'cc', resolute)
else:
return compute_thiele_methods_branchandbound(profile, committeesize,
'cc', resolute)
# Monroe's rule
def compute_monroe(profile, committeesize, ilp=True, resolute=False):
"""Returns the list of winning committees according to Monroe's rule"""
if ilp:
return compute_monroe_ilp(profile, committeesize, resolute)
else:
return compute_monroe_bruteforce(profile, committeesize, resolute)
# Monroe's rule, computed via (brute-force) matching
def compute_monroe_bruteforce(profile, committeesize,
resolute=False, flowbased=True):
"""Returns the list of winning committees via brute-force Monroe's rule"""
enough_approved_candidates(profile, committeesize)
if not profile.has_unit_weights():
raise Exception("Monroe is only defined for unit weights (weight=1)")
if profile.totalweight() % committeesize != 0 or flowbased:
monroescore = sf.monroescore_flowbased
else:
monroescore = sf.monroescore_matching
opt_committees = []
opt_monroescore = -1
for comm in combinations(list(range(profile.num_cand)), committeesize):
score = monroescore(profile, comm)
if score > opt_monroescore:
opt_committees = [comm]
opt_monroescore = score
elif monroescore(profile, comm) == opt_monroescore:
opt_committees.append(comm)
opt_committees = sort_committees(opt_committees)
if resolute:
return [opt_committees[0]]
else:
return opt_committees
def compute_greedy_monroe(profile, committeesize):
""""Returns the winning committee of the greedy monroe.
Always selects the candidate with the highest approval.
Always removes the first n/k (rounding depends) voters that approve
with the selected candidate. (voter sorted by their rankings)
"""
enough_approved_candidates(profile, committeesize)
if not profile.has_unit_weights():
raise Exception("Greedy Monroe is only defined for unit weights"
+ " (weight=1)")
v = list(enumerate(list(profile.preferences)))
# list of tuples (nr, Preferences)
# sorted by sorted approved list of preferences
voters = sorted(v, key=lambda p: sorted(p[1].approved))
n = len(voters) # number of voters
cands = set(range(profile.num_cand))
not_s, committee = (voters, set()) # not_s .. not satisfied voters
for t in range(1, committeesize+1):
remaining_cands = cands - committee
approval = {c: 0 for c in remaining_cands}
for nr, voter in not_s:
for c in voter.approved:
if c in remaining_cands:
approval[c] += 1
max_approval = max(approval.values())
winner = [c for c in remaining_cands
if approval[c] == max_approval][0]
# round how many are removed, either up or down
if t <= n - committeesize * math.floor(n / committeesize):
to_remove = math.ceil(float(n) / committeesize)
else:
to_remove = math.floor(n / committeesize)
# not more than the voters that approve
# the candidate can be removed
to_remove = min(max_approval, to_remove)
next_voters = []
for nr, voter in not_s:
if to_remove > 0 and winner in voter.approved:
to_remove -= 1
else:
next_voters.append((nr, voter))
not_s = next_voters
committee.add(winner)
return sort_committees([committee])
def compute_rule_x(profile, committeesize, resolute=False):
"""Returns the list of winning candidates according to rule x.
But rule x does stop if not enough budget is there to finance a
candidate. As this is not optimal the committee is filled with the
candidates that have the most remaining budget as support.
Rule from:
https://arxiv.org/pdf/1911.11747.pdf (Page 7)"""
enough_approved_candidates(profile, committeesize)
if not profile.has_unit_weights():
raise Exception("Rule X is only defined \
for unit weights (weight=1)")
num_voters = len(profile.preferences)
price = Fraction(num_voters, committeesize)
start_budget = {v: Fraction(1, 1) for v in range(num_voters)}
cands = range(profile.num_cand)
committees = [(start_budget, set())]
final_committees = []
for _ in range(committeesize):
next_committees = []
for committee in committees:
budget = committee[0]
q_affordability = {}
curr_cands = set(cands) - committee[1]
for c in curr_cands:
approved_by = set()
for v, vote in enumerate(profile.preferences):
if c in vote.approved and budget[v] > 0.0:
approved_by.add(v)
too_poor = set()
already_available = Fraction(0)
rich = set(approved_by)
q = 0.0
while already_available < price and q == 0.0 and len(rich) > 0:
fair_split = Fraction(price-already_available, len(rich))
still_rich = set()
for v in rich:
if budget[v] <= fair_split:
too_poor.add(v)
already_available += budget[v]
else:
still_rich.add(v)
if len(still_rich) == len(rich):
q = fair_split
q_affordability[c] = q
elif already_available == price:
q = fair_split
q_affordability[c] = q
else:
rich = still_rich
if len(q_affordability) > 0:
min_q = min(q_affordability.values())
cheapest_split = [c for c in q_affordability
if q_affordability[c] == min_q]
for c in cheapest_split:
b = dict(committee[0])
for v, vote in enumerate(profile.preferences):
if c in vote.approved:
b[v] -= min(budget[v], min_q)
comm = set(committee[1])
comm.add(c)
next_committees.append((b, comm))
else: # no affordable candidate remains
comms = fill_remaining_committee(committee, curr_cands,
committeesize, profile)
# after filling the remaining spots these committees
# have size committeesize
for b, comm in comms:
final_committees.append(comm)
if resolute:
if len(next_committees) > 0:
committees = [next_committees[0]]
else:
committees = []
else:
committees = next_committees
# The committees that could be fully filled with Rule X:
for b, comm in committees: # budget and committee
final_committees.append(comm)
committees = sort_committees(final_committees)
if resolute:
if len(committees) > 0:
return [committees[0]]
else:
return []
else:
return committees
def fill_remaining_committee(committee, curr_cands, committee_size,
profile):
"""
    Rule X does not specify how to fill the remaining committee spots.
    This function repeatedly adds a candidate with the most remaining budget
    as support; selecting a candidate depletes the budgets of all voters
    that approve of that candidate.
This can produce multiple possible committees.
"""
missing = committee_size - len(committee[1])
committees = [committee]
for _ in range(missing):
next_comms = []
for comm in committees:
budget, appr_set = comm
remaining_cands = curr_cands - appr_set
budget_support = {}
for cand in remaining_cands:
budget_support[cand] = 0
for v, vote in enumerate(profile.preferences):
if cand in vote.approved:
budget_support[cand] += budget[v]
max_support = max(budget_support.values())
winners = [c for c in remaining_cands
if budget_support[c] == max_support]
for c in winners:
budget_c = {}
for voter, value in budget.items():
if c in profile.preferences[voter].approved:
budget_c[voter] = 0
else:
budget_c[voter] = value
next_comms.append((budget_c, appr_set.union([c])))
committees = next_comms
return committees
def compute_phragmen_enestroem(profile, committeesize, resolute=False):
""""Returns the winning committees with
Phragmen's first method (Enestroem's method) –
STV with unordered ballots
In every step the candidate with the highest combined budget of
their supporters gets into a committee.
For equal voting power multiple committees are computed.
Method from:
https://arxiv.org/pdf/1611.08826.pdf (18.5, Page 59)
"""
enough_approved_candidates(profile, committeesize)
num_voters = len(profile.preferences)
start_budget = {v: Fraction(profile.preferences[v].weight) for v in range(num_voters)}
price = Fraction(sum(start_budget.values()), committeesize)
cands = range(profile.num_cand)
committees = [(start_budget, set())]
for i in range(committeesize):
# here the committees with i+1 candidates are
# stored (together with budget)
next_committees = []
# loop in case multiple possible committees
# with i filled candidates
for committee in committees:
budget, comm = committee
curr_cands = set(cands) - comm
support = {c: 0 for c in curr_cands}
for nr, pref in enumerate(profile.preferences):
voting_power = budget[nr]
if voting_power <= 0:
continue
for cand in pref.approved:
if cand in curr_cands:
support[cand] += voting_power
max_support = max(support.values())
winners = [c for c, s in support.items()
if s == max_support]
for cand in winners:
b = dict(budget) # new copy of budget
if max_support > price: # supporters can afford it
# (voting_power - price) / voting_power
multiplier = Fraction(max_support - price,
max_support)
else: # set supporters to 0
multiplier = 0
for nr, pref in enumerate(profile.preferences):
if cand in pref.approved:
b[nr] *= multiplier
c = comm.union([cand]) # new committee with candidate
next_committees.append((b, c))
if resolute: # only one is requested
if len(next_committees) > 0:
committees = [next_committees[0]]
else: # should not happen
committees = []
raise Exception("phragmen enestroem failed to find "
+ "next candidate for", committees)
else:
committees = next_committees
committees = [comm for b, comm in committees]
committees = sort_committees(committees)
if resolute:
if len(committees) > 0:
return [committees[0]]
else:
return []
else:
return committees
| 2.484375
| 2
|
classes/slave_class.py
|
HubertKSK/map_reduce
| 0
|
12775505
|
#!/usr/bin/python
import logging.handlers
import os
import socket
import uuid
import json
from nltk.tokenize import word_tokenize
if not os.path.exists("log"):
os.makedirs("log")
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.DEBUG)
FORMATTER = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
FILE_HANDLER = logging.handlers.RotatingFileHandler(
'log/client.log', maxBytes=100000, backupCount=0)
FILE_HANDLER.setFormatter(FORMATTER)
LOGGER.addHandler(FILE_HANDLER)
class Slave(object):
"""docstring for Slave."""
def __init__(self):
super(Slave, self).__init__()
self.msg = None
self.HEADER = 64
self.PORT = 5050
self.SERVER = "127.0.0.1"
self.ADDR = (self.SERVER, self.PORT)
self.FORMAT = 'utf-8'
self.DISCONNECT_MESSAGE = "!DISCONNECT"
self.UUID = uuid.uuid4()
LOGGER.info(f"[{self.UUID}] starting Slave")
self.client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client.connect(self.ADDR)
self.register()
def register(self):
message = str(self.UUID)
message = message.encode(self.FORMAT)
self.client.send(message)
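    # Framing protocol: send() first transmits a fixed-size (HEADER bytes) ASCII
    # length prefix, padded with spaces, followed by the UTF-8 encoded payload.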
def send(self, msg):
LOGGER.debug(f"[{self.UUID}] Sending msg: {msg}")
message = msg.encode(self.FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(self.FORMAT)
send_length += b' ' * (self.HEADER - len(send_length))
self.client.send(send_length)
self.client.send(message)
LOGGER.debug(self.client.recv(2048).decode(self.FORMAT))
def map(self, contents):
results = []
contents = ' '.join(contents)
data = word_tokenize(contents.lower())
for word in data:
results.append((word, 1))
return results
def dissconect(self):
self.send(self.DISCONNECT_MESSAGE)
def reduce(self, key, values):
return key, sum(value for value in values)
def run_reduce(self, shuffle):
key_values_map = dict(shuffle)
key_value_list = []
for key in key_values_map:
key_value_list.append(self.reduce(key, key_values_map[key]))
return key_value_list
def listen(self):
LOGGER.debug(f"[{self.UUID}] listening")
print("Listening")
msg_length = self.client.recv(self.HEADER).decode(self.FORMAT)
if msg_length:
msg_length = int(msg_length)
self.msg = self.client.recv(msg_length).decode(self.FORMAT)
LOGGER.debug(f"[{self.UUID}] Received Message ")
self.client.send("ACK".encode(self.FORMAT))
self.controller()
def controller(self):
control_state, msg = json.loads(self.msg)
result = "null"
if control_state == 0:
LOGGER.info(f"[{self.UUID}] Starting map")
result = self.map(msg)
self.send(json.dumps(result))
elif control_state == 1:
LOGGER.info(f"[{self.UUID}] Starting reduce")
result = self.run_reduce(msg)
self.send(json.dumps(result))
else:
LOGGER.error(f"[{self.UUID}] Unknown command")
if __name__ == "__main__":
print("Test run")
inputfile = "'/Users/hubertkowalczyk/Documents/Studia/sem3/map_reduce/testcode.py'"
slave_instance = Slave()
slave_instance.send("hello")
slave_instance.send("World")
slave_instance.listen()
slave_instance.dissconect()
| 2.375
| 2
|
Index.py
|
ImdWf/Catalog_Crawler
| 0
|
12775506
|
# -*- coding: utf-8 -*-
import urllib
from Seite import Seiten
class Indexseiten(Seiten):
def add(self, data):
Exclude = ['erstes_Datum','letztes_Datum']
return super(Indexseiten, self).add(data,Exclude)
def sort(self, Kriterium = 'Indexnummer'):
super(Indexseiten, self).sort(Kriterium)
class Element(Seiten.Element):
def __init__(self, daten):
self.Eintraege = []
self.erstes_Datum = ""
self.letztes_Datum = ""
self.Indexnummer = 0
self.url = ""
# import pdb; pdb.set_trace()
self.fill_values(daten)
self.set_html_source()
def set_html_source(self):
if self.url:
seite = urllib.urlopen(self.url)
self.html_source = seite.read()
else:
self.html_source = ''
def Zeitraum(self):
return self.erstes_Datum, self.letztes_Datum
# def set_eintraege(self, Eintraege):
# for eintrag in Eintraege:
# self.Eintraege.append(eintrag)
# def set_erstes_Datum(self, datum):
# self.erstes_Datum = datum
# def set_letztes_Datum(self, datum):
# self.letztes_Datum = datum
# def set_Indexnummer(self, nummer):
# self.Indexnummer = nummer
# def get_Eintraege(self):
# return self.Eintraege
# def get_Eintraege_iter(self):
# for eintrag in self.Eintrage:
# yield eintrag
# def get_erstes_Datum(self):
# return self.erstes_Datum
# def get_letztes_Datum(self):
# return self.letztes_Datum
# def get_Indexnummer(self):
# return self.Indexnummer
# def get_html_source(self):
# return self.html_source
| 2.84375
| 3
|
src/conclusions/experiment_rendering_speed.py
|
BLannoo/LocationAnalysis
| 1
|
12775507
|
<gh_stars>1-10
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %%
# auto format code at cell execution
# %load_ext lab_black
root_location = "../../"
import sys
import matplotlib.pyplot as plt
sys.path.append(root_location)
from src.tools.pre_processing import read_csv_as_geopandas
from src.tools.visualising import (
load_or_get_belgium_roads,
determine_extrema_with_border,
)
# %% [markdown]
# # Parameters
# %%
input_file_name = root_location + "data/samples/year_2018_country_Belgium.csv"
# %% [markdown]
# # Load data
# %%
gdf = read_csv_as_geopandas(input_file_name)
# %% [markdown]
# # Select 1 day of data (2018/12/01)
# %%
selection = gdf[(gdf.month == 12) & (gdf.day == 1)]
# %% [markdown]
# # Render all roads of Belgium
# %%
# %%time
(
x_max_with_border,
x_min_with_border,
y_max_with_border,
y_min_with_border,
) = determine_extrema_with_border(selection)
ax = load_or_get_belgium_roads(repository_root_location=root_location).plot(
edgecolor="gray", figsize=(10, 6), zorder=-1
)
selection.plot(ax=ax, marker="o", color="red", markersize=15, zorder=0)
# plt.xlim([x_min_with_border, x_max_with_border])
# plt.ylim([y_min_with_border, y_max_with_border])
# %% [markdown]
# # Render only the roads that are near to the data
# %%
# %%time
(
x_max_with_border,
x_min_with_border,
y_max_with_border,
y_min_with_border,
) = determine_extrema_with_border(selection)
gdf_belgian_roads = load_or_get_belgium_roads(repository_root_location=root_location)
ax = gdf_belgian_roads.cx[x_min_with_border:x_max_with_border, y_min_with_border:y_max_with_border].plot(
edgecolor="gray", figsize=(10, 6), zorder=-1
)
selection.plot(ax=ax, marker="o", color="red", markersize=15, zorder=0)
# plt.xlim([x_min_with_border, x_max_with_border])
# plt.ylim([y_min_with_border, y_max_with_border])
# %%
| 2.5
| 2
|
setup.py
|
janharlass/xorca
| 12
|
12775508
|
"""Setup xorca."""
from setuptools import setup
setup(name='xorca',
description='Work on the ORCA grid with XGCM and Xarray',
packages=['xorca'],
package_dir={'xorca': 'xorca'},
install_requires=['setuptools', ],
zip_safe=False)
| 1.1875
| 1
|
tasks/snf.py
|
blanchpk/supernovae
| 25
|
12775509
|
"""Import tasks for the Nearby Supernova Factory.
"""
import csv
import os
from glob import glob
from astrocats.catalog.utils import jd_to_mjd, pbar, pretty_num, uniq_cdl
from astropy.time import Time as astrotime
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_snf_aliases(catalog):
file_path = os.path.join(
catalog.get_current_task_repo(), 'SNF/snf-aliases.csv')
with open(file_path, 'r') as f:
for row in [x.split(',') for x in f.read().splitlines()]:
name, source = catalog.new_entry(
row[0], bibcode=catalog.OSC_BIBCODE, srcname=catalog.OSC_NAME,
url=catalog.OSC_URL, secondary=True)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, row[1], source)
catalog.journal_entries()
return
def do_snf_spectra(catalog):
task_str = catalog.get_current_task_str()
bibcodes = {'SN2005gj': '2006ApJ...650..510A',
'SN2006D': '2007ApJ...654L..53T',
'SN2007if': '2010ApJ...713.1073S',
'SN2011fe': '2013A&A...554A..27P'}
oldname = ''
snfcnt = 0
eventfolders = next(os.walk(os.path.join(
catalog.get_current_task_repo(), 'SNFactory')))[1]
for eventfolder in pbar(eventfolders, task_str):
oname = eventfolder
name = catalog.get_preferred_name(oname)
if oldname and name != oldname:
catalog.journal_entries()
oldname = name
name = catalog.add_entry(name)
sec_reference = 'Nearby Supernova Factory'
sec_refurl = 'http://snfactory.lbl.gov/'
sec_bibcode = '2002SPIE.4836...61A'
sec_source = catalog.entries[name].add_source(
name=sec_reference, url=sec_refurl, bibcode=sec_bibcode,
secondary=True)
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, oname, sec_source)
bibcode = bibcodes[oname]
source = catalog.entries[name].add_source(bibcode=bibcode)
sources = uniq_cdl([source, sec_source])
use_path = os.path.join(
catalog.get_current_task_repo(), 'SNFactory', eventfolder, '*.dat')
eventspectra = glob(use_path)
for spectrum in pbar(eventspectra, task_str):
filename = os.path.basename(spectrum)
with open(spectrum) as spec_file:
specdata = list(csv.reader(
spec_file, delimiter=' ', skipinitialspace=True))
specdata = list(filter(None, specdata))
newspec = []
time = ''
telescope = ''
instrument = ''
observer = ''
observatory = ''
if 'Keck_20060202_R' in spectrum:
time = '53768.23469'
elif 'Spectrum05_276' in spectrum:
time = pretty_num(astrotime('2005-10-03').mjd, sig=5)
elif 'Spectrum05_329' in spectrum:
time = pretty_num(astrotime('2005-11-25').mjd, sig=5)
elif 'Spectrum05_336' in spectrum:
time = pretty_num(astrotime('2005-12-02').mjd, sig=5)
for row in specdata:
if row[0][0] == '#':
joinrow = (' '.join(row)).split('=')
if len(joinrow) < 2:
continue
field = joinrow[0].strip('# ')
value = joinrow[1].split('/')[0].strip('\' ')
if not time:
if field == 'JD':
time = str(jd_to_mjd(Decimal(value)))
elif field == 'MJD':
time = value
elif field == 'MJD-OBS':
time = value
if field == 'OBSERVER':
observer = value.capitalize()
if field == 'OBSERVAT':
observatory = value.capitalize()
if field == 'TELESCOP':
telescope = value.capitalize()
if field == 'INSTRUME':
instrument = value.capitalize()
else:
newspec.append(row)
if not time:
raise ValueError('Time missing from spectrum.')
specdata = newspec
haserrors = len(specdata[0]) == 3 and specdata[
0][2] and specdata[0][2] != 'NaN'
specdata = [list(i) for i in zip(*specdata)]
wavelengths = specdata[0]
fluxes = specdata[1]
errors = ''
if haserrors:
errors = specdata[2]
unit_err = ('Variance' if oldname == 'SN2011fe' else
'erg/s/cm^2/Angstrom')
unit_flx = 'erg/s/cm^2/Angstrom'
catalog.entries[name].add_spectrum(
u_wavelengths='Angstrom', u_fluxes=unit_flx, u_time='MJD',
time=time,
wavelengths=wavelengths, fluxes=fluxes, errors=errors,
observer=observer, observatory=observatory,
telescope=telescope, instrument=instrument, u_errors=unit_err,
source=sources, filename=filename)
snfcnt = snfcnt + 1
if (catalog.args.travis and
snfcnt % catalog.TRAVIS_QUERY_LIMIT == 0):
break
catalog.journal_entries()
return
| 2.28125
| 2
|
src/datasets_util.py
|
ZHAOZHIHAO/ClusterRouting
| 0
|
12775510
|
from __future__ import print_function
import os
import numpy as np
import torch
from torchvision import datasets, transforms
from .smallnorb_dataset_helper import smallnorb, smallnorb_equivariance
from .utils import random_split, CustomDataset
def get_dataset(args):
if args.dataset == "cifar10":
train_transform = transforms.Compose([
transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
train_dataset = datasets.CIFAR10('./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.CIFAR10('./data', train=False, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, num_workers=8, pin_memory=True, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, num_workers=8, pin_memory=True, shuffle=False)
# For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid;
# valid_mode_train + valid_mode_valid is the original training set.
data, labels = random_split(data=train_dataset.data,
labels=np.array(train_dataset.targets),
n_classes=10,
n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# make channels last and convert to np arrays
#data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
#data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
#print("data['valid_mode_train'].shape", data['valid_mode_train'].shape)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "Fashion-MNIST":
train_transform = transforms.Compose([
transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.RandomCrop(32, padding=4),
#transforms.RandomAffine(degrees=0, translate=[0.2, 0.2]),
#transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.2862,), (0.3529,))
])
test_transform = transforms.Compose([
transforms.Pad(padding=2),
transforms.ToTensor(),
transforms.Normalize((0.2862,), (0.3529,))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
train_dataset = datasets.FashionMNIST('./data', train=True, download=True, transform=train_transform)
test_dataset = datasets.FashionMNIST('./data', train=False, transform=test_transform)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=args.test_batch_size, shuffle=False)
# For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid;
# valid_mode_train + valid_mode_valid is the original training set.
data, labels = random_split(data=train_dataset.train_data, labels=train_dataset.train_labels,
n_classes=10, n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# convert to np arrays
# data['valid_mode_train'] = np.array(data['valid_mode_train'])
# data['valid_mode_valid'] = np.array(data['valid_mode_valid'])
# data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
# data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "svhn":
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
#transforms.ColorJitter(brightness=.2, contrast=.2),
transforms.ToTensor(),
transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
])
print("train_transform", train_transform)
test_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4376821, 0.4437697, 0.47280442), (0.19803012, 0.20101562, 0.19703614))
])
if args.valid_mode:
train_transform.transforms.insert(0, transforms.ToPILImage())
test_transform.transforms.insert(0, transforms.ToPILImage())
valid_transform = test_transform
# extra_dataset = datasets.SVHN(
# './data', split='extra', transform=train_transform, download=True)
# # Combine both training splits (https://arxiv.org/pdf/1605.07146.pdf)
# data = np.concatenate([train_dataset.data, extra_dataset.data], axis=0)
# labels = np.concatenate([train_dataset.labels, extra_dataset.labels], axis=0)
# train_dataset.data = data
# train_dataset.labels = labels
train_dataset = datasets.SVHN(
'./data', split='train', transform=train_transform, download=True)
test_dataset = datasets.SVHN(
'./data', split='test', transform=test_transform, download=True)
train_loader = torch.utils.data.DataLoader(
dataset=train_dataset, num_workers=8, pin_memory=True,
batch_size=args.batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
dataset=test_dataset, num_workers=8, pin_memory=True,
batch_size=args.test_batch_size, shuffle=True)
# For splitting the original training set into a new training set and a validation set.
# The new training set and validation set are named valid_mode_train and valid_mode_valid;
# valid_mode_train + valid_mode_valid is the original training set.
data, labels = random_split(data=train_dataset.data,
labels=train_dataset.labels,
n_classes=10,
n_samples_per_class=np.repeat(1000, 10).reshape(-1))
# make channels last and convert to np arrays
data['valid_mode_train'] = np.moveaxis(np.array(data['valid_mode_train']), 1, -1)
data['valid_mode_valid'] = np.moveaxis(np.array(data['valid_mode_valid']), 1, -1)
print("data['valid_mode_train'].shape", data['valid_mode_train'].shape)
# dataloader
valid_mode_train_dataset = CustomDataset(data=data['valid_mode_train'], labels=labels['valid_mode_train'], transform=train_transform)
valid_mode_valid_dataset = CustomDataset(data=data['valid_mode_valid'], labels=labels['valid_mode_valid'], transform=valid_transform)
valid_mode_train_loader = torch.utils.data.DataLoader(valid_mode_train_dataset, batch_size=args.batch_size, shuffle=True)
valid_mode_valid_loader = torch.utils.data.DataLoader(valid_mode_valid_dataset, batch_size=args.test_batch_size, shuffle=False)
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transform
elif args.dataset == "smallnorb":
working_dir = args.working_dir
dataset_paths = {'train': os.path.join(working_dir, 'train'),
'test': os.path.join(working_dir, 'test')}
dataloaders, train_transf = smallnorb(args, dataset_paths)
train_loader = dataloaders['train_valid']
test_loader = dataloaders['test']
valid_mode_train_loader = dataloaders['train']
valid_mode_valid_loader = dataloaders['valid']
# print("len(train_loader.dataset)", len(train_loader.dataset))
# print("len(train_loader.dataset)", len(train_loader.dataset))
# print("len(test_loader.dataset)", len(test_loader.dataset))
# print("len(valid_mode_train_loader.dataset)", len(valid_mode_train_loader.dataset))
# print("len(valid_mode_valid_loader.dataset)", len(valid_mode_valid_loader.dataset))
return train_loader, test_loader, valid_mode_train_loader, valid_mode_valid_loader, train_transf
elif args.dataset == "smallNORB_48_azimuth" or args.dataset == "smallNORB_48_elevation":
working_dir = args.working_dir
dataset_paths = {'train': os.path.join(working_dir, 'train'),
'test_novel': os.path.join(working_dir, 'test_novel'),
'test_familiar': os.path.join(working_dir, 'test_familiar')}
dataloaders, train_transform = smallnorb_equivariance(args, dataset_paths)
train_loader = dataloaders['train']
test_novel_loader = dataloaders['test_novel']
test_familiar_loader = dataloaders['test_familiar']
print("len(train_loader.dataset)", len(train_loader.dataset))
print("len(test_novel_loader.dataset)", len(test_novel_loader.dataset))
print("len(test_familiar_loader.dataset)", len(test_familiar_loader.dataset))
return train_loader, test_novel_loader, test_familiar_loader, train_transform
else:
print("Unsupported dataset.")
quit()
| 2.546875
| 3
|
main/admin.py
|
codeg8/HRMS
| 5
|
12775511
|
<reponame>codeg8/HRMS
from django.apps import apps
from django.conf import settings
from django.contrib import admin, messages
from django.contrib.admin import AdminSite
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.utils import unquote
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.forms import AdminPasswordChangeForm
from django.core.exceptions import PermissionDenied
from django.db import router, transaction, models
from django.forms.forms import BoundField
from django.http import Http404, HttpResponseRedirect
from django.template.response import TemplateResponse
from django.urls import path, reverse, NoReverseMatch
from django.utils.decorators import method_decorator
from django.utils.html import escape
from django.utils.text import capfirst
from django.utils.translation import gettext, gettext_lazy as _
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.debug import sensitive_post_parameters
from main import widgets as my_widgets
from django.forms import widgets
from django.contrib.admin import widgets as admin_widgets
from .helpers import HRMSActionForm
from .forms import EmployeeCreationForm, EmployeeChangeForm, AdminLoginForm
from .models import Designation, Employee, Department
csrf_protect_m = method_decorator(csrf_protect)
sensitive_post_parameters_m = method_decorator(sensitive_post_parameters())
HORIZONTAL, VERTICAL = 1, 2
def get_ul_class(radio_style):
return 'md-radio-list' if radio_style == VERTICAL else 'md-radio-inline'
# Function to add a control-label class to the labels
def add_control_label(f):
def control_label_tag(self, contents=None, attrs=None, label_suffix=None):
if attrs is None:
attrs = {}
attrs['class'] = 'control-label'
return f(self, contents, attrs, label_suffix)
return control_label_tag
# Monkey-patch label_tag to add the control-label class
BoundField.label_tag = add_control_label(BoundField.label_tag)
# Override the default AdminSite Class to customize
class HRMSAdminSite(AdminSite):
login_form = AdminLoginForm
index_title = _('Dashboard')
def _build_app_dict(self, request, label=None):
"""
Build the app dictionary. The optional `label` parameter filters models
of a specific app.
"""
app_dict = {}
if label:
models = {
m: m_a for m, m_a in self._registry.items()
if m._meta.app_label == label
}
else:
models = self._registry
for model, model_admin in models.items():
app_label = model._meta.app_label
has_module_perms = model_admin.has_module_permission(request)
if not has_module_perms:
continue
perms = model_admin.get_model_perms(request)
# Check whether user has any perm for this module.
# If so, add the module to the model_list.
if True not in perms.values():
continue
info = (app_label, model._meta.model_name)
model_dict = {
'name': capfirst(model._meta.verbose_name_plural),
'object_name': model._meta.object_name,
'icon': model._meta.icon,
'perms': perms,
}
if perms.get('change'):
try:
model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)
except NoReverseMatch:
pass
if perms.get('add'):
try:
model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)
except NoReverseMatch:
pass
if app_label in app_dict:
app_dict[app_label]['models'].append(model_dict)
else:
app_dict[app_label] = {
'name': apps.get_app_config(app_label).verbose_name,
'app_label': app_label,
'app_url': reverse(
'admin:app_list',
kwargs={'app_label': app_label},
current_app=self.name,
),
'has_module_perms': has_module_perms,
'models': [model_dict],
}
if label:
return app_dict.get(label)
return app_dict
def index(self, request, extra_context=None):
"""
Display the main admin index page, which lists all of the installed
apps that have been registered in this site.
"""
context = dict(
self.each_context(request),
title=self.index_title,
sub_heading='dashboard & statics'
)
context.update(extra_context or {})
request.current_app = self.name
return TemplateResponse(request, self.index_template or 'admin/index.html', context)
def each_context(self, request):
result = super(HRMSAdminSite, self).each_context(request)
result.update({'app_list': self.get_app_list(request)})
return result
def login(self, request, extra_context=None):
new_user = False
user = None
username = request.POST.get('username')
if username:
user = Employee.objects.filter(username=username).first()
if user:
new_user = user.last_login is None
r = super(HRMSAdminSite, self).login(request, extra_context)
if new_user and request.user == user and isinstance(r, HttpResponseRedirect):
return HttpResponseRedirect(reverse('admin:auth_user_password_change', args=[user.id]))
return r
def password_change(self, request, extra_context=None):
"""
Handle the "change password" task -- both form display and validation.
"""
from main.forms import AdminPasswordChangeForm
from django.contrib.auth.views import PasswordChangeView
url = reverse('admin:password_change_done', current_app=self.name)
defaults = {
'form_class': AdminPasswordChangeForm,
'success_url': url,
'extra_context': {**self.each_context(request), **(extra_context or {})},
}
if self.password_change_template is not None:
defaults['template_name'] = self.password_change_template
request.current_app = self.name
return PasswordChangeView.as_view(**defaults)(request)
admin_site = HRMSAdminSite(name='HRMS-admin')
class HrmsModelAdmin(admin.ModelAdmin):
formfield_overrides = {
models.CharField: {'widget': widgets.TextInput(attrs={'class': 'form-control'})},
models.IntegerField: {'widget': widgets.NumberInput(attrs={'class': 'form-control'})},
models.FloatField: {'widget': widgets.NumberInput(attrs={'class': 'form-control'})},
models.EmailField: {'widget': widgets.EmailInput(attrs={'class': 'form-control'})},
models.TextField: {'widget': widgets.Textarea(attrs={'class': 'form-control'})},
models.BooleanField: {'widget': widgets.CheckboxInput(attrs={'class': 'make-switch form-control'})},
models.ForeignKey: {'widget': widgets.Select(attrs={'class': 'form-control bs-select f-dd'})},
models.DateField: {'widget': my_widgets.DatePicker(attrs={'class': 'form-control date-picker'})},
# TODO: Create widgets for below Fields
# models.DateTimeField: {'widget': widgets.Textarea(attrs={'class': 'form-control'})},
# models.FilePathField: {},
# models.TimeField: {}
}
def formfield_for_choice_field(self, db_field, request, **kwargs):
"""
Get a form Field for a database Field that has declared choices.
"""
# If the field is named as a radio_field, use a RadioSelect
if db_field.name in self.radio_fields:
# Avoid stomping on custom widget/choices arguments.
if 'widget' not in kwargs:
kwargs['widget'] = my_widgets.HRMSRadioSelect(attrs={
'class': get_ul_class(self.radio_fields[db_field.name]),
})
if 'choices' not in kwargs:
kwargs['choices'] = db_field.get_choices(
include_blank=db_field.blank,
blank_choice=[('', _('None'))]
)
return db_field.formfield(**kwargs)
# def formfield_for_foreignkey(self, db_field, request, **kwargs):
# """
# Get a form Field for a ForeignKey.
# """
# db = kwargs.get('using')
# if 'widget' not in kwargs:
# if db_field.name in self.get_autocomplete_fields(request):
# kwargs['widget'] = AutocompleteSelect(db_field.remote_field, self.admin_site, using=db)
# pass
# elif db_field.name in self.raw_id_fields:
# kwargs['widget'] = admin_widgets.ForeignKeyRawIdWidget(db_field.remote_field, self.admin_site, using=db)
# elif db_field.name in self.radio_fields:
# kwargs['widget'] = admin_widgets.AdminRadioSelect(attrs={
# 'class': get_ul_class(self.radio_fields[db_field.name]),
# })
# kwargs['empty_label'] = _('None') if db_field.blank else None
#
# if 'queryset' not in kwargs:
# queryset = self.get_field_queryset(db, db_field, request)
# if queryset is not None:
# kwargs['queryset'] = queryset
#
# return db_field.formfield(**kwargs)
action_form = HRMSActionForm
def changelist_view(self, request, extra_context=None):
cl = self.get_changelist_instance(request)
extra_context = dict(
title=capfirst(cl.opts.verbose_name_plural),
icon=cl.opts.icon
)
return super(HrmsModelAdmin, self).changelist_view(request, extra_context)
@admin.register(Designation, site=admin_site)
class DesignationAdmin(HrmsModelAdmin):
search_fields = ('name',)
ordering = ('name',)
filter_horizontal = ('permissions',)
def formfield_for_manytomany(self, db_field, request=None, **kwargs):
if db_field.name == 'permissions':
qs = kwargs.get('queryset', db_field.remote_field.model.objects)
# Avoid a major performance hit resolving permission names which
# triggers a content_type load:
kwargs['queryset'] = qs.select_related('content_type')
return super().formfield_for_manytomany(db_field, request=request, **kwargs)
@admin.register(Employee, site=admin_site)
class EmployeeAdmin(HrmsModelAdmin):
add_form_template = 'admin/auth/user/add_form.html'
change_user_password_template = None
date_hierarchy = 'date_joined'
readonly_fields = ('username', 'password', 'email')
fieldsets = (
(_('Account info'), {'fields': (('username', 'email'),)}),
(_('Personal info'), {
'fields': (
('first_name', 'last_name'),
('gender', 'dob'),
'address',
)
}),
(_('Employment info'), {
'fields': (('date_joined', 'department'), ('groups', 'manager'))
}),
(_('Permissions'), {
'fields': (
('is_active', 'is_superuser'),
'user_permissions'
)
}),
)
add_fieldsets = (
(_('Account info'), {
'classes': ('wide',),
'fields': ('username', '<PASSWORD>', '<PASSWORD>'),
}),
)
form = EmployeeChangeForm
add_form = EmployeeCreationForm
change_password_form = AdminPasswordChangeForm
list_display = ('emp_id', 'full_name', 'username', 'email', 'department', 'groups',
'manager', 'is_active', 'is_superuser')
list_filter = ('department', 'groups', 'is_superuser', 'is_active', )
search_fields = ('username', 'first_name', 'last_name', 'email')
# list_editable = ('email', )
ordering = ('username',)
filter_horizontal = ('user_permissions',)
radio_fields = {"gender": admin.HORIZONTAL}
def has_change_permission(self, request, obj=None):
# Allow if user is trying to update his own details.
if obj is not None and request.user == obj:
self.readonly_fields = (
'username', 'password', 'email', 'date_joined', 'department', 'groups',
'manager', 'is_active', 'is_superuser', 'user_permissions'
)
return True
else:
return super(EmployeeAdmin, self).has_change_permission(request, obj)
def get_fieldsets(self, request, obj=None):
if not obj:
return self.add_fieldsets
return super().get_fieldsets(request, obj)
def get_form(self, request, obj=None, **kwargs):
"""
Use special form during user creation
"""
defaults = {}
if obj is None:
defaults['form'] = self.add_form
defaults.update(kwargs)
return super().get_form(request, obj, **defaults)
def get_urls(self):
return [
path(
'<id>/password/',
self.admin_site.admin_view(self.user_change_password),
name='auth_user_password_change',
),
] + super().get_urls()
def lookup_allowed(self, lookup, value):
# See #20078: we don't want to allow any lookups involving passwords.
if lookup.startswith('password'):
return False
return super().lookup_allowed(lookup, value)
@sensitive_post_parameters_m
@csrf_protect_m
def add_view(self, request, form_url='', extra_context=None):
with transaction.atomic(using=router.db_for_write(self.model)):
return self._add_view(request, form_url, extra_context)
def _add_view(self, request, form_url='', extra_context=None):
# It's an error for a user to have add permission but NOT change
# permission for users. If we allowed such users to add users, they
# could create superusers, which would mean they would essentially have
# the permission to change users. To avoid the problem entirely, we
# disallow users from adding users if they don't have change
# permission.
if not self.has_change_permission(request):
if self.has_add_permission(request) and settings.DEBUG:
# Raise Http404 in debug mode so that the user gets a helpful
# error message.
raise Http404(
'Your user does not have the "Change user" permission. In '
'order to add users, your user account must have both the '
'"Add user" and "Change user" permissions set. '
'Please contact an admin.')
raise PermissionDenied
if extra_context is None:
extra_context = {}
username_field = self.model._meta.get_field(self.model.USERNAME_FIELD)
defaults = {
'auto_populated_fields': (),
'username_help_text': username_field.help_text,
}
extra_context.update(defaults)
return super().add_view(request, form_url, extra_context)
@sensitive_post_parameters_m
def user_change_password(self, request, id, form_url=''):
user = self.get_object(request, unquote(id))
if not self.has_change_permission(request, user):
raise PermissionDenied
if user is None:
raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {
'name': self.model._meta.verbose_name,
'key': escape(id),
})
if request.method == 'POST':
form = self.change_password_form(user, request.POST)
if form.is_valid():
form.save()
change_message = self.construct_change_message(request, form, None)
self.log_change(request, user, change_message)
msg = gettext('Password changed successfully.')
messages.success(request, msg)
update_session_auth_hash(request, form.user)
return HttpResponseRedirect(
reverse(
'%s:%s_%s_change' % (
self.admin_site.name,
user._meta.app_label,
user._meta.model_name,
),
args=(user.pk,),
)
)
else:
form = self.change_password_form(user)
fieldsets = [(None, {'fields': list(form.base_fields)})]
adminForm = admin.helpers.AdminForm(form, fieldsets, {})
context = {
'title': _('Change password: %s') % escape(user.get_username()),
'adminForm': adminForm,
'form_url': form_url,
'form': form,
'is_popup': (IS_POPUP_VAR in request.POST or
IS_POPUP_VAR in request.GET),
'add': True,
'change': False,
'has_delete_permission': False,
'has_change_permission': True,
'has_absolute_url': False,
'opts': self.model._meta,
'original': user,
'save_as': False,
'show_save': True,
}
context.update(self.admin_site.each_context(request))
request.current_app = self.admin_site.name
return TemplateResponse(
request,
self.change_user_password_template or
'admin/auth/user/change_password.html',
context,
)
def response_add(self, request, obj, post_url_continue=None):
"""
Determine the HttpResponse for the add_view stage. It mostly defers to
its superclass implementation but is customized because the User model
has a slightly different workflow.
"""
# We should allow further modification of the user just added i.e. the
# 'Save' button should behave like the 'Save and continue editing'
# button except in two scenarios:
# * The user has pressed the 'Save and add another' button
# * We are adding a user in a popup
if '_addanother' not in request.POST and IS_POPUP_VAR not in request.POST:
request.POST = request.POST.copy()
request.POST['_continue'] = 1
return super().response_add(request, obj, post_url_continue)
@admin.register(Department, site=admin_site)
class DepartmentAdmin(HrmsModelAdmin):
pass
| 1.742188
| 2
|
Competitve/Python/New Students.py
|
EdwaRen/Competitve-Programming
| 1
|
12775512
|
<filename>Competitve/Python/New Students.py<gh_stars>1-10
n = int(input())
sumI = 0
for i in range(n):
sumI = sumI + int(input())
o = int(input())
for i in range(o):
sumI = sumI + int(input())
print('{:.3f}'.format((round(sumI*1000/(n+i+1)))/1000))
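# Worked example (illustrative): n = 2 with grades 80 and 90, then o = 1 with
# grade 70 gives sumI = 240 over n + o = 3 students, printed as 80.000
# (n + i + 1 equals n + o because i is the last index of the second loop).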
| 3.25
| 3
|
training.py
|
ali77sina/Adaptive_Filters
| 0
|
12775513
|
from Creating_Synthetic_Dataset import x_train, y_train
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from scipy.fft import fft, fftfreq
#global variables
l = 50000
low_lim = 100
high_lim = 150
fs = 512
sep_ind = int(0.8*l)
length_of_input = 60
# Size of FFT analysis
N = 60
def fir_freqz(b):
# Get the frequency response
X = np.fft.fft(b, N)
# Take the magnitude
Xm = np.abs(X)
# Convert the magnitude to decibel scale
Xdb = 20*np.log10(Xm/Xm.max())
# Frequency vector
f = np.arange(N)*fs/N
return Xdb, f
def plot(coeffs,high_lim,low_lim):
# FIR filter coefficients
#b = np.array(list(reversed(coeffs)))
b = np.array(coeffs)
# Window to be used
win = np.kaiser(len(b), 15)
# Windowed filter coefficients
b_win = win*b
# Get frequency response of filter
Xdb, f = fir_freqz(b)
# ... and its windowed version
Xdb_win, f = fir_freqz(b_win)
# Plot the impulse response
plt.subplot(211)
plt.stem(b, linefmt='b-', markerfmt='bo', basefmt='k-', label='Orig. coeff.')
plt.grid(True)
plt.title('Impulse response')
plt.xlabel('Sample')
plt.ylabel('Amplitude')
# Plot the frequency response
plt.subplot(212)
plt.plot(f, Xdb, 'b', label='Orig. coeff.')
plt.grid(True)
plt.title('Frequency response for range {} - {} Hz'.format(low_lim, high_lim))
plt.xlabel('Frequency [Hz]')
plt.ylabel('Amplitude [dB]')
plt.xlim((0, fs/2)) # Set the frequency limit - being lazy
plt.tight_layout()
plt.show()
#creating and training the CNN
model = tf.keras.Sequential()
model.add(tf.keras.layers.Input(shape=(60,1)))
model.add(tf.keras.layers.Conv1D(filters=1,kernel_size=6, use_bias=False))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(16,activation='relu'))
model.add(tf.keras.layers.Dense(1,activation='sigmoid'))
model.compile(optimizer = 'adam', loss = 'binary_crossentropy', metrics = ['accuracy'])
model.fit(x_train, y_train, batch_size = 100, epochs=5)
# getting the convolutional filter's weights and plotting the impulse and frequency response
coeffs = []
for j in model.trainable_variables[0]:
coeffs.append(float(j[0]))
plot(coeffs,high_lim,low_lim)
| 2.84375
| 3
|
plugins/graphite/icon_graphite/connection/connection.py
|
lukaszlaszuk/insightconnect-plugins
| 46
|
12775514
|
import komand
from .schema import ConnectionSchema
# Custom imports below
import requests
class Connection(komand.Connection):
def __init__(self):
super(self.__class__, self).__init__(input=ConnectionSchema())
self.request = None
def connect(self, params):
url = params.get("graphite_url")
port = params.get("graphite_port")
ssl_verify = params.get("ssl_verify")
def _request(method, urlparts, **kwargs):
try:
self.logger.info("Connection: Connecting to API")
response = requests.request(
method=method,
url="%s:%s/%s" % (url, port, "/".join(map(str, urlparts))),
verify=ssl_verify,
**kwargs,
)
except requests.exceptions.RequestException as e:
self.logger.error("Connection: Failed to connect to API - %s" % e)
raise e
else:
return response
self.request = _request
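# Illustrative request built by the closure above (parameter values are whatever
# was supplied in the connection params): self.request("GET", ("metrics", "index.json"))
# issues GET {graphite_url}:{graphite_port}/metrics/index.json with verify=ssl_verify.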
def test(self):
"""
The Graphite API offers an index endpoint which returns successfully if
the API is functioning and the request is valid.
"""
try:
response = self.request("GET", ("metrics", "index.json"))
except requests.exceptions.HTTPError:
pass # will be handled below
if response.status_code == 200:
msg = "Graphite API: API connection was successful"
self.logger.info(msg)
return {"success": True, "message": msg}
else:
self.logger.error("Graphite API: API connection failed")
response.raise_for_status()
| 2.59375
| 3
|
desafios/d018.py
|
moises-moreira/PYTHON
| 0
|
12775515
|
nome = str(input('Digite seu nome: ')).strip()
print('Seu nome em letras maiúsculas: {}'.format(nome.upper()))
print('Seu nome em letras minúsculas: {}'.format(nome.lower()))
print('Seu nome sem espaços: {}'.format(len(nome.replace(' ', ''))))
print('Seu primeiro nome tem: {} caracteres'.format(len(nome.split()[0])))
| 4.03125
| 4
|
ChessRender/RenderFsmCommon/RenderFsmStates/registration_render_state.py
|
PavelLebed20/chess_classic
| 1
|
12775516
|
from ChessRender.RenderFsmCommon.button_fsm import ButtonFsm
from ChessRender.RenderFsmCommon.screen_states import ScreenState
from ChessRender.RenderFsmCommon.screen_text_fsm import ScreenTextFsm
from ChessRender.RenderFsmCommon.text_field_fsm import TextFieldFsm
class FsmStateRegistration(ScreenState):
def __init__(self, process_login):
ScreenState.__init__(self)
self.screen_atributes.buttons["but:Confirm"] = ButtonFsm("Confirm", (0, 0, -0.5))
self.screen_atributes.buttons["but:Back"] = ButtonFsm("Back", (0, 0, -0.8))
self.screen_atributes.text_fields["text_field:Login"] = TextFieldFsm("text_field_login", (-0.5, 0, 0.5), False)
self.screen_atributes.text_fields["text_field:Email"] = TextFieldFsm("text_field_email", (-0.5, 0, 0.3), False)
self.screen_atributes.text_fields["text_field:Password"] = TextFieldFsm("text_field_password", (-0.5, 0, 0.1), True)
self.screen_atributes.screen_texts["scrtext:Login"] = ScreenTextFsm("Login: ", (-0.7, 0.5))
self.screen_atributes.screen_texts["scrtext:Email"] = ScreenTextFsm("Email:", (-0.7, 0.3))
self.screen_atributes.screen_texts["scrtext:Password"] = ScreenTextFsm("Password:", (-0.7, 0.1))
self.initialize_button_links()
self.login_field = None
self.email = None
self.password_field = None
self.process_login = process_login
def initialize_button_links(self):
self.screen_atributes.buttons["but:Confirm"].add_command(self.confirm_command)
self.screen_atributes.buttons["but:Back"].add_link("fsm:Multiplayer")
def confirm_command(self):
process_login_arg = {"Login": self.gui_text_fields["text_field_login"].get(),
"Email": self.gui_text_fields["text_field_email"].get(),
"Password": self.gui_text_fields["text_field_password"].get()}
self.process_login(process_login_arg)
| 2.265625
| 2
|
load_QTs.py
|
tgandor/urban_oculus
| 0
|
12775517
|
import argparse
import os
from tqdm import tqdm
from couch import db
from jpeg import get_QTs, identify_quality
parser = argparse.ArgumentParser()
parser.add_argument('filenames', nargs='+')
parser.add_argument('--type', default='JpgImg')
parser.add_argument('--dataset')
args = parser.parse_args()
for filename in tqdm(args.filenames):
data = {
"type": args.type,
"name": os.path.basename(filename),
"quantization": get_QTs(filename),
"quality": identify_quality(filename),
}
if args.dataset:
data["dataset"] = args.dataset
db.save(data)
| 2.796875
| 3
|
Catch.py
|
haithamaouati/Catch
| 2
|
12775518
|
#!/usr/bin/env python3
# Author: @haithamaouati
# Version:1.0
import argparse
import colorama
import os
import requests
import time
from colorama import Fore, Back, Style
colorama.init()
os.system('cls' if os.name == 'nt' else 'clear')
print('''\
_._ _,-'""`-._
(,-.`._,'( |\`-/|
`-.-' \ )-`( , o o)
`- \`_`"'-
''')
print(' Author: ' + Fore.CYAN + '@haithamaouati' + Fore.WHITE + ' Version: ' + Fore.YELLOW + '1.0\n' + Fore.WHITE)
print(' A simple admin panel finder tool\n')
parser = argparse.ArgumentParser()
parser.add_argument('-u', '--url', metavar='<url>', type=str, help='URL website (e.g. http://127.0.0.1/)')
parser.add_argument('-w', '--wordlist', metavar='<wordlist>', type=str, help='Wordlist file (e.g. wordlist.txt)')
args = parser.parse_args()
if args.url is None or args.wordlist is None:
parser.print_help()
exit()
url = args.url
wordlist = args.wordlist
with open(wordlist, 'r') as wordlist_file:
for i in wordlist_file:
time.sleep(1)
x = i.rstrip('\n')
check = requests.get(url + x)
try:
if check.status_code == 200:
with open("result.txt",'a') as result:
result.write(url + x +"\n")
print(Fore.GREEN + '[+] ' + Fore.WHITE + url + x + Fore.GREEN + ' [200]')
print(Fore.GREEN + '[*] ' + Fore.WHITE + 'Saved to: ' + Fore.YELLOW + 'result.txt')
result.close()
else:
print(Fore.RED + '[-] ' + Fore.WHITE + url + x + Fore.RED + ' [404]')
except ValueError:
print(Fore.RED + '[!] ' + Fore.WHITE + 'Something wrong')
| 2.8125
| 3
|
homework/第 5 课/Yulia python第五课作业一、二/第五课作业二.py
|
xrandx/-Dating-with-python-this-winter
| 3
|
12775519
|
<gh_stars>1-10
# -*- coding: utf-8 -*-
#@Time : 2021-01-14 19:41
#@Author : 苏苏
#@File : 第五课作业二.py
#@Software : PyCharm
# Implement the factorial function factorial(n) with recursion, so that it returns the corresponding factorial for any integer n.
def factorial(n):
if n == 0:
return 1
else:
return n*factorial(n-1)
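# Worked example: factorial(5) == 5 * 4 * 3 * 2 * 1 == 120, and factorial(0) == 1
# via the base case (the recursion assumes a non-negative integer n).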
| 2.796875
| 3
|
unsigned_bot/cogs/collection/cog.py
|
marsmanXmachina/unsigned-bot
| 0
|
12775520
|
"""
Module for collection cog
"""
import math
import discord
from discord.ext import commands
from discord_slash import cog_ext, SlashContext
from discord_slash.utils.manage_commands import create_choice, create_option
from unsigned_bot import IMAGE_PATH
from unsigned_bot.constants import MAX_AMOUNT
from unsigned_bot.config import GUILD_IDS
from unsigned_bot.log import logger
from unsigned_bot.emojis import *
from unsigned_bot.draw import (
gen_grid,
delete_image_files
)
from unsigned_bot.matching import get_similar_unsigs
from unsigned_bot.parsing import get_numbers_from_string
from unsigned_bot.embedding import add_disclaimer
from unsigned_bot.cogs.checks import valid_channel, valid_unsig
from .embeds import embed_siblings, embed_collection_grid
class CollectionCog(commands.Cog, name="Collection"):
"""commands for your unsig collection"""
COG_EMOJI = EMOJI_FRAME
def __init__(self, bot: commands.Bot):
self.bot = bot
@cog_ext.cog_slash(
name="siblings",
description="show siblings of your unsig",
guild_ids=GUILD_IDS,
options=[
create_option(
name="number",
description="number of your unsig",
required=True,
option_type=3,
)
]
)
async def _siblings(self, ctx: SlashContext, number: str):
"show siblings of your unsig"
if not await valid_channel(ctx):
return
if not await valid_unsig(ctx, number):
return
collection_numbers = range(0,MAX_AMOUNT)
similar_unsigs = get_similar_unsigs(number, collection_numbers, structural=False)
siblings_numbers = list(set().union(*similar_unsigs.values()))
selected_numbers = [int(number), *siblings_numbers]
embed = embed_siblings(number, siblings_numbers, selected_numbers, self.bot.offers, cols=2)
if self.bot.offers and siblings_numbers:
add_disclaimer(embed, self.bot.offers_updated)
if not siblings_numbers:
await ctx.send(embed=embed)
return
try:
image_path = await gen_grid(selected_numbers, cols=2)
image_file = discord.File(image_path, filename="siblings.png")
embed.set_image(url="attachment://siblings.png")
delete_image_files(IMAGE_PATH)
except:
await ctx.send(content=f"I can't generate the siblings of your unsig.")
return
else:
await ctx.send(file=image_file, embed=embed)
@cog_ext.cog_slash(
name="show",
description="show collection of your unsigs",
guild_ids=GUILD_IDS,
options=[
create_option(
name="numbers",
description="Numbers of your unsigs",
required=True,
option_type=3,
),
create_option(
name="columns",
description="no. of unsigs side by side",
required=False,
option_type=3,
),
]
)
async def _show(self, ctx: SlashContext, numbers: str, columns: str = None):
"""show collection of your unsigs"""
if not await valid_channel(ctx):
return
unsig_numbers = get_numbers_from_string(numbers)
if not unsig_numbers:
await ctx.send(content=f"Please enter numbers of your unsigs")
return
numbers_cleaned = list()
for number in unsig_numbers:
try:
number = str(int(number))
except:
await ctx.send(content=f"unsig{number} does not exist!\nPlease enter number between 0 and {MAX_AMOUNT-1}.")
return
else:
numbers_cleaned.append(number)
LIMIT_DISPLAY = 20
if len(numbers_cleaned) > LIMIT_DISPLAY:
numbers_cleaned = numbers_cleaned[:LIMIT_DISPLAY]
if not columns:
columns = math.ceil(math.sqrt(len(numbers_cleaned)))
else:
try:
columns = int(columns)
except:
await ctx.send(content=f"Please enter the number of unsigs you want to show")
return
embed = embed_collection_grid(numbers_cleaned)
try:
image_path = await gen_grid(numbers_cleaned, columns)
image_file = discord.File(image_path, filename="collection.png")
embed.set_image(url="attachment://collection.png")
delete_image_files(IMAGE_PATH)
except:
await ctx.send(content=f"I can't generate the collection of your unsigs.")
return
else:
await ctx.send(file=image_file, embed=embed)
def setup(bot: commands.Bot):
bot.add_cog(CollectionCog(bot))
logger.debug(f"{CollectionCog.__name__} loaded")
| 2.4375
| 2
|
ts/simulation_change.py
|
firefly-uics/tensor_flow_demo
| 0
|
12775521
|
import datetime
import logging
import tushare as ts
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
import tensorflow as tf
from ts.build_model import BuildModel
from ts.db_utils import get_daily_by_trade_date
from ts.simulation_history import SimulationHistory
from ts.st_history_data import x_train_col_index
class Change(SimulationHistory):
model_cache = {}
t1_predictions = None
t0_predictions = 0
t0_index = ''
def is_sell(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if len(df) == 0:
return False
if self.t0_predictions is None:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1+self.t0_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1+self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] > row['ma5'] * (1+self.t0_predictions/100)
def is_buy(self, index, row):
logging.debug('index: %s, date: %s', index, row['date'])
today = datetime.datetime.strptime(row['date'], '%Y-%m-%d %H:%M:%S')
df = get_daily_by_trade_date(self.get_code(), today.strftime('%Y%m%d'))
change_predictions, true_predictions = self.predictions(df, ['open', 'high', 'low', 'close'], 'pct_chg',
self.get_code() + '_pct_chg_model.h5')
self.t0_predictions = change_predictions
logging.debug('change_predictions:%s, true_predictions:%s', change_predictions, true_predictions)
if self.t0_index != index:
self.t1_predictions = self.t0_predictions
self.t0_index = index
if len(df) == 0:
return False
if self.t0_predictions <= 0:
return False
logging.debug('row[ma5] * (1-change_predictions/100) :%s, ma5: %s, price:%s', row['ma5'] * (1-self.t0_predictions/100), row['ma5'], row['close'])
return row['close'] < row['ma5'] * (1-self.t0_predictions/100)
def predictions(self, df, column_names, label_name, module_name):
columns = df.columns.values.tolist()
stock_data = np.array(df)
x_train_col = x_train_col_index(columns, column_names)
y_train_col = x_train_col_index(columns, [label_name])[0]
x = np.array(stock_data[:, x_train_col])
y = np.array(stock_data[:, y_train_col])
if len(x) == 0:
return 0, 0
model = self.model_cache.get(module_name)
if model is None:
model = keras.models.load_model(module_name)
optimizer = tf.train.RMSPropOptimizer(0.001)
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae'])
self.model_cache[module_name] = model
predictions = model.predict(x).flatten()[0]/10 + 1.5
return predictions, y[0]
| 2.359375
| 2
|
transcrypt/development/automated_tests/relimport/rimport.py
|
kochelmonster/Transcrypt
| 0
|
12775522
|
import tpackage
def run(test):
test.check(type(tpackage.peer2.func).__name__)
test.check(type(tpackage.func1).__name__)
| 1.476563
| 1
|
idiokit/heap.py
|
AbuseSA/idiokit
| 18
|
12775523
|
class HeapError(Exception):
pass
class Heap(object):
def __init__(self, iterable=()):
self._heap = []
for value in iterable:
self.push(value)
def _get(self, node):
if not self._heap:
raise HeapError("empty heap")
if node is None:
return self._heap[0]
if len(self._heap) <= node._index or self._heap[node._index] is not node:
raise HeapError("node not in the heap")
return node
def push(self, value):
node = _Node(len(self._heap), value)
self._heap.append(node)
_up(self._heap, node)
return node
def peek(self, node=None):
return self._get(node)._value
def pop(self, node=None):
node = self._get(node)
last = self._heap.pop()
if last is not node:
self._heap[node._index] = last
last._index = node._index
_down(self._heap, last)
_up(self._heap, last)
return node._value
def head(self):
return self._get(None)
def __len__(self):
return len(self._heap)
class _Node(object):
__slots__ = "_index", "_value"
def __init__(self, index, value):
self._index = index
self._value = value
def _swap(array, left, right):
array[left._index] = right
array[right._index] = left
left._index, right._index = right._index, left._index
def _up(array, node):
while node._index > 0:
parent = array[(node._index - 1) // 2]
if parent._value <= node._value:
break
_swap(array, node, parent)
def _down(array, node):
length = len(array)
while True:
smallest = node
left_index = 2 * node._index + 1
if left_index < length:
left = array[left_index]
if left._value < smallest._value:
smallest = left
right_index = left_index + 1
if right_index < length:
right = array[right_index]
if right._value < smallest._value:
smallest = right
if node is smallest:
break
_swap(array, node, smallest)
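# Minimal usage sketch of the Heap API above (illustrative only):
#   heap = Heap([5, 1, 3])
#   heap.pop()           # -> 1, the smallest value
#   node = heap.push(2)  # keep the handle to address this entry later
#   heap.peek()          # -> 2
#   heap.pop(node)       # -> 2, removes that specific node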
| 3.8125
| 4
|
agent.py
|
grayvalley/DQNAMM
| 6
|
12775524
|
import numpy as np
from tensorflow.keras.models import load_model
from buffer import (
ReplayBuffer,
build
)
class DQNAgent:
def __init__(self, alpha, gamma, n_actions, epsilon,
batch_size, input_dims, fc1_dims, fc2_dims, epsilon_dec=0.996,
epsilon_end=0.01, mem_size=1000000,
fname='dqn_model.h5'):
self.action_space = [i for i in range(n_actions)]
self.n_actions = n_actions
self.gamma = gamma
self.epsilon = epsilon
self.epsilon_dec = epsilon_dec
self.epsilon_min = epsilon_end
self.batch_size = batch_size
self.model_file = fname
self.memory = ReplayBuffer(mem_size, input_dims, n_actions, discrete=True)
self.q_eval = build(alpha, n_actions, input_dims, fc1_dims, fc2_dims)
def remember(self, state, action, reward, new_state, done):
self.memory.store_transition(state, action, reward, new_state, done)
def choose_action(self, state):
"""
Choose action given state of the game.
"""
state = state[np.newaxis, :]
# epsilon greedy
rand = np.random.random()
if rand < self.epsilon:
action = np.random.choice(self.action_space)
else:
actions = self.q_eval.predict(state)
action = np.argmax(actions)
return action
def learn(self):
if self.memory.mem_cntr < self.batch_size:
return
state, action, reward, new_state, done = self.memory.sample_buffer(self.batch_size)
action_values = np.array(self.action_space, dtype=np.int8)
action_indices = np.dot(action, action_values)
q_eval = self.q_eval.predict(state)
q_next = self.q_eval.predict(new_state)
q_target = q_eval.copy()
batch_index = np.arange(self.batch_size, dtype=np.int32)
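# Bellman-style DQN target for the actions taken: r + gamma * max_a' Q(s', a');
# the bootstrap term is scaled by the buffer's done flag (presumably stored so
# that terminal transitions keep only the reward).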
q_target[batch_index, action_indices] = reward + self.gamma * np.max(q_next, axis=1) * done
_ = self.q_eval.fit(state, q_target, verbose=0)
self.epsilon = self.epsilon*self.epsilon_dec if self.epsilon > self.epsilon_min else self.epsilon_min
def save_model(self):
self.q_eval.save(self.model_file)
def load_model(self):
self.q_eval = load_model(self.model_file)
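# Hypothetical usage sketch of DQNAgent; the argument values below are
# illustrative only and not taken from this repository:
#   agent = DQNAgent(alpha=0.0005, gamma=0.99, n_actions=3, epsilon=1.0,
#                    batch_size=64, input_dims=8, fc1_dims=256, fc2_dims=256)
#   action = agent.choose_action(observation)
#   agent.remember(observation, action, reward, next_observation, done)
#   agent.learn()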
| 2.734375
| 3
|
database.py
|
sstive/DBHelper
| 0
|
12775525
|
import pymysql
import json
from os import environ as env
# TODO: Write scripts for managing
class Database:
def __init__(self, database, **input_options):
"""
:param database: File/string/dict/json with database structure
:param input_options: Options and login data for database
:key check: Check tables (create missing), should be boolean
:key add_cols: Add new columns added to database info file, should be boolean
:key update_cols: Update data types of columns, should be boolean
:key remove_cols: Remove columns that do not exist in the database info file, should be boolean
:key drop: List of tables to drop or string '*' (all), should be list or string
:key use_warning: Require confirmation in console for dropping, should be boolean
:key host: Hostname, should be string
:key port: Port, should be int
:key user: Username, should be string
:key password: Password, should be string
:key database: Name of database to work with, should be string
:key db: Alias for database, should be string
"""
# Global dicts #
# states (checked, etc)
self._states = {
'checked': False,
'dropped': False,
'cols_updated': False,
'cols_added': False,
'cols_removed': False
}
# connection data (login, password, etc)
self._connection_data = {
'host': None,
'port': 3306,
'database': None,
'db': None,
'user': None,
'password': <PASSWORD>
}
# options (update_columns, drop_table, etc)
self._options = {
'check': True, # Checking database for missing tables
'add_cols': True, # Adding new columns
'update_cols': False, # Updating data type in columns
'remove_cols': False, # Removing unknown columns
'drop': [], # Dropping tables
'use_warning': True # Use warning before dropping
}
# custom functions
self._custom_functions = {}
# --- #
# Getting information about database #
if type(database) is dict:
self._data = database
elif type(database) is str:
# Json string
if database[0] == '{':
self._data = json.loads(database)
# File path
else:
f = open(database)
self._data = json.load(f)
f.close()
# File reader
else:
self._data = json.loads(database.read())
# --- #
# Updating options
# Custom functions
if 'functions' in input_options.keys():
self._custom_functions = input_options['functions']
# Other options
for opt in self._options.keys():
# Checking environment
env_opt = 'DBH_' + opt.upper()
if env_opt in env:
if opt == 'drop':
# Getting list of tables to drop from environ
self._options[opt] = env[env_opt].replace(' ', '').split(',')
elif env[env_opt].lower() in ['true', 'yes', 'y', '1']:
self._options[opt] = True # Str to boolean
elif env[env_opt].lower() in ['false', 'no', 'n', '0']:
self._options[opt] = False # Str to boolean
# Checking kwargs
elif opt in input_options:
self._options[opt] = input_options[opt] # Getting option from kwargs
# Getting connection info
for field in self._connection_data.keys():
# Checking environment
if 'DBH_' + field.upper() in env:
if field == 'port':
self._connection_data[field] = int(env['DBH_' + field.upper()])
else:
self._connection_data[field] = env['DBH_' + field.upper()]
# Checking user options
elif field in input_options:
self._connection_data[field] = input_options[field]
# Checking config file
elif 'connection' in self._data.keys() and field in self._data['connection'].keys():
self._connection_data[field] = self._data['connection'][field]
# Starting first connection
self._con = None
if 'start_con' in input_options and not input_options['start_con']:
pass
else:
self.begin()
def __del__(self):
self.end()
# Connection #
def begin(self):
if self._con is not None and self._con.open:
return
self._con = pymysql.connect(**self._connection_data)
self._drop_tables()
self._check()
def end(self):
if self._con.open:
self._con.close()
# ----- #
# Utils #
def _get_db_tables(self):
with self._con.cursor() as cur:
cur.execute("SHOW TABLES")
tables = []
for el in cur.fetchall():
tables.append(el[0])
return tables
def _gen_table_creating_cmd(self, table):
columns = self._data['tables'][table]
command = f"CREATE TABLE {table} ("
for col, params in columns.items():
if col == '_KEY':
command += f'PRIMARY KEY({params})'
elif col == '_ADDITION':
command += params
elif col == 'FOREIGN KEY':
command += col + f"({params[0]}) REFERENCES {params[1]}"
else:
command += col + ' '
if params == 'KEY':
command += "INT AUTO_INCREMENT NOT NULL PRIMARY KEY"
elif params == 'FOREIGN KEY':
command += "INT"
else:
command += ' '.join(params)
command += ', '
return command[:-2] + ');'
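# Illustrative output for a hypothetical schema entry
#   {"id": "KEY", "name": ["VARCHAR(50)", "NOT NULL"]}
# registered under table "users":
#   CREATE TABLE users (id INT AUTO_INCREMENT NOT NULL PRIMARY KEY, name VARCHAR(50) NOT NULL);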
@staticmethod
def _compare_data_types(local, db):
if local in ['KEY', 'FOREIGN KEY']:
return True
local = local[0]
local = local.upper()
db = db.upper()
if '(' not in local:
if local == db.split('(', 1)[0]:
return True
return False
if local == db:
return True
return False
@staticmethod
def _prepare_vals(values: dict, **options):
new_vals = {}
for key, val in values.items():
if type(val) == str and val != '*':
val = val.replace('\'', '\"')
new_vals[key] = f"\'{val}\'"
elif val is None:
new_vals[key] = "NULL"
elif 'all_str' in options.keys() and options['all_str']:
new_vals[key] = str(val)
return new_vals
# ----- #
# Options #
def _drop_tables(self):
# If already dropped
if self._states['dropped']:
return
# If drop all
if len(self._options['drop']) > 0 and self._options['drop'][0] == '*':
self._options['drop'] = self._get_db_tables()
# If table list empty
if not self._options['drop']:
return
# Confirming
if self._options['use_warning']:
print(f"Are you sure want to drop {', '.join(self._options['drop'])}? (y/n)")
if input() != 'y':
return
# Executing SQL
self._con.cursor().execute("DROP TABLE " + ', '.join(self._options['drop']))
self._con.commit()
# Changing states
self._states['checked'] = False
self._states['dropped'] = True
# Recreating tables
self._check()
# TODO: Fix adding/changing foreign key, _key, key and _addition
def _check(self):
if self._states['checked'] or not self._options['check']:
return
with self._con.cursor() as cur:
cur.execute("SHOW TABLES")
tables_in_db = self._get_db_tables()
# Checking tables
for table in tables_in_db:
# Skipping unknown tables
if table not in self._data['tables'].keys():
continue
# Getting list of columns in table
cur.execute(f"DESCRIBE {table}")
db_cols = {}
for col_in_db in cur.fetchall():
db_cols[col_in_db[0]] = col_in_db[1:]
# Removing unknown columns
for col in db_cols.keys():
if col not in self._data['tables'][table].keys():
try:
cur.execute(f"ALTER TABLE {table} DROP COLUMN {col};")
except pymysql.err.OperationalError as e:
if e.args[0] != 1828:
raise e
key = e.args[1].split()[-1][1:-1]
cur.execute(f"ALTER TABLE {table} DROP CONSTRAINT {key}")
cur.execute(f"ALTER TABLE {table} DROP COLUMN {col};")
# Checking columns
for col in self._data['tables'][table].keys():
# Skipping keywords
if col.upper() in ['_ADDITION', '_KEY', 'FOREIGN KEY']:
continue
# TODO: Refactor
if self._data['tables'][table][col] in ['FOREIGN KEY', 'KEY']:
continue
# Adding missing columns
if self._options['add_cols'] and col not in db_cols.keys():
cur.execute(f"ALTER TABLE {table} ADD {col} {' '.join(self._data['tables'][table][col])};")
continue
# Changing data type
if self._options['update_cols'] and not self._compare_data_types(self._data['tables'][table][col],
db_cols[col][0]):
cur.execute(
f"ALTER TABLE {table} MODIFY COLUMN {col} {' '.join(self._data['tables'][table][col])};")
# Checking tables
for table in self._data['tables']:
if table not in tables_in_db:
cur.execute(self._gen_table_creating_cmd(table))
# Committing changes
self._con.commit()
# ----- #
# Methods #
# Custom functions
def run(self, function, **kwargs):
return self._custom_functions[function](self, **kwargs)
def insert(self, table, **values):
# Options
return_request = ('_request' in values.keys() and values['_request'])
return_id = ('_return_id' in values.keys() and values['_return_id'])
if return_request:
del values['_request']
if return_id:
del values['_return_id']
values = self._prepare_vals(values, all_str=True)
request = f"INSERT INTO {table} ({', '.join(values.keys())}) VALUES ({', '.join(values.values())});"
if return_request:
return request
self.execute(request)
if return_id:
return self.execute("SELECT LAST_INSERT_ID();")[0][0]
def insert_or_update(self, table, **values):
# Options
return_request = ('_request' in values.keys() and values['_request'])
return_id = ('_return_id' in values.keys() and values['_return_id'])
if return_request:
del values['_request']
if return_id:
del values['_return_id']
values = self._prepare_vals(values, all_str=True)
request = f"INSERT INTO {table} ({', '.join(values.keys())}) VALUES ({', '.join(values.values())}) " \
f"ON DUPLICATE KEY UPDATE {', '.join(list(map(lambda pair: '='.join(pair), values.items())))};"
if return_request:
return request
self.execute(request)
if return_id:
return self.execute("SELECT LAST_INSERT_ID();")[0][0]
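    # Hedged illustration of the SQL these helpers build (table and column names are placeholders):
    #   insert_or_update('users', name='Bob', age=30)
    # runs the values through _prepare_vals(..., all_str=True) and produces
    #   INSERT INTO users (name, age) VALUES ('Bob', 30) ON DUPLICATE KEY UPDATE name='Bob', age=30;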
def select(self, table, columns, addition="", **params):
request = f"SELECT {', '.join(columns)} FROM {table} {addition}"
if addition == "":
request = request[:-1] + ';'
# Returning request
if '_request' in params.keys() and params['_request']:
return request
return self.execute(request)
def update(self, table, condition="", **values):
# Returning request
return_request = ('_request' in values.keys() and values['_request'])
if return_request:
del values['_request']
values = self._prepare_vals(values, all_str=True)
request = f"UPDATE {table} SET {', '.join(list(map(lambda pair: '='.join(pair), values.items())))}"
# Condition
c = condition.split(' ')
if c[0].upper() == 'WHERE':
condition = ' '.join(c[1:])
if len(condition) > 0:
request = request + ' WHERE ' + condition
request += ';'
if return_request:
return request
self.execute(request)
def delete(self, table, condition, **params):
request = f"DELETE FROM {table} WHERE {condition};"
if condition == '*':
request = f"DELETE * FROM {table};"
# Returning request
if '_request' in params.keys() and params['_request']:
return request
self.execute(request)
def execute(self, sql):
self.begin()
with self._con.cursor() as cur:
cur.execute(sql)
self._con.commit()
resp = cur.fetchall()
return resp
# ----- #
| 3.09375
| 3
|
emannotationschemas/schemas/nucleus_detection.py
|
seung-lab/EMAnnotationSchemas
| 0
|
12775526
|
<gh_stars>0
import marshmallow as mm
from emannotationschemas.schemas.base import (
FlatSegmentationReferenceSinglePoint,
SpatialPoint,
)
class NucleusDetection(FlatSegmentationReferenceSinglePoint):
volume = mm.fields.Float(description="the volume of the nucleus detected in um^3")
bb_start = mm.fields.Nested(
SpatialPoint,
required=False,
description="low corner of the bounding box",
)
bb_end = mm.fields.Nested(
SpatialPoint,
required=False,
description="high corner of the bounding box",
)
| 2.1875
| 2
|
srmcollidermetabo.py
|
premyshan/DIAColliderMetabo
| 1
|
12775527
|
<filename>srmcollidermetabo.py
#SRMColliderMetabo
"""
Evaluating complex backgrounds that may cause ambiguities in the measurement
of metabolites. This tool first filters a list of identified metabolites to
remove stereoisomers (using the InChIKey) and to apply the given experimental
conditions. The filtered list is then used to profile different acquisition
methods, using the MS1 and MS2 windows listed below, to identify the number
of unique ion signatures (UIS) per molecular id (mol_id).
MS1/MS2
MS1 - 0.7 Da / - ; 25 ppm / -
MRM - 0.7 Da / 0.7 Da
SWATH - 25 Da / 25 ppm; 25 ppm / 25 ppm
This tool also measures the number of interferences for each transition
(the number of identical transitions within the range of metabolites filtered
as specified above).
"""
import pandas as pd
import numpy as np
import rdkit
import re
import itertools
import time
import math
from operator import itemgetter
from tqdm import tqdm
import joblib
import contextlib
def my_round(val, decimal=2):
multiplier = 10**decimal
return math.floor(val*multiplier+0.5)/multiplier
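# Hedged aside on my_round: it rounds halves up (towards +infinity), while the builtin round
# uses round-half-to-even, e.g.
#   my_round(0.125, 2) -> 0.13    round(0.125, 2) -> 0.12
#   my_round(0.625, 2) -> 0.63    round(0.625, 2) -> 0.62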
"""
function read:
input: paths to pickled tables of compounds and spectra
output: pandas dataframes (compounds and spectra)
"""
def read(compounds, spectra):
allcomp = pd.read_pickle(compounds)
allcomp = allcomp.dropna(subset = ['mol_id'])
allcomp = allcomp.loc[allcomp.sanitize==True]
allcomp.loc[:,"mol_id"] = allcomp.mol_id.astype(int)
spectra = pd.read_pickle(spectra)
spectra = spectra.dropna(subset = ['mol_id'])
spectra.loc[:,"mol_id"] = spectra.mol_id.astype(int)
cf = allcomp
assert not cf["inchi"].isna().any()
assert not cf["inchikey"].isna().any()
spectra = spectra.loc[spectra['mol_id'].isin(cf.mol_id)]
return cf, spectra
"""
function filter:
Filter the compound list (removing stereoisomers and applying the given experimental conditions)
input: list of compounds, list of spectra, collision energy, collision gas, ion mode, instrument type, adducts
output: filtered compound list and the correspondingly filtered spectra
"""
def filter_comp(compounds_filt, spectra, col_energy = 35, col_gas = 'N2', ion_mode = 'P',inst_type = ['Q-TOF', 'HCD'], adduct = ['[M+H]+', '[M+Na]+']):
compounds_filt['inchikey'] = compounds_filt['inchikey'].str[:14]
compounds_filt = compounds_filt.drop_duplicates(subset='inchikey', keep=False)
spectra_filt_all = spectra.loc[spectra['mol_id'].isin(compounds_filt.mol_id)]
if ion_mode != '':
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['ion_mode'] == str(ion_mode)]
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['res']>=2]
if inst_type != '':
inst_type = [str(x) for x in inst_type]
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['inst_type'].isin(inst_type)]
spectra_filt_all['col_energy'] = spectra_filt_all['col_energy'].apply(lambda x: str(x).split('%')[-1])
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['col_energy']!=""]
spectra_filt_all['col_energy'].replace(regex=True,inplace=True,to_replace='[^0-9.]',value=r'')
spectra_filt_all.loc[:,'col_energy'] = spectra_filt_all['col_energy'].astype(float)
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['col_energy']!=0.0]
if col_energy != 0:
low = col_energy-5
high = col_energy+5
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['col_energy'].between(low, high, inclusive = True)]
if col_gas != '':
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['col_gas'] == str(col_gas)]
spectra_filt_all.loc[:,'peaks'] = spectra_filt_all['peaks'].apply(lambda x: [(a,b/(max(x,key=itemgetter(1))[1])) for (a,b) in x])
spectra_filt_all = spectra_filt_all.loc[spectra_filt_all['spec_type'] == 'MS2']
spectra_filt_all.loc[:,'prec_mz'] = spectra_filt_all['prec_mz'].astype(float)
if adduct != []:
adduct = [str(x) for x in adduct]
spectra_filt_add = spectra_filt_all.loc[spectra_filt_all['prec_type'].isin(adduct)]
else:
spectra_filt_add = spectra_filt_all
compounds_filt = compounds_filt.loc[compounds_filt['mol_id'].isin(spectra_filt_add.mol_id)]
return compounds_filt, spectra_filt_all
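# Hedged usage sketch (the pickle paths are placeholders, not files shipped with this script):
#   compounds, spectra = read("compounds.pkl", "spectra.pkl")
#   compounds_filt, spectra_filt = filter_comp(compounds, spectra, col_energy=35, ion_mode='P')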
"""
function choose_background_and_query:
Choosing the background for each query (based on mol_id), given the MS1 (Q1) and MS2 (Q3) window sizes.
Fragment spectra are filtered according to the top_n value (% relative intensity) and the given n for UIS.
Input: spectra, mol_id, MS1/MS2 window sizes (Q1: change/ppm, Q3: change_q3/ppm_q3; if a ppm value is given, it takes priority over the Da value),
query parameters (col_energy, adduct), q3 flag (if q3 = True, change_q3 or ppm_q3 are applied as well, otherwise only Q1),
top_n (the top n% of fragment ions), uis_num (number of transitions chosen)
Output: query ids, background ids, uis (1 if the compound is unique, else 0), interferences (number of interfering compounds), transitions (number of transitions used)
"""
def choose_background_and_query(spectra_filt, mol_id, change = 0, ppm = 0, change_q3 = 0, ppm_q3 = 0, adduct = ['[M+H]+', '[M+Na]+'], col_energy = 35, q3 = False, top_n = 0.1, uis_num = 0, choose = True):
query_opt = spectra_filt.loc[(spectra_filt['mol_id'] == mol_id)]
if adduct != []:
adduct = [str(x) for x in adduct]
query_opt = query_opt.loc[query_opt['prec_type'].isin(adduct)]
# note: it is possible for query_opt to have 0 elements here!
query_opt = query_opt.reset_index(drop=True)
same = spectra_filt.loc[spectra_filt['mol_id']==mol_id]
background_filt = spectra_filt.drop(index=same.index) #drop spectra from same mol_id
if (choose==True) and (len(query_opt)!=0):
if len(query_opt)>1:
query_opt['ce']=(query_opt['col_energy'] - col_energy).abs()
query_opt['add'] = pd.Categorical(query_opt['prec_type'], ordered=True, categories=['[M+H]+','[M+Na]+'])
query_opt = query_opt.sort_values(['res','ce','add'], ascending=[False,True,True])
query=query_opt.iloc[:1]
else:
query=query_opt
query_prec_mz = query['prec_mz'].item()
#choosing background
if ppm != 0:
change = (ppm/1000000.0)*(query_prec_mz)
low = query_prec_mz - (change/2.0)
high = query_prec_mz + (change/2.0)
background_filt = background_filt.loc[background_filt['prec_mz'].between(low, high, inclusive = True)]
#choosing the fragment
query_frag_mz = list(query['peaks'])[0]
query_frag_mz = [(a,b) for (a,b) in query_frag_mz if (b>(top_n))]
query_frag_mz.sort(key = lambda x: x[1], reverse = True)
f1 = my_round(query_frag_mz[0][0])
f2 = my_round(query_prec_mz)
if f1 != f2:
start = 0
else:
start = 1
uis_num += 1
query_frag_mz = query_frag_mz[start:uis_num]
query_frag_mz_values = [query[0] for query in query_frag_mz]
transitions=len(query_frag_mz_values)
if q3 == True:
if top_n < 0.1: #default of 0.1 for background relative intensity filter
top_n = 0.1
for transition in query_frag_mz_values:
if ppm_q3 != 0:
change_q3 = (ppm_q3/1000000.0)*(transition)
low = transition - (change_q3/2.0)
high = transition + (change_q3/2.0)
transitions_q1 = [[(a,b) for (a,b) in peaklist if a>=low and a<=high and (b>(top_n))] for peaklist in background_filt['peaks']]
transitions_q1 = [x for x in transitions_q1 if x!= []]
transitions_q1 = list(itertools.chain.from_iterable(transitions_q1))
transitions_q1.sort(key = lambda x: x[1], reverse = True)
background_filt = background_filt.loc[(background_filt['peaks'].apply(lambda x: any(transition in x for transition in transitions_q1)))]
interferences = len(np.unique(background_filt.mol_id))
if interferences == 0:
uis=1
else:
uis=0
elif (choose==False) and (len(query_opt)!=0): #not choosing one query, MS1 only filter
assert len(adduct) == 1, adduct
query=query_opt
query_prec_mz=list(query_opt['prec_mz'])[0]
if ppm != 0:
change = (ppm/1000000.0)*(query_prec_mz)
low = query_prec_mz - (change/2.0)
high = query_prec_mz + (change/2.0)
background_filt = background_filt.loc[background_filt['prec_mz'].between(low, high, inclusive = True)]
uis = -1
interferences = -1
transitions=-1
else:
query=query_opt
uis = -1
interferences = -1
transitions = -1
# convert full dfs to just ids
query_ids = query[["spectrum_id","mol_id"]]
background_ids = background_filt[["spectrum_id","mol_id"]]
return query_ids, background_ids, uis, interferences, transitions
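# Hedged usage sketch for a single compound (mol_id=42 is a placeholder id); this corresponds to
# an MRM-style 0.7 Da Q1 / 0.7 Da Q3 selection with up to 3 transitions:
#   query_ids, background_ids, uis, interferences, transitions = choose_background_and_query(
#       spectra_filt, mol_id=42, change=0.7, change_q3=0.7, q3=True, uis_num=3)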
"""
Function to integrate joblib with tqdm progress bar
https://stackoverflow.com/a/58936697/6937913
"""
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
"""Context manager to patch joblib to report into tqdm progress bar given as argument"""
class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
tqdm_object.update(n=self.batch_size)
return super().__call__(*args, **kwargs)
old_batch_callback = joblib.parallel.BatchCompletionCallBack
joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
try:
yield tqdm_object
finally:
joblib.parallel.BatchCompletionCallBack = old_batch_callback
tqdm_object.close()
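# Hedged usage sketch of the context manager (math.sqrt is only a stand-in workload):
#   with tqdm_joblib(tqdm(desc="demo", total=100)):
#       joblib.Parallel(n_jobs=2)(joblib.delayed(math.sqrt)(i) for i in range(100))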
"""
function profile:
Based on the given parameters calculates the number of uis and interferences by mol_id.
Input: parameters for choose_background_and_query
Output: the compound list with added 'UIS', 'Interferences' and 'Transitions' columns (one row per mol_id)
"""
def profile(compounds_filt, spectra_filt, change = 0, ppm = 0, change_q3 = 0, ppm_q3 = 0, adduct = ['[M+H]+', '[M+Na]+'], col_energy=35, q3 = False, top_n = 0.1, mol_id = 0, uis_num=0):
uis_all = []
int_all = []
trans_all = []
# only keep necessary columns, to reduce memory footprint
_spectra_filt = spectra_filt[["spectrum_id","mol_id","prec_type","col_energy","res","prec_mz","peaks"]]
mol_ids = compounds_filt["mol_id"]
with tqdm_joblib(tqdm(desc="bg & q", total=mol_ids.shape[0])) as pbar:
par_func = joblib.delayed(choose_background_and_query)
pool = joblib.Parallel()
results = pool(
par_func(
mol_id = mol_id, change = change, ppm = ppm,
change_q3 = change_q3, ppm_q3 = ppm_q3,
adduct = adduct, col_energy = col_energy,
q3 = q3, top_n = top_n, spectra_filt = _spectra_filt,
uis_num=uis_num
            ) for idx, mol_id in mol_ids.items()
)
query_ids_all, background_ids_all, uis_all, int_all, trans_all = zip(*results)
compounds_filt['UIS'] = uis_all
compounds_filt['Interferences'] = int_all
compounds_filt['Transitions'] = trans_all
return compounds_filt
"""
function method_profiler:
Profiles datasets according to specific MS1/MS2 (Q1/Q3) windows
Input: compounds, spectra, parameters for profile/choose_background_and_query
Output: compound list with added 'UIS', 'Interferences' and 'Transitions' columns
"""
def method_profiler(compounds_filt, spectra_filt, change = 0, ppm = 0, change_q3 = 0, ppm_q3 = 0, adduct = ['[M+H]+', '[M+Na]+'], col_energy = 35, q3 = False, top_n = 0.1, mol_id = 0, uis_num = 0):
start = time.time()
profiled = profile(change = change, ppm = ppm, change_q3 = change_q3, ppm_q3 = ppm_q3, adduct = adduct, col_energy = col_energy,
q3 = q3, top_n = top_n, mol_id = mol_id, compounds_filt = compounds_filt, spectra_filt = spectra_filt, uis_num = uis_num)
profiled_filtered = profiled.loc[profiled['Interferences'] != -1]
end = time.time()
list_mol_ids = list(profiled_filtered.mol_id)
print("The unique identities and interferences for all mol_id will now be shown:")
print("The number of unique mol_id is: " + str(len([x for x in profiled['UIS'] if x == 1])))
print("Time to completion of profiler: " + str(end-start))
return profiled
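# Hedged usage sketches matching the windows listed in the module docstring
# (compounds_filt and spectra_filt as returned by filter_comp):
#   MS1 only, 25 ppm:       method_profiler(compounds_filt, spectra_filt, ppm=25)
#   MRM, 0.7 Da / 0.7 Da:   method_profiler(compounds_filt, spectra_filt, change=0.7, change_q3=0.7, q3=True)
#   SWATH, 25 Da / 25 ppm:  method_profiler(compounds_filt, spectra_filt, change=25, ppm_q3=25, q3=True)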
"""
function optimal_ce_filter:
Filter function for collision_energy_optimizer
"""
def optimal_ce_filter(compounds_filt, spectra_filt, adduct):
spectra_filt = spectra_filt[spectra_filt["prec_type"] == adduct].reset_index(drop=True)
# this adds mzs and ints column to the spectra
def get_mzs(peaks):
mzs = [my_round(mz) for mz in list(zip(*peaks))[0]]
return mzs
def get_ints(peaks):
ints = list(zip(*peaks))[1]
return ints
spectra_filt.loc[:,"mzs"] = spectra_filt["peaks"].apply(get_mzs)
spectra_filt.loc[:,"ints"] = spectra_filt["peaks"].apply(get_ints)
def compute_num_trans(row):
prec_mz = my_round(row["prec_mz"])
mzs = row["mzs"]
        same_count = sum(mz == prec_mz for mz in mzs)
return len(mzs) - same_count
spectra_filt['num_trans'] = spectra_filt.apply(compute_num_trans,axis=1)
spectra_filt = spectra_filt.loc[spectra_filt['num_trans'] >= 3]
spectra_filt = spectra_filt[spectra_filt['mol_id'].map(spectra_filt['mol_id'].value_counts()) > 1]
compounds_filt = compounds_filt.loc[compounds_filt['mol_id'].isin(spectra_filt.mol_id)]
spectra_filt = spectra_filt.reset_index(drop=True)
compounds_filt = compounds_filt.reset_index(drop=True)
return compounds_filt, spectra_filt
"""
function collision_energy_optimizer:
Finds pairwise-optimal collision energies (POCE) per compound
"""
def collision_energy_optimizer(compounds_filt, spectra_filt):
# quick check that spectra mz are bounded
max_mz = spectra_filt["mzs"].apply(max).max()
assert max_mz < 2000., max_mz
def compute_spec(row, mz_max=2000.):
mzs = np.array(row["mzs"])
ints = 100*np.array(row["ints"])
mz_bins = np.arange(0.5,mz_max+0.5,step=1.0)
mz_bin_idxs = np.digitize(mzs,bins=mz_bins,right=False)
spec = np.zeros([len(mz_bins)],dtype=float)
for i in range(len(mz_bin_idxs)):
spec[mz_bin_idxs[i]] += ints[i]
assert np.isclose(np.sum(spec),np.sum(ints)), np.abs(np.sum(spec)-np.sum(ints))
return spec
# compute ce diff matrix
ce_vec = spectra_filt["col_energy"].to_numpy().reshape(-1,1)
query_mat = np.broadcast_to(ce_vec,[ce_vec.shape[0],ce_vec.shape[0]])
background_mat = np.broadcast_to(ce_vec.T,[ce_vec.shape[0],ce_vec.shape[0]])
ce_diff_mat = query_mat - background_mat
# compute cosine sim matrix
spec = spectra_filt.apply(compute_spec,axis=1)
spec_vec = np.stack(spec.tolist(),axis=0).reshape(spec.shape[0],-1)
cos_vec = spec_vec / np.sqrt(np.sum(spec_vec**2,axis=1)).reshape(-1,1)
cos_sim_mat = np.matmul(cos_vec,cos_vec.T)
# stack them all
all_mat = np.stack([query_mat,background_mat,ce_diff_mat,cos_sim_mat],axis=-1)
# get mapping from spectrum id to idx of the matrix
spec_id2idx = {spec_id:spec_idx for spec_idx,spec_id in enumerate(spectra_filt["spectrum_id"].tolist())}
# number of interfering spectra, per query
num_spectra = []
# number of interfering compounds, per query
num_comps = []
# the set of minimal CEs per interfering compound, per query
all_min_ces = []
# the precursor mz of the query
prec_mzs = []
# only keep necessary columns, to reduce memory footprint
spectra_filt = spectra_filt[["spectrum_id","mol_id","prec_type","col_energy","res","prec_mz","peaks"]].copy()
# find optimal CE for each compound
    for i, mol_id in tqdm(compounds_filt["mol_id"].items(),desc="> optimal_ce",total=compounds_filt.shape[0]):
query_ids, background_ids, _, _, _ = choose_background_and_query(
mol_id = mol_id, col_energy = 0, change=25,
q3 = False, spectra_filt = spectra_filt,
choose=False, top_n=0, adduct=['[M+H]+']
)
if query_ids.shape[0] == 0:
# this happens when the mol_id only corresponds to adducts that are not "[M+H]+"
import pdb; pdb.set_trace()
query_spec_idx = query_ids["spectrum_id"].map(spec_id2idx).to_numpy()
background_ids["spec_idx"] = background_ids["spectrum_id"].map(spec_id2idx)
bg_mol_ids = background_ids["mol_id"].unique().tolist()
query_prec_mzs = spectra_filt[spectra_filt["mol_id"].isin(query_ids["mol_id"])]["prec_mz"]
assert query_prec_mzs.nunique() == 1, query_prec_mzs.nunique()
num_comps.append(len(bg_mol_ids))
prec_mzs.append(query_prec_mzs.tolist()[0])
num_spectra.append(background_ids['spectrum_id'].nunique())
cur_min_ces = []
for bg_mol_id in bg_mol_ids:
background_spec_idx = background_ids[background_ids["mol_id"] == bg_mol_id]["spec_idx"].to_numpy()
score_mat = all_mat[query_spec_idx][:,background_spec_idx]
assert not score_mat.size == 0
cur_min_ces.append(compute_optimal_ces(score_mat))
all_min_ces.append(cur_min_ces)
compounds_filt['AllCE'] = all_min_ces
compounds_filt['NumSpectra'] = num_spectra
compounds_filt['NumComp'] = num_comps
compounds_filt['m/z'] = prec_mzs
return compounds_filt
"""
function compute_optimal_ces:
Helper function for computing optimal POCE (collision_energy_optimizer)
"""
def compute_optimal_ces(score_mat):
row_mat = score_mat[:,:,0]
col_mat = score_mat[:,:,1]
ce_diff_mat = score_mat[:,:,2] # this is difference
cos_sim_mat = score_mat[:,:,3]
ce_abs_diff_mat = np.abs(ce_diff_mat) # this is absolute difference
min_ce_diff_row = np.min(ce_abs_diff_mat, axis=1)
min_ce_diff_mask_row = ce_abs_diff_mat.T == min_ce_diff_row
min_ce_diff_col = np.min(ce_abs_diff_mat, axis=0)
min_ce_diff_mask_col = ce_abs_diff_mat == min_ce_diff_col
min_ce_diff_mask_entries = min_ce_diff_mask_row.T + min_ce_diff_mask_col
    row_lt = (ce_diff_mat <= 0).astype(float) #rows less than
    col_lt = (ce_diff_mat > 0).astype(float) #cols less than
threshold = 0.25
thresh_mat = threshold * (row_lt*row_mat + col_lt*col_mat) #min of col and row, 25% is threshold
min_ce_diff_mask_thresh = ce_abs_diff_mat <= thresh_mat
min_ce_diff_mask = min_ce_diff_mask_entries & min_ce_diff_mask_thresh
fails_thresh = not np.any(min_ce_diff_mask)
if fails_thresh:
min_row_ces = []
else:
min_cos_sim = np.min(cos_sim_mat[min_ce_diff_mask])
min_cos_sim_mask = cos_sim_mat == min_cos_sim
both_mask = min_ce_diff_mask & min_cos_sim_mask
argmin_row_mask = np.max(both_mask,axis=1)
# these are the query CEs that achieve minimum (1 or more)
min_row_ces = row_mat[:,0][argmin_row_mask].tolist()
# print(min_row_ces)
# import sys; sys.exit(0)
return min_row_ces
"""
testing
"""
def test_optimal_ce_1():
query_ce = np.array([1.,3.,5.,7.]).reshape(-1,1)
bg_ce = np.array([1.,2.,4.,6.,7.,10.]).reshape(1,-1)
query_mat = np.broadcast_to(query_ce,[query_ce.shape[0],bg_ce.shape[1]])
background_mat = np.broadcast_to(bg_ce,[query_ce.shape[0],bg_ce.shape[1]])
ce_diff_mat = query_mat - background_mat
sim_mat = np.array([
[.1,.3,.5,.2,.1,.1],
[.2,.1,.1,.1,.1,.1],
[.2,.1,.4,.1,.1,.1],
[.3,.3,.2,.3,.1,.1]
])
score_mat = np.stack([query_mat,background_mat,ce_diff_mat,sim_mat],axis=-1)
expected_minimal_ces = [1.,5.,7.]
computed_minimal_ces = compute_optimal_ces(score_mat)
print(expected_minimal_ces,computed_minimal_ces)
def test_optimal_ce_2():
query_ce = np.array([1.,3.,5.]).reshape(-1,1)
bg_ce = np.array([2.,4.,6.]).reshape(1,-1)
query_mat = np.broadcast_to(query_ce,[query_ce.shape[0],bg_ce.shape[1]])
background_mat = np.broadcast_to(bg_ce,[query_ce.shape[0],bg_ce.shape[1]])
ce_diff_mat = query_mat - background_mat
sim_mat = np.array([
[.1,.1,.1],
[.1,.1,.1],
[.1,.1,.1]
])
score_mat = np.stack([query_mat,background_mat,ce_diff_mat,sim_mat],axis=-1)
expected_minimal_ces = [5.]
computed_minimal_ces = compute_optimal_ces(score_mat)
print(expected_minimal_ces,computed_minimal_ces)
def test_optimal_ce_3():
query_ce = np.array([1.,3.]).reshape(-1,1)
bg_ce = np.array([8.,9.,11.]).reshape(1,-1)
query_mat = np.broadcast_to(query_ce,[query_ce.shape[0],bg_ce.shape[1]])
background_mat = np.broadcast_to(bg_ce,[query_ce.shape[0],bg_ce.shape[1]])
ce_diff_mat = query_mat - background_mat
sim_mat = np.array([
[.1,.2,.3],
[.1,.2,.3]
])
score_mat = np.stack([query_mat,background_mat,ce_diff_mat,sim_mat],axis=-1)
expected_minimal_ces = []
computed_minimal_ces = compute_optimal_ces(score_mat)
print(expected_minimal_ces,computed_minimal_ces)
if __name__ == "__main__":
test_optimal_ce_1()
test_optimal_ce_2()
test_optimal_ce_3()
| 2.28125
| 2
|
annogesiclib/stat_sublocal.py
|
Sung-Huan/ANNOgesic
| 26
|
12775528
|
<reponame>Sung-Huan/ANNOgesic
import csv
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot(subs, total, unknown, strain, prefix_name):
nums = []
nums_no_unknown = []
classes = []
classes_no_unknown = []
width = 0.4
tmp_unknown = ["Unknown", 0]
sort_subs = sorted(subs.items(),
key=lambda x: (x[1]), reverse=True)
for datas in sort_subs:
if datas[0] == "Unknown":
tmp_unknown = datas
else:
nums.append(datas[1])
nums_no_unknown.append(datas[1])
classes.append(datas[0])
classes_no_unknown.append(datas[0])
nums.append(tmp_unknown[1])
classes.append(tmp_unknown[0])
plt.figure(figsize=(12, 16))
plt.subplot(211)
ind = np.arange(len(nums))
plt.bar(ind, nums, width, color='#FF9999')
plt.title('Subcellular localization including Unknown\n', fontsize=24)
plt.ylabel('Amount', fontsize=20)
plt.yticks(fontsize=16)
plt.xlim([0, len(nums) + 1])
plt.xticks(ind+width, classes, rotation=40, fontsize=20, ha='right')
    plt.tight_layout(pad=2)
plt.subplot(212)
ind = np.arange(len(nums_no_unknown))
plt.bar(ind, nums_no_unknown, width, color='#FF9999')
plt.title('Subcellular localization excluding Unknown\n', fontsize=24)
plt.ylabel('Amount', fontsize=20)
plt.xlim([0, len(nums_no_unknown) + 1])
plt.xticks(ind+width, classes_no_unknown, rotation=40,
fontsize=20, ha='right')
plt.yticks(fontsize=16)
    plt.tight_layout(pad=2)
plt.savefig("_".join([prefix_name, strain, "sublocal.png"]))
def read_table(psortb_file):
subs = {}
subs["all_genome"] = {}
total_nums = {}
total_nums["all_genome"] = 0
unknown_nums = {}
unknown_nums["all_genome"] = 0
pre_strain = ""
f_h = open(psortb_file, "r")
for row in csv.reader(f_h, delimiter="\t"):
if not row[0].startswith("#"):
if pre_strain != row[0]:
subs[row[0]] = {}
pre_strain = row[0]
total_nums[row[0]] = 0
unknown_nums[row[0]] = 0
if row[5] not in subs[row[0]].keys():
if row[5] == "Unknown":
unknown_nums[row[0]] += 1
subs[row[0]][row[5]] = 1
total_nums[row[0]] += 1
else:
if row[5] == "Unknown":
unknown_nums[row[0]] += 1
subs[row[0]][row[5]] += 1
total_nums[row[0]] += 1
if row[5] not in subs["all_genome"].keys():
if row[5] == "Unknown":
unknown_nums["all_genome"] += 1
subs["all_genome"][row[5]] = 1
total_nums["all_genome"] += 1
else:
if row[5] == "Unknown":
unknown_nums["all_genome"] += 1
subs["all_genome"][row[5]] += 1
total_nums["all_genome"] += 1
f_h.close()
return subs, total_nums, unknown_nums
def print_file_and_plot(sub, total_nums, unknown_nums,
strain, out_stat, prefix_name):
plot(sub, total_nums[strain], unknown_nums[strain], strain, prefix_name)
out_stat.write(strain + ":\n")
out_stat.write("Total including Unknown is {0}; "
"Total excluding Unknown is {1}\n".format(
total_nums[strain],
total_nums[strain] - unknown_nums[strain]))
for local, num in sub.items():
if local != "Unknown":
out_stat.write(
"\t{0}\t{1}(including Unknown {2}; "
"excluding Unknonwn {3})\n".format(
local, num, float(num) / float(total_nums[strain]),
float(num) / (float(total_nums[strain]) - float(
unknown_nums[strain]))))
else:
out_stat.write("\t{0}\t{1}(including Unknown {2})\n".format(
local, num, float(num) / float(total_nums[strain])))
def stat_sublocal(psortb_file, prefix_name, stat_file):
subs, total_nums, unknown_nums = read_table(psortb_file)
out_stat = open(stat_file, "w")
if len(subs) > 2:
print_file_and_plot(subs["all_genome"], total_nums, unknown_nums,
"all_genome", out_stat, prefix_name)
    for strain, sub in subs.items():
        if strain != "all_genome":
            print_file_and_plot(sub, total_nums, unknown_nums, strain,
                                out_stat, prefix_name)
    out_stat.close()
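# Hedged usage sketch (paths are placeholders; psortb_file is the tab-separated psortb result table):
#   stat_sublocal("psortb_results.tsv", "my_project", "stat_sublocal.txt")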
| 2.09375
| 2
|
Codewars/Beginner_series_sum_of_numbers -(7 kyu).py
|
maxcohen31/A-bored-math-student
| 0
|
12775529
|
# Sum of numbers 3
def get_sum(x, y):
s = 0
if x > y:
for index in range(y, x + 1):
s = s + index
return s
elif x < y:
for index in range(x, y + 1):
s = s + index
return s
else:
return x
print(get_sum(2, 1))
print(get_sum(0, -1))
| 3.90625
| 4
|
PiSnapND/s2a_fm/Snap!Files/Snap!Mobile/arduino/PyMata/pymata_serial.py
|
rasplay/PiSnap-
| 0
|
12775530
|
<filename>PiSnapND/s2a_fm/Snap!Files/Snap!Mobile/arduino/PyMata/pymata_serial.py<gh_stars>0
__author__ = 'Copyright (c) 2013 <NAME> All rights reserved.'
"""
Created on Tue Sep 3 07:12:01 2013
@author: <NAME>
Copyright (c) 2013-14 <NAME> All rights reserved.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import threading
import serial
class PyMataSerial(threading.Thread):
"""
This class manages the serial port for Arduino serial communications
"""
# class variables
arduino = serial.Serial()
port_id = ""
baud_rate = 115200
timeout = 1
command_deque = None
def __init__(self, port_id, command_deque):
"""
Constructor:
@param command_deque: A reference to the deque shared with the _command_handler
"""
self.port_id = port_id
self.command_deque = command_deque
threading.Thread.__init__(self)
self.daemon = True
self.arduino = serial.Serial(self.port_id, self.baud_rate,
timeout=int(self.timeout))
def open(self):
"""
open the serial port using the configuration data
returns a reference to this instance
"""
# open a serial port
print '\nOpening Arduino Serial port %s ' % self.port_id
try:
# in case the port is already open, let's close it and then
#reopen it
self.arduino.close()
self.arduino.open()
return self.arduino
except Exception:
# opened failed - will report back to caller
raise
def close(self):
"""
Close the serial port
return: None
"""
self.arduino.close()
def write(self, data):
"""
write the data to the serial port
return: None
"""
self.arduino.write(data)
def run(self):
"""
This method continually runs. If an incoming character is available on the serial port
it is read and placed on the _command_deque
@return: Never Returns
"""
while 1:
# we can get an OSError: [Errno9] Bad file descriptor when shutting down
# just ignore it
try:
if self.arduino.inWaiting():
c = self.arduino.read()
self.command_deque.append(ord(c))
except OSError:
pass
| 2.625
| 3
|
sympy/concrete/products.py
|
lidavidm/sympy
| 1
|
12775531
|
from __future__ import print_function, division
from sympy.core.containers import Tuple
from sympy.core.core import C
from sympy.core.expr import Expr
from sympy.core.mul import Mul
from sympy.core.singleton import S
from sympy.core.sympify import sympify
from sympy.functions.elementary.piecewise import piecewise_fold
from sympy.polys import quo, roots
from sympy.simplify import powsimp
from sympy.core.compatibility import xrange
class Product(Expr):
r"""Represents unevaluated products.
``Product`` represents a finite or infinite product, with the first
argument being the general form of terms in the series, and the second
argument being ``(dummy_variable, start, end)``, with ``dummy_variable``
taking all integer values from ``start`` through ``end``. In accordance
with long-standing mathematical convention, the end term is included in
the product.
Finite products
===============
For finite products (and products with symbolic limits assumed to be finite)
we follow the analogue of the summation convention described by Karr [1],
especially definition 3 of section 1.4. The product:
.. math::
\prod_{m \leq i < n} f(i)
has *the obvious meaning* for `m < n`, namely:
.. math::
\prod_{m \leq i < n} f(i) = f(m) f(m+1) \cdot \ldots \cdot f(n-2) f(n-1)
with the upper limit value `f(n)` excluded. The product over an empty set is
one if and only if `m = n`:
.. math::
\prod_{m \leq i < n} f(i) = 1 \quad \mathrm{for} \quad m = n
Finally, for all other products over empty sets we assume the following
definition:
.. math::
\prod_{m \leq i < n} f(i) = \frac{1}{\prod_{n \leq i < m} f(i)} \quad \mathrm{for} \quad m > n
It is important to note that above we define all products with the upper
limit being exclusive. This is in contrast to the usual mathematical notation,
but does not affect the product convention. Indeed we have:
.. math::
\prod_{m \leq i < n} f(i) = \prod_{i = m}^{n - 1} f(i)
where the difference in notation is intentional to emphasize the meaning,
with limits typeset on the top being inclusive.
Examples
========
>>> from sympy.abc import a, b, i, k, m, n, x
>>> from sympy import Product, factorial, oo
>>> Product(k,(k,1,m))
Product(k, (k, 1, m))
>>> Product(k,(k,1,m)).doit()
factorial(m)
>>> Product(k**2,(k,1,m))
Product(k**2, (k, 1, m))
>>> Product(k**2,(k,1,m)).doit()
(factorial(m))**2
Wallis' product for pi:
>>> W = Product(2*i/(2*i-1) * 2*i/(2*i+1), (i, 1, oo))
>>> W
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, oo))
Direct computation currently fails:
>>> W.doit()
nan
But we can approach the infinite product by a limit of finite products:
>>> from sympy import limit
>>> W2 = Product(2*i/(2*i-1)*2*i/(2*i+1), (i, 1, n))
>>> W2
Product(4*i**2/((2*i - 1)*(2*i + 1)), (i, 1, n))
>>> W2e = W2.doit()
>>> W2e
2**(-2*n)*4**n*(factorial(n))**2/(RisingFactorial(1/2, n)*RisingFactorial(3/2, n))
>>> limit(W2e, n, oo)
pi/2
By the same formula we can compute sin(pi/2):
>>> from sympy import pi, gamma, simplify
>>> P = pi * x * Product(1 - x**2/k**2,(k,1,n))
>>> P = P.subs(x, pi/2)
>>> P
pi**2*Product(1 - pi**2/(4*k**2), (k, 1, n))/2
>>> Pe = P.doit()
>>> Pe
pi**2*RisingFactorial(1 + pi/2, n)*RisingFactorial(-pi/2 + 1, n)/(2*(factorial(n))**2)
>>> Pe = Pe.rewrite(gamma)
>>> Pe
pi**2*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/(2*gamma(1 + pi/2)*gamma(-pi/2 + 1)*gamma(n + 1)**2)
>>> Pe = simplify(Pe)
>>> Pe
sin(pi**2/2)*gamma(n + 1 + pi/2)*gamma(n - pi/2 + 1)/gamma(n + 1)**2
>>> limit(Pe, n, oo)
sin(pi**2/2)
Products with the lower limit being larger than the upper one:
>>> Product(1/i, (i, 6, 1)).doit()
120
>>> Product(i, (i, 2, 5)).doit()
120
The empty product:
>>> Product(i, (i, n, n-1)).doit()
1
An example showing that the symbolic result of a product is still
valid for seemingly nonsensical values of the limits. Then the Karr
convention allows us to give a perfectly valid interpretation to
those products by interchanging the limits according to the above rules:
>>> P = Product(2, (i, 10, n)).doit()
>>> P
2**(n - 9)
>>> P.subs(n, 5)
1/16
>>> Product(2, (i, 10, 5)).doit()
1/16
>>> 1/Product(2, (i, 6, 9)).doit()
1/16
An explicit example of the Karr summation convention applied to products:
>>> P1 = Product(x, (i, a, b)).doit()
>>> P1
x**(-a + b + 1)
>>> P2 = Product(x, (i, b+1, a-1)).doit()
>>> P2
x**(a - b - 1)
>>> simplify(P1 * P2)
1
And another one:
>>> P1 = Product(i, (i, b, a)).doit()
>>> P1
RisingFactorial(b, a - b + 1)
>>> P2 = Product(i, (i, a+1, b-1)).doit()
>>> P2
RisingFactorial(a + 1, -a + b - 1)
>>> P1 * P2
RisingFactorial(b, a - b + 1)*RisingFactorial(a + 1, -a + b - 1)
>>> simplify(P1 * P2)
1
See Also
========
Sum, summation
product
References
==========
.. [1] <NAME>, "Summation in Finite Terms", Journal of the ACM,
Volume 28 Issue 2, April 1981, Pages 305-350
http://dl.acm.org/citation.cfm?doid=322248.322255
.. [2] http://en.wikipedia.org/wiki/Multiplication#Capital_Pi_notation
.. [3] http://en.wikipedia.org/wiki/Empty_product
"""
__slots__ = ['is_commutative']
def __new__(cls, function, *symbols, **assumptions):
from sympy.integrals.integrals import _process_limits
# Any embedded piecewise functions need to be brought out to the
# top level so that integration can go into piecewise mode at the
# earliest possible moment.
function = piecewise_fold(sympify(function))
if function is S.NaN:
return S.NaN
if not symbols:
raise ValueError("Product variables must be given")
limits, sign = _process_limits(*symbols)
# Only limits with lower and upper bounds are supported; the indefinite
# Product is not supported
if any(len(l) != 3 or None in l for l in limits):
raise ValueError(
'Product requires values for lower and upper bounds.')
obj = Expr.__new__(cls, **assumptions)
arglist = [sign*function]
arglist.extend(limits)
obj._args = tuple(arglist)
obj.is_commutative = function.is_commutative # limits already checked
return obj
@property
def term(self):
return self._args[0]
function = term
@property
def limits(self):
return self._args[1:]
@property
def variables(self):
"""Return a list of the product variables
>>> from sympy import Product
>>> from sympy.abc import x, i
>>> Product(x**i, (i, 1, 3)).variables
[i]
"""
return [l[0] for l in self.limits]
@property
def free_symbols(self):
"""
This method returns the symbols that will affect the value of
the Product when evaluated. This is useful if one is trying to
determine whether a product depends on a certain symbol or not.
>>> from sympy import Product
>>> from sympy.abc import x, y
>>> Product(x, (x, y, 1)).free_symbols
set([y])
"""
from sympy.integrals.integrals import _free_symbols
if self.function.is_zero or self.function == 1:
return set()
return _free_symbols(self)
@property
def is_zero(self):
"""A Product is zero only if its term is zero.
"""
return self.term.is_zero
@property
def is_number(self):
"""
Return True if the Product will result in a number, else False.
Examples
========
>>> from sympy import log, Product
>>> from sympy.abc import x, y, z
>>> log(2).is_number
True
>>> Product(x, (x, 1, 2)).is_number
True
>>> Product(y, (x, 1, 2)).is_number
False
>>> Product(1, (x, y, z)).is_number
True
>>> Product(2, (x, y, z)).is_number
False
"""
return self.function.is_zero or self.function == 1 or not self.free_symbols
def as_dummy(self):
from sympy.integrals.integrals import _as_dummy
return _as_dummy(self)
def doit(self, **hints):
f = self.function
for index, limit in enumerate(self.limits):
i, a, b = limit
dif = b - a
if dif.is_Integer and dif < 0:
a, b = b + 1, a - 1
f = 1 / f
g = self._eval_product(f, (i, a, b))
if g is None:
return self.func(powsimp(f), *self.limits[index:])
else:
f = g
if hints.get('deep', True):
return f.doit(**hints)
else:
return powsimp(f)
def _eval_adjoint(self):
if self.is_commutative:
return self.func(self.function.adjoint(), *self.limits)
return None
def _eval_conjugate(self):
return self.func(self.function.conjugate(), *self.limits)
def _eval_product(self, term, limits):
from sympy.concrete.delta import deltaproduct, _has_simple_delta
from sympy.concrete.summations import summation
from sympy.functions import KroneckerDelta
(k, a, n) = limits
if k not in term.free_symbols:
return term**(n - a + 1)
if a == n:
return term.subs(k, a)
if term.has(KroneckerDelta) and _has_simple_delta(term, limits[0]):
return deltaproduct(term, limits)
dif = n - a
if dif.is_Integer:
return Mul(*[term.subs(k, a + i) for i in xrange(dif + 1)])
elif term.is_polynomial(k):
poly = term.as_poly(k)
A = B = Q = S.One
all_roots = roots(poly, multiple=True)
for r in all_roots:
A *= C.RisingFactorial(a - r, n - a + 1)
Q *= n - r
if len(all_roots) < poly.degree():
arg = quo(poly, Q.as_poly(k))
B = self.func(arg, (k, a, n)).doit()
return poly.LC()**(n - a + 1) * A * B
elif term.is_Add:
p, q = term.as_numer_denom()
p = self._eval_product(p, (k, a, n))
q = self._eval_product(q, (k, a, n))
return p / q
elif term.is_Mul:
exclude, include = [], []
for t in term.args:
p = self._eval_product(t, (k, a, n))
if p is not None:
exclude.append(p)
else:
include.append(t)
if not exclude:
return None
else:
arg = term._new_rawargs(*include)
A = Mul(*exclude)
B = self.func(arg, (k, a, n)).doit()
return A * B
elif term.is_Pow:
if not term.base.has(k):
s = summation(term.exp, (k, a, n))
return term.base**s
elif not term.exp.has(k):
p = self._eval_product(term.base, (k, a, n))
if p is not None:
return p**term.exp
elif isinstance(term, Product):
evaluated = term.doit()
f = self._eval_product(evaluated, limits)
if f is None:
return self.func(evaluated, limits)
else:
return f
def _eval_transpose(self):
if self.is_commutative:
return self.func(self.function.transpose(), *self.limits)
return None
def _eval_subs(self, old, new):
from sympy.integrals.integrals import _eval_subs
return _eval_subs(self, old, new)
def product(*args, **kwargs):
r"""
Compute the product.
    The notation for symbols is similar to the notation used in Sum or
Integral. product(f, (i, a, b)) computes the product of f with
respect to i from a to b, i.e.,
::
b
_____
product(f(n), (i, a, b)) = | | f(n)
| |
i = a
If it cannot compute the product, it returns an unevaluated Product object.
Repeated products can be computed by introducing additional symbols tuples::
>>> from sympy import product, symbols
>>> i, n, m, k = symbols('i n m k', integer=True)
>>> product(i, (i, 1, k))
factorial(k)
>>> product(m, (i, 1, k))
m**k
>>> product(i, (i, 1, k), (k, 1, n))
Product(factorial(k), (k, 1, n))
"""
prod = Product(*args, **kwargs)
if isinstance(prod, Product):
return prod.doit(deep=False)
else:
return prod
| 2.8125
| 3
|
setup.py
|
1grasse/conflowgen
| 0
|
12775532
|
<filename>setup.py
import os.path
from setuptools import setup, find_packages
this_directory = os.path.abspath(os.path.dirname(__file__))
# Load metadata that is also available for the code
metadata = {}
with open(os.path.join(this_directory, "conflowgen", "metadata.py"), encoding="utf-8") as fp:
exec(fp.read(), metadata)
# Load long description
with open(os.path.join(this_directory, 'Readme.md'), encoding='utf-8') as f:
long_description = f.read()
# Define actual setup
# noinspection SpellCheckingInspection
setup(
name='ConFlowGen',
version=metadata['__version__'],
long_description=long_description,
long_description_content_type='text/markdown',
packages=find_packages(),
url='https://github.com/1kastner/conflowgen',
install_requires=[
'pandas', # CSV/Excel import and export
'numpy', # used in combination with pandas for column types
'openpyxl', # optional dependency of pandas that is compulsory for xlsx export
'peewee', # ORM mapper
'enum_tools', # used for documenting enums via decorators
# for creating the visuals
'matplotlib', # default plots such as bar charts, pie charts, etc.
'plotly', # useful for e.g. Sankey diagrams
'seaborn', # exchanges matplotlib color palletes
'kaleido', # plotly depends on this package for SVG export
],
extras_require={
# Only needed to run the unittests and generate the documentation
'dev': [
# testing
'pytest', # running the unit tests
'pytest-cov', # create coverage report
'pytest-github-actions-annotate-failures', # turns pytest failures into action annotations
# build documentation
'sphinx',
'sphinx-rtd-theme',
'sphinx-toolbox',
'myst-parser',
'sphinxcontrib-bibtex',
'sphinx-math-dollar',
'nbsphinx',
'jupyterlab',
# checking code quality
'pylint',
'flake8',
# publish at PyPI
'twine'
]
},
license=metadata['__license__'],
author=metadata['__author__'],
author_email=metadata['__email__'],
    description='A generator for synthetic container flows at maritime container terminals with a focus on yard '
'operations'
)
| 1.804688
| 2
|
app/src/services/__init__.py
|
avillia/weatheRESTua
| 0
|
12775533
|
<reponame>avillia/weatheRESTua
from .weather_fetcher import obtain_weather_for_5_cities
| 1.234375
| 1
|
binaryspaghettisort_package/build/lib/binaryspaghettisort.py
|
Sinnefa/Binary-Spaghetti-Sort-Binary-Spaghetti-Heads-Index
| 0
|
12775534
|
<reponame>Sinnefa/Binary-Spaghetti-Sort-Binary-Spaghetti-Heads-Index<gh_stars>0
import random
import math
import time
class SparseList(list):
def __setitem__(self, index, value):
missing = index - len(self) + 1
if missing > 0:
self.extend([None] * missing)
list.__setitem__(self, index, value)
def __getitem__(self, index):
try: return list.__getitem__(self, index)
except IndexError: return None
def BinarySpaghettiSort(values, reverse=False):
length = len(values)
    output = [0]*length
maximum = max(values)
minimum = min(values)
binary_spaghetti_heads_index=[0]*(maximum-minimum+1)
#binary_spaghetti_heads_index=SparseList() # De-Comment to use sparse data structure
for i,n in enumerate(values):
#if binary_spaghetti_heads_index[(n-minimum)] is None: # De-Comment to use sparse data structure
# binary_spaghetti_heads_index[(n-minimum)] = 0 # De-Comment to use sparse data structure
binary_spaghetti_heads_index[(n-minimum)] += 1<<(length-1-i)
index = 0
start = 0
stop = len(binary_spaghetti_heads_index)
pace = 1
if reverse:
start = len(binary_spaghetti_heads_index)-1
stop = -1
pace = -1
for i in range(start,stop,pace):
xor = binary_spaghetti_heads_index[i]
#if xor is None: # De-Comment to use sparse data structure
# continue # De-Comment to use sparse data structure
while xor > 0:
k = int(math.log2(xor))
            output[index] = values[length-1-k]
xor = xor ^ (1 << k)
index += 1
    return output
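# Worked example (hedged aside): for values = [3, 1, 3], minimum=1 and maximum=3, so the heads
# index has 3 slots. With length=3:
#   i=0, n=3 -> index[2] += 1<<2 = 4;  i=1, n=1 -> index[0] += 1<<1 = 2;  i=2, n=3 -> index[2] += 1<<0 = 1
# giving [2, 0, 5]. Scanning slots from low to high and peeling set bits off each slot (highest
# bit first, i.e. earliest input position first) emits values[1]=1, values[0]=3, values[2]=3 -> [1, 3, 3].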
if __name__ == "__main__":
values = [random.randint(1, 5000) for _ in range(5000)]
start_time = time.time()
vett = BinarySpaghettiSort(values)
print("Binary Spaghetti Sort 5000 random N+ numbers took: ",time.time() - start_time, "seconds")
start_time = time.time()
sorted(values)
print("Python Sort 5000 random N+ numbers took: ",time.time() - start_time, "seconds")
print("Is Binary Spaghetti Sort equal to Python sort?: ", sorted(values)==vett)
print()
# reversed
values = [random.randint(1, 5000) for _ in range(5000)]
start_time = time.time()
vett = BinarySpaghettiSort(values,True)
print("Reversed Binary Spaghetti Sort 5000 random N+ numbers took: ",time.time() - start_time, "seconds")
start_time = time.time()
sorted(values,reverse=True)
print("Reversed Python Sort 5000 random N+ numbers took: ",time.time() - start_time, "seconds")
print("Is Binary Spaghetti Sort equal to Python sort?: ", sorted(values,reverse=True)==vett)
| 3.25
| 3
|
tests/lib/collectors/bigquery.py
|
dfjxs/dftimewolf
| 191
|
12775535
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests the BigQuery collector."""
import unittest
import mock
from dftimewolf.lib import state
from dftimewolf.lib.collectors import bigquery
from dftimewolf import config
class BigQueryCollectorTest(unittest.TestCase):
"""Tests for the BigQuery collector."""
def testInitialization(self):
"""Tests that the collector can be initialized."""
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
self.assertIsNotNone(bq_collector)
@mock.patch('google.cloud.bigquery.Client')
def testQuery(self, mock_bq):
"""Tests that the collector calls the BQ client."""
mock_bq().query().to_dataframe().to_json.return_value = "{'foo':1}"
test_state = state.DFTimewolfState(config.Config)
bq_collector = bigquery.BigQueryCollector(test_state)
bq_collector.SetUp('test_project', 'test_query', 'test_description')
bq_collector.Process()
mock_bq().query.assert_called_with('test_query')
mock_bq().query().to_dataframe().to_json.assert_called_once()
if __name__ == '__main__':
unittest.main()
| 2.609375
| 3
|
day11.py
|
mrpolyonymous/adventofcode2020
| 0
|
12775536
|
# EMPTY = "L"
# OCCUPIED = "#"
# FIXED = "."
EMPTY = 0
OCCUPIED = 1
FIXED = -1
def read_input(file_name):
seats = []
with open(file_name) as input_file:
for line in input_file:
line = line.strip()
seats.append( list(map(map_input, line) ))
return seats
# Failed attempt to speed up the program
def map_input(x):
if x == ".":
return FIXED
elif x == "L":
return EMPTY
elif x == "#":
return OCCUPIED
else:
raise RuntimeError("Unhandled input")
# The following rules are applied to every seat simultaneously:
# If a seat is empty (L) and there are no occupied seats adjacent to it, the seat becomes occupied.
# If a seat is occupied (#) and four or more seats adjacent to it are also occupied, the seat becomes empty.
# Otherwise, the seat's state does not change.
# Floor (.) never changes; seats don't move, and nobody sits on the floor.
# adjacent = left, right, or diagonal from the seat
def part1(seats):
# My answer: 2303
return run_iterations(seats, 1, 4)
# Part 2: like part 1, but different rules for state change
def part2(seats):
# my answer: 2057
return run_iterations(seats, max(len(seats), len(seats[0])), 5)
def run_iterations(seats, max_extent, occupied_limit):
num_cycles = 0
num_rows = len(seats)
num_columns = len(seats[0])
seats_copy = [row.copy() for row in seats]
new_seat_state = [ [FIXED for j in range(num_columns)] for i in range(num_rows) ]
while True:
num_cycles += 1
num_changes = 0
for row in range(num_rows):
for column in range(num_columns):
current_state = seats_copy[row][column]
if current_state != FIXED:
occupied = count_occupied(seats_copy, row, column, max_extent)
if current_state == EMPTY and occupied == 0:
new_seat_state[row][column] = OCCUPIED
num_changes += 1
elif current_state == OCCUPIED and occupied >= occupied_limit:
new_seat_state[row][column] = EMPTY
num_changes += 1
else:
new_seat_state[row][column] = current_state
if num_changes == 0 or num_cycles > 1000:
break
# else:
# print("Iteration {} num changes: {}".format(num_cycles, num_changes))
tmp = new_seat_state
new_seat_state = seats_copy
seats_copy = tmp
num_occupied = 0
for row in seats_copy:
for seat in row:
if seat == OCCUPIED:
num_occupied += 1
return num_occupied
def count_occupied(seats, row, column, max_extent):
occupied = 0
offsets = [-1, 0, 1]
num_rows = len(seats)
num_columns = len(seats[0])
for r in offsets:
for c in offsets:
if r == 0 and c == 0:
continue
for i in range(1, max_extent + 1):
offset_row = row + r * i
if offset_row < 0 or offset_row >= num_rows:
break
offset_column = column + c * i
if offset_column < 0 or offset_column >= num_columns:
break
current_state = seats[offset_row][offset_column]
if current_state == OCCUPIED:
occupied += 1
break
elif current_state == EMPTY:
break
return occupied
# This is pathetically slow compared to the same loop in Java: the nested pure-Python loops in
# run_iterations/count_occupied pay per-element interpreter overhead on every cycle
# (vectorizing the grid with numpy would close most of the gap).
seats = read_input('day11_input.txt')
num_seats_filled = part1(seats)
print("Part 1: number of seats filled : {}".format(num_seats_filled))
num_seats_filled = part2(seats)
print("Part 2: number of seats filled {}".format(num_seats_filled))
| 3.65625
| 4
|
tasks/medium-150/nyan-cat/exploit.py
|
C4T-BuT-S4D/nordctf-2019-finals
| 5
|
12775537
|
#!/usr/bin/env python3
import re
import sys
import jwt
import requests
from Crypto.PublicKey import RSA
from Crypto.Util.number import inverse
def recover_d(n, e, p):
q = n // p
phi = (p - 1) * (q - 1)
return inverse(e, phi)
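# Hedged sanity check with textbook-sized numbers (unrelated to the challenge key):
#   recover_d(3233, 17, 61) == 2753, since 3233 = 61 * 53, phi = 60 * 52 = 3120 and (17 * 2753) % 3120 == 1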
def main(url):
n = 16158503035655503650357438344334975980222051334857742016065172713762327569433945446598600705761456731844358980460949009747059779575245460547544076193224141560315438683650498045875098875194826053398028819192033784138396109321309878080919047169238085235290822926018152521443787945791354642779162369073575510464676307738745137368236340488336468229438062757591864495832519542800353637624510899960409953559448637052209587086542698189198304456374481505084668512138820151645511298243115434132458196567714649584872980882921433923431323025741438248940081524739535046106494564661952078162997692569171205852699326923237775905163
e = 65537
p = 0b10000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111011001
d = recover_d(n, e, p)
rsa = RSA.construct((n, e, d))
session = {'broken': False}
cookie = jwt.encode(session, rsa.exportKey(), algorithm='RS512')
response = requests.get(url, cookies={'session': cookie.decode()}).text
flag = re.search(r'flag\{.*?\}', response).group(0)
print(flag)
if __name__ == '__main__':
main(sys.argv[1])
| 2.734375
| 3
|
FSISAC_STIX_Parser.py
|
ch4meleon/FSISAC_Stix_Downloader
| 3
|
12775538
|
#!/usr/bin/env python
"""
FSISAC STIX Parser (FSISAC_STIX_Parser.py)
Written by <NAME> (<EMAIL>)
"""
import os
import sys
import socket
import types
import collections
import json
import re
import io
import urllib2
import dateutil
import datetime
import time
import pytz
import pprint
import getpass
import csv
import iocextract
import dicttoxml
import libtaxii as t
import libtaxii.messages_11 as tm11
import libtaxii.clients as tc
import lxml.etree
from stix.core import STIXPackage
from StringIO import StringIO
from urlparse import urlparse
from optparse import OptionParser
from optparse import BadOptionError
from optparse import AmbiguousOptionError
from stix.core import STIXPackage, STIXHeader
from stix.utils.parser import EntityParser
from stix.common import vocabs
from stix.common.vocabs import VocabString
from stix.common.vocabs import IndicatorType
from xml.etree.ElementTree import XML, XMLParser, tostring, TreeBuilder
class FSISAC_STIX_Parser:
def __init__(self):
pass
""" Extract observables from STIX. Used by extractObservables function """
def extractObservable(self, obs, values):
typ = obs["properties"]["xsi:type"]
val = None
if typ == "AddressObjectType":
# Handle if Address_Value is a plain string or one with datatype
if isinstance(obs["properties"]["address_value"], basestring):
val = obs["properties"]["address_value"]
elif 'value' in obs["properties"]["address_value"]:
val = obs["properties"]["address_value"]["value"]
elif typ == "URIObjectType" or typ == "DomainNameObjectType" or typ == "HostnameObjectType":
val = obs["properties"]["value"]
if 'value' in val:
val = obs["properties"]["value"]["value"]
else:
val = obs["properties"]["value"]
elif typ == "UserAccountObjectType":
val = obs["properties"]["username"]
elif typ == "FileObjectType":
val = []
theList = obs["properties"]["hashes"][0]
if len(theList['simple_hash_value']) > 2:
val.append( theList['simple_hash_value'] )
else:
val.append( obs["properties"]["hashes"][0]['simple_hash_value']['value'] )
if val:
if ( not isinstance(val, basestring) ) and isinstance(val, collections.Iterable):
for addr in val:
values.append( addr )
else:
values.append( val )
else:
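            # NOTE: `args` below is not defined in this class or in the imports shown; the original
            # script presumably exposes a module-level OptionParser result named `args`.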
if args[0].strict:
raise Exception("Encountered unsupported CybOX observable type: " + typ)
else:
print >> sys.stderr, "Encountered unsupported CybOX observable type: " + typ + ", ignoring..."
""" Extract observables from STIX """
def extractObservables(self, indicators):
values = []
STIX_TYPE = ""
for indicator in indicators:
# Check if we were passed a list of indicators, or observables
obs = indicator
# print("===========================")
# print("OBS:")
# pprint.pprint(obs)
# print("===========================")
# print("")
if "observable" in indicator:
obs = indicator["observable"]
### To handle FSISAC which put data in 'description' ###
IS_FSISAC = False
if "observable" in indicator:
tmp_obs = indicator['observable']
if 'idref' in tmp_obs:
if "fsisac" or "NCCIC" in tmp_obs['idref']:
IS_FSISAC = True
if IS_FSISAC == True:
STIX_TYPE = "type1"
# print "FOUND FSISAC"
#iocs = dict()
#title = "TESTING"
#iocs = {'title' : '', 'domain':[], 'ip':[], 'email':[], 'hash':[], 'url':[], 'hash':[], 'yara':[], 'other' : []}
title = indicator['title']
description = indicator["description"]
iocs = self.parse_indicators_from_description_string(description, title)
return (STIX_TYPE, iocs)
sys.exit(0)
#return iocs
else:
try:
STIX_TYPE = "other"
if 'object' in obs:
self.extractObservable(obs["object"], values)
elif 'observable_composition' in obs:
for observable in obs["observable_composition"]["observables"]:
if 'object' in observable:
self.extractObservable(observable["object"], values )
else:
print "EXCEPTION999"
print "-" * 100
print "INDICATOR:"
print indicator
print "-" * 100
raise Exception("Unknown Object Type!! Please Investigate")
# if IS_FSISAC == True:
# print "FOUND FSISAC"
# description = indicator["description"]
# title = indicator["title"]
# print "-" * 100
# print "INDICATOR:"
# print indicator
# print "-" * 100
# raise Exception("BYEBYEBYE")
# iocs = self.parse_indicators_from_description_string(description)
# iocs['title'] = title
# # return iocs
# else:
# raise Exception("Unknown Object Type!! Please Investigate")
except:
print >> sys.stderr, "Could not handle observable/indicator:\n"
pprint.pprint( indicator, sys.stderr )
raise
# print "=" * 100
# print "extractObservables - values:"
# print values
# print "=" * 100
return (STIX_TYPE, values)
# Processes a STIX package dictionary
def process_stix_dict(self, stix_dict):
        iocs = {'title' : '', 'domain':[], 'ip':[], 'email':[], 'hash':[], 'url':[], 'yara':[], 'other' : []}
result = []
key = ""
value = ""
""" Retrieve title """
try:
title = stix_dict['observables']['observables'][0]['title']
iocs['title'] = title
except:
# Do something if necessary
pass
if "observables" in stix_dict:
result.extend(self.extractObservables(stix_dict["observables"]["observables"]))
if "indicators" in stix_dict:
result.extend(self.extractObservables(stix_dict["indicators"]))
# print "=" * 100
# print "VALUES2"
# print result
# print "=" * 100
stix_type = result[0]
if stix_type == "type1": # No need to process, already in IOC dict format
return result[1]
values = result[1]
if len(values) > 0:
for item in values:
try:
## send data to stdout if needed and/or save to a simple text file.
if re.match("^(http|https)", item):
u = urlparse(item)
# print 'Web Site: %s | Path: %s' % ( u.netloc, u.path )
iocs['url'].append(u.netloc)
elif re.match("[^@]+@[^@]+\.[^@]+", item ):
# print 'Email Address: %s' % ( item )
iocs['email'].append(item)
elif re.match("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", item):
# print 'IP Address: %s' % ( item )
iocs['ip'].append(item)
elif re.match("^:", item):
item = item[2:]
myitem = 'http://' + item
d = urlparse(myitem)
item = d.netloc
# print 'Domain: %s' % ( d.netloc )
iocs['domain'].append(d.netloc)
# elif re.match("^(([a-z0-9]\-*[a-z0-9]*){1,63}\.){1,255}$", item):
# # print 'Domain: %s' % ( item )
# iocs['domain'].append(item)
elif re.match("^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}$", item):
data = item.split(":")
#print data
# print 'IP Address: %s | Dest Port: %s' % ( data[0], data[1] )
iocs['ip'].append(data[0])
elif re.match(r"^([a-fA-F\d]{32})$", item):
# print 'Hash: %s' % ( item )
iocs['hash'].append(item)
elif re.match(r"^([a-fA-F\d]{40})$", item):
# print 'Hash: %s' % ( item )
iocs['hash'].append(item)
elif re.match(r"^([a-fA-F\d]{64})$", item):
# print 'Hash: %s' % ( item )
iocs['hash'].append(item)
else:
# print 'Indicator: %s' % ( item )
iocs['other'].append(item)
except ValueError:
print >> sys.stderr, "Could not parse values.."
print >> sys.stderr, item
raise
# print "END" * 100
# print iocs
# print "END" * 100
return iocs
""" Extract IOC(s) from the DESCRIPTION string (string type) """
def parse_indicators_from_description_string(self, description_string, title):
# print type(description_string)
iocs = {'title' : title, 'domain':[], 'ip':[], 'email':[], 'hash':[], 'url':[], 'yara':[], 'other' : []}
on9strings = {'[.]':'.', 'hxxp':'http', '[@]':'@'}
# Convert the first STIXPackage dictionary into another STIXPackage via the from_dict() method.
# Pattern for domain / email and IP addresses
raw_iocs = re.findall(r'[a-zA-Z0-9-\.]*\[\.?\@?\][a-zA-Z0-9-\.\[\.\@\]]*[-a-zA-Z0-9@:%_\+.~#?&//=]*', description_string)
# print(len(raw_iocs))
for i in range(len(raw_iocs)):
# Replace the on9 strings
for on9string in on9strings:
raw_iocs[i] = raw_iocs[i].replace(on9string, on9strings[on9string])
# Import those IOCs into the array.
if re.match(r'.*[@]+', raw_iocs[i]):
iocs['email'].append(raw_iocs[i])
iocs['email'] = list(set(iocs['email']))
elif re.match(r'.*[//].*', raw_iocs[i]):
iocs['url'].append(raw_iocs[i])
iocs['url'] = list(set(iocs['url']))
elif re.match(r'.*[a-zA-Z]', raw_iocs[i]):
if re.match("^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$", raw_iocs[i]):
iocs['domain'].append(raw_iocs[i])
iocs['domain'] = list(set(iocs['domain']))
# Extract hashes by their plugin
for hash_extracted in iocextract.extract_hashes(description_string):
iocs['hash'].append(hash_extracted)
iocs['hash'] = list(set(iocs['hash']))
# Extract Yara rule
for yara_extracted in iocextract.extract_yara_rules(description_string):
iocs['yara'].append(yara_extracted)
iocs['yara'] = list(set(iocs['yara']))
# Extract IP
for ip_extracted in iocextract.extract_ips(description_string, refang=True):
# Use regex to validate the IP format
if re.match(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", ip_extracted):
iocs['ip'].append(ip_extracted)
iocs['ip'] = list(set(iocs['ip']))
# for key in iocs:
# for item in iocs[key]:
# print(key + ":" + item)
return iocs
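# Illustrative (hypothetical) use of the description parser above; the sample text and
# the expected buckets are assumptions for documentation, not real FS-ISAC data:
#
#   parser = FSISAC_STIX_Parser()
#   iocs = parser.parse_indicators_from_description_string(
#       "C2 at evil-domain[.]com, contact actor[@]example[.]org", "Sample advisory")
#
# After refanging ('[.]' -> '.', '[@]' -> '@', 'hxxp' -> 'http'), 'actor@example.org'
# should end up in iocs['email'] and 'evil-domain.com' in iocs['domain']; hashes, YARA
# rules and IPs are extracted separately via iocextract.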
""" Extract IOC(s) from the DESCRIPTION section in FSISAC Stix """
def _parse_indicators_from_stix_description(self, xml_content):
iocs = {'title' : '', 'domain':[], 'ip':[], 'email':[], 'hash':[], 'url':[], 'yara':[], 'other' : []}
on9strings = {'[.]':'.', 'hxxp':'http', '[@]':'@'}
# Parse input file
stix_package = STIXPackage.from_xml(xml_content)
# Convert STIXPackage to a Python
stix_dict = stix_package.to_dict()
# Extract description from the indicator (suitable for indicator only)
# print "-" * 100
# print stix_dict
# print "-" * 100
description = stix_dict["indicators"][0]["description"]
# Extract title
title = stix_dict["indicators"][0]["title"]
iocs['title'] = [title]
# Convert the first STIXPackage dictionary into another STIXPackage via the from_dict() method.
# Pattern for domain / email and IP addresses
raw_iocs = re.findall(r'[a-zA-Z0-9-\.]*\[\.?\@?\][a-zA-Z0-9-\.\[\.\@\]]*[-a-zA-Z0-9@:%_\+.~#?&//=]*', description)
# print(len(raw_iocs))
for i in range(len(raw_iocs)):
# Replace the on9 strings
for on9string in on9strings:
raw_iocs[i] = raw_iocs[i].replace(on9string, on9strings[on9string])
# Import those IOCs into the array.
if re.match(r'.*[@]+', raw_iocs[i]):
iocs['email'].append(raw_iocs[i])
iocs['email'] = list(set(iocs['email']))
elif re.match(r'.*[//].*', raw_iocs[i]):
iocs['url'].append(raw_iocs[i])
iocs['url'] = list(set(iocs['url']))
elif re.match(r'.*[a-zA-Z]', raw_iocs[i]):
if re.match("^([a-z0-9]+(-[a-z0-9]+)*\.)+[a-z]{2,}$", raw_iocs[i]):
iocs['domain'].append(raw_iocs[i])
iocs['domain'] = list(set(iocs['domain']))
# Extract hashes by their plugin
for hash_extracted in iocextract.extract_hashes(description):
iocs['hash'].append(hash_extracted)
iocs['hash'] = list(set(iocs['hash']))
# Extract Yara rule
for yara_extracted in iocextract.extract_yara_rules(description):
iocs['yara'].append(yara_extracted)
iocs['yara'] = list(set(iocs['yara']))
# Extract IP
for ip_extracted in iocextract.extract_ips(description, refang=True):
# Use regex to validate the IP format
if re.match(r"\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b", ip_extracted):
iocs['ip'].append(ip_extracted)
iocs['ip'] = list(set(iocs['ip']))
# for key in iocs:
# for item in iocs[key]:
# print(key + ":" + item)
return iocs
""" Convert iocs dict to JSON """
def convert_to_json(self, iocs):
result = {}
# Get title first
title = ""
for ioc in iocs:
if ioc == "title":
try:
title = iocs[ioc]
except:
pass
l = []
for ioc in iocs:
if ioc != "title":
for item in iocs[ioc]:
new_dict_item = dict()
new_dict_item['title'] = title
new_dict_item['type'] = ioc
new_dict_item['value'] = item
l.append(new_dict_item)
result = json.dumps({'IOCS' : l})
return result
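# Shape of the JSON produced by convert_to_json (the values shown are illustrative only):
#   {"IOCS": [{"title": "Sample advisory", "type": "domain", "value": "evil-domain.com"},
#             {"title": "Sample advisory", "type": "ip", "value": "203.0.113.7"}]}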
""" Parse by stix file """
def parse_stix_file(self, filename):
stix_package = STIXPackage.from_xml(filename)
stixParser = FSISAC_STIX_Parser()
iocs = stixParser.process_stix_dict(stix_package.to_dict())
j = stixParser.convert_to_json(iocs)
return j
def test():
# Process a XML file on disk
stix_package = STIXPackage.from_xml(sys.argv[1])
stixParser = FSISAC_STIX_Parser()
iocs = stixParser.process_stix_dict(stix_package.to_dict())
j = stixParser.convert_to_json(iocs)
print j
# def test2():
# content = open(sys.argv[1]).read()
# sio = StringIO(content)
# stixParser = FSISAC_STIX_Parser()
# iocs = stixParser._parse_indicators_from_stix_description(sio)
# j = stixParser.convert_to_json(iocs)
# parsed = json.loads(j)
# print(json.dumps(parsed, indent=4, sort_keys=True))
# def test3():
# from glob import glob
# stixParser = FSISAC_STIX_Parser()
# stix_files = glob("stix_files/*.xml")
# for s in stix_files:
# print "Processing file...(%s)" % s
# r = stixParser.parse_stix_file(s)
# print (r)
# print ""
# if __name__ == "__main__":
# test()
| 2.375
| 2
|
loops/test_break_stmt.py
|
Rhoynar/pysel
| 1
|
12775539
|
<gh_stars>1-10
# When user enters 'exit', exit program
while True:
inp = raw_input('> ')
if inp.lower() == 'exit':
break
else:
print(inp)
| 3.53125
| 4
|
tests/__init__.py
|
Mgancita/data-preprocessor
| 8
|
12775540
|
"""tests namespace."""
| 1.132813
| 1
|
src/mclatte/mclatte/dataset.py
|
Jason-Y-Z/McLatte
| 0
|
12775541
|
<gh_stars>0
"""
Dataset and DataLoader designed for representing
Multi-cause ITE problem setting.
"""
# Author: <NAME> (<EMAIL>)
# License: BSD 3 clause
from typing import Optional
import numpy as np
import pytorch_lightning as pl
import torch
from torch.utils.data import Dataset, DataLoader, random_split
class McLatteDataset(Dataset):
"""
Pytorch Dataset defined to provide input for McLatte.
"""
def __init__(
self,
X: np.ndarray,
M: np.ndarray,
Y_pre: np.ndarray,
Y_post: np.ndarray,
A: np.ndarray,
T: np.ndarray,
) -> None:
super().__init__()
self._X = X
self._M = M
self._Y_pre = Y_pre
self._Y_post = Y_post
self._A = A
self._T = T
def __len__(self) -> int:
return self._X.shape[0]
def __getitem__(self, idx):
x = torch.from_numpy(self._X[idx] * self._M[idx]).float() # masked covariates
a = torch.from_numpy(self._A[idx]).float() # treatment
t = torch.from_numpy(self._T[idx]).float() # measurement time
mask = torch.from_numpy(self._M[idx]).float() # masking vectors
y_pre = (
None if self._Y_pre is None else torch.from_numpy(self._Y_pre[idx]).float()
) # pre-treatment outcomes
y_post = torch.from_numpy(self._Y_post[idx]).float() # post-treatment outcomes
return x, a, t, mask, y_pre, y_post
class McLatteDataModule(pl.LightningDataModule):
"""
Pytorch Lightning DataModule defined to provide
input for McLatte.
"""
def __init__(
self,
X: np.ndarray,
M: np.ndarray,
Y_pre: np.ndarray,
Y_post: np.ndarray,
A: np.ndarray,
T: np.ndarray,
batch_size: int,
use_persistent_workers: bool = True,
):
super().__init__()
self._X = X
self._M = M
self._Y_pre = Y_pre
self._Y_post = Y_post
self._A = A
self._T = T
self._batch_size = batch_size
self._use_persistent_workers = use_persistent_workers
self._train_dataset, self._valid_dataset = None, None
def setup(self, stage: Optional[str] = None) -> None:
if stage in (None, "fit"):
# Train-Validation split
full_dataset = McLatteDataset(
self._X,
self._M,
self._Y_pre,
self._Y_post,
self._A,
self._T,
)
seq_length = len(full_dataset)
self._train_dataset, self._valid_dataset = random_split(
full_dataset, [round(seq_length * 0.8), round(seq_length * 0.2)]
)
def train_dataloader(self):
return DataLoader(
self._train_dataset,
batch_size=self._batch_size,
shuffle=True,
num_workers=16,
persistent_workers=self._use_persistent_workers,
)
def val_dataloader(self):
return DataLoader(
self._valid_dataset,
batch_size=self._batch_size,
num_workers=4,
persistent_workers=self._use_persistent_workers,
)
def test_dataloader(self):
return DataLoader(
self._valid_dataset,
batch_size=self._batch_size,
num_workers=4,
persistent_workers=self._use_persistent_workers,
)
def predict_dataloader(self):
return DataLoader(
self._valid_dataset,
batch_size=self._batch_size,
num_workers=4,
persistent_workers=self._use_persistent_workers,
)
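# Minimal, hypothetical usage sketch of the DataModule above; the array shapes below are
# assumptions chosen purely for illustration, not requirements imposed by McLatte:
#
#   N, T_PRE, T_POST, D = 128, 10, 5, 4
#   dm = McLatteDataModule(
#       X=np.random.randn(N, T_PRE, D), M=np.ones((N, T_PRE, D)),
#       Y_pre=np.random.randn(N, T_PRE), Y_post=np.random.randn(N, T_POST),
#       A=np.random.randn(N, 3), T=np.tile(np.arange(T_PRE, dtype=float), (N, 1)),
#       batch_size=32,
#   )
#   dm.setup("fit")
#   x, a, t, mask, y_pre, y_post = next(iter(dm.train_dataloader()))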
| 2.59375
| 3
|
tests/test_tests.py
|
mehrdad-shokri/retdec-regression-tests-framework
| 21
|
12775542
|
<reponame>mehrdad-shokri/retdec-regression-tests-framework
"""
Tests for the :mod:`regression_tests.test` module.
"""
import unittest
from regression_tests.test import Test
from regression_tests.test_settings import TestSettings
class TestTests(unittest.TestCase):
"""Tests for `Test`."""
def test_settings_returns_given_test_settings(self):
TEST_SETTINGS = TestSettings(input='file.exe')
test = Test(TEST_SETTINGS)
self.assertEqual(test.settings, TEST_SETTINGS)
def test_settings_cannot_be_changed(self):
test = Test(TestSettings(input='file.exe'))
with self.assertRaises(AttributeError):
test.settings = TestSettings(input='file.exe')
class TestSettingsCombinationsTests(unittest.TestCase):
"""Tests for `Test.settings_combinations()`."""
def test_returns_empty_list_upon_no_combinations(self):
class MyTest(Test):
pass
self.assertEqual(MyTest.settings_combinations(), [])
def test_returns_single_combination_upon_single_combination(self):
class MyTest(Test):
settings = TestSettings(input='file.exe')
self.assertEqual(
MyTest.settings_combinations(),
[TestSettings(input='file.exe')]
)
def test_returns_three_combinations_upon_two_settings_with_three_combinations(self):
class MyTest(Test):
settings1 = TestSettings(input='file.exe')
settings2 = TestSettings(input='file.exe', arch=['x86', 'arm'])
self.assertEqual(MyTest.settings_combinations(), [
TestSettings(input='file.exe'),
TestSettings(input='file.exe', arch='x86'),
TestSettings(input='file.exe', arch='arm')
])
def test_recognizes_settings_for_tool(self):
class MyTest(Test):
settings1 = TestSettings(input='file.exe', arch='x86')
settings2 = TestSettings(tool='fileinfo', input='file.exe')
self.assertEqual(MyTest.settings_combinations(only_for_tool='fileinfo'), [
TestSettings(tool='fileinfo', input='file.exe'),
])
| 2.53125
| 3
|
code/data_prep/mcd_to_adata.py
|
TheJacksonLaboratory/endometriosis-scrnaseq
| 0
|
12775543
|
<filename>code/data_prep/mcd_to_adata.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
from typing import Union
from io import BytesIO
import scanpy as sc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from skimage.io import imread
from skimage.morphology import white_tophat, disk
from scipy.ndimage import maximum_filter
from imctools.io.mcd.mcdparser import McdParser
from imctools.io.mcd.mcdxmlparser import McdXmlParser
from stardist.models import StarDist2D
from csbdeep.utils import Path, normalize
from skimage.measure import find_contours, grid_points_in_poly
from skimage.filters import gaussian
from skimage import morphology
from skimage import exposure
_curr_loc = Path(__file__).parent
SPILLMAT_CSV = (_curr_loc / ".." / ".." / "databases" / "spillover.csv").resolve()
#SPILLMAT_CSV = Path("/projects/robson-lab/research/imc/data/spillover.csv")
def cross_dist(u, v, w=None):
if w is None:
w = np.zeros_like(u)
return np.sign(np.cross(u - w, v - w))
def close_contour(contour, xmax, ymax):
if len(contour) < 5:
return contour
c1, c0 = contour[[-1, 0]]
if np.equal(c1[0], c0[0]):
return contour
elif np.equal(c1[1], c0[1]):
return contour
corners = np.array([[0,0], [0,ymax], [xmax,ymax], [xmax,0]])
crosses = cross_dist(c1, c0, corners)
return np.vstack((
contour,
corners[crosses < 0]
))
def get_img_by_key(adata, by):
n_obs = adata.shape[0]
shape = adata.obsm["X_spatial"].max(axis=0)[::-1].astype(int) + 1
if np.prod(shape) != n_obs:
shape = adata.obsm["X_spatial_lowres"].max(axis=0)[::-1].astype(int) + 1
img = adata.obs[by].values.reshape(shape)
return img
def paint_tissue(adata, by="log1p_total_intensity", key_added="in_tissue"):
img = get_img_by_key(adata, by)
contours = find_contours(img, fully_connected="high")
mask = np.ones_like(img, dtype=bool)
print(img.shape)
for contour in contours:
c = close_contour(contour, *img.shape)
mask[grid_points_in_poly(img.shape, c)] = False
adata.obs[key_added] = mask.ravel()
adata.obs[key_added] = adata.obs[key_added].astype("category")
def paint_tissue_fast(adata, by="log1p_total_intensity", key_added_in="in_tissue", key_added_out="background", sigma=3, threshold=0.3):
img = get_img_by_key(adata, by)
rmin, rmax = img.min(), img.max()
if (rmax - rmin) > 100:
print("Data range is very large, going to sublinear transform")
img = np.sqrt(img)
print(img.min(), img.max())
blurred = gaussian(img, sigma=sigma)
p = np.percentile(blurred, q=threshold*100)
mask = morphology.remove_small_holes(
morphology.remove_small_objects(
blurred > p, 500
), 500
)
adata.obs[key_added_in] = mask.ravel().astype(int)
adata.obs[key_added_out] = (~mask.ravel()).astype(int)
def shape_from_xy(arr):
assert len(arr.shape) > 1
return tuple(arr.max(axis=np.argmax(arr.shape))[::-1] + 1)
def detect_nuclei(data_1ch):
model = StarDist2D.from_pretrained("2D_versatile_fluo")
labels, details = model.predict_instances(normalize(data_1ch))
return np.vstack(np.nonzero(labels)).T
def load_spillmat(infile=None):
if not infile:
infile = SPILLMAT_CSV
return pd.read_csv(infile, index_col=0)
def align_spillmat(*, spillmat=None, input_metals=None):
if spillmat is None:
spillmat = load_spillmat()
if input_metals is None:
input_metals = set(spillmat.index.union(spillmat.columns))
sm = spillmat.reindex(index=input_metals, columns=input_metals, fill_value=0)
filled = sm.values
np.fill_diagonal(filled, 1.0)
return pd.DataFrame(filled, index=sm.index, columns=sm.columns)
def compensate_long(long, spillmat):
comp_ = long @ np.linalg.inv(spillmat.T)
comp_ = np.clip(comp_, 0, comp_.max()).astype(np.float32)
return comp_
def _load_imc_acquisition(mcd_file, acq_label, get_metadata=True, preview=False):
parser = McdParser(mcd_file)
if acq_label is None:
acq_label = parser.session.acquisition_ids[0]
if isinstance(acq_label, int):
aid = acq_label
acq = parser.session.acquisitions.get(aid)
else:
for aid, acq in parser.session.acquisitions.items():
if acq.description == acq_label:
print(f"Found {acq_label} with ID {aid}")
break
else:
raise ValueError(f"Label {acq_label} not found in {mcd_file}.")
preab_image = None
if acq.has_before_ablation_image:
preab_image = imread(BytesIO(
parser.get_before_ablation_image(aid)
))
if get_metadata:
xml = McdXmlParser(parser.get_mcd_xml(), mcd_file).metadata
#raw = parser._get_acquisition_raw_data(acq)
raw = parser.get_acquisition_data(aid)
if preview:
plt.imshow(raw.get_image_by_label("DNA3"), cmap="Blues")
return acq, raw.image_data, preab_image, xml
def _coarse_grain(data, window_size=10):
from skimage.util.shape import view_as_blocks
window = (window_size, window_size, 1)
pad_x = window_size - (data.shape[0] % window_size)
pad_y = window_size - (data.shape[1] % window_size)
padded = np.pad(data, ((0, pad_x), (0, pad_y), (0, 0)))
blocks = view_as_blocks(padded, window)
# expected to be x, y, c because data has been rolled prior to this function
ndim = blocks.ndim
return blocks.sum(axis=tuple(range(ndim-3, ndim)))
def clip_hot_pixels(img, hp_filter_shape, hp_threshold=None):
"""
See
https://github.com/BodenmillerGroup/ImcPluginsCP/blob/a53bb7e1dea60b859d57677ea9a15281fa84d493/plugins/smoothmultichannel.py#L417
"""
if hp_filter_shape[0] % 2 != 1 or hp_filter_shape[1] % 2 != 1:
raise ValueError(
"Invalid hot pixel filter shape: %s" % str(hp_filter_shape)
)
if hp_threshold is None:
hp_threshold = np.percentile(img, q=98)
hp_filter_footprint = np.ones(hp_filter_shape)
hp_filter_footprint[
int(hp_filter_shape[0] / 2), int(hp_filter_shape[1] / 2)
] = 0
max_img = maximum_filter(
img, footprint=hp_filter_footprint, mode="reflect"
)
hp_mask = img - max_img > hp_threshold
img = img.copy()
img[hp_mask] = max_img[hp_mask]
return img
def clean_channel(img, hp_filter_shape=(9, 9), noise_blob_radius=3):
# this should be done on total intensity, not per channel
#wh = white_tophat(img, selem=disk(noise_blob_radius))
#cleaned = img - wh
#return clip_hot_pixels(cleaned, hp_filter_shape)
return clip_hot_pixels(img, hp_filter_shape)
def mcd_to_anndata(
mcd_file: Union[str, Path],
library_id: str,
*,
acquisition_label: Union[str, int, None] = None,
compensate: bool = True,
remove_hot_pixels: bool = True,
remove_unused_channels: bool = True,
preview: bool = False
):
"""
Converts an MCD acqusition to an AnnData object
"""
mcd_file = Path(mcd_file)
assert mcd_file.exists()
acquisition, raw_data, preab_image, metadata = _load_imc_acquisition(
mcd_file, acquisition_label, get_metadata=True, preview=preview
)
raw_data = np.moveaxis(raw_data, 0, 2)
# get long data
# compensate
compensated = None
if compensate:
print("Compensating")
n_channels = acquisition.n_channels
long = raw_data.reshape(-1, n_channels)
spillmat = align_spillmat(input_metals=acquisition.channel_names)
compensated = compensate_long(long, spillmat).reshape(raw_data.shape)
# clean
cleaned = None
if remove_hot_pixels:
print("Removing small blobs and hot pixels")
x_ = raw_data
if compensated is not None:
x_ = compensated
# only doing maximum filter --- not sure if the conway filter is needed/worth it
cleaned = np.zeros_like(x_)
for k in range(x_.shape[-1]):
cleaned[..., k] = clean_channel(x_[...,k])
tot_int = cleaned.sum(axis=-1)
wh = white_tophat(tot_int, selem=disk(3))
mask = np.where((tot_int - wh) == 0)
cleaned[mask] = 0
var_info = pd.DataFrame({
"ab_mass": acquisition.channel_masses,
"ab_label": acquisition.channel_labels,
"ab_name": acquisition.channel_names
})
var_info["ab_label"] = var_info.ab_label.fillna(var_info.ab_name)
var_info.set_index("ab_label", inplace=True)
print("Finding nuclei")
nuc_inds = np.where(var_info.index.str.startswith("DNA"))
nuc_x = raw_data
if cleaned is not None:
nuc_x = cleaned
elif compensated is not None:
nuc_x = compensated
nuc_channel = nuc_x[..., nuc_inds].sum(axis=-1)
nuc_points = detect_nuclei(nuc_channel)
# filter out garbage channels:
drop_inds = np.logical_or(
var_info.ab_name.str.extract(r"([A-Za-z]+)(\d+)", expand=True).agg(lambda r: "".join(r[::-1]), axis=1) == var_info.index,
var_info.ab_name == var_info.index
)
var_info = var_info.loc[~drop_inds, :]
keep_inds = np.where(~drop_inds)
raw_data = raw_data[..., keep_inds]
adata = sc.AnnData(
raw_data.reshape(-1, len(var_info)),
var = var_info,
obs = pd.DataFrame(index=pd.RangeIndex(stop=np.prod(raw_data.shape[:-1])))
)
sc.pp.calculate_qc_metrics(adata, expr_type="intensity", var_type="antibodies", percent_top=None, inplace=True)
# We need the layers:
if compensated is not None:
adata.layers["compensated"] = compensated[..., keep_inds].reshape(adata.shape)
if cleaned is not None:
adata.layers["cleaned"] = cleaned[..., keep_inds].reshape(adata.shape)
# add mcd metadata
adata.uns["mcd"] = metadata
# Add in spatial data
ys, xs = np.meshgrid(np.arange(raw_data.shape[1]), np.arange(raw_data.shape[0]))
adata.obsm["X_spatial"] = np.vstack((ys.ravel(), xs.ravel())).T
# We consider the pre-ablation image the 'hi-res' image
adata.uns["spatial"] = {
library_id: dict(
images=dict(hires=preab_image),
scalefactors=dict(spot_diameter_fullres=1, tissue_hires_scalef=1)
)
}
nuclei_counts = np.zeros(raw_data.shape[:-1])
nuclei_counts[nuc_points[:,0], nuc_points[:,1]] = 1
adata.obs["nuclei_counts"] = nuclei_counts.ravel()
# ID tissue
paint_tissue_fast(adata)
return adata
def coarse_grain_imc_adata(
adata: sc.AnnData,
*,
layer: Union[str, None] = None,
window_size: int = 10,
):
img_shape = shape_from_xy(adata.obsm["X_spatial"])
img_shape += (adata.shape[1],)
if layer is not None:
x_ = adata.layers[layer].reshape(img_shape)
else:
x_ = np.asarray(adata.X).reshape(img_shape)
coarse = _coarse_grain(x_, window_size)
nuc_counts = _coarse_grain(adata.obs["nuclei_counts"].values.reshape(img_shape[:-1])[...,None], window_size)
coarse_adata = sc.AnnData(
coarse.reshape(-1, adata.shape[1]),
var = adata.var,
obs = pd.DataFrame(index=pd.RangeIndex(stop=np.prod(coarse.shape[:-1]))),
uns = adata.uns.copy()
)
sc.pp.calculate_qc_metrics(coarse_adata, expr_type="intensity", var_type="antibodies", percent_top=None, inplace=True)
# Add in spatial data
ys, xs = np.meshgrid(np.arange(coarse.shape[1]), np.arange(coarse.shape[0]))
coarse_adata.obsm["X_spatial"] = (np.vstack((ys.ravel(), xs.ravel())).T * window_size) + window_size/2
coarse_adata.obsm["X_spatial_lowres"] = np.vstack((ys.ravel(), xs.ravel())).T
# We consider the pre-ablation image the 'hi-res' image
lib_id = list(coarse_adata.uns["spatial"].keys())[0]
paint_tissue_fast(coarse_adata)
coarse_adata.uns["spatial"][lib_id]["scalefactors"] = dict(
spot_diameter_fullres=window_size, tissue_hires_scalef=1, tissue_lowres_scalef=1/window_size
)
coarse_adata.obs["nuclei_counts"] = nuc_counts.ravel()
return coarse_adata
def preprocess_imc_data(adata, vst_cofactor=5.):
adata_ = adata[adata.obs.in_tissue.astype(bool), :].copy()
sc.pp.filter_cells(adata_, min_counts=50)
adata_.layers["cleaned"] = adata_.X.copy()
adata_.X = np.arcsinh(adata_.X / vst_cofactor)
adata_.layers["vst"] = adata_.X.copy()
maxs = np.percentile(adata_.X, q=98, axis=0)
adata_.X = np.clip(adata_.X, 0, maxs)
adata_.layers["vst-clipped"] = adata_.X.copy()
print("filtering done")
redux_vars = adata_.var_names[~adata_.var_names.str.startswith("DNA")]
adata_.obsm["X_redux"] = adata_[:, redux_vars].X.copy()
sc.pp.neighbors(adata_, n_neighbors=15, use_rep="X_redux", metric="correlation")
print("neighbors done")
sc.tl.umap(adata_, min_dist=0.5)
sc.tl.leiden(adata_, resolution=1)
return adata_
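# Hypothetical end-to-end sketch of the functions above; the file name, library id,
# layer and window size are placeholders, not values taken from this repository:
#
#   adata = mcd_to_anndata("sample.mcd", "sample_roi", compensate=True)
#   coarse = coarse_grain_imc_adata(adata, layer="cleaned", window_size=10)
#   processed = preprocess_imc_data(coarse, vst_cofactor=5.0)
#   sc.pl.umap(processed, color="leiden")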
def convert_imc_adata_to_text(
adata,
outpath=None,
cluster_key="leiden",
cluster_prefix="Cluster_",
layer=None
):
df = sc.get.obs_df(adata, adata.var_names.tolist(), layer=layer)
coords = pd.DataFrame(adata.obsm["X_spatial_lowres"], columns=list("XY"))
coords["Z"] = 0
unknown = pd.DataFrame(
np.zeros_like(coords.values, dtype=np.uint8),
columns=["Start_push", "End_push", "Pushes_duration"],
index=df.index
)
coords.index = df.index
clusters = pd.get_dummies(adata.obs[cluster_key], prefix=cluster_prefix, prefix_sep="")
res = pd.concat((unknown, coords, df, clusters), axis=1)
# now we need to make this rectangular
shape = res[["X","Y"]].max(axis=0).values + 1
size = np.prod(shape)
rect = np.ones(shape)
rect[res.X, res.Y] = 0
nonzeros = np.nonzero(rect)
empty = pd.DataFrame(np.zeros((len(nonzeros[0]), len(res.columns))), columns=res.columns)
empty["X"] = nonzeros[0]
empty["Y"] = nonzeros[1]
final = pd.concat((res, empty), axis=0)
#final = final[unknown.columns.tolist() + ["Y", "X"] + final.columns[5:].tolist()]
final = final.sort_values(["Y", "X"])
final["Z"] = np.arange(len(final), dtype=np.uint16)
for col in final.columns:
final[col] = pd.to_numeric(final[col], downcast="unsigned")
if outpath is not None:
final.to_csv(outpath, index=False, sep="\t")
return final
| 2.03125
| 2
|
weibo/serve.py
|
CooperLuan/collector
| 0
|
12775544
|
<filename>weibo/serve.py
from flask import Flask, request, Response, json
from pymongo import MongoClient
client = MongoClient('192.168.1.202')
db = client['weibo-search']
app = Flask(__name__)
@app.route("/", methods=["GET", "POST"])
def hello():
result = db.webpages.insert({
'collected': dict(request.form.items()),
})
print(result)
response = Response(json.dumps({'ok': True}))
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Methods', 'POST')
response.headers.add('Access-Control-Max-Age', '1000')
return response
@app.route("/seed", methods=["GET"])
def get_seed():
doc = db.seeds.find_one({'status': 'enqueue'})
if doc:
seed = {'url': doc['url']}
else:
seed = None
response = Response(json.dumps({'seed': seed}))
response.headers.add('Access-Control-Allow-Origin', '*')
response.headers.add('Access-Control-Allow-Methods', 'GET')
response.headers.add('Access-Control-Max-Age', '1000')
if seed:
db.seeds.update({
'_id': doc['_id'],
}, {
'$set': {'status': 'pending'},
})
return response
if __name__ == "__main__":
app.run(host='0.0.0.0', port=8716, debug=True)
| 2.828125
| 3
|
pigit/common/table.py
|
zlj-zz/pygittools
| 0
|
12775545
|
<gh_stars>0
# -*- coding:utf-8 -*-
from abc import ABC, abstractmethod
from shutil import get_terminal_size
from copy import deepcopy
from typing import Generator
from .style import BoxSymbol, Fx
from .str_utils import get_width
class TableTooWideError(Exception):
pass
class _baseTable(ABC):
"""Docstring for baseTable."""
each_column_width: list
def __init__(self, frame_format: str, nil: str, title: str = ""):
if frame_format not in BoxSymbol.rune.keys():
frame_format = "bold"
self.rune = BoxSymbol.rune[frame_format]
self.nil = nil
self.title = title
# compute column widths as soon as the table is created.
self.fix_data()
@property
def display_title(self) -> str:
if self.title:
line_max = sum(self.each_column_width) + len(self.each_column_width) + 1
_t = "{}{:^%s}{}" % line_max
return _t.format(Fx.b, self.title, Fx.ub)
return "\r"
@property
def row_width(self) -> int:
"""Gets the width of table."""
return sum(self.each_column_width) + len(self.each_column_width) + 1
def real_len(self, text: str):
"""Gets the true width of the element on the command line."""
return sum([get_width(ord(ch)) for ch in text])
@abstractmethod
def fix_data(self):
"""
Calculate the width of each column in the table, and realize
``each_column_width`` in the real sense.
There may be different calculation methods for different forms
of tables, so this method has subclass implementation.
"""
@abstractmethod
def table_generator(self) -> Generator:
"""
Returns a table generator that is used to effectively generate
a table by row instead of returning all at once.
There may be different calculation methods for different forms
of tables, so this method has subclass implementation.
Yields:
Generator
"""
def print(self) -> None:
"""Output the table."""
term_width, _ = get_terminal_size()
if term_width < self.row_width:
raise TableTooWideError("Terminal is not wide enough.")
print(self.display_title)
g = self.table_generator()
for i in g:
print(i, end="")
class Table(_baseTable):
"""Create a table from list.
data format:
header = ["name", "age", "gender"]
data = [
["bob", "20", "f"],
["tom", "19", "f"],
]
tb = Table(header, data)
tb.print()
table format:
┏━━━━┳━━━┳━━━━━━┓
┃name┃age┃gender┃
┣━━━━╋━━━╋━━━━━━┫
┃bob ┃20 ┃f ┃
┃tom ┃19 ┃f ┃
┗━━━━┻━━━┻━━━━━━┛
"""
def __init__(
self,
header: list,
data: list[list],
title: str = "",
frame_format: str = "bold",
nil: str = "",
):
# Check header.
if not isinstance(header, list):
raise TypeError("title need is a list.")
self.header = deepcopy(header)
# Check data.
if not isinstance(data, list):
raise TypeError("data need is a list.")
for item in data:
if not isinstance(item, list):
raise TypeError("each item of data need is a list.")
self.data = deepcopy(data)
self.header_len = len(self.header)
self.each_column_width = [self.real_len(i) for i in self.header]
super().__init__(frame_format, nil, title=title)
def fix_data(self):
header_len = self.header_len
for item in self.data:
# Complete missing element.
item_len = len(item)
if item_len < header_len:
item.extend([self.nil] * (header_len - item_len))
elif item_len > header_len:
item = item[0:header_len]
# Calc each max.
self._adjust_each_max(item)
def add_row(self, row: list) -> None:
# XXX: whether need deepcopy
row_len = len(row)
if row_len < self.header_len:
row.extend([self.nil] * (self.header_len - row_len))
elif row_len > self.header_len:
row = row[0 : self.header_len]
self._adjust_each_max(row)
self.data.append(row)
def _adjust_each_max(self, cells: list) -> None:
for i, x in enumerate(cells):
x_len = self.real_len(Fx.pure(x))
self.each_column_width[i] = max(self.each_column_width[i], x_len)
def table_generator(self) -> Generator:
each_column_width = self.each_column_width
rune = self.rune
indexes = range(self.header_len)
# top and title line.
yield f"{rune[2]}{rune[-3].join([rune[0] * i for i in each_column_width])}{rune[3]}\n"
yield rune[1]
for idx in indexes:
width = each_column_width[idx]
cell = self.header[idx]
cell_len = self.real_len(Fx.pure(cell))
yield cell + " " * (width - cell_len) + rune[1]
yield "\n"
yield f"{rune[6]}{rune[-1].join([rune[0] * i for i in each_column_width])}{rune[7]}\n"
# all rows.
for cells in self.data:
yield rune[1]
for idx in indexes:
width = each_column_width[idx]
cell = cells[idx]
cell_len = self.real_len(Fx.pure(cell))
yield cell + " " * (width - cell_len) + rune[1]
yield "\n"
# bottom
yield rune[4] + rune[-2].join([rune[0] * i for i in each_column_width]) + rune[
5
] + "\n"
class dTable(_baseTable):
"""Create table from a special format dict.
d_data format: dict[str, dict[str, str]]
d_data = {
'Fruit color': {
'apple': 'red',
'grape': 'purple',
},
'Animal color': {
'cattle': 'yellow',
'sheep': 'white',
},
}
table format:
┏━━━━━━━━━━━━━┓
┃ Fruit color ┃
┣━━━━━━┳━━━━━━┫
┃apple ┃red ┃
┃grape ┃purple┃
┣━━━━━━┻━━━━━━┫
┃Animal color ┃
┣━━━━━━┳━━━━━━┫
┃cattle┃yellow┃
┃sheep ┃white ┃
┣━━━━━━┻━━━━━━┫
┃ ────END┃
┗━━━━━━━━━━━━━┛
"""
def __init__(
self, d_data: dict, title: str = "", frame_format: str = "bold", nil: str = ""
):
# check data whether right.
if not isinstance(d_data, dict):
raise TypeError("d_data need is a dict.")
for item in d_data.values():
if not isinstance(item, dict):
raise TypeError("the item of d_data need is a dict.")
self.data = deepcopy(d_data)
super().__init__(frame_format, nil, title=title)
def fix_data(self):
self.each_column_width = each_column_width = [0, 0]
max_subtitle_len = 0
r_len = self.real_len
for subtitle, sub_dict in self.data.items():
max_subtitle_len = max(max_subtitle_len, r_len(subtitle))
for k, v in sub_dict.items():
each_column_width[0] = max(each_column_width[0], r_len(Fx.pure(k)))
each_column_width[1] = max(each_column_width[1], r_len(Fx.pure(v)))
# Ensure the table is rendered correctly when the sub title is wider
# than the combined width of the item columns.
sum_each_max = sum(each_column_width)
if max_subtitle_len > sum_each_max:
each_column_width[1] += max_subtitle_len - sum_each_max
def table_generator(self) -> Generator:
rune = self.rune
each_column_width = self.each_column_width
row_width = self.row_width
r_len = self.real_len
_end_template = "%(flag)s{:>%(number)s}%(flag)s\n" % {
"flag": rune[1],
"number": row_width - 2,
}
sub_top = f"{rune[6]}{rune[-3].join([rune[0] * i for i in each_column_width])}{rune[7]}\n"
sub_bottom = f"{rune[6]}{rune[-2].join([rune[0] * i for i in each_column_width])}{rune[7]}\n"
# top
yield f"{rune[2]}{rune[0] * (row_width - 2)}{rune[3]}\n"
for subtitle, sub_dict in self.data.items():
# subtitle part
yield rune[1]
subtitle_len = r_len(Fx.pure(subtitle))
_div, _mod = divmod(row_width - 2 - subtitle_len, 2)
yield f"{_div * ' '}{subtitle}{(_div + _mod) * ' '}{rune[1]}\n"
# sub dict
yield sub_top
for k, v in sub_dict.items():
k_len = r_len(Fx.pure(k))
yield rune[1] + k + (each_column_width[0] - k_len) * " " + rune[1]
v_len = r_len(Fx.pure(v))
yield v + (each_column_width[1] - v_len) * " " + rune[1] + "\n"
yield sub_bottom
# bottom
yield _end_template.format("────END")
yield f"{rune[4]}{rune[0] * (row_width - 2)}{rune[5]}\n"
| 2.71875
| 3
|
day9.1/main.py
|
lfscheidegger/adventofcode2018
| 1
|
12775546
|
<filename>day9.1/main.py
#!/usr/bin/python
"""
--- Day 9: Marble Mania ---
You talk to the Elves while you wait for your navigation system to initialize. To pass the time, they introduce you to their favorite marble game.
The Elves play this game by taking turns arranging the marbles in a circle according to very particular rules. The marbles are numbered starting with 0 and increasing by 1 until every marble has a number.
First, the marble numbered 0 is placed in the circle. At this point, while it contains only a single marble, it is still a circle: the marble is both clockwise from itself and counter-clockwise from itself. This marble is designated the current marble.
Then, each Elf takes a turn placing the lowest-numbered remaining marble into the circle between the marbles that are 1 and 2 marbles clockwise of the current marble. (When the circle is large enough, this means that there is one marble between the marble that was just placed and the current marble.) The marble that was just placed then becomes the current marble.
However, if the marble that is about to be placed has a number which is a multiple of 23, something entirely different happens. First, the current player keeps the marble they would have placed, adding it to their score. In addition, the marble 7 marbles counter-clockwise from the current marble is removed from the circle and also added to the current player's score. The marble located immediately clockwise of the marble that was removed becomes the new current marble.
For example, suppose there are 9 players. After the marble with value 0 is placed in the middle, each player (shown in square brackets) takes a turn. The result of each of those turns would produce circles of marbles like this, where clockwise is to the right and the resulting current marble is in parentheses:
[-] (0)
[1] 0 (1)
[2] 0 (2) 1
[3] 0 2 1 (3)
[4] 0 (4) 2 1 3
[5] 0 4 2 (5) 1 3
[6] 0 4 2 5 1 (6) 3
[7] 0 4 2 5 1 6 3 (7)
[8] 0 (8) 4 2 5 1 6 3 7
[9] 0 8 4 (9) 2 5 1 6 3 7
[1] 0 8 4 9 2(10) 5 1 6 3 7
[2] 0 8 4 9 2 10 5(11) 1 6 3 7
[3] 0 8 4 9 2 10 5 11 1(12) 6 3 7
[4] 0 8 4 9 2 10 5 11 1 12 6(13) 3 7
[5] 0 8 4 9 2 10 5 11 1 12 6 13 3(14) 7
[6] 0 8 4 9 2 10 5 11 1 12 6 13 3 14 7(15)
[7] 0(16) 8 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[8] 0 16 8(17) 4 9 2 10 5 11 1 12 6 13 3 14 7 15
[9] 0 16 8 17 4(18) 9 2 10 5 11 1 12 6 13 3 14 7 15
[1] 0 16 8 17 4 18 9(19) 2 10 5 11 1 12 6 13 3 14 7 15
[2] 0 16 8 17 4 18 9 19 2(20)10 5 11 1 12 6 13 3 14 7 15
[3] 0 16 8 17 4 18 9 19 2 20 10(21) 5 11 1 12 6 13 3 14 7 15
[4] 0 16 8 17 4 18 9 19 2 20 10 21 5(22)11 1 12 6 13 3 14 7 15
[5] 0 16 8 17 4 18(19) 2 20 10 21 5 22 11 1 12 6 13 3 14 7 15
[6] 0 16 8 17 4 18 19 2(24)20 10 21 5 22 11 1 12 6 13 3 14 7 15
[7] 0 16 8 17 4 18 19 2 24 20(25)10 21 5 22 11 1 12 6 13 3 14 7 15
The goal is to be the player with the highest score after the last marble is used up. Assuming the example above ends after the marble numbered 25, the winning score is 23+9=32 (because player 5 kept marble 23 and removed marble 9, while no other player got any points in this very short example game).
Here are a few more examples:
10 players; last marble is worth 1618 points: high score is 8317
13 players; last marble is worth 7999 points: high score is 146373
17 players; last marble is worth 1104 points: high score is 2764
21 players; last marble is worth 6111 points: high score is 54718
30 players; last marble is worth 5807 points: high score is 37305
What is the winning Elf's score?
"""
from collections import defaultdict
import multiprocessing
import re
import sys
REGEX = re.compile("(\d+) players; last marble is worth (\d+) points")
class Input:
def __init__(self, n_players, last_marble_value):
self.n_players = n_players
self.last_marble_value = last_marble_value
def __repr__(self):
return "Input(n_players=%s, last_marble_value=%s)" % (self.n_players, self.last_marble_value)
def get_inputs():
result = []
try:
while True:
match = REGEX.match(raw_input())
result.append(Input(int(match.groups()[0]), int(match.groups()[1])))
except EOFError:
return result
def solve(input):
board = [0]
current_marble = 0
next_marble_to_place = 1
scores = list(0 for idx in range(input.n_players))
current_player = -1
while next_marble_to_place <= input.last_marble_value:
one_to_clockwise = (board.index(current_marble) + 1) % len(board)
if next_marble_to_place % 23 == 0:
# Scoring pass
seven_to_counterclockwise = (board.index(current_marble) - 7) % len(board)
six_to_counterclockwise = (seven_to_counterclockwise + 1) % len(board)
scores[current_player] += next_marble_to_place + board[seven_to_counterclockwise]
board = board[:seven_to_counterclockwise] + board[six_to_counterclockwise:]
current_marble = board[seven_to_counterclockwise]
elif one_to_clockwise == len(board) - 1:
board.append(next_marble_to_place)
current_marble = next_marble_to_place
else:
board = board[:one_to_clockwise + 1] + [next_marble_to_place] + board[one_to_clockwise + 1:]
current_marble = next_marble_to_place
next_marble_to_place += 1
current_player = (current_player + 1) % input.n_players
print max(scores)
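# A hedged, illustrative alternative to solve() above (not part of the original
# solution): the same game simulated in O(n) using collections.deque rotations,
# with the current marble kept at the right end of the deque. Player attribution
# uses (marble - 1) % n_players; since every scoring marble shifts players by the
# same offset, the winning (maximum) score is identical either way.
def solve_with_deque(input):
    from collections import deque
    circle = deque([0])
    scores = [0] * input.n_players
    for marble in range(1, input.last_marble_value + 1):
        if marble % 23 == 0:
            # Scoring turn: keep this marble and the one 7 counter-clockwise of current.
            circle.rotate(7)
            scores[(marble - 1) % input.n_players] += marble + circle.pop()
            circle.rotate(-1)
        else:
            # Regular turn: insert between the marbles 1 and 2 clockwise of current.
            circle.rotate(-1)
            circle.append(marble)
    print(max(scores))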
def main():
inputs = get_inputs()
for input in inputs:
solve(input)
# break
if __name__ == "__main__":
main()
| 4.46875
| 4
|
june/social/twitter.py
|
mrmuxl/june
| 1
|
12775547
|
from tornado import escape
from tornado.web import asynchronous
from tornado.options import options
from tornado.auth import TwitterMixin
from june.lib.decorators import require_user, require_system
from june.lib.handler import BaseHandler
from june.models import Social
class TwitterHandler(BaseHandler, TwitterMixin):
def check_xsrf_cookie(self):
# disable xsrf check
return
def _oauth_consumer_token(self):
# reset method to get consumer token
return {'key': options.twitter_key, 'secret': options.twitter_secret}
@require_user
@asynchronous
def get(self):
if 'twitter' in self.get_user_social(self.current_user.id):
enabled = self.get_argument('enabled', 'a')
if enabled not in ('y', 'n'):
self.redirect('/account/setting')
return
q = self.db.query(Social).filter_by(service='twitter')
t = q.filter_by(user_id=self.current_user.id).first()
t.enabled = enabled
self.db.add(t)
self.db.commit()
self.cache.delete('social:%s' % self.current_user.id)
self.redirect('/account/setting')
return
if self.get_argument('oauth_token', None):
self.get_authenticated_user(self._on_auth)
return
self.authorize_redirect()
def _on_auth(self, user):
if not user:
self.write('Twitter auth failed')
self.finish()
return
access_token = escape.json_encode(user['access_token'])
network = Social(service='twitter', user_id=self.current_user.id,
token=access_token)
self.db.add(network)
self.db.commit()
self.cache.delete('social:%s' % self.current_user.id)
self.redirect('/account/setting')
@require_system
@asynchronous
def post(self):
content = self.get_argument('content', None)
token = self.get_argument('token', None)
if not (content and token):
self.finish('deny')
return
token = escape.json_decode(token)
status = escape.utf8(content)
self.twitter_request(
'/statuses/update',
post_args={'status': status},
access_token=token,
callback=self._on_post)
def _on_post(self, entry):
if not entry:
self.finish('fail')
return
self.finish('ok')
handlers = [
('/social/twitter', TwitterHandler),
]
| 2.1875
| 2
|
setup.py
|
california-civic-data-coalition/django-calaccess-scraped-data
| 1
|
12775548
|
<reponame>california-civic-data-coalition/django-calaccess-scraped-data
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from distutils.core import Command
from setuptools import setup, find_packages
def read(fname):
with open(os.path.join(os.path.dirname(__file__), fname)) as f:
return f.read()
class TestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
from django.conf import settings
settings.configure(
DATABASES={
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3'
}
},
INSTALLED_APPS=('calaccess_scraped',),
MIDDLEWARE_CLASSES=()
)
from django.core.management import call_command
import django
django.setup()
call_command('test', 'calaccess_scraped')
setup(
name='django-calaccess-scraped-data',
version='3.1.0',
author='California Civic Data Coalition',
author_email='<EMAIL>',
url='http://django-calaccess.californiacivicdata.org',
description='A Django app to scrape campaign-finance data from '
'the California Secretary of State’s CAL-ACCESS website',
long_description=read('README.rst'),
license='MIT',
packages=find_packages(),
include_package_data=True,
zip_safe=False, # because we're including static files
cmdclass={'test': TestCommand},
install_requires=(
'django>=3.2',
'pytz',
'bs4',
'selenium',
),
classifiers=(
'Development Status :: 5 - Production/Stable',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Framework :: Django',
'Framework :: Django :: 3.2',
'License :: OSI Approved :: MIT License'
),
project_urls={
'Project': 'https://www.californiacivicdata.org/',
'Documentation': 'https://django-calaccess.californiacivicdata.org',
'Funding': 'https://www.californiacivicdata.org/about/',
'Source': 'https://github.com/california-civic-data-coalition/django-calaccess-scraped-data',
'Testing': 'https://github.com/california-civic-data-coalition/django-calaccess-scraped-data/actions/workflows/tests.yaml',
'Tracker': 'https://github.com/california-civic-data-coalition/django-calaccess-scraped-data/issues'
},
)
| 1.96875
| 2
|
{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/constants.py
|
yankeexe/cookiecutter-python-cli
| 4
|
12775549
|
<filename>{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/constants.py
"""
Constants for the CLI.
"""
WELCOME_MESSAGE = "Welcome to [bold green]{{cookiecutter.project_slug}}!![/bold green] Your CLI is working!"
| 1.257813
| 1
|
python-crons/check-online-offers.py
|
dantunescost/antunedo
| 0
|
12775550
|
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
from pymongo.errors import DuplicateKeyError
from lib.mongoConnector import connect_to_mongodb, pop_offers_already_saved
from lib.queryBuilder import query_immo_offers, get_amount_of_pages
def get_offers_ids(immotype, total_pages, sort="asc"):
ids = []
if total_pages == 0:
return
i = 0
while i <= total_pages and i <= 500:
result = query_immo_offers(i, immotype, sort)
print("Processing data... " + "{0:.2f}".format((i / total_pages) * 100) + "%, i=" + str(i) + ", total="
+ str(total_pages) + '\n')
if result:
for k in result:
ids.append(k['id'])
i += 1
return ids
if __name__ == "__main__":
client = connect_to_mongodb()
db = client['antunedo']
mongo_collection = db['offers']
online_offers = []
categories_done = []
print('starting ...')
for j in range(0, 52):
immotype_id = str(j + 1)
total_page_count = get_amount_of_pages(immotype_id)
print("\nNouvel Immotype : " + immotype_id + ", avec un total de " + str(total_page_count) + " pages\n\n")
if total_page_count > 0:
if total_page_count > 1000:
# TODO : filter this immo category into smaller elements
print('DEAAAAAD --- immotype ID : ' + immotype_id + '\n\n')
elif total_page_count > 500:
online_offers += get_offers_ids(immotype_id, total_page_count)
online_offers += get_offers_ids(immotype_id, total_page_count - 500, "desc")
categories_done.append(int(immotype_id))
else:
online_offers += get_offers_ids(immotype_id, total_page_count)
categories_done.append(int(immotype_id))
mongo_collection.update_many({'id': {'$in': online_offers}}, {'$set': {'is_online': True}})
print(online_offers)
online_offers = []
print(categories_done)
client.close()
| 2.6875
| 3
|