| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
edafos/soil/__init__.py
|
nickmachairas/edafos
| 0
|
12781551
|
<filename>edafos/soil/__init__.py
from .physics import *
from .profile import SoilProfile
| 1.179688
| 1
|
Packages/Patterns_Package/setup.py
|
saribalarakeshreddy/Python-3.9.0
| 0
|
12781552
|
<reponame>saribalarakeshreddy/Python-3.9.0
from setuptools import setup,find_packages
classifiers = [
'Development Status :: ',
'Intended Audience :: Education',
'Operating System :: windows 10',
'License :: MIT License',
'Programming Language :: Python :: 3.9.0'
]
setup(
name='Patterns_Package',
version='0.0.1',
description='patterns of Capital and Small Alphabets, Numbers,some other Symbols',
long_description=open('README.txt').read()+'\n\n'+open('CHANGELOG.txt').read(),
url='https://github.com/saribalarakeshreddy/Python-3.9.0/tree/main/Packages',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=classifiers,
keywords='patterns',
install_requires=['']
)
| 1.171875
| 1
|
backend/auth/__init__.py
|
golani04/bug-tracker
| 0
|
12781553
|
from fastapi import APIRouter
from backend.auth.login import router
auth_routers = APIRouter()
auth_routers.include_router(router, tags=["Auth"])
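# Illustrative wiring sketch (the `app` object below is hypothetical, not part of this module):
# from fastapi import FastAPI
# app = FastAPI()
# app.include_router(auth_routers)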
| 1.789063
| 2
|
office-plugin/windows-office/program/wizards/common/Properties.py
|
jerrykcode/kkFileView
| 6,660
|
12781554
|
#
# This file is part of the LibreOffice project.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# This file incorporates work covered by the following license notice:
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed
# with this work for additional information regarding copyright
# ownership. The ASF licenses this file to you under the Apache
# License, Version 2.0 (the "License"); you may not use this file
# except in compliance with the License. You may obtain a copy of
# the License at http://www.apache.org/licenses/LICENSE-2.0 .
#
from com.sun.star.beans import PropertyValue
'''
Simplifies handling Arrays of PropertyValue.
To make a use of this class, instantiate it, and call
the put(propName,propValue) method.
caution: propName should always be a String.
When finished, call the getProperties() method to get an array of the set properties.
'''
class Properties(dict):
@classmethod
def getPropertyValue(self, props, propName):
for i in props:
if propName == i.Name:
return i.Value
raise AttributeError ("Property '" + propName + "' not found.")
@classmethod
def hasPropertyValue(self, props, propName):
for i in props:
if propName == i.Name:
return True
return False
@classmethod
def getProperties(self, _map):
pv = []
for k,v in _map.items():
pv.append(self.createProperty(k, v))
return pv
@classmethod
def createProperty(self, name, value, handle=None):
pv = PropertyValue()
pv.Name = name
pv.Value = value
if handle is not None:
pv.Handle = handle
return pv
def getProperties1(self):
return self.getProperties(self)
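# Illustrative usage sketch (property name/value are hypothetical; requires a UNO runtime for PropertyValue):
# props = Properties()
# props["Hidden"] = True
# pv_array = props.getProperties1()                 # list of PropertyValue objects
# Properties.getPropertyValue(pv_array, "Hidden")   # -> True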
| 2.5
| 2
|
fedml_core/trainer/model_trainer.py
|
arj119/FedML
| 0
|
12781555
|
<filename>fedml_core/trainer/model_trainer.py
from abc import ABC, abstractmethod
import torch
import torch.nn as nn
class ModelTrainer(ABC):
"""Abstract base class for federated learning trainer.
1. The goal of this abstract class is to be compatible to
any deep learning frameworks such as PyTorch, TensorFlow, Keras, MXNET, etc.
2. This class can be used in both server and client side
3. This class is an operator which does not cache any states inside.
"""
def __init__(self, model, args=None):
self.model = model
self.id = 0
self.args = args
def set_id(self, trainer_id):
self.id = trainer_id
@abstractmethod
def get_model_params(self):
pass
@abstractmethod
def set_model_params(self, model_parameters):
pass
@abstractmethod
def train(self, train_data, device, args=None):
pass
@abstractmethod
def test(self, test_data, device, args=None):
return self.test_model(self.model, test_data, device, args)
@abstractmethod
def test_on_the_server(self, train_data_local_dict, test_data_local_dict, device, args=None) -> bool:
pass
def get_client_optimiser(self, model, optimiser_name, lr):
if optimiser_name == "sgd":
optimiser = torch.optim.SGD(model.parameters(), lr=lr)
else:
beta1, beta2 = 0.5, 0.999
optimiser = torch.optim.Adam(model.parameters(),
lr=lr,
eps=1e-08,
weight_decay=1e-2,
amsgrad=True,
betas=(beta1, beta2)
)
return optimiser
def test_model(self, model, test_data, device, args):
model.to(device)
model.eval()
metrics = {
'test_correct': 0,
'test_loss': 0,
'test_precision': 0,
'test_recall': 0,
'test_total': 0
}
'''
stackoverflow_lr is the task of multi-label classification
please refer to following links for detailed explainations on cross-entropy and corresponding implementation of tff research:
https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451
https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131
'''
if args.dataset == "stackoverflow_lr":
criterion = nn.BCELoss(reduction='sum').to(device)
else:
criterion = nn.CrossEntropyLoss().to(device)
y_pred = []
y_true = []
with torch.no_grad():
for batch_idx, (x, target) in enumerate(test_data):
x = x.to(device)
target = target.to(device)
pred = model(x)
loss = criterion(pred, target)
if args.dataset == "stackoverflow_lr":
predicted = (pred > .5).int()
correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum()
true_positive = ((target * predicted) > .1).int().sum(axis=-1)
precision = true_positive / (predicted.sum(axis=-1) + 1e-13)
recall = true_positive / (target.sum(axis=-1) + 1e-13)
metrics['test_precision'] += precision.sum().item()
metrics['test_recall'] += recall.sum().item()
else:
_, predicted = torch.max(pred, 1)
correct = predicted.eq(target).sum()
output = predicted.data.cpu().numpy()
y_pred.extend(output) # Save Prediction
y_true.extend(target.data.cpu().numpy()) # Save Truth
metrics['test_correct'] += correct.item()
metrics['test_loss'] += loss.item() * target.size(0)
if len(target.size()) == 1: #
metrics['test_total'] += target.size(0)
elif len(target.size()) == 2: # for tasks of next word prediction
metrics['test_total'] += target.size(0) * target.size(1)
return metrics, y_pred, y_true
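# Illustrative sketch of a concrete subclass (class name and hyper-parameters are hypothetical, not part of FedML):
# class ExampleTrainer(ModelTrainer):
#     def get_model_params(self):
#         return self.model.cpu().state_dict()
#     def set_model_params(self, model_parameters):
#         self.model.load_state_dict(model_parameters)
#     def train(self, train_data, device, args=None):
#         self.model.to(device)
#         self.model.train()
#         criterion = nn.CrossEntropyLoss().to(device)
#         optimiser = self.get_client_optimiser(self.model, "sgd", lr=0.01)
#         for x, target in train_data:
#             x, target = x.to(device), target.to(device)
#             optimiser.zero_grad()
#             loss = criterion(self.model(x), target)
#             loss.backward()
#             optimiser.step()
#     def test(self, test_data, device, args=None):
#         return self.test_model(self.model, test_data, device, args)
#     def test_on_the_server(self, train_data_local_dict, test_data_local_dict, device, args=None) -> bool:
#         return False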
| 3.03125
| 3
|
geartracker/admin.py
|
pigmonkey/django-geartracker
| 1
|
12781556
|
from django.contrib import admin
from geartracker.models import *
class ItemAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("make", "model", "size")}
list_display = ('__unicode__', 'type', 'metric_weight', 'acquired')
list_filter = ('archived', 'category', 'type', 'make')
search_fields = ('make', 'model')
filter_horizontal = ('related',)
admin.site.register(Item, ItemAdmin)
class CategoryAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('__unicode__', 'number_items')
admin.site.register(Category, CategoryAdmin)
class TypeAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
list_display = ('category', 'name', 'number_items')
list_filter = ('category',)
admin.site.register(Type, TypeAdmin)
class ListItemRelationshipInline(admin.TabularInline):
model = ListItem
extra = 1
class ListAdmin(admin.ModelAdmin):
prepopulated_fields = {"slug": ("name",)}
inlines = (ListItemRelationshipInline,)
list_display = ('name', 'total_metric_weight', 'start_date', 'end_date',
'public')
list_filter = ('public',)
admin.site.register(List, ListAdmin)
| 2.015625
| 2
|
viterbi/viterbi.py
|
cornell-brg/xloops-bmarks
| 3
|
12781557
|
<filename>viterbi/viterbi.py
#! /usr/bin/python2.6
# Convolutional Encoder and Hard-Decision Viterbi Decoder
# Code and example inspired by the tutorial at
# http://home.netcom.com/~chip.f/viterbi/algrthms.html
import itertools
K = 3
RATE = 1.0/2
POLY = [7, 5]
#***************************************************************
# CONVOLUTIONAL ENCODER FUNCTIONS
#***************************************************************
def encoder(input):
""" Convolution encoder"""
# NOTE: EXPECTS input FORMAT SUCH THAT LEFTMOST BIT IS MOST RECENT
enc_out = []
# push some extra 0s at into the input for flushing
stream = [0]*(K-1) + input + [0]*(K-1)
# shift in data right to left
for i in reversed(range(len(input)+K-1)):
symbol = generate_symbol(stream[i:i+K], POLY, K)
# add to symbols
enc_out.append(symbol)
return enc_out
def generate_symbol(registers, polynomials, K):
"""Convolutionally encode one bit of data, output the encoded symbol"""
# NOTE: EXPECTS registers FORMAT SUCH THAT LEFTMOST BIT IS MOST RECENT
xor = 0
symbol = ''
# for each symbol bit
for p in polynomials:
xor = 0
# for each memory stage
for k in range(K):
# check polynomial to see if stage is connected
# xor if there is a connection
if (p >> k) & 1:
xor ^= registers[k]
# add digit to symbol
symbol += str(xor)
return symbol
def hamming(x, y):
"""Return the hamming distance between symbols x and y"""
dist = 0
for i in range(K-1):
if x[i] != y[i]:
dist += 1
return dist
def build_output_table():
"""Build an output table (find output given state and input bit)"""
output = {}
states = [''.join(x) for x in itertools.product('01', repeat=K-1)]
for s in states:
output[s] = []
stream = [int(x) for x in s]
for input in [0, 1]:
# NOTE: inputs arranged such that newest item is on left
symbol = generate_symbol([input]+stream, POLY, K)
output[s].append(symbol)
return output
def build_transition_table():
"""Build a transition table (find next state given state and input bit)"""
transition = {}
states = [''.join(x) for x in itertools.product('01', repeat=K-1)]
for s in states:
transition[s] = []
for input in [0, 1]:
# NOTE: states are arranged such that newest item is left
next_state = str(input) + s[:1]
transition[s].append(next_state)
return transition
#***************************************************************
# VITERBI
#***************************************************************
def viterbi(code):
"""Viterbi decoder"""
# generate forward paths, errors
traces, errors = forward_path(code)
# then decode message during traceback
message = traceback(code, traces, errors)
return message
def forward_path(code):
"""Traverse the forward paths and accumulate errors"""
# build the output table
outputs = build_output_table()
print "OUTPUT TABLE:"
print outputs
print
# initialize data structures
states = [''.join(x) for x in itertools.product('01', repeat=K-1)]
error = dict([(x, 0) for x in states])
newerror = dict([(x, 0) for x in states])
traces = dict([(x, []) for x in states])
# TODO: initialization of error for states?
# current implementation may not be accurate
# for each input symbol received from the transmission stream
for rx in code:
# for all 2**(K-1) possible states
for s in states:
# generate the previous states connected to this state
# (insert old bit on right, newer bit on left)
ps0 = s[1:] + '0' # previous state 0
ps1 = s[1:] + '1' # previous state 1
input = int(s[0])
# calculate the distance between the received symbol, i, and the
# two potential symbols that could be emitted entering this state
# (when previous state was 0x or 1x)
E0 = hamming(rx, outputs[ps0][input])
E1 = hamming(rx, outputs[ps1][input])
# save the path with the smallest error
if E0 <= E1:
newerror[s] = error[ps0] + E0
traces[s].append(ps0)
else:
newerror[s] = error[ps1] + E1
traces[s].append(ps1)
error = newerror.copy()
print rx, error
print "\nERRORS:", error
return traces, error
def traceback(code, traces, error):
"""Traceback through path with smallest error and reconstruct message"""
# build transition table (actually need a reverse lookup table for this)
# TODO: generate reverse lookup table
transition = build_transition_table()
print "\nTRANSITION TABLE:"
print transition
print
# initialize variables
prev_state = ''
best_error = 100 #TODO: init with max_value
states = [''.join(x) for x in itertools.product('01', repeat=K-1)]
# find the path tail with the lowest error
for s in states:
if error[s] < best_error:
best_error = error[s]
prev_state = s
# traceback to generate the path
path = [prev_state]
output = []
for i in reversed(range(len(code))):
prev_state = traces[prev_state][i]
path.append(prev_state)
path.reverse()
# reconstruct the message
message = []
for i in range(len(path) - 1):
s, ns = path[i:i+2]
message.append(transition[s].index(ns))
return message[:-(K-1)]
#***************************************************************
# MAIN
#***************************************************************
# CONVOLUTIONALLY ENCODE BITSTREAM
# in: mem: SYM p=7: p=5: enc:
# 0 00 0^0^0 0^x^0 00
# 1 00 1^0^0 1^x^0 11
# 0 10 0^1^0 0^x^0 10
# 1 01 1^0^1 1^x^1 00
# 1 10 1^1^0 1^x^0 01
# 1 11 1^1^1 1^x^1 10
# 0 11 0^1^1 0^x^1 01
# 0 01 0^0^1 0^x^1 11
# 1 00 1^0^0 1^x^0 11
# 0 10 0^1^0 0^x^0 10
# 1 01 1^0^1 1^x^1 00
# 0 10 0^1^0 0^x^0 10
# 0 01 0^0^1 0^x^1 11
# 0 00 0^0^0 0^x^0 00
# 1 00 1^0^0 1^x^0 11
# ---------FLUSH-------------
# 0 10 0^1^0 0^x^0 10
# 0 01 0^0^1 0^x^1 11
# 00 (final mem state)
# oldest data on left
data = [0, 1, 0, 1, 1,
1, 0, 0, 1, 0,
1, 0, 0, 0, 1]
# most recent data on left
bitstream = data[::-1]
symbols = encoder(bitstream)
print "VERIFY ENCODED BITSTREAM:"
# assert len(symbols) == len(sym_ref)
sym_ref = ['00', '11', '10', '00', '01',
'10', '01', '11', '11', '10',
'00', '10', '11', '00', '11', '10', '11']
for i in range(len(sym_ref)):
print symbols[i], sym_ref[i], symbols[i] == sym_ref[i]
print
# DECODE SYMBOL STREAM USING VITERBI
# Viterbi Decoder Trellis Diagram (assuming 0 errors)
# TRELLIS
# next_states: 00 01 10 11
# prev_states: 00/01 10/11 00/01 10/11
# outputs: 00/11 10/01 11/00 01/10
#
# time: out: branch errors (accumulated path errors):
# 0 *
# 1 00 *0/2 ( 0) 1/1 ( 1) 2/0 ( 0) 1/1 ( 1)
# 2 11 2/0 ( 1) 1/1 ( 1) *0/2 ( 0) 1/1 ( 1)
# 3 10 1/1 ( ) *0/2 ( 0) 1/1 ( ) 2/0 ( )
# 4 00 0/2 ( ) 1/1 ( ) *2/0 ( 0) 1/1 ( )
# 5 01 1/1 ( ) 0/2 ( ) 1/1 ( ) *2/0 ( 0)
# 6 10 1/1 ( ) 2/0 ( ) 1/1 ( ) *0/2 ( 0)
# 7 01 1/1 ( ) *0/2 ( 0) 1/1 ( ) 2/0 ( )
# 8 11 *2/0 ( 0) 1/1 ( ) 0/2 ( ) 1/1 ( )
# 9 11 2/0 ( ) 1/1 ( ) *0/2 ( 0) 1/1 ( )
# 10 10 1/1 ( ) *2/0 ( 0) 1/1 ( ) 0/2 ( )
# 11 00 0/2 ( ) 1/1 ( ) *2/0 ( ) 1/1 ( )
# 12 10 1/1 ( ) *2/0 ( 0) 1/1 ( ) 0/2 ( )
# 13 11 *2/0 ( 0) 1/1 ( ) 0/2 ( ) 1/1 ( )
# 14 00 *0/2 ( 0) 1/1 ( ) 2/0 ( ) 1/1 ( )
# 15 11 2/0 ( ) 1/1 ( ) *0/2 ( 0) 1/1 ( )
# 16 10 1/1 ( ) *2/0 ( 0) 1/1 ( ) 0/2 ( )
# 17 11 *2/0 ( 0) 1/1 ( ) 0/2 ( ) 1/1 ( )
# There are two errors in this data, at t=3 and t=12
xmit = ['00', '11', '11', '00', '01',
'10', '01', '11', '11', '10',
'00', '00', '11', '00', '11', '10', '11']
message = viterbi(xmit)
print "VERIFY DECODED MESSAGE:"
# assert len(message) == len(data)
for i in range(len(data)):
print message[i], data[i], message[i] == data[i]
| 3.109375
| 3
|
delphi_epidata/request.py
|
lee14257/delphi-epidata-py
| 0
|
12781558
|
from datetime import date
from typing import Final, Generator, Sequence, cast, Iterable, Mapping, Optional, Union, List
from json import loads
from requests import Response, Session
from tenacity import retry, stop_after_attempt
from pandas import DataFrame
from ._model import (
EpiRangeLike,
AEpiDataCall,
EpiDataFormatType,
EpiDataResponse,
EpiRange,
EpidataFieldInfo,
OnlySupportsClassicFormatException,
add_endpoint_to_url,
)
from ._endpoints import AEpiDataEndpoints
from ._constants import HTTP_HEADERS, BASE_URL
from ._covidcast import CovidcastDataSources, define_covidcast_fields
@retry(reraise=True, stop=stop_after_attempt(2))
def _request_with_retry(
url: str, params: Mapping[str, str], session: Optional[Session] = None, stream: bool = False
) -> Response:
"""Make request with a retry if an exception is thrown."""
def call_impl(s: Session) -> Response:
res = s.get(url, params=params, headers=HTTP_HEADERS, stream=stream)
if res.status_code == 414:
return s.post(url, params=params, headers=HTTP_HEADERS, stream=stream)
return res
if session:
return call_impl(session)
with Session() as s:
return call_impl(s)
class EpiDataCall(AEpiDataCall):
"""
epidata call representation
"""
_session: Final[Optional[Session]]
def __init__(
self,
base_url: str,
session: Optional[Session],
endpoint: str,
params: Mapping[str, Union[None, EpiRangeLike, Iterable[EpiRangeLike]]],
meta: Optional[Sequence[EpidataFieldInfo]] = None,
only_supports_classic: bool = False,
) -> None:
super().__init__(base_url, endpoint, params, meta, only_supports_classic)
self._session = session
def with_base_url(self, base_url: str) -> "EpiDataCall":
return EpiDataCall(base_url, self._session, self._endpoint, self._params)
def with_session(self, session: Session) -> "EpiDataCall":
return EpiDataCall(self._base_url, session, self._endpoint, self._params)
def _call(
self,
format_type: Optional[EpiDataFormatType] = None,
fields: Optional[Iterable[str]] = None,
stream: bool = False,
) -> Response:
url, params = self.request_arguments(format_type, fields)
return _request_with_retry(url, params, self._session, stream)
def classic(
self, fields: Optional[Iterable[str]] = None, disable_date_parsing: Optional[bool] = False
) -> EpiDataResponse:
"""Request and parse epidata in CLASSIC message format."""
self._verify_parameters()
try:
response = self._call(None, fields)
r = cast(EpiDataResponse, response.json())
epidata = r.get("epidata")
if epidata and isinstance(epidata, list) and len(epidata) > 0 and isinstance(epidata[0], dict):
r["epidata"] = [self._parse_row(row, disable_date_parsing=disable_date_parsing) for row in epidata]
return r
except Exception as e: # pylint: disable=broad-except
return {"result": 0, "message": f"error: {e}", "epidata": []}
def __call__(
self, fields: Optional[Iterable[str]] = None, disable_date_parsing: Optional[bool] = False
) -> EpiDataResponse:
"""Request and parse epidata in CLASSIC message format."""
return self.classic(fields, disable_date_parsing=disable_date_parsing)
def json(
self, fields: Optional[Iterable[str]] = None, disable_date_parsing: Optional[bool] = False
) -> List[Mapping[str, Union[str, int, float, date, None]]]:
"""Request and parse epidata in JSON format"""
if self.only_supports_classic:
raise OnlySupportsClassicFormatException()
self._verify_parameters()
response = self._call(EpiDataFormatType.json, fields)
response.raise_for_status()
return [
self._parse_row(row, disable_date_parsing=disable_date_parsing)
for row in cast(List[Mapping[str, Union[str, int, float, None]]], response.json())
]
def df(self, fields: Optional[Iterable[str]] = None, disable_date_parsing: Optional[bool] = False) -> DataFrame:
"""Request and parse epidata as a pandas data frame"""
if self.only_supports_classic:
raise OnlySupportsClassicFormatException()
self._verify_parameters()
r = self.json(fields, disable_date_parsing=disable_date_parsing)
return self._as_df(r, fields, disable_date_parsing=disable_date_parsing)
def csv(self, fields: Optional[Iterable[str]] = None) -> str:
"""Request and parse epidata in CSV format"""
if self.only_supports_classic:
raise OnlySupportsClassicFormatException()
self._verify_parameters()
response = self._call(EpiDataFormatType.csv, fields)
response.raise_for_status()
return response.text
def iter(
self, fields: Optional[Iterable[str]] = None, disable_date_parsing: Optional[bool] = False
) -> Generator[Mapping[str, Union[str, int, float, date, None]], None, Response]:
"""Request and streams epidata rows"""
if self.only_supports_classic:
raise OnlySupportsClassicFormatException()
self._verify_parameters()
response = self._call(EpiDataFormatType.jsonl, fields, stream=True)
response.raise_for_status()
for line in response.iter_lines():
yield self._parse_row(loads(line), disable_date_parsing=disable_date_parsing)
return response
def __iter__(self) -> Generator[Mapping[str, Union[str, int, float, date, None]], None, Response]:
return self.iter()
class EpiDataContext(AEpiDataEndpoints[EpiDataCall]):
"""
sync epidata call class
"""
_base_url: Final[str]
_session: Final[Optional[Session]]
def __init__(self, base_url: str = BASE_URL, session: Optional[Session] = None) -> None:
super().__init__()
self._base_url = base_url
self._session = session
def with_base_url(self, base_url: str) -> "EpiDataContext":
return EpiDataContext(base_url, self._session)
def with_session(self, session: Session) -> "EpiDataContext":
return EpiDataContext(self._base_url, session)
def _create_call(
self,
endpoint: str,
params: Mapping[str, Union[None, EpiRangeLike, Iterable[EpiRangeLike]]],
meta: Optional[Sequence[EpidataFieldInfo]] = None,
only_supports_classic: bool = False,
) -> EpiDataCall:
return EpiDataCall(self._base_url, self._session, endpoint, params, meta, only_supports_classic)
Epidata = EpiDataContext()
def CovidcastEpidata(base_url: str = BASE_URL, session: Optional[Session] = None) -> CovidcastDataSources[EpiDataCall]:
url = add_endpoint_to_url(base_url, "covidcast/meta")
meta_data_res = _request_with_retry(url, {}, session, False)
meta_data_res.raise_for_status()
meta_data = meta_data_res.json()
def create_call(params: Mapping[str, Union[None, EpiRangeLike, Iterable[EpiRangeLike]]]) -> EpiDataCall:
return EpiDataCall(base_url, session, "covidcast", params, define_covidcast_fields())
return CovidcastDataSources.create(meta_data, create_call)
__all__ = ["Epidata", "EpiDataCall", "EpiDataContext", "EpiRange", "CovidcastEpidata"]
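# Illustrative usage sketch (constructor arguments use the defaults shown above):
# epidata = EpiDataContext()        # equivalent to the module-level `Epidata` instance
# sources = CovidcastEpidata()      # performs a network request for covidcast metadata on construction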
| 2.65625
| 3
|
python-package/lets_plot/plot/sampling.py
|
OLarionova-HORIS/lets-plot
| 0
|
12781559
|
<reponame>OLarionova-HORIS/lets-plot
#
# Copyright (c) 2019. JetBrains s.r.o.
# Use of this source code is governed by the MIT license that can be found in the LICENSE file.
#
from .core import FeatureSpec
__all__ = ['sampling_random',
'sampling_random_stratified',
'sampling_pick',
'sampling_systematic',
'sampling_group_random',
'sampling_group_systematic',
'sampling_vertex_vw',
'sampling_vertex_dp']
def sampling_random(n, seed=None):
return _sampling('random', n=n, seed=seed)
def sampling_random_stratified(n, seed=None, min_subsample=None):
return _sampling('random_stratified', n=n, seed=seed, min_subsample=min_subsample)
def sampling_pick(n):
return _sampling('pick', n=n)
def sampling_systematic(n):
return _sampling('systematic', n=n)
def sampling_group_systematic(n):
return _sampling('group_systematic', n=n)
def sampling_group_random(n, seed=None):
return _sampling('group_random', n=n, seed=seed)
def sampling_vertex_vw(n):
return _sampling('vertex_vw', n=n)
def sampling_vertex_dp(n):
return _sampling('vertex_dp', n=n)
def _sampling(name, **kwargs):
return FeatureSpec('sampling', name, **kwargs)
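# Illustrative usage sketch: every helper simply wraps its arguments into a FeatureSpec, e.g.
# sampling_random(n=50, seed=123) is equivalent to FeatureSpec('sampling', 'random', n=50, seed=123)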
| 2.015625
| 2
|
models/user.py
|
wimp-project/backend
| 0
|
12781560
|
from dataclasses import dataclass
from typing import List
from sqlalchemy.orm import relationship
from app import db
from models import Feedback
@dataclass
class User(db.Model):
__tablename__ = 'user'
user_id: int = db.Column(db.Integer, primary_key=True)
name: str = db.Column(db.String(), nullable=False)
surname: str = db.Column(db.String(), nullable=False)
email: str = db.Column(db.String(), nullable=False, unique=True)
password: str = db.Column(db.String())
salt: str = db.Column(db.String())
position_lat: float = db.Column(db.Float)
position_lon: float = db.Column(db.Float)
visited_commercial_activities = relationship(
"CommercialActivity",
secondary="visit",
)
feedbacks = relationship("Feedback")
def __init__(
self,
name,
surname,
email,
user_id=None,
password=<PASSWORD>,
salt=None,
position_lat=None,
position_lon=None
):
self.name = name
self.surname = surname
self.email = email
self.user_id = user_id
self.password = password
self.salt = salt
self.position_lat = position_lat
self.position_lon = position_lon
def __repr__(self):
return '<User id {}>'.format(self.user_id)
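# Illustrative usage sketch (field values are placeholders; assumes a Flask-SQLAlchemy session):
# user = User(name='Ada', surname='Lovelace', email='ada@example.com')
# db.session.add(user)
# db.session.commit()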
| 2.90625
| 3
|
boa3/neo3/contracts/nef.py
|
hal0x2328/neo3-boa
| 25
|
12781561
|
<reponame>hal0x2328/neo3-boa<filename>boa3/neo3/contracts/nef.py
from __future__ import annotations
import hashlib
from typing import Tuple
from boa3.neo3.core import Size as s, serialization, types, utils
class Version(serialization.ISerializable):
"""
Represents the version number of an assembly
"""
def __init__(self, major: int = 0, minor: int = 0, build: int = 0, revision: int = 0):
"""
Args:
major: non interchangeable assembly.
minor: significant enhancements with backwards compatibility.
build: recompilation of the same source with possible other compiler or on other platform.
revision: fully interchangeable. Can be used for example for security fixes.
"""
if major < 0 or minor < 0 or build < 0 or revision < 0:
raise ValueError("Negative version numbers are not allowed")
if major > 255 or minor > 255 or build > 255 or revision > 255:
raise ValueError("Version numbers cannot exceed 255")
self.major = major
self.minor = minor
self.build = build
self.revision = revision
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.major == other.major
and self.minor == other.minor
and self.build == other.build
and self.revision == other.revision)
def __len__(self):
return s.uint64 + s.uint64 + s.uint64 + s.uint64
def serialize(self, writer: serialization.BinaryWriter) -> None:
version_str = "{0}.{1}.{2}.{3}".format(self.major,
self.minor,
self.build,
self.revision)
from boa3.neo.vm.type.String import String
version_bytes = String(version_str).to_bytes() + bytes(s.uint64 * 4 - len(version_str))
writer.write_bytes(version_bytes)
def deserialize(self, reader: serialization.BinaryReader) -> None:
version_str = reader.read_bytes(s.uint64 * 4).decode('utf-8')
import re
version_str = re.sub(r'\x00+', '', version_str)
major, minor, build, revision = version_str.split('.')
self.major = int(major)
self.minor = int(minor)
self.build = int(build)
self.revision = int(revision)
@classmethod
def _parse_component(self, c: str) -> Tuple[bool, int]:
try:
r = int(c)
except ValueError:
return False, -1
if r < 0:
return False, -1
if r > 255:
return False, -1
return True, r
@classmethod
def from_string(cls, input: str) -> Version:
"""
Parse an instance out of a string.
Args:
input: string representing a version number following the format `Major.Minor[.build[.revision]]`.
Each version part must fit in the range >= 0 <= 255.
Raises:
ValueError: if the input cannot be successfully parsed.
"""
parts = input.split('.')
if len(parts) < 2 or len(parts) > 4:
raise ValueError(f"Cannot parse version from: {input}")
success, major = Version._parse_component(parts[0])
if not success:
raise ValueError(f"Cannot parse major field from: {parts[0]}")
success, minor = Version._parse_component(parts[1])
if not success:
raise ValueError(f"Cannot parse minor field from: {parts[1]}")
if len(parts) > 2:
success, build = Version._parse_component(parts[2])
if not success:
raise ValueError(f"Cannot parse build field from: {parts[2]}")
else:
build = 0
if len(parts) > 3:
success, revision = Version._parse_component(parts[3])
if not success:
raise ValueError(f"Cannot parse revision field from: {parts[3]}")
else:
revision = 0
return cls(major, minor, build, revision)
class NEF(serialization.ISerializable):
def __init__(self, compiler_name: str = None, version: Version = None, script: bytes = None):
self.magic = 0x3346454E
if compiler_name is None:
self.compiler = 'unknown'
else:
self.compiler = compiler_name[:32]
self.version = version if version else Version()
self.script = script if script else b''
self.checksum = self.compute_checksum()
def __len__(self):
return (
s.uint32 # magic
+ 32 # compiler
+ (s.uint64 * 4) # version
+ 2 # reserve
+ utils.get_var_size(bytes()) # TODO: method tokens
+ 2 # reserve
+ utils.get_var_size(self.script)
+ s.uint32) # checksum
def __eq__(self, other):
if not isinstance(other, type(self)):
return False
return (self.magic == other.magic
and self.compiler == other.compiler
and self.version == other.version
and self.script == other.script
and self.checksum == other.checksum)
@property
def compiler_with_version(self) -> bytes:
result = '{0}-'.format(self.compiler).encode('utf-8') + self.version.to_array()
return result[:64] + bytes(64 - len(result))
def serialize(self, writer: serialization.BinaryWriter) -> None:
writer.write_uint32(self.magic)
writer.write_bytes(self.compiler_with_version)
writer.write_uint16(0) # 2 reserved bytes
writer.write_var_bytes(bytes()) # TODO: method tokens
writer.write_uint16(0) # 2 reserved bytes
writer.write_var_bytes(self.script)
writer.write_bytes(self.checksum)
def deserialize(self, reader: serialization.BinaryReader) -> None:
self.magic = reader.read_uint32()
compiler_with_version = reader.read_bytes(64).decode('utf-8')
self.compiler, version = compiler_with_version.rsplit('-', maxsplit=1)
self.version = Version.deserialize_from_bytes(version[:32].encode('utf-8'))
assert reader.read_uint16() == 0 # 2 reserved bytes
reader.read_var_int(128) # TODO: method tokens
assert reader.read_uint16() == 0 # 2 reserved bytes
self.script = reader.read_var_bytes()
self.checksum = reader.read_bytes(4)
if self.checksum != self.compute_checksum():
raise ValueError("Deserialization error - invalid checksum")
def script_to_array(self):
from boa3.neo3.core.serialization import BinaryWriter
with BinaryWriter() as bw:
bw.write_var_bytes(self.script)
return bw._stream.getvalue()
def tokens_to_array(self):
from boa3.neo3.core.serialization import BinaryWriter
with BinaryWriter() as bw:
bw.write_var_bytes(bytes())
return bw._stream.getvalue()
def compute_checksum(self) -> bytes:
data = (self.magic.to_bytes(4, 'little')
+ self.compiler_with_version
+ bytes(2) # reserved bytes
+ self.tokens_to_array() # TODO: method tokens
+ bytes(2) # reserved bytes
+ self.script_to_array())
return hashlib.sha256(hashlib.sha256(data).digest()).digest()[:s.uint32]
def compute_script_hash(self) -> types.UInt160:
hash = hashlib.new('ripemd160', hashlib.sha256(self.script).digest()).digest()
return types.UInt160(data=hash)
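# Illustrative usage sketch (compiler name and script bytes are placeholders):
# nef = NEF(compiler_name='neo3-boa', version=Version(1, 0), script=b'\x40')
# nef.compute_checksum()       # double-SHA256 checksum truncated to uint32 size
# nef.compute_script_hash()    # UInt160 (RIPEMD-160 of SHA-256) of the script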
| 2.390625
| 2
|
scplint/scp.py
|
richardzimmermann/scplint
| 2
|
12781562
|
import json
import os
from logging import getLogger
from pathlib import Path
from scplint.statement import Statement
logger = getLogger()
class SCP:
def __init__(self, scp: dict, filename: str = 'my_scp',
size_max: int = 5120, minimize: bool = False):
logger.debug('initialize scp')
self.scp = scp
self.file = filename
self.minimized = minimize
self.statements = self._get_statements()
logger.debug('get scp metrics')
self.size = self._get_size(min=minimize)
self.size_max = size_max
self.percent = self._get_percent(self.size)
self.actions = self._get_actions()
self.notactions = self._get_notactions()
def _get_statements(self) -> list:
''' Return a Statement object for every statement in the SCP. '''
logger.debug('Get every Statement from the SCP')
statements = []
for statement in self.scp.get('Statement', []):
statements.append(Statement(statement))
return statements
def _get_actions(self) -> list:
''' Return a flat list of every Action across all statements of the SCP. '''
logger.debug('Get every Action from the SCP')
actions = []
for statement in self.statements:
logger.info(statement.actions)
actions += statement.actions
logger.info(actions)
logger.info(len(actions))
return actions
def _get_notactions(self) -> list:
''' Return a flat list of every NotAction across all statements of the SCP. '''
logger.debug('Get every NotAction from the SCP')
notactions = []
for statement in self.statements:
notactions += statement.notactions
return notactions
def _get_size(self, min: bool = False) -> int:
''' checks the actual size of the json policy in bytes as aws
does it if you create/update a scp
Args:
min (bool): True if policy should be minimized before calculating
the size.
Returns:
scp_bytes (int): the size of the scp in bytes as int
'''
logger.debug('Get the size in bytes of the SCP (minimized=%s)', min)
if min:
scp_bytes = len(self.minimize().encode('utf-8'))
else:
scp_bytes = len(json.dumps(self.scp, indent=4).encode('utf-8'))
return scp_bytes
def _get_percent(self, size: int, precision: int = 1) -> float:
''' check the actual size of the minimized json policy as percentage
against the maximum policy size of aws
Args:
size (int): the size of the policy in bytes
precision (int): the precision of the percentage value
Returns:
percent (float): the size of the scp as percentage value
'''
logger.debug('Get the size in percent of the SCP')
percent = round(100 / self.size_max * size, precision)
return percent
def minimize(self) -> str:
''' convert the json scp into a minified str (remove blanks, tabs and
linebreaks)
Returns:
scp_minified (str): a minified version of the json policy
'''
logger.debug('Format the json policy into a minimized text')
scp_minified = json.dumps(self.scp).replace(" ", "")
return scp_minified
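# Illustrative usage sketch (the policy document is a hypothetical example):
# policy = {"Version": "2012-10-17",
#           "Statement": [{"Effect": "Deny", "Action": ["s3:*"], "Resource": "*"}]}
# scp = SCP(policy, filename="deny_s3.json")
# scp.size, scp.percent, len(scp.actions)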
| 2.6875
| 3
|
comdirect_api/comdirect_client.py
|
maxpautsch/comdirect-api-simple
| 1
|
12781563
|
import requests
import pickle
from comdirect_api.auth.auth_service import AuthService
from comdirect_api.service.account_service import AccountService
from comdirect_api.service.depot_service import DepotService
from comdirect_api.service.document_service import DocumentService
from comdirect_api.service.report_service import ReportService
from comdirect_api.service.order_service import OrderService
from comdirect_api.service.instrument_service import InstrumentService
class ComdirectClient:
def __init__(self, client_id, client_secret, import_session=False):
self.api_url = 'https://api.comdirect.de/api'
self.oauth_url = 'https://api.comdirect.de'
if not import_session:
self.session = requests.Session()
self.session.headers.update({
'Accept': 'application/json',
'Content-Type': 'application/json',
})
self.auth_service = AuthService(client_id, client_secret, self.session, self.api_url, self.oauth_url)
else:
if import_session == True:
import_session = 'session.pkl'
with open(import_session, 'rb') as input:
self.session = pickle.load(input)
self.auth_service = pickle.load(input)
self.account_service = AccountService(self.session, self.api_url)
self.depot_service = DepotService(self.session, self.api_url)
self.document_service = DocumentService(self.session, self.api_url)
self.report_service = ReportService(self.session, self.api_url)
self.order_service = OrderService(self.session, self.api_url)
self.instrument_service = InstrumentService(self.session, self.api_url)
def session_export(self, filename = 'session.pkl'):
with open(filename, 'wb') as output:
pickle.dump(self.session, output, pickle.HIGHEST_PROTOCOL)
pickle.dump(self.auth_service, output, pickle.HIGHEST_PROTOCOL)
def fetch_tan(self, zugangsnummer, pin, tan_type=None):
return self.auth_service.fetch_tan(zugangsnummer, pin, tan_type)
def activate_session(self, tan=None):
self.auth_service.activate_session(tan)
def refresh_token(self):
self.auth_service.refresh_token()
def revoke_token(self):
self.auth_service.revoke()
def get_all_balances(self, without_account=False):
"""
4.1.1. Fetch balances from all accounts.
:param without_account: Don't include account object in response
:return: Response object
"""
return self.account_service.get_all_balances(without_account)
def get_balance(self, account_uuid):
"""
4.1.2. Fetch balance for a specific account.
:param account_uuid: Account-ID
:return: Response object
"""
return self.account_service.get_balance(account_uuid)
def get_account_transactions(self, account_uuid, with_account=False, transaction_state='BOTH', paging_count=20,
paging_first=0, min_booking_date=None, max_booking_date=None):
"""
4.1.3. Fetch transactions for a specific account. Not setting a min_booking_date currently limits the result to
the last 180 days.
:param account_uuid: Account-ID
:param with_account: Include account information in the response. Defaults to False
:param transaction_state: 'BOOKED' or 'NOTBOOKED'. Defaults to 'BOTH'
:param paging_count: Number of transactions
:param paging_first: Index of first returned transaction. Only possible for booked transactions
(transaction_state='BOOKED').
:param max_booking_date: max booking date in format YYYY-MM-DD
:param min_booking_date: min booking date in format YYYY-MM-DD
:return: Response object
"""
return self.account_service.get_account_transactions(account_uuid, with_account, transaction_state,
paging_count, paging_first, min_booking_date,
max_booking_date)
def get_all_depots(self):
"""
5.1.2. Fetch information for all depots.
:return: Response object
"""
return self.depot_service.get_all_depots()
def get_depot_positions(self, depot_id, with_depot=True, with_positions=True, with_instrument=False):
"""
5.1.2. Fetch information for a specific depot.
:param depot_id: Depot-ID
:param with_depot: Include depot information in response. Defaults to True.
:param with_positions: Include positions in response. Defaults to True.
:param with_instrument: Include instrument information for positions, ignored if with_positions is False.
Defaults to False.
:return: Response object
"""
return self.depot_service.get_depot_positions(depot_id, with_depot, with_positions, with_instrument)
def get_position(self, depot_id, position_id, with_instrument=False):
"""
5.1.3. Fetch a specific position.
:param depot_id: Depot-ID
:param position_id: Position-ID
:param with_instrument: Include instrument information. Defaults to False.
:return: Response object
"""
return self.depot_service.get_position(depot_id, position_id, with_instrument)
def get_depot_transactions(self, depot_id, with_instrument=False, **kwargs):
"""
5.1.4. Fetch depot transactions, filter parameters can be applied via kwargs
:param depot_id: Depot-ID
:param with_instrument: Include instrument information. Defaults to False.
:key wkn: filter by WKN
:key isin: filter by ISIN
:key instrument_id: filter by instrumentId
:key max_booking_date: filter by booking date, format "YYYY-MM-DD"
:key transaction_direction: filter by transactionDirection: {"IN", "OUT"}
:key transaction_type: filter by transactionType: {"BUY", "SELL", "TRANSFER_IN", "TRANSFER_OUT"}
:key booking_status: filter by bookingStatus: {"BOOKED", "NOTBOOKED", "BOTH"}
:key min_transaction_value: filter by min-transactionValue
:key max_transaction_value: filter by max-transactionValue
:return: Response object
"""
return self.depot_service.get_depot_transactions(depot_id, with_instrument, **kwargs)
def get_instrument(self, instrument_id, order_dimensions=False, fund_distribution=False, derivative_data=False, static_data=True):
"""
6.1.1 Fetch instrument
order_dimensions: the OrderDimension object is populated
fund_distribution: the FundDistribution object is populated if the security is a fund
derivative_data: the DerivativeData object is populated if the security is a derivative
static_data: when disabled, the StaticData object is not returned
:return: Response object
"""
return self.instrument_service.get_instrument(instrument_id, order_dimensions=order_dimensions, fund_distribution=fund_distribution, derivative_data=derivative_data, static_data=static_data)
def get_order_dimensions(self, **kwargs):
"""
7.1.1 Fetch order dimensions
:key instrument_id: filters by instrumentId
:key wkn: filters by WKN
:key isin: filters by ISIN
:key mneomic: filters by mnemonic
:key venue_id: filters by venueId: the venueId, given as a UUID, restricts results to a single trading venue
:key side: transaction side; possible filter values are BUY or SELL
:key order_type: filters by orderType (e.g. LIMIT, MARKET or ONE_CANCELS_OTHER)
:key type: filters by type: EXCHANGE selects an exchange venue, OFF a LiveTrading venue
:return: Response object
"""
return self.order_service.get_dimensions(**kwargs)
def get_all_orders(self, depot_id, with_instrument=False, with_executions=True, **kwargs):
"""
7.1.2 Fetch orders (order book)
:param depot_id: Depot-ID
:param with_instrument: Include instrument information. Defaults to False.
:param with_executions: Include execution information. Defaults to True.
:key order_status: filter by orderStatus: {"OPEN ", "EXECUTED", "SETTLED"...}
:key venue_id: filter by venueId
:key side: filter by side: {"BUY", "SELL"}
:key order_type: filter by orderType
:return: Response object
"""
return self.order_service.get_all_orders(depot_id, with_instrument, with_executions, **kwargs)
def get_order(self, order_id):
"""
7.1.3 Fetch order (single order)
:param order_id: Order-ID
:return: Response object
"""
return self.order_service.get_order(order_id)
def set_order_change_validation(self, order_id, changed_order):
"""
7.1.5 Create validation for an order change
:param order_id: Order-ID
:param changed_order: Altered order from get_order
:return: [challenge_id, challenge] (if challenge not necessary: None)
"""
return self.order_service.set_change_validation(order_id, changed_order)
def set_order_change(self, order_id, changed_order, challenge_id, tan=None):
"""
7.1.11 Change order
:param order_id: Order-ID
:param changed_order: same altered order as for set_change_validation
:param challenge_id: first return value from set_change_validation
:param tan: tan if necessary
:return: Response object
"""
return self.order_service.set_change(order_id, changed_order, challenge_id, tan)
def get_documents(self, first_index=0, count=1000):
"""
9.1.1. Fetch all documents in the PostBox
:param first_index: Index of the first document, starting with 0. Defaults to 0
:param count: Number of documents to be fetched. Max 1000. Defaults to 1000.
:return: Response object
"""
return self.document_service.get_documents(first_index, count)
def get_document(self, document_id):
"""
9.1.2. Fetch a specific document. The document will be marked as read when fetched.
:param document_id: Document-ID
:return: Document and the content type of the document
"""
return self.document_service.get_document(document_id)
def get_report(self, product_type=None):
"""
10.1.1. Fetch a report for all products
:param product_type: Filter by one or more of ACCOUNT, CARD, DEPOT, LOAN, SAVINGS
(list or comma-separated string)
Defaults to None (all product types without filter)
:return: Response object
"""
return self.report_service.get_report(product_type)
def get(self, endpoint, base_url='https://api.comdirect.de/api', **kwargs):
"""
Sends a generic GET-request to a given endpoint with given parameters
:param endpoint: endpoint without leading slash, e.g. 'banking/clients/clientId/v2/accounts/balances'
:param base_url: base url. Defaults to 'https://api.comdirect.de/api'
:param kwargs: query parameters
:return: Response object
"""
url = '{0}/{1}'.format(base_url, endpoint)
return self.session.get(url, params=kwargs).json()
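# Illustrative usage sketch (credentials and login data are placeholders):
# client = ComdirectClient('client_id', 'client_secret')
# client.fetch_tan('zugangsnummer', 'pin')
# client.activate_session()
# balances = client.get_all_balances()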
| 2.171875
| 2
|
indicnlp/transliterate/unicode_transliterate.py
|
shubham303/indic_nlp_library
| 432
|
12781564
|
<reponame>shubham303/indic_nlp_library
#
# Copyright (c) 2013-present, <NAME>
# All rights reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
#Program for text written in one Indic script to another based on Unicode mappings.
#
# @author <NAME>
#
import sys, string, itertools, re, os
from collections import defaultdict
from indicnlp import common
from indicnlp import langinfo
from indicnlp.script import indic_scripts as isc
from indicnlp.transliterate.sinhala_transliterator import SinhalaDevanagariTransliterator as sdt
import pandas as pd
OFFSET_TO_ITRANS={}
ITRANS_TO_OFFSET=defaultdict(list)
DUPLICATE_ITRANS_REPRESENTATIONS={}
def init():
"""
To be called by library loader, do not call it in your program
"""
### Load the ITRANS-script offset map. The map was initially generated using the snippet below (uses the old itrans transliterator)
### The map is modified as needed to accommodate extensions and corrections to the mappings
#
# base=0x900
# l=[]
# for i in range(0,0x80):
# c=chr(base+i)
# itrans=ItransTransliterator.to_itrans(c,'hi')
# l.append((hex(i),c,itrans))
# print(l)
#
# pd.DataFrame(l,columns=['offset_hex','devnag_char','itrans']).to_csv('offset_itrans_map.csv',index=False,encoding='utf-8')
itrans_map_fname=os.path.join(common.get_resources_path(),'transliterate','offset_itrans_map.csv')
#itrans_map_fname=r'D:\src\python_sandbox\src\offset_itrans_map.csv'
itrans_df=pd.read_csv(itrans_map_fname,encoding='utf-8')
global OFFSET_TO_ITRANS, ITRANS_TO_OFFSET, DUPLICATE_ITRANS_REPRESENTATIONS
for r in itrans_df.iterrows():
itrans=r[1]['itrans']
o=int(r[1]['offset_hex'],base=16)
OFFSET_TO_ITRANS[o]=itrans
if langinfo.is_consonant_offset(o):
### for consonants, strip the schwa - add halant offset
ITRANS_TO_OFFSET[itrans[:-1]].extend([o,0x4d])
else:
### the append assumes that the maatra always comes after independent vowel in the df
ITRANS_TO_OFFSET[itrans].append(o)
DUPLICATE_ITRANS_REPRESENTATIONS = {
'A': 'aa',
'I': 'ii',
'U': 'uu',
'RRi': 'R^i',
'RRI': 'R^I',
'LLi': 'L^i',
'LLI': 'L^I',
'L': 'ld',
'w': 'v',
'x': 'kSh',
'gj': 'j~n',
'dny': 'j~n',
'.n': '.m',
'M': '.m',
'OM': 'AUM'
}
class UnicodeIndicTransliterator(object):
"""
Base class for rule-based transliteration among Indian languages.
Script pair specific transliterators should derive from this class and override the transliterate() method.
They can call the super class 'transliterate()' method to avail of the common transliteration
"""
@staticmethod
def _correct_tamil_mapping(offset):
# handle missing unaspirated and voiced plosives in Tamil script
# replace by unvoiced, unaspirated plosives
# for first 4 consonant rows of varnamala
# exception: ja has a mapping in Tamil
if offset>=0x15 and offset<=0x28 and \
offset!=0x1c and \
not ( (offset-0x15)%5==0 or (offset-0x15)%5==4 ) :
subst_char=(offset-0x15)//5
offset=0x15+5*subst_char
# for 5th consonant row of varnamala
if offset in [ 0x2b, 0x2c, 0x2d]:
offset=0x2a
# 'sh' becomes 'Sh'
if offset==0x36:
offset=0x37
return offset
@staticmethod
def transliterate(text,lang1_code,lang2_code):
"""
convert the source language script (lang1) to target language script (lang2)
text: text to transliterate
lang1_code: language 1 code
lang2_code: language 2 code
"""
if lang1_code in langinfo.SCRIPT_RANGES and lang2_code in langinfo.SCRIPT_RANGES:
# if Sinhala is source, do a mapping to Devanagari first
if lang1_code=='si':
text=sdt.sinhala_to_devanagari(text)
lang1_code='hi'
# if Sinhala is target, make Devanagiri the intermediate target
org_lang2_code=''
if lang2_code=='si':
lang2_code='hi'
org_lang2_code='si'
trans_lit_text=[]
for c in text:
newc=c
offset=ord(c)-langinfo.SCRIPT_RANGES[lang1_code][0]
if offset >=langinfo.COORDINATED_RANGE_START_INCLUSIVE and offset <= langinfo.COORDINATED_RANGE_END_INCLUSIVE and c!='\u0964' and c!='\u0965':
if lang2_code=='ta':
# tamil exceptions
offset=UnicodeIndicTransliterator._correct_tamil_mapping(offset)
newc=chr(langinfo.SCRIPT_RANGES[lang2_code][0]+offset)
trans_lit_text.append(newc)
# if Sinhala is source, do a mapping to Devanagari first
if org_lang2_code=='si':
return sdt.devanagari_to_sinhala(''.join(trans_lit_text))
return ''.join(trans_lit_text)
else:
return text
class ItransTransliterator(object):
"""
Transliterator between Indian scripts and ITRANS
"""
@staticmethod
def to_itrans(text,lang_code):
if lang_code in langinfo.SCRIPT_RANGES:
if lang_code=='ml':
# Change from chillus characters to corresponding consonant+halant
text=text.replace('\u0d7a','\u0d23\u0d4d')
text=text.replace('\u0d7b','\u0d28\u0d4d')
text=text.replace('\u0d7c','\u0d30\u0d4d')
text=text.replace('\u0d7d','\u0d32\u0d4d')
text=text.replace('\u0d7e','\u0d33\u0d4d')
text=text.replace('\u0d7f','\u0d15\u0d4d')
offsets = [ isc.get_offset(c,lang_code) for c in text ]
### naive lookup
# itrans_l = [ OFFSET_TO_ITRANS.get(o, '-' ) for o in offsets ]
itrans_l=[]
for o in offsets:
itrans=OFFSET_TO_ITRANS.get(o, chr(langinfo.SCRIPT_RANGES[lang_code][0]+o) )
if langinfo.is_halanta_offset(o):
itrans=''
if len(itrans_l)>0:
itrans_l.pop()
elif langinfo.is_vowel_sign_offset(o) and len(itrans_l)>0:
itrans_l.pop()
itrans_l.extend(itrans)
return ''.join(itrans_l)
else:
return text
@staticmethod
def from_itrans(text,lang):
"""
TODO: Document this method properly
TODO: A little hack is used to handle schwa: needs to be documented
TODO: check for robustness
"""
MAXCODE=4 ### TODO: Needs to be fixed
## handle_duplicate_itrans_representations
for k, v in DUPLICATE_ITRANS_REPRESENTATIONS.items():
if k in text:
text=text.replace(k,v)
start=0
match=None
solution=[]
i=start+1
while i<=len(text):
itrans=text[start:i]
# print('===')
# print('i: {}'.format(i))
# if i<len(text):
# print('c: {}'.format(text[i-1]))
# print('start: {}'.format(start))
# print('itrans: {}'.format(itrans))
if itrans in ITRANS_TO_OFFSET:
offs=ITRANS_TO_OFFSET[itrans]
## single element list - no problem
## except when it is 'a'
## 2 element list of 2 kinds:
### 1. alternate char for independent/dependent vowel
### 2. consonant + halant
if len(offs)==2 and \
langinfo.is_vowel_offset(offs[0]):
### 1. alternate char for independent/dependent vowel
## if previous is a consonant, then use the dependent vowel
if len(solution)>0 and langinfo.is_halanta(solution[-1],lang):
offs=[offs[1]] ## dependent vowel
else:
offs=[offs[0]] ## independent vowel
c=''.join([ langinfo.offset_to_char(x,lang) for x in offs ])
match=(i,c)
elif len(itrans)==1: ## unknown character
match=(i,itrans)
elif i<len(text) and (i-start)<MAXCODE+1: ## continue matching till MAXCODE length substring
i=i+1
continue
else:
solution.extend(match[1])
# start=i-1
start=match[0]
i=start
match=None
# print('match done')
# print('match: {}'.format(match))
i=i+1
### flush matches
if match is not None:
solution.extend(match[1])
#### post-processing
## delete unnecessary halants
# print(''.join(solution))
temp_out=list(''.join(solution))
rem_indices=[]
for i in range(len(temp_out)-1):
if langinfo.is_halanta(temp_out[i],lang) and \
(langinfo.is_vowel_sign(temp_out[i+1],lang) \
or langinfo.is_nukta(temp_out[i+1],lang) \
or temp_out[i+1]==langinfo.offset_to_char(0x7f,lang)):
rem_indices.append(i)
# if temp_out[i]==langinfo.offset_to_char(0x7f,lang):
# rem_indices.append(i)
for i in reversed(rem_indices):
temp_out.pop(i)
out=''.join(temp_out)
## delete schwa placeholder
out=out.replace(langinfo.offset_to_char(0x7f,lang),'')
return out
if __name__ == '__main__':
if len(sys.argv)<4:
print("Usage: python unicode_transliterate.py <command> <infile> <outfile> <src_language> <tgt_language>")
sys.exit(1)
if sys.argv[1]=='transliterate':
src_language=sys.argv[4]
tgt_language=sys.argv[5]
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
transliterated_line=UnicodeIndicTransliterator.transliterate(line,src_language,tgt_language)
ofile.write(transliterated_line)
elif sys.argv[1]=='romanize':
language=sys.argv[4]
### temp fix to replace anusvara with corresponding nasal
#r1_nasal=re.compile(ur'\u0902([\u0915-\u0918])')
#r2_nasal=re.compile(ur'\u0902([\u091a-\u091d])')
#r3_nasal=re.compile(ur'\u0902([\u091f-\u0922])')
#r4_nasal=re.compile(ur'\u0902([\u0924-\u0927])')
#r5_nasal=re.compile(ur'\u0902([\u092a-\u092d])')
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
### temp fix to replace anusvara with corresponding nasal
#line=r1_nasal.sub(u'\u0919\u094D\\1',line)
#line=r2_nasal.sub(u'\u091e\u094D\\1',line)
#line=r3_nasal.sub(u'\u0923\u094D\\1',line)
#line=r4_nasal.sub(u'\u0928\u094D\\1',line)
#line=r5_nasal.sub(u'\u092e\u094D\\1',line)
transliterated_line=ItransTransliterator.to_itrans(line,language)
## temp fix to replace 'ph' to 'F' to match with Urdu transliteration scheme
transliterated_line=transliterated_line.replace('ph','f')
ofile.write(transliterated_line)
elif sys.argv[1]=='indicize':
language=sys.argv[4]
with open(sys.argv[2],'r', encoding='utf-8') as ifile:
with open(sys.argv[3],'w', encoding='utf-8') as ofile:
for line in ifile.readlines():
transliterated_line=ItransTransliterator.from_itrans(line,language)
ofile.write(transliterated_line)
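# Illustrative usage sketch (example text is arbitrary; assumes the library resources are initialised via init()):
# UnicodeIndicTransliterator.transliterate('नमस्ते', 'hi', 'pa')
# ItransTransliterator.to_itrans('नमस्ते', 'hi')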
| 2.375
| 2
|
utilities.py
|
RyanLinXiang/flower-classifier
| 0
|
12781565
|
<reponame>RyanLinXiang/flower-classifier
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PROGRAMMER: <NAME>
# DATE CREATED: 4th Feb 2020
# REVISED DATE: 05th Feb 2020
# PURPOSE: This file includes all the helper functions necessary to save and load the model, as well as process the images
from PIL import Image
import numpy as np
from torch import save, load, from_numpy
import torchvision.models as models
def save_model(model, save_dir, class_to_idx, arch, structure):
"""
Saves the trained and validated model
Parameters:
model : the trained and validated model
save_dir : the path where the classifier model file should be saved
class_to_idx : the mapping of categories to indices, stored so that indices predicted by the model can be traced back
arch : the architecture of the pre-trained model chosen
structure : the structure of the classifier used to initiate the model
Returns:
None
"""
classifier = {'arch': arch,
'class_to_idx': class_to_idx,
'state_dict': model.classifier.state_dict(),
'structure': structure}
save(classifier, save_dir+"/"+"classifier.pth")
def load_model(classifier_path):
"""
Load the pre-trained model from the specified file with the updated classifier (features are frozen)
Parameters:
classifier_path : the path to the saved classifier model
Returns:
classifier model
"""
classifier = load(classifier_path)
model = getattr(models, classifier['arch'])
model = model(pretrained=True)
model.classifier = classifier['structure']
model.class_to_idx = classifier['class_to_idx']
model.classifier.load_state_dict(classifier['state_dict'])
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
Parameters:
image : the path to the image file
Returns:
numpy array with the image file processed and ready as input for the model
'''
img = Image.open(image)
# the shortest dimension of the image gets width of 256 while the other dimension is resized with respect to the ratio
if img.size[0] < img.size[1]:
ratio = 256/img.size[0]
img = img.resize((256,int(img.size[1]*ratio)))
else:
ratio = 256/img.size[1]
img = img.resize((int(img.size[0]*ratio),256))
# crop a square of 224px from the center of the image in order to get the image ready for the model
top = (img.size[1] - 224)/2
bottom = (img.size[1] + 224)/2
left = (img.size[0] - 224)/2
right = (img.size[0] + 224)/2
img = img.crop((left, top, right, bottom))
img = np.array(img)/255
# normalization of the image in order to get the image ready for the model
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
img = np.transpose((img - mean) / std)
return from_numpy(img)
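# Illustrative usage sketch (the image path is hypothetical):
# tensor = process_image("flowers/test/1/image_06743.jpg")
# tensor.shape    # expected (3, 224, 224) after crop, normalisation and transpose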
| 2.859375
| 3
|
openneurodata/analyse_ophys.py
|
seankmartin/NeuralOpenData
| 0
|
12781566
|
<gh_stars>0
def summarise_single_session(allen_dataset):
## Summary in print
print(
f"-----------Working on image plane {allen_dataset.ophys_experiment_id}"
f"session {allen_dataset.ophys_session_id}------------"
)
print(f"\tThis experiment has metadata {allen_dataset.metadata}")
cell_specimen_table = allen_dataset.cell_specimen_table
print(
f"\tThere are {len(cell_specimen_table)} cells"
f"in this session with IDS {cell_specimen_table['cell_specimen_id']}"
)
methods = allen_dataset.list_data_attributes_and_methods()
print(f"The available information is {methods}")
## Plotting other information
| 2.65625
| 3
|
teamscale_precommit_client/client_configuration_utils.py
|
cqse/teamscale-cli
| 5
|
12781567
|
<gh_stars>1-10
from teamscale_client.teamscale_client_config import TeamscaleClientConfig
def get_teamscale_client_configuration(config_file):
"""Gets a Teamscale client configuration from the provided config file or a config file in the user's home dir
or both combined. This allows users to separate their credentials (e.g. in their home dir) from the project specific
configurations (e.g. in the repository roots).
"""
local_teamscale_config = None
teamscale_config_in_home_dir = None
try:
local_teamscale_config = TeamscaleClientConfig.from_config_file(config_file)
except RuntimeError:
# Error handling below, as either of the configs (or both combined) might be ok.
pass
try:
teamscale_config_in_home_dir = TeamscaleClientConfig.from_config_file_in_home_dir()
except RuntimeError:
# Error handling below, as either of the configs (or both combined) might be ok.
pass
if local_teamscale_config:
if not teamscale_config_in_home_dir:
_require_sufficient_configuration(local_teamscale_config)
return local_teamscale_config
else:
# Use config from home dir and overwrite with values from the local config.
# This allows users to combine two different config files as stated above.
teamscale_config_in_home_dir.overwrite_with(local_teamscale_config)
_require_sufficient_configuration(teamscale_config_in_home_dir)
return teamscale_config_in_home_dir
else:
if not teamscale_config_in_home_dir:
raise RuntimeError('No valid configuration found.')
else:
_require_sufficient_configuration(teamscale_config_in_home_dir)
return teamscale_config_in_home_dir
def _require_sufficient_configuration(configuration):
"""Ensures the provided configuration is sufficient for precommit analysis."""
if not configuration.is_sufficient(require_project_id=True):
raise RuntimeError('Not all necessary parameters specified in configuration file %s' %
configuration.config_file)
| 2.40625
| 2
|
class.py
|
NageshJ2014/TestRepor
| 0
|
12781568
|
lottery_player = {"name" : "Rolf", "numbers" : (20,40,30,85)}
#print(lottery_player)
class Lottery_player:
def __init__(self,name="Rolf",numbers=(20,40,30,85)):
self.name = name
self.numbers = numbers
def total(self):
return sum(self.numbers)
Player = Lottery_player("Nagesh",(12,24,36,48,60))
print(Player.name , Player.numbers)
print(Player.total())
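# Illustrative follow-up: a player built from the default values declared in __init__.
default_player = Lottery_player()
print(default_player.name, default_player.total())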
| 3.375
| 3
|
SVD-example/mnist_training.py
|
tum-pbs/racecar
| 4
|
12781569
|
import tensorflow as tf
import numpy as np
import os
import time
import argparse
import imageio
parser = argparse.ArgumentParser()
parser.add_argument("--training", type=int, default=1, help="training or testing")
parser.add_argument("--testdir", type=str, default=None, help="specify log file dir")
parser.add_argument("--testnum", type=int, default=-1, help="specify file name")
parser.add_argument("--modelnum", type=int, default=-1, help="specify model name")
parser.add_argument("--basePath", type=str, default="", help="specify base path")
parser.add_argument("--batchsize", type=int, default=64, help="set batch size")
parser.add_argument("--epochnum", type=int, default=100, help="set training epochs")
parser.add_argument("--learningrate", type=float, default=0.0001, help="set learning rate")
parser.add_argument("--maxsave", type=int, default=5, help="set saving number")
parser.add_argument("--rrfactor", type=float, default=0.0, help="set factor for rr term")
parser.add_argument("--orthofactor", type=float, default=0.0, help="set factor for orthogonal term")
parser.add_argument("--runfile", type=str, default="run.py", help="specify run file for copy")
args = parser.parse_args()
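# Example invocation (illustrative; all flags are defined by the parser above):
#   python mnist_training.py --training 1 --epochnum 100 --rrfactor 0.1 --orthofactor 0.01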
if (not args.training):
if args.testnum < 0 or args.modelnum < 0:
print("specify --testnum and --modelnum for testing!")
exit()
if args.testdir:
folderpre = args.testdir
else:
folderpre = "default"
BATCH_SIZE = 2
if not args.training:
BATCH_SIZE = 1
CLASS_NUM = 10
EPOCHS = args.epochnum
learningratevalue = args.learningrate
maxToKeep = args.maxsave
epsilon = 1e-6
imagewidth = 28
imageheight = 28
def makedir():
count = 0
currentdir = os.getcwd()+"/"
while os.path.exists(args.basePath+folderpre+"/test_%04d/"%count):
count += 1
targetdir = args.basePath+folderpre+"/test_%04d/"%count
os.makedirs(targetdir)
return targetdir
test_path = makedir()
testf = open(test_path + "testaccuracy.txt",'a+')
trainf = open(test_path + "trainloss.txt",'a+')
timef = open(test_path + "elapsedtime.txt",'a+')
os.system("cp %s %s/%s"%(args.runfile,test_path,args.runfile))
os.system("cp %s %s/%s"%(__file__,test_path,__file__))
# training data
num1, num2 = 0,1
x_train0 = np.reshape(imageio.imread("MNIST/%d.png"%num1),[1,imagewidth*imageheight])
x_train1 = np.reshape(imageio.imread("MNIST/%d.png"%num2),[1,imagewidth*imageheight])
y_train0 = np.zeros([1,10])
y_train1 = np.zeros([1,10])
y_train0[0,num1]=1
y_train1[0,num2]=1
x_train = np.concatenate((x_train0,x_train1),axis=0)
y_train = np.concatenate((y_train0,y_train1),axis=0)
# testing data
x_test0 = np.reshape(imageio.imread("MNIST/%d_test.png"%num1),[1,imagewidth*imageheight])
x_test1 = np.reshape(imageio.imread("MNIST/%d_test.png"%num2),[1,imagewidth*imageheight])
x_test = np.concatenate((x_test0,x_test1),axis=0)
y_test = y_train
TOTALWEIGHT = 0
def weight_variable(name, shape):
var = tf.get_variable(name,shape,initializer = tf.glorot_uniform_initializer())
global TOTALWEIGHT
if len(shape) == 4:
print("Convolutional layer: {}".format(shape))
TOTALWEIGHT += shape[0]*shape[1]*shape[2]*shape[3]
if len(shape) == 2:
print("fully connected layer: {}".format(shape))
TOTALWEIGHT += shape[0]*shape[1]
return var
def bias_variable(name, shape):
global TOTALWEIGHT
TOTALWEIGHT += shape[0]
return tf.get_variable(name,shape,initializer = tf.zeros_initializer())
def conv2d(x, W, padding = 'SAME',strides=[1,1,1,1]):
return tf.nn.conv2d(x, W, strides=strides, padding=padding)
def batch_norm(input, reuse=False, is_training=args.training):
return tf.contrib.layers.batch_norm(input, decay=0.9, center=True, scale=True, epsilon=1e-3,
is_training=is_training, updates_collections=None, scope=tf.get_variable_scope(), reuse = reuse)
def l2_reg_ortho(weight):
reg = tf.constant(0.)
Wshape = weight.get_shape()
if np.size(weight.get_shape().as_list()) == 2:
cols = int(Wshape[1])
else:
cols = int(Wshape[1]*Wshape[2]*Wshape[3])
rows = int(Wshape[0])
w1 = tf.reshape(weight,[-1,cols])
wt = tf.transpose(w1)
m = tf.matmul(wt,w1)
ident = tf.eye(cols,num_columns=cols)
w_tmp = (m - ident)
height = w_tmp.get_shape().as_list()[0]
u = tf.nn.l2_normalize(tf.random_normal([height,1]),dim=0,epsilon=1e-12)
v = tf.nn.l2_normalize(tf.matmul(tf.transpose(w_tmp), u), dim=0,epsilon=1e-12)
u = tf.nn.l2_normalize(tf.matmul(w_tmp, v), dim=0,epsilon=1e-12)
sigma = tf.norm(tf.reshape(tf.keras.backend.dot(tf.transpose(u), tf.matmul(w_tmp, v)),[-1]))
reg+=sigma**2
return reg
x = tf.placeholder(tf.float32, [None,imagewidth*imageheight])
y = tf.placeholder(tf.float32, [None,CLASS_NUM])
lr = tf.placeholder(tf.float32)
# forward pass
W_conv1 = weight_variable("W_conv1",[imagewidth*imageheight,CLASS_NUM])
b_conv1 = bias_variable("b_conv1",[CLASS_NUM])
fcout = tf.matmul(x, W_conv1) + b_conv1
# backward pass
back_input = tf.matmul((fcout-b_conv1),tf.transpose(W_conv1))
prediction = tf.reshape(tf.nn.softmax(fcout),[-1,CLASS_NUM])
# calculate the loss (use the raw logits here; `prediction` is already softmaxed, so passing it would apply softmax twice)
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=fcout))
loss = cross_entropy
if args.orthofactor != 0:
loss = loss + args.orthofactor*l2_reg_ortho(W_conv1)
if args.rrfactor != 0:
loss = loss + args.rrfactor * tf.reduce_mean(tf.nn.l2_loss(back_input - x))
correct_prediction = tf.equal(tf.argmax(prediction,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
train_step = tf.train.AdamOptimizer(lr).minimize(loss)
# init session
sess = tf.Session()
saver = tf.train.Saver(max_to_keep=maxToKeep)
sess.run(tf.global_variables_initializer())
if args.testnum >= 0 and args.modelnum >=0:
loadpath = args.basePath+folderpre+"/test_%04d/model_%04d.ckpt"%(args.testnum,args.modelnum)
saver.restore(sess,loadpath)
print("Model restored from %s."%(loadpath))
Epochnum = int(np.shape(x_train)[0]/BATCH_SIZE)
def saveModel(test_path, save_no):
saver.save(sess, test_path+'model_%04d.ckpt'%save_no)
msg = 'saved Model %04d.'%save_no
return msg
currenttime = time.time()
testindex = 0
if args.training:
for i in range(EPOCHS * Epochnum):
cross_e,_, trainloss = sess.run([cross_entropy , train_step,loss],feed_dict={x: x_train, y: y_train, lr:learningratevalue})
if i % (Epochnum*100) == 0:
epochindex = int(i/(Epochnum*100))
testaccuracy,outputdata= sess.run([accuracy,back_input],feed_dict={x: x_test, y: y_test})
costtime = time.time()-currenttime
print("EPOCHS: %d, train loss:%f, testing accuracy:%f, time consuming:%f"%(epochindex,trainloss,testaccuracy,costtime))
print("cross_e:%f"%cross_e)
testf.write(str(epochindex)+'\t'+str(testaccuracy)+'\r\n')
trainf.write(str(epochindex)+'\t'+str(trainloss)+'\r\n')
timef.write(str(epochindex)+'\t'+str(costtime)+'\r\n')
if (epochindex+1)%2 == 0:
print(saveModel(test_path,epochindex))
# output test image
outputdata = np.reshape(outputdata,[2,28,28])
resultpath = test_path +"backwardtest_img/"
while not os.path.exists(resultpath):
os.mkdir(resultpath)
for ind in range(2):
imageio.imwrite(resultpath + 'test%d_%04d.png'%(ind,testindex),outputdata[ind].astype(np.uint8))
testindex += 1
currenttime = time.time()
| 2.390625
| 2
|
anonramblings/ramblings/migrations/0010_post_comment_count.py
|
emre/anonramblings
| 2
|
12781570
|
<reponame>emre/anonramblings
# Generated by Django 3.0.6 on 2020-05-17 12:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('ramblings', '0009_post_reply_to'),
]
operations = [
migrations.AddField(
model_name='post',
name='comment_count',
field=models.IntegerField(default=0),
),
]
| 1.5
| 2
|
examples/golf_putting/model_simple.py
|
mcguenther/ipme
| 22
|
12781571
|
import pandas as pd
import io
import pymc3 as pm
import arviz as az
from arviz_json import get_dag, arviz_to_json
#Binomial Logistic Regression Model
#Reference: https://docs.pymc.io/notebooks/putting_workflow.html#Logit-model
#data
golf_data = """distance tries successes
2 1443 1346
3 694 577
4 455 337
5 353 208
6 272 149
7 256 136
8 240 111
9 217 69
10 200 67
11 237 75
12 202 52
13 192 46
14 174 54
15 167 28
16 201 27
17 195 31
18 191 33
19 147 20
20 152 24"""
data = pd.read_csv(io.StringIO(golf_data), sep=" ")
#model-inference
coords = {"distance": data.distance}
fileName='golf_simple_PyMC3'
samples=2000
chains=2
tune=1000
simple_model=pm.Model(coords=coords)
with simple_model:
#to store the n-parameter of Binomial dist
#in the constant group of ArviZ InferenceData
#You should always call it n for imd to retrieve it
n = pm.Data('n', data.tries)
a = pm.Normal('a')
b = pm.Normal('b')
p_goes_in = pm.Deterministic('p_goes_in', pm.math.invlogit(a * data.distance + b), dims='distance')
successes = pm.Binomial('successes', n=n, p=p_goes_in, observed=data.successes, dims='distance')
#inference
# Get posterior trace, prior trace, posterior predictive samples, and the DAG
trace = pm.sample(draws=samples, chains=chains, tune=tune)
prior= pm.sample_prior_predictive(samples=samples)
posterior_predictive = pm.sample_posterior_predictive(trace,samples=samples)
## STEP 1
# will also capture all the sampler statistics
data_s = az.from_pymc3(trace=trace, prior=prior, posterior_predictive=posterior_predictive)
## STEP 2
#dag
dag = get_dag(simple_model)
# insert dag into sampler stat attributes
data_s.sample_stats.attrs["graph"] = str(dag)
## STEP 3
# save data
arviz_to_json(data_s, fileName+'.npz')
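# Optional follow-up (illustrative): summarise the fitted slope and intercept posteriors.
# print(az.summary(data_s, var_names=["a", "b"]))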
| 2.9375
| 3
|
testdatageneration.py
|
odeliss/wapetex
| 0
|
12781572
|
from datagenerationpipeline import dataGenerationPipeline
imageDirectory = "\\\\192.168.1.37\\Multimedia\\datasets\\test\\watches_categories\\1"
fileType=".jpg"
pipeline=dataGenerationPipeline(imageDirectory, fileType)
print("[INFO]: There should be 200 images in directory 1")
pipeline.rotate()
#assert there are 1000 flipped images in directory rotated
pipeline.flip(horizontaly = True)
#assert there are 200 flipped images in directory horFlip
pipeline.flip(verticaly = True)
#assert there are 200 flipped images in directory verFlip
pipeline.flip(horizontaly = True, verticaly = True)
#assert there are 200 flipped images in directory horverFlip
pipeline.skew()
| 3.0625
| 3
|
backend/uclapi/dashboard/management/commands/clear_dashboard_data.py
|
nayanadasgupta/uclapi
| 0
|
12781573
|
from django.core.management.base import BaseCommand
from dashboard.models import User, App, APICall, Webhook, WebhookTriggerHistory
class Command(BaseCommand):
help = 'Cleans Dashboard of everything'
def handle(self, *args, **options):
string = input("THIS WILL WIPE THESE MODELS ARE YOU SURE? "
"TYPE DELETE TO CONFIRM!: ")
if string == "DELETE":
User.objects.all().delete()
App.objects.all().delete()
APICall.objects.all().delete()
Webhook.objects.all().delete()
WebhookTriggerHistory.objects.all().delete()
| 2.078125
| 2
|
imcsdk/mometa/memory/MemoryPersistentMemoryDimms.py
|
ecoen66/imcsdk
| 31
|
12781574
|
"""This module contains the general information for MemoryPersistentMemoryDimms ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class MemoryPersistentMemoryDimmsConsts:
SOCKET_ID_1 = "1"
SOCKET_ID_2 = "2"
SOCKET_ID_3 = "3"
SOCKET_ID_4 = "4"
SOCKET_ID_ALL = "ALL"
class MemoryPersistentMemoryDimms(ManagedObject):
"""This is MemoryPersistentMemoryDimms class."""
consts = MemoryPersistentMemoryDimmsConsts()
naming_props = set(['socketId'])
mo_meta = {
"classic": MoMeta("MemoryPersistentMemoryDimms", "memoryPersistentMemoryDimms", "pmemory-dimms-[socket_id]", VersionMeta.Version404b, "InputOutput", 0x3f, [], ["admin", "read-only", "user"], ['memoryPersistentMemoryLogicalConfiguration'], [], [None]),
"modular": MoMeta("MemoryPersistentMemoryDimms", "memoryPersistentMemoryDimms", "pmemory-dimms-[socket_id]", VersionMeta.Version404b, "InputOutput", 0x3f, [], ["admin", "read-only", "user"], ['memoryPersistentMemoryLogicalConfiguration'], [], [None])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"socket_id": MoPropertyMeta("socket_id", "socketId", "string", VersionMeta.Version404b, MoPropertyMeta.NAMING, 0x8, 0, 510, None, ["1", "2", "3", "4", "ALL"], []),
"socket_local_dimm_numbers": MoPropertyMeta("socket_local_dimm_numbers", "socketLocalDimmNumbers", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x10, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version404b, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"socket_id": MoPropertyMeta("socket_id", "socketId", "string", VersionMeta.Version404b, MoPropertyMeta.NAMING, 0x8, 0, 510, None, ["1", "2", "3", "4", "ALL"], []),
"socket_local_dimm_numbers": MoPropertyMeta("socket_local_dimm_numbers", "socketLocalDimmNumbers", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x10, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version404b, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version404b, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"rn": "rn",
"socketId": "socket_id",
"socketLocalDimmNumbers": "socket_local_dimm_numbers",
"status": "status",
"childAction": "child_action",
},
"modular": {
"dn": "dn",
"rn": "rn",
"socketId": "socket_id",
"socketLocalDimmNumbers": "socket_local_dimm_numbers",
"status": "status",
"childAction": "child_action",
},
}
def __init__(self, parent_mo_or_dn, socket_id, **kwargs):
self._dirty_mask = 0
self.socket_id = socket_id
self.socket_local_dimm_numbers = None
self.status = None
self.child_action = None
ManagedObject.__init__(self, "MemoryPersistentMemoryDimms", parent_mo_or_dn, **kwargs)
| 1.78125
| 2
|
tensorflow/python/tools/module_util.py
|
KosingZhu/tensorflow
| 190,993
|
12781575
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for modules."""
import os
import six
if six.PY2:
import imp # pylint: disable=g-import-not-at-top
else:
import importlib # pylint: disable=g-import-not-at-top
def get_parent_dir(module):
return os.path.abspath(os.path.join(os.path.dirname(module.__file__), ".."))
def get_parent_dir_for_name(module_name):
"""Get parent directory for module with the given name.
Args:
module_name: Module name for e.g.
tensorflow_estimator.python.estimator.api._v1.estimator.
Returns:
Path to the parent directory if module is found and None otherwise.
Given example above, it should return:
/pathtoestimator/tensorflow_estimator/python/estimator/api/_v1.
"""
name_split = module_name.split(".")
if not name_split:
return None
if six.PY2:
try:
spec = imp.find_module(name_split[0])
except ImportError:
return None
if not spec:
return None
base_path = spec[1]
else:
try:
spec = importlib.util.find_spec(name_split[0])
except ValueError:
return None
if not spec or not spec.origin:
return None
base_path = os.path.dirname(spec.origin)
return os.path.join(base_path, *name_split[1:-1])
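# Illustrative call (the module name below is just an example):
# print(get_parent_dir_for_name("tensorflow.python.tools.module_util"))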
| 1.960938
| 2
|
main_num_learners.py
|
crm416/online_boosting
| 56
|
12781576
|
<reponame>crm416/online_boosting<filename>main_num_learners.py
import argparse
from random import seed
from yaml import dump
from utils.experiment import testNumLearners
from utils.utils import *
if __name__ == "__main__":
seed(0)
parser = argparse.ArgumentParser(
description='Test error for a combination of ensembler and weak learner.')
parser.add_argument('dataset', help='dataset filename')
parser.add_argument('ensembler', help='chosen ensembler')
parser.add_argument('weak_learner', help='chosen weak learner')
parser.add_argument(
'start', help='initial number of weak learners', type=int)
parser.add_argument('end', help='final number of weak learners', type=int)
parser.add_argument(
'inc', help='increment for number of weak learners', type=int)
parser.add_argument('--record', action='store_const',
const=True, default=False, help='export the results in YAML format')
parser.add_argument(
'trials', help='number of trials (each with different shuffling of the data); defaults to 1', type=int, default=1, nargs='?')
args = parser.parse_args()
ensembler = get_ensembler(args.ensembler)
weak_learner = get_weak_learner(args.weak_learner)
data = load_data("data/" + args.dataset)
accuracy = testNumLearners(
ensembler, weak_learner, data, args.start, args.end, args.inc, trials=args.trials)
    print(accuracy)
if args.record:
results = {
'accuracy': accuracy,
'booster': args.ensembler,
'weak_learner': args.weak_learner,
'trials': args.trials,
'seed': 0
}
filename = args.ensembler + "_" + \
args.weak_learner + "_" + \
str(args.start) + "_" + str(args.end) + \
"_" + str(args.inc) + ".yml"
f = open(filename, 'w+')
f.write(dump(results))
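# Example invocation (illustrative; the dataset, ensembler and learner names are hypothetical
# placeholders for whatever utils.utils.get_ensembler/get_weak_learner accept):
#   python main_num_learners.py heart.csv OnlineAdaBoost Perceptron 10 100 10 --record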
| 2.421875
| 2
|
python/packages/nisar/workflows/rdr2geo_runconfig.py
|
isce3-testing/isce3-circleci-poc
| 0
|
12781577
|
<gh_stars>0
import journal
from nisar.workflows.runconfig import RunConfig
class Rdr2geoRunConfig(RunConfig):
def __init__(self, args):
        # all insar submodules share a common `insar` schema
super().__init__(args, 'insar')
if self.args.run_config_path is not None:
self.load_geocode_yaml_to_dict()
self.geocode_common_arg_load()
self.yaml_check()
def yaml_check(self):
error_channel = journal.error('Rdr2GeoRunConfig.yaml_check')
layers = ['x', 'y', 'z', 'incidence', 'heading', 'local_incidence',
'local_psi', 'simulated_amplitude', 'layover_shadow']
# get rdr2geo config dict from processing dict for brevity
rdr2geo_cfg = self.cfg['processing']['rdr2geo']
# list comprehend rasters to be written from layers dict
write_any_layer = any([rdr2geo_cfg[f'write_{layer}'] for layer in layers])
if not write_any_layer:
err_str = "All topo layers disabled"
error_channel.log(err_str)
raise ValueError(err_str)
| 2.015625
| 2
|
eelbrain/_wxgui/history.py
|
christianbrodbeck/Eelbrain
| 32
|
12781578
|
'''History for wx GUIs'''
# Author: <NAME> <<EMAIL>>
from logging import getLogger
import os
from typing import Optional, Tuple
import wx
from .help import show_help_txt
from .frame import EelbrainFrame
from .utils import Icon
from . import ID
TEST_MODE = False
class CallBackManager:
def __init__(self, keys):
self._callbacks = {k: [] for k in keys}
def register_key(self, key):
if key in self._callbacks:
raise KeyError("Key already registered")
self._callbacks[key] = []
def callback(self, key, *args):
for cb in self._callbacks[key]:
cb(*args)
def subscribe(self, key, func):
self._callbacks[key].append(func)
def remove(self, key, func):
try:
self._callbacks[key].remove(func)
except ValueError:
getLogger(__name__).debug("Trying to remove %r which is not in callbacks[%r]", func, key)
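# Minimal usage sketch of CallBackManager (illustrative, not used by the GUI code directly):
# callbacks = CallBackManager(('saved_change',))
# callbacks.subscribe('saved_change', lambda: print('save state changed'))
# callbacks.callback('saved_change')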
class Action:
def do(self, doc):
raise NotImplementedError
def undo(self, doc):
raise NotImplementedError
class History:
"""The history as a list of action objects
Public interface
----------------
can_redo() : bool
Whether the history can redo an action.
    can_undo() : bool
        Whether the history can undo an action.
do(action)
        perform an action
is_saved() : bool
Whether the current state is saved
redo()
Redo the latest undone action.
...
"""
def __init__(self, doc):
self.doc = doc
self._history = []
self.callbacks = CallBackManager(('saved_change',))
# point to last executed action (always < 0)
self._last_action_idx = -1
# point to action after which we saved
self._saved_idx = -2 + doc.saved
def can_redo(self):
return self._last_action_idx < -1
def can_undo(self):
return len(self._history) + self._last_action_idx >= 0
def do(self, action):
logger = getLogger(__name__)
logger.debug("Do action: %s", action.desc)
was_saved = self.is_saved()
action.do(self.doc)
if self._last_action_idx < -1:
# discard alternate future
self._history = self._history[:self._last_action_idx + 1]
self._last_action_idx = -1
if self._saved_idx >= len(self._history):
self._saved_idx = -2
self._history.append(action)
self._process_saved_change(was_saved)
def _process_saved_change(self, was_saved):
"""Process a state change in whether all changes are saved
Parameters
----------
was_saved : bool
Whether all changes were saved before the current change happened.
"""
is_saved = self.is_saved()
if is_saved != was_saved:
self.doc.saved = is_saved
self.callbacks.callback('saved_change')
def is_saved(self):
"""Determine whether the document is saved
Returns
-------
is_saved : bool
Whether the document is saved (i.e., contains no unsaved changes).
"""
current_index = len(self._history) + self._last_action_idx
return self._saved_idx == current_index
def redo(self):
was_saved = self.is_saved()
if self._last_action_idx == -1:
raise RuntimeError("We are at the tip of the history")
action = self._history[self._last_action_idx + 1]
logger = getLogger(__name__)
logger.debug("Redo action: %s", action.desc)
action.do(self.doc)
self._last_action_idx += 1
self._process_saved_change(was_saved)
def register_save(self):
"Notify the history that the document is saved at the current state"
was_saved = self.is_saved()
self._saved_idx = len(self._history) + self._last_action_idx
self._process_saved_change(was_saved)
def undo(self):
was_saved = self.is_saved()
if -self._last_action_idx > len(self._history):
raise RuntimeError("We are at the beginning of the history")
action = self._history[self._last_action_idx]
logger = getLogger(__name__)
logger.debug("Undo action: %s", action.desc)
action.undo(self.doc)
self._last_action_idx -= 1
self._process_saved_change(was_saved)
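# Illustrative sketch of a concrete Action subclass (hypothetical, for demonstration only):
# class RenameAction(Action):
#     desc = "rename"
#     def __init__(self, old_name, new_name):
#         self.old_name, self.new_name = old_name, new_name
#     def do(self, doc):
#         doc.name = self.new_name
#     def undo(self, doc):
#         doc.name = self.old_name
# History.do() applies such an action, History.undo()/redo() replay it on the document.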
class FileDocument:
"""Represent a file"""
def __init__(self, path):
self.saved = False # managed by the history
self.path = path
self.callbacks = CallBackManager(('path_change',))
def set_path(self, path):
self.path = path
self.callbacks.callback('path_change')
class FileModel:
"""Manages a document as well as its history"""
def __init__(self, doc: FileDocument):
self.doc = doc
self.history = History(doc)
def load(self, path):
raise NotImplementedError
def save(self):
self.doc.save()
self.history.register_save()
def save_as(self, path):
self.doc.set_path(path)
self.save()
class FileFrame(EelbrainFrame):
owns_file = True
_doc_name = 'document'
_name = 'Default' # internal, for config
_title = 'Title' # external, for frame title
_wildcard = "Tab Separated Text (*.txt)|*.txt|Pickle (*.pickle)|*.pickle"
def __init__(
self,
parent: wx.Frame,
pos: Optional[Tuple[int, int]],
size: Optional[Tuple[int, int]],
model: FileModel,
):
"""View object of the epoch selection GUI
Parameters
----------
parent : wx.Frame
Parent window.
others :
See TerminalInterface constructor.
"""
config = wx.Config("Eelbrain Testing" if TEST_MODE else "Eelbrain")
config.SetPath(self._name)
if pos is None:
pos = (config.ReadInt("pos_horizontal", -1),
config.ReadInt("pos_vertical", -1))
if size is None:
size = (config.ReadInt("size_width", 800),
config.ReadInt("size_height", 600))
super(FileFrame, self).__init__(parent, -1, self._title, pos, size)
self.config = config
self.model = model
self.doc = model.doc
self.history = model.history
# Bind Events ---
self.doc.callbacks.subscribe('path_change', self.UpdateTitle)
self.history.callbacks.subscribe('saved_change', self.UpdateTitle)
self.Bind(wx.EVT_CLOSE, self.OnClose)
def InitToolbar(self, can_open=True):
tb = self.CreateToolBar(wx.TB_HORIZONTAL)
tb.SetToolBitmapSize(size=(32, 32))
tb.AddTool(wx.ID_SAVE, "Save", Icon("tango/actions/document-save"),
shortHelp="Save")
self.Bind(wx.EVT_TOOL, self.OnSave, id=wx.ID_SAVE)
tb.AddTool(wx.ID_SAVEAS, "Save As", Icon("tango/actions/document-save-as"),
shortHelp="Save As")
self.Bind(wx.EVT_TOOL, self.OnSaveAs, id=wx.ID_SAVEAS)
if can_open:
tb.AddTool(wx.ID_OPEN, "Load", Icon("tango/actions/document-open"),
shortHelp="Open Rejections")
self.Bind(wx.EVT_TOOL, self.OnOpen, id=wx.ID_OPEN)
tb.AddTool(ID.UNDO, "Undo", Icon("tango/actions/edit-undo"), shortHelp="Undo")
tb.AddTool(ID.REDO, "Redo", Icon("tango/actions/edit-redo"), shortHelp="Redo")
return tb
def InitToolbarTail(self, tb):
tb.AddTool(wx.ID_HELP, 'Help', Icon("tango/apps/help-browser"))
self.Bind(wx.EVT_TOOL, self.OnHelp, id=wx.ID_HELP)
def CanRedo(self):
return self.history.can_redo()
def CanSave(self):
return bool(self.doc.path) and not self.doc.saved
def CanUndo(self):
return self.history.can_undo()
def OnClear(self, event):
self.model.clear()
def OnClose(self, event):
"""Ask to save unsaved changes.
Return True if confirmed so that child windows can unsubscribe from
document model changes.
"""
if self.owns_file and event.CanVeto() and not self.history.is_saved():
self.Raise()
msg = ("The current document has unsaved changes. Would you like "
"to save them?")
cap = "%s: Save Unsaved Changes?" % self._title
style = wx.YES | wx.NO | wx.CANCEL | wx.YES_DEFAULT
cmd = wx.MessageBox(msg, cap, style)
if cmd == wx.YES:
if self.OnSave(event) != wx.ID_OK:
event.Veto()
return
elif cmd == wx.CANCEL:
event.Veto()
return
elif cmd != wx.NO:
raise RuntimeError("Unknown answer: %r" % cmd)
logger = getLogger(__name__)
logger.debug("%s.OnClose()", self.__class__.__name__)
# remove callbacks
self.doc.callbacks.remove('path_change', self.UpdateTitle)
self.history.callbacks.remove('saved_change', self.UpdateTitle)
# save configuration
pos_h, pos_v = self.GetPosition()
w, h = self.GetSize()
self.config.WriteInt("pos_horizontal", pos_h)
self.config.WriteInt("pos_vertical", pos_v)
self.config.WriteInt("size_width", w)
self.config.WriteInt("size_height", h)
self.config.Flush()
event.Skip()
return True
def OnHelp(self, event):
show_help_txt(self.__doc__, self, self._title)
def OnOpen(self, event):
msg = ("Load the %s from a file." % self._doc_name)
if self.doc.path:
default_dir, default_name = os.path.split(self.doc.path)
else:
default_dir = ''
default_name = ''
dlg = wx.FileDialog(self, msg, default_dir, default_name,
self._wildcard, wx.FD_OPEN)
rcode = dlg.ShowModal()
dlg.Destroy()
if rcode != wx.ID_OK:
return rcode
path = dlg.GetPath()
try:
self.model.load(path)
except Exception as ex:
msg = str(ex)
title = "Error Loading %s" % self._doc_name.capitalize()
wx.MessageBox(msg, title, wx.ICON_ERROR)
raise
def OnRedo(self, event):
self.history.redo()
def OnSave(self, event):
if self.doc.path:
self.model.save()
return wx.ID_OK
else:
return self.OnSaveAs(event)
def OnSaveAs(self, event):
msg = ("Save the %s to a file." % self._doc_name)
if self.doc.path:
default_dir, default_name = os.path.split(self.doc.path)
else:
default_dir = ''
default_name = ''
dlg = wx.FileDialog(self, msg, default_dir, default_name,
self._wildcard, wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
rcode = dlg.ShowModal()
if rcode == wx.ID_OK:
path = dlg.GetPath()
self.model.save_as(path)
dlg.Destroy()
return rcode
def OnUndo(self, event):
self.history.undo()
def OnUpdateUIClear(self, event):
event.Enable(True)
def OnUpdateUIOpen(self, event):
event.Enable(True)
def OnUpdateUIRedo(self, event):
event.Enable(self.CanRedo())
def OnUpdateUISave(self, event):
event.Enable(self.CanSave())
def OnUpdateUISaveAs(self, event):
event.Enable(True)
def OnUpdateUIUndo(self, event):
event.Enable(self.CanUndo())
def UpdateTitle(self):
is_modified = not self.doc.saved
self.OSXSetModified(is_modified)
title = self._title
if self.doc.path:
title += ': ' + os.path.basename(self.doc.path)
if is_modified:
title = '* ' + title
self.SetTitle(title)
class FileFrameChild(FileFrame):
owns_file = False
| 2.4375
| 2
|
stevesie/remote_resource.py
|
Stevesie/stevesie-py
| 1
|
12781579
|
import os
import re
import json
from typing import Sequence, GenericMeta
from datetime import datetime, date
import dateutil.parser
import inflection
from stevesie import resources
from stevesie.utils import api
DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
class RemoteResource(object):
_is_hydrated = False
def set_hydrated(self):
self._is_hydrated = True
def hydrate(self, obj, fetch_remote=True, limit=None):
hydrate_args = {}
for field_name in self._fields:
field_type = self._field_types[field_name]
api_field_name = inflection.camelize(field_name, uppercase_first_letter=False)
field_value = obj.get(api_field_name, obj.get(field_name))
if field_value is not None:
if field_type == datetime:
field_value = dateutil.parser.parse(field_value)
elif issubclass(field_type, RemoteResource):
field_value = field_type().hydrate(field_value, fetch_remote=fetch_remote)
elif issubclass(field_type, Sequence) \
and issubclass(field_type.__class__, GenericMeta):
# TODO - serious debt, can't otherwise figure out the type of a typing.Sequence
sequence_class_string = str(field_type)
match = re.search(r'\[(.*)\]', sequence_class_string)
module_parts = match.group(1).split('.')
if len(module_parts) == 1: # referring to self using string type hack
class_name_match = re.search(r'\(\'(.*)\'\)', module_parts[0])
class_name = class_name_match.group(1)
module_name = inflection.underscore(class_name)
else:
module_name = module_parts[2]
class_name = module_parts[3]
mod = getattr(resources, module_name)
cls = getattr(mod, class_name)
field_value = [cls().hydrate(item, fetch_remote=fetch_remote) \
for item in field_value]
hydrate_args[field_name] = field_value
hydrated = self._replace(**hydrate_args)
hydrated.set_hydrated()
return hydrated
def fetch(self):
api_json = api.get(self.resource_url)
obj = self.parse_api_response(api_json)
return self.hydrate(obj)
def destroy(self):
api.delete(self.resource_url)
def to_json(self, obj=None):
if obj is None:
obj = self
def inner_json(inner_obj):
if isinstance(inner_obj, list):
return [self.to_json(o) for o in inner_obj]
if isinstance(inner_obj, RemoteResource):
return self.to_json(inner_obj)
return inner_obj
if hasattr(obj, 'collection_type') and obj.collection_type is not None:
# little hack for implicit remote resource collection
return [inner_json(value) for value in obj.items()]
return {key: inner_json(value) for key, value in obj._asdict().items()}
def save_to_file(self, local_filename):
def serialize(obj):
if isinstance(obj, (datetime, date)):
return obj.strftime(DATETIME_FORMAT)
raise TypeError('Cannot serialize %s' % type(obj))
with open(os.path.expanduser(local_filename), 'w') as file:
json.dump(self.to_json(), file, default=serialize)
def load_from_file(self, local_filename):
with open(os.path.expanduser(local_filename)) as file:
obj = json.load(file)
return self.hydrate(obj)
def parse_api_response(self, api_json):
return api_json['item']
@property
def resource_params(self):
return {}
@property
def is_hydrated(self):
return self._is_hydrated
@property
def resource_path(self):
pass
@property
def resource_url(self):
return api.BASE_URL_PATH + self.resource_path
| 2.328125
| 2
|
farms2face/payments/apps.py
|
dev1farms2face/f2f
| 0
|
12781580
|
<filename>farms2face/payments/apps.py
from __future__ import unicode_literals
from django.apps import AppConfig
class PaymentsConfig(AppConfig):
name = 'payments'
def ready(self):
import payments.signals # noqa
| 1.273438
| 1
|
evaluation_data_purify.py
|
gucheol/deep-text-recognition-benchmark
| 0
|
12781581
|
import os
import glob
import shutil
from PIL import Image
# for file_path in glob.glob('./data/evaluation/card_evaluation/*.png'):
# file_name = os.path.basename(file_path)
# if file_name.find(',') >= 0 or file_name.find('@') >=0 or file_name.find('*') > 0 or file_name.find(':') > 0 \
# or file_name.find('r') > 0 or file_name.find('성별') > 0 or file_name.find('KOR') > 0 or file_name.find('~') > 0:
# # shutil.move(file_path, os.path.join('result', 'no_way_out', file_name))
# # print(file_path, os.path.join('result', 'no_way_out', file_name))
# continue
# shutil.copy(file_path, os.path.join('./data/evaluation/valid_card_data', file_name))
src = './data/evaluation/valid_card_data/'
# target = './data/evaluation/deleted/'
# for file_path in glob.glob('./result/tagging_error/*.png'):
# file_name = os.path.basename(file_path)
# src_file_path = os.path.join(src, file_name)
# print(src_file_path, os.path.join(target, file_name))
# shutil.move(src_file_path, os.path.join(target, file_name))
for file_path in glob.glob(src + '*.png'):
base_name = os.path.basename(file_path)
if file_path.find('_(F-4)_') > 0:
target_file_path = file_path.replace('_(F-4)_', '_재외동포(F-4)_')
shutil.move(file_path, target_file_path)
# print(file_path, target_file_path)
# continue
# if base_name.find('e') > 0 :
# print(file_path)
| 2.703125
| 3
|
clean_aws/compare_format.py
|
SamuelDiai/Dash-Website
| 0
|
12781582
|
import time
import pandas as pd
from dash_website.utils.aws_loader import load_excel, load_parquet, load_feather
if __name__ == "__main__":
time_excel = 0
for idx_load_excel in range(10):
start_excel = time.time()
load_excel("xwas/univariate_results/linear_correlations.xlsx")
time_excel += time.time() - start_excel
print("Load excel", time_excel)
time_parquet = 0
    for idx_load_parquet in range(10):
start_parquet = time.time()
load_parquet("xwas/univariate_results/linear_correlations.parquet")
time_parquet += time.time() - start_parquet
print("Load parquet", time_parquet)
time_feather = 0
for idx_load_feather in range(10):
start_feather = time.time()
corr = load_feather("xwas/univariate_results/linear_correlations.feather").set_index("index")
corr.columns = pd.MultiIndex.from_tuples(
list(map(eval, corr.columns.tolist())), names=["dimension", "category", "variable"]
)
time_feather += time.time() - start_feather
print("Load feather", time_feather)
| 2.46875
| 2
|
back/to_do/models.py
|
valuko/ChiefOnboarding
| 0
|
12781583
|
from django.contrib.auth import get_user_model
from django.contrib.postgres.fields import JSONField
from django.db import models
from organization.models import BaseTemplate
from misc.models import Content
class ToDo(BaseTemplate):
content = models.ManyToManyField(Content)
due_on_day = models.IntegerField(default=0)
form = JSONField(models.CharField(max_length=100000, default='[]'))
# Chat bot specific actions
send_back = models.BooleanField(default=False)
channel = models.CharField(max_length=10000, null=True, blank=True)
def get_slack_form(self):
slack_form_items = []
for i in self.form:
options = []
if i['type'] == 'select':
for j in i['options']:
options.append({
"text": {
"type": "plain_text",
"text": j['name'],
"emoji": True,
# "action_id": j['id']
},
"value": j['name']
})
slack_form_items.append({
"type": "input",
"block_id": i['id'],
"element": {
"type": "static_select",
"placeholder": {
"type": "plain_text",
"text": "Select an item",
"emoji": True
},
"options": options,
"action_id": i['id']
},
"label": {
"type": "plain_text",
"text": i['text'],
"emoji": True
}
})
if i['type'] == 'input':
slack_form_items.append({
"type": "input",
"block_id": i['id'],
"element": {
"type": "plain_text_input",
"action_id": i['id']
},
"label": {
"type": "plain_text",
"text": i['text'],
"emoji": True
}
})
if i['type'] == 'text':
slack_form_items.append({
"type": "input",
"block_id": i['id'],
"element": {
"type": "plain_text_input",
"multiline": True,
"action_id": i['id']
},
"label": {
"type": "plain_text",
"text": i['text'],
"emoji": True
}
})
return slack_form_items
def valid_for_slack(self):
valid = True
for i in self.form:
if i['type'] == 'check' or i['type'] == 'upload':
valid = False
break
return valid
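# Illustrative example of the JSON stored in `form` (hypothetical content, matching the
# keys read by get_slack_form above):
# [{"id": "q1", "type": "input", "text": "What is your laptop preference?"},
#  {"id": "q2", "type": "select", "text": "T-shirt size",
#   "options": [{"id": "s", "name": "S"}, {"id": "m", "name": "M"}]}]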
| 2.03125
| 2
|
WebSpider/proxy.py
|
bianQ/similarweb
| 0
|
12781584
|
<gh_stars>0
"""
Author : Alan
Date : 2021/7/24 9:39
Email : <EMAIL>
"""
import requests
from retry import retry
from settings import GET_IP_URL, WHITE_URL, USER, PWD
def get_my_ip():
try:
# resp = requests.get('http://myip.top', timeout=2)
resp = requests.get('http://ipinfo.io', timeout=2)
return resp.json().get('ip')
except Exception as e:
print(e)
def Merge(dict1, dict2):
res = {**dict1, **dict2}
return res
class Proxy:
def __init__(self):
        self.session = requests.session()
self.headers = {
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_3_3 like Mac OS X) AppleWebKit/603.3.8 (KHTML, like Gecko) Mobile/14G60 MicroMessenger/6.5.19 NetType/4G Language/zh_TW",
}
self.mainUrl = 'https://api.myip.la/en?json'
# if not WHITE_URL or not GET_IP_URL:
        #     raise ValueError("Missing proxy configuration")
        # # set the IP whitelist
# my_ip = get_my_ip()
# if my_ip:
# self.set_white(my_ip)
@retry(exceptions=Exception, delay=1)
def get_proxy(self) -> dict:
ip = {}
        # data = self.session.get(GET_IP_URL).json()
# return data['data'][0]
entry = 'http://{}:{}@proxy.ipidea.io:2334'.format(USER, PWD)
proxy = {
'http': entry,
'https': entry,
}
try:
res = requests.get(self.mainUrl, headers=self.headers, proxies=proxy, timeout=10)
print(res.status_code, res.text)
ipstr = str(res.json()["ip"])
except Exception as e:
ipstr = ''
print("访问失败", e)
pass
if ipstr:
ip['ip'] = ipstr
# print(type(ip), ip)
port = {'port': '2334'}
# print(type(port), port)
data = Merge(ip, port)
# data = dict([ip.items()] + [port.items()])
else:
data = {}
return data
def set_white(self, ip):
url = WHITE_URL + ip
        res = self.session.get(url)
print(res.text)
proxy_server = Proxy()
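# Usage sketch (illustrative): fetch one proxy exit IP through the configured gateway.
# data = proxy_server.get_proxy()
# if data:
#     print("exit ip:", data["ip"], "port:", data["port"])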
| 2.65625
| 3
|
countries/auth.py
|
sinaesmaili216/blog
| 0
|
12781585
|
import functools
from flask import Blueprint
from flask import flash
from flask import g
from flask import redirect
from flask import render_template
from flask import request
from flask import session
from flask import url_for
from werkzeug.security import check_password_hash
from werkzeug.security import generate_password_hash
from countries.db import get_db
bp = Blueprint("auth", __name__, url_prefix="/auth")
def login_required(view):
"""View decorator that redirects anonymous users to the login page."""
@functools.wraps(view)
def wrapped_view(**kwargs):
if g.user is None:
return redirect(url_for("auth.login"))
return view(**kwargs)
return wrapped_view
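# Usage sketch of the decorator (the view below is hypothetical):
# @bp.route("/secret")
# @login_required
# def secret():
#     return "only for logged-in users"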
#@bp.before_app_request
@bp.route("/hello")
def hello():
return "hello"
| 2.328125
| 2
|
daily_coding_problem/934.py
|
mhetrerajat/ds-challenge
| 0
|
12781586
|
def naive(string: str) -> str:
map = {}
for char in string:
count = map.get(char, 0)
if count >= 1:
return char
else:
map[char] = 1
return None
def get_recurring_char(string: str) -> str:
"""assuming string only contains lowercase letters"""
checker = 0
for char in string:
bin_idx = ord(char) - ord("a")
        # the AND is non-zero when this character's bit is already set,
        # i.e. the character has been seen before (a duplicate)
if (checker & (1 << bin_idx)) > 0:
return char
checker = checker | (1 << bin_idx)
return None
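# Worked example of the bit trick (illustrative): in "acbbac", 'a', 'c' and 'b' set
# bits 0, 2 and 1, so checker becomes 0b111; the second 'b' maps to bit 1, and
# checker & (1 << 1) is already non-zero, so 'b' is returned.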
if __name__ == "__main__":
testcases = [("acbbac", "b"), ("abcdef", None)]
for (testcase, expected) in testcases:
assert get_recurring_char(testcase) == expected
| 3.703125
| 4
|
tests/format-keys/examples/test.py
|
looking-for-a-job/format-keys.py
| 0
|
12781587
|
<filename>tests/format-keys/examples/test.py
#!/usr/bin/env python
import format_keys
string = "https://api.travis-ci.org/{fullname}.svg?branch={branch}"
keys = format_keys.keys(string)
print(keys)
| 1.828125
| 2
|
flyingcracker/weather/urls.py
|
grahamu/flyingcracker
| 0
|
12781588
|
from django.urls import re_path
from . import views
app_name = 'weather'
urlpatterns = [
re_path(r'^$', views.weather, name='root'),
re_path(r'^current/$', views.current, name='current'),
re_path(r'^unitchange/$', views.unit_change, name='unit-change'),
re_path(r'^generate/$', views.generate, name='generate'),
re_path(r'^delete/$', views.delete, name='delete'),
re_path(r'^data/$', views.output_data, name='data'),
re_path(r'^chart/$', views.chart, name='chart'),
]
| 1.804688
| 2
|
cmus_scrobbler/cli.py
|
JosephVSN/cmus-scrobbler
| 0
|
12781589
|
#!/usr/bin/python
"""Console script for cmus_scrobbler."""
import cmus_scrobbler
import argparse
import sys
def main():
"""Console script for cmus_scrobbler."""
parser = argparse.ArgumentParser()
parser.add_argument('status', nargs='*')
parser.add_argument('-c', '--config', nargs=2, help="Called with the API KEY and API SECRET KEY as arguments, updates their values in the config.")
args = parser.parse_args()
return cmus_scrobbler.main(args)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| 2.484375
| 2
|
salmon/commands.py
|
elwoodpd/smoked-salmon
| 42
|
12781590
|
<reponame>elwoodpd/smoked-salmon
import asyncio
import importlib
import os
import html
from urllib import parse
import click
import pyperclip
import salmon.checks
import salmon.converter
import salmon.database
import salmon.play
import salmon.search
import salmon.sources
import salmon.trackers
import salmon.tagger
import salmon.uploader
import salmon.web # noqa F401
from salmon import config
from salmon.common import commandgroup
from salmon.common import compress as recompress
from salmon.common import str_to_int_if_int
from salmon.tagger.audio_info import gather_audio_info
from salmon.tagger.combine import combine_metadatas
from salmon.tagger.metadata import clean_metadata, remove_various_artists
from salmon.tagger.retagger import create_artist_str
from salmon.tagger.sources import run_metadata
from salmon.uploader.spectrals import (
check_spectrals,
handle_spectrals_upload_and_deletion,
post_upload_spectral_check,
)
from salmon.uploader.upload import generate_source_links
for name in os.listdir(
os.path.join(os.path.dirname(os.path.dirname(__file__)), "plugins")
):
if not name.startswith(".") and not name.startswith("_"):
if name.endswith(".py"):
name = name[:-3]
try:
importlib.import_module(f"plugins.{name}")
except ImportError as e:
click.secho(
f"The plugin {name} could not be imported.", fg="red", bold=True
)
raise e
loop = asyncio.get_event_loop()
@commandgroup.command()
@click.argument(
"path", type=click.Path(exists=True, file_okay=False, resolve_path=True), nargs=1
)
@click.option("--no-delete-specs", "-nd", is_flag=True)
@click.option("--format-output", "-f", is_flag=True)
def specs(path, no_delete_specs, format_output):
"""Generate and open spectrals for a folder"""
audio_info = gather_audio_info(path)
_, sids = check_spectrals(path, audio_info, check_lma=False)
spath = os.path.join(path, "Spectrals")
spectral_urls = handle_spectrals_upload_and_deletion(
spath, sids, delete_spectrals=not no_delete_specs
)
filenames = list(audio_info.keys())
if spectral_urls:
output = []
for spec_id, urls in spectral_urls.items():
if format_output:
output.append(
f'[hide={filenames[spec_id]}][img={"][img=".join(urls)}][/hide]'
)
else:
output.append(f'{filenames[spec_id]}: {" ".join(urls)}')
output = "\n".join(output)
click.secho(output)
if config.COPY_UPLOADED_URL_TO_CLIPBOARD:
pyperclip.copy(output)
if no_delete_specs:
click.secho(f'Spectrals saved to {os.path.join(path, "Spectrals")}', fg="green")
@commandgroup.command()
@click.argument("urls", type=click.STRING, nargs=-1)
def descgen(urls):
"""Generate a description from metadata sources"""
if not urls:
return click.secho("You must specify at least one URL", fg="red")
tasks = [run_metadata(url, return_source_name=True) for url in urls]
metadatas = loop.run_until_complete(asyncio.gather(*tasks))
metadata = clean_metadata(combine_metadatas(*((s, m) for m, s in metadatas)))
remove_various_artists(metadata["tracks"])
description = "[b][size=4]Tracklist[/b]\n\n"
multi_disc = len(metadata["tracks"]) > 1
for dnum, disc in metadata["tracks"].items():
for tnum, track in disc.items():
if multi_disc:
description += (
f"[b]{str_to_int_if_int(str(dnum), zpad=True)}-"
f"{str_to_int_if_int(str(tnum), zpad=True)}.[/b] "
)
else:
description += f"[b]{str_to_int_if_int(str(tnum), zpad=True)}.[/b] "
description += f'{create_artist_str(track["artists"])} - {track["title"]}\n'
if metadata["comment"]:
description += f"\n{metadata['comment']}\n"
if metadata["urls"]:
description += "\n[b]More info:[/b] " + generate_source_links(metadata["urls"])
click.secho("\nDescription:\n", fg="yellow", bold=True)
click.echo(description)
if config.COPY_UPLOADED_URL_TO_CLIPBOARD:
pyperclip.copy(description)
@commandgroup.command()
@click.argument(
"path", type=click.Path(exists=True, file_okay=False, resolve_path=True)
)
def compress(path):
"""Recompress a directory of FLACs to level 8"""
for root, _, figles in os.walk(path):
for f in sorted(figles):
if os.path.splitext(f)[1].lower() == ".flac":
filepath = os.path.join(root, f)
click.secho(f"Recompressing {filepath[len(path) + 1:]}...")
recompress(filepath)
@commandgroup.command()
@click.option(
"--torrent-id",
"-i",
default=None,
help="Torrent id or URL, tracker from URL will overule -t flag.",
)
@click.option(
"--tracker",
"-t",
help=f'Tracker choices: ({"/".join(salmon.trackers.tracker_list)})',
)
@click.argument(
"path",
type=click.Path(exists=True, file_okay=False, resolve_path=True),
nargs=1,
default=".",
)
def checkspecs(tracker, torrent_id, path):
"""Will check the spectrals of a given torrent based on local files.\n
By default checks the folder the script is run from.
Can add spectrals to a torrent description and report a torrent as lossy web.
"""
if not torrent_id:
click.secho("No torrent id provided.", fg="red")
torrent_id = click.prompt(
click.style(
"""Input a torrent id or a URL containing one.
Tracker in a URL will override -t flag.""",
fg="magenta",
bold=True,
),
)
if "/torrents.php" in torrent_id:
base_url = parse.urlparse(torrent_id).netloc
if base_url in salmon.trackers.tracker_url_code_map.keys():
            # this will override the -t tracker flag
tracker = salmon.trackers.tracker_url_code_map[base_url]
else:
click.echo('Unrecognised tracker!')
raise click.Abort
torrent_id = int(
parse.parse_qs(parse.urlparse(torrent_id).query)['torrentid'][0]
)
elif torrent_id.strip().isdigit():
torrent_id = int(torrent_id)
else:
click.echo('Not a valid torrent!')
raise click.Abort
tracker = salmon.trackers.validate_tracker(None, 'tracker', tracker)
gazelle_site = salmon.trackers.get_class(tracker)()
req = loop.run_until_complete(gazelle_site.request("torrent", id=torrent_id))
path = os.path.join(path, html.unescape(req['torrent']['filePath']))
source_url = None
source = req['torrent']['media']
click.echo(f"Generating spectrals for {source} sourced: {path}")
track_data = gather_audio_info(path)
post_upload_spectral_check(
gazelle_site, path, torrent_id, None, track_data, source, source_url
)
| 2.015625
| 2
|
19-CS-Data-Structures/01_queue/queue.py
|
shalevy1/data-science-journal
| 71
|
12781591
|
<gh_stars>10-100
class Queue:
def __init__(self):
self.size = 0
# what data structure should we
# use to store queue elements?
self.storage = LinkedList()
def enqueue(self, item):
self.storage.add_to_tail(item)
self.size += 1
def dequeue(self):
item = self.storage.pop_head() # popping from head is easier than tail
if item:
self.size -= 1
return item
def len(self):
return self.size
class Node:
def __init__(self, value=None, next_node=None):
self.value = value
self.next_node = next_node
# the above is all you need, but here are
# some nice accessors:
def get_value(self):
return self.value
def get_next(self):
return self.next_node
# a nice mutator:
def set_next(self, new_next):
self.next_node = new_next
def __repr__(self):
return f"{self.value}"
class LinkedList:
def __init__(self):
self.head = None
self.tail = None
# methods to add to list
def add_to_tail(self, value):
new_node = Node(value)
if not self.head: # so we can first add to head/tail
self.head = new_node
self.tail = new_node
else:
self.tail.set_next(new_node)
self.tail = new_node
def add_to_head(self, value):
new_node = Node(value)
if not self.head: # so we can first add to head/tail
self.head = new_node
self.tail = new_node
else:
temp_node = self.head
self.head = new_node
self.head.set_next(temp_node)
# method to find in list
def contains(self, value):
        if not self.head:  # if no head, list is empty, cannot contain our value
return False
current = self.head
while current:
if current.get_value() == value:
return True
current = current.get_next()
return False
# method to remove from list
def pop_head(self):
if not self.head:
return None
else:
temp_node = self.head
self.head = self.head.get_next()
return temp_node.value
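# Quick usage sketch (illustrative):
# q = Queue()
# q.enqueue('a'); q.enqueue('b')
# assert q.dequeue() == 'a' and q.len() == 1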
| 4.0625
| 4
|
tests/qos/conftest.py
|
dmytroxshevchuk/sonic-mgmt
| 132
|
12781592
|
<gh_stars>100-1000
from .args.qos_sai_args import add_qos_sai_args
from .args.buffer_args import add_dynamic_buffer_calculation_args
# QoS pytest arguments
def pytest_addoption(parser):
'''
Adds option to QoS pytest
Args:
parser: pytest parser object
Returns:
None
'''
add_qos_sai_args(parser)
add_dynamic_buffer_calculation_args(parser)
| 1.757813
| 2
|
tool_read_file_to_db/read_file_to_db.py
|
xuerenlv/PaperWork
| 1
|
12781593
|
<reponame>xuerenlv/PaperWork<filename>tool_read_file_to_db/read_file_to_db.py
# -*- coding: utf-8 -*-
'''
Created on Oct 16, 2015
@author: nlp
'''
import os
from store_model import Single_weibo_store
import sys
import traceback
reload(sys)
sys.setdefaultencoding('utf8')
if __name__ == '__main__':
filelist = os.listdir('./china_hongkong_conflict_files')
count = 0
for file_name in filelist:
for line in open('./china_hongkong_conflict_files/' + file_name, 'r').readlines():
count+=1
line_change = line.split(']')
uid = ''
nickname = ''
is_auth = ''
weibo_url = ''
content = ''
praise_num = ''
retweet_num = ''
comment_num = ''
creat_time = ''
for one in line_change:
if 'uid:' in one:
uid = one[one.find(':') + 1:]
if 'nickname:' in one:
nickname = one[one.find(':') + 1:]
if 'is_auth:' in one:
is_auth = one[one.find(':') + 1:]
if 'weibo_url:' in one:
weibo_url = one[one.find(':') + 1:]
if 'content:' in one:
content = one[one.find(':') + 1:]
if 'praise_num:' in one:
praise_num = one[one.find(':') + 1:]
if 'retweet_num:' in one:
retweet_num = one[one.find(':') + 1:]
if 'comment_num:' in one:
comment_num = one[one.find(':') + 1:]
if 'creat_time:' in one:
creat_time = one[one.find(':') + 1:]
print uid,nickname,is_auth
print weibo_url
print content
print praise_num,retweet_num,comment_num
print creat_time
try:
single_weibo = Single_weibo_store(uid, nickname, is_auth , weibo_url , content, praise_num , retweet_num, comment_num , creat_time)
single_weibo.save()
except:
print traceback.format_exc()
print count
pass
| 2.234375
| 2
|
models/toy_models.py
|
MichaelArbel/GeneralizedEBM
| 40
|
12781594
|
<filename>models/toy_models.py
from torch import nn
import numpy as np
import torch.nn.functional as F
import torch
from torch.nn.utils import spectral_norm as sn_official
spectral_norm = sn_official
import samplers
import os
class Generator(nn.Module):
def __init__(self, dim=3, device ='cuda'):
super(Generator, self).__init__()
W = torch.eye(dim).float().to(device)
U = torch.zeros([dim,dim]).float().to(device)
U = torch.from_numpy( np.array([[1.,0,0],[0,1.,0],[0,0,1.]]) ).float().to(device)
self.W = nn.Linear(dim,dim, bias=False)
self.U = nn.Linear(dim,dim, bias=False)
self.W.weight.data = W
self.U.weight.data = U
self.W.weight.requires_grad = False
#self.U = U
def forward(self, latent):
# latent is a gaussian in 3D
# normalize to get a uniform on the sphere
Z = latent/torch.norm(latent, dim=1).unsqueeze(-1)
U = self.W(Z) +self.U(Z**4)
R = torch.sum(U**2, dim=1).unsqueeze(-1)
return R*Z
class Discriminator(nn.Module):
def __init__(self,dim, device='cuda', sn = True):
super(Discriminator, self).__init__()
W_1 =torch.randn([dim, dim]).float().to(device)
W_2 =torch.randn([dim, dim]).float().to(device)
W_3 =torch.randn([dim, dim]).float().to(device)
W_4 =torch.randn([dim, dim]).float().to(device)
U = torch.ones([dim]).float().to(device)/np.sqrt(dim)
self.W_1 = nn.Linear(dim,dim, bias=False)
#self.W_2 = nn.Linear(dim,dim, bias=False)
#self.W_3 = nn.Linear(dim,dim, bias=False)
#self.W_4 = nn.Linear(dim,dim, bias=False)
self.U = nn.Linear(dim,1, bias=False)
self.W_1.weight.data = W_1
#self.W_2.weight.data = W_2
#self.W_3.weight.data = W_3
#self.W_4.weight.data = W_4
self.U.weight.data = U
self.leak = 0.1
self.sn = sn
if self.sn:
self.main = nn.Sequential( spectral_norm(self.W_1),
nn.LeakyReLU(self.leak),
#spectral_norm(self.W_2),
#nn.LeakyReLU(self.leak),
#spectral_norm(self.W_3),
#nn.LeakyReLU(self.leak),
#spectral_norm(self.W_4),
#nn.LeakyReLU(self.leak),
spectral_norm(self.U)
)
else:
self.main = nn.Sequential( self.W_1,
nn.LeakyReLU(self.leak),
#self.W_2,
#nn.LeakyReLU(self.leak),
#self.W_3,
#nn.LeakyReLU(self.leak),
#self.W_4,
#nn.LeakyReLU(self.leak),
self.U
)
def forward(self, data):
return self.main(data)
class BaseDataset(torch.utils.data.Dataset):
def __init__(self, N_samples, dtype, device, b_size, root):
self.total_size = N_samples
self.cur_index = b_size
self.b_size = b_size
self.device = device
self.N_samples = N_samples
self.dtype=dtype
D=3
self.base = Generator(dim=D).to(self.device)
self.base.W.weight.data = torch.eye(3).float().to(self.device)
self.base.U.weight.data = torch.from_numpy( np.array([[1.,0,0],[0,1.,0],[0,0,1.]]) ).float().to(self.device)
self.energy = Discriminator(dim=D, sn=False).to(device)
W_1 = np.array([[0.1,-1.,-1.],[-1.,0.1,-1.],[-1.,-1.,0.1]])
W_2 = np.array([[-0.1,-1.,1.],[-1.,-0.1,1.],[1.,-1.,-0.1]])
W_3 = np.array([[-0.1,-1.,1.],[-1.,-0.1,1.],[1.,-1.,-0.1]])
W_4 = np.array([[0.1,-1.,-1.],[-1.,0.1,-1.],[-1.,-1.,0.1]])
U = np.array([1.,1.,1.])/np.sqrt(3)
_,S_1,_ = np.linalg.svd(W_1)
_,S_2,_ = np.linalg.svd(W_2)
_,S_3,_ = np.linalg.svd(W_3)
_,S_4,_ = np.linalg.svd(W_4)
W_1 = W_1/S_1[0]
W_2 = W_2/S_2[0]
W_3 = W_3/S_3[0]
W_4 = W_4/S_4[0]
self.energy.W_1.weight.data = torch.from_numpy(W_1).float().to(self.device)
#self.energy.W_2.weight.data = torch.from_numpy(W_2).float().to(self.device)
#self.energy.W_3.weight.data = torch.from_numpy(W_3).float().to(self.device)
#self.energy.W_4.weight.data = torch.from_numpy(W_4).float().to(self.device)
self.energy.U.weight.data = torch.from_numpy( U ).float().to(self.device)
self.source = torch.distributions.multivariate_normal.MultivariateNormal(torch.zeros([D] ).to(self.device), torch.eye(D).to(self.device))
self.latent_potential = samplers.Latent_potential(self.base,self.energy, self.source, 1.)
self.latent_sampler = samplers.MALA(self.latent_potential, T=1000, gamma=1e-3)
file_name = 'toy_data'
data_path = os.path.join(root, file_name)
if not os.path.exists(data_path+'.npz'):
data, latents, noise = self.make_data()
np.savez(data_path, data=data.cpu().numpy(),
latents=latents.cpu().numpy(), noise=noise.cpu().numpy())
else:
dataset = np.load(data_path+'.npz')
self.data = torch.from_numpy(dataset['data'])
self.latents = torch.from_numpy(dataset['latents'])
self.noise = torch.from_numpy(dataset['noise'])
self.counter = 0
def make_data(self):
self.noise =torch.cat([ self.source.sample([self.b_size]).cpu() for b in range(int( self.N_samples/self.b_size)+1)], dim=0)
self.latents = self.sample_latents(self.noise , T = 100)
self.data = self.sample_data(self.latents)
return self.data, self.latents, self.noise
def __len__(self):
return self.total_size
def sample_latents(self,priors, T, with_acceptance = False):
#return priors
posteriors = []
avg_acceptences = []
for b, prior in enumerate(priors.split(self.b_size, dim=0)):
prior = prior.clone().to(self.device)
posterior,avg_acceptence = self.latent_sampler.sample(prior,sample_chain=False,T=T)
posteriors.append(posterior)
avg_acceptences.append(avg_acceptence)
posteriors = torch.cat(posteriors, axis=0)
avg_acceptences = np.mean(np.array(avg_acceptences), axis=0)
if with_acceptance:
return posteriors, avg_acceptences
else:
return posteriors
def sample_data(self, latents, to_cpu = True, as_list=False):
self.base.eval()
self.energy.eval()
images = []
for latent in latents.split(self.b_size, dim=0):
with torch.no_grad():
img = self.base(latent.to(self.device))
if to_cpu:
img = img.cpu()
images.append(img)
if as_list:
return images
else:
return torch.cat(images, dim=0)
def __getitem__(self,index):
self.counter +=1
if np.mod(self.counter, 100.*self.N_samples ) ==0:
print('sampling data')
self.latents = self.sample_latents(self.latents , T = 10)
self.data = self.sample_data(self.latents)
return self.data[index],self.data[index]
| 2.203125
| 2
|
direct_posterior_comparison.py
|
cunningham-lab/cyclic-gps
| 0
|
12781595
|
<reponame>cunningham-lab/cyclic-gps
import numpy as np
import torch
from torch.nn import Parameter
from cyclic_gps.models import LEGFamily
import matplotlib.pyplot as plt
DTYPE = torch.double
RANK = 5
PATH_TO_NPY = "../numpy_arrays/"
with open(PATH_TO_NPY + "sample3_ts.npy", "rb") as f:
sample3_ts = np.load(f)
with open(PATH_TO_NPY + "sample3_vals.npy", "rb") as f:
sample3_vals = np.load(f)
with open(PATH_TO_NPY + "N.npy", "rb") as f:
N = np.load(f)
with open(PATH_TO_NPY + "R.npy", "rb") as f:
R = np.load(f)
with open(PATH_TO_NPY + "B.npy", "rb") as f:
B = np.load(f)
with open(PATH_TO_NPY + "Lambda.npy", "rb") as f:
L = np.load(f)
sample3_ts = torch.from_numpy(sample3_ts)
sample3_vals = torch.from_numpy(sample3_vals)
N = torch.from_numpy(N)
R = torch.from_numpy(R)
B = torch.from_numpy(B)
L = torch.from_numpy(L)
print(sample3_ts.shape)
print(sample3_vals.shape)
print(N.shape)
leg_model = LEGFamily(rank=RANK, obs_dim=sample3_vals.shape[1], data_type=DTYPE)
leg_model.N = Parameter(N)
leg_model.R = Parameter(R)
leg_model.B = Parameter(B)
leg_model.Lambda = Parameter(L)
leg_model.calc_G()
# print(leg_model.N)
# print(leg_model.R)
# print(leg_model.B)
# print(leg_model.Lambda)
sample3_ts_chopped = sample3_ts[:200]
sample3_vals_chopped = sample3_vals[:200]
test_ll = leg_model.log_likelihood(sample3_ts, sample3_vals)
print("test log_likelihood with jackson's params: {}".format(test_ll))
forecast_times = sample3_ts[200:300]
# plt.scatter(sample3_ts,sample3_vals[:,0],color='C1',alpha=.2)
# plt.scatter(sample3_ts_chopped,sample3_vals_chopped[:,0],color='C0')
# plt.show()
#print("N check before preds: {}".format(leg_model.N))
pred_means, pred_variances = leg_model.make_predictions(sample3_ts_chopped, sample3_vals_chopped, forecast_times)
#print("N check after preds: {}".format(leg_model.N))
pred_means = pred_means.detach().numpy()
pred_variances = pred_variances.detach().numpy()
plt.scatter(sample3_ts_chopped, sample3_vals_chopped[:, 0], label='observed data')
plt.scatter(sample3_ts[200:300], sample3_vals[200:300][:, 0],label='censored data')
plt.plot(forecast_times, pred_means[:,0], 'C1', label='forecasting')
plt.fill_between(forecast_times,
pred_means[:,0]+2*np.sqrt(pred_variances[:,0,0]),
pred_means[:,0]-2*np.sqrt(pred_variances[:,0,0]),
color='black',alpha=.5,label='Uncertainty')
plt.legend() #bbox_to_anchor=[1,1],fontsize=20
plt.show()
| 2.40625
| 2
|
0350_Intersection_of_Two_Arrays_II.py
|
21PIRLO21/LeetCode2020
| 0
|
12781596
|
from typing import List

from collections import Counter
# sort
class Solution_1:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
nums1.sort()
nums2.sort()
res = []
len1 = len(nums1)
len2 = len(nums2)
idx1 = 0
idx2 = 0
while idx1 < len1 and idx2 < len2:
if nums1[idx1] < nums2[idx2]:
idx1 += 1
elif nums1[idx1] > nums2[idx2]:
idx2 += 1
else:
res.append(nums1[idx1])
idx1 += 1
idx2 += 1
return res
# hash
class Solution_2:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
Hash = {}
for i in range(len(nums1)):
if nums1[i] not in Hash:
                # the value stored in Hash can be a list object (the indices where the number occurs)
Hash[nums1[i]] = [i]
else:
Hash[nums1[i]].append(i)
res = []
for j in range(len(nums2)):
if nums2[j] in Hash:
res.append(nums2[j])
Hash[nums2[j]].pop()
if Hash[nums2[j]] == []:
del Hash[nums2[j]]
return res
# the fastest
class Solution_3:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
from collections import Counter
res = []
for k, v in (Counter(nums1) & Counter(nums2)).items():
for _ in range(v):
res.append(k)
return res
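

# Small usage sketch (an illustrative addition; on LeetCode the judge creates the
# Solution object itself, so this block only matters for standalone runs):
if __name__ == "__main__":
    nums1, nums2 = [1, 2, 2, 1], [2, 2]
    print(Solution_1().intersect(list(nums1), list(nums2)))  # [2, 2]
    print(Solution_2().intersect(nums1, nums2))              # [2, 2]
    print(Solution_3().intersect(nums1, nums2))              # [2, 2]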
| 3.359375
| 3
|
tests/test_validate_quantile_csv_file.py
|
Serena-Wang/zoltpy
| 0
|
12781597
|
import json
from unittest import TestCase
from unittest.mock import patch
from zoltpy.covid19 import validate_config_dict, validate_quantile_csv_file
class CdcIOTestCase(TestCase):
"""
"""
def test_validate_quantile_csv_file_calls_validate_config_dict(self):
validation_config = {'target_groups':
[{"outcome_variable": "inc flu hosp", "targets": [], "locations": [], "quantiles": []}]}
with patch('zoltpy.covid19.validate_config_dict') as validate_config_mock:
validate_quantile_csv_file('tests/quantile-predictions.csv', validation_config)
validate_config_mock.assert_called_once_with(validation_config)
validation_config = {'target_groups': [
{"outcome_variable": "inc flu hosp", "targets": [], "locations": [], "quantiles": ['not a number']}]}
error_messages = validate_quantile_csv_file('tests/quantile-predictions.csv', validation_config)
self.assertEqual(1, len(error_messages))
self.assertIn("invalid validation_config", error_messages[0])
def test_validate_config_dict(self):
# case: not a dict
with self.assertRaisesRegex(RuntimeError, "validation_config was not a dict"):
validate_config_dict(None)
# case: dict but no 'target_groups' key
with self.assertRaisesRegex(RuntimeError, "validation_config did not contain 'target_groups' key"):
validate_config_dict({})
# case: has 'target_groups', but not a list
with self.assertRaisesRegex(RuntimeError, "'target_groups' was not a list"):
validate_config_dict({'target_groups': None})
# case: dict with one 'target_groups', but not all keys present in it
with self.assertRaisesRegex(RuntimeError, "one or more target group keys was missing"):
validate_config_dict({'target_groups': [{}]})
# case: dict with one 'target_groups' with all keys present, but targets, locations, and quantiles not lists
bad_target_groups = [{"outcome_variable": "inc flu hosp", "targets": 'not a list', "locations": [], "quantiles": []},
{"outcome_variable": "inc flu hosp", "targets": [], "locations": 'not a list', "quantiles": []},
{"outcome_variable": "inc flu hosp", "targets": [], "locations": [], "quantiles": 'not a list'}]
for bad_target_group in bad_target_groups:
with self.assertRaisesRegex(RuntimeError, "one of these fields was not a list"):
validate_config_dict({'target_groups': [bad_target_group]})
# case: dict with one 'target_groups', but its name is not a string
with self.assertRaisesRegex(RuntimeError, "'outcome_variable' field was not a string"):
validate_config_dict({'target_groups': [{"outcome_variable": None, "targets": [], "locations": [], "quantiles": []}]})
# case: dict with one 'target_groups' with all keys present, but targets or locations contain non-strings
bad_target_groups = [{"outcome_variable": "inc flu hosp", "targets": [-1], "locations": [], "quantiles": []},
{"outcome_variable": "inc flu hosp", "targets": [], "locations": [-1], "quantiles": []}]
for bad_target_group in bad_target_groups:
with self.assertRaisesRegex(RuntimeError, "one of these fields contained non-strings"):
validate_config_dict({'target_groups': [bad_target_group]})
# case: dict with one 'target_groups' with all keys present, but quantiles contains non-numbers
with self.assertRaisesRegex(RuntimeError, "'quantiles' field contained non-numbers"):
validate_config_dict({'target_groups': [
{"outcome_variable": "inc flu hosp", "targets": [], "locations": [], "quantiles": ['not a number']}]})
# case: blue sky
try:
validate_config_dict({'target_groups':
[{"outcome_variable": "inc flu hosp", "targets": [], "locations": [], "quantiles": []}]})
except Exception as ex:
self.fail(f"unexpected exception: {ex}")
# case: load from file
with open('tests/covid-validation-config.json', 'r') as fp:
validation_config = json.load(fp)
try:
validate_config_dict(validation_config)
except Exception as ex:
self.fail(f"unexpected exception: {ex}")
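
# For reference, a minimal valid validation_config consistent with the checks
# above (an illustrative assumption, not the contents of the real
# tests/covid-validation-config.json):
#   {"target_groups": [{"outcome_variable": "inc flu hosp",
#                       "targets": ["1 wk ahead inc flu hosp"],
#                       "locations": ["US"],
#                       "quantiles": [0.025, 0.5, 0.975]}]}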
| 2.546875
| 3
|
fsm/states/gui.py
|
nczeak/switchgrass_cv_application
| 0
|
12781598
|
<gh_stars>0
class WelcomeState:
"""
The welcome state for the application.
"""
| 1.195313
| 1
|
tests/test_cell.py
|
kafonek/gridthings
| 2
|
12781599
|
import pytest
from gridthings import Cell
# Cells represent individual data points in a grid
# They implement a variety of mathematical dunder methods
# so that they can be compared, sorted, and manipulated
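
# For orientation, a minimal sketch of the behavior these tests assume (an
# illustrative assumption, not the actual gridthings implementation): ordering
# and arithmetic delegate to ``value``, while ``==`` between two cells compares
# the whole cell, coordinates included.
class _ValueCellSketch:
    def __init__(self, y, x, value):
        self.y, self.x, self.value = y, x, value

    def __eq__(self, other):
        if isinstance(other, _ValueCellSketch):
            return (self.y, self.x, self.value) == (other.y, other.x, other.value)
        return self.value == other

    def __lt__(self, other):
        return self.value < getattr(other, "value", other)

    def __add__(self, other):
        return self.value + getattr(other, "value", other)
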
def test_cell_when_equal():
c1 = Cell(y=0, x=0, value="foo")
c2 = Cell(y=0, x=1, value="foo")
# using ==, cell values are equal but actual
# cell objects are not considered equal
assert c1.value == c2.value
assert c1 != c2
# When > and < operators are used, it's
# a pure comparison on values
# so c1 == c2 is False, but c1 >= c2 is True
assert c1 >= c2
assert c2 >= c1
assert c1 <= c2
assert c2 <= c1
def test_cell_when_unequal():
c1 = Cell(y=0, x=0, value=1)
c2 = Cell(y=0, x=1, value=2)
assert c1 != c2
assert c1 < c2
assert c1 <= c2
assert c2 > c1
assert c2 >= c1
def test_cell_against_non_cells():
cell = Cell(y=0, x=0, value=2)
# __eq__
assert cell == 2
assert 2 == cell
# __ne__
assert cell != 0
assert 0 != cell
# __gte__ / __lte__
assert 3 >= cell
assert cell <= 3
assert 1 <= cell
assert cell >= 1
# __gt__ / __lt__
assert 3 > cell
assert cell < 3
assert 1 < cell
assert cell > 1
# __add__
assert cell + 2 == 4
assert 2 + cell == 4
# __sub__
assert 2 - cell == 0
assert cell - 2 == 0
# __mul__
assert 3 * cell == 6
assert cell * 3 == 6
# __truediv__
assert cell / 2 == 1
# __pow__
assert cell ** 3 == 8
def test_cell_when_mismatched_datatype():
c1 = Cell(y=0, x=0, value="foo")
c2 = Cell(y=0, x=0, value=1)
assert c1 != c2
with pytest.raises(TypeError):
# < not supported between instances of 'str' and 'int'
assert c1 < c2
def test_cell_str_concat():
c1 = Cell(y=0, x=0, value="foo")
c2 = Cell(y=0, x=1, value="bar")
assert c1 + c2 == "foobar"
assert c2 + c1 == "barfoo"
assert c1 + "baz" == "foobaz"
assert "baz" + c2 == "bazbar"
def test_cell_int_math():
c1 = Cell(y=0, x=0, value=2)
c2 = Cell(y=0, x=0, value=4)
c3 = Cell(y=0, x=0, value=6)
assert c1 + c2 == 6
assert c2 + c1 == 6
assert c1 + 2 == 4
assert 2 + c1 == 4
assert c1 + c2 + c3 == 12
assert c2 - c1 == 2
assert 4 - c1 == 2
assert c3 - c2 - c1 == 0
assert c1 * c2 == 8
assert 2 * c2 == 8
assert c1 * c2 * c3 == 48
assert c1 / c2 == 0.5
assert 4 / c1 == 2
assert c1 ** 3 == 8
assert c2 ** c1 == 16
assert 2 ** c1 == 4
def test_subclass_cell():
class MyCell(Cell):
extra_arg: bool = True
cell = MyCell(y=0, x=0, value=1)
assert cell.dict() == {"y": 0, "x": 0, "value": 1, "extra_arg": True}
cell2 = MyCell(y=0, x=0, value=1, extra_arg=False)
assert cell2.dict() == {"y": 0, "x": 0, "value": 1, "extra_arg": False}
| 3.5
| 4
|
conf/twitterbot_config.py
|
xabrickx/chirping-snoos
| 0
|
12781600
|
import os
import logging
#[PATHS]
# Paths will be based on the location of this file which is ./conf by default. Adjust accordingly!
FILEPATH = os.path.abspath(os.path.dirname(__file__))
ENV_PATH = FILEPATH + "/env"
#[LOGGING]
LOG_PATH = FILEPATH + "/../logs/"
LOG_FILE = "twitterbot.log"
LOG_LEVEL = logging.DEBUG
#[PRAW]
USER_AGENT = "" #Your Unique USER AGENT for Reddit
SUBREDDIT = "" # The Subreddit you want to target
REDDIT_NEWPOST_LIMIT = 100 #How many new posts to check
REDDIT_SHORTPATH = "redd.it/" # For creating the shortlink to reddit
#[DB]
TWEETDATA_PATH = FILEPATH + "/../db/"
TWEETDATA_FILENAME = "chirping-snoos.db"
subtweet_kwargs = {"tweetdata_path" : TWEETDATA_PATH, "tweetdb_filename" : TWEETDATA_FILENAME}
#[TWITTER]
TWEET_UPVOTE_THRESHOLD = 10 #Minimum upvotes to be considered for tweeting
TWEET_COMMENT_THRESHOLD = 20 #minimum comments to be considered for tweeting
TWEET_ABSOLUTE_LIMIT = 270 #Max characters for a tweet
TWEET_PREFIX="" #This text will appear before the title from reddit
TWEET_SUFFIX="" #This text will appear after the title and link from reddit
TWEET_PART_SEPARATOR = " " #This is used to separate the prefix, title, link and suffix if desired
INTERTWEET_DELAY_SEC = 0.7 # Delay between tweets. Recommended 0.5 or more to avoid flooding twitter
TWITTER_TIMESTAMP_FORMAT = "%a %b %d %H:%M:%S %z %Y" #Import Twitter's timestamp format into arrow
#If the title is too long, it will be shortened to fit.
#Longtitle_Hint is shown at the end of the shortened text to symbolize shortening
TWEET_LONGTITLE_HINT = "..."
| 2.421875
| 2
|
special_cases.py
|
GBTAmmoniaSurvey/LineFitting
| 0
|
12781601
|
#=======================================================================================================================
# an example file on how to build special test/training cubes using nh3_testcube.py
#=======================================================================================================================
import numpy as np
import pyspeckit.spectrum.models.ammonia_constants as nh3con
from pyspeckit.spectrum.units import SpectroscopicAxis as spaxis
from astropy.utils.console import ProgressBar
import sys
import nh3_testcubes as testcubes
def generate_cubes(nCubes=100, nBorder=1, noise_rms=0.1, output_dir='random_cubes', random_seed=None,
linenames=['oneone', 'twotwo'], remove_low_sep=True, noise_class=True):
xarrList = []
lineIDList = []
for linename in linenames:
# generate spectral axis for each ammonia lines
xarr = spaxis((np.linspace(-500, 499, 1000) * 5.72e-6
+ nh3con.freq_dict[linename] / 1e9),
unit='GHz',
refX=nh3con.freq_dict[linename] / 1e9,
velocity_convention='radio', refX_unit='GHz')
xarrList.append(xarr)
# specify the ID fore each line to appear in saved fits files
        if linename == 'oneone':
            lineIDList.append('11')
        elif linename == 'twotwo':
lineIDList.append('22')
else:
# use line names at it is for lines above (3,3)
lineIDList.append(linename)
# generate random parameters for nCubes
nComps, Temp, Width, Voff, logN = testcubes.generate_parameters(nCubes, random_seed)
gradX, gradY = testcubes.generate_gradients(nCubes, random_seed)
if noise_class:
# Creates a balanced training set with 1comp, noise, and 2comp classes
        nComps = np.concatenate((np.zeros(nCubes // 3).astype(int),
                                 np.ones(nCubes // 3).astype(int),
                                 np.ones(nCubes // 3 + nCubes % 3).astype(int) + 1))
if remove_low_sep:
Voff = remove_low_vsep(Voff, Width)
cubes = []
for xarr, lineID in zip(xarrList, lineIDList):
# generate cubes for each line specified
cubeList = []
print('----------- generating {0} lines ------------'.format(lineID))
for i in ProgressBar(range(nCubes)):
cube_i = testcubes.make_and_write(nCubes, nComps[i], i, nBorder, xarr, Temp[i], Width[i], Voff[i], logN[i], gradX[i], gradY[i]
, noise_rms, lineID, output_dir)
cubeList.append(cube_i)
cubes.append(cubeList)
return cubes
def remove_low_vsep(Voff, Width):
Voff = Voff.swapaxes(0, 1)
Voff1, Voff2 = Voff[0], Voff[1]
Width = Width.swapaxes(0, 1)
Width1, Width2 = Width[0], Width[1]
# Find where centroids are too close
too_close = np.where(np.abs(Voff1 - Voff2) < np.max(np.column_stack((Width1, Width2)), axis=1))
# Move the centroids farther apart by the length of largest line width
min_Voff = np.min(np.column_stack((Voff2[too_close], Voff1[too_close])), axis=1)
max_Voff = np.max(np.column_stack((Voff2[too_close], Voff1[too_close])), axis=1)
Voff1[too_close] = min_Voff - np.max(np.column_stack((Width1[too_close], Width2[too_close])), axis=1) / 2.
Voff2[too_close] = max_Voff + np.max(np.column_stack((Width1[too_close], Width2[too_close])), axis=1) / 2.
Voff = np.array([Voff1, Voff2]).swapaxes(0, 1)
return Voff
if __name__ == '__main__':
print(sys.argv)
if len(sys.argv) > 1:
generate_cubes(nCubes=int(sys.argv[1]))
else:
generate_cubes()
| 2.03125
| 2
|
tests/test_bed_adapter_consumer_kafka_timer.py
|
ba-tno/python-test-bed-adapter
| 1
|
12781602
|
<reponame>ba-tno/python-test-bed-adapter
import unittest
import sys
import threading
import time
import logging
import json
sys.path.append("..")
import datetime
from test_bed_adapter.options.test_bed_options import TestBedOptions
from test_bed_adapter import TestBedAdapter
logging.basicConfig(level=logging.INFO)
class TestConsumerWithAdapter(unittest.TestCase):
def test_consumer_from_adapter(self, **keywords):
self.was_any_message_obtained = False
self.wait_seconds = 5
        # If no options are provided we grab them from file
        if "test_bed_options" not in keywords:
            options_file = open("config_files_for_testing/test_bed_options_for_tests_consumer.json", encoding="utf8")
            options = json.loads(options_file.read())
            options_file.close()
            test_bed_options = TestBedOptions(options)
            test_bed_options.client_id = test_bed_options.client_id + "---" + str(datetime.datetime.now())
        else:
            test_bed_options = keywords["test_bed_options"]
        # the adapter is needed in both cases, so build it after the branch
        test_bed_adapter = TestBedAdapter(test_bed_options)
        # We add the message handler
        test_bed_adapter.on_message += self.handle_message
e = threading.Event()
t = threading.Thread(target=self.run_consumer_in_thread, args=(e, test_bed_adapter))
t.start()
        # wait self.wait_seconds seconds for the thread to finish its work
        t.join(self.wait_seconds)
        if t.is_alive():
            print("thread is not done, setting event to kill thread.")
            e.set()
        else:
            print("thread has already finished.")
# If we have a parameter with a boolean "expect_messages" we check that we obtained messages according to it.
if ("expect_messages" in keywords.keys()):
self.assertTrue(keywords["expect_messages"] == self.was_any_message_obtained)
else:
self.assertTrue(self.was_any_message_obtained)
test_bed_adapter.stop()
pass
def handle_message(self,message):
logging.info("\n\n-------\n\n")
self.was_any_message_obtained=True
logging.info(message)
def run_consumer_in_thread(self, e, test_bed_adapter):
data = set()
test_bed_adapter.initialize()
test_bed_adapter.consumer_managers["standard_cap"].listen_messages()
# test_bed_adapter.consumers["simulation-entity-item"].listen_messages()
for i in range(self.wait_seconds):
data.add(i)
if not e.isSet():
time.sleep(1)
else:
break
if __name__ == '__main__':
unittest.main()
| 2.375
| 2
|
scripts/automation/trex_control_plane/stf/trex_stf_lib/trex_daemon_server.py
|
timgates42/trex-core
| 956
|
12781603
|
#!/usr/bin/python
import outer_packages
import daemon
from trex_server import do_main_program, trex_parser
import CCustomLogger
import logging
import time
import sys
import os, errno
import grp
import signal
from daemon import runner
from extended_daemon_runner import ExtendedDaemonRunner
import lockfile
import errno
class TRexServerApp(object):
def __init__(self):
TRexServerApp.create_working_dirs()
self.stdin_path = '/dev/null'
self.stdout_path = '/dev/tty' # All standard prints will come up from this source.
self.stderr_path = "/var/log/trex/trex_daemon_server.log" # All log messages will come up from this source
self.pidfile_path = '/var/run/trex/trex_daemon_server.pid'
self.pidfile_timeout = 5 # timeout in seconds
def run(self):
do_main_program()
@staticmethod
def create_working_dirs():
if not os.path.exists('/var/log/trex'):
os.mkdir('/var/log/trex')
if not os.path.exists('/var/run/trex'):
os.mkdir('/var/run/trex')
def main ():
trex_app = TRexServerApp()
# setup the logger
default_log_path = '/var/log/trex/trex_daemon_server.log'
try:
CCustomLogger.setup_daemon_logger('TRexServer', default_log_path)
logger = logging.getLogger('TRexServer')
logger.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s %(name)-10s %(module)-20s %(levelname)-8s %(message)s")
handler = logging.FileHandler("/var/log/trex/trex_daemon_server.log")
logger.addHandler(handler)
except EnvironmentError, e:
if e.errno == errno.EACCES: # catching permission denied error
print "Launching user must have sudo privileges in order to run TRex daemon.\nTerminating daemon process."
exit(-1)
daemon_runner = ExtendedDaemonRunner(trex_app, trex_parser)
#This ensures that the logger file handle does not get closed during daemonization
daemon_runner.daemon_context.files_preserve=[handler.stream]
try:
if not set(['start', 'stop']).isdisjoint(set(sys.argv)):
print "Logs are saved at: {log_path}".format( log_path = default_log_path )
daemon_runner.do_action()
except lockfile.LockTimeout as inst:
logger.error(inst)
print inst
print """
Please try again once the timeout has been reached.
If this error continues, consider killing the process manually and restart the daemon."""
if __name__ == "__main__":
main()
| 2.03125
| 2
|
arangodb/tests/test_arangodb.py
|
OuesFa/integrations-core
| 0
|
12781604
|
# (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import logging
import os
import mock
import pytest
from requests import HTTPError
from datadog_checks.arangodb import ArangodbCheck
from datadog_checks.dev.http import MockResponse
from datadog_checks.dev.utils import get_metadata_metrics
from .common import METRICS
@pytest.mark.integration
def test_invalid_endpoint(aggregator, instance_invalid_endpoint, dd_run_check):
check = ArangodbCheck('arangodb', {}, [instance_invalid_endpoint])
with pytest.raises(Exception):
dd_run_check(check)
aggregator.assert_service_check('arangodb.openmetrics.health', ArangodbCheck.CRITICAL, count=1)
@pytest.mark.integration
@pytest.mark.parametrize(
'tag_condition, base_tags',
[
pytest.param(
'valid_id_mode',
['endpoint:http://localhost:8529/_admin/metrics/v2', 'server_mode:default', 'server_id:1'],
id="valid id and valid mode",
),
pytest.param(
'invalid_mode_valid_id',
['endpoint:http://localhost:8529/_admin/metrics/v2', 'server_id:1'],
id="invalid mode but valid id",
),
pytest.param(
'valid_mode_invalid_id',
['endpoint:http://localhost:8529/_admin/metrics/v2', 'server_mode:default'],
id="valid mode but invalid id",
),
pytest.param(
'invalid_mode_invalid_id',
['endpoint:http://localhost:8529/_admin/metrics/v2'],
id="invalid mode and invalid id",
),
],
)
def test_check(instance, dd_run_check, aggregator, tag_condition, base_tags):
check = ArangodbCheck('arangodb', {}, [instance])
def mock_requests_get(url, *args, **kwargs):
fixture = url.rsplit('/', 1)[-1]
return MockResponse(file_path=os.path.join(os.path.dirname(__file__), 'fixtures', tag_condition, fixture))
with mock.patch('requests.get', side_effect=mock_requests_get, autospec=True):
dd_run_check(check)
aggregator.assert_service_check(
'arangodb.openmetrics.health',
ArangodbCheck.OK,
count=1,
tags=['endpoint:http://localhost:8529/_admin/metrics/v2'],
)
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
for metric in METRICS:
aggregator.assert_metric(metric)
for tag in base_tags:
aggregator.assert_metric_has_tag(metric, tag)
aggregator.assert_all_metrics_covered()
@pytest.mark.parametrize(
'side_effect, log_message',
[
pytest.param(
HTTPError, "Unable to get server foo, skipping `server_foo` tag.", id="HTTPError getting server tag"
),
pytest.param(
Exception,
"Unable to query `http://localhost:8529/test_endpoint/foo` to collect `server_foo` tag, received error:",
id="Exception getting server tag",
),
],
)
def test_get_server_tag(instance, caplog, side_effect, log_message):
caplog.clear()
check = ArangodbCheck('arangodb', {}, [instance])
with mock.patch("datadog_checks.base.utils.http.RequestsWrapper.get", side_effect=side_effect):
caplog.set_level(logging.DEBUG)
check.get_server_tag('foo', '/test_endpoint/foo')
assert log_message in caplog.text
@pytest.mark.parametrize(
'server_tags, args',
[
pytest.param([None, None], [], id="No server tags returned"),
pytest.param(
['server_mode:foo', 'server_id:bar'], ['server_mode:foo', 'server_id:bar'], id="Server tags returned"
),
],
)
def test_refresh_scrapers(instance, server_tags, args):
check = ArangodbCheck('arangodb', {}, [instance])
with mock.patch("datadog_checks.arangodb.check.ArangodbCheck.get_server_tag") as mock_get_server_tag:
mock_get_server_tag.side_effect = server_tags
check.set_dynamic_tags = mock.MagicMock()
check.refresh_scrapers()
check.set_dynamic_tags.assert_called_once_with(*args)
| 1.96875
| 2
|
test/db_test/operate_remote_dbs/context_prepare.py
|
AnonymousAuthor2013/KnowAlpha
| 2
|
12781605
|
from programmingalpha.DataSet.DBLoader import MongoStackExchange
import programmingalpha
from programmingalpha.Utility.TextPreprocessing import PreprocessPostContent
import json
import logging
import argparse
import tqdm
import multiprocessing
logging.basicConfig(format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt = '%m/%d/%Y %H:%M:%S',
level = logging.INFO)
logger = logging.getLogger(__name__)
def init(questionsData_G,answersData_G,indexData_G,copy=True):
global preprocessor
preprocessor=PreprocessPostContent()
global questionsData,answersData,indexData
if copy:
questionsData=questionsData_G.copy()
answersData=answersData_G.copy()
indexData=indexData_G.copy()
else:
questionsData=questionsData_G
answersData=answersData_G
indexData=indexData_G
logger.info("process {} init".format(multiprocessing.current_process()))
def fetchQuestionData(q_ids_set):
questionsData={}
needed_answerIds=set()
query={
"$or":[
{"AcceptedAnswerId":{"$exists":True,"$ne":''},"FavoriteCount":{"$gte":3}},
{"AnswerCount":{"$gte":args.answerNum}},
]
}
for question in tqdm.tqdm(docDB.questions.find(query).batch_size(args.batch_size),desc="loading questions"):
Id=question["Id"]
if Id not in q_ids_set:
continue
del question["_id"]
questionsData[Id]={"Title":question["Title"],"Body":question["Body"],"AcceptedAnswerId":question["AcceptedAnswerId"]}
needed_answerIds.add(question["AcceptedAnswerId"])
logger.info("loaded: questions({})".format(len(questionsData)))
return questionsData, needed_answerIds
def fetchAnswerData(ansIdxGlobal,questionsDataGlobal):
answersData={}
for ans in tqdm.tqdm(docDB.answers.find().batch_size(args.batch_size),desc="loading answers"):
Id=ans["Id"]
if Id not in ansIdxGlobal or ans["ParentId"] not in questionsDataGlobal:
continue
answersData[Id]={"Body":ans["Body"],"Score":ans["Score"]}
logger.info("loaded: answers({})".format(len(answersData)))
return answersData
def fetchIndexData(questionDataGlobal):
indexData={}
for indexer in tqdm.tqdm(docDB.stackdb["QAIndexer"].find().batch_size(args.batch_size),desc="loading indexers"):
Id=indexer["Id"]
if Id not in questionDataGlobal:
continue
del indexer["_id"]
indexData[Id]=indexer
logger.info("loaded: indexer({})".format(len(indexData)))
return indexData
#generate Core
def _getBestAnswers(q_id,K):
answers=[]
if "AcceptedAnswerId" in questionsData[q_id]:
ans_id=questionsData[q_id]["AcceptedAnswerId"]
if ans_id in answersData:
answer=answersData[ans_id]
K-=1
ans_idx=indexData[q_id]["Answers"]
scored=[]
for id in ans_idx:
if id in answersData:
scored.append((id,answersData[id]["Score"]))
if scored:
scored.sort(key=lambda x:x[1],reverse=True)
for i in range(min(K-1,len(scored))):
id=scored[i][0]
answers.append(answersData[id])
if K<args.answerNum:
answers=[answer]+answers
return answers
def _getPreprocess(txt):
txt_processed=preprocessor.getPlainTxt(txt)
if len(" ".join(txt_processed).split())<20:
return None
return txt_processed
def _genCore(distances):
#try:
q_id=distances["id"]
#get question
if q_id not in questionsData:
return None
question=questionsData[q_id]
title=question["Title"]
body=question["Body"]
question =_getPreprocess(body)
if not question:
return None
question=[title]+question
#get answer
answer=_getBestAnswers(q_id, K=args.answerNum)
if not answer:
return None
answer=_getPreprocess(answer[0]["Body"])
if not answer:
return None
#get context
relative_q_ids=[]
dists=distances["distances"]
for id in dists:
if id not in questionsData:
continue
if len(relative_q_ids)>=10:
break
if dists[id]==1:
relative_q_ids.append(id)
elif dists[id]==0:
relative_q_ids.insert(0,id)
else:
pass
if len(relative_q_ids)==0:
return None
context=[]
for q_id in relative_q_ids:
ans=_getBestAnswers(q_id,args.answerNum)
if not ans:
continue
context.extend(ans)
if len(context)==0:
#logger.info("due to none context")
return None
context.sort(key=lambda ans:ans["Score"],reverse=True)
contexts=[]
for txt in context:
txt=_getPreprocess(txt["Body"])
if not txt:
continue
contexts.extend(txt)
if len(contexts)==0:
#logger.info("due to none context")
return None
record={"question":question,"context":contexts,"answer":answer}
return record
#except :
# logger.warning("except triggered for distance data: {}".format(distances))
# return None
def generateContextAnswerCorpusParallel(distanceData,questionsDataGlobal,answersDataGlobal,indexDataGlobal):
cache=[]
batch_size=args.batch_size
batches=[distanceData[i:i+batch_size] for i in range(0,len(distanceData),batch_size)]
workers=multiprocessing.Pool(args.workers,initializer=init,
initargs=(questionsDataGlobal,answersDataGlobal,indexDataGlobal)
)
with open(programmingalpha.DataPath+"Corpus/"+args.db.lower()+"-context.json","w") as f:
for batch_links in tqdm.tqdm(batches,desc="processing documents"):
for record in workers.map(_genCore,batch_links):
if record is not None:
cache.append(json.dumps(record)+"\n")
f.writelines(cache)
cache.clear()
workers.close()
workers.join()
def generateContextAnswerCorpus(distanceData,questionsDataGlobal,answersDataGlobal,indexDataGlobal):
cache=[]
init(questionsDataGlobal,answersDataGlobal,indexDataGlobal,copy=False)
with open(programmingalpha.DataPath+"Corpus/"+args.db.lower()+"-context.json","w") as f:
for link in tqdm.tqdm(distanceData,desc="processing documents"):
record =_genCore(link)
if record is not None:
cache.append(json.dumps(record)+"\n")
if len(cache)>args.batch_size:
f.writelines(cache)
cache.clear()
if len(cache)>0:
f.writelines(cache)
cache.clear()
def main():
logger.info("loading distance data")
distance_file=programmingalpha.DataPath+"linkData/"+dbName.lower()+'-2graph.json'
distance_data=[]
q_ids_set=set()
with open(distance_file,"r") as f:
for line in f:
path=json.loads(line)
q_ids_set.add(path["id"])
q_ids_set.update(path["distances"])
distance_data.append(path)
logger.info("loaded {} links data".format(len(distance_data)))
questionsDataGlobal, ansIdxGlobal=fetchQuestionData(q_ids_set)
answersDataGlobal=fetchAnswerData(ansIdxGlobal,questionsDataGlobal.keys())
indexerDataGlobal=fetchIndexData(questionsDataGlobal.keys())
distance_dataNew=[]
for distance in distance_data:
id=distance["id"]
if len(distance["distances"])==0:
continue
if id not in questionsDataGlobal:
continue
new_distance={"id":int(id),"distances":{}}
for k,v in distance["distances"].items():
k=int(k)
v=int(v)
new_distance["distances"][k]=v
distance_dataNew.append(new_distance)
logger.info("finally loaded {} links data".format(len(distance_dataNew)))
generateContextAnswerCorpusParallel(distance_dataNew,questionsDataGlobal,answersDataGlobal,indexerDataGlobal)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--db', type=str, default="crossvalidated")
parser.add_argument('--lose_rate', type=float, default=0.5)
parser.add_argument("--answerNum",type=int,default=5)
parser.add_argument('--workers', type=int, default=32)
args = parser.parse_args()
docDB=MongoStackExchange(host='10.1.1.9',port=50000)
dbName=args.db
docDB.useDB(dbName)
logger.info("processing db data: {}".format(dbName))
main()
| 2.21875
| 2
|
Code/UI/splash_ui.py
|
jphdotam/Cophy
| 0
|
12781606
|
from Code.UI.splash_layout import Ui_MainWindow
from Code.UI.label import LabelUI
#from ui.export_ui import ExportUI
#from ui.questionnaire_ui import QuestionnaireUI
from PyQt5 import QtCore
import sys, traceback
if QtCore.QT_VERSION >= 0x50501:
def excepthook(type_, value, traceback_):
traceback.print_exception(type_, value, traceback_)
QtCore.qFatal('')
sys.excepthook = excepthook
class MainWindowUI(Ui_MainWindow):
def __init__(self, mainwindow):
super(MainWindowUI, self).__init__()
self.mainwindow = mainwindow
self.setupUi(mainwindow)
self.pushButton_ReportCases.clicked.connect(self.run_reportcases_ui)
#self.pushButton_ExportData.clicked.connect(self.run_exportcases_ui)
#self.pushButton_HumanLabelling.clicked.connect(self.run_questionnaire_ui)
def run_reportcases_ui(self):
print("Running Labeller")
report_window = LabelUI(self.mainwindow)
# def run_exportcases_ui(self):
# print("Running Exporter")
# export_window = ExportUI(self.mainwindow)
#
# def run_questionnaire_ui(self):
# print("Running Questionnaire")
# questionnaire_window = QuestionnaireUI(self.mainwindow)
| 2.25
| 2
|
nodebox/gui/mac/AskString.py
|
nodebox/nodebox-pyobjc
| 47
|
12781607
|
<gh_stars>10-100
__all__ = ["AskString"]
import objc
from Foundation import *
from AppKit import *
# class defined in AskString.xib
class AskStringWindowController(NSWindowController):
questionLabel = objc.IBOutlet()
textField = objc.IBOutlet()
def __new__(cls, question, resultCallback, default="", parentWindow=None):
self = cls.alloc().initWithWindowNibName_("AskString")
self.question = question
self.resultCallback = resultCallback
self.default = default
self.parentWindow = parentWindow
if self.parentWindow is None:
self.window().setFrameUsingName_("AskStringPanel")
self.setWindowFrameAutosaveName_("AskStringPanel")
self.showWindow_(self)
else:
NSApp().beginSheet_modalForWindow_modalDelegate_didEndSelector_contextInfo_(
self.window(), self.parentWindow, None, None, 0)
self.retain()
return self
def windowWillClose_(self, notification):
self.autorelease()
def awakeFromNib(self):
self.questionLabel.setStringValue_(self.question)
self.textField.setStringValue_(self.default)
def done(self):
if self.parentWindow is None:
self.close()
else:
sheet = self.window()
NSApp().endSheet_(sheet)
sheet.orderOut_(self)
def ok_(self, sender):
value = self.textField.stringValue()
self.done()
self.resultCallback(value)
def cancel_(self, sender):
self.done()
self.resultCallback(None)
def AskString(question, resultCallback, default="", parentWindow=None):
AskStringWindowController(question, resultCallback, default, parentWindow)
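
# Illustrative usage sketch (an assumption, not part of the original module);
# the API is callback-based because the panel or sheet runs asynchronously:
#
#     def _got_value(value):
#         if value is not None:
#             print("user entered:", value)
#
#     AskString("Name this layer:", _got_value, default="Layer 1")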
| 2.140625
| 2
|
REPORT.py
|
AlexXG0152/HR_report_fill
| 0
|
12781608
|
<reponame>AlexXG0152/HR_report_fill
import pandas as pd
import os
import time
from mailmerge import MailMerge
from datetime import date, datetime
from dateutil.relativedelta import relativedelta
from docx import Document
from docx.shared import Cm
from docx.enum.text import WD_ALIGN_PARAGRAPH
'''
The idea for writing this came after reading the book AUTOMATE THE BORING STUFF.

I am an HR manager, and part of my job consists of preparing a wide variety of reports for management, government and statistics.
I have already become a pro at Excel and VBA, but I really like programming and the Python language.
So I decided to automate a bit of the boring job of reporting my department's performance to senior management.

Previously, the process looked like this:
because I do not have direct access to the database, I first had to generate a ready-made report
(an xls file, which the program takes about 1 minute to produce) and then work with it
(although I do have an idea of how to write the SQL query needed to pull this information straight from the database):
open the data on people, build pivot tables, remove the unnecessary parts, copy everything into Word (do not ask why),
and then print and sign it. On average, without distractions, this took about 20 minutes.

Now life has become much easier and better.
I gained new knowledge and skills, and I get the finished report in 0.5 seconds,
plus the 1 minute it takes to save the original xls file.

P.S. I understand that this is junior level and that this file does not follow all the PEP rules.
But I wrote it with maximum readability in mind, for anyone who wants to repeat this in their own work
and, of course, automate the boring stuff :)
'''
start = time.time()
path = "D:\\REPORTS\\REPORT1\\"
sex = ["ж", "м"]
category = [1, 2, 3, 4, 5]
def df(name):
df = pd.read_excel("D:\\REPORTS\\REPORT1\\REPORT1.xls", index_col=0, sheet_name=name) # xls file and sheet with all emploees data
average_age = 0
taday_date = 0
month = 0
fired_reason = 0
hired_from = 0
if name == "sheet1": # all employees
df['year'] = pd.DatetimeIndex(df['d_rogden']).year
df['age'] = 2020 - df['year']
average_age = round(df['age'].mean())
taday_date = datetime.today()
month = str(taday_date.month - 1)
if name == "sheet2": # hired in report period
df = df[(df["p_priem"] != 9) & (df["p_priem"] != 17)]
hired_from = df["namepriem"].value_counts().to_dict()
if 'towards organs' in hired_from.keys():
hired_from['towards government'] = hired_from.pop('towards organs')
hired_from = {k: v for k, v in sorted(hired_from.items(), key=lambda item: item[1], reverse=True)}
if name == "sheet3": #fired in report period
df = df[(df["_priem"] != 17) & (df["nameyvol"] != "transfer to №1")]
fired_reason = df["nameyvol"].value_counts().to_dict()
count_employee_category = df["kkat"].value_counts().to_dict()
person_category_dict = {k: v for k, v in sorted(count_employee_category.items(), key=lambda item: item[0])}
sex_all = df["pol"].value_counts().to_dict()
sex_women = {k: v for k, v in sorted(sex_all.items(), key=lambda item: item[0])}
# add zero to key : value pairs if some category or sex doesn't hired/fired in report period
for i in sex:
if i not in sex_women.keys():
sex_women.update({i: 0})
for i in category:
if i not in person_category_dict.keys():
person_category_dict.update({i: 0})
return [person_category_dict, sex_women, average_age, month, df, hired_from, fired_reason]
person_category_dict, sex_women, average_age, month, _, _, _ = df("sheet1")
hired_category_dict, hired_sex, _, _, _, hired_from, _ = df("sheet2")
fired_category_dict, fired_sex, _, _, _, _, fired_reason = df("sheet3")
template_1 = "D:\\REPORTS\\REPORT1\\blank.docx"
# here i'm count last day in report period
today = date.today()
last_day = date(today.year, today.month, 1) - relativedelta(days=1)
date = last_day.strftime('%d.%m.%Y')
# filling template docx
document = MailMerge(template_1)
document.merge(
all_emp=str(sum(person_category_dict.values())),
all_itr=str(person_category_dict[3] + person_category_dict[4] + person_category_dict[5]),
all_ruk=str(person_category_dict[3]),
all_spec=str(person_category_dict[5]),
all_drsl=str(person_category_dict[4]),
all_rab=str(person_category_dict[1] + person_category_dict[2]),
all_women=str(sex_women["ж"]),
date=str(date),
average_age=str(average_age),
month=str(month),
all_hired=str(sum(hired_category_dict.values())),
hired_ruk=str(hired_category_dict[3]),
hired_spec=str(hired_category_dict[5]),
hired_drsl=str(hired_category_dict[4]),
hired_rab1=str(hired_category_dict[1] + hired_category_dict[2]),
hired_women=str(hired_sex["ж"]),
hired_men=str(hired_sex["м"]),
all_fired=str(sum(fired_category_dict.values())),
fired_ruk=str(fired_category_dict[3]),
fired_spec=str(fired_category_dict[5]),
fired_drsl=str(fired_category_dict[4]),
fired_rab=str(fired_category_dict[1] + fired_category_dict[2]),
fired_women=str(fired_sex["ж"]),
fired_men=str(fired_sex["м"])
)
filename = "Report (working with stuff) for " + month + " month 2020.docx"
document.write(path + filename) # save file to folder
print(filename)
# here we create 2 tables, one below the other.
# The first table contains the names of the fields, the second table contains summary data by type of hired and fired reasons.
def table(name):
# customizing first table
word_document = Document(path + filename)
table0 = word_document.add_table(0, 0)
table0.style = word_document.styles["Table Grid"]
first_column_width = 15
second_column_with = 2.5
table0.add_column(Cm(first_column_width))
table0.add_column(Cm(second_column_with))
table0.add_row()
header_cells = table0.rows[-1].cells
if name == hired_from:
header_cells[0].text = "Hired type"
else:
header_cells[0].text = "Fired reason"
header_cells[1].text = "employee"
table0.rows[0].cells[0].paragraphs[0].runs[0].font.bold = True
table0.rows[0].cells[1].paragraphs[0].runs[0].font.bold = True
table0.rows[0].cells[0].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
table0.rows[0].cells[1].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
# customizing second table
table1 = word_document.add_table(0, 0) # we add rows iteratively
table1.style = word_document.styles["Table Grid"]
first_column_width = 15
second_column_with = 2.5
table1.add_column(Cm(first_column_width))
table1.add_column(Cm(second_column_with))
for index, stat_item in enumerate(name.items()):
table1.add_row()
stat_name, stat_result = stat_item
row = table1.rows[index]
row.cells[0].text = str(stat_name)
row.cells[1].text = str(stat_result)
row.cells[1].paragraphs[0].alignment = WD_ALIGN_PARAGRAPH.CENTER
word_document.add_paragraph()
word_document.save(path + filename)
table(hired_from)
table(fired_reason)
end = time.time()
print(end - start)
input("\nPress any key to exit...")
os.startfile(path)
| 2.8125
| 3
|
wallsup.py
|
tonikarppi/wallsup
| 0
|
12781609
|
<reponame>tonikarppi/wallsup<gh_stars>0
#!/usr/bin/env python3
"""
This is a simple script for the Gnome desktop environment that randomly picks an image
file from a directory tree and sets it as the wallpaper.
"""
import os
import random
import subprocess as sp
from getpass import getuser
from os.path import join
from pathlib import Path
from urllib.request import pathname2url
# Change this to the top-level directory containing your wallpapers.
WALLPAPERS_PATH = f"/home/{getuser()}/.wallpapers"
def get_files_recursively(root_path):
for current_path, dir_names, file_names in os.walk(root_path):
for file_name in file_names:
yield join(current_path, file_name)
def file_has_extension(file_path, *extensions):
path = Path(file_path)
return path.suffix.lower() in extensions
def set_wallpaper(wallpaper_path):
url = f"file://{pathname2url(wallpaper_path)}"
sp.run(
[
"/usr/bin/gsettings",
"set",
"org.gnome.desktop.background",
"picture-uri",
url,
]
)
sp.run(
[
"/usr/bin/gsettings",
"set",
"org.gnome.desktop.screensaver",
"picture-uri",
url,
]
)
def main():
files = get_files_recursively(WALLPAPERS_PATH)
wallpaper_files = [
file for file in files if file_has_extension(file, ".jpg", ".png")
]
if len(wallpaper_files) == 0:
return
random_wallpaper = random.choice(wallpaper_files)
set_wallpaper(random_wallpaper)
print(f"Wallpaper was set to: {random_wallpaper}")
if __name__ == "__main__":
main()
| 2.96875
| 3
|
streaming-mqtt/python-tests/tests.py
|
JacopoCastello/bahir
| 337
|
12781610
|
<gh_stars>100-1000
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
import time
import random
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
from pyspark.context import SparkConf, SparkContext, RDD
from pyspark.streaming.context import StreamingContext
from pyspark.streaming.tests import PySparkStreamingTestCase
from mqtt import MQTTUtils
class MQTTStreamTests(PySparkStreamingTestCase):
timeout = 20 # seconds
duration = 1
def setUp(self):
super(MQTTStreamTests, self).setUp()
MQTTTestUtilsClz = self.ssc._jvm.java.lang.Thread.currentThread().getContextClassLoader() \
.loadClass("org.apache.spark.streaming.mqtt.MQTTTestUtils")
self._MQTTTestUtils = MQTTTestUtilsClz.newInstance()
self._MQTTTestUtils.setup()
def tearDown(self):
if self._MQTTTestUtils is not None:
self._MQTTTestUtils.teardown()
self._MQTTTestUtils = None
super(MQTTStreamTests, self).tearDown()
def _randomTopic(self):
return "topic-%d" % random.randint(0, 10000)
def _startContext(self, topic):
# Start the StreamingContext and also collect the result
stream = MQTTUtils.createStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topic)
result = []
def getOutput(_, rdd):
for data in rdd.collect():
result.append(data)
stream.foreachRDD(getOutput)
self.ssc.start()
return result
def test_mqtt_stream(self):
"""Test the Python MQTT stream API."""
sendData = "MQTT demo for spark streaming"
topic = self._randomTopic()
result = self._startContext(topic)
def retry():
self._MQTTTestUtils.publishData(topic, sendData)
# Because "publishData" sends duplicate messages, here we should use > 0
self.assertTrue(len(result) > 0)
self.assertEqual(sendData, result[0])
# Retry it because we don't know when the receiver will start.
self._retry_or_timeout(retry)
def _start_context_with_paired_stream(self, topics):
stream = MQTTUtils.createPairedStream(self.ssc, "tcp://" + self._MQTTTestUtils.brokerUri(), topics)
# Keep a set because records can potentially be repeated.
result = set()
def getOutput(_, rdd):
for data in rdd.collect():
result.add(data)
stream.foreachRDD(getOutput)
self.ssc.start()
return result
def test_mqtt_pair_stream(self):
"""Test the Python MQTT stream API with multiple topics."""
data_records = ["random string 1", "random string 2", "random string 3"]
topics = [self._randomTopic(), self._randomTopic(), self._randomTopic()]
        topics_and_records = list(zip(topics, data_records))
result = self._start_context_with_paired_stream(topics)
def retry():
for topic, data_record in topics_and_records:
self._MQTTTestUtils.publishData(topic, data_record)
# Sort the received records as they might be out of order.
self.assertEqual(topics_and_records, sorted(result, key=lambda x: x[1]))
# Retry it because we don't know when the receiver will start.
self._retry_or_timeout(retry)
def _retry_or_timeout(self, test_func):
start_time = time.time()
while True:
try:
test_func()
break
except:
if time.time() - start_time > self.timeout:
raise
time.sleep(0.01)
if __name__ == "__main__":
unittest.main()
| 1.921875
| 2
|
Minion/Collectors/Linux_CPU.py
|
onderogluserdar/boardInstrumentFramework
| 16
|
12781611
|
<gh_stars>10-100
##############################################################################
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# File Abstract:
# This attempts to get CPU utilization information on a Linux System
#
##############################################################################
from time import sleep
import sys
import os
procInfoDir="/sys/devices/system/cpu"
desiredFreqStats=["cpuinfo_min_freq","scaling_driver","energy_performance_preference","cpuinfo_max_freq","cpuinfo_cur_freq","scaling_cur_freq","scaling_governor","scaling_available_governors"]
def GetBaseDir():
global procInfoDir
return procInfoDir
def ReadFromFile_Legacy(Filename):
    try:
        with open(Filename, 'rt') as file:
            return file.read().strip()
    except Exception:
        return "N/A"
def ReadFromFile(Filename):
    # open() raises rather than returning None, so check for the file explicitly
    if not os.path.exists(Filename):
        return "File [" + Filename + "] does not exist"
    with open(Filename, 'rt') as file:
        lines = [line.rstrip('\n') for line in file]
    return lines
#Linux 3.10.0-229.el7.x86_64 (csx-61) 09/07/2015 _x86_64_ (16 CPU)
#10:26:21 AM CPU %usr %nice %sys %iowait %irq %soft %steal %guest %gnice %idle
#10:26:21 AM all 1.98 0.00 0.24 0.02 0.00 0.01 0.00 0.00 0.00 97.74
#10:26:21 AM 0 0.35 0.00 0.22 0.22 0.00 0.01 0.00 0.00 0.00 99.19
#10:26:21 AM 1 0.77 0.01 0.10 0.00 0.00 0.00 0.00 0.00 0.00 99.12
#10:26:21 AM 2 0.46 0.01 0.08 0.01 0.00 0.00 0.00 0.00 0.00 99.44
#10:26:21 AM 3 0.74 0.01 0.09 0.00 0.00 0.00 0.00 0.00 0.00 99.16
#10:26:21 AM 4 0.49 0.01 0.08 0.01 0.00 0.00 0.00 0.00 0.00 99.41
#10:26:21 AM 5 0.94 0.01 0.10 0.01 0.00 0.00 0.00 0.00 0.00 98.94
#10:26:21 AM 6 0.49 0.01 0.09 0.01 0.00 0.00 0.00 0.00 0.00 99.40
#10:26:21 AM 7 1.31 0.01 0.11 0.01 0.00 0.00 0.00 0.00 0.00 98.56
#10:26:21 AM 8 0.31 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.64
#10:26:21 AM 9 0.38 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.56
#10:26:21 AM 10 0.29 0.01 0.12 0.00 0.00 0.00 0.00 0.00 0.00 99.58
#10:26:21 AM 11 0.32 0.01 0.13 0.00 0.00 0.00 0.00 0.00 0.00 99.53
#10:26:21 AM 12 0.26 0.00 0.15 0.00 0.00 0.00 0.00 0.00 0.00 99.58
#10:26:21 AM 13 24.13 0.00 2.45 0.01 0.00 0.11 0.00 0.00 0.00 73.31
#10:26:21 AM 14 0.21 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.73
#10:26:21 AM 15 0.31 0.00 0.05 0.00 0.00 0.00 0.00 0.00 0.00 99.64
def TokenizeRow(row):
columnsRaw = row.split()
retList = columnsRaw[2:len(columnsRaw)]
return retList
def CreatePerfFileFromMPStatFile(inputFile,outputFile):
rawData = ReadFromFile(inputFile)
columnHeaders = []
startFound = False
writeData = ""
for row in rawData:
if not startFound:
if row.find("usr") > 0:
print(row)
startFound = True
columnHeaders = TokenizeRow(row)
else:
columns = TokenizeRow(row)
cpuID = "CPU." + columns[0]
for loop in range(1,len(columns)):
writeData += cpuID + "." + columnHeaders[loop] + columns[loop] + os.linesep
pass
file = open(outputFile,"wt")
file.write(writeData)
file.close()
return "HelenKeller" # don't want to send anything
#Worker class to get cpu load info
class CPULoad(object):
def __init__(self, period = 1):
self._period = period #how long to wait when getting 2nd reading
def calcCPU_Time(self):
cpu_infoMap = {}
with open('/proc/stat','r') as fpStats:
lines = [line.split(' ') for content in fpStats.readlines() for line in content.split('\n') if line.startswith('cpu')]
for cpu_line_list in lines:
if '' in cpu_line_list: cpu_line_list.remove('')
cpu_line_list = [cpu_line_list[0]]+[float(i) for i in cpu_line_list[1:]]
#pylint: disable=unused-variable
cpu_id,user,nice,system,idle,iowait,irq,softrig,steal,guest,guest_nice = cpu_line_list
Idle=idle+iowait
NonIdle=user+nice+system+irq+softrig+steal
Total=Idle+NonIdle
cpu_infoMap.update({cpu_id:{'total':Total,'idle':Idle}})
return cpu_infoMap
def getcpuload(self):
        # Remember that Percentage=((Total-PrevTotal)-(Idle-PrevIdle))/(Total-PrevTotal)
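        # e.g. if Total goes 1000 -> 1100 while Idle goes 900 -> 950:
        #      ((1100-1000) - (950-900)) / (1100-1000) = (100-50)/100 = 50% load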
# read 1
start = self.calcCPU_Time()
#snooze a bit
sleep(self._period)
#read 2
stop = self.calcCPU_Time()
cpu_load_List = {}
for cpu in start:
Total = stop[cpu]['total']
PrevTotal = start[cpu]['total']
Idle = stop[cpu]['idle']
PrevIdle = start[cpu]['idle']
CPU_Percentage=((Total-PrevTotal)-(Idle-PrevIdle))/(Total-PrevTotal)*100
cpu_load_List.update({cpu: CPU_Percentage})
return cpu_load_List
def ReadProcStats():
dataMap = {} #collect here the information
with open('/proc/stat','r') as f_stat:
lines = [line.split(' ') for content in f_stat.readlines() for line in content.split('\n') if line.startswith('cpu')]
#compute for every cpu
for cpu_line in lines:
if '' in cpu_line: cpu_line.remove('')#remove empty elements
cpu_id,user,nice,system,idle,iowait,irq,softriq,steal,guest,guest_nice = cpu_line
if cpu_id == 'cpu':
cpu_id = 'total'
dataMap['cpu.'+cpu_id+'.user'] = user
dataMap['cpu.'+cpu_id+'.nice'] = nice
dataMap['cpu.'+cpu_id+'.system'] = system
dataMap['cpu.'+cpu_id+'.idle'] = idle
dataMap['cpu.'+cpu_id+'.iowait'] = iowait
dataMap['cpu.'+cpu_id+'.irq'] = irq
dataMap['cpu.'+cpu_id+'.softirq'] = softriq
dataMap['cpu.'+cpu_id+'.steal'] = steal
dataMap['cpu.'+cpu_id+'.guest'] = guest
dataMap['cpu.'+cpu_id+'.guest_nice'] = guest_nice
return dataMap
# uses the fact that /proc/stat has the desired information. pass target file to put
# the desired data, the period to sample stats over and precision level if desired
def CreateCPUUtilFileFromProcStats(targetFile,interval=.1,precision=2):
interval=float(interval)
precision=float(precision)
x = CPULoad(interval) # use this cool script I found online
strPrecision = '.' + str(int(precision)) + 'f'
data = x.getcpuload()
writeData = "CPU_COUNT=" + str(len(data) -1 ) + os.linesep # also has overall CPU in there
for proc in data:
writeData+= proc +"="+str(format(data[proc],strPrecision)) + os.linesep
file = open(targetFile,"wt")
file.write(writeData)
file.close()
return "HelenKeller" # don't want to send anything
# create a comma separated list with all cores and nothing else
# returnTuple will return the raw cpu load data too
def CreateUtilizationList(interval=.1,precision=2,returnTuple=False):
interval=float(interval)
precision=float(precision)
cpuInfo = CPULoad(interval) # use this cool script I found online
strPrecision = '.' + str(int(precision)) + 'f'
data = cpuInfo.getcpuload()
coreCount = len(data) -1
first = True
    # data is stored in a hash (dict), with the key being cpu#
for index in range(0,coreCount):
key = "cpu" + str(index)
cpuVal = str(format(data[key],strPrecision))
if True == first:
writeData = cpuVal
first = False
else:
writeData+= "," + cpuVal
if True == returnTuple:
return (writeData,data)
return writeData
def getFrequencyInfo(prefix=""):
retMap={}
coreCount=0
for cpuDir in os.listdir(GetBaseDir()):
if not 'cpu' in cpuDir:
continue
if cpuDir in ['cpufreq','cpuidle']: #don't want these directories
continue
coreCount+=1
nextDir = GetBaseDir() + "/" + cpuDir + "/cpufreq"
#pylint: disable=unused-variable
for statRoot, statDirs, statFiles in os.walk(nextDir):
for file in statFiles:
if file in desiredFreqStats:
readFile = GetBaseDir() + "/" + cpuDir + "/cpufreq/" + file
key = "{0}.{1}".format(cpuDir,file)
retMap[prefix+key] = ReadFromFile_Legacy(readFile)
freqList=None
# create a comma separated list for graphing
if "cpu0.cpuinfo_cur_freq" in retMap:
freqKey = "cpuinfo_cur_freq"
else:
freqKey = "scaling_cur_freq"
for coreNum in range(0,coreCount):
key = "cpu{0}.{1}".format(coreNum,freqKey)
if None == freqList: #1st one
freqList=retMap[prefix+key]
else:
freqList += ","+retMap[prefix+key]
retMap[prefix+"cpu_frequency_list"] = freqList
return retMap
def GetSystemAverageCPU(interval=.1,precision=2):
interval=float(interval)
precision=float(precision)
cpuInfo = CPULoad(interval) # use this cool script I found online
strPrecision = '.' + str(int(precision)) + 'f'
data = cpuInfo.getcpuload()
coreCount = len(data) -1
total = 0.0
for index in range(0,coreCount):
key = "cpu" + str(index)
cpuVal = data[key]
total += cpuVal
total = total/coreCount
return str(format(total,strPrecision))
## Dynamic Collector interface, gets all raw stats
def CollectStatsFunction(frameworkInterface):
dataMap = ReadProcStats()
for collectorID in dataMap:
if not frameworkInterface.DoesCollectorExist(collectorID):
frameworkInterface.AddCollector(collectorID)
frameworkInterface.SetCollectorValue(collectorID,dataMap[collectorID])
#if __name__=='__main__':
# CreateCPUUtilFileFromProcStats("foo.txt",.1,4)
| 2.1875
| 2
|
heyyz/core.py
|
CarlHey/heyyz
| 0
|
12781612
|
<gh_stars>0
import heyy
from .dbf_utils import (
dbf2objs,
try_dbf2objs,
str2objs,
read_fields,
compare_fields,
split_multiline,
)
pt = heyy.pt
dbf2objs = dbf2objs
try_dbf2objs = try_dbf2objs
str2objs = str2objs
read_fields = read_fields
compare_fields = compare_fields
split_multiline = split_multiline
| 1.289063
| 1
|
desktop/core/ext-py/cx_Oracle-6.4.1/samples/tutorial/query_one.py
|
yetsun/hue
| 5,079
|
12781613
|
#------------------------------------------------------------------------------
# query_one.py (Section 3.2)
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# Copyright 2017, 2018, Oracle and/or its affiliates. All rights reserved.
#------------------------------------------------------------------------------
from __future__ import print_function
import cx_Oracle
import db_config
con = cx_Oracle.connect(db_config.user, db_config.pw, db_config.dsn)
cur = con.cursor()
cur.execute("select * from dept order by deptno")
row = cur.fetchone()
print(row)
row = cur.fetchone()
print(row)
| 2.390625
| 2
|
bach/tests/unit/bach/test_series.py
|
objectiv/objectiv-analytics
| 23
|
12781614
|
"""
Copyright 2021 Objectiv B.V.
"""
from typing import List
import pytest
from bach import get_series_type_from_dtype
from bach.expression import Expression
from bach.partitioning import GroupBy
from tests.unit.bach.util import get_fake_df, FakeEngine
def test_equals(dialect):
def get_df(index_names: List[str], data_names: List[str]):
return get_fake_df(dialect=dialect, index_names=index_names, data_names=data_names)
left = get_df(['a'], ['b', 'c'])
right = get_df(['a'], ['b', 'c'])
result = left['b'].equals(left['b'])
# assert that the result is an actual boolean; for e.g. '==' this is not the case
assert result is True
assert left['b'].equals(left['b'])
assert left['b'].equals(right['b'])
assert not left['b'].equals(left['c'])
assert not left['b'].equals(right['c'])
left = get_df(['a', 'x'], ['b', 'c'])
right = get_df(['a'], ['b', 'c'])
assert left['b'].equals(left['b'])
assert not left['b'].equals(right['b'])
assert not left['b'].equals(left['c'])
assert not left['b'].equals(right['c'])
# different order in the index
left = get_df(['a', 'b'], ['c'])
right = get_df(['b', 'a'], ['c'])
assert not left['c'].equals(right['c'])
engine = left.engine
engine_other = FakeEngine(dialect=engine.dialect, url='sql://some_other_string')
int_type = get_series_type_from_dtype('int64')
float_type = get_series_type_from_dtype('float64')
expr_test = Expression.construct('test')
expr_other = Expression.construct('test::text')
sleft = int_type(engine=engine, base_node=None, index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
sright = int_type(engine=engine, base_node=None, index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert sleft.equals(sright)
# different expression
sright = int_type(engine=engine, base_node=None, index={}, name='test',
expression=expr_other, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert not sleft.equals(sright)
# different name
sright = int_type(engine=engine, base_node=None, index={}, name='test_2',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert not sleft.equals(sright)
# different base_node
sright = int_type(engine=engine, base_node='test', index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert not sleft.equals(sright)
# different engine
sright = int_type(engine=engine_other, base_node=None, index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert not sleft.equals(sright)
# different type
sright = float_type(engine=engine, base_node=None, index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='float64')
assert not sleft.equals(sright)
# different group_by
sright = int_type(engine=engine, base_node=None, index={}, name='test', expression=expr_test,
group_by=GroupBy(group_by_columns=[]), sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert not sleft.equals(sright)
# different sorting
sright = int_type(engine=engine, base_node=None, index={}, name='test', expression=expr_test,
group_by=None, sorted_ascending=True, index_sorting=[], instance_dtype='int64')
assert not sleft.equals(sright)
sright = sright.copy_override(sorted_ascending=None)
assert sleft.equals(sright)
index_series = sleft
sleft = int_type(engine=engine, base_node=None, index={'a': index_series}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
sright = int_type(engine=engine, base_node=None, index={'a': index_series}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype='int64')
assert sleft.equals(sright)
sright = sright.copy_override(index_sorting=[True])
assert not sleft.equals(sright)
@pytest.mark.skip_postgres
def test_equals_instance_dtype(dialect):
def get_df(index_names: List[str], data_names: List[str]):
return get_fake_df(dialect=dialect, index_names=index_names, data_names=data_names)
left = get_df(['a'], ['b', 'c'])
engine = left.engine
expr_test = Expression.construct('test')
dict_type = get_series_type_from_dtype('dict')
# Currently we only have bigquery types that actual use the instance_dtype. So skip postgres here.
sleft = dict_type(engine=engine, base_node=None, index={}, name='test',
expression=expr_test, group_by=None, sorted_ascending=None, index_sorting=[],
instance_dtype={'a': 'int64', 'b': ['bool']})
sright = sleft.copy_override()
assert sleft.equals(sright)
sright = sleft.copy_override(instance_dtype={'a': 'float64', 'b': ['bool']})
assert not sleft.equals(sright)
| 2.546875
| 3
|
mpikat/meerkat/apsuse/apsuse_config.py
|
ewanbarr/mpikat
| 2
|
12781615
|
"""
Copyright (c) 2018 <NAME> <<EMAIL>>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import json
from mpikat.core.ip_manager import ip_range_from_stream
log = logging.getLogger('mpikat.apsuse_config_manager')
DEFAULT_DATA_RATE_PER_WORKER = 20e9 # bits / s
DUMMY_FBF_CONFIG = {
"coherent-beam-multicast-groups":"spead://172.16.17.32+15:7147",
"coherent-beam-multicast-groups-data-rate": 7e9,
"incoherent-beam-multicast-group": "spead://192.168.127.12:7147",
"incoherent-beam-multicast-group-data-rate": 150e6,
}
HOST_TO_LEAF_MAP = {
"apscn00.mpifr-be.mkat.karoo.kat.ac.za": 1,
"apscn01.mpifr-be.mkat.karoo.kat.ac.za": 1,
"apscn02.mpifr-be.mkat.karoo.kat.ac.za": 1,
"apscn03.mpifr-be.mkat.karoo.kat.ac.za": 1,
"apscn04.mpifr-be.mkat.karoo.kat.ac.za": 0,
"apscn05.mpifr-be.mkat.karoo.kat.ac.za": 0,
"apscn06.mpifr-be.mkat.karoo.kat.ac.za": 0,
"apscn07.mpifr-be.mkat.karoo.kat.ac.za": 0,
}
class ApsConfigurationError(Exception):
pass
class ApsWorkerBandwidthExceeded(Exception):
pass
class ApsWorkerTotalBandwidthExceeded(Exception):
pass
class ApsWorkerConfig(object):
def __init__(self, total_bandwidth=DEFAULT_DATA_RATE_PER_WORKER):
log.debug("Created new apsuse worker config")
self._total_bandwidth = total_bandwidth
self._available_bandwidth = self._total_bandwidth
self._incoherent_groups = []
self._coherent_groups = []
self._incoherent_beams = []
self._coherent_beams = []
self._even = True
def set_even(self, even_odd):
self._even = even_odd
def can_use_host(self, hostname):
return HOST_TO_LEAF_MAP[hostname] == int(self._even)
def add_incoherent_group(self, group, bandwidth):
if bandwidth > self._total_bandwidth:
log.debug("Adding group would exceed worker bandwidth")
raise ApsWorkerTotalBandwidthExceeded
if self._available_bandwidth < bandwidth:
log.debug("Adding group would exceed worker bandwidth")
raise ApsWorkerBandwidthExceeded
else:
log.debug("Adding group {} to worker".format(group))
self._incoherent_groups.append(group)
self._available_bandwidth -= bandwidth
def add_coherent_group(self, group, bandwidth):
if self._available_bandwidth < bandwidth:
log.debug("Adding group would exceed worker bandwidth")
raise ApsWorkerBandwidthExceeded
else:
self._coherent_groups.append(group)
log.debug("Adding group {} to worker".format(group))
self._available_bandwidth -= bandwidth
def data_rate(self):
return self._total_bandwidth - self._available_bandwidth
def coherent_groups(self):
return self._coherent_groups
def incoherent_groups(self):
return self._incoherent_groups
def coherent_beams(self):
return self._coherent_beams
def incoherent_beams(self):
return self._incoherent_beams
class ApsConfigGenerator(object):
def __init__(self, fbfuse_config, bandwidth_per_worker=DEFAULT_DATA_RATE_PER_WORKER):
self._fbfuse_config = fbfuse_config
self._bandwidth_per_worker = bandwidth_per_worker
self._incoherent_range = ip_range_from_stream(
self._fbfuse_config['incoherent-beam-multicast-group'])
self._incoherent_mcast_group_rate = (
self._fbfuse_config['incoherent-beam-multicast-group-data-rate'])
self._incoherent_groups = list(self._incoherent_range)
self._coherent_range = ip_range_from_stream(
self._fbfuse_config['coherent-beam-multicast-groups'])
self._coherent_mcast_group_rate = (
self._fbfuse_config['coherent-beam-multicast-groups-data-rate'])
self._coherent_groups = list(self._coherent_range)
def allocate_groups(self, servers):
configs = {}
final_configs = {}
for server in servers:
configs[server] = ApsWorkerConfig(self._bandwidth_per_worker)
while configs and (self._incoherent_groups or self._coherent_groups):
for server in configs.keys():
if self._incoherent_groups:
group = self._incoherent_groups.pop(0)
try:
configs[server].add_incoherent_group(
group, self._incoherent_mcast_group_rate)
except (ApsWorkerTotalBandwidthExceeded, ApsWorkerBandwidthExceeded):
log.error("Incoherent beam mutlicast group ({} Gb/s) size exceeds data rate for one node ({} Gb/s)".format(
self._incoherent_mcast_group_rate/1e9,
configs[server]._total_bandwidth/1e9))
log.error("Incoherent beam data will not be captured")
else:
continue
if self._coherent_groups:
group = self._coherent_groups.pop(0)
try:
configs[server].add_coherent_group(group, self._coherent_mcast_group_rate)
except ApsWorkerTotalBandwidthExceeded:
log.error("Coherent beam mutlicast group ({} Gb/s) size exceeds data rate for one node ({} Gb/s)".format(
self._coherent_mcast_group_rate/1e9, configs[server]._total_bandwidth/1e9))
log.error("Coherent beam data will not be captured")
except ApsWorkerBandwidthExceeded:
self._coherent_groups.insert(0, group)
final_configs[server] = self._finalise_worker(configs[server], server)
del configs[server]
else:
continue
log.debug("Unallocated groups - incoherent: {}, coherent: {}".format(self._incoherent_groups, self._coherent_groups))
for server, config in configs.items():
final_configs[server] = self._finalise_worker(config, server)
return final_configs
def _finalise_worker(self, worker, server):
valid = False
for incoherent_group in worker.incoherent_groups():
valid = True
worker._incoherent_beams.append("ifbf00000")
for coherent_group in worker.coherent_groups():
valid = True
spead_formatted = "spead://{}:{}".format(str(coherent_group), self._coherent_range.port)
mapping = json.loads(self._fbfuse_config['coherent-beam-multicast-group-mapping'])
beam_idxs = mapping.get(spead_formatted, range(12))
worker._coherent_beams.extend(beam_idxs)
log.debug(("Worker {} config: coherent-groups: {},"
" coherent-beams: {}, incoherent-groups: {},"
" incoherent-beams: {},").format(
str(server), map(str, worker.coherent_groups()),
map(str, worker.coherent_beams()),
map(str, worker.incoherent_groups()),
map(str, worker.incoherent_beams())))
if valid:
return worker
else:
return None
def remaining_incoherent_groups(self):
return self._incoherent_groups
def remaining_coherent_groups(self):
return self._coherent_groups
| 1.476563
| 1
|
interpolation.py
|
DahlitzFlorian/how-to-work-with-config-files-in-python-article-snippets
| 0
|
12781616
|
<reponame>DahlitzFlorian/how-to-work-with-config-files-in-python-article-snippets<filename>interpolation.py<gh_stars>0
# interpolation.py
from configparser import ConfigParser
from configparser import ExtendedInterpolation
config = ConfigParser(interpolation=ExtendedInterpolation())
config.read("interpolation_config.ini")
print(config.get("destinations", "app_dir"))
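# The actual "interpolation_config.ini" is not included in this snippet, so the
# sketch below is an assumed example whose section/option names merely match the
# config.get("destinations", "app_dir") call above.
#
#   [paths]
#   base_dir = /opt/myapp
#
#   [destinations]
#   app_dir = ${paths:base_dir}/app
#
# With ExtendedInterpolation, ${section:option} references resolve across
# sections, so this example would print "/opt/myapp/app".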
| 2.40625
| 2
|
koronawirus_backend/elastic.py
|
merito/dokadzwirusem_backend
| 0
|
12781617
|
<reponame>merito/dokadzwirusem_backend
from datetime import datetime
from elasticsearch import Elasticsearch as ES
class NotDefined:
pass
class Point:
def __init__(self, name, operator, address, opening_hours, lat, lon, point_type, phone, prepare_instruction,
owned_by, waiting_time, doc_id=None, last_modified_timestamp=None):
self.waiting_time = waiting_time
self.owned_by = owned_by
self.prepare_instruction = prepare_instruction
self.phone = phone
self.opening_hours = opening_hours
self.address = address
self.operator = operator
self.name = name
self.lat = str(lat)
self.lon = str(lon)
self.point_type = point_type
self.doc_id = doc_id
self.last_modified_timestamp = datetime.utcnow().strftime("%s") if last_modified_timestamp is None \
else last_modified_timestamp
@classmethod
def new_point(cls, name, operator, address, opening_hours, lat, lon, point_type, phone, prepare_instruction,
waiting_time, user_sub):
return cls(name=name, operator=operator, address=address, opening_hours=opening_hours, lat=lat, lon=lon,
point_type=point_type, phone=phone, prepare_instruction=prepare_instruction,
waiting_time=waiting_time, owned_by=user_sub)
@classmethod
def from_dict(cls, body):
source = body['_source']
return cls(name=source['name'], operator=source['operator'], address=source['address'],
lat=source['location']['lat'], lon=source['location']['lon'], point_type=source['type'],
opening_hours=source['opening_hours'], phone=source['phone'],
prepare_instruction=source['prepare_instruction'], owned_by=source['owned_by'],
last_modified_timestamp=source['last_modified_timestamp'], waiting_time=source['waiting_time'],
doc_id=body['_id'])
def to_dict(self, with_id=False):
body = {
"name": self.name,
"operator": self.operator,
"address": self.address,
"location": {
"lat": self.lat,
"lon": self.lon
},
"type": self.point_type,
"opening_hours": self.opening_hours,
"phone": self.phone,
"prepare_instruction": self.prepare_instruction,
"last_modified_timestamp": self.last_modified_timestamp,
"waiting_time": self.waiting_time
}
if with_id is True:
body["id"] = self.doc_id
return body
def to_index(self, with_id=False):
body = self.to_dict(with_id=with_id)
body['owned_by'] = self.owned_by
return body
def modify(self, name, operator, address, lat, lon, point_type, opening_hours, phone,
prepare_instruction, owned_by, waiting_time):
params = locals()
params.pop('self')
changed = dict()
for param in params.keys():
if type(params[param]) is not NotDefined:
if getattr(self, param) != params[param]:
changed[param] = {'old_value': getattr(self, param),
'new_value': params[param]}
setattr(self, param, params[param])
self.last_modified_timestamp = datetime.utcnow().strftime("%s")
return changed
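# Hedged sketch of the NotDefined sentinel contract used by modify() above (not
# part of the original file): passing a NotDefined() instance leaves a field
# untouched, while any other value is compared and recorded in the change log.
#
#   point = Point.new_point(name="Clinic", operator="Op", address="Street 1",
#                           opening_hours="9-17", lat=52.1, lon=21.0,
#                           point_type="hospital", phone="123",
#                           prepare_instruction="", waiting_time="10",
#                           user_sub="user|abc")
#   changes = point.modify(name="New clinic", operator=NotDefined(),
#                          address=NotDefined(), lat=NotDefined(),
#                          lon=NotDefined(), point_type=NotDefined(),
#                          opening_hours=NotDefined(), phone=NotDefined(),
#                          prepare_instruction=NotDefined(),
#                          owned_by=NotDefined(), waiting_time=NotDefined())
#   # changes == {'name': {'old_value': 'Clinic', 'new_value': 'New clinic'}}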
def add_to_or_create_list(location, name, query):
try:
location[name]
except KeyError:
location[name] = []
location[name].append(query)
class Elasticsearch:
def __init__(self, connection_string, index='hospitals'):
self.es = ES([connection_string])
self.index = index
def search_points(self, phrase, point_type=None, top_right=None, bottom_left=None, water=None, fire=None):
body = {
"query": {
"bool": {
"must": [{
"multi_match": {
"query": phrase,
"fields": [
"name^3",
"operator",
"address"
]
}
}]
}
}
}
if point_type is not None:
add_to_or_create_list(location=body['query']['bool'], name='filter',
query={"term": {"type": {"value": point_type}}})
if top_right is not None and bottom_left is not None:
add_to_or_create_list(location=body['query']['bool'], name='filter', query={
"geo_bounding_box": {
"location": {
"top_left": {
"lat": str(top_right['lat']),
"lon": str(bottom_left['lon'])
},
"bottom_right": {
"lat": str(bottom_left['lat']),
"lon": str(top_right['lon'])
}
}
}
}
)
response = self.es.search(index=self.index, body=body)
read_points = list(map(Point.from_dict, response['hits']['hits']))
out_points = [point.to_dict(with_id=True) for point in read_points]
return {'points': out_points}
def get_nearest(self, location):
body_transport = {'query': {'bool': {'must': [{'term': {'type': 'transport'}},
{'geo_distance': {'distance': '1000km',
'location': {'lat': float(location['lat']),
'lon': float(location['lon'])}}}]}},
'size': 1,
'sort': [{'_geo_distance': {'location': {'lat': float(location['lat']),
'lon': float(location['lon'])},
'order': 'asc',
'unit': 'km'}}]}
response_transport = self.es.search(index=self.index, body=body_transport)
nearest_transport = Point.from_dict(response_transport['hits']['hits'][0])
body_hospital = {'query': {'bool': {'must': [{'term': {'type': 'hospital'}},
{'geo_distance': {'distance': '1000km',
'location': {'lat': float(location['lat']),
'lon': float(
location['lon'])}}}]}},
'size': 1,
'sort': [{'_geo_distance': {'location': {'lat': float(location['lat']),
'lon': float(location['lon'])},
'order': 'asc',
'unit': 'km'}}]}
response_hospital = self.es.search(index=self.index, body=body_hospital)
nearest_hospital = Point.from_dict(response_hospital['hits']['hits'][0])
return {'hospital': nearest_hospital.to_dict(with_id=True),
'transport': nearest_transport.to_dict(with_id=True)}
def get_points(self, top_right, bottom_left):
body = '''{
"query": {
"bool" : {
"must" : {
"match_all" : {}
},
"filter" : {
"geo_bounding_box" : {
"validation_method": "COERCE",
"location" : {
"top_left" : {
"lat" : ''' + str(top_right['lat']) + ''',
"lon" : ''' + str(bottom_left['lon']) + '''
},
"bottom_right" : {
"lat" : ''' + str(bottom_left['lat']) + ''',
"lon" : ''' + str(top_right['lon']) + '''
}
}
}
}
}
},
"size": 9000
}'''
response = self.es.search(index=self.index, body=body)
read_points = list(map(Point.from_dict, response['hits']['hits']))
out_points = [point.to_dict(with_id=True) for point in read_points]
return {'points': out_points}
def get_point(self, point_id):
response = self.es.get(index=self.index, id=point_id)
point = Point.from_dict(body=response)
return point.to_dict(with_id=True)
def delete_point(self, point_id):
res = self.es.delete(index=self.index, id=point_id)
if res['result'] == 'deleted':
return
raise Exception("Can't delete point")
def get_my_points(self, sub):
body = {
"query": {
"bool": {
"filter": {
"term": {
"owned_by": sub
}
}
}
}
}
response = self.es.search(index=self.index, body=body)
read_points = list(map(Point.from_dict, response['hits']['hits']))
out_points = [point.to_dict(with_id=True) for point in read_points]
return {'points': out_points}
def get_full_point(self, point_id):
response = self.es.get(index=self.index, id=point_id)
point = Point.from_dict(body=response)
return point.to_index(with_id=True)
def get_logs(self, point_id=None, size=25, offset=0):
body = {"sort": [{"timestamp": {"order": "desc"}}], "from": offset, "size": size}
if point_id is not None:
body['query'] = {'term': {'doc_id.keyword': {'value': point_id}}}
response = self.es.search(index=self.index + '_*', body=body)
return {"logs": response['hits']['hits'], "total": response['hits']['total']['value']}
def modify_point(self, point_id, user_sub, name, operator, address, lat, lon,
point_type, opening_hours, phone, prepare_instruction, waiting_time, owned_by):
body = self.es.get(index=self.index, id=point_id)
point = Point.from_dict(body=body)
changes = point.modify(name=name, operator=operator, address=address, lat=lat, lon=lon,
point_type=point_type, opening_hours=opening_hours, phone=phone,
prepare_instruction=prepare_instruction, waiting_time=waiting_time, owned_by=owned_by)
res = self.es.index(index=self.index, id=point_id, body=point.to_index())
if res['result'] == 'updated':
self.save_log(user_sub=user_sub, doc_id=point_id, name=point.name, changed=changes)
return self.get_full_point(point_id=point_id)
return res
def add_point(self, name, operator, address, opening_hours, lat, lon, point_type, phone, prepare_instruction,
waiting_time, user_sub):
point = Point.new_point(name=name, operator=operator, address=address, opening_hours=opening_hours, lat=lat,
lon=lon, point_type=point_type, phone=phone,
prepare_instruction=prepare_instruction, waiting_time=waiting_time, user_sub=user_sub)
res = self.es.index(index=self.index, body=point.to_index())
if res['result'] == 'created':
return self.get_point(point_id=res['_id'])
return res
def save_log(self, user_sub, doc_id, name, changed):
document = {"modified_by": user_sub, "doc_id": doc_id, "changes": changed,
"timestamp": datetime.utcnow().strftime("%Y/%m/%d %H:%M:%S"), "name": name}
self.es.index(index=''.join((self.index, datetime.today().strftime('_%m_%Y'))), body=document)
| 2.484375
| 2
|
src/experiments/train_common.py
|
prakashchhipa/Depth-Contrast-Self-Supervised-Method
| 0
|
12781618
|
<filename>src/experiments/train_common.py
import errno
import numpy as np
import json
import argparse
import time
from tqdm import tqdm
import cv2
import logging
import sys, os
import torch
import torchvision
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision.transforms import transforms
from sklearn.metrics import f1_score
from sampler import BalancedBatchSampler
from models import Resnext_Model, Densenet_Model
from dataloader import MBV_Dataset
sys.path.append(os.path.dirname(__file__))
from train_util import Train_Util
sys.path.append(os.path.dirname(__file__))
from utils import *
from mbv_config import MBV_Config
import mbv_config
def train():
parser = argparse.ArgumentParser(description='PyTorch MBV Training')
parser.add_argument('--lr', default=0.00001, type=float, help='learning rate')
parser.add_argument('--wd', default=5e-3, type=float, help='weight decay')
parser.add_argument('--architecture', default="resnext", type=str, help='architecture - resnext | densenet')
parser.add_argument('--machine', default=7, type=int, help='define gpu no.')
parser.add_argument('--patience', default=10, type=int, help='patience for learning rate change')
parser.add_argument('--batch_size', default=16, type=int, help='batch size')
parser.add_argument('--input_size', default=225, type=int, help='input image')
parser.add_argument('--epochs', default=100, type=int, help='epochs')
parser.add_argument('--description', default="fine_tune", type=str, help='experiment name | description')
parser.add_argument('--data_path', default="fine_tune", type=str, help=' path for data of specifc fold - Fold 0|1|2|3|4 ')
args = parser.parse_args()
batch_size = args.batch_size
image_size = args.input_size
LR = args.lr
patience = args.patience
weight_decay = args.wd
fold_root = args.data_path
device = torch.device(f"cuda:{args.machine}")
epochs = args.epochs
experiment_description = args.description
architecture = args.architecture
raw_train_transform = transforms.Compose([
transforms.RandomCrop((image_size,image_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
ref_train_transform = transforms.Compose([
transforms.RandomCrop((image_size,image_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
val_transform = transforms.Compose([
transforms.Resize((image_size*4,image_size*4)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485], std=[0.229])
])
train_raw_image_file = fold_root + 'X_raw_train.npy'
train_ref_image_file = fold_root + 'X_ref_train.npy'
train_label_file = fold_root + 'Y_train.npy'
val_raw_image_file = fold_root + 'X_raw_val.npy'
val_ref_image_file = fold_root + 'X_ref_val.npy'
val_label_file = fold_root + 'Y_val.npy'
train_dataset = MBV_Dataset(raw_train_file_path = train_raw_image_file , reflectance_train_file_path=train_ref_image_file, label_file_path=train_label_file, transform= [raw_train_transform, ref_train_transform])
train_loader = DataLoader(train_dataset, batch_size = batch_size, shuffle=True, sampler=None) #, sampler = BalancedBatchSampler(train_dataset)
val_dataset = MBV_Dataset(raw_train_file_path = val_raw_image_file , reflectance_train_file_path=val_ref_image_file, label_file_path=val_label_file, transform= [val_transform])
val_loader = DataLoader(val_dataset, batch_size = batch_size, shuffle=False, sampler=None)
if architecture == "resnext":
downstream_task_model = Resnext_Model( pretrained=True)
elif architecture == "densenet":
downstream_task_model = Densenet_Model(pretrained=True)
else:
raise ValueError("invalid architecture name")
downstream_task_model = downstream_task_model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(downstream_task_model.parameters(), lr=LR, weight_decay= weight_decay)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.1 ,patience=patience, min_lr= 5e-3)
writer = SummaryWriter(log_dir=mbv_config.tensorboard_path+experiment_description)
train_util = Train_Util(experiment_description = experiment_description,image_type=mbv_config.image_both, epochs = epochs, model=downstream_task_model, device=device, train_loader=train_loader, val_loader=val_loader, optimizer=optimizer, criterion=criterion, batch_size=batch_size,scheduler=scheduler, writer=writer)
train_util.train_and_evaluate()
if __name__ == "__main__":
train()
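# Example invocation (paths and experiment name are hypothetical; the flags are
# the ones defined in train() above):
#   python train_common.py --architecture resnext --machine 0 --batch_size 16 \
#       --epochs 100 --description fold0_finetune --data_path /data/folds/Fold_0/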
| 1.960938
| 2
|
therma.py
|
tomacorp/thermapythia
| 3
|
12781619
|
#!/Users/toma/python278i/bin/python
# -*- coding: utf-8 -*-
#
import MainWindow
import os
import platform
import sys
from PyQt4.QtGui import (QApplication, QIcon)
__version__ = "1.0.0"
def main():
app = QApplication(sys.argv)
app.setOrganizationName("tomacorp")
app.setOrganizationDomain("tomacorp.com")
app.setWindowIcon(QIcon(":/icon.png"))
w = MainWindow.Window()
w.show()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
| 2.375
| 2
|
python/scripts/waterfall_viewer/pyPeekTCP_1pol.py
|
eschnett/kotekan
| 19
|
12781620
|
<filename>python/scripts/waterfall_viewer/pyPeekTCP_1pol.py
# === Start Python 2/3 compatibility
from __future__ import absolute_import, division, print_function, unicode_literals
from future.builtins import * # noqa pylint: disable=W0401, W0614
from future.builtins.disabled import * # noqa pylint: disable=W0401, W0614
# === End Python 2/3 compatibility
from future import standard_library
standard_library.install_aliases()
import time
import threading
import socket
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import matplotlib.dates as md
import datetime
import struct
np.seterr(divide="ignore", invalid="ignore")
# struct IntensityHeader {
# int packet_length; // - packet length
# int header_length; // - header length
# int samples_per_packet; // - number of samples in packet (or dimensions, n_freq x n_time x n_stream?)
# int sample_type; // - data type of samples in packet
# double raw_cadence; // - raw sample cadence
# int num_freqs; // - freq list / map
# int samples_summed; // - samples summed for each datum
# uint handshake_idx; // - frame idx at handshake
# double handshake_utc; // - UTC time at handshake
# char stokes_type; // - description of stream (e.g. V / H pol, Stokes-I / Q / U / V)
# // -8 -7 -6 -5 -4 -3 -2 -1 1 2 3 4
# // YX XY YY XX LR RL LL RR I Q U V
# };
header_fmt = "=iiiidiiiId"
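# Sanity-check sketch (not in the original script): with native byte order and
# standard sizes, the ten fields above pack into the 48-byte header that the
# handshake below reads in one go.
#
#   import struct
#   assert struct.calcsize("=iiiidiiiId") == 48  # 4*4 + 8 + 4*3 + 4 + 8 bytes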
stokes_lookup = ["YX", "XY", "YY", "XX", "LR", "RL", "LL", "RR", "I", "Q", "U", "V"]
TCP_IP = "0.0.0.0"
TCP_PORT = 23401
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((TCP_IP, TCP_PORT))
sock.listen(1)
def updatefig(*args):
global waterfall, times, medsub, colorscale, p, ax
global spec_baseline
global pkt_elems
tmin = md.date2num(datetime.datetime.fromtimestamp(np.amin(times)))
tmax = md.date2num(datetime.datetime.fromtimestamp(np.amax(times)))
if medsub:
p[0].set_data(
waterfall[:, :, 0] - np.nanmedian(waterfall[:, :, 0], axis=0)[np.newaxis, :]
)
else:
p[0].set_data(waterfall[:, :, 0] - spec_baseline[np.newaxis, :])
p[0].set_extent([freqlist[0, 0], freqlist[-1, -1], tmin, tmax])
p[0].set_clim(vmin=colorscale[0], vmax=colorscale[1])
d = np.nanmean(waterfall[:, :, 0] - spec_baseline[np.newaxis, :], axis=1)
p[1].set_data(d, times)
ax[0, 1].set_xlim([np.nanmin(d), np.nanmax(d)])
ax[0, 1].set_ylim([np.amin(times), np.amax(times)])
d = np.nanmean(waterfall[:, :, 0], axis=0)
p[2].set_data(
freqlist.reshape(plot_freqs, -1).mean(axis=1), d - spec_baseline[np.newaxis, :]
)
ax[1, 0].set_ylim(colorscale)
return (p,)
def receive(connection, length):
chunks = []
bytes_recd = 0
while bytes_recd < length:
chunk = connection.recv(min(length - bytes_recd, 2048))
if chunk == b"":
raise RuntimeError("socket connection broken")
chunks.append(chunk)
bytes_recd = bytes_recd + len(chunk)
return b"".join(chunks)
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
print(len(packed_header), packed_header)
tcp_header = struct.unpack(header_fmt, packed_header)
pkt_length = tcp_header[0] # packet_length
pkt_header = tcp_header[1] # header_length
pkt_samples = tcp_header[2] # samples_per_packet
pkt_dtype = tcp_header[3] # sample_type (?in bytes?)
pkt_raw_cad = tcp_header[4] # raw_cadence
pkt_freqs = tcp_header[5] # num_freqs
pkt_elems = tcp_header[6] # num_elems
pkt_int_len = tcp_header[7] # samples_summed
pkt_idx0 = tcp_header[8] # handshake_idx
pkt_utc0 = tcp_header[9] # handshake_utc
print(tcp_header)
sec_per_pkt_frame = pkt_raw_cad * pkt_int_len
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
freqlist = np.fromstring(info_header[: pkt_freqs * 4 * 2], dtype=np.float32).reshape(
-1, 2
) # .mean(axis=1)
freqlist = freqlist / 1e6
elemlist = np.fromstring(info_header[pkt_freqs * 4 * 2 :], dtype=np.int8)
print(freqlist, elemlist)
plot_freqs = 256
plot_times = 256
total_integration = 64 * 8
if pkt_int_len > total_integration:
print("Pre-integrated to longer than desired time!")
print("{} vs {}".format(pkt_int_len, total_integration))
print("Resetting integration length to {}".format(pkt_int_len))
total_integration = pkt_int_len
local_integration = total_integration // pkt_int_len
waterfall = np.zeros((plot_times, plot_freqs, pkt_elems), dtype=np.float32) + np.nan
times = np.zeros(plot_times)
spec_baseline = np.ones(plot_freqs)
def data_listener():
global connection, sock
global waterfall
global times, total_integration, pkt_idx0
last_idx = pkt_idx0
data_pkt_frame_idx = 0
data_pkt_samples_summed = 1
idx = 0
while True:
try:
d = np.zeros([pkt_freqs, pkt_elems])
n = np.zeros([pkt_freqs, pkt_elems])
t = np.zeros(plot_times)
for _ in np.arange(local_integration * pkt_elems):
data = receive(connection, pkt_length + pkt_header)
if len(data) != pkt_length + pkt_header:
print("Lost Connection!")
connection.close()
return
(
data_pkt_frame_idx,
data_pkt_elem_idx,
data_pkt_samples_summed,
) = struct.unpack("III", data[:pkt_header])
d[:, data_pkt_elem_idx] += (
np.fromstring(data[pkt_header:], dtype=np.uint32) * 1.0
)
n[:, data_pkt_elem_idx] += data_pkt_samples_summed * 1.0
roll_idx = (data_pkt_frame_idx - last_idx) // local_integration
times = np.roll(times, roll_idx)
times[0] = sec_per_pkt_frame * (data_pkt_frame_idx - pkt_idx0) + pkt_utc0
waterfall = np.roll(waterfall, roll_idx, axis=0)
waterfall[0, :, :] = 10 * np.log10(
(d / n).reshape(-1, pkt_freqs // plot_freqs, pkt_elems).mean(axis=1)
)
if np.mean(n) != total_integration:
print(np.mean(n), np.std(n))
last_idx = data_pkt_frame_idx
# except socket.error, exc:
except:
connection, client_address = sock.accept()
packed_header = receive(connection, 48)
info_header = receive(connection, pkt_freqs * 4 * 2 + pkt_elems * 1)
print("Reconnected!")
thread = threading.Thread(target=data_listener)
thread.daemon = True
thread.start()
time.sleep(1)
f, ax = plt.subplots(
2, 2, gridspec_kw={"height_ratios": [4, 1], "width_ratios": [4, 1]}
)
f.subplots_adjust(right=0.8, top=0.95, wspace=0.0, hspace=0.0)
ax[-1, -1].axis("off")
plt.ioff()
p = []
tmin = md.date2num(
datetime.datetime.fromtimestamp(
pkt_utc0 - plot_times * local_integration * sec_per_pkt_frame
)
)
tmax = md.date2num(datetime.datetime.fromtimestamp(pkt_utc0))
times = pkt_utc0 - np.arange(plot_times) * local_integration * sec_per_pkt_frame
date_format = md.DateFormatter("%H:%M:%S")
medsub = False
med_range = [-1, 1]
full_range = [40, 60]
colorscale = med_range if medsub else full_range
oc = colorscale
p.append(
ax[0][0].imshow(
waterfall[:, :, 0],
aspect="auto",
animated=True,
origin="upper",
interpolation="nearest",
cmap="gray",
vmin=colorscale[0],
vmax=colorscale[1],
extent=[freqlist[0, 0], freqlist[-1, -1], tmin, tmax],
)
)
ax[0][0].set_yticklabels([])
ax[0][0].yaxis_date()
ax[0][0].set_title(stokes_lookup[elemlist[0] + 8])
ax[0][0].set_ylabel("Local Time")
ax[0][0].yaxis_date()
ax[0][0].xaxis.set_visible(False)
ax[0][0].yaxis.set_major_formatter(date_format)
ax[0][1].yaxis.set_visible(False)
d = np.nanmean(waterfall[:, :, 0], axis=1)
ax[0][1].set_xlim(np.amin(d), np.amax(d))
ax[0][1].set_ylim([tmin, tmax])
(im,) = ax[0][1].plot(d, times, ".")
ax[0][1].set_xlabel("Power (dB, arb)")
p.append(im)
ax[1][0].set_xlim(freqlist[0, 0], freqlist[-1, -1])
ax[1][0].set_ylim(colorscale)
(im,) = ax[1][0].plot(
freqlist.reshape(plot_freqs, -1).mean(axis=1),
np.nanmean(waterfall[:, :, 0], axis=0),
".",
)
p.append(im)
ax[1][0].set_xlabel("Frequency (MHz)")
ax[1][0].set_ylabel("Power (dB, arb)")
cbar_ax = f.add_axes([0.85, 0.15, 0.05, 0.8])
c = f.colorbar(p[0], cax=cbar_ax)
c.set_label("Power (dB, arbitrary)")
ani = animation.FuncAnimation(f, updatefig, frames=100, interval=100)
f.show()
# UI
from matplotlib.widgets import Button
import pickle
def save(event):
global check, waterfall, times, freqlist
freqs = freqlist.reshape(plot_freqs, -1).mean(axis=1)
data = 10 ** (waterfall / 10)
fn = time.strftime("MP_%Y%m%d-%H%M%S.pkl")
pickle.dump({"freqs": freqs, "times": times, "data": data}, open(fn, "wb"))
print("Data Saved to {}".format(fn))
rax = plt.axes([0.82, 0.03, 0.13, 0.04])
save_btn = Button(rax, "Save Data")
save_btn.on_clicked(save)
def set_spec_baseline(event):
global spec_baseline, waterfall, colorscale, oc
spec_baseline = np.nanmean(waterfall[:, :, 0], axis=0)
colorscale = [-1, 1]
print("Set a new spectral baseline")
rax = plt.axes([0.7, 0.07, 0.13, 0.04])
sb_btn = Button(rax, "Renorm Spec")
sb_btn.on_clicked(set_spec_baseline)
def reset_spec_baseline(event):
global spec_baseline, plot_freqs, colorscale, oc
spec_baseline = np.ones(plot_freqs)
colorscale = oc
print("Removed spectral baseline")
rax = plt.axes([0.82, 0.07, 0.13, 0.04])
rsb_btn = Button(rax, "Un-Renorm")
rsb_btn.on_clicked(reset_spec_baseline)
| 1.6875
| 2
|
brands/tests/test_views.py
|
netvigator/auctions
| 0
|
12781621
|
<filename>brands/tests/test_views.py
# import inspect
import logging
from django.urls import reverse
from psycopg2.errors import UniqueViolation
from core.tests.base import BaseUserWebTestCase
from core.utils import getExceptionMessageFromResponse
from ..models import Brand
from ..views import BrandCreateView
from .base import BrandModelWebTestBase
# Create your tests here.
class BrandViewsTests( BaseUserWebTestCase ):
"""Brand views tests."""
def test_no_brands_yet(self):
#
"""
If no brands exist, an appropriate message is displayed.
"""
#
response = self.client.get(reverse('brands:index'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['brand_list'], [])
self.assertContains(response, "No brands are available.")
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
def test_got_brands(self):
#
"""
If brands exist, an appropriate message is displayed.
"""
logging.disable(logging.CRITICAL)
#
sBrand = "Proctor & Gamble"
oBrand = Brand( cTitle= sBrand, iUser = self.user1 )
oBrand.save()
sBrand = "Cadillac"
oBrand = Brand( cTitle= sBrand, iUser = self.user1 )
oBrand.save()
sLeverID = str( oBrand.id )
response = self.client.get(reverse('brands:index'))
#response = self.client.get('/brands/')
#pprint( 'printing response:')
#pprint( response )
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(response.context['brand_list'],
['<Brand: Cadillac>', '<Brand: Proctor & Gamble>'] )
self.assertContains(response, "Cadillac")
#
response = self.client.get(
reverse( 'brands:detail', kwargs={ 'pk': sLeverID } ) )
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Cadillac")
self.client.logout()
self.client.login(username='username2', password='<PASSWORD>')
# print( 'logged out of user1, logged into user 2')
response = self.client.get(
reverse( 'brands:edit', kwargs={ 'pk': sLeverID } ) )
self.assertEqual(response.status_code, 403) # forbidden
self.assertEqual(
getExceptionMessageFromResponse( response ),
"Permission Denied -- that's not your record!" )
self.client.logout()
self.client.login(username='username3', password='<PASSWORD>')
# print( 'logged out of user2, logged into user 3')
response = self.client.get(
reverse( 'brands:detail', kwargs={ 'pk': sLeverID } ) )
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Cadillac")
"""
Not logged in, cannot see form, direct to login page.
"""
self.client.logout()
# print( 'logged out of user3, did not log back in ')
response = self.client.get(reverse('brands:index'))
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, '/accounts/login/?next=/brands/')
#pprint( 'printing response:')
#pprint( response )
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
#
logging.disable(logging.NOTSET)
class BrandViewsHitButtons( BrandModelWebTestBase ):
"""
Test Save and Cancel
"""
def test_get(self):
"""
Test GET requests
"""
request = self.factory.get(reverse('brands:add'))
request.user = self.user1
#
response = BrandCreateView.as_view()(request)
self.assertEqual(response.status_code, 200)
#print( 'response.context_data has form and view, view keys:' )
#for k in response.context_data['view'].__dict__:
#print( k )
#self.assertEqual(response.context_data['iUser'], self.user1)
self.assertEqual(response.context_data['view'].__dict__['request'], request)
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
# @patch('brands.models.Brand.save', MagicMock(name="save"))
def test_add_post(self):
"""
Test post requests
"""
data = dict(
cTitle = "Proctor & Gamble",
iUser = self.user1 )
# Create the request
response = self.client.post( reverse('brands:add'), data )
# import pdb; pdb.set_trace()
self.assertEqual( response.status_code, 200 )
#oBrand = Brand.objects.get( cTitle = "Great Widget" )
#self.assertEqual( oBrand, self.brands )
#request = self.factory.get(reverse('brands:add'))
#request.user = self.user1
#response = self.client.post( request, data )
#print( 'response.status_code:', response.status_code )
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
def test_add_hit_cancel(self):
#
"""
Hit cancel when adding
"""
data = dict(
cTitle = "Proctor & Gamble",
iUser = self.user1,
Cancel = True )
# Create the request
response = self.client.post( reverse('brands:add'), data )
#print( 'response.status_code:', response.status_code )
#print( 'response.__dict__:' )
#pprint( response.__dict__ )
self.assertEqual( response.status_code, 200 )
# self.assertRedirects( response, reverse( 'brands:index' ) )
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
def test_search_create_view(self):
#
request = self.factory.get(reverse('brands:add'))
request.user = self.user1
self.object = self.oBrand
#request.POST._mutable = True
#request.POST['cancel'] = True
#
response = BrandCreateView.as_view()(request)
#
# print( 'type( request.POST ):', type( request.POST ) )
#
self.assertEqual(response.status_code, 200 )
#print( 'response.template_name:' )
#print( response.template_name ) ['brands/add.html']
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
# @patch('brands.models.Brand.save', MagicMock(name="save"))
def test_edit_post(self):
"""
Test post requests
"""
# except UniqueViolation: # huh? hitting an error
# see
# https://stackoverflow.com/questions/34695323/django-db-utils-integrityerror-duplicate-key-value-violates-unique-constraint/59401538#59401538
#
iOneMore = Brand.objects.last().id + 1
#
oBrand = Brand( id = iOneMore,
cTitle = "Proctor & Gamble",
iUser = self.user1 )
oBrand.save()
#
data = dict(
cTitle = "Colgate-Palmolive",
iUser = self.user1 )
# Create the request
response = self.client.post(
reverse('brands:edit', kwargs={'pk': oBrand.id} ), data )
# import pdb; pdb.set_trace()
self.assertEqual( response.status_code, 200 )
#
#oEditedBrand = Brand.objects.get( id = oBrand.id )
#
#print( oEditedBrand )
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
def test_edit_hit_cancel(self):
#
"""
Hit cancel when editing
"""
# except UniqueViolation: # huh? hitting an error
# see
# https://stackoverflow.com/questions/34695323/django-db-utils-integrityerror-duplicate-key-value-violates-unique-constraint/59401538#59401538
#
iOneMore = Brand.objects.last().id + 1
#
oBrand = Brand( id = iOneMore,
cTitle = "Proctor & Gamble",
iUser = self.user1 )
#
oBrand.save()
#
data = dict(
cTitle = "Colgate-Palmolive",
iUser = self.user1,
Cancel = True )
# Create the request
response = self.client.post(
reverse('brands:edit', kwargs={'pk': oBrand.id} ), data )
#print( 'response.status_code:', response.status_code )
#print( 'response.__dict__:' )
#pprint( response.__dict__ )
self.assertEqual( response.status_code, 200 )
# self.assertRedirects( response, reverse( 'brands:index' ) )
#
# print( 'ran %s' % inspect.getframeinfo( inspect.currentframe() ).function )
| 2.359375
| 2
|
LeetCode/Trees and Graphs/117. Populating Next Right Pointers in Each Node II/solution.py
|
Ceruleanacg/Crack-Interview
| 17
|
12781622
|
<reponame>Ceruleanacg/Crack-Interview
class TreeLinkNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
self.next = None
class Solution:
# @param root, a tree link node
# @return nothing
def connect(self, root):
if not root:
return
level_nodes = [root]
while level_nodes:
nodes = []
for node in level_nodes:
if node.left:
nodes.append(node.left)
if node.right:
nodes.append(node.right)
for index in range(len(nodes) - 1):
nodes[index].next = nodes[index + 1]
if nodes:
nodes[-1].next = None
level_nodes = nodes
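# Hedged usage sketch (not part of the original solution): link a small tree and
# check the next pointers level by level.
#
#   root = TreeLinkNode(1)
#   root.left, root.right = TreeLinkNode(2), TreeLinkNode(3)
#   root.left.left, root.right.right = TreeLinkNode(4), TreeLinkNode(5)
#   Solution().connect(root)
#   assert root.left.next is root.right
#   assert root.left.left.next is root.right.right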
| 3.484375
| 3
|
001 - Delete-Clusters.py
|
georgeeks/mslearn-dp100
| 0
|
12781623
|
<reponame>georgeeks/mslearn-dp100<gh_stars>0
# tutorial/99-Delete-clusters.py
from azureml.core import Workspace
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
import os
#path = os.path.join(os.getcwd(), 'DP100\.azureml')
ws = Workspace.from_config() # This automatically looks for a directory .azureml
cpu_cluster_name = "cpu-cluster"
cpu_cluster = ComputeTarget(workspace=ws, name=cpu_cluster_name)
# Delete Cluster
cpu_cluster.delete()
# Get status of Cluster
status = cpu_cluster.get_status()
print(status)
| 2.515625
| 3
|
wfpr_modules/github.com/icgc-argo-workflows/data-processing-utility-tools/payload-gen-rna-alignment@0.1.3/main.py
|
icgc-argo-workflows/rna-seq-alignment
| 0
|
12781624
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Copyright (C) 2021, Ontario Institute for Cancer Research
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Authors:
<NAME>
"""
import os
import sys
import json
import argparse
import hashlib
import uuid
import subprocess
import copy
from datetime import date
import re
import tarfile
workflow_full_name = {
'rna-seq-alignment': 'RNA Seq Alignment'
}
analysis_tools = {
'star': 'STAR',
'hisat2': 'HiSAT2'
}
data_type_mapping = {
#file_type: [dataCategory, dataType, [data_subtypes], [star analysis_tools], [hisat2 analysis_tools]]
'genome_aln': ['Sequencing Reads', 'Aligned Reads', ['Genome Alignment'], ['STAR'], ['HiSAT2']],
'transcriptome_aln': ['Sequencing Reads', 'Aligned Reads', ['Transcriptome Alignment'], ['STAR'], ['HiSAT2']],
'chimeric_aln': ['Sequencing Reads', 'Aligned Reads', ['Chimeric Alignment'], ['STAR'], ['HiSAT2']],
'splice_junctions': ['Transcriptome Profiling', 'Splice Junctions', [None], ['STAR'], ['HiSAT2']],
'fastqc': ['Quality Control Metrics', 'Sequencing QC', ['Read Group Metrics'], ['FastQC'], ['FastQC']],
'collectrnaseqmetrics': ['Quality Control Metrics', 'Aligned Reads QC', ['Alignment Metrics'], ['Picard:CollectRnaSeqMetrics'], ['Picard:CollectRnaSeqMetrics']],
'duplicates_metrics': ['Quality Control Metrics', 'Aligned Reads QC', ['Duplicates Metrics'], ['biobambam2:bammarkduplicates2'], ['biobambam2:bammarkduplicates2']],
'supplement': ['Supplement', 'Running Logs', [None], ['STAR'], ['HiSAT2']]
}
def calculate_size(file_path):
return os.stat(file_path).st_size
def calculate_md5(file_path):
md5 = hashlib.md5()
with open(file_path, 'rb') as f:
for chunk in iter(lambda: f.read(1024 * 1024), b''):
md5.update(chunk)
return md5.hexdigest()
def insert_filename_friendly_rg_id(metadata):
filename_friendly_rg_ids = set()
# let's loop it two times, first for the rg id actually doesn't need to convert
for rg in metadata['read_groups']:
submitter_read_group_id = rg['submitter_read_group_id']
filename_friendly_rg_id = "".join([ c if re.match(r"[a-zA-Z0-9\.\-_]", c) else "_" for c in submitter_read_group_id ])
if filename_friendly_rg_id == submitter_read_group_id: # no change, safe to use
rg['filename_friendly_rg_id'] = filename_friendly_rg_id
filename_friendly_rg_ids.add(filename_friendly_rg_id)
for rg in metadata['read_groups']:
submitter_read_group_id = rg['submitter_read_group_id']
filename_friendly_rg_id = "".join([ c if re.match(r"[a-zA-Z0-9\.\-_]", c) else "_" for c in submitter_read_group_id ])
if filename_friendly_rg_id == submitter_read_group_id: # no change, already covered
continue
if filename_friendly_rg_id in filename_friendly_rg_ids: # the converted new friendly ID conflicts with existing one
for i in range(len(metadata['read_groups'])):
if not '%s_%s' % (filename_friendly_rg_id, i+1) in filename_friendly_rg_ids:
filename_friendly_rg_id += '_%s' % str(i+1)
break
rg['filename_friendly_rg_id'] = filename_friendly_rg_id
filename_friendly_rg_ids.add(filename_friendly_rg_id)
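# Illustrative example of the conversion above (not from the original file):
# every character outside [a-zA-Z0-9.\-_] is replaced with "_", and colliding
# results get a numeric suffix.
#
#   >>> "".join(c if re.match(r"[a-zA-Z0-9\.\-_]", c) else "_" for c in "RG 1/A")
#   'RG_1_A'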
def get_rg_id_from_ubam_qc(tar, metadata):
tar_basename = os.path.basename(tar) # TEST-PR.DO250122.SA610149.D0RE2_1_.6cae87bf9f05cdfaa4a26f2da625f3b2.lane.bam.fastqc.tgz
md5sum_from_filename = tar_basename.split('.')[-5]
if not re.match(r'^[a-f0-9]{32}$', md5sum_from_filename):
sys.exit('Error: ubam naming not expected %s' % tar_basename)
for rg in metadata.get("read_groups"):
rg_id_in_bam = rg.get("read_group_id_in_bam") if rg.get("read_group_id_in_bam") else rg.get("submitter_read_group_id")
seq_file_name = rg.get("file_r1")
bam_name = seq_file_name if seq_file_name.endswith('.bam') else ''
md5sum_from_metadata = hashlib.md5(("%s %s" % (bam_name, rg_id_in_bam)).encode('utf-8')).hexdigest()
if md5sum_from_metadata == md5sum_from_filename:
return rg.get("filename_friendly_rg_id"), rg.get("submitter_read_group_id")
# up to this point no match found, then something wrong
sys.exit('Error: unable to match ubam qc metric tar "%s" to read group id' % tar_basename)
def get_dupmetrics(file_to_upload):
library = []
with tarfile.open(file_to_upload, 'r') as tar:
for member in tar.getmembers():
if member.name.endswith('.duplicates_metrics.txt'):
f = tar.extractfile(member)
cols_name = []
for r in f:
row = r.decode('utf-8')
if row.startswith('LIBRARY'):
cols_name = row.strip().split('\t')
continue
if cols_name:
if not row.strip(): break
metric = {}
cols = row.strip().split('\t')
for n, c in zip(cols_name, cols):
if n == "LIBRARY": metric.update({n: c})
elif '.' in c or 'e' in c: metric.update({n: float(c)})
else: metric.update({n: int(c)})
library.append(metric)
return library
def get_files_info(file_to_upload, date_str, seq_experiment_analysis_dict, aligner=None):
file_info = {
'fileSize': calculate_size(file_to_upload),
'fileMd5sum': calculate_md5(file_to_upload),
'fileAccess': 'controlled',
'info': {}
}
experimental_strategy = seq_experiment_analysis_dict['experiment']['experimental_strategy'].lower()
fname_sample_part = seq_experiment_analysis_dict['samples'][0]['sampleId']
aligner_or_rgid = aligner.lower() if aligner else None
submitter_rg_id = None
if re.match(r'^genome.merged.+?(cram|cram\.crai|bam|bam\.bai)$', file_to_upload):
file_type = 'genome_aln'
elif re.match(r'^transcriptome.merged.+?(cram|cram\.crai|bam|bam\.bai)$', file_to_upload):
file_type = 'transcriptome_aln'
elif re.match(r'^chimeric.merged.+?(cram|cram\.crai|bam|bam\.bai)$', file_to_upload):
file_type = 'chimeric_aln'
elif re.match(r'.+?\.fastqc\.tgz$', file_to_upload):
file_type = 'fastqc'
aligner_or_rgid, submitter_rg_id = get_rg_id_from_ubam_qc(file_to_upload, seq_experiment_analysis_dict)
elif re.match(r'.+?\.collectrnaseqmetrics\.tgz$', file_to_upload):
file_type = 'collectrnaseqmetrics'
elif re.match(r'.+?\.duplicates_metrics\.tgz$', file_to_upload):
file_type = 'duplicates_metrics'
elif re.match(r'.+?_SJ\.out\.tab$', file_to_upload):
file_type = 'splice_junctions'
elif re.match(r'.+?splicesites\.txt$', file_to_upload):
file_type = 'splice_junctions'
elif re.match(r'.+?supplement\.tgz$', file_to_upload) or re.match(r'.+?supplement\.tar.gz$', file_to_upload):
file_type = 'supplement'
else:
sys.exit('Error: unknown file type "%s"' % file_to_upload)
if file_type in ['fastqc', 'collectrnaseqmetrics', 'duplicates_metrics', 'aln_metrics', 'supplement']:
file_ext = 'tgz'
elif file_type in ['genome_aln', 'transcriptome_aln', 'chimeric_aln']:
if file_to_upload.endswith('.bam'):
file_ext = 'bam'
elif file_to_upload.endswith('.bam.bai'):
file_ext = 'bam.bai'
elif file_to_upload.endswith('.cram'):
file_ext = 'cram'
elif file_to_upload.endswith('.cram.crai'):
file_ext = 'cram.crai'
else:
sys.exit('Error: unknown aligned seq extention: %s' % file_to_upload)
elif file_type in ['splice_junctions']:
file_ext = 'txt'
else:
sys.exit('Error: unknown file type "%s"' % file_type)
# file naming patterns:
# pattern: <argo_study_id>.<argo_donor_id>.<argo_sample_id>.[rna-seq].<date>.<aligner|rg_id>.<file_type>.<file_ext>
# example: TEST-PR.DO250183.SA610229.rna-seq.20200319.star.genome_aln.cram
new_fname = '.'.join([
seq_experiment_analysis_dict['studyId'],
seq_experiment_analysis_dict['samples'][0]['donor']['donorId'],
fname_sample_part,
experimental_strategy,
date_str,
aligner_or_rgid,
file_type,
file_ext
])
file_info['fileName'] = new_fname
file_info['fileType'] = new_fname.split('.')[-1].upper()
file_info['info'] = {
'data_category': data_type_mapping[file_type][0],
'data_subtypes': data_type_mapping[file_type][2]
}
if not aligner:
file_info['info']['analysis_tools'] = ["FastQC"]
elif aligner.lower() == 'star':
file_info['info']['analysis_tools'] = data_type_mapping[file_type][3]
elif aligner.lower() == 'hisat2':
file_info['info']['analysis_tools'] = data_type_mapping[file_type][4]
if new_fname.endswith('.bai') or new_fname.endswith('.crai'):
file_info['dataType'] = 'Aligned Reads Index'
else:
file_info['dataType'] = data_type_mapping[file_type][1]
# extract info into payload
extra_info = {}
if new_fname.endswith('.tgz'):
tar = tarfile.open(file_to_upload)
for member in tar.getmembers():
if member.name.endswith('qc_metrics.json') or member.name.endswith('.extra_info.json'):
f = tar.extractfile(member)
extra_info = json.load(f)
else:
if not file_info['info'].get('files_in_tgz'): file_info['info']['files_in_tgz'] = []
file_info['info']['files_in_tgz'].append(os.path.basename(member.name))
# retrieve duplicates metrics from the file
if file_info['info']['data_subtypes'][0] == 'Duplicates Metrics':
extra_info['metrics'] = {
'libraries': get_dupmetrics(file_to_upload)
}
if file_info['info']['data_subtypes'][0] == 'Read Group Metrics':
extra_info['metrics'].update({'read_group_id': submitter_rg_id})
if extra_info:
extra_info.pop('tool', None)
file_info['info'].update(extra_info)
new_dir = 'out'
try:
os.mkdir(new_dir)
except FileExistsError:
pass
dst = os.path.join(os.getcwd(), new_dir, new_fname)
os.symlink(os.path.abspath(file_to_upload), dst)
return file_info
def get_sample_info(sample_list):
samples = copy.deepcopy(sample_list)
for sample in samples:
for item in ['info', 'sampleId', 'specimenId', 'donorId', 'studyId']:
sample.pop(item, None)
sample['specimen'].pop(item, None)
sample['donor'].pop(item, None)
return samples
def main():
"""
Python implementation of tool: payload-gen-rna-alignment
"""
parser = argparse.ArgumentParser(description='Tool: payload-gen-rna-alignment')
parser.add_argument("-f", "--files_to_upload", dest="files_to_upload", type=str, required=True,
nargs="+", help="Files to upload")
parser.add_argument("-a", "--seq_experiment_analysis", dest="seq_experiment_analysis", required=True,
help="Input analysis for sequencing experiment", type=str)
parser.add_argument("-t", "--analysis_type", dest="analysis_type", required=True, help="Specify analysis_type")
parser.add_argument("-l", "--aligner", dest="aligner", default=None, help="Provide RNA-Seq aligner if files_to_upload are generated from alignment results. Default=None")
parser.add_argument("-g", "--genome_annotation", dest="genome_annotation", default="GENCODE v38", help="RNA-Seq alignment genome annotation")
parser.add_argument("-b", "--genome_build", dest="genome_build", default="GRCh38_hla_decoy_ebv", help="RNA-Seq alignment genome build")
parser.add_argument("-w", "--wf_name", dest="wf_name", required=True, help="Workflow name")
parser.add_argument("-v", "--wf_version", dest="wf_version", required=True, help="Workflow version")
parser.add_argument("-r", "--wf_run", dest="wf_run", required=True, help="Workflow run ID")
parser.add_argument("-s", "--wf_session", dest="wf_session", required=True, help="Workflow session ID")
args = parser.parse_args()
with open(args.seq_experiment_analysis, 'r') as f:
seq_experiment_analysis_dict = json.load(f)
payload = {
'analysisType': {
'name': args.analysis_type
},
'studyId': seq_experiment_analysis_dict.get('studyId'),
'workflow': {
'workflow_name': workflow_full_name.get(args.wf_name, args.wf_name),
'workflow_version': args.wf_version,
'genome_build': args.genome_build,
'genome_annotation': args.genome_annotation,
'run_id': args.wf_run,
'session_id': args.wf_session,
'inputs': [
{
'analysis_type': 'sequencing_experiment',
'input_analysis_id': seq_experiment_analysis_dict.get('analysisId')
}
]
},
'files': [],
'samples': get_sample_info(seq_experiment_analysis_dict.get('samples')),
'experiment': seq_experiment_analysis_dict.get('experiment')
}
if "sequencing_alignment" in args.analysis_type:
payload['read_group_count'] = seq_experiment_analysis_dict.get('read_group_count')
payload['read_groups'] = copy.deepcopy(seq_experiment_analysis_dict.get('read_groups'))
# pass `info` dict from seq_experiment payload to new payload
if 'info' in seq_experiment_analysis_dict and isinstance(seq_experiment_analysis_dict['info'], dict):
payload['info'] = seq_experiment_analysis_dict['info']
if 'library_strategy' in payload['experiment']:
experimental_strategy = payload['experiment'].pop('library_strategy')
payload['experiment']['experimental_strategy'] = experimental_strategy
insert_filename_friendly_rg_id(seq_experiment_analysis_dict)
# get file of the payload
date_str = date.today().strftime("%Y%m%d")
for f in args.files_to_upload:
file_info = get_files_info(f, date_str, seq_experiment_analysis_dict, args.aligner)
payload['files'].append(file_info)
with open("%s.%s.payload.json" % (str(uuid.uuid4()), args.analysis_type), 'w') as f:
f.write(json.dumps(payload, indent=2))
if __name__ == "__main__":
main()
| 2.03125
| 2
|
tinier_tim/rebin_psf.py
|
jiwoncpark/tinier-tim
| 0
|
12781625
|
# -*- coding: utf-8 -*-
"""Rebinning the PSF
This script rebins the given PSF and stores the rebinned PSFs in the specified directory.
"""
import os
import argparse
import rebinning_utils
def parse_args():
"""Parse command-line arguments
"""
parser = argparse.ArgumentParser()
parser.add_argument('psf_file_path', help='path to the fits file of the PSF to be rebinned')
parser.add_argument('--factor', default=4, dest='factor', type=int,
help='size of rebinning kernel in number of pixels')
parser.add_argument('--save_dir', default='rebinned_dir', dest='rebinned_dir', type=str,
help='directory in which the rebinned (non-drizzled) PSFs will be stored. If it does not exist, it will be created.')
args = parser.parse_args()
return args
def main():
args = parse_args()
input_psf = rebinning_utils.load_psf_map(args.psf_file_path)
if not os.path.exists(args.rebinned_dir):
os.makedirs(args.rebinned_dir)
_ = rebinning_utils.rebin_psf(input_psf, factor=args.factor, save_dir=args.rebinned_dir)
if __name__ == '__main__':
main()
| 3.21875
| 3
|
gen_syscalls.py
|
HexRabbit/syscall-table
| 3
|
12781626
|
<filename>gen_syscalls.py
import re, json, ctags
from ctags import CTags, TagEntry
prefix = './linux/'
# file generated by ctags --fields=afmikKlnsStz --c-kinds=+pc -R
tags = CTags(prefix+'tags')
entry = TagEntry()
syscall_tbl = open(prefix+'arch/x86/entry/syscalls/syscall_64.tbl', 'r')
syscall_out = open('www/syscall.json', 'w+')
syscall_func = []
def parse_line(entry):
file_name = entry['file']
line_num = int(entry['lineNumber'])
syscall_str = entry['pattern'][2:-2]
if syscall_str[-1] != ')':
with open(prefix+file_name, 'r') as f:
# enumerate(x) uses x.next(), so it doesn't need the entire file in memory.
start = False
for i, line in enumerate(f):
if i == line_num:
start = True
if start:
line = line.strip()
syscall_str += line
if line[-1] == ')':
break
match = re.search('SYSCALL_DEFINE\d\((.*)\)', syscall_str)
symbols = map(str.strip, match.group(1).split(',')[1:])
params = []
for i in range(len(symbols)/2):
params.append({
'type': symbols[i*2] + ' ' + symbols[i*2+1],
'def': None
})
return params + [{}]*(6-len(params))
for line in syscall_tbl.readlines():
line = line.strip()
if line:
if line.startswith('#'):
continue
else:
syscall = re.search('(\d*)\s*(\w*)\s*(\w*)\s*(\w*)(/.*)?', line)
symbols = syscall.groups()
func_id = int(symbols[0])
func_type = symbols[1]
func_name = symbols[2]
func_fullname = symbols[3][10:] if symbols[3] else 'not implemented'
if func_type != 'x32':
if tags.find(entry, 'SYSCALL_DEFINE', ctags.TAG_PARTIALMATCH):
while True:
# '[,\)]' is essential to filter mmap2 or the like
if re.search('SYSCALL_DEFINE\d\('+func_fullname+'[,\)]', entry['pattern']):
parsed = parse_line(entry)
syscall_info = [
func_id,
func_name,
'{0:#04x}'.format(func_id)
]
syscall_info += parsed
syscall_info += [entry['file'], entry['lineNumber']]
syscall_func.append(syscall_info)
break
elif not tags.findNext(entry):
# print(func_name)
syscall_func.append([
func_id,
func_name,
'{0:#04x}'.format(func_id),
{},
{},
{},
{},
{},
{},
'',
0
])
break
syscall_out.write(
json.dumps(
{
'aaData': syscall_func
},
sort_keys=True,
indent=2
))
syscall_tbl.close()
syscall_out.close()
| 2.6875
| 3
|
Python2.7-UnpackAndUploadToCOS/index.py
|
tencentyun/scf-demo-repo
| 46
|
12781627
|
# -*- coding: utf-8 -*-
#####----------------------------------------------------------------#####
##### #####
#####       Tutorial / readme:                                       #####
##### https://cloud.tencent.com/document/product/583/47076 #####
##### #####
#####----------------------------------------------------------------#####
import os
import sys
import os.path
import zipfile
import patool
import logging
from qcloud_cos_v5 import CosConfig
from qcloud_cos_v5 import CosS3Client
from qcloud_cos_v5 import CosServiceError
reload(sys)
sys.setdefaultencoding('utf8')
os.environ['PATH'] = os.getenv("PATH")+":"+os.getcwd()
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
region = os.getenv('targetRegion')
bucket_upload = os.getenv('targetBucket')
unpack_suffix=os.getenv('suffix')
target_dir=os.getenv('targetPrefix')
logger = logging.getLogger()
def run_extract(archive, outdir):
"""Extract files from archive(s)."""
try:
patool.extract_archive(archive, verbosity=False, interactive="--non-interactive", outdir=outdir)
except PatoolError as msg:
        logger.error("error extracting %s: %s" % (archive, msg))
class PatoolError(Exception):
pass
def _fullpath(x):
x = os.path.expandvars(x)
x = os.path.expanduser(x)
x = os.path.normpath(x)
x = os.path.abspath(x)
return x
class Archive(object):
'''
:param backend: ``auto``, ``patool`` or ``zipfile``
:param filename: path to archive file
'''
def __init__(self, filename, backend='auto'):
self.filename = _fullpath(filename)
self.backend = backend
def extractall_patool(self, directory):
logger.debug('starting backend patool')
try:
run_extract(self.filename, directory)
except PatoolError as msg:
logger.info("error extracting %s: %s", self.filename, msg)
def extractall_zipfile(self, directory):
logger.debug('starting backend zipfile')
zipfile.ZipFile(self.filename).extractall(directory)
def extractall(self, directory, auto_create_dir=False):
'''
:param directory: directory to extract to
:param auto_create_dir: auto create directory
'''
logger.debug('extracting %s into %s (backend=%s)', self.filename, directory, self.backend)
is_zipfile = zipfile.is_zipfile(self.filename)
directory = _fullpath(directory)
if not os.path.exists(self.filename):
raise ValueError(
'archive file does not exist:' + str(self.filename))
if not os.path.exists(directory):
if auto_create_dir:
os.makedirs(directory)
else:
raise ValueError('directory does not exist:' + str(directory))
if self.backend == 'auto':
if is_zipfile:
self.extractall_zipfile(directory)
else:
self.extractall_patool(directory)
if self.backend == 'zipfile':
if not is_zipfile:
raise ValueError('file is not zip file:' + str(self.filename))
self.extractall_zipfile(directory)
if self.backend == 'patool':
self.extractall_patool(directory)
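# A minimal usage sketch of the Archive helper above (the paths are hypothetical examples):
#   Archive('/tmp/sample.zip', backend='auto').extractall('/tmp/unpack', auto_create_dir=True)
# With backend='auto', a genuine zip file is handled by the zipfile module and anything
# else is handed to patool.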
def delete_local_file(src):
logger.info("delete files and folders")
if os.path.isfile(src):
try:
os.remove(src)
except:
pass
elif os.path.isdir(src):
for item in os.listdir(src):
itemsrc = os.path.join(src, item)
delete_local_file(itemsrc)
try:
os.rmdir(src)
except:
pass
def upload_local_file(client, src, archivename):
logger.info("start to upload")
for filename in os.listdir(src):
path = src + '/{}'.format(os.path.basename(filename))
logger.info("path is [%s]", path)
if os.path.isfile(path):
logger.info("filename is [%s]", filename)
response = client.put_object_from_local_file(
Bucket=bucket_upload,
LocalFilePath=path,
Key='{}/{}'.format(archivename, filename))
delete_local_file(str(path))
elif os.path.isdir(path):
logger.info("dirname is [%s]", filename)
dirpath = archivename + '/{}'.format(filename)
upload_local_file(client, path, dirpath)
else:
logger.info("upload fail")
def main_handler(event, context):
logger.info("start unpack template function")
secret_id = os.getenv('TENCENTCLOUD_SECRETID')
secret_key = os.getenv('TENCENTCLOUD_SECRETKEY')
token = os.getenv('TENCENTCLOUD_SESSIONTOKEN')
config = CosConfig(Secret_id=secret_id, Secret_key=secret_key, Region=region, Token=token)
client = CosS3Client(config)
for record in event['Records']:
try:
appid = record['cos']['cosBucket']['appid']
bucket = record['cos']['cosBucket']['name'] + '-' + appid
filename = os.path.basename(record['cos']['cosObject']['url'])
download_path = '/tmp/{}'.format(filename.encode('gb18030'))
key = record['cos']['cosObject']['key']
key = key.replace('/' + appid + '/' + record['cos']['cosBucket']['name'] + '/', '', 1)
            # create the local directory for unpacking
isExists = os.path.exists('/tmp/unpack')
if not isExists:
os.mkdir('/tmp/unpack')
unpack_path = '/tmp/unpack'
            # extract the file name (shotname) and extension
(filepath, tempfilename) = os.path.split(filename);
(shotname, extension) = os.path.splitext(tempfilename);
if extension[1:] not in unpack_suffix.split(','):
logger.info("object suffix is [%s], expected: [%s]", extension, unpack_suffix)
return "object suffix is [%s], expected: [%s]" % (extension, unpack_suffix)
logger.info("object name is [%s]", shotname)
# download rar from cos
logger.info("get from [%s] to download object [%s]", bucket, filename)
try:
response = client.get_object(Bucket=bucket, Key=key, )
response['Body'].get_stream_to_file(download_path)
logger.info("download object [%s] Success", filename)
except CosServiceError as e:
print(e.get_error_code())
print(e.get_error_msg())
print(e.get_resource_location())
logger.info("download object [%s] failed", filename)
return "download object fail"
# start to extract archive file and upload to bucket_upload
logger.info("start to extract archive file")
Archive(download_path).extractall(unpack_path, auto_create_dir=True)
logger.info("extract success")
upload_local_file(client, '/tmp/unpack', target_dir)
# clean files
delete_local_file(str(download_path))
delete_local_file(str(unpack_path))
return "extract and upload success"
except Exception as e:
print(e)
raise e
return "extract and upload fail"
| 1.976563
| 2
|
pyiron/base/job/wrappercmd.py
|
SanderBorgmans/pyiron
| 0
|
12781628
|
import sys
import getopt
from pyiron.base.job.wrapper import job_wrapper_function
def command_line(argv):
"""
Parse the command line arguments.
Args:
argv: Command line arguments
"""
debug = False
project_path = None
job_id = None
try:
opts, args = getopt.getopt(argv, "dj:p:h", ["debug", "project_path=", "job_id=", "help"])
except getopt.GetoptError:
print('cms.py --p <project_path> -j <job_id> <debug>')
sys.exit()
else:
for opt, arg in opts:
if opt in ("-h", "--help"):
print('cms.py --p <project_path> -j <job_id> <debug>')
sys.exit()
elif opt in ("-d", "--debug"):
debug = True
elif opt in ("-j", "--job_id"):
job_id = arg
elif opt in ("-p", "--project_path"):
project_path = arg
job_wrapper_function(working_directory=project_path, job_id=job_id, debug=debug)
sys.exit()
if __name__ == "__main__":
command_line(sys.argv[1:])
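# A minimal invocation sketch (the project path and job id are hypothetical):
#   python wrappercmd.py -p /path/to/working_dir -j 1234
# which calls job_wrapper_function(working_directory='/path/to/working_dir', job_id='1234', debug=False).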
| 2.34375
| 2
|
tabcmd/parsers/edit_site_parser.py
|
WillAyd/tabcmd
| 0
|
12781629
|
<gh_stars>0
from .global_options import *
class EditSiteParser:
"""
Parser for the command editsite
"""
@staticmethod
def edit_site_parser(manager, command):
"""Method to parse edit site arguments passed by the user"""
edit_site_parser = manager.include(command)
edit_site_parser.add_argument('sitename', help='name of site to update')
edit_site_parser.add_argument('--site-name', default=None, dest='target', help='new name of site')
edit_site_parser.add_argument('--site-id', default=None, help='id of site')
edit_site_parser.add_argument('--url', default=None, help='url of site')
edit_site_parser.add_argument(
            '--user-quota', type=int, default=None, help='Max number of users that can be added to the site')
edit_site_parser.add_argument(
'--status', default=None, help='Set to ACTIVE to activate a site, or to SUSPENDED to suspend a site.')
edit_site_parser.add_argument(
'--extract-encryption-mode', default=None,
help='The extract encryption mode for the site can be enforced, enabled or disabled')
edit_site_parser.add_argument(
'--run-now-enabled', default=None,
help='Allow or deny users from running extract refreshes, flows, or schedules manually.')
edit_site_parser.add_argument(
'--storage-quota', type=int, default=None,
            help='Maximum amount (in MB) of workbooks, extracts and data sources stored on the site')
group = edit_site_parser.add_mutually_exclusive_group()
group.add_argument('--site-mode', default=None, help='Does not allow site admins to add or remove users')
group.add_argument('--no-site-mode', default=None, help='Allows site admins to add or remove users')
| 2.84375
| 3
|
worker/crawler.py
|
gcvalderrama/Palantir
| 0
|
12781630
|
"""
This script extracts news information from the local web
"""
import glob
import os
import os.path
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions
class Crawler:
def extract_web_news(self, folder, url, tag):
"""
        Extract news items from the elcomercio feed url (XML format) and download them
        to the given folder
:param folder: Folder where the news files will be saved
:param url: feed url like http://elcomercio.pe/feed/lima/policiales.xml
:param tag: news type like 'policiales'
:return: void
"""
browser = webdriver.Firefox()
browser.get(url)
list_linker_href = browser.find_elements_by_xpath('//xhtml:a[@href]')
driver = webdriver.Firefox()
wait = WebDriverWait(driver, 10)
for l in list_linker_href:
news_url = l.get_attribute('href')
driver.get(news_url)
print(news_url)
wait.until(expected_conditions.element_to_be_clickable((By.CLASS_NAME, 'fecha')))
fecha = driver.find_element_by_class_name("fecha").get_attribute("datetime")
file_name = tag + '--' + news_url.split('/')[-1]
try:
news_element = driver.find_element_by_id('main-txt-nota')
except NoSuchElementException:
print('main-txt-nota not found on ' + file_name)
continue
news_content = news_element.get_attribute('innerHTML').encode('utf-8')
content = fecha + "\n" + news_content.decode('utf-8')
with open(folder + "/" + file_name + ".html", 'w') as file:
file.write(content)
browser.close()
driver.close()
def clean_raw_news(self, origin, destination, skip_validation):
"""
        Read raw news from origin and, after cleaning, write them into the destination folder
:param origin: Folder that contains all the raw news
:param destination: Destination folder to write clear news content
:param skip_validation: True or False - check file existence
:return: nothing - void
"""
news = glob.glob(origin + "/*.html")
for news_file in news:
print(news_file)
file_name = destination + '/' + news_file.split('/')[1].split('.')[0] + '.txt'
if skip_validation or not os.path.isfile(file_name):
with open(news_file, 'r') as read_file:
news_raw = read_file.read()
# create a new bs4 object from the html data loaded
soup = BeautifulSoup(news_raw, 'lxml')
# remove all javascript and stylesheet code
for script in soup(["script", "style"]):
script.extract()
# get text
text = soup.get_text()
# break into lines and remove leading and trailing space on each
lines = (line.strip() for line in text.splitlines())
# break multi-headlines into a line each
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
# drop blank lines
text = '\n'.join(chunk for chunk in chunks if chunk)
with open(file_name, 'w') as write_file:
write_file.write(text)
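# A minimal usage sketch of the Crawler class above (folder names are hypothetical; the feed
# url is the example given in the docstring):
#   crawler = Crawler()
#   crawler.extract_web_news('raw_news', 'http://elcomercio.pe/feed/lima/policiales.xml', 'policiales')
#   crawler.clean_raw_news('raw_news', 'clean_news', skip_validation=False)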
| 3.328125
| 3
|
python/tabuada.py
|
sswellington/bootcamp
| 1
|
12781631
|
<reponame>sswellington/bootcamp
#!/bin/python3
valor=6
for n in range(1,11,1):
print(str(valor) + " x " + str(n) + " = " + str((valor*n)))
| 3.609375
| 4
|
pilea/cli.py
|
hfjn/pilea
| 0
|
12781632
|
<reponame>hfjn/pilea
from pathlib import Path
import time
import click
from click import ClickException, Context, pass_context
from livereload import Server
from pilea.build_controller import BuildController
from pilea.state import State, pass_state
from shutil import copytree
import os
EXECUTION_PATH = Path(os.getcwd())
INPUT_FOLDER = Path(__file__).parent / "input"
@click.group(help="pilea - a tiny static site generator.")
def pilea():
pass
@pilea.command()
@click.argument("folder")
def new(folder):
"""Creates basic folder structure to work with pilea.
\b
USAGE:
\b
pilea new FOLDER : Creates new project at FOLDER
"""
folder = EXECUTION_PATH / folder
if folder.exists():
raise ClickException(f"{folder} already exists.")
folder.mkdir()
copytree(INPUT_FOLDER, folder / "input")
click.echo(f"Created folder at {folder}")
@pilea.command()
@click.option("--watch", is_flag=True)
@pass_state
def build(state: State, watch: bool):
"""
Compiles the input files. Has to be run from within the project folder.
Resulting files will be in site/
USAGE:
pilea build : Will compile the static site to site/
"""
build_controller = BuildController(state)
build_controller.build_all()
@pilea.command()
@pass_state
@pass_context
def serve(ctx: Context, state: State):
"""
Starts a simple development server with live-reload functionality.
USAGE:
pilea serve : Will serve the static site at localhost:5050
"""
build_controller = BuildController(state)
build_controller.build_all()
server = Server()
server.watch(f"{str(state.input_folder)}/**/*.html", build_controller.build_all)
server.watch(f"{str(state.input_folder)}/**/**/*.md", build_controller.build_all)
server.watch(f"{str(state.input_folder)}/**/*.css", build_controller.build_css)
server.serve(root=state.output_folder)
| 2.796875
| 3
|
p_050_059/problem58.py
|
ericgreveson/projecteuler
| 0
|
12781633
|
<reponame>ericgreveson/projecteuler
from factor_tools import is_prime
def main():
"""
Entry point
"""
num_prime = 3
num_on_diagonals = 1 + 4
size = 3
while num_prime / num_on_diagonals > 0.1:
size += 2
bottom_right = size * size
other_diags = [bottom_right - i * (size - 1) for i in range(1, 4)]
num_prime += sum([1 for i in other_diags if is_prime(i)])
num_on_diagonals += 4
print(f"Side length with <10% prime along diag: {size}")
if __name__ == "__main__":
main()
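# Worked check of the corner formula used above (not from the source): for size = 5 the
# bottom-right corner is 25 and other_diags evaluates to [21, 17, 13], matching the four
# corners 13, 17, 21, 25 of a 5x5 number spiral.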
| 3.421875
| 3
|
hsms/meta/make_sized_bytes.py
|
Quexington/hsms
| 2
|
12781634
|
from typing import Any, BinaryIO
from .bin_methods import bin_methods
from .hexbytes import hexbytes
def make_sized_bytes(size):
"""
Create a streamable type that subclasses "hexbytes" but requires instances
to be a certain, fixed size.
"""
name = "bytes%d" % size
def __new__(self, v):
v = bytes(v)
if not isinstance(v, bytes) or len(v) != size:
raise ValueError("bad %s initializer %s" % (name, v))
return hexbytes.__new__(self, v)
@classmethod
def parse(cls, f: BinaryIO) -> Any:
b = f.read(size)
assert len(b) == size
return cls(b)
def stream(self, f):
f.write(self)
def __str__(self):
return self.hex()
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, str(self))
namespace = dict(
__new__=__new__, parse=parse, stream=stream, __str__=__str__, __repr__=__repr__
)
cls = type(name, (hexbytes, bin_methods), namespace)
return cls
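# A minimal usage sketch (the alias name "bytes32" is an illustrative assumption):
#   bytes32 = make_sized_bytes(32)
#   digest = bytes32(b"\x00" * 32)   # accepted: exactly 32 bytes
#   bytes32(b"\x00" * 31)            # raises ValueError("bad bytes32 initializer ...")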
| 3.109375
| 3
|
niwidgets/__init__.py
|
lrq3000/niwidgets
| 0
|
12781635
|
<gh_stars>0
"""
Widgets to visualise neuroimaging data.
For volume images, try import NiftiWidget.
For surface images, try SurfaceWidget.
"""
# import example data
from .exampledata import exampleatlas, examplezmap, examplet1 # noqa
# import widget classes.
from .niwidget_volume import NiftiWidget # noqa
from .niwidget_surface import SurfaceWidget # noqa
| 1.539063
| 2
|
server/setupdb.py
|
xhuang98/Dtect
| 1
|
12781636
|
from flask_sqlalchemy import SQLAlchemy
from api import app, db
from data.models import Authentication, UserLogin
from data.parser import authlogs
from predictions import update_predictions
# create tables
with app.app_context():
db.create_all()
with app.app_context():
entry = UserLogin(
username="admin",
password="<PASSWORD>"
)
db.session.add(entry)
db.session.commit()
# insert authentication logs
with app.app_context():
for log in authlogs:
entry = Authentication(
time=log[0],
source_user=log[1],
destination_user=log[2],
source_computer=log[3],
destination_computer=log[4],
authentication_type=log[5],
logon_type=log[6],
auth_orientation=log[7],
auth_result=log[8]
)
db.session.add(entry)
db.session.commit()
update_predictions(db, app.app_context())
| 2.40625
| 2
|
running_modes/reinforcement_learning/core_reinforcement_learning.py
|
lilleswing/Reinvent-1
| 183
|
12781637
|
<gh_stars>100-1000
import time
import numpy as np
import torch
from reinvent_chemistry.utils import get_indices_of_unique_smiles
from reinvent_models.lib_invent.enums.generative_model_regime import GenerativeModelRegimeEnum
from reinvent_models.model_factory.configurations.model_configuration import ModelConfiguration
from reinvent_models.model_factory.enums.model_type_enum import ModelTypeEnum
from reinvent_models.model_factory.generative_model import GenerativeModel
from reinvent_models.model_factory.generative_model_base import GenerativeModelBase
from reinvent_scoring import FinalSummary
from reinvent_scoring.scoring.diversity_filters.reinvent_core.base_diversity_filter import BaseDiversityFilter
from reinvent_scoring.scoring.function.base_scoring_function import BaseScoringFunction
from running_modes.configurations import ReinforcementLearningConfiguration
from running_modes.constructors.base_running_mode import BaseRunningMode
from running_modes.reinforcement_learning.inception import Inception
from running_modes.reinforcement_learning.logging.base_reinforcement_logger import BaseReinforcementLogger
from running_modes.reinforcement_learning.margin_guard import MarginGuard
from running_modes.utils.general import to_tensor
class CoreReinforcementRunner(BaseRunningMode):
def __init__(self, critic: GenerativeModelBase, actor: GenerativeModelBase,
configuration: ReinforcementLearningConfiguration,
scoring_function: BaseScoringFunction, diversity_filter: BaseDiversityFilter,
inception: Inception, logger: BaseReinforcementLogger):
self._prior = critic
self._agent = actor
self._scoring_function = scoring_function
self._diversity_filter = diversity_filter
self.config = configuration
self._logger = logger
self._inception = inception
self._margin_guard = MarginGuard(self)
self._optimizer = torch.optim.Adam(self._agent.get_network_parameters(), lr=self.config.learning_rate)
def run(self):
self._logger.log_message("starting an RL run")
start_time = time.time()
self._disable_prior_gradients()
for step in range(self.config.n_steps):
seqs, smiles, agent_likelihood = self._sample_unique_sequences(self._agent, self.config.batch_size)
# switch signs
agent_likelihood = -agent_likelihood
prior_likelihood = -self._prior.likelihood(seqs)
score_summary: FinalSummary = self._scoring_function.get_final_score_for_step(smiles, step)
score = self._diversity_filter.update_score(score_summary, step)
augmented_likelihood = prior_likelihood + self.config.sigma * to_tensor(score)
loss = torch.pow((augmented_likelihood - agent_likelihood), 2)
loss, agent_likelihood = self._inception_filter(self._agent, loss, agent_likelihood, prior_likelihood,
self.config.sigma, smiles, score)
loss = loss.mean()
self._optimizer.zero_grad()
loss.backward()
self._optimizer.step()
            self._stats_and_checkpoint(score, start_time, step, smiles, score_summary,
                                       agent_likelihood, prior_likelihood,
                                       augmented_likelihood)
self._logger.save_final_state(self._agent, self._diversity_filter)
self._logger.log_out_input_configuration()
self._logger.log_out_inception(self._inception)
def _disable_prior_gradients(self):
# There might be a more elegant way of disabling gradients
for param in self._prior.get_network_parameters():
param.requires_grad = False
    def _stats_and_checkpoint(self, score, start_time, step, smiles, score_summary: FinalSummary,
                              agent_likelihood, prior_likelihood, augmented_likelihood):
self._margin_guard.adjust_margin(step)
mean_score = np.mean(score)
self._margin_guard.store_run_stats(agent_likelihood, prior_likelihood, augmented_likelihood, score)
self._logger.timestep_report(start_time, self.config.n_steps, step, smiles,
mean_score, score_summary, score,
agent_likelihood, prior_likelihood, augmented_likelihood, self._diversity_filter)
self._logger.save_checkpoint(step, self._diversity_filter, self._agent)
def _sample_unique_sequences(self, agent, batch_size):
seqs, smiles, agent_likelihood = agent.sample(batch_size)
unique_idxs = get_indices_of_unique_smiles(smiles)
seqs_unique = seqs[unique_idxs]
smiles_np = np.array(smiles)
smiles_unique = smiles_np[unique_idxs]
agent_likelihood_unique = agent_likelihood[unique_idxs]
return seqs_unique, smiles_unique, agent_likelihood_unique
def _inception_filter(self, agent, loss, agent_likelihood, prior_likelihood, sigma, smiles, score):
exp_smiles, exp_scores, exp_prior_likelihood = self._inception.sample()
if len(exp_smiles) > 0:
exp_agent_likelihood = -agent.likelihood_smiles(exp_smiles)
exp_augmented_likelihood = exp_prior_likelihood + sigma * exp_scores
exp_loss = torch.pow((to_tensor(exp_augmented_likelihood) - exp_agent_likelihood), 2)
loss = torch.cat((loss, exp_loss), 0)
agent_likelihood = torch.cat((agent_likelihood, exp_agent_likelihood), 0)
self._inception.add(smiles, score, prior_likelihood)
return loss, agent_likelihood
def reset(self, reset_countdown=0):
model_type_enum = ModelTypeEnum()
model_regime = GenerativeModelRegimeEnum()
actor_config = ModelConfiguration(model_type_enum.DEFAULT, model_regime.TRAINING,
self.config.agent)
self._agent = GenerativeModel(actor_config)
self._optimizer = torch.optim.Adam(self._agent.get_network_parameters(), lr=self.config.learning_rate)
self._logger.log_message("Resetting Agent")
self._logger.log_message(f"Adjusting sigma to: {self.config.sigma}")
return reset_countdown
| 1.796875
| 2
|
lbuild/api.py
|
fb39ca4/lbuild
| 0
|
12781638
|
<filename>lbuild/api.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018, <NAME>
# All Rights Reserved.
#
# The file is part of the lbuild project and is released under the
# 2-clause BSD license. See the file `LICENSE.txt` for the full license
# governing this code.
import os
import lbuild.environment
from lbuild.buildlog import BuildLog
from lbuild.parser import Parser
from lbuild.config import ConfigNode
from lbuild.utils import listify, listrify
class Builder:
def __init__(self, cwd=None, config=None, options=None):
if cwd is None:
cwd = os.getcwd() if config is None else os.path.dirname(config)
self.cwd = cwd
config = ConfigNode.from_file(config, fail_silent=True)
file_config = ConfigNode.from_filesystem(cwd)
if config:
config.extend_last(file_config)
else:
config = file_config if file_config else ConfigNode()
self.config = config
self.config.add_commandline_options(listify(options))
self.parser = Parser(self.config)
def _load_repositories(self, repos=None):
self.parser.load_repositories(listrify(repos))
self.parser.merge_repository_options()
def _load_modules(self):
if not len(self.parser._undefined_repo_options()):
self.parser.prepare_repositories()
self.parser.merge_module_options()
def _filter_modules(self, modules=None):
self.parser.config.modules.extend(listify(modules))
selected_modules = self.parser.find_modules(self.parser.config.modules)
return self.parser.resolve_dependencies(selected_modules)
def load(self, repos=None):
self._load_repositories(repos)
self._load_modules()
def validate(self, modules=None):
build_modules = self._filter_modules(modules)
self.parser.validate_modules(build_modules)
def build(self, outpath, modules=None, simulate=False):
build_modules = self._filter_modules(modules)
buildlog = BuildLog(outpath)
lbuild.environment.simulate = simulate
self.parser.build_modules(build_modules, buildlog)
return buildlog
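# A minimal usage sketch of the Builder API above (paths, repo files and module names are
# hypothetical, not from the source):
#   builder = Builder(cwd="/path/to/project")
#   builder.load(repos=["repo.lb"])
#   builder.validate(modules=["repo:module"])
#   buildlog = builder.build("build_output", modules=["repo:module"], simulate=True)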
| 1.929688
| 2
|
src/scheduler.py
|
positoro/radioRecorder
| 0
|
12781639
|
import pandas as pd
import json
import datetime
import subprocess
import localModule
#----
def atting_program(row):
ffmpeg_command_line = 'ffmpeg \
-loglevel error \
-fflags +discardcorrupt \
-i {0} \
-acodec copy \
-movflags faststart \
-vn \
-bsf:a aac_adtstoasc \
-t {1} \
-metadata date="{2}" \
-metadata genre="{3}" \
-metadata artist="{4}" \
-metadata title="{5}" \
{6}/{7}.m4a'.format(
localModule.DICTIONARY_OF_STATION_URL[row.service_id],
int((row.air_time + datetime.timedelta(seconds=localModule.MARGIN_SECOND*2)).total_seconds()),
row.start_time.strftime('%Y'),
'Radio Program',
row.service_name,
row.title,
localModule.FOLDER_OF_RECORD,
row.title+'-'+row.start_time.strftime('%Y%m%d%H%M'),
)
at_launch_time = row.start_time - datetime.timedelta(seconds=localModule.MARGIN_SECOND)
command_line = "echo 'sleep {0}; {1}' | at -t {2}".format(
at_launch_time.strftime('%S'),
ffmpeg_command_line,
at_launch_time.strftime('%Y%m%d%H%M'),
)
res = subprocess.check_output(command_line, shell=True)
#----
table = pd.read_csv(localModule.TABLE_FILE)
table['start_time'] = pd.to_datetime(table['start_time'])
table['end_time'] = pd.to_datetime(table['end_time'])
table['air_time'] = pd.to_timedelta(table['air_time'])
for row in table.itertuples():
atting_program(row)
| 2.4375
| 2
|
vpklib.py
|
victorvde/dota2_nohats
| 20
|
12781640
|
<filename>vpklib.py
from binary import Struct, Magic, Format, BaseArray, String, Blob, FakeWriteStream
from itertools import count
class VPK(Struct):
def fields(self):
self.F("magic", Magic(b"\x34\x12\xaa\x55"))
self.F("version", Format("I"))
assert self["version"].data == 1
self.F("index_size", Format("I"))
self.F("index", NulTerminatedArray(FileType))
def should_serialize(self, k, f):
return k != "magic"
def pack(self, s):
if self["index_size"].data == 0:
t = FakeWriteStream()
self["index"].pack(t)
self["index_size"].data = t.tell()
Struct.pack(self, s)
class NulTerminatedArray(BaseArray):
def unpack(self, s):
self.field = []
for i in count():
f = self.field_fun(i, self)
try:
f.unpack(s)
except StopIteration:
break
else:
self.field.append(f)
def pack(self, s):
BaseArray.pack(self, s)
s.write(b"\x00")
class FileType(Struct):
def fields(self):
t = self.F("type", String())
if t.data == "":
raise StopIteration
self.F("directory", NulTerminatedArray(Directory))
class Directory(Struct):
def fields(self):
p = self.F("path", String())
if p.data == "":
raise StopIteration
self.F("file", NulTerminatedArray(File))
class File(Struct):
def fields(self):
f = self.F("filename", String())
if f.data == "":
raise StopIteration
self.F("crc", Format("I"))
ps = self.F("preloadsize", Format("H"))
self.F("archive_index", Format("H"))
self.F("archive_offset", Format("I"))
self.F("archive_size", Format("I"))
self.F("terminator", Magic(b"\xFF\xFF"))
self.F("preload_data", Blob(ps.data))
def should_serialize(self, k, f):
return k not in ["terminator", "preload_data"]
| 2.5
| 2
|
pixelssl/task_template/__init__.py
|
charlesCXK/PixelSSL
| 223
|
12781641
|
from . import func as func_template
from . import data as data_template
from . import model as model_template
from . import criterion as criterion_template
from . import proxy as proxy_template
__all__ = [
'func_template',
'data_template',
'model_template',
'criterion_template',
'proxy_template',
]
| 1.28125
| 1
|
PythonFiles/SUWSS/TDMS/tdmsReader.py
|
VijayS02/Random-Programming-Items
| 0
|
12781642
|
<gh_stars>0
"""
import numpy as np
from nptdms import TdmsFile
from nptdms import tdms
filenameS = "24July2018_Intact_1.tdms"
tdms_file = TdmsFile(filenameS)
root_object = tdms_file.object()
for name, value in root_object.properties.items():
print("{0}: {1}".format(name, value))
print(root_object.properties)
"""
import scipy.io
import plotly.plotly as plt
import plotly.graph_objs as go
import numpy as np
mat = scipy.io.loadmat('24July2018_Intact_1.mat')
data = []
x = mat.get('VoltageAI0')[0][0][1][0][0]
time = []
for i in range(0,x):
time.append(i)
print(time)
data.append(mat.get('VoltageAI0')[0][0][0])
data.append(mat.get('VoltageAI1')[0][0][0])
data.append(mat.get('VoltageAI2')[0][0][0])
data.append(mat.get('VoltageAI3')[0][0][0])
print(data[0])
random_x = np.linspace(0,1,100)
random_y = np.random.randn(100)
trace = go.Scatter( x =random_x,y=random_y)
data1 = [trace]
plt.iplot(data1,filename="basic line")
| 2.265625
| 2
|
CSGLTrader.py
|
TerrenceHung/CSGLTrader
| 0
|
12781643
|
<gh_stars>0
import pickle
import time
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
from tkinter import messagebox, Tk
class TradeException(Exception):
"""
An exception to be raised when there is an error trying to make a trade.
"""
pass
def search_item(driver, item, own_item):
"""(WebDriver, str, bool) -> None
Searches for the item on the csgolounge search menu. If own_item is True,
the item will be moved to the have section of the search window.
"""
if 'keys' in item:
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').send_keys(
'Any Key')
elif 'Stattrak' in item:
# click the stattrak button and search for the item
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').send_keys(
item.lstrip('Stattrak '))
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/a[2]').click()
elif own_item and 'Knife' in item:
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').send_keys(
'Any Knife')
elif own_item:
# trying to trade an item that is not a key or a knife, click on Any Offers icon
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').send_keys(
'Any Offers')
else:
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').send_keys(item)
# wait for the search result to appear and click it
# sometimes clicks the first item before the search results even appear,
# need to wait for results to disappear first
    # waiting for the loading gif to appear doesn't work, sometimes it appears too quickly
# WebDriverWait(driver, 3).until(EC.presence_of_element_located(
# (By.ID, 'loading')))
time.sleep(1)
# now wait for an item to appear
WebDriverWait(driver, 5).until(EC.presence_of_element_located(
(By.XPATH, '/html/body/main/section[2]/div[2]/div[2]/div[2]/img'))).click()
if own_item:
# click the item that was added to the trade and move it to the have section
driver.find_element_by_xpath(
'/html/body/main/section[1]/div[1]/div/form[2]/div/div[2]/img').click()
driver.find_element_by_xpath(
'/html/body/main/section[1]/div[1]/div/form[2]/div/div[1]/div[2]').click()
# disable stattrak filter if it was clicked and clear search box
if 'Stattrak' in item:
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/a[2]').click()
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/input').clear()
def open_new_tab(driver, webelement, main_window):
"""(WebDriver, WebElement, str) -> None
Opens the web element in a new tab, and shifts focus to the new tab.
"""
actions = ActionChains(driver)
actions.key_down(Keys.CONTROL)
actions.key_down(Keys.SHIFT)
actions.click(webelement)
actions.key_up(Keys.CONTROL)
actions.key_up(Keys.SHIFT)
actions.perform()
driver.switch_to_window(main_window)
def close_tab(driver, main_window):
"""(WebDriver, str) -> None
Closes the current tab.
"""
driver.find_element_by_tag_name('body').send_keys(Keys.CONTROL + 'w')
driver.switch_to_window(main_window)
def send_trade(driver, item_to_trade, item_to_get):
"""(WebDriver, str, str) -> bool
Opens up a trade with a user and sends a trade with the specified items.
"""
success = True
try:
# open the Steam offer menu in a new tab
open_new_tab(driver, driver.find_element_by_link_text('Steam offer'),
driver.current_window_handle)
# continue if the trade menu appears
if 'Trade offer with' in driver.title:
try:
# send your item first
# expand game selection drop down menu
WebDriverWait(driver, 2).until(EC.presence_of_element_located(
(By.ID, 'appselect_activeapp'))).click()
# click on csgo, app id 730
driver.find_element_by_id('appselect_option_you_730_2').click()
# expand advanced filter options
# advanced filter button disappears really quickly after selecting game
# just delay 1 second before checking for it
time.sleep(1)
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.ID, 'filter_tag_show'))).click()
# click on all the show more buttons
show_more_buttons = [option for option in driver.find_elements_by_class_name(
'econ_tag_filter_collapsable_tags_showlink') if option.is_displayed()]
for next_button in show_more_buttons:
next_button.click()
add_to_trade(driver, item_to_trade)
# now get other person's item
# go to their inventory
driver.find_element_by_id('inventory_select_their_inventory').click()
# same thing with the advanced filter button, delay 1 sec
time.sleep(1)
# expand advanced filter options
WebDriverWait(driver, 10).until(EC.presence_of_element_located(
(By.ID, 'filter_tag_show'))).click()
# click on all the show more buttons
show_more_buttons = [option for option in driver.find_elements_by_class_name(
'econ_tag_filter_collapsable_tags_showlink') if option.is_displayed()]
for next_button in show_more_buttons:
next_button.click()
add_to_trade(driver, item_to_get)
# items are now in trade, click ready and send trade
driver.find_element_by_id('you_notready').click()
# if the user did not have the item in their inventory, the warning popup will
# appear
no_item_warning = [warning for warning in driver.find_elements_by_class_name(
'ellipsis') if warning.text == 'Warning']
# if there is no warning, send the trade
if not no_item_warning:
driver.find_element_by_id('trade_confirmbtn_text').click()
# wait for the trade confirmation box to appear
WebDriverWait(driver, 3).until(EC.presence_of_element_located((By.CLASS_NAME, 'newmodal')))
except:
# if anything at all went wrong in the trade then stop the current trade
# bad design, but whatever, it's the easiest way to do it
success = False
else:
success = False
close_tab(driver, driver.current_window_handle)
except NoSuchElementException:
# for some reason there might be no Steam Offer button
success = False
close_tab(driver, driver.current_window_handle)
return success
def add_to_trade(driver, item):
"""(WebDriver, str) -> None
Searches for the item in the steam trade menu and adds it to the trade.
"""
if 'keys' not in item:
# get the web elements for the filters that are needed
filters = {}
for next_filter in driver.find_elements_by_class_name('econ_tag_filter_category_label'):
if next_filter.is_displayed():
if next_filter.text == 'Type':
filters['type'] = next_filter
elif next_filter.text == 'Category':
filters['category'] = next_filter
elif next_filter.text == 'Exterior':
filters['exterior'] = next_filter
# click all the check boxes for the type except container and key so cases do not show up
# in the search results
        # get the parent tag so the boxes can be clicked
type_options = filters['type'].find_element_by_xpath('..')
filter_clicked = False
for next_option in type_options.find_elements_by_class_name('econ_tag_filter_container'):
if ('Container' not in next_option.find_element_by_class_name(
'econ_tag_filter_label').text and 'Key' not in next_option
.find_element_by_class_name('econ_tag_filter_label').text):
# click the box if it is not container or key
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
if not filter_clicked:
raise TradeException
# check off the exterior options
exterior_options = filters['exterior'].find_element_by_xpath('..')
filter_clicked = False
# now have the field for all available exterior options, go through all the options
# and check off the correct box
# if looking for a vanilla knife, then check off the "Not Painted" box
if item.find('(') == -1:
for next_option in exterior_options.find_elements_by_class_name(
'econ_tag_filter_container'):
if 'Not Painted' in next_option.find_element_by_class_name(
'econ_tag_filter_label').text:
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
else:
for next_option in exterior_options.find_elements_by_class_name(
'econ_tag_filter_container'):
# slicing item like that will give the item condition
if item[item.find('(') + 1:-1] in next_option.find_element_by_class_name(
'econ_tag_filter_label').text:
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
if not filter_clicked:
raise TradeException
filter_clicked = False
if 'Stattrak' in item:
category_options = filters['category'].find_element_by_xpath('..')
for next_option in category_options.find_elements_by_class_name(
'econ_tag_filter_container'):
if 'StatTrak' in next_option.find_element_by_class_name(
'econ_tag_filter_label').text:
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
if not filter_clicked:
raise TradeException
# strip the Stattrak and condition from the item name, then search for it
driver.find_element_by_id('filter_control').send_keys(
item.lstrip('Stattrak ').split(' (')[0])
else:
if 'Knife' not in item:
category_options = filters['category'].find_element_by_xpath('..')
for next_option in category_options.find_elements_by_class_name(
'econ_tag_filter_container'):
if 'Normal' in next_option.find_element_by_class_name(
'econ_tag_filter_label').text:
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
if not filter_clicked:
raise TradeException
# strip the condition from the item name, then search for it
driver.find_element_by_id('filter_control').send_keys(item.split(' (')[0])
# get the html component for the 4x4 item grid
# check for all tags with class name "inventory_page" and only get the one that is visible
item_grid = [grid for grid in driver.find_elements_by_class_name('inventory_page') if
grid.is_displayed()]
# inside this grid, add the first item, all check boxes should have filtered out anything
# unneeded
item_holders = [item for item in item_grid[0].find_elements_by_class_name('itemHolder') if
item.is_displayed()]
try:
actions = ActionChains(driver)
actions.double_click(item_holders[0].find_element_by_xpath('./div'))
actions.perform()
except IndexError:
# user might have a trade active on csgl but not actually have the item in their
# inventory
pass
else:
# get element with name Type and then get the parent tag so boxes can be checked
type_filter = [filter for filter in driver.find_elements_by_class_name(
'econ_tag_filter_category_label') if filter.is_displayed() and filter.text == 'Type']
type_filter = type_filter[0].find_element_by_xpath('..')
filter_clicked = False
for next_option in type_filter.find_elements_by_class_name('econ_tag_filter_container'):
if 'Key' in next_option.find_element_by_class_name('econ_tag_filter_label').text:
next_option.find_element_by_xpath('./input').click()
filter_clicked = True
if not filter_clicked:
raise TradeException
# find the number of keys to add to the trade
num_of_keys = int(item.rstrip(' keys'))
keys_added = 0
while keys_added < num_of_keys:
# find all tags with class name "itemHolder", and if it is visible then it is a key
# that can be added to the trade
# will hold a max of 16 keys
# this also grabs tags with class name "itemHolder trade_slot", need to filter these out
visible_keys = [key for key in driver.find_elements_by_class_name('itemHolder')
if key.is_displayed() and key.get_attribute("class") == 'itemHolder']
# add all the keys in visible_keys
for next_key in visible_keys:
if keys_added == num_of_keys:
break
else:
actions = ActionChains(driver)
actions.double_click(next_key.find_element_by_xpath('./div'))
actions.perform()
keys_added += 1
# added all the visible keys, so now get a new set of visible keys
# click on the next page button
driver.find_element_by_id('pagebtn_next').click()
time.sleep(1)
driver.find_element_by_id('filter_control').clear()
if __name__ == '__main__':
# create a root window for tkinter then hide it because it looks bad
root = Tk()
root.withdraw()
# open up trade history
try:
with open('tradehistory', 'rb') as trades:
trade_history = pickle.load(trades)
except EOFError:
# if tradehistory file is blank, then set to an empty dictionary
trade_history = {}
# structure of trade_history:
# {item_to_get: {steam_id: [item_to_trade]}}
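    # illustrative example with hypothetical values (not from the source):
    #   {'AWP | Asiimov (Field-Tested)': {'76561198000000000': ['10 keys']}}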
# open up trades.txt and get the trade info
with open('trades.txt') as user_trade:
chromedriver_path = user_trade.readline().split('\n')[0]
MAX_NUM_TRADES = int(user_trade.readline().split('\n')[0])
print('Number of trades to send: ', MAX_NUM_TRADES)
item_to_get = user_trade.readline().split('\n')[0]
print('What you are looking for: ', item_to_get)
item_to_trade = user_trade.readline().split('\n')[0]
print('What you are trading away: ', item_to_trade)
# add item_to_get to trade_history if it is not in there already
if item_to_get not in trade_history:
trade_history[item_to_get] = {}
# open up steam login menu on csgolounge
driver = webdriver.Chrome(chromedriver_path)
driver.maximize_window()
driver.get('http://csgolounge.com/')
try:
assert 'CSGO Lounge' in driver.title
except AssertionError:
driver.quit()
messagebox.showerror('Error', 'Please try again')
# click login
driver.find_element_by_xpath('/html/body/header/div[1]/a[2]').click()
# wait for the pop up to appear then click Steam button
WebDriverWait(driver, 5).until(EC.presence_of_element_located(
(By.XPATH, '/html/body/div[4]/div/div[2]/form/div/div[6]/a'))).click()
# only continue once the user has logged into csgolounge
logged_in = False
while not logged_in:
if 'CSGO Lounge' in driver.title:
logged_in = True
# user is now logged into csgl, click search
driver.find_element_by_xpath('/html/body/header/nav/a[6]/img').click()
# add user's item to trade menu first
search_item(driver, item_to_trade, True)
# click all items button to reset item menu
driver.find_element_by_xpath('/html/body/main/section[2]/div[1]/form/a[1]').click()
time.sleep(1)
# wait for an item to appear
WebDriverWait(driver, 5).until(EC.presence_of_element_located(
(By.XPATH, '/html/body/main/section[2]/div[2]/div[2]/div[2]/img')))
# now add the item the user is looking for
search_item(driver, item_to_get, False)
# search for the item
driver.find_element_by_xpath('/html/body/main/section[1]/div[1]/a').click()
# send the max amt of trade offers, then exit
trades_sent = 0
while trades_sent < MAX_NUM_TRADES:
# get a list of all 20 trades on the screen
potential_trades = driver.find_elements_by_class_name('tradeheader')
main_window = driver.current_window_handle
# now have the list of trades, need to open up each trade and then check for user ids and
# stuff
for next_trade in potential_trades:
open_new_tab(driver, next_trade.find_element_by_xpath('./a[2]/span/b'), main_window)
# get their steam id from their csgl profile link
# sometimes selenium throws a NoSuchElementException even though their profile link is
# there so have this loop here to get the link even if an exception is thrown
profile_found = False
while not profile_found:
try:
# everything after the = sign in their csgl profile link is their steam id
steam_id = driver.find_element_by_xpath(
'/html/body/main/section[1]/div[1]/div[1]/div/a').get_attribute(
'href').split('=')[1]
profile_found = True
except NoSuchElementException:
pass
# check if this trade has been attempted with this person already
# if not, then proceed with sending a trade
if steam_id not in trade_history[item_to_get]:
trade_history[item_to_get][steam_id] = []
if send_trade(driver, item_to_trade, item_to_get):
trades_sent += 1
trade_history[item_to_get][steam_id].append(item_to_trade)
elif item_to_trade not in trade_history[item_to_get][steam_id]:
if send_trade(driver, item_to_trade, item_to_get):
trades_sent += 1
trade_history[item_to_get][steam_id].append(item_to_trade)
else:
close_tab(driver, main_window)
if trades_sent == MAX_NUM_TRADES:
break
# get all the page buttons, click the next one
pages = driver.find_element_by_class_name('simplePagerNav')
# find the one with class "currentPage", get the number, then click the button with the
# next number
next_page = int(pages.find_element_by_class_name('currentPage').text) + 1
try:
driver.find_element_by_link_text(str(next_page)).click()
except NoSuchElementException:
# no more pages to go through
trades_sent = MAX_NUM_TRADES
# write to tradehistory file
with open('tradehistory', 'wb') as trades:
pickle.dump(trade_history, trades)
driver.close()
| 2.84375
| 3
|
revscoring/revscoring/languages/tests/test_tamil.py
|
yafeunteun/wikipedia-spam-classifier
| 2
|
12781644
|
<filename>revscoring/revscoring/languages/tests/test_tamil.py<gh_stars>1-10
import pickle
from nose.tools import eq_
from .. import tamil
from .util import compare_extraction
BAD = [
"பூல்",
"பூலு",
"கூதி",
"தேவுடியாள்",
"தேவடியாள்",
"ஓத்த",
"ஓத்தா",
"சுன்னி",
"சுண்ணி",
"ஓல்",
"ஓழ்",
"ஓலு",
"ஓழு",
"ஓழி",
"ஒம்மால",
"சூத்து",
"முண்ட",
"முண்டை",
"புண்ட",
"புண்டை",
"தாயோளி",
"ஓல்மாரி",
"ஓழ்மாரி",
"புழுத்தி"
]
INFORMAL = [
"பொட்டை"
]
OTHER = [
"""
இந்தோனேசிய ரூபாய் அல்லது ரூபியா (rupiah, Rp)
இந்தோனேசியாவின் அலுவல்முறை நாணயம் ஆகும். இந்தோனேசிய
வங்கியால் வெளியிடப்பட்டு கட்டுப்படுத்தப்படும் இதன் ஐ.எசு.ஓ 4217
நாணயக் குறியீடு (ஐடிஆர்) IDR ஆகும். "ரூபியா" என்ற பெயர்
இந்துத்தானிய சொல்லான ரூப்யா (روپیہ),(रुपया), மற்றும் சமசுகிருத
வேரிலிருந்து (रूप्य; வார்ப்பு வெள்ளி) வந்துள்ளது. பேச்சு வழக்கில்
இந்தோனேசியர்கள் வெள்ளி என்பதற்கான இந்தோனேசியச்
சொல்லான "பெராக்" என்பதையும் பயன்படுத்துகின்றனர். ஒவ்வொரு
ரூபியாவும் 100 சென்களாக பிரிக்கப்பட்டுள்ளது; பணவீக்கத்தால்
சென் நாணயங்களும் வங்கித்தாள்களும் புழக்கத்திலிருந்து மறைந்து
விட்டன.
ரியாயு தீவுகளும் நியூ கினியின் இந்தோனேசியப் பகுதியும் முன்பு
தங்களுக்கேயான ரூபியாவை பயன்படுத்தி வந்தன; ஆனால் முறையே
1964, 1971ஆம் ஆண்டுகளில் இருந்து இங்கும் தேசிய ரூபியாவே
செயலாக்கப்பட்டது.
"""
]
def test_badwords():
compare_extraction(tamil.badwords.revision.datasources.matches,
BAD, OTHER)
eq_(tamil.badwords, pickle.loads(pickle.dumps(tamil.badwords)))
def test_informals():
compare_extraction(tamil.informals.revision.datasources.matches,
INFORMAL, OTHER)
eq_(tamil.informals, pickle.loads(pickle.dumps(tamil.informals)))
| 2.203125
| 2
|
test/graph_test.py
|
luisalves05/dsa-python
| 1
|
12781645
|
import sys
sys.path.append('..')
import unittest
from graphs.Graph import Graph
class TestGraph(unittest.TestCase):
def setUp(self):
self.G = Graph(5)
self.G.add_edge(1, 2)
self.G.add_edge(1, 3)
self.G.add_edge(1, 4)
self.G.add_edge(2, 4)
self.G.add_edge(3, 4)
def testing_len(self):
self.assertEqual(self.G.len(), 5)
def testing_nodes_adjacents(self):
self.assertEqual(self.G[4], [1, 2, 3])
self.assertEqual(self.G[2], [1, 4])
self.assertEqual(self.G[0], [])
def testing_degree(self):
self.assertEqual(self.G.degree(4), 3)
self.assertEqual(self.G.max_degree(), 3)
if __name__ == "__main__":
unittest.main()
| 3.109375
| 3
|
wherefore/KafkaMessageTracker.py
|
ess-dmsc/wherefore
| 0
|
12781646
|
from threading import Thread
from typing import Union, Dict, Optional, Tuple
from datetime import datetime, timedelta, timezone
from enum import Enum, auto
from kafka import KafkaConsumer, TopicPartition
from queue import Queue, Empty, Full
from wherefore.DataSource import DataSource
from wherefore.Message import Message
from copy import copy
CHECK_FOR_MSG_INTERVAL = 500
UPDATE_STATUS_INTERVAL = timedelta(milliseconds=50)
class PartitionOffset(Enum):
NEVER = auto()
BEGINNING = auto()
END = auto()
class HighLowOffset:
def __init__(self, low, high, lag=-1):
self.low = low
self.lag = lag
self.high = high
def thread_function(consumer: KafkaConsumer, stop: Union[datetime, int], in_queue: Queue, out_queue: Queue, topic_partition):
known_sources: Dict[bytes, DataSource] = {}
start_time = datetime.now(tz=timezone.utc)
update_timer = datetime.now(tz=timezone.utc)
while True:
messages_ctr = 0
for kafka_msg in consumer:
new_msg = Message(kafka_msg)
if type(stop) is int and new_msg.offset > stop:
pass
elif type(stop) is datetime and new_msg.timestamp is not None and new_msg.timestamp > stop:
pass
elif type(stop) is datetime and new_msg.timestamp is None and new_msg.kafka_timestamp > stop:
pass
else:
if not new_msg.source_hash in known_sources:
known_sources[new_msg.source_hash] = DataSource(new_msg.source_name, new_msg.message_type, start_time)
known_sources[new_msg.source_hash].process_message(new_msg)
messages_ctr += 1
if messages_ctr == CHECK_FOR_MSG_INTERVAL:
break
if not in_queue.empty():
new_msg = in_queue.get()
if new_msg == "exit":
break
now = datetime.now(tz=timezone.utc)
if now - update_timer > UPDATE_STATUS_INTERVAL:
update_timer = now
try:
out_queue.put(copy(known_sources), block=False)
low_offset = consumer.beginning_offsets([topic_partition, ])[topic_partition]
high_offset = consumer.end_offsets([topic_partition, ])[topic_partition]
out_queue.put(HighLowOffset(low_offset, high_offset))
except Full:
pass # Do nothing
consumer.close(True)
class KafkaMessageTracker:
def __init__(self, broker: str, topic: str, partition: int = -1, start: Tuple[Union[int, datetime, PartitionOffset], Optional[int]] = PartitionOffset.END, stop: Union[int, datetime, PartitionOffset] = PartitionOffset.NEVER):
self.to_thread = Queue()
self.from_thread = Queue(maxsize=100)
consumer = KafkaConsumer(bootstrap_servers=broker, fetch_max_bytes=52428800 * 6, consumer_timeout_ms=100)
existing_topics = consumer.topics()
self.current_msg = None
self.current_offset_limits = HighLowOffset(-1, -1)
if topic not in existing_topics:
raise RuntimeError(f"Topic \"{topic}\" does not exist.")
existing_partitions = consumer.partitions_for_topic(topic)
if partition == -1:
partition = existing_partitions.pop()
elif partition not in existing_partitions:
raise RuntimeError(f"Partition {partition} for topic \"{topic}\" does not exist.")
topic_partition = TopicPartition(topic, partition)
consumer.assign([topic_partition, ])
first_offset = consumer.beginning_offsets([topic_partition])[topic_partition]
last_offset = consumer.end_offsets([topic_partition])[topic_partition]
origin_offset = None
offset_to_offset = start[1]
if start[0] == PartitionOffset.BEGINNING:
origin_offset = first_offset
# consumer.seek_to_beginning()
# if type(start[1]) == int and start[1] > 0 and first_offset + start[1] <= last_offset:
# consumer.seek(partition=topic_partition, offset=first_offset + start[1])
elif start[0] == PartitionOffset.END or start == PartitionOffset.NEVER:
origin_offset = last_offset
# consumer.seek_to_end()
# if type(start[1]) == int and start[1] < 0 and last_offset + start[1] >= first_offset:
# consumer.seek(partition=topic_partition, offset=first_offset + start[1])
elif type(start[0]) is int:
if first_offset > start[0]:
origin_offset = first_offset
# consumer.seek_to_beginning()
elif last_offset < start[0]:
origin_offset = last_offset
else:
origin_offset = start[0]
# consumer.seek_to_end()
# else:
# consumer.seek(partition=topic_partition, offset=start[0])
elif type(start[0]) is datetime:
found_offsets = consumer.offsets_for_times({topic_partition: int(start[0].timestamp() * 1000)})
if found_offsets[topic_partition] is None:
origin_offset = last_offset
else:
origin_offset = found_offsets[topic_partition].offset
# if type(start[1]) == int:
# used_offset += start[1]
# consumer.seek(partition=topic_partition, offset=used_offset)
else:
raise RuntimeError("Unknown start offset configured.")
if offset_to_offset is not None:
origin_offset += offset_to_offset
if origin_offset < first_offset:
origin_offset = first_offset
elif origin_offset > last_offset:
origin_offset = last_offset
consumer.seek(partition=topic_partition, offset=origin_offset)
        self.thread = Thread(target=thread_function, daemon=True, kwargs={"consumer": consumer, "stop": stop, "in_queue": self.to_thread, "out_queue": self.from_thread, "topic_partition": topic_partition})
self.thread.start()
def stop_thread(self):
self.to_thread.put("exit")
def __del__(self):
self.stop_thread()
def _get_messages(self):
while not self.from_thread.empty():
try:
current_msg = self.from_thread.get(block=False)
if type(current_msg) is dict:
self.current_msg = current_msg
elif type(current_msg) is HighLowOffset:
self.current_offset_limits = current_msg
except Empty:
return
def get_latest_values(self):
self._get_messages()
return self.current_msg
def get_current_edge_offsets(self) -> HighLowOffset:
self._get_messages()
return self.current_offset_limits
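# A minimal usage sketch (broker address, topic and partition are hypothetical assumptions):
#   tracker = KafkaMessageTracker("localhost:9092", "some_topic", partition=0,
#                                 start=(PartitionOffset.END, None))
#   sources = tracker.get_latest_values()        # dict of source hash -> DataSource, or None
#   edges = tracker.get_current_edge_offsets()   # HighLowOffset with .low and .high
#   tracker.stop_thread()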
| 2.40625
| 2
|
tests/test_mgxs_openmc/test_mgxs_openmc.py
|
lsder/OpenMOC
| 97
|
12781647
|
#!/usr/bin/env python
import os
import sys
sys.path.insert(0, os.pardir)
sys.path.insert(0, os.path.join(os.pardir, 'openmoc'))
from testing_harness import MultiSimTestHarness
import openmoc
try:
import openmc.openmoc_compatible
import openmc.mgxs
except:
print("OpenMC could not be imported, it's required for loading MGXS files")
class LoadMGXSTestHarness(MultiSimTestHarness):
"""Load a variety of OpenMC MGXS libraries."""
def __init__(self):
super(LoadMGXSTestHarness, self).__init__()
self.azim_spacing = 0.12
self.max_iters = 10
self.keffs = []
self.mgxs_files = ['mgxs_isotropic',
'mgxs_transport_corrected',
'mgxs_consistent',
'mgxs_consistent_nuscatter',
'mgxs_materials',
'mgxs_angular_legendre']
# mgxs_angular_histogram currently not supported
# mgxs_nuclide should be redone with the latest version of openmc
# mgxs by distribcell, universe and mesh also not supported
def _create_geometry(self):
pass
def _create_trackgenerator(self):
pass
def _generate_tracks(self):
pass
def _create_solver(self):
pass
def _run_openmoc(self):
"""Run an OpenMOC calculation with each library."""
for mgxs_file in self.mgxs_files:
# Initialize OpenMC multi-group cross section library for a pin cell
mgxs_lib = openmc.mgxs.Library.load_from_file(filename=mgxs_file,
directory='.')
# Create an OpenMOC Geometry from the OpenMOC Geometry
openmoc_geometry = \
openmc.openmoc_compatible.get_openmoc_geometry(mgxs_lib.geometry)
# Load cross section data
openmoc_materials = \
openmoc.materialize.load_openmc_mgxs_lib(mgxs_lib, openmoc_geometry)
# Initialize FSRs
openmoc_geometry.initializeFlatSourceRegions()
# Initialize an OpenMOC TrackGenerator
track_generator = openmoc.TrackGenerator(
openmoc_geometry, self.num_azim, self.azim_spacing)
track_generator.generateTracks()
# Initialize an OpenMOC Solver
self.solver = openmoc.CPUSolver(track_generator)
self.solver.setConvergenceThreshold(self.tolerance)
self.solver.setNumThreads(self.num_threads)
# Run eigenvalue calculation and store results
self.solver.computeEigenvalue(max_iters=self.max_iters)
self.keffs.append(self.solver.getKeff())
def _get_results(self, num_iters=True, keff=True, fluxes=True,
num_fsrs=False, num_tracks=False, num_segments=False,
hash_output=False):
"""Write a string with the results"""
outstr = ''
# Write out the mgxs file name eigenvalues from each simulation
for file, keff in zip(self.mgxs_files, self.keffs):
outstr += 'File: {0}\tkeff: {1:12.5E}\n'.format(file, keff)
return outstr
if __name__ == '__main__':
harness = LoadMGXSTestHarness()
harness.main()
| 1.71875
| 2
|
python/class/foo.py
|
trammell/test
| 0
|
12781648
|
#!/usr/bin/env python2.4
"""
"""
class Foo(object):
bar = {}
bar['x'] = "apple"
bar['y'] = "banana"
def __call__(self):
print self.bar['x']
f = Foo()
f()
| 3.6875
| 4
|
django/localConflict/breaking_news/views.py
|
martinez-zea/localconflict
| 1
|
12781649
|
<gh_stars>1-10
from breaking_news.models import *
from django.shortcuts import render_to_response, get_object_or_404
from django.http import HttpResponseRedirect, HttpResponse
from django.template import RequestContext
def index(request):
n_image = new.objects.filter(image__startswith='img_up').order_by('-when')[:3]
n_text = new.objects.filter(image__startswith='').order_by('-when')[:12]
return render_to_response('breaking_news/index.html', {'with_img' : n_image,
'without_img': n_text,}
, context_instance=RequestContext(request))
def allNews(request):
all_news = new.objects.all().order_by('-when')
return render_to_response('breaking_news/all.html',
{'all_news' : all_news},
context_instance=RequestContext(request))
def add(request):
return HttpResponse("@ add")
def saved(request):
return HttpResponse("saved!")
def search(request, death_id):
n = get_object_or_404(new, pk=death_id)
return render_to_response('breaking_news/detail.html', {'new':n}, context_instance=RequestContext(request))
| 2.125
| 2
|
cashweb/server/main/__init__.py
|
jbool24/CashWEB
| 0
|
12781650
|
# ./server/main/__init__.py
| 1.015625
| 1
|