| max_stars_repo_path (string, 3-269 chars) | max_stars_repo_name (string, 4-119 chars) | max_stars_count (int64, 0-191k) | id (string, 1-7 chars) | content (string, 6-1.05M chars) | score (float64, 0.23-5.13) | int_score (int64, 0-5) |
|---|---|---|---|---|---|---|
egg/hatch.py
|
TheMartianObserver/nsimd
| 247
|
12781151
|
<filename>egg/hatch.py
# Copyright (c) 2021 Agenium Scale
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# What does this script do?
# ----------------------
#
# This script generates the code for each architecture, the base C/C++ APIs
# and the advanced C++ API. Each part to be generated is handled by a
# dedicated `gen_*.py` file. This script simply calls the `doit` function of
# each `gen_*.py` module. Names are self-explanatory.
#
# -----------------------------------------------------------------------------
# The first thing we do is check whether Python 3 is used
import sys
if sys.version_info[0] < 3:
print('Only Python 3 is supported')
sys.exit(1)
# -----------------------------------------------------------------------------
# Imports
import argparse
import os
import re
import common
import gen_archis
import gen_base_apis
import gen_adv_cxx_api
import gen_adv_c_api
import gen_tests
import gen_src
import gen_doc
import gen_friendly_but_not_optimized
import gen_modules
import gen_scalar_utilities
import get_sleef_code
# Dir of this script
script_dir = os.path.dirname(__file__)
if script_dir == '':
script_dir = '.'
# -----------------------------------------------------------------------------
# Arguments parsing
def parse_args(args):
def parse_simd(value):
## Split .simd now
values = {
'x86': common.x86_simds,
'arm': common.arm_simds,
'ppc': common.ppc_simds,
'all': common.simds,
}.get(value, value.split(','))
## Check that all simd are valid
ret = []
for simd in values:
if simd not in common.simds:
raise argparse.ArgumentTypeError(
"SIMD '{}' not found in {}".format(simd, common.simds))
ret += common.simds_deps[simd]
return list(set(ret))
def parse_match(value):
if value is None:
return None
else:
return re.compile(value)
# In practice, we either generate everything or everything except the tests,
# and we never change the default directories for code generation. So we
# remove unused options and regroup some of them into --library.
parser = argparse.ArgumentParser(
description='This is the NSIMD generation script.')
parser.add_argument('--force', '-f', action='store_true',
help='Generate all files even if they already exist')
parser.add_argument('--list-files', '-L', action='store_true',
default=False,
help='List files that will be created by hatch.py')
parser.add_argument('--all', '-A', action='store_true',
help='Generate code for the library and its tests')
parser.add_argument('--library', '-l', action='store_true',
help='Generate code of the library (C and C++ APIs)')
parser.add_argument('--sleef', '-s', action='store_true', default=False,
help='Compile Sleef')
parser.add_argument('--tests', '-t', action='store_true',
help='Generate tests in C and C++')
parser.add_argument('--doc', '-d', action='store_true',
help='Generate all documentation')
parser.add_argument('--enable-clang-format', '-F', action='store_false',
default=True,
help='Disable Clang Format (mainly for speed on Windows)')
parser.add_argument('--sve-emulate-bool', action='store_true',
default=False,
help='Use normal SVE vector to emulate predicates.')
parser.add_argument('--simd', '-D', type=parse_simd, default='all',
help='List of SIMD extensions (separated by a comma)')
parser.add_argument('--match', '-m', type=parse_match, default=None,
help='Regex used to filter generation on operator names')
parser.add_argument('--verbose', '-v', action = 'store_true', default=None,
help='Enable verbose mode')
parser.add_argument('--simple-license', action='store_true', default=False,
help='Put a simple copyright statement instead of the whole license')
opts = parser.parse_args(args)
# When -L has been chosen, we want to list all files, so we have to turn the
# other parameters to True
if opts.list_files:
opts.library = True
opts.tests = True
opts.force = True
opts.doc = True
# We set these variables here because all the code depends on them, and we
# want to keep the possibility of changing them in the future
opts.archis = opts.library
opts.base_apis = opts.library
opts.adv_cxx_api = opts.library
opts.adv_c_api = opts.library
opts.friendly_but_not_optimized = opts.library
opts.src = opts.library
opts.scalar_utilities = opts.library
opts.sleef_version = '3.5.1'
opts.include_dir = os.path.join(script_dir, '..', 'include', 'nsimd')
opts.tests_dir = os.path.join(script_dir, '..', 'tests')
opts.src_dir = os.path.join(script_dir, '..', 'src')
return opts
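# Example (hypothetical invocation, assuming the SIMD lists provided by the
# common module): generating only the library code for x86 would look like
#     opts = parse_args(['--library', '--simd', 'x86'])
# opts.simd then holds the x86 SIMD extensions plus their dependencies, and
# opts.archis, opts.base_apis, etc. are all set to True.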
# -----------------------------------------------------------------------------
# Entry point
def main():
opts = parse_args(sys.argv[1:])
opts.script_dir = script_dir
opts.modules_list = None
opts.platforms_list = None
## Gather all SIMD dependencies
opts.simd = common.get_simds_deps_from_opts(opts)
common.myprint(opts, 'List of SIMD: {}'.format(', '.join(opts.simd)))
if opts.archis == True or opts.all == True:
gen_archis.doit(opts)
if opts.base_apis == True or opts.all == True:
gen_base_apis.doit(opts)
if opts.adv_cxx_api == True or opts.all == True:
gen_adv_cxx_api.doit(opts)
if opts.adv_c_api == True or opts.all == True:
gen_adv_c_api.doit(opts)
if opts.tests == True or opts.all == True:
gen_tests.doit(opts)
if opts.src == True or opts.all == True:
gen_src.doit(opts)
if opts.sleef == True or opts.all == True:
get_sleef_code.doit(opts)
if opts.scalar_utilities == True or opts.all == True:
gen_scalar_utilities.doit(opts)
if opts.friendly_but_not_optimized == True or opts.all == True:
gen_friendly_but_not_optimized.doit(opts)
gen_modules.doit(opts) # this must be here after all NSIMD
if opts.doc == True or opts.all == True:
gen_doc.doit(opts)
if __name__ == '__main__':
main()
| 2.078125
| 2
|
aim/ql/tree/__init__.py
|
VkoHov/aim
| 1
|
12781152
|
<gh_stars>1-10
from aim.ql.tree.abstract_syntax_tree import AbstractSyntaxTree
from aim.ql.tree.binary_expression_tree import BinaryExpressionTree
| 0.933594
| 1
|
Author/api.py
|
CMPUT404-Fa21-Organization/CMPUT404-Project-Social-Distribution
| 3
|
12781153
|
from rest_framework.decorators import api_view, authentication_classes, permission_classes
import requests
from rest_framework.response import Response
from .models import Author, Followers
from rest_framework import status
from .serializers import AuthorSerializer, FollowersSerializer
from permissions import CustomAuthentication, AccessPermission
from django.core.paginator import Paginator
################ FOLLOWERS API ##############################
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def APIGetFollowers(request, auth_pk):
followersObj = Followers.objects.get(auth_pk = auth_pk)
authors = FollowersSerializer(followersObj)
return Response(authors.data, status=status.HTTP_200_OK)
@api_view(['GET','PUT','DELETE'])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def ForeignAuthorAPI(request, auth_pk, fr_auth_pk):
followersObj = Followers.objects.get(auth_pk = auth_pk)
if request.method == "GET":
detail = False
foreign_author = Author.objects.get(pk = fr_auth_pk)
if foreign_author in followersObj.items.all():
detail = True
response_dict = {
"detail": detail
}
return Response(response_dict)
elif request.method == "PUT":
foreign_author = Author.objects.get(pk = fr_auth_pk)
followersObj.items.add(foreign_author)
elif request.method == "DELETE":
foreign_author = Author.objects.get(pk = fr_auth_pk)
followersObj.items.remove(foreign_author)
authors = FollowersSerializer(followersObj)
return Response(authors.data, status=status.HTTP_200_OK)
###############################################################
@api_view(['GET',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def AuthorsListAPIView(request):
authors = Author.objects.filter(url__icontains = "linkedspace")
page_number = request.GET.get('page')
if 'size' in request.GET:
page_size = request.GET.get('size')
else:
page_size = 5
paginator = Paginator(authors, page_size)
page_obj = paginator.get_page(page_number)
serializer = AuthorSerializer(page_obj.object_list, many=True)
response_dict = {
"type": "authors",
"items": serializer.data
}
return Response(response_dict)
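# Illustrative request (hypothetical host and values, not taken from the
# project): GET /authors/?page=2&size=10 returns the second page of ten
# authors serialized as {"type": "authors", "items": [...]}.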
@api_view(['GET', 'POST',])
@authentication_classes([CustomAuthentication])
@permission_classes([AccessPermission])
def AuthorDetailAPIView(request, auth_pk):
try:
author = Author.objects.get(pk=auth_pk)
except Author.DoesNotExist:
return Response(status=status.HTTP_404_NOT_FOUND)
if request.method == "GET":
serializer = AuthorSerializer(instance=author)
return Response(serializer.data, status=status.HTTP_200_OK)
if request.method == "POST":
if 'displayName' in request.data.keys():
author.displayName = request.data['displayName']
if 'email' in request.data.keys():
if not len(Author.objects.filter(email=request.data['email'])):
author.email = request.data['email'] # update email field
else:
# email already exists
serializer = AuthorSerializer(author)
return Response(serializer.data, status=status.HTTP_400_BAD_REQUEST)
if 'github' in request.data.keys():
github_user = request.data['github']
author.github = f'http://github.com/{github_user}'
author.save()
serializer = AuthorSerializer(author)
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(status=status.HTTP_400_BAD_REQUEST)
@api_view(['GET',])
def AuthorsConnection(request, auth_id=None):
data = []
team3 = requests.get('https://social-dis.herokuapp.com/authors', auth=('socialdistribution_t03','c404t03'))
if team3.status_code == 200:
data.append(team3.json())
team15 = requests.get('https://unhindled.herokuapp.com/service/authors/', auth=('connectionsuperuser','404connection'))
if team15.status_code == 200:
data.append(team15.json())
team17 = requests.get('https://cmput404f21t17.herokuapp.com/service/connect/public/author/', auth=('<PASSWORD>','123456'))
if team17.status_code == 200:
data.append(team17.json())
return Response({'connection': data})
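# The aggregated response has the shape
# {'connection': [<team3 json>, <team15 json>, <team17 json>]},
# where a team's entry is omitted whenever its endpoint does not answer
# with HTTP 200.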
| 2.15625
| 2
|
dygraph/core/val.py
|
MRXLT/PaddleSeg
| 56
|
12781154
|
<filename>dygraph/core/val.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
import tqdm
import cv2
from paddle.fluid.dygraph.base import to_variable
import paddle.fluid as fluid
import dygraph.utils.logger as logger
from dygraph.utils import ConfusionMatrix
from dygraph.utils import Timer, calculate_eta
def evaluate(model,
eval_dataset=None,
model_dir=None,
num_classes=None,
ignore_index=255,
iter_id=None):
ckpt_path = os.path.join(model_dir, 'model')
para_state_dict, opti_state_dict = fluid.load_dygraph(ckpt_path)
model.set_dict(para_state_dict)
model.eval()
total_iters = len(eval_dataset)
conf_mat = ConfusionMatrix(num_classes, streaming=True)
logger.info(
"Start to evaluating(total_samples={}, total_iters={})...".format(
len(eval_dataset), total_iters))
timer = Timer()
timer.start()
for iter, (im, im_info, label) in tqdm.tqdm(
enumerate(eval_dataset), total=total_iters):
im = to_variable(im)
pred, _ = model(im)
pred = pred.numpy().astype('float32')
pred = np.squeeze(pred)
for info in im_info[::-1]:
if info[0] == 'resize':
h, w = info[1][0], info[1][1]
pred = cv2.resize(pred, (w, h), cv2.INTER_NEAREST)
elif info[0] == 'padding':
h, w = info[1][0], info[1][1]
pred = pred[0:h, 0:w]
else:
raise Exception("Unexpected info '{}' in im_info".format(
info[0]))
pred = pred[np.newaxis, :, :, np.newaxis]
pred = pred.astype('int64')
mask = label != ignore_index
conf_mat.calculate(pred=pred, label=label, ignore=mask)
_, iou = conf_mat.mean_iou()
time_iter = timer.elapsed_time()
remain_iter = total_iters - iter - 1
logger.debug(
"[EVAL] iter_id={}, iter={}/{}, iou={:4f}, sec/iter={:.4f} | ETA {}"
.format(iter_id, iter + 1, total_iters, iou, time_iter,
calculate_eta(remain_iter, time_iter)))
timer.restart()
category_iou, miou = conf_mat.mean_iou()
category_acc, macc = conf_mat.accuracy()
logger.info("[EVAL] #Images={} mAcc={:.4f} mIoU={:.4f}".format(
len(eval_dataset), macc, miou))
logger.info("[EVAL] Category IoU: " + str(category_iou))
logger.info("[EVAL] Category Acc: " + str(category_acc))
logger.info("[EVAL] Kappa:{:.4f} ".format(conf_mat.kappa()))
return miou, macc
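# A minimal usage sketch (dataset, path and class count are hypothetical, not
# taken from this repository):
#     miou, macc = evaluate(model, eval_dataset=val_dataset,
#                           model_dir='output/best_model', num_classes=19)
# evaluate() restores the weights found under model_dir, runs the whole
# validation set and reports mean IoU / mean accuracy.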
| 1.882813
| 2
|
oscar/lib/python2.7/site-packages/traitlets/tests/test_traitlets_enum.py
|
sainjusajan/django-oscar
| 0
|
12781155
|
# -*- coding: UTF-8 -*-
# pylint: disable=missing-docstring, too-few-public-methods
"""
Test the trait-type ``UseEnum``.
"""
import unittest
import enum
from ipython_genutils.py3compat import string_types
from traitlets import HasTraits, TraitError, UseEnum
# -----------------------------------------------------------------------------
# TEST SUPPORT:
# -----------------------------------------------------------------------------
class Color(enum.Enum):
red = 1
green = 2
blue = 3
yellow = 4
class OtherColor(enum.Enum):
red = 0
green = 1
# -----------------------------------------------------------------------------
# TESTSUITE:
# -----------------------------------------------------------------------------
class TestUseEnum(unittest.TestCase):
# pylint: disable=invalid-name
class Example(HasTraits):
color = UseEnum(Color, help="Color enum")
def test_assign_enum_value(self):
example = self.Example()
example.color = Color.green
self.assertEqual(example.color, Color.green)
def test_assign_all_enum_values(self):
# pylint: disable=no-member
enum_values = [value for value in Color.__members__.values()]
for value in enum_values:
self.assertIsInstance(value, Color)
example = self.Example()
example.color = value
self.assertEqual(example.color, value)
self.assertIsInstance(value, Color)
def test_assign_enum_value__with_other_enum_raises_error(self):
example = self.Example()
with self.assertRaises(TraitError):
example.color = OtherColor.green
def test_assign_enum_name_1(self):
# -- CONVERT: string => Enum value (item)
example = self.Example()
example.color = "red"
self.assertEqual(example.color, Color.red)
def test_assign_enum_value_name(self):
# -- CONVERT: string => Enum value (item)
# pylint: disable=no-member
enum_names = [enum_val.name for enum_val in Color.__members__.values()]
for value in enum_names:
self.assertIsInstance(value, string_types)
example = self.Example()
enum_value = Color.__members__.get(value)
example.color = value
self.assertIs(example.color, enum_value)
self.assertEqual(example.color.name, value)
def test_assign_scoped_enum_value_name(self):
# -- CONVERT: string => Enum value (item)
scoped_names = ["Color.red", "Color.green", "Color.blue", "Color.yellow"]
for value in scoped_names:
example = self.Example()
example.color = value
self.assertIsInstance(example.color, Color)
self.assertEqual(str(example.color), value)
def test_assign_bad_enum_value_name__raises_error(self):
# -- CONVERT: string => Enum value (item)
bad_enum_names = ["UNKNOWN_COLOR", "RED", "Green", "blue2"]
for value in bad_enum_names:
example = self.Example()
with self.assertRaises(TraitError):
example.color = value
def test_assign_enum_value_number_1(self):
# -- CONVERT: number => Enum value (item)
example = self.Example()
example.color = 1 # == Color.red.value
example.color = Color.red.value
self.assertEqual(example.color, Color.red)
def test_assign_enum_value_number(self):
# -- CONVERT: number => Enum value (item)
# pylint: disable=no-member
enum_numbers = [enum_val.value
for enum_val in Color.__members__.values()]
for value in enum_numbers:
self.assertIsInstance(value, int)
example = self.Example()
example.color = value
self.assertIsInstance(example.color, Color)
self.assertEqual(example.color.value, value)
def test_assign_bad_enum_value_number__raises_error(self):
# -- CONVERT: number => Enum value (item)
bad_numbers = [-1, 0, 5]
for value in bad_numbers:
self.assertIsInstance(value, int)
assert UseEnum(Color).select_by_number(value, None) is None
example = self.Example()
with self.assertRaises(TraitError):
example.color = value
def test_ctor_without_default_value(self):
# -- IMPLICIT: default_value = Color.red (first enum-value)
class Example2(HasTraits):
color = UseEnum(Color)
example = Example2()
self.assertEqual(example.color, Color.red)
def test_ctor_with_default_value_as_enum_value(self):
# -- CONVERT: number => Enum value (item)
class Example2(HasTraits):
color = UseEnum(Color, default_value=Color.green)
example = Example2()
self.assertEqual(example.color, Color.green)
def test_ctor_with_default_value_none_and_not_allow_none(self):
# -- IMPLICIT: default_value = Color.red (first enum-value)
class Example2(HasTraits):
color1 = UseEnum(Color, default_value=None, allow_none=False)
color2 = UseEnum(Color, default_value=None)
example = Example2()
self.assertEqual(example.color1, Color.red)
self.assertEqual(example.color2, Color.red)
def test_ctor_with_default_value_none_and_allow_none(self):
class Example2(HasTraits):
color1 = UseEnum(Color, default_value=None, allow_none=True)
color2 = UseEnum(Color, allow_none=True)
example = Example2()
self.assertIs(example.color1, None)
self.assertIs(example.color2, None)
def test_assign_none_without_allow_none_resets_to_default_value(self):
class Example2(HasTraits):
color1 = UseEnum(Color, allow_none=False)
color2 = UseEnum(Color)
example = Example2()
example.color1 = None
example.color2 = None
self.assertIs(example.color1, Color.red)
self.assertIs(example.color2, Color.red)
def test_assign_none_to_enum_or_none(self):
class Example2(HasTraits):
color = UseEnum(Color, allow_none=True)
example = Example2()
example.color = None
self.assertIs(example.color, None)
def test_assign_bad_value_with_to_enum_or_none(self):
class Example2(HasTraits):
color = UseEnum(Color, allow_none=True)
example = Example2()
with self.assertRaises(TraitError):
example.color = "BAD_VALUE"
| 2.234375
| 2
|
voicefilter/utils/train.py
|
o74589055/voicefilter_torch_ws
| 0
|
12781156
|
<filename>voicefilter/utils/train.py
import os
import math
import torch
import torch.nn as nn
import traceback
from .adabound import AdaBound
from .audio import Audio
from .evaluation import validate
from model.model import VoiceFilter
from model.embedder import SpeechEmbedder
def train(args, pt_dir, chkpt_path, trainloader, testloader, writer, logger, hp, hp_str):
# load embedder
embedder_pt = torch.load(args.embedder_path)
embedder = SpeechEmbedder(hp).cuda()
embedder.load_state_dict(embedder_pt)
embedder.eval()
audio = Audio(hp)
device = torch.device('cuda')
model = nn.DataParallel(VoiceFilter(hp)).to(device)
if hp.train.optimizer == 'adabound':
optimizer = AdaBound(model.parameters(),
lr=hp.train.adabound.initial,
final_lr=hp.train.adabound.final)
elif hp.train.optimizer == 'adam':
optimizer = torch.optim.Adam(model.parameters(),
lr=hp.train.adam)
else:
raise Exception("%s optimizer not supported" % hp.train.optimizer)
step = 0
if chkpt_path is not None:
logger.info("Resuming from checkpoint: %s" % chkpt_path)
checkpoint = torch.load(chkpt_path)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
step = checkpoint['step']
# will use new given hparams.
if hp_str != checkpoint['hp_str']:
logger.warning("New hparams is different from checkpoint.")
else:
logger.info("Starting new training run")
try:
criterion = nn.MSELoss()
while True:
model.train()
for dvec_mels, target_mag, mixed_mag in trainloader:
target_mag = target_mag.cuda()
mixed_mag = mixed_mag.cuda()
dvec_list = list()
for mel in dvec_mels:
mel = mel.cuda()
dvec = embedder(mel)
dvec_list.append(dvec)
dvec = torch.stack(dvec_list, dim=0)
dvec = dvec.detach()
mask = model(mixed_mag, dvec)
output = mixed_mag * mask
# output = torch.pow(torch.clamp(output, min=0.0), hp.audio.power)
# target_mag = torch.pow(torch.clamp(target_mag, min=0.0), hp.audio.power)
loss = criterion(output, target_mag)
optimizer.zero_grad()
loss.backward()
optimizer.step()
step += 1
loss = loss.item()
if loss > 1e8 or math.isnan(loss):
logger.error("Loss exploded to %.02f at step %d!" % (loss, step))
raise Exception("Loss exploded")
# write loss to tensorboard
if step % hp.train.summary_interval == 0:
writer.log_training(loss, step)
logger.info("Wrote summary at step %d" % step)
# 1. save checkpoint file to resume training
# 2. evaluate and save sample to tensorboard
if step % hp.train.checkpoint_interval == 0:
save_path = os.path.join(pt_dir, 'chkpt_%d.pt' % step)
torch.save({
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
'step': step,
'hp_str': hp_str,
}, save_path)
logger.info("Saved checkpoint to: %s" % save_path)
validate(audio, model, embedder, testloader, writer, step)
except Exception as e:
logger.info("Exiting due to exception: %s" % e)
traceback.print_exc()
| 2.40625
| 2
|
code/python-modules/kinectData.py
|
jvahala/lucid-robotics
| 2
|
12781157
|
<reponame>jvahala/lucid-robotics<filename>code/python-modules/kinectData.py
'''
module: kinectData.py
use: contains the functions and class used for storing and manipulating Kinect data
'''
import numpy as np
import timeObject
import feature_tools
import utils
class kinectData(object):
'''
Purpose:
Manipulate Kinect data files
Class functions:
addData(self,filename) - Adds Kinect data to the dataArray object as appended rows
getFeatures(self) - Computes similarity matrix for selected features for any data added through addData()
Required modules:
numpy
timeObject
feature_tools
utils
'''
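# A usage sketch for this class (file names, IDs and the midpoint variable are
# hypothetical, not taken from this module):
#     user0 = kinectData('0')
#     user0.addData('kinect_session.txt')   # parse a raw Kinect log file
#     user0.midpoint = midpoint_xyz         # e.g. from feature_tools.getMidpointXYZ
#     user0.getFeatures()                   # fills user0.all_features / user0.feat_array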
def __init__(self, ID, header_string=None):
self.ID = ID #string user ID number ('0' is first person, '1' is second, etc)
self.names_list = [] #list object of the names of the features of data_array
if header_string != None:
self.processLine(header_string,header_line=True) #if a string with the header is input to the kinectData object, initialize the names list
self.names_base = [] #base names of the name list (ie ShoulderLeft instead of ShoulderLeft.X)
self.data_array = [] #m by n rows represent different timestamps, columns represent raw data elements from names_list
self.dataXYZ = np.zeros(1) #data given as an m by p by 3 where m is number of time stamps, p is number of names_base elements, and 3 is x,y,z
self.raw_times_array = [] #raw times of each row
self.num_vectors = 0 #number of timestamps (i.e. number of rows)
self.date = '' #MM/DD element that is necessary but useless in general
self.init_time = 0 #initial element's timestamp datetime object
self.total_time = 0 #total time spanned by the dataset
self.delta_time_array = [] #datetime.timedelta objects of each row's elapsed time since the start
self.feat_array = np.zeros(1) #thresholded feature columns taken from self.all_features - call getFeatures() to fill
self.all_features = np.zeros(1) #feature array for the data containing all features for each frame - call getFeatures() to fill
self.feature_norms = [] #normalizing values for all features that are kept at the first getFeatures() call unless this is set back to -1
self.similarity_method = -1 #similarity method determines how to generate the similarity matrix in utils.getSimilarityArray()
self.norm_features = ['SpineMid.X', 'SpineShoulder.X'] #normalize features by the difference between those defined here
self.norm_value = -1 #value to normalize features by getFeatures()
self.midpoint = np.zeros((1,3)) #1 by 3 array midpoint(X,Y,Z) between the two parties(u1 and u2) - define in main using feature_tools.getMidpointXYZ(u1_jointsXYZ,u2_jointsXYZ)
self.feature_inds = -1 #indices of the features chosen based on the definition provided in feature_tools.py
def addData(self, filename, filename_is_data=False):
'''
Purpose:
Adds Kinect data to the dataArray object as appended rows
Inputs:
filename: Kinect data file [1st row (0 index) is names of variables, additional rows (>0 index) are data]
filename_is_data: Bool - if True, then filename is a string of data instead of a filename; if False (default), then filename is the path to a data file
Outputs:
self.names_list: updated if empty
self.data_array: new rows of data from filename are appended
self.num_vectors: updated to reflect number of data frames added
self.init_time: updated if this is the first set of data added to the object
self.date: updated as necessary
self.total_time: updated to reflect total time elapsed between init_time and the last timestamp
self.delta_time_array: updated with new delta_time values
self.raw_times_array: updated with newly added raw timestamps
'''
if filename_is_data:
data_line = filename
_header = False
if len(self.names_list) == 0:
_header = True
self.processLine(data_line,header_line=_header)
else:
print 'Adding new data from', filename, '...'
with open(filename,'r') as f:
'''line_index = 0'''
_header = True
## begin looking at each line
for line in f:
self.processLine(line,header_line=_header)
_header = False
print ' Data added.\n'
#end addData()
def processLine(self,line,header_line=False):
# process the header line
if header_line:
avoid_words = set(['timestamp','personID','HandLeftStatus','HandRightStatus']) #these words should not be included in self.names_list
self.names_list = []
for word_index,word in enumerate(line.split()):
if word not in avoid_words:
self.names_list += [word]
# else, the line is a piece of data - treat it so
else:
# compile the entered data
data_list = []
for word_index,word in enumerate(line.split()):
# date
if word_index == 0:
self.date = word
# time
elif word_index == 1:
if len(self.data_array) == 0:
self.init_time = timeObject.timeObject(self.date,word)
curr_time = timeObject.timeObject(self.date,word)
delta_time = curr_time.time - self.init_time.time
# AM/PM
elif word_index == 2:
pass
# userID
elif word_index == 3:
if word != self.ID:
break
# data:
elif (word_index >= 4) and (word != 'Tracked') and (word != 'Inferred'):
data_list.append(float(word))
# adjust all relevant fields
if len(data_list)>0:
_data = np.array(data_list)
if len(self.data_array) == 0:
self.data_array = np.array(_data)
else:
self.data_array = np.vstack((self.data_array,_data))
self.delta_time_array.append(delta_time)
self.raw_times_array.append(curr_time.time)
self.total_time = delta_time
self.num_vectors += 1
def getFeatures(self, exp_weighting=True):
'''
Purpose:
Builds an array of features using one of the various accepted self.feature_method types
(ADD in normalization input???? where to normalize the data? - no normalization is implemented)
Inputs:
self.norm_value: Not exactly an input, but the function will calculate the norm value if it is undefined, and the returned array is normalized by this value
currently based on |SpineMid.Y - SpineShoulder.Y|
Outputs:
self.all_features: updated with all feature vectors associated with each frame of kinect data
self.feat_array: updated with the new frames of chosen feature vectors for each frame of kinect data
'''
# if no norm value is assigned, set up the norm value
if self.norm_value == -1:
try:
name_index_1 = self.names_list.index(self.norm_features[0])
name_index_2 = self.names_list.index(self.norm_features[1])
#print 'important: \n', self.data_array[0,name_index_1:name_index_1+3], '\n', self.data_array[0,name_index_2:name_index_2+3]
#self.norm_value = np.absolute(self.data_array[0,self.names_list.index(self.norm_features[0])] - self.data_array[0,self.names_list.index(self.norm_features[1])])
self.norm_value = 10.0*np.linalg.norm(self.data_array[0,name_index_1:name_index_1+3] - self.data_array[0,name_index_2:name_index_2+3])**2
#print self.norm_value
except ValueError:
self.norm_value = 1
print 'ERROR: norm_features not found.\n'
#if no features yet defined, start messing with all data
if self.all_features.shape == (1,):
sub_data_array = self.data_array
weight_all = True
#else if features are defined, mess with only the new data
else:
sub_data_array = self.data_array[(len(self.all_features)-1):self.num_vectors-1,:]
weight_all = False
#define the new feature vectors for each row
for row in sub_data_array:
#for each row of data,
# a) get jointsXYZ for row,
# b) get normalized interjoint features,
# c) get normalized joint-midpoint features,
# d) concatenate features together
self.names_base, jointsXYZ = feature_tools.disectJoints(self.names_list,row)
features_interjoint = utils.normalize(feature_tools.getInterjointFeatures(jointsXYZ),self.norm_value)
features_jointMidpoint = utils.normalize(feature_tools.getJointToMidpointFeatures(jointsXYZ,self.midpoint),self.norm_value)
features = np.hstack((features_interjoint,features_jointMidpoint))
if len(self.feature_norms) > 0:
features = features/self.feature_norms #normalize all features within themselves, does this make sense to do just with some generic current max? Future data will be poorly compared to each other...need some definite 0-1 normalizing factor for all new features
#print 'feature_norms; ', self.feature_norms
#append feat_vec to self.all_features if it has already been defined
if self.all_features.shape != (1,): #all_features exists
if len(self.all_features)>1:
features = 0.5*features + 0.3*self.all_features[-1,:] + 0.2*self.all_features[-2,:] #apply a basic weighted moving average
self.all_features = np.vstack((self.all_features,features))
self.dataXYZ = np.vstack((self.dataXYZ,jointsXYZ[np.newaxis,:,:]))
else: #all_features does not exist
self.all_features = features.reshape(1,len(features))
self.dataXYZ = jointsXYZ[np.newaxis,:,:]
#remove non time-varying features
'''will need to implement a method to only calculate the required feature_inds features and to append them properly when adding additional sets of data to the same class object'''
#self.features_interjoint = self.feat_array[:,0:120]
#self.features_jointMidpoint = self.feat_array[:,120:]
if len(self.feature_norms) == 0:
self.feature_norms = 1.*np.amax(self.all_features,axis=0)
self.all_features = self.all_features/self.feature_norms
if self.feature_inds == -1:
self.feat_array, self.feature_inds = feature_tools.thresholdFeatures(self.all_features,self.names_base,self.norm_value)
feature_tools.describeFeatures(self.feature_inds, len(self.names_base), self.names_base)
else:
self.feat_array = self.all_features[:,self.feature_inds]
#end getFeatures()
| 3.0625
| 3
|
reconstruction/ostec/external/graphonomy/FaceHairMask/gcn.py
|
itsraina/insightface
| 0
|
12781158
|
<reponame>itsraina/insightface<filename>reconstruction/ostec/external/graphonomy/FaceHairMask/gcn.py
import torch
from . import graph
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
class GraphConvolution(nn.Module):
def __init__(self, in_features, out_features, bias=False):
super(GraphConvolution, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
self.reset_parameters()
def reset_parameters(self):
# stdv = 1./math.sqrt(self.weight(1))
# self.weight.data.uniform_(-stdv,stdv)
torch.nn.init.xavier_uniform_(self.weight)
# if self.bias is not None:
# self.bias.data.uniform_(-stdv,stdv)
def forward(self, input, adj=None, relu=False):
support = torch.matmul(input, self.weight)
# print(support.size(),adj.size())
if adj is not None:
output = torch.matmul(adj, support)
else:
output = support
# print(output.size())
if self.bias is not None:
return output + self.bias
else:
if relu:
return F.relu(output)
else:
return output
def __repr__(self):
return (
self.__class__.__name__
+ " ("
+ str(self.in_features)
+ " -> "
+ str(self.out_features)
+ ")"
)
class Featuremaps_to_Graph(nn.Module):
def __init__(self, input_channels, hidden_layers, nodes=7):
super(Featuremaps_to_Graph, self).__init__()
self.pre_fea = Parameter(torch.FloatTensor(input_channels, nodes))
self.weight = Parameter(torch.FloatTensor(input_channels, hidden_layers))
self.reset_parameters()
def forward(self, input):
n, c, h, w = input.size()
# print('fea input',input.size())
input1 = input.view(n, c, h * w)
input1 = input1.transpose(1, 2) # n x hw x c
# print('fea input1', input1.size())
############## Feature maps to node ################
fea_node = torch.matmul(input1, self.pre_fea) # n x hw x n_classes
weight_node = torch.matmul(input1, self.weight) # n x hw x hidden_layer
# softmax fea_node
fea_node = F.softmax(fea_node, dim=-1)
# print(fea_node.size(),weight_node.size())
graph_node = F.relu(torch.matmul(fea_node.transpose(1, 2), weight_node))
return graph_node # n x n_class x hidden_layer
def reset_parameters(self):
for ww in self.parameters():
torch.nn.init.xavier_uniform_(ww)
# if self.bias is not None:
# self.bias.data.uniform_(-stdv,stdv)
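# Shape sketch for Featuremaps_to_Graph (illustrative numbers, not from this
# repository): with an input of size (n, c, h, w) = (2, 256, 32, 32),
# hidden_layers=128 and nodes=7, forward() flattens the input to (2, 1024, 256),
# computes a soft node assignment of size (2, 1024, 7) and node features of
# size (2, 1024, 128), and returns graph nodes of size (2, 7, 128).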
class Featuremaps_to_Graph_transfer(nn.Module):
def __init__(self, input_channels, hidden_layers, nodes=7, source_nodes=20):
super(Featuremaps_to_Graph_transfer, self).__init__()
self.pre_fea = Parameter(torch.FloatTensor(input_channels, nodes))
self.weight = Parameter(torch.FloatTensor(input_channels, hidden_layers))
self.pre_fea_transfer = nn.Sequential(
*[
nn.Linear(source_nodes, source_nodes),
nn.LeakyReLU(True),
nn.Linear(source_nodes, nodes),
nn.LeakyReLU(True),
]
)
self.reset_parameters()
def forward(self, input, source_pre_fea):
self.pre_fea.data = self.pre_fea_learn(source_pre_fea)
n, c, h, w = input.size()
# print('fea input',input.size())
input1 = input.view(n, c, h * w)
input1 = input1.transpose(1, 2) # n x hw x c
# print('fea input1', input1.size())
############## Feature maps to node ################
fea_node = torch.matmul(input1, self.pre_fea) # n x hw x n_classes
weight_node = torch.matmul(input1, self.weight) # n x hw x hidden_layer
# softmax fea_node
fea_node = F.softmax(fea_node, dim=1)
# print(fea_node.size(),weight_node.size())
graph_node = F.relu(torch.matmul(fea_node.transpose(1, 2), weight_node))
return graph_node # n x n_class x hidden_layer
def pre_fea_learn(self, input):
pre_fea = self.pre_fea_transfer.forward(input.unsqueeze(0)).squeeze(0)
return self.pre_fea.data + pre_fea
class Graph_to_Featuremaps(nn.Module):
# this is a special version
def __init__(self, input_channels, output_channels, hidden_layers, nodes=7):
super(Graph_to_Featuremaps, self).__init__()
self.node_fea = Parameter(torch.FloatTensor(input_channels + hidden_layers, 1))
self.weight = Parameter(torch.FloatTensor(hidden_layers, output_channels))
self.reset_parameters()
def reset_parameters(self):
for ww in self.parameters():
torch.nn.init.xavier_uniform_(ww)
def forward(self, input, res_feature):
"""
:param input: 1 x batch x nodes x hidden_layer
:param res_feature: batch x channels x h x w
:return:
"""
batchi, channeli, hi, wi = res_feature.size()
# print(res_feature.size())
# print(input.size())
try:
_, batch, nodes, hidden = input.size()
except:
# print(input.size())
input = input.unsqueeze(0)
_, batch, nodes, hidden = input.size()
assert batch == batchi
input1 = input.transpose(0, 1).expand(batch, hi * wi, nodes, hidden)
res_feature_after_view = res_feature.view(batch, channeli, hi * wi).transpose(
1, 2
)
res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(
batch, hi * wi, nodes, channeli
)
new_fea = torch.cat((res_feature_after_view1, input1), dim=3)
# print(self.node_fea.size(),new_fea.size())
new_node = torch.matmul(new_fea, self.node_fea) # batch x hw x nodes x 1
new_weight = torch.matmul(input, self.weight) # batch x node x channel
new_node = new_node.view(batch, hi * wi, nodes)
# 0721
new_node = F.softmax(new_node, dim=-1)
#
feature_out = torch.matmul(new_node, new_weight)
# print(feature_out.size())
feature_out = feature_out.transpose(2, 3).contiguous().view(res_feature.size())
return F.relu(feature_out)
class Graph_to_Featuremaps_savemem(nn.Module):
# this is a special version for saving GPU memory. The process is the same as Graph_to_Featuremaps.
def __init__(self, input_channels, output_channels, hidden_layers, nodes=7):
super(Graph_to_Featuremaps_savemem, self).__init__()
self.node_fea_for_res = Parameter(torch.FloatTensor(input_channels, 1))
self.node_fea_for_hidden = Parameter(torch.FloatTensor(hidden_layers, 1))
self.weight = Parameter(torch.FloatTensor(hidden_layers, output_channels))
self.reset_parameters()
def reset_parameters(self):
for ww in self.parameters():
torch.nn.init.xavier_uniform_(ww)
def forward(self, input, res_feature):
"""
:param input: 1 x batch x nodes x hidden_layer
:param res_feature: batch x channels x h x w
:return:
"""
batchi, channeli, hi, wi = res_feature.size()
# print(res_feature.size())
# print(input.size())
try:
_, batch, nodes, hidden = input.size()
except:
# print(input.size())
input = input.unsqueeze(0)
_, batch, nodes, hidden = input.size()
assert batch == batchi
input1 = input.transpose(0, 1).expand(batch, hi * wi, nodes, hidden)
res_feature_after_view = res_feature.view(batch, channeli, hi * wi).transpose(
1, 2
)
res_feature_after_view1 = res_feature_after_view.unsqueeze(2).expand(
batch, hi * wi, nodes, channeli
)
# new_fea = torch.cat((res_feature_after_view1,input1),dim=3)
## sim
new_node1 = torch.matmul(res_feature_after_view1, self.node_fea_for_res)
new_node2 = torch.matmul(input1, self.node_fea_for_hidden)
new_node = new_node1 + new_node2
## sim end
# print(self.node_fea.size(),new_fea.size())
# new_node = torch.matmul(new_fea, self.node_fea) # batch x hw x nodes x 1
new_weight = torch.matmul(input, self.weight) # batch x node x channel
new_node = new_node.view(batch, hi * wi, nodes)
# 0721
new_node = F.softmax(new_node, dim=-1)
#
feature_out = torch.matmul(new_node, new_weight)
# print(feature_out.size())
feature_out = feature_out.transpose(2, 3).contiguous().view(res_feature.size())
return F.relu(feature_out)
class Graph_trans(nn.Module):
def __init__(
self,
in_features,
out_features,
begin_nodes=7,
end_nodes=2,
bias=False,
adj=None,
):
super(Graph_trans, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.weight = Parameter(torch.FloatTensor(in_features, out_features))
if adj is not None:
h, w = adj.size()
assert (h == end_nodes) and (w == begin_nodes)
self.adj = torch.autograd.Variable(adj, requires_grad=False)
else:
self.adj = Parameter(torch.FloatTensor(end_nodes, begin_nodes))
if bias:
self.bias = Parameter(torch.FloatTensor(out_features))
else:
self.register_parameter("bias", None)
# self.reset_parameters()
def reset_parameters(self):
# stdv = 1./math.sqrt(self.weight(1))
# self.weight.data.uniform_(-stdv,stdv)
torch.nn.init.xavier_uniform_(self.weight)
# if self.bias is not None:
# self.bias.data.uniform_(-stdv,stdv)
def forward(self, input, relu=False, adj_return=False, adj=None):
support = torch.matmul(input, self.weight)
# print(support.size(),self.adj.size())
if adj is None:
adj = self.adj
adj1 = self.norm_trans_adj(adj)
output = torch.matmul(adj1, support)
if adj_return:
output1 = F.normalize(output, p=2, dim=-1)
self.adj_mat = torch.matmul(output1, output1.transpose(-2, -1))
if self.bias is not None:
return output + self.bias
else:
if relu:
return F.relu(output)
else:
return output
def get_adj_mat(self):
adj = graph.normalize_adj_torch(F.relu(self.adj_mat))
return adj
def get_encode_adj(self):
return self.adj
def norm_trans_adj(self, adj): # maybe can use softmax
adj = F.relu(adj)
r = F.softmax(adj, dim=-1)
# print(adj.size())
# row_sum = adj.sum(-1).unsqueeze(-1)
# d_mat = row_sum.expand(adj.size())
# r = torch.div(row_sum,d_mat)
# r[torch.isnan(r)] = 0
return r
if __name__ == "__main__":
graph = torch.randn((7, 128))
en = GraphConvolution(128, 128)
a = en.forward(graph)
print(a)
# a = en.forward(graph,pred)
# print(a.size())
| 2.25
| 2
|
important_political_entities_finder/ingest/fa_scrape.py
|
dberger1989/Important_Political_Entities
| 0
|
12781159
|
<reponame>dberger1989/Important_Political_Entities<gh_stars>0
#!/usr/bin/env python
import os
import time
import pickle
from bs4 import BeautifulSoup
import requests
from selenium.webdriver.common.keys import Keys
from selenium import webdriver
from dateutil import parser
import re
import config
class FA_scrape(object):
def __init__(self):
## FA login placeholders that must be filled in by users.
LOGIN_USERNAME = config.login_username
LOGIN_PASSWORD = <PASSWORD>
self.login_username = LOGIN_USERNAME
self.login_password = <PASSWORD>
## Placeholder for number of articles to scrape that must be filled in by users.
N_ARTICLES_TO_SCRAPE = config.n_articles_to_scrape
self.n_articles_to_scrape = N_ARTICLES_TO_SCRAPE
## Placeholder for path to chromedriver that must be filled in by users
PATH_TO_CHROMEDRIVER = config.path_to_chromedriver
self.chromedriver = PATH_TO_CHROMEDRIVER
## Use driver to load FA archive page
self.driver = webdriver.Chrome(self.chromedriver)
#-------------------------load and scrape archive page------------------------------------#
def get_article_links(self, n_articles_to_scrape, driver):
'''
Function to scrape the Foreign Affairs (FA) archive search page that lists
the article links. 'Load More' must be clicked each time to get an
additional 10 articles loaded to the page.
Args:
n_articles_to_scrape(int): The number of article urls to scrape
driver(selenium webdriver obj): The chromedriver object
Returns:
url_archive_html_soup(str): the html of the FA archive page with all the desired urls loaded.
'''
## Foreign Affairs archive search page
url = "https://www.foreignaffairs.com/search?qs="
driver.get(url)
## Click on date button to sort articles in descending date order
self.recursive_click(driver.find_element_by_xpath('//*[@id="content"]/div/section[2]/div/div[2]/div/div/div/a'))
## Each time 'load more' is clicked, 10 more articles are
## listed. Determine how many times to click 'Load More'
if n_articles_to_scrape <= 10:
loads_needed = 1
else:
loads_needed = n_articles_to_scrape/10
for i in range(loads_needed):
## Click the 'Load More' button
self.recursive_click(driver.find_element_by_link_text("Load More"))
## Return the html for the page with all the articles listed
url_archive_html_soup = BeautifulSoup(driver.page_source, 'html.parser')
return url_archive_html_soup
def place_urls_in_list(self, url_archive_html, n_articles_to_scrape):
'''
Function to find all the urls in the scraped archive page and then
store them in a list.
Args:
url_archive_html_soup(Beautiful Soup obj): A Beautiful Soup html object of the Foreign Affairs archive page
containing the urls of the articles
Returns:
article_links:
A list of urls
'''
article_links = []
article_titles = url_archive_html.find_all(class_='title')
for item in article_titles:
href_string = item.contents[1]
for href_tag, link_extension in href_string.attrs.items():
full_link = "https://www.foreignaffairs.com" + link_extension
article_links.append(full_link)
article_links = article_links[:n_articles_to_scrape]
print "---links to scrape:----"
for link in article_links:
print link
print "-----------------------"
return article_links
#-------------------------scrape data from article pages------------------------------------#
def login_get_article_text(self, article_url, login_username, login_password, driver):
'''
Function to log in to the Foreign Affairs (FA) website, should the user be prompted to do so.
Args:
article_url(str): url of the article
login_username(str): FA login username
login_password(str): <PASSWORD>
Returns:
article_html_soup(Beautiful Soup obj): The html of the article
'''
## Click to login
login = driver.find_element_by_xpath('//*[@id="content"]/article/div[3]/div[2]/div[1]/div/div[1]/div/a[1]')
login.click()
time.sleep(5)
## Locate email and password fields
email_address_username = driver.find_element_by_xpath('//*[@id="edit-name"]')
password = driver.find_element_by_xpath('//*[@id="edit-pass"]')
## Enter the login credentials into the email and password fields
email_address_username.send_keys(login_username)
password.send_keys(login_password)
time.sleep(5)
## Submit
submit = driver.find_element_by_xpath('//*[@id="edit-submit--3"]')
submit.click()
time.sleep(5)
## Make soup out of page_source
page_source = driver.page_source
article_html_soup = BeautifulSoup(driver.page_source, 'html.parser')
return article_html_soup
def get_article_text_no_login(self, article_url, driver):
'''
Function to load article page if there is no login needed
'''
page_source = driver.page_source
article_html_soup = BeautifulSoup(driver.page_source, 'html.parser')
return article_html_soup
def get_article_data(self, article_links, driver):
'''
Function to get the data for each article from the list of article links.
The data to be scraped for each article is the title, description, date, and text.
Stores each article as a document in a MongoDB database.
Args:
article_links(list): a list of article urls
driver(selenium webdriver obj): The chromedriver object
Returns:
articles_data_list(list): A list of dictionaries where each dictionary contains the title,
description, date, and text of each article.
'''
articles_data_list = []
for article_url in article_links:
## Go to article page
driver.get(article_url)
time.sleep(5)
## Check if login needed
try:
driver.find_element_by_xpath('//*[@id="content"]/article/div[3]/div[2]/div[1]/div/div[1]/div/a[1]')
soup = self.login_get_article_text(article_url, self.login_username, self.login_password, driver)
## If not needed
except:
soup = self.get_article_text_no_login(article_url, driver)
## Retrieve the top and bottom halves of the article
top_content = soup.find_all(class_="top_content_at_page_load" )
end_content = soup.find_all(class_="l-article-column article-icon-cap")
## Format out the unicode from the top half
try:
top_article_formatted = self.remove_unwanted_unicode_characters(top_content[0].get_text())
except:
top_article_formatted = soup.findAll(class_="container l-detail")[0].text
## Format out the unicode from the bottom half
try:
bottom_article_formatted = self.remove_unwanted_unicode_characters(end_content[0].get_text())
except:
bottom_article_formatted = 'blank'
## Combine top and bottom halves
full_article = top_article_formatted + " " + bottom_article_formatted
## Get headline tag and convert to string
title_tag = soup.find_all(class_="article-header__headline")[0].contents[0]
title = self.remove_unwanted_unicode_characters(title_tag)
## Get description tag and convert to string
try:
description_tag = soup.find_all(class_='article-header__deck')[0].contents[0]
description = self.remove_unwanted_unicode_characters(description_tag)
except:
description = 'blank'
## Get article date
try:
date = parser.parse(str(soup.findAll('time')[0].contents[0]))
except:
try:
date = parser.parse(re.findall('([0-9]{4}-[0-9]{2}-[0-9]{2})', article_url)[0])
except:
try:
date = str(soup.findAll(class_='date')[0].contents[0])
date = parser.parse(re.findall('/.+', date)[0])
except:
date = str(soup.find(class_="article-header__metadata-date").text)
date = parser.parse(re.findall('/\w+\s{1}\d+', date)[0])
## Make a dictionary out of the article data
article_data_dic = dic = {'title':title, 'date':date,
'description':description, 'text':full_article }
## Append the dictionary of article data to a list with the other article data
articles_data_list.append(article_data_dic)
return articles_data_list
#-------------------------helper functions------------------------------------#
def recursive_click(self, path_to_element):
'''
Function to click on an element. If the click does not work, wait 10
seconds and try again. Useful for when selenium's Explicit Wait causes
connection to close.
Returns:
None
'''
try:
path_to_element.click()
except:
time.sleep(10)
self.recursive_click(path_to_element)
def remove_unwanted_unicode_characters(self, text_string):
'''
Function to get rid of unwanted unicode.
'''
new_text_string = re.sub(u"(\u2018|\u2019)", "'", text_string)
new_text_string = re.sub(u"(\u2014)", "--", new_text_string)
new_text_string = re.sub(u"(\u201c|\u201d)", '"' , new_text_string)
new_text_string = re.sub(u"(\xa0)", "", new_text_string)
new_text_string = new_text_string.replace("\n","")
new_text_string = re.sub(u"(\u2013)", "-", new_text_string)
new_text_string = re.sub(u"(\u2026)", "...", new_text_string)
new_text_string = re.sub(u"(\xe9)", "e", new_text_string)
new_text_string = re.sub(u"(\xad)", "-", new_text_string)
new_text_string = re.sub(u"(\xfa)", "u", new_text_string)
new_text_string = re.sub(u"(\xf3)", "o", new_text_string)
new_text_string = re.sub(u"(\xed)", "i", new_text_string)
new_text_string = re.sub(u"(\xe3)", "a", new_text_string)
#new_text_string = re.sub(u"(\u2026)", "...", new_text_string)
#new_text_string = re.sub(u"(\u2026)", "...", new_text_string)
#new_text_string = re.sub(u"(\u2026)", "...", new_text_string)
new_text_string = str(new_text_string.encode('ascii','ignore')).replace("\\","")
#new_text_string = re.sub("\\", "", str(new_text_string))
return new_text_string
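# Illustrative example (made-up string, not from the project): the helper above
# maps curly quotes and dashes to ASCII, e.g.
#     remove_unwanted_unicode_characters(u"Bob\u2019s \u201cplan\u201d \u2014 now")
# returns: Bob's "plan" -- now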
def main(self):
print "scraping article urls..."
## Get the html with the links to the last n articles from
## the Foreign Affairs website
url_archive_html_soup = self.get_article_links(self.n_articles_to_scrape, self.driver)
## Get article links from url archive html
article_links = self.place_urls_in_list(url_archive_html_soup, self.n_articles_to_scrape)
print "scraping article data..."
## Iterate through the article urls and get article data from each page
articles_data_list = self.get_article_data(article_links, self.driver)
print "pickling article data..."
with open('important_political_entities_finder/ingest/data_store/articles_data_list.pkl', 'w') as picklefile:
pickle.dump(articles_data_list, picklefile)
print "scraping complete"
if __name__ == "__main__":
FA_scrape().main()
| 2.9375
| 3
|
ggtnn_graph_parse.py
|
hexahedria/gated-graph-transformer-network
| 160
|
12781160
|
<filename>ggtnn_graph_parse.py<gh_stars>100-1000
import os
import sys
import re
import collections
import numpy as np
import scipy
import json
import itertools
import pickle
import gc
import gzip
import argparse
def tokenize(sent):
'''Return the tokens of a sentence including punctuation.
>>> tokenize('Bob dropped the apple. Where is the apple?')
['Bob', 'dropped', 'the', 'apple', '.', 'Where', 'is', 'the', 'apple', '?']
'''
return re.findall(r'(?:\w+)|\S', sent)
def list_to_map(l):
'''Convert a list of values to a map from values to indices'''
return {val:i for i,val in enumerate(l)}
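# For example, list_to_map(['a', 'b', 'c']) returns {'a': 0, 'b': 1, 'c': 2}.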
def parse_stories(lines):
'''
Parse stories provided in the bAbi tasks format, with knowledge graph.
'''
data = []
story = []
for line in lines:
if line[-1] == "\n":
line = line[:-1]
nid, line = line.split(' ', 1)
nid = int(nid)
if nid == 1:
story = []
questions = []
if '\t' in line:
q, apre = line.split('\t')[:2]
a = apre.split(',')
q = tokenize(q)
substory = [x for x in story if x]
data.append((substory, q, a))
story.append('')
else:
line, graph = line.split('=', 1)
sent = tokenize(line)
graph_parsed = json.loads(graph)
story.append((sent, graph_parsed))
return data
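# A hypothetical input line in this format (sentence and graph are made up):
#     1 Mary moved to the bathroom.={"nodes": ["Mary", "bathroom"], "edges": [{"from": "Mary", "to": "bathroom", "type": "is_in"}]}
# parse_stories() tokenizes the text before '=' and json-parses the graph after
# it; a line containing a tab is treated instead as a question/answer pair.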
def get_stories(taskname):
with open(taskname, 'r') as f:
lines = f.readlines()
return parse_stories(lines)
def get_max_sentence_length(stories):
return max((max((len(sentence) for (sentence, graph) in sents_graphs)) for (sents_graphs, query, answer) in stories))
def get_max_query_length(stories):
return max((len(query) for (sents_graphs, query, answer) in stories))
def get_max_num_queries(stories):
return max((len(queries) for (sents_graphs, query, answer) in stories))
def get_max_nodes_per_iter(stories):
result = 0
for (sents_graphs, query, answer) in stories:
prev_nodes = set()
for (sentence, graph) in sents_graphs:
cur_nodes = set(graph["nodes"])
new_nodes = len(cur_nodes - prev_nodes)
if new_nodes > result:
result = new_nodes
prev_nodes = cur_nodes
return result
def get_buckets(stories, max_ignore_unbatched=100, max_pad_amount=25):
sentencecounts = [len(sents_graphs) for (sents_graphs, query, answer) in stories]
countpairs = sorted(collections.Counter(sentencecounts).items())
buckets = []
smallest_left_val = 0
num_unbatched = max_ignore_unbatched
for val,ct in countpairs:
num_unbatched += ct
if val - smallest_left_val > max_pad_amount or num_unbatched > max_ignore_unbatched:
buckets.append(val)
smallest_left_val = val
num_unbatched = 0
if buckets[-1] != countpairs[-1][0]:
buckets.append(countpairs[-1][0])
return buckets
PAD_WORD = "<PAD>"
def get_wordlist(stories):
words = [PAD_WORD] + sorted(list(set((word
for (sents_graphs, query, answer) in stories
for wordbag in itertools.chain((s for s,g in sents_graphs), [query])
for word in wordbag ))))
wordmap = list_to_map(words)
return words, wordmap
def get_answer_list(stories):
words = sorted(list(set(word for (sents_graphs, query, answer) in stories for word in answer)))
wordmap = list_to_map(words)
return words, wordmap
def pad_story(story, num_sentences, sentence_length):
def pad(lst,dlen,pad):
return lst + [pad]*(dlen - len(lst))
sents_graphs, query, answer = story
padded_sents_graphs = [(pad(s,sentence_length,PAD_WORD), g) for s,g in sents_graphs]
padded_query = pad(query,sentence_length,PAD_WORD)
sentgraph_padding = (pad([],sentence_length,PAD_WORD), padded_sents_graphs[-1][1])
return (pad(padded_sents_graphs, num_sentences, sentgraph_padding), padded_query, answer)
def get_unqualified_id(s):
return s.split("#")[0]
def get_graph_lists(stories):
node_words = sorted(list(set(get_unqualified_id(node)
for (sents_graphs, query, answer) in stories
for sent,graph in sents_graphs
for node in graph["nodes"])))
nodemap = list_to_map(node_words)
edge_words = sorted(list(set(get_unqualified_id(edge["type"])
for (sents_graphs, query, answer) in stories
for sent,graph in sents_graphs
for edge in graph["edges"])))
edgemap = list_to_map(edge_words)
return node_words, nodemap, edge_words, edgemap
def convert_graph(graphs, nodemap, edgemap, new_nodes_per_iter, dynamic=True):
num_node_ids = len(nodemap)
num_edge_types = len(edgemap)
full_size = len(graphs)*new_nodes_per_iter + 1
prev_size = 1
processed_nodes = []
index_map = {}
all_num_nodes = []
all_node_ids = []
all_node_strengths = []
all_edges = []
if not dynamic:
processed_nodes = list(nodemap.keys())
index_map = nodemap.copy()
prev_size = num_node_ids
full_size = prev_size
new_nodes_per_iter = 0
for g in graphs:
active_nodes = g["nodes"]
active_edges = g["edges"]
new_nodes = [e for e in active_nodes if e not in processed_nodes]
num_new_nodes = len(new_nodes)
if not dynamic:
assert num_new_nodes == 0, "Cannot create more nodes in non-dynamic mode!\n{}".format(graphs)
new_node_strengths = np.zeros([new_nodes_per_iter], np.float32)
new_node_strengths[:num_new_nodes] = 1.0
new_node_ids = np.zeros([new_nodes_per_iter, num_node_ids], np.float32)
for i, node in enumerate(new_nodes):
new_node_ids[i,nodemap[get_unqualified_id(node)]] = 1.0
index_map[node] = prev_size + i
next_edges = np.zeros([full_size, full_size, num_edge_types])
for edge in active_edges:
next_edges[index_map[edge["from"]],
index_map[edge["to"]],
edgemap[get_unqualified_id(edge["type"])]] = 1.0
processed_nodes.extend(new_nodes)
prev_size += new_nodes_per_iter
all_num_nodes.append(num_new_nodes)
all_node_ids.append(new_node_ids)
all_edges.append(next_edges)
all_node_strengths.append(new_node_strengths)
return np.stack(all_num_nodes), np.stack(all_node_strengths), np.stack(all_node_ids), np.stack(all_edges)
def convert_story(story, wordmap, answer_map, graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic=True):
"""
Converts a story in format
([(sentence, graph)], [(index, question_arr, answer)])
to a consolidated story in format
(sentence_arr, [graph_arr_dict], [(index, question_arr, answer)])
and also replaces words according to the input maps
"""
sents_graphs, query, answer = story
sentence_arr = [[wordmap[w] for w in s] for s,g in sents_graphs]
graphs = convert_graph([g for s,g in sents_graphs], graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic)
query_arr = [wordmap[w] for w in query]
answer_arr = [answer_map[w] for w in answer]
return (sentence_arr, graphs, query_arr, answer_arr)
def bucket_stories(stories, buckets, wordmap, answer_map, graph_node_map, graph_edge_map, sentence_length, new_nodes_per_iter, dynamic=True):
    # process_story needs the maps and sizes passed to bucket_stories, so it is
    # defined as a closure rather than at module level
    def process_story(s, bucket_len):
        return convert_story(pad_story(s, bucket_len, sentence_length), wordmap, answer_map, graph_node_map, graph_edge_map, new_nodes_per_iter, dynamic)
    return [[process_story(story, bmax) for story in stories if bstart < len(story[0]) <= bmax]
            for bstart, bmax in zip([0] + buckets, buckets)]
def prepare_stories(stories, dynamic=True):
sentence_length = max(get_max_sentence_length(stories), get_max_query_length(stories))
buckets = get_buckets(stories)
wordlist, wordmap = get_wordlist(stories)
anslist, ansmap = get_answer_list(stories)
new_nodes_per_iter = get_max_nodes_per_iter(stories)
graph_node_list, graph_node_map, graph_edge_list, graph_edge_map = get_graph_lists(stories)
bucketed = bucket_stories(stories, buckets, wordmap, ansmap, graph_node_map, graph_edge_map, sentence_length, new_nodes_per_iter, dynamic)
return sentence_length, new_nodes_per_iter, buckets, wordlist, anslist, graph_node_list, graph_edge_list, bucketed
def print_batch(story, wordlist, anslist, file=sys.stdout):
sents, query, answer = story
for batch,(s,q,a) in enumerate(zip(sents,query,answer)):
file.write("Story {}\n".format(batch))
for sent in s:
file.write(" ".join([wordlist[word] for word in sent]) + "\n")
file.write(" ".join(wordlist[word] for word in q) + "\n")
file.write(" ".join(anslist[word] for word in a.nonzero()[1]) + "\n")
MetadataList = collections.namedtuple("MetadataList", ["sentence_length", "new_nodes_per_iter", "buckets", "wordlist", "anslist", "graph_node_list", "graph_edge_list"])
PreppedStory = collections.namedtuple("PreppedStory", ["converted", "sentences", "query", "answer"])
def generate_metadata(stories, dynamic=True):
sentence_length = max(get_max_sentence_length(stories), get_max_query_length(stories))
buckets = get_buckets(stories)
wordlist, wordmap = get_wordlist(stories)
anslist, ansmap = get_answer_list(stories)
new_nodes_per_iter = get_max_nodes_per_iter(stories)
graph_node_list, graph_node_map, graph_edge_list, graph_edge_map = get_graph_lists(stories)
metadata = MetadataList(sentence_length, new_nodes_per_iter, buckets, wordlist, anslist, graph_node_list, graph_edge_list)
return metadata
def preprocess_stories(stories, savedir, dynamic=True, metadata_file=None):
if metadata_file is None:
metadata = generate_metadata(stories, dynamic)
else:
with open(metadata_file,'rb') as f:
metadata = pickle.load(f)
buckets = get_buckets(stories)
sentence_length, new_nodes_per_iter, old_buckets, wordlist, anslist, graph_node_list, graph_edge_list = metadata
metadata = metadata._replace(buckets=buckets)
if not os.path.exists(savedir):
os.makedirs(savedir)
with open(os.path.join(savedir,'metadata.p'),'wb') as f:
pickle.dump(metadata, f)
bucketed_files = [[] for _ in buckets]
for i,story in enumerate(stories):
bucket_idx, cur_bucket = next(((i,bmax) for (i,(bstart, bmax)) in enumerate(zip([0]+buckets,buckets))
if bstart < len(story[0]) <= bmax), (None,None))
assert cur_bucket is not None, "Couldn't put story of length {} into buckets {}".format(len(story[0]), buckets)
bucket_dir = os.path.join(savedir, "bucket_{}".format(cur_bucket))
if not os.path.exists(bucket_dir):
os.makedirs(bucket_dir)
story_fn = os.path.join(bucket_dir, "story_{}.pz".format(i))
sents_graphs, query, answer = story
sents = [s for s,g in sents_graphs]
cvtd = convert_story(pad_story(story, cur_bucket, sentence_length), list_to_map(wordlist), list_to_map(anslist), list_to_map(graph_node_list), list_to_map(graph_edge_list), new_nodes_per_iter, dynamic)
prepped = PreppedStory(cvtd, sents, query, answer)
with gzip.open(story_fn, 'wb') as zf:
pickle.dump(prepped, zf)
bucketed_files[bucket_idx].append(os.path.relpath(story_fn, savedir))
gc.collect() # we don't want to use too much memory, so try to clean it up
with open(os.path.join(savedir,'file_list.p'),'wb') as f:
pickle.dump(bucketed_files, f)
def main(file, dynamic, metadata_file=None):
stories = get_stories(file)
dirname, ext = os.path.splitext(file)
preprocess_stories(stories, dirname, dynamic, metadata_file)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Parse a graph file')
parser.add_argument("file", help="Graph file to parse")
parser.add_argument("--static", dest="dynamic", action="store_false", help="Don't use dynamic nodes")
parser.add_argument("--metadata-file", default=None, help="Use this particular metadata file instead of building it from scratch")
args = vars(parser.parse_args())
main(**args)
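# Example invocations (a sketch; file paths are illustrative only):
#   python <this_script>.py tasks/qa1_train.txt
#   python <this_script>.py tasks/qa1_test.txt --metadata-file tasks/qa1_train/metadata.p
# The second form reuses previously generated metadata so the word / answer /
# node / edge maps stay consistent across splits. Preprocessed stories are
# written under a directory named after the input file (extension stripped),
# as 'metadata.p', 'file_list.p' and per-bucket 'story_<i>.pz' files.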
| 2.703125
| 3
|
infrastructure/gateways.py
|
aleksarias/ubermaton
| 0
|
12781161
|
from typing import List, Union, Tuple, Dict, Set
import googlemaps
import networkx as nx
from networkx.algorithms import shortest_paths
from domain.gateways import DirectionsGateway
from domain.models import Location
class NetworkXGateway(DirectionsGateway):
def __init__(self, graph: nx.Graph):
"""
        Uses the networkx package to build a graph over which directions and travel distances can be
        computed. For a list of functions that generate commonly useful graphs, please see:
https://networkx.github.io/documentation/stable/reference/generators.html
:param graph:
"""
        assert graph.number_of_nodes() > 0, "Graph cannot be empty"
self._graph = graph
print('Graph initialized')
def validate_location(self, location: Location):
assert location.coordinates in self._graph.nodes
def get_next_destination(self, origin: Location, destinations: List[Location]) -> Location:
assert isinstance(origin, Location)
for d in destinations:
assert isinstance(d, Location)
destination_lengths = [
shortest_paths.shortest_path_length(self._graph, origin.coordinates, d.coordinates) for d in destinations
]
closest_destination = destinations[destination_lengths.index(min(destination_lengths))]
return closest_destination
def shortest_path_to_destination(self, origin: Location, destination: Location) -> List[Location]:
path: List[Tuple[int]] = shortest_paths.shortest_path(self._graph, origin.coordinates, destination.coordinates)
return [Location(node[0], node[1]) for node in path]
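# Minimal usage sketch for NetworkXGateway (illustrative only; it assumes
# Location(x, y).coordinates matches the (row, col) node labels produced by
# nx.grid_2d_graph, mirroring how path nodes are unpacked above):
def _example_grid_route():
    grid = nx.grid_2d_graph(5, 5)  # nodes are (row, col) tuples
    gateway = NetworkXGateway(grid)
    origin = Location(0, 0)
    stops = [Location(4, 4), Location(1, 2)]
    nearest = gateway.get_next_destination(origin, stops)
    return gateway.shortest_path_to_destination(origin, nearest)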
class GoogleDirectionsGateway(DirectionsGateway):
"""
https://developers.google.com/maps/documentation/
"""
def __init__(self, api_key: str):
"""
To get an API get from google:
https://cloud.google.com/docs/authentication/api-keys#creating_an_api_key
Make sure to enable products: Directions API, Distance Matrix API, and Geocoding API
:param api_key:
"""
self._client = googlemaps.Client(key=api_key)
    def _geocode(self, request):
        # TODO: create request and response schema for api
        raise NotImplementedError
    def _distance_matrix(self, request):
        # TODO: create request and response schema for api
        raise NotImplementedError
def get_address_location(self, address: str) -> Location:
"""
Convenience method for converting an address to a Location type
:param address:
:return:
"""
result: dict = self._client.geocode(address)
x, y = result[0]['geometry']['location'].values()
return Location(x, y)
def _get_distance_matrix(self, origin: Location, destinations: List[Location]) -> List[dict]:
"""
Accepts an origin and a list of destinations and returns a list that contains the distance to each destination
from the origin
:param origin:
:param destinations:
:return:
"""
        destination_coords: List[tuple] = self._convert_locations_to_coordinates(destinations)
        result = self._client.distance_matrix(origin.coordinates, destination_coords)
        destinations_with_costs: List[dict] = [
            {**cost, 'location': destination}
            for destination, cost in zip(destinations, result['rows'][0]['elements'])
        ]
        return destinations_with_costs
@staticmethod
def _convert_locations_to_coordinates(locations: List[Location]) -> List[tuple]:
"""
Converts Location type to a coordinate tuple, (x,y)
:param locations:
:return:
"""
return [l.coordinates for l in locations]
def get_next_destination(self, origin: Location, destinations: List[Location]) -> Location:
"""
        Accepts an origin and a list of destinations and returns the destination that can be reached from the
        origin with the smallest driving distance, i.e. the next stop to visit
:param origin:
:param destinations:
:return:
"""
# Make sure origin and destinations are of type Location (just in case)
origin = self.get_address_location(origin) if isinstance(origin, str) else origin
destinations: List[Location] = [
self.get_address_location(d) if isinstance(d, str) else d for d in destinations
]
path_costs = self._get_distance_matrix(origin, destinations)
next_destination = destinations[
path_costs.index(min(path_costs, key=lambda x: x['distance']['value']))
]
return next_destination
    def shortest_path_to_destination(self, origin: Location, destination: Location) -> List[Location]:
        raise NotImplementedError
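# Minimal usage sketch for GoogleDirectionsGateway (illustrative only; it
# requires a real API key with the Directions, Distance Matrix and Geocoding
# APIs enabled, and the addresses below are placeholders):
def _example_google_route(api_key: str) -> Location:
    gateway = GoogleDirectionsGateway(api_key)
    origin = gateway.get_address_location('1600 Amphitheatre Parkway, Mountain View, CA')
    stops = [
        gateway.get_address_location('1 Hacker Way, Menlo Park, CA'),
        gateway.get_address_location('1 Infinite Loop, Cupertino, CA'),
    ]
    return gateway.get_next_destination(origin, stops)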
| 3.34375
| 3
|
csv/demo5.py
|
silianpan/seal-spider-demo
| 0
|
12781162
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019-06-13 14:53
# @Author : liupan
# @Site :
# @File : demo5.py
# @Software: PyCharm
import csv
with open('data.csv', 'a', newline='') as csvfile:
fieldnames = ['id', 'name', 'age']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writerow({'id': '10004', 'name': 'Durant', 'age': 22})
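# If data.csv does not exist yet, a header row can be written first. A sketch
# using the same fieldnames (the row values are illustrative):
#
#     with open('data.csv', 'w', newline='') as csvfile:
#         writer = csv.DictWriter(csvfile, fieldnames=['id', 'name', 'age'])
#         writer.writeheader()
#         writer.writerow({'id': '10001', 'name': 'Mike', 'age': 20})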
| 3.125
| 3
|
python/p22.py
|
schwanberger/projectEuler
| 0
|
12781163
|
<reponame>schwanberger/projectEuler<filename>python/p22.py
# -*- encoding: utf-8 -*-
# Names scores: https://projecteuler.net/problem=22
# Problem 22 Using names.txt (right click and 'Save Link/Target As...'), a 46K
# text file containing over five-thousand first names, begin by sorting it into
# alphabetical order. Then working out the alphabetical value for each name,
# multiply this value by its alphabetical position in the list to obtain a name
# score.
#
# For example, when the list is sorted into alphabetical order, COLIN, which is
# worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list. So, COLIN
# would obtain a score of 938 × 53 = 49714.
#
# What is the total of all the name scores in the file?
import string
f = open('datasets/p022_names.txt', 'r')
data = f.read()
f.close()
data = data.replace('"', '')
data = data.split(',')
data.sort()
nameScoreSum = 0
count = 1
def getNameWorth(name):
nameWorth = 0
for letter in name:
        nameWorth += string.ascii_uppercase.index(letter) + 1
return nameWorth
for name in data:
nameScoreSum += getNameWorth(name) * count
count += 1
print(nameScoreSum)
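# Equivalent single-expression formulation (for reference; it reuses the same
# sorted `data` list and the stdlib string.ascii_uppercase):
#
#     total = sum(i * sum(string.ascii_uppercase.index(c) + 1 for c in name)
#                 for i, name in enumerate(data, start=1))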
| 3.78125
| 4
|
apps/quickbooks_online/tasks.py
|
fylein/fyle-qbo-api
| 0
|
12781164
|
import logging
import json
import traceback
from typing import List
from datetime import datetime, timedelta
from django.db import transaction
from django.db.models import Q
from django_q.tasks import Chain
from django_q.models import Schedule
from qbosdk.exceptions import WrongParamsError
from fyle_accounting_mappings.models import Mapping, ExpenseAttribute, DestinationAttribute, EmployeeMapping
from fyle_qbo_api.exceptions import BulkError
from apps.fyle.models import ExpenseGroup, Reimbursement, Expense
from apps.tasks.models import TaskLog
from apps.mappings.models import GeneralMapping
from apps.workspaces.models import QBOCredential, FyleCredential, WorkspaceGeneralSettings
from apps.fyle.utils import FyleConnector
from .models import Bill, BillLineitem, Cheque, ChequeLineitem, CreditCardPurchase, CreditCardPurchaseLineitem, \
JournalEntry, JournalEntryLineitem, BillPayment, BillPaymentLineitem, QBOExpense, QBOExpenseLineitem
from .utils import QBOConnector
logger = logging.getLogger(__name__)
logger.level = logging.INFO
def get_or_create_credit_card_vendor(workspace_id: int, merchant: str):
"""
    Get or create default credit card vendor
:param workspace_id: Workspace Id
:param merchant: Fyle Expense Merchant
:return:
"""
qbo_credentials = QBOCredential.objects.get(workspace_id=workspace_id)
qbo_connection = QBOConnector(credentials_object=qbo_credentials, workspace_id=workspace_id)
vendor = None
if merchant:
try:
vendor = qbo_connection.get_or_create_vendor(merchant, create=False)
except WrongParamsError as bad_request:
logger.error(bad_request.response)
if not vendor:
vendor = qbo_connection.get_or_create_vendor('Credit Card Misc', create=True)
return vendor
def load_attachments(qbo_connection: QBOConnector, ref_id: str, ref_type: str, expense_group: ExpenseGroup):
"""
Get attachments from fyle
:param qbo_connection: QBO Connection
:param ref_id: object id
:param ref_type: type of object
:param expense_group: Expense group
"""
try:
fyle_credentials = FyleCredential.objects.get(workspace_id=expense_group.workspace_id)
expense_ids = expense_group.expenses.values_list('expense_id', flat=True)
fyle_connector = FyleConnector(fyle_credentials.refresh_token, expense_group.workspace_id)
attachments = fyle_connector.get_attachments(expense_ids)
qbo_connection.post_attachments(ref_id, ref_type, attachments)
except Exception:
error = traceback.format_exc()
logger.error(
'Attachment failed for expense group id %s / workspace id %s \n Error: %s',
expense_group.id, expense_group.workspace_id, {'error': error}
)
def create_or_update_employee_mapping(expense_group: ExpenseGroup, qbo_connection: QBOConnector,
auto_map_employees_preference: str):
try:
vendor_mapping = EmployeeMapping.objects.get(
source_employee__value=expense_group.description.get('employee_email'),
workspace_id=expense_group.workspace_id
).destination_vendor
if not vendor_mapping:
raise EmployeeMapping.DoesNotExist
except EmployeeMapping.DoesNotExist:
source_employee = ExpenseAttribute.objects.get(
workspace_id=expense_group.workspace_id,
attribute_type='EMPLOYEE',
value=expense_group.description.get('employee_email')
)
try:
if auto_map_employees_preference == 'EMAIL':
filters = {
'detail__email__iexact': source_employee.value,
'attribute_type': 'VENDOR'
}
else:
filters = {
'value__iexact': source_employee.detail['full_name'],
'attribute_type': 'VENDOR'
}
entity = DestinationAttribute.objects.filter(
workspace_id=expense_group.workspace_id,
**filters
).first()
if entity is None:
entity: DestinationAttribute = qbo_connection.get_or_create_vendor(
vendor_name=source_employee.detail['full_name'],
email=source_employee.value,
create=True
)
existing_employee_mapping = EmployeeMapping.objects.filter(
source_employee=source_employee
).first()
destination = {}
if existing_employee_mapping:
destination['destination_employee_id'] = existing_employee_mapping.destination_employee_id
destination['destination_card_account_id'] = existing_employee_mapping.destination_card_account_id
mapping = EmployeeMapping.create_or_update_employee_mapping(
source_employee_id=source_employee.id,
destination_vendor_id=entity.id,
workspace=expense_group.workspace,
**destination
)
mapping.source_employee.auto_mapped = True
mapping.source_employee.save()
mapping.destination_vendor.auto_created = True
mapping.destination_vendor.save()
except WrongParamsError as bad_request:
logger.error(bad_request.response)
error_response = json.loads(bad_request.response)['Fault']['Error'][0]
# This error code comes up when the vendor or employee already exists
if error_response['code'] == '6240':
logger.error(
'Destination Attribute with value %s not found in workspace %s',
source_employee.detail['full_name'],
expense_group.workspace_id
)
raise BulkError('Mappings are missing', [{
'row': None,
'expense_group_id': expense_group.id,
'value': expense_group.description.get('employee_email'),
'type': 'Employee Mapping',
'message': 'Employee mapping not found'
}])
def handle_quickbooks_error(exception, expense_group: ExpenseGroup, task_log: TaskLog, export_type: str):
logger.info(exception.response)
response = json.loads(exception.response)
quickbooks_errors = response['Fault']['Error']
error_msg = 'Failed to create {0}'.format(export_type)
errors = []
for error in quickbooks_errors:
errors.append({
'expense_group_id': expense_group.id,
'type': '{0} / {1}'.format(response['Fault']['type'], error['code']),
'short_description': error['Message'] if error['Message'] else '{0} error'.format(export_type),
'long_description': error['Detail'] if error['Detail'] else error_msg
})
task_log.status = 'FAILED'
task_log.detail = None
task_log.quickbooks_errors = errors
task_log.save()
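# For reference, a sketch of the QBO error payload this handler expects (field
# names follow the parsing above; the values shown are illustrative only):
#
#     {"Fault": {"type": "ValidationFault",
#                "Error": [{"code": "6240", "Message": "...", "Detail": "..."}]}}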
def schedule_bills_creation(workspace_id: int, expense_group_ids: List[str]):
"""
Schedule bills creation
:param expense_group_ids: List of expense group ids
:param workspace_id: workspace id
:return: None
"""
if expense_group_ids:
expense_groups = ExpenseGroup.objects.filter(
Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']),
workspace_id=workspace_id, id__in=expense_group_ids, bill__id__isnull=True, exported_at__isnull=True
).all()
chain = Chain()
for expense_group in expense_groups:
task_log, _ = TaskLog.objects.get_or_create(
workspace_id=expense_group.workspace_id,
expense_group=expense_group,
defaults={
'status': 'ENQUEUED',
'type': 'CREATING_BILL'
}
)
if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']:
task_log.type = 'CREATING_BILL'
task_log.status = 'ENQUEUED'
task_log.save()
chain.append('apps.quickbooks_online.tasks.create_bill', expense_group, task_log.id)
if chain.length():
chain.run()
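# Example call (a sketch with illustrative values, e.g. from a view or a
# management command): schedule_bills_creation(workspace_id=1,
# expense_group_ids=['10', '11']). Each eligible expense group gets an
# ENQUEUED TaskLog and a chained 'create_bill' task picked up by django-q.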
def create_bill(expense_group, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
if task_log.status not in ['IN_PROGRESS', 'COMPLETE']:
task_log.status = 'IN_PROGRESS'
task_log.save()
else:
return
general_settings = WorkspaceGeneralSettings.objects.get(workspace_id=expense_group.workspace_id)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=expense_group.workspace_id)
qbo_connection = QBOConnector(qbo_credentials, expense_group.workspace_id)
if expense_group.fund_source == 'PERSONAL' and general_settings.auto_map_employees \
and general_settings.auto_create_destination_entity \
and general_settings.auto_map_employees != 'EMPLOYEE_CODE':
create_or_update_employee_mapping(expense_group, qbo_connection, general_settings.auto_map_employees)
with transaction.atomic():
__validate_expense_group(expense_group, general_settings)
bill_object = Bill.create_bill(expense_group)
bill_lineitems_objects = BillLineitem.create_bill_lineitems(expense_group, general_settings)
created_bill = qbo_connection.post_bill(bill_object, bill_lineitems_objects)
task_log.detail = created_bill
task_log.bill = bill_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
expense_group.exported_at = datetime.now()
expense_group.response_logs = created_bill
expense_group.save()
load_attachments(qbo_connection, created_bill['Bill']['Id'], 'Bill', expense_group)
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
expense_group.workspace_id,
expense_group.id
)
detail = {
'expense_group_id': expense_group.id,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, expense_group, task_log, 'Bill')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error('Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def __validate_expense_group(expense_group: ExpenseGroup, general_settings: WorkspaceGeneralSettings):
bulk_errors = []
row = 0
general_mapping = None
try:
general_mapping = GeneralMapping.objects.get(workspace_id=expense_group.workspace_id)
except GeneralMapping.DoesNotExist:
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'bank account',
'type': 'General Mapping',
'message': 'General mapping not found'
})
if general_settings.corporate_credit_card_expenses_object and \
general_settings.corporate_credit_card_expenses_object == 'BILL' and \
expense_group.fund_source == 'CCC':
if general_mapping:
if not (general_mapping.default_ccc_vendor_id or general_mapping.default_ccc_vendor_name):
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': expense_group.description.get('employee_email'),
'type': 'General Mapping',
'message': 'Default Credit Card Vendor not found'
})
if general_mapping and not (general_mapping.accounts_payable_id or general_mapping.accounts_payable_name):
if (general_settings.reimbursable_expenses_object == 'BILL' or \
general_settings.corporate_credit_card_expenses_object == 'BILL') or (
general_settings.reimbursable_expenses_object == 'JOURNAL ENTRY' and
general_settings.employee_field_mapping == 'VENDOR' and expense_group.fund_source == 'PERSONAL'):
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'Accounts Payable',
'type': 'General Mapping',
'message': 'Accounts Payable not found'
})
if general_mapping and not (general_mapping.bank_account_id or general_mapping.bank_account_name) and \
(
(
general_settings.reimbursable_expenses_object == 'CHECK'
or (
general_settings.reimbursable_expenses_object == 'JOURNAL ENTRY' and
general_settings.employee_field_mapping == 'EMPLOYEE' and expense_group.fund_source == 'PERSONAL'
)
)
):
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'Bank Account',
'type': 'General Mapping',
'message': 'Bank Account not found'
})
if general_mapping and not (general_mapping.qbo_expense_account_id or general_mapping.qbo_expense_account_name)\
and general_settings.reimbursable_expenses_object == 'EXPENSE':
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'Expense Payment Account',
'type': 'General Mapping',
'message': 'Expense Payment Account not found'
})
if general_settings.corporate_credit_card_expenses_object == 'CREDIT CARD PURCHASE' or \
general_settings.corporate_credit_card_expenses_object == 'JOURNAL ENTRY':
ccc_account_mapping: EmployeeMapping = EmployeeMapping.objects.filter(
source_employee__value=expense_group.description.get('employee_email'),
workspace_id=expense_group.workspace_id
).first()
ccc_account_id = None
if ccc_account_mapping and ccc_account_mapping.destination_card_account:
ccc_account_id = ccc_account_mapping.destination_card_account.destination_id
elif general_mapping:
ccc_account_id = general_mapping.default_ccc_account_id
if not ccc_account_id:
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': expense_group.description.get('employee_email'),
'type': 'Employee / General Mapping',
'message': 'CCC account mapping / Default CCC account mapping not found'
})
if general_settings.corporate_credit_card_expenses_object != 'BILL' and expense_group.fund_source == 'CCC':
        if general_mapping and not (general_mapping.default_ccc_account_id or general_mapping.default_ccc_account_name):
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'Default Credit Card Account',
'type': 'General Mapping',
'message': 'Default Credit Card Account not found'
})
    if general_settings.import_tax_codes and general_mapping and not (general_mapping.default_tax_code_id or general_mapping.default_tax_code_name):
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': 'Default Tax Code',
'type': 'General Mapping',
'message': 'Default Tax Code not found'
})
if not (expense_group.fund_source == 'CCC' and \
((general_settings.corporate_credit_card_expenses_object == 'CREDIT CARD PURCHASE' and \
general_settings.map_merchant_to_vendor) or \
general_settings.corporate_credit_card_expenses_object == 'BILL')):
try:
entity = EmployeeMapping.objects.get(
source_employee__value=expense_group.description.get('employee_email'),
workspace_id=expense_group.workspace_id
)
if general_settings.employee_field_mapping == 'EMPLOYEE':
entity = entity.destination_employee
else:
entity = entity.destination_vendor
if not entity:
raise EmployeeMapping.DoesNotExist
except EmployeeMapping.DoesNotExist:
bulk_errors.append({
'row': None,
'expense_group_id': expense_group.id,
'value': expense_group.description.get('employee_email'),
'type': 'Employee Mapping',
'message': 'Employee mapping not found'
})
expenses = expense_group.expenses.all()
for lineitem in expenses:
category = lineitem.category if lineitem.category == lineitem.sub_category else '{0} / {1}'.format(
lineitem.category, lineitem.sub_category)
account = Mapping.objects.filter(
source_type='CATEGORY',
source__value=category,
workspace_id=expense_group.workspace_id
).first()
if not account:
bulk_errors.append({
'row': row,
'expense_group_id': expense_group.id,
'value': category,
'type': 'Category Mapping',
'message': 'Category Mapping not found'
})
if general_settings.import_tax_codes and lineitem.tax_group_id:
tax_group = ExpenseAttribute.objects.get(
workspace_id=expense_group.workspace_id,
attribute_type='TAX_GROUP',
source_id=lineitem.tax_group_id
)
tax_code = Mapping.objects.filter(
source_type='TAX_GROUP',
source__value=tax_group.value,
workspace_id=expense_group.workspace_id
).first()
if not tax_code:
bulk_errors.append({
'row': row,
'expense_group_id': expense_group.id,
'value': tax_group.value,
'type': 'Tax Group Mapping',
'message': 'Tax Group Mapping not found'
})
row = row + 1
if bulk_errors:
raise BulkError('Mappings are missing', bulk_errors)
def schedule_cheques_creation(workspace_id: int, expense_group_ids: List[str]):
"""
Schedule cheque creation
:param expense_group_ids: List of expense group ids
:param workspace_id: workspace id
:return: None
"""
if expense_group_ids:
expense_groups = ExpenseGroup.objects.filter(
Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']),
workspace_id=workspace_id, id__in=expense_group_ids, cheque__id__isnull=True, exported_at__isnull=True
).all()
chain = Chain()
for expense_group in expense_groups:
task_log, _ = TaskLog.objects.get_or_create(
workspace_id=expense_group.workspace_id,
expense_group=expense_group,
defaults={
'status': 'ENQUEUED',
'type': 'CREATING_CHECK'
}
)
if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']:
task_log.type = 'CREATING_CHECK'
task_log.status = 'ENQUEUED'
task_log.save()
chain.append('apps.quickbooks_online.tasks.create_cheque', expense_group, task_log.id)
if chain.length():
chain.run()
def create_cheque(expense_group, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
if task_log.status not in ['IN_PROGRESS', 'COMPLETE']:
task_log.status = 'IN_PROGRESS'
task_log.save()
else:
return
general_settings = WorkspaceGeneralSettings.objects.get(workspace_id=expense_group.workspace_id)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=expense_group.workspace_id)
qbo_connection = QBOConnector(qbo_credentials, expense_group.workspace_id)
if general_settings.auto_map_employees and general_settings.auto_create_destination_entity:
create_or_update_employee_mapping(expense_group, qbo_connection, general_settings.auto_map_employees)
with transaction.atomic():
__validate_expense_group(expense_group, general_settings)
cheque_object = Cheque.create_cheque(expense_group)
cheque_line_item_objects = ChequeLineitem.create_cheque_lineitems(expense_group, general_settings)
created_cheque = qbo_connection.post_cheque(cheque_object, cheque_line_item_objects)
task_log.detail = created_cheque
task_log.cheque = cheque_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
expense_group.exported_at = datetime.now()
expense_group.response_logs = created_cheque
expense_group.save()
load_attachments(qbo_connection, created_cheque['Purchase']['Id'], 'Purchase', expense_group)
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
            expense_group.workspace_id,
            expense_group.id
)
detail = {
'expense_group_id': expense_group.id,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, expense_group, task_log, 'Check')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error('Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def schedule_qbo_expense_creation(workspace_id: int, expense_group_ids: List[str]):
"""
Schedule QBO expense creation
:param expense_group_ids: List of expense group ids
:param workspace_id: workspace id
:return: None
"""
if expense_group_ids:
expense_groups = ExpenseGroup.objects.filter(
Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']),
workspace_id=workspace_id, id__in=expense_group_ids, qboexpense__id__isnull=True, exported_at__isnull=True
).all()
chain = Chain()
for expense_group in expense_groups:
task_log, _ = TaskLog.objects.get_or_create(
workspace_id=expense_group.workspace_id,
expense_group=expense_group,
defaults={
'status': 'ENQUEUED',
'type': 'CREATING_EXPENSE'
}
)
if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']:
task_log.type = 'CREATING_EXPENSE'
task_log.status = 'ENQUEUED'
task_log.save()
chain.append('apps.quickbooks_online.tasks.create_qbo_expense', expense_group, task_log.id)
if chain.length():
chain.run()
def create_qbo_expense(expense_group, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
if task_log.status not in ['IN_PROGRESS', 'COMPLETE']:
task_log.status = 'IN_PROGRESS'
task_log.save()
else:
return
general_settings = WorkspaceGeneralSettings.objects.get(workspace_id=expense_group.workspace_id)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=expense_group.workspace_id)
qbo_connection = QBOConnector(qbo_credentials, expense_group.workspace_id)
if general_settings.auto_map_employees and general_settings.auto_create_destination_entity:
create_or_update_employee_mapping(expense_group, qbo_connection, general_settings.auto_map_employees)
with transaction.atomic():
__validate_expense_group(expense_group, general_settings)
qbo_expense_object = QBOExpense.create_qbo_expense(expense_group)
qbo_expense_line_item_objects = QBOExpenseLineitem.create_qbo_expense_lineitems(
expense_group, general_settings
)
created_qbo_expense = qbo_connection.post_qbo_expense(qbo_expense_object, qbo_expense_line_item_objects)
task_log.detail = created_qbo_expense
task_log.qbo_expense = qbo_expense_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
expense_group.exported_at = datetime.now()
expense_group.response_logs = created_qbo_expense
expense_group.save()
load_attachments(qbo_connection, created_qbo_expense['Purchase']['Id'], 'Purchase', expense_group)
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
            expense_group.workspace_id,
            expense_group.id
)
detail = {
'expense_group_id': expense_group.id,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, expense_group, task_log, 'Expense')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error('Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def schedule_credit_card_purchase_creation(workspace_id: int, expense_group_ids: List[str]):
"""
Schedule credit card purchase creation
:param expense_group_ids: List of expense group ids
:param workspace_id: workspace id
:return: None
"""
if expense_group_ids:
expense_groups = ExpenseGroup.objects.filter(
Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']),
workspace_id=workspace_id, id__in=expense_group_ids, creditcardpurchase__id__isnull=True,
exported_at__isnull=True
).all()
chain = Chain()
for expense_group in expense_groups:
task_log, _ = TaskLog.objects.get_or_create(
workspace_id=expense_group.workspace_id,
expense_group=expense_group,
defaults={
'status': 'ENQUEUED',
'type': 'CREATING_CREDIT_CARD_PURCHASE'
}
)
if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']:
task_log.type = 'CREATING_CREDIT_CARD_PURCHASE'
task_log.status = 'ENQUEUED'
task_log.save()
chain.append('apps.quickbooks_online.tasks.create_credit_card_purchase', expense_group, task_log.id)
if chain.length():
chain.run()
def create_credit_card_purchase(expense_group: ExpenseGroup, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
if task_log.status not in ['IN_PROGRESS', 'COMPLETE']:
task_log.status = 'IN_PROGRESS'
task_log.save()
else:
return
general_settings = WorkspaceGeneralSettings.objects.get(workspace_id=expense_group.workspace_id)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=expense_group.workspace_id)
qbo_connection = QBOConnector(qbo_credentials, int(expense_group.workspace_id))
if not general_settings.map_merchant_to_vendor:
if general_settings.auto_map_employees and general_settings.auto_create_destination_entity \
and general_settings.auto_map_employees != 'EMPLOYEE_CODE':
create_or_update_employee_mapping(expense_group, qbo_connection, general_settings.auto_map_employees)
else:
merchant = expense_group.expenses.first().vendor
get_or_create_credit_card_vendor(expense_group.workspace_id, merchant)
with transaction.atomic():
__validate_expense_group(expense_group, general_settings)
credit_card_purchase_object = CreditCardPurchase.create_credit_card_purchase(
expense_group, general_settings.map_merchant_to_vendor)
credit_card_purchase_lineitems_objects = CreditCardPurchaseLineitem.create_credit_card_purchase_lineitems(
expense_group, general_settings
)
created_credit_card_purchase = qbo_connection.post_credit_card_purchase(
credit_card_purchase_object, credit_card_purchase_lineitems_objects
)
task_log.detail = created_credit_card_purchase
task_log.credit_card_purchase = credit_card_purchase_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
expense_group.exported_at = datetime.now()
expense_group.response_logs = created_credit_card_purchase
expense_group.save()
load_attachments(qbo_connection, created_credit_card_purchase['Purchase']['Id'], 'Purchase', expense_group)
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
            expense_group.workspace_id,
            expense_group.id
)
detail = {
'expense_group_id': expense_group.id,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, expense_group, task_log, 'Credit Card Purchase')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error('Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def schedule_journal_entry_creation(workspace_id: int, expense_group_ids: List[str]):
"""
Schedule journal_entry creation
:param expense_group_ids: List of expense group ids
:param workspace_id: workspace id
:return: None
"""
if expense_group_ids:
expense_groups = ExpenseGroup.objects.filter(
Q(tasklog__id__isnull=True) | ~Q(tasklog__status__in=['IN_PROGRESS', 'COMPLETE']),
workspace_id=workspace_id, id__in=expense_group_ids, journalentry__id__isnull=True, exported_at__isnull=True
).all()
chain = Chain()
for expense_group in expense_groups:
task_log, _ = TaskLog.objects.get_or_create(
workspace_id=expense_group.workspace_id,
expense_group=expense_group,
defaults={
'status': 'ENQUEUED',
'type': 'CREATING_JOURNAL_ENTRY'
}
)
if task_log.status not in ['IN_PROGRESS', 'ENQUEUED']:
task_log.type = 'CREATING_JOURNAL_ENTRY'
task_log.status = 'ENQUEUED'
task_log.save()
chain.append('apps.quickbooks_online.tasks.create_journal_entry', expense_group, task_log.id)
if chain.length():
chain.run()
def create_journal_entry(expense_group, task_log_id):
task_log = TaskLog.objects.get(id=task_log_id)
if task_log.status not in ['IN_PROGRESS', 'COMPLETE']:
task_log.status = 'IN_PROGRESS'
task_log.save()
else:
return
general_settings = WorkspaceGeneralSettings.objects.get(workspace_id=expense_group.workspace_id)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=expense_group.workspace_id)
qbo_connection = QBOConnector(qbo_credentials, expense_group.workspace_id)
if general_settings.auto_map_employees and general_settings.auto_create_destination_entity \
and general_settings.auto_map_employees != 'EMPLOYEE_CODE':
create_or_update_employee_mapping(expense_group, qbo_connection, general_settings.auto_map_employees)
with transaction.atomic():
__validate_expense_group(expense_group, general_settings)
journal_entry_object = JournalEntry.create_journal_entry(expense_group)
journal_entry_lineitems_objects = JournalEntryLineitem.create_journal_entry_lineitems(
expense_group, general_settings
)
created_journal_entry = qbo_connection.post_journal_entry(
journal_entry_object, journal_entry_lineitems_objects, general_settings.je_single_credit_line)
task_log.detail = created_journal_entry
task_log.journal_entry = journal_entry_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
expense_group.exported_at = datetime.now()
expense_group.response_logs = created_journal_entry
expense_group.save()
load_attachments(qbo_connection, created_journal_entry['JournalEntry']['Id'], 'JournalEntry', expense_group)
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
            expense_group.workspace_id,
            expense_group.id
)
detail = {
'expense_group_id': expense_group.id,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, expense_group, task_log, 'Journal Entries')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error('Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def check_expenses_reimbursement_status(expenses):
all_expenses_paid = True
for expense in expenses:
reimbursement = Reimbursement.objects.filter(settlement_id=expense.settlement_id).first()
        if reimbursement is None or reimbursement.state != 'COMPLETE':
all_expenses_paid = False
return all_expenses_paid
def create_bill_payment(workspace_id):
fyle_credentials = FyleCredential.objects.get(workspace_id=workspace_id)
fyle_connector = FyleConnector(fyle_credentials.refresh_token, workspace_id)
fyle_connector.sync_reimbursements()
bills = Bill.objects.filter(
payment_synced=False, expense_group__workspace_id=workspace_id,
expense_group__fund_source='PERSONAL'
).all()
if bills:
for bill in bills:
expense_group_reimbursement_status = check_expenses_reimbursement_status(
bill.expense_group.expenses.all())
if expense_group_reimbursement_status:
task_log, _ = TaskLog.objects.update_or_create(
workspace_id=workspace_id,
task_id='PAYMENT_{}'.format(bill.expense_group.id),
defaults={
'status': 'IN_PROGRESS',
'type': 'CREATING_BILL_PAYMENT'
}
)
try:
qbo_credentials = QBOCredential.objects.get(workspace_id=workspace_id)
qbo_connection = QBOConnector(qbo_credentials, workspace_id)
with transaction.atomic():
bill_payment_object = BillPayment.create_bill_payment(bill.expense_group)
qbo_object_task_log = TaskLog.objects.get(expense_group=bill.expense_group)
linked_transaction_id = qbo_object_task_log.detail['Bill']['Id']
bill_payment_lineitems_objects = BillPaymentLineitem.create_bill_payment_lineitems(
bill_payment_object.expense_group, linked_transaction_id
)
created_bill_payment = qbo_connection.post_bill_payment(
bill_payment_object, bill_payment_lineitems_objects
)
bill.payment_synced = True
bill.paid_on_qbo = True
bill.save()
task_log.detail = created_bill_payment
task_log.bill_payment = bill_payment_object
task_log.quickbooks_errors = None
task_log.status = 'COMPLETE'
task_log.save()
except QBOCredential.DoesNotExist:
logger.info(
'QBO Credentials not found for workspace_id %s / expense group %s',
workspace_id,
bill.expense_group
)
detail = {
'expense_group_id': bill.expense_group,
'message': 'QBO Account not connected'
}
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except BulkError as exception:
logger.info(exception.response)
detail = exception.response
task_log.status = 'FAILED'
task_log.detail = detail
task_log.save()
except WrongParamsError as exception:
handle_quickbooks_error(exception, bill.expense_group, task_log, 'Bill Payment')
except Exception:
error = traceback.format_exc()
task_log.detail = {
'error': error
}
task_log.status = 'FATAL'
task_log.save()
logger.error(
'Something unexpected happened workspace_id: %s %s', task_log.workspace_id, task_log.detail)
def schedule_bill_payment_creation(sync_fyle_to_qbo_payments, workspace_id):
general_mappings: GeneralMapping = GeneralMapping.objects.filter(workspace_id=workspace_id).first()
if general_mappings:
if sync_fyle_to_qbo_payments and general_mappings.bill_payment_account_id:
start_datetime = datetime.now()
schedule, _ = Schedule.objects.update_or_create(
func='apps.quickbooks_online.tasks.create_bill_payment',
args='{}'.format(workspace_id),
defaults={
'schedule_type': Schedule.MINUTES,
'minutes': 24 * 60,
'next_run': start_datetime
}
)
if not sync_fyle_to_qbo_payments:
schedule: Schedule = Schedule.objects.filter(
func='apps.quickbooks_online.tasks.create_bill_payment',
args='{}'.format(workspace_id)
).first()
if schedule:
schedule.delete()
def get_all_qbo_object_ids(qbo_objects):
qbo_objects_details = {}
expense_group_ids = [qbo_object.expense_group_id for qbo_object in qbo_objects]
task_logs = TaskLog.objects.filter(expense_group_id__in=expense_group_ids).all()
for task_log in task_logs:
qbo_objects_details[task_log.expense_group.id] = {
'expense_group': task_log.expense_group,
'qbo_object_id': task_log.detail['Bill']['Id']
}
return qbo_objects_details
def check_qbo_object_status(workspace_id):
qbo_credentials = QBOCredential.objects.get(workspace_id=workspace_id)
qbo_connection = QBOConnector(qbo_credentials, workspace_id)
bills = Bill.objects.filter(
expense_group__workspace_id=workspace_id, paid_on_qbo=False, expense_group__fund_source='PERSONAL'
).all()
if bills:
bill_ids = get_all_qbo_object_ids(bills)
for bill in bills:
bill_object = qbo_connection.get_bill(bill_ids[bill.expense_group.id]['qbo_object_id'])
if 'LinkedTxn' in bill_object:
line_items = BillLineitem.objects.filter(bill_id=bill.id)
for line_item in line_items:
expense = line_item.expense
expense.paid_on_qbo = True
expense.save()
bill.paid_on_qbo = True
bill.payment_synced = True
bill.save()
def schedule_qbo_objects_status_sync(sync_qbo_to_fyle_payments, workspace_id):
if sync_qbo_to_fyle_payments:
start_datetime = datetime.now()
schedule, _ = Schedule.objects.update_or_create(
func='apps.quickbooks_online.tasks.check_qbo_object_status',
args='{}'.format(workspace_id),
defaults={
'schedule_type': Schedule.MINUTES,
'minutes': 24 * 60,
'next_run': start_datetime
}
)
else:
schedule: Schedule = Schedule.objects.filter(
func='apps.quickbooks_online.tasks.check_qbo_object_status',
args='{}'.format(workspace_id)
).first()
if schedule:
schedule.delete()
def process_reimbursements(workspace_id):
fyle_credentials = FyleCredential.objects.get(workspace_id=workspace_id)
fyle_connector = FyleConnector(fyle_credentials.refresh_token, workspace_id)
fyle_connector.sync_reimbursements()
reimbursements = Reimbursement.objects.filter(state='PENDING', workspace_id=workspace_id).all()
reimbursement_ids = []
if reimbursements:
for reimbursement in reimbursements:
expenses = Expense.objects.filter(settlement_id=reimbursement.settlement_id, fund_source='PERSONAL').all()
paid_expenses = expenses.filter(paid_on_qbo=True)
all_expense_paid = False
if len(expenses):
all_expense_paid = len(expenses) == len(paid_expenses)
if all_expense_paid:
reimbursement_ids.append(reimbursement.reimbursement_id)
if reimbursement_ids:
fyle_connector.post_reimbursement(reimbursement_ids)
fyle_connector.sync_reimbursements()
def schedule_reimbursements_sync(sync_qbo_to_fyle_payments, workspace_id):
if sync_qbo_to_fyle_payments:
start_datetime = datetime.now() + timedelta(hours=12)
schedule, _ = Schedule.objects.update_or_create(
func='apps.quickbooks_online.tasks.process_reimbursements',
args='{}'.format(workspace_id),
defaults={
'schedule_type': Schedule.MINUTES,
'minutes': 24 * 60,
'next_run': start_datetime
}
)
else:
schedule: Schedule = Schedule.objects.filter(
func='apps.quickbooks_online.tasks.process_reimbursements',
args='{}'.format(workspace_id)
).first()
if schedule:
schedule.delete()
def async_sync_accounts(workspace_id):
qbo_credentials: QBOCredential = QBOCredential.objects.get(workspace_id=workspace_id)
qbo_connection = QBOConnector(
credentials_object=qbo_credentials,
workspace_id=workspace_id
)
qbo_connection.sync_accounts()
| 1.820313
| 2
|
tests/test_umi.py
|
johannesnicolaus/celseq2
| 14
|
12781165
|
<filename>tests/test_umi.py
import pytest
import pickle
from pkg_resources import resource_filename
def test_umi(instance_count_umi):
ans_umi_cnt = resource_filename(
'celseq2',
'demo/{}'.format('BC-22-GTACTC.counter.pkl'))
ans_umi_cnt = pickle.load(open(ans_umi_cnt, 'rb'))
ans_umi_set = resource_filename(
'celseq2',
'demo/{}'.format('BC-22-GTACTC.set.pkl'))
ans_umi_set = pickle.load(open(ans_umi_set, 'rb'))
umi_cnt, umi_set = instance_count_umi
assert umi_cnt == ans_umi_cnt
# for calc, ans in zip(umi_set, ans_umi_set):
# assert c
assert umi_set == ans_umi_set
| 2.265625
| 2
|
bitmovin_api_sdk/encoding/inputs/http/http_api.py
|
jaythecaesarean/bitmovin-api-sdk-python
| 11
|
12781166
|
<gh_stars>10-100
# coding: utf-8
from __future__ import absolute_import
from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase
from bitmovin_api_sdk.common.poscheck import poscheck_except
from bitmovin_api_sdk.models.http_input import HttpInput
from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope
from bitmovin_api_sdk.models.response_error import ResponseError
from bitmovin_api_sdk.encoding.inputs.http.customdata.customdata_api import CustomdataApi
from bitmovin_api_sdk.encoding.inputs.http.http_input_list_query_params import HttpInputListQueryParams
class HttpApi(BaseApi):
@poscheck_except(2)
def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None):
# type: (str, str, str, BitmovinApiLoggerBase) -> None
super(HttpApi, self).__init__(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
self.customdata = CustomdataApi(
api_key=api_key,
tenant_org_id=tenant_org_id,
base_url=base_url,
logger=logger
)
def create(self, http_input, **kwargs):
# type: (HttpInput, dict) -> HttpInput
"""Create HTTP Input
:param http_input: The HTTP input to be created
:type http_input: HttpInput, required
:return: HTTP input
:rtype: HttpInput
"""
return self.api_client.post(
'/encoding/inputs/http',
http_input,
type=HttpInput,
**kwargs
)
def delete(self, input_id, **kwargs):
# type: (string_types, dict) -> HttpInput
"""Delete HTTP Input
:param input_id: Id of the input
:type input_id: string_types, required
:return: Id of the input
:rtype: HttpInput
"""
return self.api_client.delete(
'/encoding/inputs/http/{input_id}',
path_params={'input_id': input_id},
type=HttpInput,
**kwargs
)
def get(self, input_id, **kwargs):
# type: (string_types, dict) -> HttpInput
"""HTTP Input Details
:param input_id: Id of the input
:type input_id: string_types, required
:return: HTTP input
:rtype: HttpInput
"""
return self.api_client.get(
'/encoding/inputs/http/{input_id}',
path_params={'input_id': input_id},
type=HttpInput,
**kwargs
)
def list(self, query_params=None, **kwargs):
# type: (HttpInputListQueryParams, dict) -> HttpInput
"""List HTTP Inputs
:param query_params: Query parameters
:type query_params: HttpInputListQueryParams
:return: List of HTTP inputs
:rtype: HttpInput
"""
return self.api_client.get(
'/encoding/inputs/http',
query_params=query_params,
pagination_response=True,
type=HttpInput,
**kwargs
)
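# Minimal usage sketch (illustrative only; the HttpInput field names used here,
# such as `host`, are assumptions about the Bitmovin HTTP input model):
#
#     http_api = HttpApi(api_key='<API_KEY>', tenant_org_id='<ORG_ID>')
#     created = http_api.create(HttpInput(host='storage.example.com'))
#     fetched = http_api.get(input_id=created.id)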
| 2.03125
| 2
|
basics.py
|
younes-assou/opencv-beginner
| 0
|
12781167
|
import cv2 as cv
img = cv.imread("me1.jpg")
if img is None:
    raise FileNotFoundError("me1.jpg could not be read")
cv.imshow('me', img)
blured = cv.GaussianBlur(img, (3,3), cv.BORDER_DEFAULT)
cv.imshow('blured', blured)
canny = cv.Canny(img, 60,70)
cv.imshow('canny edges', canny)
cv.waitKey(0)
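# Canny is commonly run on a grayscale (and optionally blurred) image; a sketch
# of that variant with the same thresholds:
#
#     gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
#     canny_gray = cv.Canny(cv.GaussianBlur(gray, (3, 3), cv.BORDER_DEFAULT), 60, 70)
#     cv.imshow('canny on gray', canny_gray)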
| 2.84375
| 3
|
app/core/migrations/0002_auto_20201129_1325.py
|
bondeveloper/maischool
| 0
|
12781168
|
# Generated by Django 3.1.3 on 2020-11-29 13:25
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='attachment',
name='file',
field=models.FileField(blank=True, upload_to='attachments'),
),
migrations.AlterField(
model_name='session',
name='attendance',
field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
),
]
| 1.46875
| 1
|
google/cloud/datacatalog/v1/datacatalog-v1-py/google/cloud/datacatalog_v1/types/datacatalog.py
|
googleapis/googleapis-gen
| 7
|
12781169
|
<reponame>googleapis/googleapis-gen
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.datacatalog_v1.types import bigquery
from google.cloud.datacatalog_v1.types import common
from google.cloud.datacatalog_v1.types import data_source as gcd_data_source
from google.cloud.datacatalog_v1.types import gcs_fileset_spec as gcd_gcs_fileset_spec
from google.cloud.datacatalog_v1.types import schema as gcd_schema
from google.cloud.datacatalog_v1.types import search
from google.cloud.datacatalog_v1.types import table_spec
from google.cloud.datacatalog_v1.types import tags as gcd_tags
from google.cloud.datacatalog_v1.types import timestamps
from google.cloud.datacatalog_v1.types import usage
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.cloud.datacatalog.v1',
manifest={
'EntryType',
'SearchCatalogRequest',
'SearchCatalogResponse',
'CreateEntryGroupRequest',
'UpdateEntryGroupRequest',
'GetEntryGroupRequest',
'DeleteEntryGroupRequest',
'ListEntryGroupsRequest',
'ListEntryGroupsResponse',
'CreateEntryRequest',
'UpdateEntryRequest',
'DeleteEntryRequest',
'GetEntryRequest',
'LookupEntryRequest',
'Entry',
'DatabaseTableSpec',
'DataSourceConnectionSpec',
'RoutineSpec',
'EntryGroup',
'CreateTagTemplateRequest',
'GetTagTemplateRequest',
'UpdateTagTemplateRequest',
'DeleteTagTemplateRequest',
'CreateTagRequest',
'UpdateTagRequest',
'DeleteTagRequest',
'CreateTagTemplateFieldRequest',
'UpdateTagTemplateFieldRequest',
'RenameTagTemplateFieldRequest',
'RenameTagTemplateFieldEnumValueRequest',
'DeleteTagTemplateFieldRequest',
'ListTagsRequest',
'ListTagsResponse',
'ListEntriesRequest',
'ListEntriesResponse',
},
)
class EntryType(proto.Enum):
r"""The enum field that lists all the types of entry resources in Data
Catalog. For example, a BigQuery table entry has the ``TABLE`` type.
"""
ENTRY_TYPE_UNSPECIFIED = 0
TABLE = 2
MODEL = 5
DATA_STREAM = 3
FILESET = 4
CLUSTER = 6
DATABASE = 7
DATA_SOURCE_CONNECTION = 8
ROUTINE = 9
SERVICE = 14
class SearchCatalogRequest(proto.Message):
r"""Request message for
[SearchCatalog][google.cloud.datacatalog.v1.DataCatalog.SearchCatalog].
Attributes:
scope (google.cloud.datacatalog_v1.types.SearchCatalogRequest.Scope):
Required. The scope of this search request.
The ``scope`` is invalid if ``include_org_ids``,
``include_project_ids`` are empty AND
``include_gcp_public_datasets`` is set to ``false``. In this
case, the request returns an error.
query (str):
Optional. The query string with a minimum of 3 characters
and specific syntax. For more information, see `Data Catalog
search
syntax <https://cloud.google.com/data-catalog/docs/how-to/search-reference>`__.
An empty query string returns all data assets (in the
specified scope) that you have access to.
A query string can be a simple ``xyz`` or qualified by
predicates:
- ``name:x``
- ``column:y``
- ``description:z``
page_size (int):
Number of results to return in a single
search page.
Can't be negative or 0, defaults to 10 in this
case. The maximum number is 1000. If exceeded,
throws an "invalid argument" exception.
page_token (str):
Optional. Pagination token that, if specified, returns the
next page of search results. If empty, returns the first
page.
This token is returned in the
[SearchCatalogResponse.next_page_token][google.cloud.datacatalog.v1.SearchCatalogResponse.next_page_token]
field of the response to a previous
[SearchCatalogRequest][google.cloud.datacatalog.v1.DataCatalog.SearchCatalog]
call.
order_by (str):
Specifies the order of results.
Currently supported case-sensitive values are:
- ``relevance`` that can only be descending
- ``last_modified_timestamp [asc|desc]`` with descending
(``desc``) as default
If this parameter is omitted, it defaults to the descending
``relevance``.
"""
class Scope(proto.Message):
r"""The criteria that select the subspace used for query
matching.
Attributes:
include_org_ids (Sequence[str]):
The list of organization IDs to search within.
To find your organization ID, follow the steps from
[Creating and managing organizations]
(/resource-manager/docs/creating-managing-organization).
include_project_ids (Sequence[str]):
The list of project IDs to search within.
For more information on the distinction between project
names, IDs, and numbers, see
`Projects </docs/overview/#projects>`__.
include_gcp_public_datasets (bool):
If ``true``, include Google Cloud Platform (GCP) public
datasets in search results. By default, they are excluded.
See `Google Cloud Public Datasets </public-datasets>`__ for
more information.
restricted_locations (Sequence[str]):
Optional. The list of locations to search within. If empty,
all locations are searched.
Returns an error if any location in the list isn't one of
the `Supported
regions <https://cloud.google.com/data-catalog/docs/concepts/regions#supported_regions>`__.
If a location is unreachable, its name is returned in the
``SearchCatalogResponse.unreachable`` field. To get
additional information on the error, repeat the search
request and set the location name as the value of this
parameter.
include_public_tag_templates (bool):
Optional. If ``true``, include [public tag
templates][google.cloud.datacatalog.v1.TagTemplate.is_publicly_readable]
in the search results. By default, they are included only if
you have explicit permissions on them to view them. For
example, if you are the owner.
Other scope fields, for example, ``include_org_ids``, still
restrict the returned public tag templates and at least one
of them is required.
"""
include_org_ids = proto.RepeatedField(
proto.STRING,
number=2,
)
include_project_ids = proto.RepeatedField(
proto.STRING,
number=3,
)
include_gcp_public_datasets = proto.Field(
proto.BOOL,
number=7,
)
restricted_locations = proto.RepeatedField(
proto.STRING,
number=16,
)
include_public_tag_templates = proto.Field(
proto.BOOL,
number=19,
)
scope = proto.Field(
proto.MESSAGE,
number=6,
message=Scope,
)
query = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
order_by = proto.Field(
proto.STRING,
number=5,
)
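# Example (a sketch, not part of the generated module): building a search
# request from the fields defined above via the public types package:
#
#     from google.cloud import datacatalog_v1
#     request = datacatalog_v1.SearchCatalogRequest(
#         scope=datacatalog_v1.SearchCatalogRequest.Scope(
#             include_project_ids=['my-project'],
#             include_gcp_public_datasets=False,
#         ),
#         query='column:email',
#         page_size=10,
#         order_by='last_modified_timestamp desc',
#     )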
class SearchCatalogResponse(proto.Message):
r"""Response message for
[SearchCatalog][google.cloud.datacatalog.v1.DataCatalog.SearchCatalog].
Attributes:
results (Sequence[google.cloud.datacatalog_v1.types.SearchCatalogResult]):
Search results.
next_page_token (str):
Pagination token that can be used in
subsequent calls to retrieve the next page of
results.
unreachable (Sequence[str]):
Unreachable locations. Search results don't include data
from those locations.
To get additional information on an error, repeat the search
request and restrict it to specific locations by setting the
``SearchCatalogRequest.scope.restricted_locations``
parameter.
"""
@property
def raw_page(self):
return self
results = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=search.SearchCatalogResult,
)
next_page_token = proto.Field(
proto.STRING,
number=3,
)
unreachable = proto.RepeatedField(
proto.STRING,
number=6,
)
class CreateEntryGroupRequest(proto.Message):
r"""Request message for
[CreateEntryGroup][google.cloud.datacatalog.v1.DataCatalog.CreateEntryGroup].
Attributes:
parent (str):
Required. The names of the project and
location that the new entry group belongs to.
Note: The entry group itself and its child
resources might not be stored in the location
specified in its name.
entry_group_id (str):
Required. The ID of the entry group to create.
The ID must contain only letters (a-z, A-Z), numbers (0-9),
underscores (_), and must start with a letter or underscore.
The maximum size is 64 bytes when encoded in UTF-8.
entry_group (google.cloud.datacatalog_v1.types.EntryGroup):
The entry group to create. Defaults to empty.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
entry_group_id = proto.Field(
proto.STRING,
number=3,
)
entry_group = proto.Field(
proto.MESSAGE,
number=2,
message='EntryGroup',
)
class UpdateEntryGroupRequest(proto.Message):
r"""Request message for
[UpdateEntryGroup][google.cloud.datacatalog.v1.DataCatalog.UpdateEntryGroup].
Attributes:
entry_group (google.cloud.datacatalog_v1.types.EntryGroup):
Required. Updates for the entry group. The ``name`` field
must be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Names of fields whose values to overwrite on
an entry group.
If this parameter is absent or empty, all
modifiable fields are overwritten. If such
fields are non-required and omitted in the
request body, their values are emptied.
"""
entry_group = proto.Field(
proto.MESSAGE,
number=1,
message='EntryGroup',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class GetEntryGroupRequest(proto.Message):
r"""Request message for
[GetEntryGroup][google.cloud.datacatalog.v1.DataCatalog.GetEntryGroup].
Attributes:
name (str):
Required. The name of the entry group to get.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
The fields to return. If empty or omitted,
all fields are returned.
"""
name = proto.Field(
proto.STRING,
number=1,
)
read_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteEntryGroupRequest(proto.Message):
r"""Request message for
[DeleteEntryGroup][google.cloud.datacatalog.v1.DataCatalog.DeleteEntryGroup].
Attributes:
name (str):
Required. The name of the entry group to
delete.
force (bool):
Optional. If true, deletes all entries in the
entry group.
"""
name = proto.Field(
proto.STRING,
number=1,
)
force = proto.Field(
proto.BOOL,
number=2,
)
class ListEntryGroupsRequest(proto.Message):
r"""Request message for
[ListEntryGroups][google.cloud.datacatalog.v1.DataCatalog.ListEntryGroups].
Attributes:
parent (str):
Required. The name of the location that
contains the entry groups to list.
Can be provided as a URL.
page_size (int):
Optional. The maximum number of items to return.
Default is 10. Maximum limit is 1000. Throws an invalid
argument if ``page_size`` is greater than 1000.
page_token (str):
Optional. Pagination token that specifies the
next page to return. If empty, returns the first
page.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListEntryGroupsResponse(proto.Message):
r"""Response message for
[ListEntryGroups][google.cloud.datacatalog.v1.DataCatalog.ListEntryGroups].
Attributes:
entry_groups (Sequence[google.cloud.datacatalog_v1.types.EntryGroup]):
Entry group details.
next_page_token (str):
Pagination token to specify in the next call
to retrieve the next page of results. Empty if
there are no more items.
"""
@property
def raw_page(self):
return self
entry_groups = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='EntryGroup',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class CreateEntryRequest(proto.Message):
r"""Request message for
[CreateEntry][google.cloud.datacatalog.v1.DataCatalog.CreateEntry].
Attributes:
parent (str):
Required. The name of the entry group this
entry belongs to.
Note: The entry itself and its child resources
might not be stored in the location specified in
its name.
entry_id (str):
Required. The ID of the entry to create.
The ID must contain only letters (a-z, A-Z), numbers (0-9),
and underscores (_). The maximum size is 64 bytes when
encoded in UTF-8.
entry (google.cloud.datacatalog_v1.types.Entry):
Required. The entry to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
entry_id = proto.Field(
proto.STRING,
number=3,
)
entry = proto.Field(
proto.MESSAGE,
number=2,
message='Entry',
)
class UpdateEntryRequest(proto.Message):
r"""Request message for
[UpdateEntry][google.cloud.datacatalog.v1.DataCatalog.UpdateEntry].
Attributes:
entry (google.cloud.datacatalog_v1.types.Entry):
Required. Updates for the entry. The ``name`` field must be
set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Names of fields whose values to overwrite on an entry.
If this parameter is absent or empty, all modifiable fields
are overwritten. If such fields are non-required and omitted
in the request body, their values are emptied.
You can modify only the fields listed below.
For entries with type ``DATA_STREAM``:
- ``schema``
For entries with type ``FILESET``:
- ``schema``
- ``display_name``
- ``description``
- ``gcs_fileset_spec``
- ``gcs_fileset_spec.file_patterns``
For entries with ``user_specified_type``:
- ``schema``
- ``display_name``
- ``description``
- ``user_specified_type``
- ``user_specified_system``
- ``linked_resource``
- ``source_system_timestamps``
"""
entry = proto.Field(
proto.MESSAGE,
number=1,
message='Entry',
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
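# A hypothetical sketch of a partial update driven by ``update_mask``. The
# entry name and values are placeholders; per the docstring above, only the
# fields listed for the entry's type may appear in ``paths``, and an absent or
# empty mask overwrites all modifiable fields.
#
#     from google.protobuf import field_mask_pb2
#
#     request = UpdateEntryRequest(
#         entry=Entry(
#             name="projects/p/locations/l/entryGroups/g/entries/e",
#             description="Refreshed description",
#         ),
#         update_mask=field_mask_pb2.FieldMask(paths=["description"]),
#     )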
class DeleteEntryRequest(proto.Message):
r"""Request message for
[DeleteEntry][google.cloud.datacatalog.v1.DataCatalog.DeleteEntry].
Attributes:
name (str):
Required. The name of the entry to delete.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class GetEntryRequest(proto.Message):
r"""Request message for
[GetEntry][google.cloud.datacatalog.v1.DataCatalog.GetEntry].
Attributes:
name (str):
Required. The name of the entry to get.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class LookupEntryRequest(proto.Message):
r"""Request message for
[LookupEntry][google.cloud.datacatalog.v1.DataCatalog.LookupEntry].
Attributes:
linked_resource (str):
The full name of the Google Cloud Platform resource the Data
Catalog entry represents. For more information, see [Full
Resource Name]
(https://cloud.google.com/apis/design/resource_names#full_resource_name).
Full names are case-sensitive. For example:
- ``//bigquery.googleapis.com/projects/{PROJECT_ID}/datasets/{DATASET_ID}/tables/{TABLE_ID}``
- ``//pubsub.googleapis.com/projects/{PROJECT_ID}/topics/{TOPIC_ID}``
sql_resource (str):
The SQL name of the entry. SQL names are case-sensitive.
Examples:
- ``pubsub.topic.{PROJECT_ID}.{TOPIC_ID}``
- ``pubsub.topic.{PROJECT_ID}.``\ \`\ ``{TOPIC.ID.SEPARATED.WITH.DOTS}``\ \`
- ``bigquery.table.{PROJECT_ID}.{DATASET_ID}.{TABLE_ID}``
- ``bigquery.dataset.{PROJECT_ID}.{DATASET_ID}``
- ``datacatalog.entry.{PROJECT_ID}.{LOCATION_ID}.{ENTRY_GROUP_ID}.{ENTRY_ID}``
Identifiers (``*_ID``) should comply with the [Lexical
structure in Standard SQL]
(https://cloud.google.com/bigquery/docs/reference/standard-sql/lexical).
fully_qualified_name (str):
Fully qualified name (FQN) of the resource.
FQNs take two forms:
- For non-regionalized resources:
``{SYSTEM}:{PROJECT}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}``
- For regionalized resources:
``{SYSTEM}:{PROJECT}.{LOCATION_ID}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}``
Example for a DPMS table:
``dataproc_metastore:{PROJECT_ID}.{LOCATION_ID}.{INSTANCE_ID}.{DATABASE_ID}.{TABLE_ID}``
"""
linked_resource = proto.Field(
proto.STRING,
number=1,
oneof='target_name',
)
sql_resource = proto.Field(
proto.STRING,
number=3,
oneof='target_name',
)
fully_qualified_name = proto.Field(
proto.STRING,
number=5,
oneof='target_name',
)
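# The three fields above belong to the ``target_name`` oneof, so a lookup
# names its target in exactly one of these ways. Hypothetical examples with
# placeholder resource names:
#
#     LookupEntryRequest(
#         linked_resource="//bigquery.googleapis.com/projects/p/datasets/d/tables/t",
#     )
#     LookupEntryRequest(sql_resource="bigquery.table.p.d.t")
#     LookupEntryRequest(
#         fully_qualified_name="dataproc_metastore:p.us-central1.i.db.t",
#     )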
class Entry(proto.Message):
r"""Entry metadata. A Data Catalog entry represents another resource in
Google Cloud Platform (such as a BigQuery dataset or a Pub/Sub
topic) or outside of it. You can use the ``linked_resource`` field
in the entry resource to refer to the original resource ID of the
source system.
An entry resource contains resource details, for example, its
schema. Additionally, you can attach flexible metadata to an entry
in the form of a [Tag][google.cloud.datacatalog.v1.Tag].
Attributes:
name (str):
Output only. The resource name of an entry in
URL format.
Note: The entry itself and its child resources
might not be stored in the location specified in
its name.
linked_resource (str):
The resource this metadata entry refers to.
For Google Cloud Platform resources, ``linked_resource`` is
the [Full Resource Name]
(https://cloud.google.com/apis/design/resource_names#full_resource_name).
For example, the ``linked_resource`` for a table resource
from BigQuery is:
``//bigquery.googleapis.com/projects/{PROJECT_ID}/datasets/{DATASET_ID}/tables/{TABLE_ID}``
Output only when the entry is one of the types in the
``EntryType`` enum.
For entries with a ``user_specified_type``, this field is
optional and defaults to an empty string.
The resource string must contain only letters (a-z, A-Z),
numbers (0-9), underscores (_), periods (.), colons (:),
slashes (/), dashes (-), and hashes (#). The maximum size is
200 bytes when encoded in UTF-8.
fully_qualified_name (str):
Fully qualified name (FQN) of the resource. Set
automatically for entries representing resources from synced
systems. Settable only during creation and read-only
afterwards. Can be used for search and lookup of the
entries.
FQNs take two forms:
- For non-regionalized resources:
``{SYSTEM}:{PROJECT}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}``
- For regionalized resources:
``{SYSTEM}:{PROJECT}.{LOCATION_ID}.{PATH_TO_RESOURCE_SEPARATED_WITH_DOTS}``
Example for a DPMS table:
``dataproc_metastore:{PROJECT_ID}.{LOCATION_ID}.{INSTANCE_ID}.{DATABASE_ID}.{TABLE_ID}``
type_ (google.cloud.datacatalog_v1.types.EntryType):
The type of the entry. Only used for entries with types
listed in the ``EntryType`` enum.
Currently, only ``FILESET`` enum value is allowed. All other
entries created in Data Catalog must use the
``user_specified_type``.
user_specified_type (str):
Custom entry type that doesn't match any of the values
allowed for input and listed in the ``EntryType`` enum.
When creating an entry, first check the type values in the
enum. If there are no appropriate types for the new entry,
provide a custom value, for example, ``my_special_type``.
The ``user_specified_type`` string has the following
limitations:
- Is case insensitive.
- Must begin with a letter or underscore.
- Can only contain letters, numbers, and underscores.
- Must be at least 1 character and at most 64 characters
long.
integrated_system (google.cloud.datacatalog_v1.types.IntegratedSystem):
Output only. Indicates the entry's source
system that Data Catalog integrates with, such
as BigQuery, Pub/Sub, or Dataproc Metastore.
user_specified_system (str):
Indicates the entry's source system that Data Catalog
doesn't automatically integrate with.
The ``user_specified_system`` string has the following
limitations:
- Is case insensitive.
- Must begin with a letter or underscore.
- Can only contain letters, numbers, and underscores.
- Must be at least 1 character and at most 64 characters
long.
gcs_fileset_spec (google.cloud.datacatalog_v1.types.GcsFilesetSpec):
Specification that applies to a Cloud Storage fileset. Valid
only for entries with the ``FILESET`` type.
bigquery_table_spec (google.cloud.datacatalog_v1.types.BigQueryTableSpec):
Specification that applies to a BigQuery table. Valid only
for entries with the ``TABLE`` type.
bigquery_date_sharded_spec (google.cloud.datacatalog_v1.types.BigQueryDateShardedSpec):
Specification for a group of BigQuery tables with the
``[prefix]YYYYMMDD`` name pattern.
For more information, see [Introduction to partitioned
tables]
(https://cloud.google.com/bigquery/docs/partitioned-tables#partitioning_versus_sharding).
database_table_spec (google.cloud.datacatalog_v1.types.DatabaseTableSpec):
Specification that applies to a table resource. Valid only
for entries with the ``TABLE`` type.
data_source_connection_spec (google.cloud.datacatalog_v1.types.DataSourceConnectionSpec):
Specification that applies to a data source connection.
Valid only for entries with the ``DATA_SOURCE_CONNECTION``
type.
routine_spec (google.cloud.datacatalog_v1.types.RoutineSpec):
Specification that applies to a user-defined function or
procedure. Valid only for entries with the ``ROUTINE`` type.
display_name (str):
Display name of an entry.
The name must contain only Unicode letters, numbers (0-9),
underscores (_), dashes (-), spaces ( ), and can't start or
end with spaces. The maximum size is 200 bytes when encoded
in UTF-8. Default value is an empty string.
description (str):
Entry description that can consist of several
sentences or paragraphs that describe entry
contents.
The description must not contain Unicode
non-characters or C0 and C1 control codes,
except tabs (HT), new lines (LF), carriage
returns (CR), and page breaks (FF).
The maximum size is 2000 bytes when encoded in
UTF-8. Default value is an empty string.
schema (google.cloud.datacatalog_v1.types.Schema):
Schema of the entry. An entry might not have
any schema attached to it.
source_system_timestamps (google.cloud.datacatalog_v1.types.SystemTimestamps):
Timestamps from the underlying resource, not from the Data
Catalog entry.
Output only when the entry has a type listed in the
``EntryType`` enum. For entries with
``user_specified_type``, this field is optional and defaults
to an empty timestamp.
usage_signal (google.cloud.datacatalog_v1.types.UsageSignal):
Output only. Resource usage statistics.
labels (Sequence[google.cloud.datacatalog_v1.types.Entry.LabelsEntry]):
Cloud labels attached to the entry.
In Data Catalog, you can create and modify
labels attached only to custom entries. Synced
entries have unmodifiable labels that come from
the source system.
data_source (google.cloud.datacatalog_v1.types.DataSource):
Output only. Physical location of the entry.
"""
name = proto.Field(
proto.STRING,
number=1,
)
linked_resource = proto.Field(
proto.STRING,
number=9,
)
fully_qualified_name = proto.Field(
proto.STRING,
number=29,
)
type_ = proto.Field(
proto.ENUM,
number=2,
oneof='entry_type',
enum='EntryType',
)
user_specified_type = proto.Field(
proto.STRING,
number=16,
oneof='entry_type',
)
integrated_system = proto.Field(
proto.ENUM,
number=17,
oneof='system',
enum=common.IntegratedSystem,
)
user_specified_system = proto.Field(
proto.STRING,
number=18,
oneof='system',
)
gcs_fileset_spec = proto.Field(
proto.MESSAGE,
number=6,
oneof='type_spec',
message=gcd_gcs_fileset_spec.GcsFilesetSpec,
)
bigquery_table_spec = proto.Field(
proto.MESSAGE,
number=12,
oneof='type_spec',
message=table_spec.BigQueryTableSpec,
)
bigquery_date_sharded_spec = proto.Field(
proto.MESSAGE,
number=15,
oneof='type_spec',
message=table_spec.BigQueryDateShardedSpec,
)
database_table_spec = proto.Field(
proto.MESSAGE,
number=24,
oneof='spec',
message='DatabaseTableSpec',
)
data_source_connection_spec = proto.Field(
proto.MESSAGE,
number=27,
oneof='spec',
message='DataSourceConnectionSpec',
)
routine_spec = proto.Field(
proto.MESSAGE,
number=28,
oneof='spec',
message='RoutineSpec',
)
display_name = proto.Field(
proto.STRING,
number=3,
)
description = proto.Field(
proto.STRING,
number=4,
)
schema = proto.Field(
proto.MESSAGE,
number=5,
message=gcd_schema.Schema,
)
source_system_timestamps = proto.Field(
proto.MESSAGE,
number=7,
message=timestamps.SystemTimestamps,
)
usage_signal = proto.Field(
proto.MESSAGE,
number=13,
message=usage.UsageSignal,
)
labels = proto.MapField(
proto.STRING,
proto.STRING,
number=14,
)
data_source = proto.Field(
proto.MESSAGE,
number=20,
message=gcd_data_source.DataSource,
)
class DatabaseTableSpec(proto.Message):
r"""Specification that applies to a table resource. Valid only for
entries with the ``TABLE`` type.
Attributes:
type_ (google.cloud.datacatalog_v1.types.DatabaseTableSpec.TableType):
Type of this table.
"""
class TableType(proto.Enum):
r"""Type of the table."""
TABLE_TYPE_UNSPECIFIED = 0
NATIVE = 1
EXTERNAL = 2
type_ = proto.Field(
proto.ENUM,
number=1,
enum=TableType,
)
class DataSourceConnectionSpec(proto.Message):
r"""Specification that applies to a data source connection. Valid only
for entries with the ``DATA_SOURCE_CONNECTION`` type.
Attributes:
bigquery_connection_spec (google.cloud.datacatalog_v1.types.BigQueryConnectionSpec):
Fields specific to BigQuery connections.
"""
bigquery_connection_spec = proto.Field(
proto.MESSAGE,
number=1,
message=bigquery.BigQueryConnectionSpec,
)
class RoutineSpec(proto.Message):
r"""Specification that applies to a routine. Valid only for entries with
the ``ROUTINE`` type.
Attributes:
routine_type (google.cloud.datacatalog_v1.types.RoutineSpec.RoutineType):
The type of the routine.
language (str):
The language the routine is written in. The exact value
depends on the source system. For BigQuery routines,
possible values are:
- ``SQL``
- ``JAVASCRIPT``
routine_arguments (Sequence[google.cloud.datacatalog_v1.types.RoutineSpec.Argument]):
Arguments of the routine.
return_type (str):
Return type of the argument. The exact value
depends on the source system and the language.
definition_body (str):
The body of the routine.
bigquery_routine_spec (google.cloud.datacatalog_v1.types.BigQueryRoutineSpec):
Fields specific for BigQuery routines.
"""
class RoutineType(proto.Enum):
r"""The fine-grained type of the routine."""
ROUTINE_TYPE_UNSPECIFIED = 0
SCALAR_FUNCTION = 1
PROCEDURE = 2
class Argument(proto.Message):
r"""Input or output argument of a function or stored procedure.
Attributes:
name (str):
The name of the argument. A return argument
of a function might not have a name.
mode (google.cloud.datacatalog_v1.types.RoutineSpec.Argument.Mode):
Specifies whether the argument is input or
output.
type_ (str):
Type of the argument. The exact value depends
on the source system and the language.
"""
class Mode(proto.Enum):
r"""The input or output mode of the argument."""
MODE_UNSPECIFIED = 0
IN = 1
OUT = 2
INOUT = 3
name = proto.Field(
proto.STRING,
number=1,
)
mode = proto.Field(
proto.ENUM,
number=2,
enum='RoutineSpec.Argument.Mode',
)
type_ = proto.Field(
proto.STRING,
number=3,
)
routine_type = proto.Field(
proto.ENUM,
number=1,
enum=RoutineType,
)
language = proto.Field(
proto.STRING,
number=2,
)
routine_arguments = proto.RepeatedField(
proto.MESSAGE,
number=3,
message=Argument,
)
return_type = proto.Field(
proto.STRING,
number=4,
)
definition_body = proto.Field(
proto.STRING,
number=5,
)
bigquery_routine_spec = proto.Field(
proto.MESSAGE,
number=6,
oneof='system_spec',
message=bigquery.BigQueryRoutineSpec,
)
class EntryGroup(proto.Message):
r"""Entry group metadata.
An ``EntryGroup`` resource represents a logical grouping of zero or
more Data Catalog [Entry][google.cloud.datacatalog.v1.Entry]
resources.
Attributes:
name (str):
The resource name of the entry group in URL
format.
Note: The entry group itself and its child
resources might not be stored in the location
specified in its name.
display_name (str):
A short name to identify the entry group, for
example, "analytics data - jan 2011". Default
value is an empty string.
description (str):
Entry group description. Can consist of
several sentences or paragraphs that describe
the entry group contents. Default value is an
empty string.
data_catalog_timestamps (google.cloud.datacatalog_v1.types.SystemTimestamps):
Output only. Timestamps of the entry group.
Default value is empty.
"""
name = proto.Field(
proto.STRING,
number=1,
)
display_name = proto.Field(
proto.STRING,
number=2,
)
description = proto.Field(
proto.STRING,
number=3,
)
data_catalog_timestamps = proto.Field(
proto.MESSAGE,
number=4,
message=timestamps.SystemTimestamps,
)
class CreateTagTemplateRequest(proto.Message):
r"""Request message for
[CreateTagTemplate][google.cloud.datacatalog.v1.DataCatalog.CreateTagTemplate].
Attributes:
parent (str):
Required. The name of the project and the template location
`region <https://cloud.google.com/data-catalog/docs/concepts/regions>`__.
tag_template_id (str):
Required. The ID of the tag template to create.
The ID must contain only lowercase letters (a-z), numbers
(0-9), or underscores (_), and must start with a letter or
underscore. The maximum size is 64 bytes when encoded in
UTF-8.
tag_template (google.cloud.datacatalog_v1.types.TagTemplate):
Required. The tag template to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
tag_template_id = proto.Field(
proto.STRING,
number=3,
)
tag_template = proto.Field(
proto.MESSAGE,
number=2,
message=gcd_tags.TagTemplate,
)
class GetTagTemplateRequest(proto.Message):
r"""Request message for
[GetTagTemplate][google.cloud.datacatalog.v1.DataCatalog.GetTagTemplate].
Attributes:
name (str):
Required. The name of the tag template to
get.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class UpdateTagTemplateRequest(proto.Message):
r"""Request message for
[UpdateTagTemplate][google.cloud.datacatalog.v1.DataCatalog.UpdateTagTemplate].
Attributes:
tag_template (google.cloud.datacatalog_v1.types.TagTemplate):
Required. The template to update. The ``name`` field must be
set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Names of fields whose values to overwrite on a tag template.
Currently, only ``display_name`` can be overwritten.
If this parameter is absent or empty, all modifiable fields
are overwritten. If such fields are non-required and omitted
in the request body, their values are emptied.
"""
tag_template = proto.Field(
proto.MESSAGE,
number=1,
message=gcd_tags.TagTemplate,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteTagTemplateRequest(proto.Message):
r"""Request message for
[DeleteTagTemplate][google.cloud.datacatalog.v1.DataCatalog.DeleteTagTemplate].
Attributes:
name (str):
Required. The name of the tag template to
delete.
force (bool):
Required. If true, deletes all tags that use this template.
Currently, ``true`` is the only supported value.
"""
name = proto.Field(
proto.STRING,
number=1,
)
force = proto.Field(
proto.BOOL,
number=2,
)
class CreateTagRequest(proto.Message):
r"""Request message for
[CreateTag][google.cloud.datacatalog.v1.DataCatalog.CreateTag].
Attributes:
parent (str):
Required. The name of the resource to attach
this tag to.
Tags can be attached to entries or entry groups.
An entry can have up to 1000 attached tags.
Note: The tag and its child resources might not
be stored in the location specified in its name.
tag (google.cloud.datacatalog_v1.types.Tag):
Required. The tag to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
tag = proto.Field(
proto.MESSAGE,
number=2,
message=gcd_tags.Tag,
)
class UpdateTagRequest(proto.Message):
r"""Request message for
[UpdateTag][google.cloud.datacatalog.v1.DataCatalog.UpdateTag].
Attributes:
tag (google.cloud.datacatalog_v1.types.Tag):
Required. The updated tag. The "name" field
must be set.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Names of fields whose values to overwrite on a tag.
Currently, the only modifiable field of a tag is
``fields``.
In general, if this parameter is absent or empty, all
modifiable fields are overwritten. If such fields are
non-required and omitted in the request body, their values
are emptied.
"""
tag = proto.Field(
proto.MESSAGE,
number=1,
message=gcd_tags.Tag,
)
update_mask = proto.Field(
proto.MESSAGE,
number=2,
message=field_mask_pb2.FieldMask,
)
class DeleteTagRequest(proto.Message):
r"""Request message for
[DeleteTag][google.cloud.datacatalog.v1.DataCatalog.DeleteTag].
Attributes:
name (str):
Required. The name of the tag to delete.
"""
name = proto.Field(
proto.STRING,
number=1,
)
class CreateTagTemplateFieldRequest(proto.Message):
r"""Request message for
[CreateTagTemplateField][google.cloud.datacatalog.v1.DataCatalog.CreateTagTemplateField].
Attributes:
parent (str):
Required. The name of the project and the template location
`region <https://cloud.google.com/data-catalog/docs/concepts/regions>`__.
tag_template_field_id (str):
Required. The ID of the tag template field to create.
Note: Adding a required field to an existing template is
*not* allowed.
Field IDs can contain letters (both uppercase and
lowercase), numbers (0-9), underscores (_) and dashes (-).
Field IDs must be at least 1 character long and at most 128
characters long. Field IDs must also be unique within their
template.
tag_template_field (google.cloud.datacatalog_v1.types.TagTemplateField):
Required. The tag template field to create.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
tag_template_field_id = proto.Field(
proto.STRING,
number=2,
)
tag_template_field = proto.Field(
proto.MESSAGE,
number=3,
message=gcd_tags.TagTemplateField,
)
class UpdateTagTemplateFieldRequest(proto.Message):
r"""Request message for
[UpdateTagTemplateField][google.cloud.datacatalog.v1.DataCatalog.UpdateTagTemplateField].
Attributes:
name (str):
Required. The name of the tag template field.
tag_template_field (google.cloud.datacatalog_v1.types.TagTemplateField):
Required. The template to update.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Names of fields whose values to overwrite on an
individual field of a tag template. The following fields are
modifiable:
- ``display_name``
- ``type.enum_type``
- ``is_required``
If this parameter is absent or empty, all modifiable fields
are overwritten. If such fields are non-required and omitted
in the request body, their values are emptied with one
exception: when updating an enum type, the provided values
are merged with the existing values. Therefore, enum values
can only be added; existing enum values cannot be deleted or
renamed.
Additionally, updating a template field from optional to
required is *not* allowed.
"""
name = proto.Field(
proto.STRING,
number=1,
)
tag_template_field = proto.Field(
proto.MESSAGE,
number=2,
message=gcd_tags.TagTemplateField,
)
update_mask = proto.Field(
proto.MESSAGE,
number=3,
message=field_mask_pb2.FieldMask,
)
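# A hypothetical sketch restricting the update to ``display_name`` only
# (resource name and values are placeholders):
#
#     from google.protobuf import field_mask_pb2
#
#     request = UpdateTagTemplateFieldRequest(
#         name="projects/p/locations/us-central1/tagTemplates/t/fields/f",
#         tag_template_field=gcd_tags.TagTemplateField(display_name="Owner"),
#         update_mask=field_mask_pb2.FieldMask(paths=["display_name"]),
#     )
#
# As noted in the docstring above, when ``type.enum_type`` is in the mask the
# provided enum values are merged with the existing ones, so enum values can
# be added but not deleted or renamed.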
class RenameTagTemplateFieldRequest(proto.Message):
r"""Request message for
[RenameTagTemplateField][google.cloud.datacatalog.v1.DataCatalog.RenameTagTemplateField].
Attributes:
name (str):
Required. The name of the tag template.
new_tag_template_field_id (str):
Required. The new ID of this tag template field. For
example, ``my_new_field``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
new_tag_template_field_id = proto.Field(
proto.STRING,
number=2,
)
class RenameTagTemplateFieldEnumValueRequest(proto.Message):
r"""Request message for
[RenameTagTemplateFieldEnumValue][google.cloud.datacatalog.v1.DataCatalog.RenameTagTemplateFieldEnumValue].
Attributes:
name (str):
Required. The name of the enum field value.
new_enum_value_display_name (str):
Required. The new display name of the enum value. For
example, ``my_new_enum_value``.
"""
name = proto.Field(
proto.STRING,
number=1,
)
new_enum_value_display_name = proto.Field(
proto.STRING,
number=2,
)
class DeleteTagTemplateFieldRequest(proto.Message):
r"""Request message for
[DeleteTagTemplateField][google.cloud.datacatalog.v1.DataCatalog.DeleteTagTemplateField].
Attributes:
name (str):
Required. The name of the tag template field
to delete.
force (bool):
Required. If true, deletes this field from any tags that use
it.
Currently, ``true`` is the only supported value.
"""
name = proto.Field(
proto.STRING,
number=1,
)
force = proto.Field(
proto.BOOL,
number=2,
)
class ListTagsRequest(proto.Message):
r"""Request message for
[ListTags][google.cloud.datacatalog.v1.DataCatalog.ListTags].
Attributes:
parent (str):
Required. The name of the Data Catalog resource to list the
tags of.
The resource can be an
[Entry][google.cloud.datacatalog.v1.Entry] or an
[EntryGroup][google.cloud.datacatalog.v1.EntryGroup]
(without ``/entries/{entries}`` at the end).
page_size (int):
The maximum number of tags to return. Default
is 10. Maximum limit is 1000.
page_token (str):
Pagination token that specifies the next page
to return. If empty, the first page is returned.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
class ListTagsResponse(proto.Message):
r"""Response message for
[ListTags][google.cloud.datacatalog.v1.DataCatalog.ListTags].
Attributes:
tags (Sequence[google.cloud.datacatalog_v1.types.Tag]):
[Tag][google.cloud.datacatalog.v1.Tag] details.
next_page_token (str):
Pagination token of the next results page.
Empty if there are no more items in results.
"""
@property
def raw_page(self):
return self
tags = proto.RepeatedField(
proto.MESSAGE,
number=1,
message=gcd_tags.Tag,
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
class ListEntriesRequest(proto.Message):
r"""Request message for
[ListEntries][google.cloud.datacatalog.v1.DataCatalog.ListEntries].
Attributes:
parent (str):
Required. The name of the entry group that
contains the entries to list.
Can be provided in URL format.
page_size (int):
The maximum number of items to return. Default is 10.
Maximum limit is 1000. Throws an invalid argument if
``page_size`` is more than 1000.
page_token (str):
Pagination token that specifies the next page
to return. If empty, the first page is returned.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
The fields to return for each entry. If empty or omitted,
all fields are returned.
For example, to return a list of entries with only the
``name`` field, set ``read_mask`` to only one path with the
``name`` value.
"""
parent = proto.Field(
proto.STRING,
number=1,
)
page_size = proto.Field(
proto.INT32,
number=2,
)
page_token = proto.Field(
proto.STRING,
number=3,
)
read_mask = proto.Field(
proto.MESSAGE,
number=4,
message=field_mask_pb2.FieldMask,
)
class ListEntriesResponse(proto.Message):
r"""Response message for
[ListEntries][google.cloud.datacatalog.v1.DataCatalog.ListEntries].
Attributes:
entries (Sequence[google.cloud.datacatalog_v1.types.Entry]):
Entry details.
next_page_token (str):
Pagination token of the next results page.
Empty if there are no more items in results.
"""
@property
def raw_page(self):
return self
entries = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='Entry',
)
next_page_token = proto.Field(
proto.STRING,
number=2,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 1.023438
| 1
|
src/rbxmx.py
|
GestaIt/Model-Uploader
| 3
|
12781170
|
<gh_stars>1-10
"""
Created by Gestalt on 10/26/21
rbxmx.py
Used for the manipulation of RBXMX objects.
Functions:
change_script_source(xml_source, new_source) -> str
get_asset_data(asset_id) -> tuple[bool, str]
"""
import xml.etree.ElementTree as ElementTree
from typing import Union
import requests
def get_asset_data(asset_id: str) -> tuple[bool, str]:
"""Gets the RBXMX data for the specified asset.
:param asset_id: The asset id in which the program gets the data for.
:return: The RBXMX data for the given asset id.
"""
assert asset_id.isdigit(), "The asset id must only include digits!"
url: str = f"https://assetdelivery.roblox.com/v1/assetId/{asset_id}"
    # Sometimes the API can fail, mainly when the user gives a fake asset id.
    # requests.get() does not raise on HTTP error statuses by itself, so call
    # raise_for_status() and also guard against a missing "location" key.
    try:
        asset_location_fetch_response: requests.Response = requests.get(url)
        asset_location_fetch_response.raise_for_status()
        asset_location_fetch_json: dict[str, str] = asset_location_fetch_response.json()
        asset_location: str = asset_location_fetch_json["location"]
        asset_data_fetch_response: requests.Response = requests.get(asset_location)
        asset_data_fetch_response.raise_for_status()
        asset_data_fetch_text: str = asset_data_fetch_response.text
        return True, asset_data_fetch_text
    except (requests.RequestException, KeyError, ValueError):
        print(f"An error occurred while getting the asset data for the id {asset_id}")
        return False, ""
def change_script_source(xml_source: str, new_source: str) -> str:
"""Sets the source text to the given new source.
:param xml_source: The Roblox asset XML data. Must only include a script.
:param new_source: The new source text for the script.
:return: The new asset XML data.
"""
# We must search through the parsed elements and find the script source.
try:
parsed_data: Union[ElementTree.Element, None] = ElementTree.fromstring(xml_source)
except ElementTree.ParseError:
parsed_data = None
assert parsed_data, "Failed to parse asset data. Is the asset in RBXMX format?"
script_object: ElementTree.Element = parsed_data.find("Item")
# Just doing some sanity checks.
assert script_object, "I couldn't find an item in that asset you provided!"
script_properties: ElementTree.Element = script_object.find("Properties")
source_property: ElementTree.Element = script_properties.find("ProtectedString[@name='Source']")
assert source_property is not None, "I couldn't find a source property in your object. Is the asset a script?"
source_property.text = new_source
return ElementTree.tostring(parsed_data).decode("utf-8")
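# A minimal usage sketch of the two helpers above. The asset id is a
# placeholder and the calls require network access to the Roblox API.
if __name__ == "__main__":
    success, asset_xml = get_asset_data("1818")  # hypothetical asset id
    if success:
        patched_xml = change_script_source(asset_xml, 'print("patched source")')
        print(patched_xml[:200])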
| 2.828125
| 3
|
my_lambdata/OOP_Sprint/Acme.py
|
misqualzarabi/lambdata_misqual_z
| 0
|
12781171
|
<filename>my_lambdata/OOP_Sprint/Acme.py
import random
class Product:
    def __init__(self, name, price=10, weight=20,
                 flammability=0.5, identifier=None):
        self.name = name
        self.price = price
        self.weight = weight
        self.flammability = flammability
        # Generate the id inside __init__: a random default argument would be
        # evaluated only once and shared by every instance.
        self.identifier = identifier if identifier is not None else random.randint(1000000, 9999999)
    def stealability(self):
        # Use the instance's own price-to-weight ratio instead of hardcoded numbers.
        ratio = self.price / self.weight
        if ratio < 0.5:
            print('not so stealable')
        elif ratio < 1.0:
            print('kinda stealable')
        else:
            print('very stealable')
        return
    def explode(self):
        # Use the instance's own flammability and weight.
        product = self.flammability * self.weight
        if product < 10:
            print('...fizzle')
        elif product < 50:
            print('boom')
        else:
            print('...BABOOM!!')
        return
class BoxingGlove(Product):
    def __init__(self, name, price=10, weight=10,
                 flammability=0.5, identifier=None):
        # Same as Product, but the default weight is 10.
        super().__init__(
            name=name,
            price=price,
            weight=weight,
            flammability=flammability,
            identifier=identifier)
    def explode(self):
        print('...it is a glove')
    def punch(self):
        if self.weight < 5:
            print('That tickles')
        elif self.weight < 15:
            print('Hey that hurt')
        else:
            print('OOUCH')
        return
if __name__ == "__main__":
    prod = Product("A cool toy")
    print(prod.name)
    print(prod.price)
    print(prod.weight)
    print(prod.flammability)
    print(prod.identifier)
    prod.stealability()
    prod.explode()
    glove = BoxingGlove('Punchy the third')
    print(glove.weight)
    print(glove.price)
    glove.explode()
    glove.punch()
| 3.453125
| 3
|
src/feature_extract.py
|
JiJingYu/Sensor-Specific-Hyperspectral-Image-Feature-Learning
| 1
|
12781172
|
import os
import time
import scipy.io as sio
from data_analysis import find_caffe
# import caffe
import data_analysis.get_feature_from_model as feature
caffe_root = find_caffe.caffe_root
def mkdir_if_not_exist(the_dir):
if not os.path.isdir(the_dir) :
os.makedirs(the_dir)
def get_indian_pines_features_from_indian_pines_model():
for i in range(10):
class data: pass
data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
data.result_dir = '../result/indian_pines/bn_net_200/feature'
mkdir_if_not_exist(data.result_dir)
data.result_file = data.result_dir + '/ip_feature_ip_model_{}.mat'.format(i)
data.iters = 2000000
pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
data.iters)
deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'
getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
getFeature.get_ip1()
data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
sio.savemat(data.result_file, data.result_dict)
def get_salina_features_from_salina_model():
for i in range(10):
class data: pass
data.data_dir = os.path.expanduser('~/hyperspectral_datas/salina/data/')
data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
data.result_dir = '../result/salina/bn_net_200/feature'
mkdir_if_not_exist(data.result_dir)
data.result_file = data.result_dir + '/salina_feature_salina_5x5_mean_std_model_{}.mat'.format(i)
data.iters = 2000000
pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
data.iters)
deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'
getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
getFeature.get_ip1()
data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
sio.savemat(data.result_file, data.result_dict)
def get_indian_pines_features_from_salina_model():
for i in range(10):
class data: pass
data.data_dir = os.path.expanduser('../hyperspectral_datas/indian_pines/data/')
data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['data']
data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/indian_pines_5x5_mean_std.mat')['labels']
data.result_dir = '../result/salina/bn_net_200/feature'
mkdir_if_not_exist(data.result_dir)
data.result_file = data.result_dir + '/ip_feature_salina_model_{}.mat'.format(i)
data.iters = 2000000
pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
data.iters)
deploy_file = data.result_dir + '/../proto/salina_5x5_mean_std_deploy.prototxt'
getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
getFeature.get_ip1()
data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
sio.savemat(data.result_file, data.result_dict)
def get_salina_features_from_indian_pines_model():
for i in range(10):
class data: pass
data.data_dir = os.path.expanduser('../hyperspectral_datas/salina/data/')
data.data_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['data']
data.labels_5x5_mean_std = sio.loadmat(data.data_dir + '/salina_5x5_mean_std.mat')['labels']
data.result_dir = '../result/indian_pines/bn_net_200/feature'
mkdir_if_not_exist(data.result_dir)
data.result_file = data.result_dir + '/salina_feature_ip_model_{}.mat'.format(i)
data.iters = 2000000
pretrained_model = data.result_dir + '/../model/5x5_mean_std_models_time_{}_iter_{}.caffemodel.h5'.format(i,
data.iters)
deploy_file = data.result_dir + '/../proto/indian_pines_5x5_mean_std_deploy.prototxt'
getFeature = feature.GetFeatureFromCaffe(deploy_file=deploy_file, pretrained_model=pretrained_model)
getFeature.set_data(data.data_5x5_mean_std, data.labels_5x5_mean_std)
getFeature.get_ip1()
data.result_dict = {'data': getFeature.ip1_data, 'labels': getFeature.label}
sio.savemat(data.result_file, data.result_dict)
if __name__ == '__main__':
start = time.time()
get_indian_pines_features_from_indian_pines_model()
get_salina_features_from_salina_model()
get_indian_pines_features_from_salina_model()
get_salina_features_from_indian_pines_model()
end = time.time()
print(end - start)
| 2.015625
| 2
|
computo avanzado/python/kmeans.py
|
corahama/python
| 1
|
12781173
|
<reponame>corahama/python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import random
# Function for means comparison
def are_means_equal(array1, array2):
if len(array1) == len(array2):
for i in range(len(array1)):
if np.array_equal(array1[i], array2[i]) == False:
return False
return True
else:
return False
# Setting up the dataset and the number-of-clusters (n) variable
n = int(input('Introduce la cantidad de clusters: '))
iris = pd.read_csv('iris.data', header=None)
dataset = np.array(iris.loc[:, 0:1])
ds_dimensions = dataset.shape[1]
ds_size = dataset.shape[0]
# ***** Algorithm start *****
# Select n random elements from the dataset as the initial means
# (use ds_size instead of a hardcoded index so any dataset size works)
means = []
for i in range(n):
    means.append(dataset[random.randint(0, ds_size - 1)])
# Loop for defining clusters
do = True
new_means = means
iterations = 0
while are_means_equal(means, new_means) == False or do:
means = new_means.copy()
clusters = [[] for i in range(n)]
for e in dataset:
distances = []
for m in means:
distances.append(sum([(m[i]-e[i])**2 for i in range(ds_dimensions)]))
clusters[distances.index(min(distances))].append(e)
new_means = []
for c in clusters:
mean = []
for i in range(ds_dimensions):
mean.append(sum([point[i] for point in c])/len(c))
new_means.append(np.array(mean))
do = False
iterations += 1
for i in range(len(clusters)):
clusters[i] = np.array(clusters[i])
print('Numero total de iteraciones antes de la convergencia: ' + str(iterations))
if ds_dimensions == 2 and n==3:
# Graph designed to work specifically with 3 clusters and 2 dimensions
plt.scatter(clusters[0][:, 0], clusters[0][:, 1], color='red', alpha=0.5)
plt.scatter(clusters[1][:, 0], clusters[1][:, 1], color='blue', alpha=0.5)
plt.scatter(clusters[2][:, 0], clusters[2][:, 1], color='green', alpha=0.5)
plt.show()
else:
for c in clusters:
print(c)
| 3.5
| 4
|
tutor/project_4/aggregate.py
|
globulion/qc-workshop
| 1
|
12781174
|
#!/usr/bin/python3
"""Aggregate Module.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
import psi4, numpy
class Aggregate:
def __init__(self, psi4_molecule):
self.all = psi4_molecule
self.qm = psi4_molecule.extract_subsets(1)
self.nfrags = psi4_molecule.nfragments()
self.bath = [] if self.nfrags == 1 else [psi4_molecule.extract_subsets(2+i) for i in range(self.nfrags-1)]
self._mrec = 1./(numpy.array([self.all.mass(i) for i in range(self.all.natom())])) * psi4.constants.au2amu
def update(self, xyz):
self.all.set_geometry(xyz)
self.qm = self.all.extract_subsets(1)
self.bath = [self.all.extract_subsets(2+i) for i in range(self.nfrags-1)]
def save_xyz(self, out, center_mode='qm'):
geom = self.all.geometry()
geom.scale(psi4.constants.bohr2angstroms)
if center_mode is None : com = [0.0,0.0,0.0]
elif center_mode.lower() == 'qm' : com = self.qm.center_of_mass()
elif center_mode.lower() == 'all': com = self.all.center_of_mass()
elif isinstance(center_mode, int): com = self.bath[center_mode].center_of_mass()
else: raise ValueError("Centering mode - %s - is not supported" % center_mode)
out.write("%d\n\n" % self.all.natom())
for i in range(self.all.natom()):
sym = self.all.label(i)
out.write("%s %10.6f %10.6f %10.6f\n"%(sym,geom.get(i,0)-com[0],geom.get(i,1)-com[1],geom.get(i,2)-com[2]))
| 2.265625
| 2
|
disk_set.py
|
warm-ice0x00/disk-set
| 0
|
12781175
|
<reponame>warm-ice0x00/disk-set<gh_stars>0
import hashlib
import typing
import sympy
class DiskSet:
    """A fixed-capacity, file-backed hash set of fixed-length byte keys.
    The table has a prime number of slots (just above 2 * n), collisions are
    resolved by linear probing, and an all-zero slot marks an empty position.
    """
    def __init__(self, file: typing.BinaryIO, n: int, key_len: int) -> None:
        self.n = n
        # A prime table size greater than 2 * n keeps the load factor under 0.5.
        self.m = sympy.nextprime(n << 1)
        self.key_len = key_len
        try:
            file.truncate(self.m * self.key_len)
        except OSError:
            pass
        self.file = file
    def __enter__(self) -> "DiskSet":
        return self
    def _hash(self, key: bytes) -> int:
        # Map the MD5 digest of the key to a slot index.
        return int.from_bytes(hashlib.md5(key).digest(), "big") % self.m
def put(self, key: bytes) -> None:
h = self._hash(key)
while True:
self.file.seek(h * self.key_len)
if not any(self.file.read(self.key_len)):
self.file.seek(-self.key_len, 1)
self.file.write(key)
break
h = (h + 1) % self.m
def get(self, key: bytes) -> bool:
h = self._hash(key)
while True:
self.file.seek(h * self.key_len)
b = self.file.read(self.key_len)
if not any(b):
return False
elif b == key:
return True
h = (h + 1) % self.m
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
self.file.close()
if __name__ == "__main__":
with open(
"pwned-passwords-ntlm-ordered-by-hash-v8.txt", "r", encoding="ascii"
) as f_in:
N = sum(1 for _ in f_in)
print("N = %d" % N)
with open(
"pwned-passwords-ntlm-ordered-by-hash-v8.txt", "r", encoding="ascii"
) as f_in, open("hash_set", "w+b") as f_out:
with DiskSet(f_out, N, 16) as hash_set:
for line in f_in:
hash_set.put(bytes.fromhex(line[:32]))
| 2.359375
| 2
|
src/examples/plot_costs.py
|
zhhengcs/sunny-side-up
| 581
|
12781176
|
#!/usr/bin/env python
import os
import json
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import argparse
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument("--start", default=25, type=int)
arg_parser.add_argument("cost_file", default="metrics_costs.json", nargs="?")
args = arg_parser.parse_args()
def plot_costs(json_path, path_prefix=""):
with open(json_path) as f:
json_obj = json.load(f)
#df = np.array(json_obj)
for idx, epoch in enumerate(json_obj):
        print(idx, ":")
costs_epoch = np.array(list(enumerate(epoch)))
plt.figure()
plt.plot(costs_epoch[args.start:,0], costs_epoch[args.start:,1])
plt.savefig(os.path.join(path_prefix, "costs_{}.png".format(idx)))
plt.close()
if __name__=="__main__":
plot_costs(args.cost_file)
| 3
| 3
|
structural-variation/scripts-projection/variants2hapmap.py
|
hexin010101/NAM-genomes
| 0
|
12781177
|
<gh_stars>0
#!/usr/bin/python3
'''
created by <NAME>
2020-02-04
'''
import argparse as ap
# initialize argument parser (pass user input from command line to script)
parser = ap.ArgumentParser(formatter_class=ap.RawDescriptionHelpFormatter,
description='''
description: this script reads a tab-delimited file containing structural
variant calls and transforms it into a hapmap file.
example: variants2hapmap.py my_file.txt my_results.sorted.hmp.txt''')
# add positional arguments
parser.add_argument("variant_file", type=str,
help="variant file with SV calls")
parser.add_argument("output_name", type=str,
help="name of the hapmap file")
# pass arguments into variables
args = parser.parse_args()
variant_file = args.variant_file
output_name = args.output_name
# open input file
infile = open(variant_file, "r")
# open output file
outfile = open(output_name, "w")
# get header information
header = infile.readline()
header = header.strip()
header = header.split("\t")
# get inbred list
inbreds_list = header[5:]
# write output header
print("rs", "alleles", "chrom", "pos", "strand", "assembly",
"center", "protLSID", "assayLSID", "panel", "QCcode",
"\t".join(inbreds_list), sep="\t", file=outfile)
# read file line by line
for line in infile:
line = line.strip()
line = line.split("\t")
# get chrom number and start/end positions
SV_location = line[0].split(":")
chr = SV_location[0].split("-")[0]
sv_start = int(SV_location[0].split("-")[1])
sv_end = int(SV_location[1].split("-")[1])
# get position in the middle of the SV
pos = round((sv_start + sv_end) / 2)
# # get length of sv
# sv_length = abs(int(line[2]))
# determine type of sv
sv_type = line[4].lower()
# create id based on sv type and location
id = sv_type + "." + chr + "." + str(sv_start) + "." + str(sv_end)
# if sv is TRA, add TRA location in id
if sv_type == "tra":
# get location of where the TRA went
tra_chr = SV_location[1].split("-")[0]
# correct id
id = "tra." + tra_chr + "." + str(sv_end)
# make sure position of TRA is the sv start, and not middle position
pos = sv_start
# parse each inbred line (based on its index on header)
inbreds_geno = []
for index in range(5, len(header)):
# print(line[index])
inbred_info = line[index]
# if SV is 'T'here, assign genotype TT
if inbred_info == "1/1":
genotype = "TT"
# if SV is 'A'bsent or het, assign genotype AA
elif inbred_info == "0/0" or inbred_info == "0/1":
genotype = "AA"
# if missing info, assign genotype NN
else:
genotype = "NN"
inbreds_geno.append(genotype)
# before writing output, format chrom according to hapmap format
if chr[0:3] == "chr":
chr = chr.split("chr")[1]
if chr[0:4] == "scaf":
chr = chr[0:4].upper() + chr[4:]
# check which alleles are present for that SV
SV_alleles = "".join(inbreds_geno)
if ("A" in SV_alleles) and ("T" in SV_alleles):
alleles = "A/T"
elif "A" in SV_alleles:
alleles = "A"
elif "T" in SV_alleles:
alleles = "T"
else:
alleles = "N"
# write output
print(id, alleles, chr, pos, "NA", "NA", "NA", "NA", "NA", "NA", "NA",
"\t".join(inbreds_geno), sep="\t", file=outfile)
# close files
infile.close()
outfile.close()
| 3
| 3
|
flight_ticket/apps.py
|
Calpax-aaS/web-app
| 0
|
12781178
|
<gh_stars>0
from django.apps import AppConfig
class FlightTicketConfig(AppConfig):
name = 'flight_ticket'
| 1.234375
| 1
|
tests/test_integrate.py
|
rtsfred3/pyntegrate
| 0
|
12781179
|
<gh_stars>0
import time, unittest
import pyntegrate.pyarctan as pyarctan
import pyntegrate.arctan as arctan
def makeArrMin(n, seed=25):
return arctan.makeArrMin(n)
def makeArr(n, seed=25):
return makeArrMin(n, seed), makeArrMin(n, seed)
class TestArctanMethods(unittest.TestCase):
def test_bubblesort(self):
assert arctan.bubblesort([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_bubblesort2(self):
assert arctan.bubblesort2([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_quicksort(self):
assert arctan.quicksort([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_p_quicksort(self):
assert arctan.p_quicksort([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_mergesort(self):
assert arctan.mergesort([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_p_mergesort(self):
assert arctan.p_mergesort([4, 2, 3, 1]) == [1, 2, 3, 4]
def test_insertionsort(self):
assert arctan.insertionsort([4, 2, 3, 1]) == [1, 2, 3, 4]
#def test_bucketsort(self):
# assert arctan.bucketsort([4, 2, 3, 1]) == [1, 2, 3, 4]
class TestPyarctanMethods(unittest.TestCase):
def setUp(self):
self.inputArr = [4, 2, 3, 1]
self.outputArr = [1, 2, 3, 4]
def test_bubblesort(self):
assert pyarctan.bubblesort(self.inputArr) == self.outputArr
def test_bubblesort2(self):
assert pyarctan.bubblesort2(self.inputArr) == self.outputArr
def test_quicksort(self):
assert pyarctan.quicksort(self.inputArr, 0, len(self.inputArr)) == self.outputArr
def test_p_quicksort(self):
assert pyarctan.p_quicksort(pyarctan.arg_struct(self.inputArr, 0, len(self.inputArr), 0)) == self.outputArr
def test_mergesort(self):
assert pyarctan.mergesort(self.inputArr) == self.outputArr
def test_p_mergesort(self):
assert pyarctan.p_mergesort(pyarctan.arg_struct(self.inputArr, 0, len(self.inputArr), 0)) == self.outputArr
def test_insertionsort(self):
assert pyarctan.insertionsort(self.inputArr) == self.outputArr
#def test_bucketsort(self):
# assert pyarctan.bucketsort(self.inputArr) == self.outputArr
class TestTimeMethods(unittest.TestCase):
def setUp(self):
self.n = 100
def test_bubblesort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.bubblesort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.bubblesort(b)
timePY = time.time() - startPY
assert timeC < timePY
def test_bubblesort2(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.bubblesort2(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.bubblesort2(b)
timePY = time.time() - startPY
assert timeC < timePY
def test_quicksort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.quicksort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.quicksort(b, 0, len(b))
timePY = time.time() - startPY
assert timeC < timePY
'''def test_p_quicksort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.p_quicksort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.p_quicksort(pyarctan.arg_struct(b, 0, len(b), 0))
timePY = time.time() - startPY
assert timeC < timePY'''
def test_mergesort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.mergesort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.mergesort(b)
timePY = time.time() - startPY
assert timeC < timePY
'''def test_p_mergesort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.p_mergesort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.p_mergesort(pyarctan.arg_struct(b, 0, len(b), 0))
timePY = time.time() - startPY
assert timeC < timePY'''
def test_insertionsort(self):
a, b = makeArr(self.n)
startC = time.time()
arctan.insertionsort(a)
timeC = time.time() - startC
startPY = time.time()
pyarctan.insertionsort(b)
timePY = time.time() - startPY
assert timeC < timePY
if __name__ == '__main__':
unittest.main()
| 2.5
| 2
|
_2015/adventCoinMiner/adventCoinMiner.py
|
dcsparkes/adventofcode
| 0
|
12781180
|
import hashlib
import itertools
class AdventCoinMiner():
def solve(self, prefix, check="00000"):
for i in itertools.count(1):
            # Avoid shadowing the built-in name ``hash``.
            digest = hashlib.md5("{}{}".format(prefix, i).encode('utf-8')).hexdigest()
            if check == digest[:len(check)]:
return i
| 3.140625
| 3
|
test.py
|
igelbox/blender-ogf
| 5
|
12781181
|
<reponame>igelbox/blender-ogf
#!/usr/bin/python
from io_scene_ogf.ogf_import import load, ImportContext
load(ImportContext('test.ogf'))
| 1.132813
| 1
|
app/app/api/domain/services/wrappers/mongo/PymongoExecutor.py
|
GPortas/Playgroundb
| 1
|
12781182
|
<filename>app/app/api/domain/services/wrappers/mongo/PymongoExecutor.py
from bson import ObjectId
class PymongoExecutor:
def __init__(self, db):
self.db = db
def execute(self, expression):
return eval(expression)
| 2.046875
| 2
|
examples/summary.py
|
stoeckli/msread
| 1
|
12781183
|
# Created by <NAME>
# import module
import msread
# init path
path = r"sample.raw"
# open file
with msread.open(path) as reader:
# show summary
reader.summary(show=True)
# read headers only
for header in reader.headers(min_rt=5*60, max_rt=10*60, ms_level=1):
print(header)
# read scans
for scan in reader.scans(min_rt=5*60, max_rt=10*60, ms_level=1):
print(scan.header)
print(scan.centroids)
| 2.515625
| 3
|
Python Programs/consogram.py
|
muhammad-masood-ur-rehman/Skillrack
| 2
|
12781184
|
"""Consogram
Consograms are words or sentences that have every consonant (letters other
than a, e, i, o, u) of the English alphabet occurring at least once. Write an
algorithm and a subsequent Python code to check whether a string is a
consogram or not. Write a function to check if a given string is a consogram.
For example, "The quick brown fox jumps over the lazy dog" is a consogram.
"""
def cons(sen):
sen=list(filter(lambda a: a != ' ', sen))
q=['q','w','r','t','y','p','s','d','f','g','h','j','k','l','z','x','c','v','b','n','m']
flag=1
for i in q:
if(sen.count(i)<1):
flag=0
if(flag==1):
print('Consogram')
else:
print('Not consogram')
sen=input().lower()
cons(sen)
| 3.890625
| 4
|
configs/003_random_crop.py
|
taraspiotr/data-driven-robot-grasping
| 0
|
12781185
|
<gh_stars>0
from mrunner.helpers.specification_helper import create_experiments_helper
config = {
"name": "sac_kuka_diverse",
"env_num_objects": 5,
"env_camera_random": 0,
"env_use_height_hack": True,
"model_hidden_sizes": (256, 256),
"encoder_num_filters": 32,
"cuda_idx": 0,
"learning_rate": 3e-3,
"alpha": None,
"env_block_random": 0,
"encoder_num_layers": 2,
"encoder_feature_dim": 32,
"augmentations": ["crop"],
}
params_grid = {"observation_size": [70, 80, 90]}
name = globals()["script"][:-3]
experiments_list = create_experiments_helper(
experiment_name=name,
project_name="taraspiotr/data-driven-robot-grasping",
script="python3.8 experiments/sac.py",
python_path=".",
tags=[name],
base_config=config,
params_grid=params_grid,
)
| 1.75
| 2
|
Oefeningen/standalone/mileage_converter.py
|
Seviran/Python_3
| 0
|
12781186
|
print("How many kilometres did you cycle today?")
kms = input()
miles = float(kms) / 1.60934
miles = round(miles, 2)
print(f"Your {kms}km ride is {miles}mi")
# print("You ran {}".format(miles))
# print("You ran " + miles) <> DOES NOT WORK!!
| 4.21875
| 4
|
CS-1656-Data-Science/Recitations/Recitation 9/task.py
|
solomonheisey/University_Projects
| 1
|
12781187
|
<filename>CS-1656-Data-Science/Recitations/Recitation 9/task.py
import sqlite3 as lite
import pandas as pd
from sqlalchemy import create_engine
class Task(object):
def __init__(self, db_name, students, grades, courses, majors):
self.con = lite.connect(db_name)
self.cur = self.con.cursor()
self.cur.execute('DROP TABLE IF EXISTS Courses')
self.cur.execute(
"CREATE TABLE Courses(cid INT, number INT, professor TEXT, major TEXT, year INT, semester TEXT)")
self.cur.execute('DROP TABLE IF EXISTS Majors')
self.cur.execute("CREATE TABLE Majors(sid INT, major TEXT)")
self.cur.execute('DROP TABLE IF EXISTS Grades')
self.cur.execute("CREATE TABLE Grades(sid INT, cid INT, credits INT, grade INT)")
self.cur.execute('DROP TABLE IF EXISTS Students')
self.cur.execute("CREATE TABLE Students(sid INT, firstName TEXT, lastName TEXT, yearStarted INT)")
engine = create_engine("sqlite:///" + db_name)
df1 = pd.read_csv(students)
df1.to_sql('students', engine, if_exists='append', index=False)
df2 = pd.read_csv(grades)
df2.to_sql('grades', engine, if_exists='append', index=False)
df3 = pd.read_csv(courses)
df3.to_sql('courses', engine, if_exists='append', index=False)
df4 = pd.read_csv(majors)
df4.to_sql('majors', engine, if_exists='append', index=False)
self.cur.execute("DROP VIEW IF EXISTS allgrades")
self.cur.execute("""
create view allgrades as
SELECT s.firstName, s.lastName, m.major as ms,
c.number, c.major as mc, g.grade
FROM students as s, majors as m, grades as g, courses as c
WHERE s.sid = m.sid AND g.sid = s.sid AND g.cid = c.cid
""")
# q0 is an example
def q0(self):
query = '''
SELECT * FROM students
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q1(self):
query = '''
SELECT sid, year, semester, COUNT(*) as passed_courses
FROM courses natural join grades
WHERE grade > 0
GROUP BY sid, year, semester
ORDER BY sid, year, semester
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q2(self):
query = '''
SELECT firstName,lastName, year, semester, passed_courses
FROM (
SELECT sid, year, semester, COUNT(*) as passed_courses
FROM courses natural join grades
WHERE grade > 0
GROUP BY sid, year, semester
ORDER BY sid, year, semester) natural join Students
WHERE passed_courses > 1
GROUP BY firstName, lastName, year, semester
ORDER BY firstName, lastName , year, semester
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q3(self):
query = '''
SELECT firstName, lastName, ms, number
FROM allgrades
WHERE grade = 0 and ms = mc
GROUP BY firstName, lastName, ms, number
ORDER BY firstName, lastName, ms, number
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q4(self):
query = '''
SELECT firstName, lastName, ms, number
FROM (
SELECT s.firstName, s.lastName, m.major as ms, c.number, c.major as mc, g.grade
FROM students as s, majors as m, grades as g, courses as c
WHERE s.sid = m.sid AND g.sid = s.sid AND g.cid = c.cid)
WHERE grade = 0 and ms = mc
GROUP BY firstName, lastName, ms, number
ORDER BY firstName, lastName, ms, number
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q5(self):
query = '''
SELECT c.professor, (SELECT COUNT(*) FROM courses c2 INNER JOIN grades g ON c2.cid = g.cid WHERE g.grade >= 2
AND c.professor = c2.professor) AS success
FROM courses c
WHERE success != 0
GROUP BY c.professor, success
ORDER BY success DESC, c.professor ASC
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
def q6(self):
query = '''
SELECT number, REPLACE(GROUP_CONCAT(firstName || ' ' || lastName), ',', ', ') AS students_names, AVG(grade) avg_grade
FROM students NATURAL JOIN grades NATURAL JOIN courses
WHERE grade >= 2
GROUP BY number
HAVING AVG(grade) > 3
ORDER BY avg_grade DESC, students_names, number ASC
'''
self.cur.execute(query)
all_rows = self.cur.fetchall()
return all_rows
if __name__ == "__main__":
task = Task("database.db", 'students.csv', 'grades.csv', 'courses.csv', 'majors.csv')
rows = task.q0()
print(rows)
print()
rows = task.q1()
print(rows)
print()
rows = task.q2()
print(rows)
print()
rows = task.q3()
print(rows)
print()
rows = task.q4()
print(rows)
print()
rows = task.q5()
print(rows)
print()
rows = task.q6()
print(rows)
print()
| 3.375
| 3
|
setup.py
|
Ckoetael/monolog
| 0
|
12781188
|
"""
Mongo logger package
"""
from setuptools import setup, find_packages
import monolog
DESCRIPTION = 'MongoDB logger + std_logger'
AUTHOR = '<NAME>'
AUTHOR_EMAIL = "<EMAIL>"
URL = "https://github.com/Ckoetael/monolog"
VERSION = monolog.__version__
setup(
name="monolog",
version=VERSION,
description=DESCRIPTION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="BSD",
url=URL,
packages=find_packages(),
install_requires=['pymongo >= 3.10'],
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
],
zip_safe=False,
)
| 1.273438
| 1
|
eilat/DatabaseLog.py
|
jsoffer/eilat
| 0
|
12781189
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2013, 2014, 2015 <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
Neither the name of the involved organizations nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
"""
from os.path import expanduser, isfile
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
class DatabaseLogLite(object):
""" Low load only; using SQLite
To store bookmarks, configuration, etc.
AB01 CFG02
"""
def __init__(self):
# ###### STARTUP
super(DatabaseLogLite, self).__init__()
self.litedb = QSqlDatabase("QSQLITE")
db_file = expanduser("~/.eilat/eilat.db")
rebuild = not isfile(db_file)
self.litedb.setDatabaseName(db_file)
self.litedb.open()
if rebuild:
query_mknav = (
"CREATE TABLE navigation (host TEXT NOT NULL," +
" path TEXT, count INTEGER default 0, prefix char(2)," +
" PRIMARY KEY (host, path))")
self.litedb.exec_(query_mknav)
# ###### VALIDATION
# verifies database structure, not datatypes
tables = self.litedb.tables()
tables_ok = [k in tables for k in ['navigation']]
if not all(tables_ok):
raise RuntimeError("tables missing from database")
fnav_ok = [self.litedb.record('navigation').contains(k)
for k in ['host', 'path', 'count', 'prefix']]
if not all(fnav_ok):
raise RuntimeError("bad structure for 'navigation' table")
def model(self, prefix=None):
""" recreate the model each call; opening a new window will not
be needed to use the recent completions
"""
if prefix is None:
query_nav = QSqlQuery(
"select host || path from navigation " +
"order by count desc",
self.litedb)
else: # CFG02
query_nav = QSqlQuery(
"select host || path from navigation " +
"where prefix = '{}' ".format(prefix) +
"order by count desc",
self.litedb)
ret_model = QSqlQueryModel()
ret_model.setQuery(query_nav) # AB01
return ret_model
def store_navigation(self, host, path, prefix):
""" save host, path and increase its count AB01 """
host = host.replace("'", "%27")
path = path.replace("'", "%27")
insert_or_ignore = (
"insert or ignore into navigation (host, path, prefix) " +
"values ('{}', '{}', '{}')".format(host, path, prefix))
update = (
"update navigation set count = count + 1 where " +
"host = '{}' and path = '{}'".format(host, path))
self.litedb.exec_(insert_or_ignore)
self.litedb.exec_(update)
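# Hedged usage sketch (comments only, added for illustration; assumes a ~/.eilat
# directory already exists for the SQLite file):
#
#   log = DatabaseLogLite()
#   log.store_navigation("example.com", "/index.html", "ex")
#   completions_model = log.model(prefix="ex")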
| 1.25
| 1
|
tests/data/source/test_generator_data_source.py
|
trajkova-elena/scikit-multiflow
| 1
|
12781190
|
<gh_stars>1-10
from skmultiflow.data.observer.event_observer import BufferDataEventObserver
from skmultiflow.data.generator.anomaly_sine_generator import AnomalySineGenerator
from skmultiflow.data.source.generator_data_source import GeneratorDataSource
import numpy as np
import time
def record_to_dictionary(record):
if record is None:
return None
return record
def test_generator_data_source():
generator = AnomalySineGenerator(random_state=3)
buffer_data_event_observer = BufferDataEventObserver()
data_source = GeneratorDataSource(record_to_dictionary, [buffer_data_event_observer], generator)
data_source.listen_for_events()
while (len(buffer_data_event_observer.get_buffer()) < 5):
time.sleep(0.100) # 100ms
events = buffer_data_event_observer.get_buffer()[:5]
expected = [(np.array([[0.89431424, 2.15223693]]), np.array([1.])), (np.array([[0.46565888, 0.05565128]]), np.array([0.])), (np.array([[0.52767427, 0.45518165]]), np.array([0.])), (np.array([[-0.25010759, -0.39191752]]), np.array([0.])), (np.array([[0.70277688, 1.11163411]]), np.array([0.]))]
for j in range(0,5):
assert np.alltrue(np.isclose(events[j][0], expected[j][0]))
assert np.alltrue(np.isclose(events[j][1], expected[j][1]))
| 2
| 2
|
src/doc-break.py
|
andreblue/doc-breaker
| 4
|
12781191
|
<reponame>andreblue/doc-breaker
import msoffcrypto
import sys, getopt
import os
import urllib.request
def download_PasswordList():
#Grabbed from https://github.com/danielmiessler/SecLists
url = 'https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/Common-Credentials/10-million-password-list-top-10000.txt'
try:
urllib.request.urlretrieve(url, "10000-password-top-list.txt")
except Exception as e:
handleException(e)
sys.exit()
def handleException(e):
print ('Error: ' + str(e))
def breakFile(fileHandle, passwordStr):
try:
fileHandle.load_key(password=passwordStr)
except Exception as e:
if str(e) != 'Key verification failed':
handleException(e)
else:
print ('Password FOUND!')
print ('Saving document as decrypted_file.docx next to main script')
print ('Password was: "' + passwordStr + '"')
fileHandle.decrypt(open('decrypted_file.docx', "wb"))
sys.exit()
def main(argv):
inputfile = ''
doCommonPasswordChecks = False
verbose = False
customList = False
try:
opts, args = getopt.getopt(argv,"hi:cvl:",["ifile=", "common", "verbose", "list="])
except getopt.GetoptError:
print ('doc-break.py -i <inputfile>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print ('doc-break.py -i <inputfile> -c -v -l <listfile>')
print ('| -i | Required | <input file> | Will use that file as the one to open | Somefile.docx')
print ('| -c | Optional | None | Use the 10000 common list | ')
print ('| -v | Optional | None | Will spam console with info | ')
print ('| -l | Optional | <input file> | Will use the file as the password list | Password.txt ')
sys.exit()
elif opt in ("-i", "--ifile"):
inputfile = arg
elif opt in ("-v", "--verbose"):
verbose = True
elif opt in ("-c", "--common"):
doCommonPasswordChecks = True
elif opt in ("-l", "--list"):
customList = arg
if inputfile == '':
print ('No file passed.')
print ('doc-break.py -i <inputfile>')
sys.exit()
exists = os.path.isfile(inputfile)
if not exists:
print ('Failed to find file. Please check your file location: ')
print (inputfile)
sys.exit()
fh = msoffcrypto.OfficeFile(open(inputfile, "rb"))
found = False
if doCommonPasswordChecks:
exists = os.path.isfile("10000-password-top-list.txt")
if not exists:
download_PasswordList()
common_passwords = open('10000-password-top-list.txt')
currentLine = 1
print ("Checking against the 10000 common password list")
for line in common_passwords:
if verbose:
print ('Trying "' + line.rstrip() + '"')
print ( str(currentLine) + '/' + str(10000))
if breakFile(fh, line.rstrip()):
break
currentLine = currentLine+1
common_passwords.close()
if customList:
exists = os.path.isfile(customList)
if not exists:
print ('Could not find list "' + customList + '" Please check your file')
sys.exit()
password_list = open(customList)
#this is ugly. I know
linecount = 0
for line in password_list:
linecount = linecount+1
password_list.close()
password_list = open(customList)
linecount = str(linecount)
currentLine = 1
for line in password_list:
if verbose:
print ('Trying "' + line.rstrip() + '"')
print ( str(currentLine) + '/' + linecount)
if breakFile(fh, line.rstrip()):
break
currentLine = currentLine+1
password_list.close()
print ('Could not find the password. Perhaps try a larger list')
if __name__ == "__main__":
main(sys.argv[1:])
| 3
| 3
|
quera/13609/35987/dumper.py
|
TheMn/Quera-College-ML-Course
| 1
|
12781192
|
<reponame>TheMn/Quera-College-ML-Course
import zlib
import zipfile
import numpy as np
# `ethnics` and `balance` are assumed to be defined earlier in the notebook that uses
# this dumper; note that np.savez has no allow_pickle parameter, so that keyword is
# simply stored as an extra array in the archive.
np.savez('data.npz', ethnics=ethnics, balance=balance, allow_pickle=True)
def compress(file_names):
print("File Paths:")
print(file_names)
# Select the compression mode ZIP_DEFLATED for compression
# or zipfile.ZIP_STORED to just store the file
compression = zipfile.ZIP_DEFLATED
# create the zip file first parameter path/name, second mode
with zipfile.ZipFile("result.zip", mode="w") as zf:
for file_name in file_names:
# Add file to the zip file
# first parameter file to zip, second filename in zip
zf.write('./'+file_name, file_name, compress_type=compression)
file_names= ["data.npz", "solution.ipynb"]
compress(file_names)
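# Hedged sketch (comments only, not in the original script): the arrays saved above
# could later be restored from data.npz, assuming the same key names:
#
#   with np.load('data.npz', allow_pickle=True) as archive:
#       ethnics, balance = archive['ethnics'], archive['balance']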
| 3.4375
| 3
|
Fractals/Markus-Lyapunov Fractals/3D_Markus_Lyapunov.py
|
michellely98/FractalExploration
| 3
|
12781193
|
<filename>Fractals/Markus-Lyapunov Fractals/3D_Markus_Lyapunov.py
'''
Adapted from VisPy example volume rendering here: https://github.com/vispy/vispy/blob/master/examples/basics/scene/volume.py
NOTE: Normalization approach credited to <NAME> on Stack Overflow: https://stackoverflow.com/questions/51306488/transparency-with-voxels-in-vispy/51309283#51309283
'''
from numba import jit
import numpy as np
import imageio
from vispy import app, scene
from vispy.color import Colormap
from timeit import default_timer as timer
start = timer()
'''
Computing Fractal
'''
# PARAMETERS TO CHANGE THE FRACTAL GENERATED
anim = True # change whether to produce a .gif animation of fractal rotating
seq = "ABC" # sequence to alternate r values
a_lb = 2 # a lower bound
a_ub = 4 # a upper bound
b_lb = 2 # b lower bound
b_ub = 4 # b upper bound
c_lb = 2 # c lower bound
c_ub = 4 # c upper bound
# PARAMETERS REFINING ACCURACY OF FRACTAL PICTURE GENERATED
num_warmups = 100 # number of "warmups" or throwaway iterations before computing lyapunov exponent
num_lyap_iterations = 100 # number of iterations used to compute the lyapunov exp
steps = 100 # number of steps along each of the a, b and c axes -- the higher it is, the better the picture
# LOGISTIC MAP THAT GIVES US THE NEXT X
@jit
def F(x, curr_r):
return (curr_r * x) * (1 - x)
# DERIVATIVE OF F -- USED TO COMPUTE THE LYAPUNOV EXPONENT
@jit
def Fprime(x, curr_r):
ans = curr_r * (1 - (2 * x))
ans[ans == 0] = 0.00001
ans[ans == -np.inf] = -1000
ans[ans == np.inf] = 1000
return ans
# RETURNS THE CORRECT B-VALUE BASED ON THE CURRENT ITERATION
@jit
def getseqval(curr_iteration, a, b, c):
index = np.mod(curr_iteration, len(seq))
if (seq[index] == 'A'):
return a
elif (seq[index] == 'B'):
return b
else:
return c
# RETURNS THE LYAPUNOV EXPONENT BASED ON THE SPECIFIED A, B AND C VALUES
@jit
def getlyapexponent(a, b, c):
x = .5 # initial value of x
lyap_sum = 0 # initializing lyapunov sum for use later
# do warmups, to discard the early values of the iteration to allow the orbit to settle down
for i in range(num_warmups):
x = F(x, getseqval(i, a, b, c))
for i in range(num_warmups, num_lyap_iterations + num_warmups):
lyap_sum += np.log( np.abs(Fprime(x, getseqval(i, a, b, c) ) ) )
# get next x
x = F(x, getseqval(i, a, b, c))
return (lyap_sum / num_lyap_iterations)
# RETURNS DATA NORMALIZED TO VALUES BETWEEN 0 AND 1, AS WELL AS THE NORMALIZED VALUE OF BOUNDARY_OLD
@jit
def normalize(data, boundary_old):
orig_max = data.max()
orig_min = data.min()
# normalized boundary
boundary_norm = boundary_old - orig_min
boundary_norm = boundary_norm / (orig_max - orig_min)
data = np.subtract(data, orig_min)
data = np.divide(data, orig_max - orig_min)
return data, boundary_norm
'''
Creating and Preparing 3D Fractal Data
'''
# CREATING FRACTAL IMAGE
a = np.linspace(a_lb, a_ub, steps) # range of a values
b = np.linspace(b_lb, b_ub, steps) # range of b values
c = np.linspace(c_lb, c_ub, steps)
aa, bb, cc = np.meshgrid(a, b, c, indexing='ij')
fractal_3D = getlyapexponent(aa, bb, cc)
# normalize data between 0 and 1 to be displayed and return chaotic boundary
fractal_3D, chaotic_boundary = normalize(fractal_3D, 0.0)
print("chaotic boundary:", chaotic_boundary)
'''
Creating 3D projection of data
'''
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
camera = scene.cameras.ArcballCamera(parent=view.scene, fov=60, scale_factor=steps*3, center = (0, 0, 0))
view.camera = camera
# Create the volume
volume = scene.visuals.Volume(fractal_3D, clim=(0, 1), method='translucent', parent=view.scene, threshold=0.225,emulate_texture=False)
volume.transform = scene.STTransform(translate=(-steps//2, -steps//2, -steps//2))
# Creating color map to display fractal
fractal_colors = [(1, 0, 1, .5), (0, 0, 1, .5), (.1, .8, .8, .3), (.1, 1, .1, .3), (1, 1, 0, .2), (1, 0, 0, .1), (0, 1, 1, (1 - chaotic_boundary) / 7), (0, 1, .8, (1 - chaotic_boundary) / 8), (0, 0, 0, 0), (0, 0, 0, 0)]
color_control_pts = [0, (0.6 * chaotic_boundary), (0.7 * chaotic_boundary), (0.8 * chaotic_boundary), (0.9 * chaotic_boundary), (0.95 * chaotic_boundary), (0.97 * chaotic_boundary), (0.99 * chaotic_boundary), chaotic_boundary, chaotic_boundary, 1.0]
fractal_map = Colormap(fractal_colors, controls=color_control_pts, interpolation='zero')
# Assigning newly made color map to volume data
volume.cmap = fractal_map
''' Creating animation of rotating fractal '''
if anim:
file_name = "Anim_3D_Fractal_" + seq + ".gif"
writer = imageio.get_writer(file_name)
# Parameters to change animation
angle_delta = 10.0 # amount to rotate fractal by each frame
axes = [[1, 1, 0], [1, .5, .5], [1, 0, 1], [.5, 0, 1], [1, .5, .5]] # axes to rotate fractal on, in succession
for axis in axes:
for rotate in range(int(360/angle_delta)):
im = canvas.render()
writer.append_data(im)
view.camera.transform.rotate(angle_delta, axis)
writer.close()
''' Run program '''
if __name__ == '__main__':
print(__doc__)
app.run()
end = timer()
print("elapsed time: " + str(end - start))
| 2.84375
| 3
|
modu_01/02_lab.py
|
94JuHo/study_for_deeplearning
| 0
|
12781194
|
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' # suppress the TensorFlow warnings that show up on my MacBook
import tensorflow as tf
# Trainable variables: values that can be updated during training.
# x_train = [1, 2, 3]
# y_train = [1, 2, 3]
# Use placeholders so input values can be fed in at run time.
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# W and b are unknown, so start them off with random values.
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
#Our hypothesis XW+b
# hypothesis = x_train * W + b
hypothesis = X * W + b
#cost/loss function
#cost = tf.reduce_mean(tf.square(hypothesis - y_train))
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# reduce_mean takes a tensor and returns its mean
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)
sess = tf.Session()
sess.run(tf.global_variables_initializer()) # variables must always be initialized through this call before they are used
for step in range(4001):
# sess.run(train)
cost_val, W_val, b_val, _ = sess.run([cost, W, b, train], feed_dict={X:[1, 2, 3, 4, 5], Y:[2.1, 3.1, 4.1, 5.1, 6.1]})
if step % 20 == 0:
#print(step, sess.run(cost), sess.run(W), sess.run(b))
print(step, cost_val, W_val, b_val)
| 2.796875
| 3
|
src/Problem0006.py
|
rrohrer/ProjectEuler
| 0
|
12781195
|
sum_of_squares = sum(map(lambda x: x ** 2, list(range(1, 101))))
sum_squared = sum(list(range(1, 101))) ** 2
print(sum_squared - sum_of_squares)
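# The same answer follows from the closed forms (a sketch added for comparison,
# assuming n = 100): the sum is n(n+1)/2 and the sum of squares is n(n+1)(2n+1)/6.
n = 100
print((n * (n + 1) // 2) ** 2 - n * (n + 1) * (2 * n + 1) // 6)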
| 2.921875
| 3
|
arm64_tests/disassemble.py
|
jgouly/cranelift-1
| 0
|
12781196
|
#!/usr/bin/env python3
import sys
import os
import tempfile
words = []
found_code = False
for line in sys.stdin.readlines():
if line.startswith("Machine code:"):
found_code = True
continue
if found_code:
words.append(int("0x" + line.strip(), 16))
fd, filename = tempfile.mkstemp(suffix = ".bin")
f = os.fdopen(fd, "wb")
for word in words:
f.write(bytes([word & 0xff, (word >> 8) & 0xff, (word >> 16) & 0xff, (word >> 24) & 0xff]))
f.close()
os.system("aarch64-linux-gnu-objdump -b binary -m aarch64 -EL -D %s" % filename)
os.unlink(filename)
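# Note (added for illustration): the manual byte shifts above emit each word in
# little-endian order, which is equivalent to struct.pack("<I", word) per word.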
| 2.453125
| 2
|
tests/unit/test_cname_validator.py
|
cloud-gov/domain-broker
| 5
|
12781197
|
import openbrokerapi
import pytest
from broker.validators import CNAME
def test_one_layer_of_cnames(dns):
dns.add_cname("_acme-challenge.foo.example.gov")
# we're just making sure we don't raise an exception here
CNAME(["foo.example.gov"]).validate()
def test_two_layers_of_cnames(dns):
dns.add_cname(
"_acme-challenge.foo.example.gov", target="_acme-challenge.bar.example.gov"
)
dns.add_cname(
"_acme-challenge.bar.example.gov",
target="_acme-challenge.foo.example.gov.domains.cloud.test",
)
# we're just making sure we don't raise an exception here
CNAME(["foo.example.gov"]).validate()
def test_three_layers_of_cnames(dns):
dns.add_cname(
"_acme-challenge.foo.example.gov", target="_acme-challenge.bar.example.gov"
)
dns.add_cname(
"_acme-challenge.bar.example.gov", target="_acme-challenge.baz.example.gov"
)
dns.add_cname(
"_acme-challenge.baz.example.gov",
target="_acme-challenge.foo.example.gov.domains.cloud.test",
)
# we're just making sure we don't raise an exception here
CNAME(["foo.example.gov"]).validate()
def test_detects_looping_cnames(dns):
dns.add_cname(
"_acme-challenge.foo.example.gov", target="_acme-challenge.bar.example.gov"
)
dns.add_cname(
"_acme-challenge.bar.example.gov", target="_acme-challenge.foo.example.gov"
)
# here we expect the loop to be detected and rejected as a bad request
with pytest.raises(
openbrokerapi.errors.ErrBadRequest,
match=r"_acme-challenge.foo.example.gov points to itself. Resolution chain: \['_acme-challenge.foo.example.gov', '_acme-challenge.bar.example.gov'\]",
):
CNAME(["foo.example.gov"]).validate()
| 2.171875
| 2
|
thonny/plugins/language.py
|
aroberge/thonny
| 0
|
12781198
|
<reponame>aroberge/thonny
import tkinter as tk
from tkinter import ttk
from thonny import get_workbench
from thonny.config_ui import ConfigurationPage
from thonny.languages import LANGUAGES_DICT
class GeneralConfigurationPage(ConfigurationPage):
def __init__(self, master):
ConfigurationPage.__init__(self, master)
self._language_var = get_workbench().get_variable("general.language")
self._language_label = ttk.Label(self, text=_("Language"))
self._language_label.grid(
row=7, column=0, sticky=tk.W, padx=(0, 10), pady=(10, 0)
)
languages = list(LANGUAGES_DICT.keys())
self._language_combo = ttk.Combobox(
self,
width=7,
exportselection=False,
textvariable=self._language_var,
state="readonly",
height=15,
values=languages,
)
self._language_combo.grid(row=7, column=1, sticky=tk.W, pady=(10, 0))
reopen_label = ttk.Label(
self,
text=_("You must restart Thonny for language change to take effect."),
font="BoldTkDefaultFont",
)
reopen_label.grid(row=20, column=0, sticky=tk.W, pady=20, columnspan=2)
self.columnconfigure(1, weight=1)
def load_plugin() -> None:
get_workbench().add_configuration_page(_("Language"), GeneralConfigurationPage)
| 2.34375
| 2
|
services/workers/src/workers/jobs/models/__init__.py
|
goubertbrent/oca-backend
| 0
|
12781199
|
<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from datetime import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
from common.mcfw.utils import Enum
from common.dal import parent_ndb_key
from common.models import NdbModel
class VDABSettings(NdbModel):
client_id = ndb.StringProperty(indexed=False)
synced_until = ndb.IntegerProperty(indexed=False, default=0)
@classmethod
def create_key(cls):
return ndb.Key(cls, 'VDABSettings')
class JobOfferFunction(NdbModel):
title = ndb.StringProperty()
description = ndb.TextProperty()
class JobOfferEmployer(NdbModel):
name = ndb.StringProperty()
class JobOfferLocation(NdbModel):
geo_location = ndb.GeoPtProperty() # type: ndb.GeoPt
city = ndb.StringProperty()
street = ndb.StringProperty()
street_number = ndb.StringProperty()
country_code = ndb.StringProperty()
postal_code = ndb.StringProperty()
class JobOfferContract(NdbModel):
type = ndb.StringProperty()
class JobOfferContactInformation(NdbModel):
email = ndb.TextProperty()
phone_number = ndb.TextProperty()
website = ndb.TextProperty()
class JobOfferInfo(NdbModel):
function = ndb.LocalStructuredProperty(JobOfferFunction) # type: JobOfferFunction
employer = ndb.LocalStructuredProperty(JobOfferEmployer) # type: JobOfferEmployer
location = ndb.LocalStructuredProperty(JobOfferLocation) # type: JobOfferLocation
contract = ndb.LocalStructuredProperty(JobOfferContract) # type: JobOfferContract
contact_information = ndb.LocalStructuredProperty(JobOfferContactInformation) # type: JobOfferContactInformation
details = ndb.TextProperty()
class JobOfferSourceType(Enum):
VDAB = 'vdab'
OCA = 'oca'
class JobOfferSource(NdbModel):
type = ndb.StringProperty(choices=JobOfferSourceType.all())
id = ndb.StringProperty()
name = ndb.TextProperty()
avatar_url = ndb.TextProperty()
class JobOffer(NdbModel):
# VDAB reasons
INVISIBLE_REASON_SKIP = 'skip'
INVISIBLE_REASON_STATUS = 'status'
INVISIBLE_REASON_LOCATION_MISSING = 'location_missing'
INVISIBLE_REASON_LOCATION_UNKNOWN = 'location_unknown'
INVISIBLE_REASON_LOCATION_COUNTRY = 'location_country'
INVISIBLE_REASON_LOCATION_LATLON = 'location_latlon'
INVISIBLE_REASON_DESCRIPTION = 'description'
INVISIBLE_REASON_DOUBLE = 'double'
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True)
source = ndb.StructuredProperty(JobOfferSource) # type: JobOfferSource
service_email = ndb.StringProperty() # can be None (when created via VDAB)
demo_app_ids = ndb.TextProperty(repeated=True) # TODO communities: remove after migration
demo = ndb.BooleanProperty()
data = ndb.JsonProperty(compressed=True)
visible = ndb.BooleanProperty()
invisible_reason = ndb.TextProperty()
info = ndb.LocalStructuredProperty(JobOfferInfo) # type: JobOfferInfo
job_domains = ndb.TextProperty(repeated=True)
@property
def id(self):
return self.key.id()
@classmethod
def create_key(cls, job_id):
return ndb.Key(cls, job_id)
@classmethod
def get_by_source(cls, source, source_id):
return cls.query() \
.filter(cls.source.type == source) \
.filter(cls.source.id == source_id) \
.get()
@classmethod
def list_by_service(cls, service_email):
return cls.query().filter(cls.service_email == service_email)
class JobNotificationSchedule(Enum):
NEVER = 'no_notifications'
# every 30 minutes so as not to spam users when multiple new jobs are posted in a short time
AS_IT_HAPPENS = 'as_it_happens'
AT_MOST_ONCE_A_DAY = 'at_most_once_a_day'
AT_MOST_ONCE_A_WEEK = 'at_most_once_a_week'
class JobMatchingCriteriaNotifications(NdbModel):
timezone = ndb.StringProperty()
how_often = ndb.StringProperty(choices=JobNotificationSchedule.all())
delivery_day = ndb.StringProperty()
delivery_time = ndb.IntegerProperty()
class JobMatchingCriteria(NdbModel):
created = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
updated = ndb.DateTimeProperty(auto_now=True)
last_load_request = ndb.DateTimeProperty()
address = ndb.TextProperty()
geo_location = ndb.GeoPtProperty(indexed=False) # type: ndb.GeoPt
distance = ndb.IntegerProperty(indexed=False)
# Currently looking for a job. Inactive profiles will have their profile and matches deleted after a certain time
# TODO: actually create this cron
active = ndb.BooleanProperty(default=True)
contract_types = ndb.TextProperty(repeated=True)
job_domains = ndb.StringProperty(repeated=True)
keywords = ndb.TextProperty(repeated=True)
notifications = ndb.LocalStructuredProperty(JobMatchingCriteriaNotifications) # type: JobMatchingCriteriaNotifications
demo = ndb.BooleanProperty(default=False)
@property
def should_send_notifications(self):
return self.active and self.notifications and self.notifications.how_often != JobNotificationSchedule.NEVER
@property
def app_user(self):
return users.User(self.key.parent().id().decode('utf8'))
@classmethod
def create_key(cls, app_user):
return ndb.Key(cls, app_user.email(), parent=parent_ndb_key(app_user))
@classmethod
def list_by_job_domain(cls, job_domain):
return cls.query().filter(cls.job_domains == job_domain)
@classmethod
def list_inactive(cls):
return cls.query().filter(cls.active == False)
@classmethod
def list_inactive_loads(cls, d):
return cls.query(cls.last_load_request < d)
class JobMatchingNotifications(NdbModel):
job_ids = ndb.IntegerProperty(repeated=True, indexed=False) # type: List[int]
schedule_time = ndb.IntegerProperty()
@property
def app_user(self):
return users.User(self.key.parent().id().decode('utf8'))
@classmethod
def create_key(cls, app_user):
return ndb.Key(cls, app_user.email(), parent=parent_ndb_key(app_user))
@classmethod
def list_scheduled(cls, schedule_time):
return cls.query()\
.filter(cls.schedule_time < schedule_time)\
.filter(cls.schedule_time > 0)
class JobMatchStatus(Enum):
PERMANENTLY_DELETED = 0
DELETED = 1
NEW = 2
STARRED = 3
class JobMatch(NdbModel):
# Score given to matches created via non-automated means (like pressing a button linked to a job on a news item)
MANUAL_CREATED_SCORE = 1e8
create_date = ndb.DateTimeProperty(auto_now_add=True, indexed=False)
update_date = ndb.DateTimeProperty(auto_now=True)
status = ndb.IntegerProperty(choices=JobMatchStatus.all())
job_id = ndb.IntegerProperty() # For querying only
score = ndb.IntegerProperty() # Based on location and source - higher score => higher in the list
chat_key = ndb.TextProperty() # only set if user has send a message already
@property
def app_user(self):
return users.User(self.key.parent().id().decode('utf8'))
@property
def can_delete(self):
return self.status == JobMatchStatus.NEW
def get_job_id(self):
return self.key.id()
@classmethod
def create_key(cls, app_user, job_id):
return ndb.Key(cls, job_id, parent=parent_ndb_key(app_user))
@classmethod
def list_by_app_user(cls, app_user):
return cls.query(ancestor=parent_ndb_key(app_user))
@classmethod
def list_by_app_user_and_status(cls, app_user, status):
return cls.list_by_app_user(app_user) \
.filter(cls.status == status) \
.order(-cls.update_date)
@classmethod
def list_new_by_app_user(cls, app_user):
return cls.list_by_app_user(app_user) \
.filter(cls.status == JobMatchStatus.NEW) \
.order(-cls.score)
@classmethod
def list_by_job_id(cls, job_id):
return cls.query().filter(cls.job_id == job_id)
@classmethod
def list_by_job_id_and_status(cls, job_id, status):
return cls.list_by_job_id(job_id) \
.filter(cls.status == status)
@classmethod
def manually_create(cls, app_user, job_id):
match = cls(key=JobMatch.create_key(app_user, job_id))
match.status = JobMatchStatus.NEW
match.create_date = datetime.now()
match.update_date = datetime.now()
match.job_id = job_id
match.score = cls.MANUAL_CREATED_SCORE
return match
| 1.976563
| 2
|
python/hackerrank/variables/variable creation/task.py
|
3keepmovingforward3/ENGR1102
| 0
|
12781200
|
def variables():
f = open('test.txt', 'w')
# Start your code below (tip: Make sure to indent your code)
# Setup variables here
# put variables in order respective of task commands
f.write(______+"\n")
f.write(______+"\n")
f.write(______+"\n")
f.write(______+"\n")
f.write(______+"\n")
# put variables in order respective of task commands
print(______)
print(______)
print(______)
print(______)
print(______)
f.close()
| 3.171875
| 3
|
Python-Programs/Cyclically_rotate_an_array_by_one.py
|
adityaverma121/Simple-Programs
| 71
|
12781201
|
def rotate(A, n):
temp = A[n - 1]
for i in range(len(A)):
A[n - i - 1] = A[n - i - 2]
A[0] = temp
return A
A = [1, 2, 3, 4, 5]
print(rotate(A, len(A)))
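# Equivalent one-liner (a sketch added for comparison, using a fresh list because
# rotate() above modifies A in place):
B = [1, 2, 3, 4, 5]
print(B[-1:] + B[:-1]) # [5, 1, 2, 3, 4]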
| 3.609375
| 4
|
DeepLearning/Python/Chapter 3/Ch03-06-02-mnist.py
|
BlueWay-KU/Study
| 0
|
12781202
|
import sys, os
sys.path.append(os.pardir)
import numpy as np
from dataset.mnist import load_mnist
from PIL import Image
def img_show(img):
pil_img = Image.fromarray(np.uint8(img))
pil_img.show()
(x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize = False)
img = x_train[0]
label = t_train[0]
print(label)
print(img.shape)
img = img.reshape(28, 28)
print(img.shape)
img_show(img)
| 2.953125
| 3
|
app/users/tests/test_views.py
|
erischon/p10_try_1
| 0
|
12781203
|
<reponame>erischon/p10_try_1<gh_stars>0
from django.test import TestCase, Client, RequestFactory
from django.urls import reverse
from django.contrib.auth.models import User
from database.models import Product, Nutriscore
class UsersTestViews(TestCase):
def setUp(self):
self.client = Client()
self.factory = RequestFactory()
self.credentials = {
'username': 'testuser',
'email': 'testemail',
'password': '<PASSWORD>'}
User.objects.create_user(**self.credentials)
self.user = User.objects.get(username='testuser')
nutriscore = Nutriscore.objects.create(nut_id=1, nut_type="C")
self.product = Product.objects.create(
prod_id=3017620422003,
prod_name="test product",
nut_id=nutriscore,
)
self.product.myproduct.add(self.user)
self.signupuser_url = reverse('signupuser')
self.logoutuser_url = reverse('logoutuser')
self.moncompte_url = reverse('moncompte')
self.myproducts_url = reverse('myproducts')
self.myproducts_delete_url = reverse(
'myproducts_delete', args=[self.product.prod_id])
# === Method signupuser ===
def test_signupuser_view(self):
response = self.client.get(self.signupuser_url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/signup.html')
def test_signupuser_view_post_method_no_same_kw(self):
response = self.client.post(
'/users/signup/', {'password1': '<PASSWORD>', 'password2': '<PASSWORD>'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/signup.html')
def test_signupuser_view_post_method_except(self):
response = self.client.post(
'/users/signup/', {'password1': '<PASSWORD>', 'password2': '<PASSWORD>', 'username': 'testuser', 'email': 'testemail'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/signup.html')
def test_signupuser_view_post_method_with_connect(self):
response = self.client.post(
'/users/signup/', {'password1': '<PASSWORD>', 'password2': '<PASSWORD>', 'username': 'testuser2', 'email': 'testemail'})
self.assertEquals(response.status_code, 302)
# === Method loginuser ===
def test_loginuser_view(self):
response = self.client.post(
'/users/login/', self.credentials, follow=True)
self.assertTrue(response.context['user'].is_active)
self.assertEquals(response.status_code, 200)
def test_loginuser_view_user_is_none(self):
response = self.client.post(
'/users/login/', {'username': 'none', 'password': '<PASSWORD>'})
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/login.html')
def test_loginuser_view_get_method(self):
response = self.client.get('/users/login/')
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/login.html')
def test_logoutuser_view(self):
self.client.login(**self.credentials)
response = self.client.post(self.logoutuser_url)
self.assertRedirects(response, reverse('home'))
def test_moncompte_view(self):
self.client.login(**self.credentials)
response = self.client.get(self.moncompte_url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/moncompte.html')
def test_myproducts_view(self):
self.client.login(**self.credentials)
response = self.client.get(self.myproducts_url)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(response, 'users/myproducts.html')
def test_myproducts_delete_view(self):
self.client.login(**self.credentials)
response = self.client.get(self.myproducts_delete_url)
self.assertEquals(response.status_code, 302)
| 2.28125
| 2
|
excel_and_pandas_lab_44.py
|
QPThree/python-mycode
| 1
|
12781204
|
#!/usr/bin/python3
"""Alta3 Research | <EMAIL>
Exploring using pandas to create dataframes, and output graphs"""
import pandas as pd
def main():
# define the name of our xls file
excel_file = 'files/movies.xls'
# create a DataFrame (DF) object. EASY!
# because we did not specify a sheet
# only the first sheet was read into the DF
movies = pd.read_excel(excel_file)
# show the first five rows of our DF
# DF has 5 rows and 25 columns (indexed by integer)
print(movies.head())
# Choose the first column "Title" as
# index (index=0)
movies_sheet1 = pd.read_excel(excel_file, sheet_name=0, index_col=0)
# DF has 5 rows and 24 columns (indexed by title)
# print the first five movies in the dataframe
print(movies_sheet1.head())
# export 5 movies from the top dataframe to excel
movies_sheet1.head(5).to_excel("5movies.xlsx")
# export 5 movies from the top of the dataframe to json
movies_sheet1.head(5).to_json("5movies.json")
# export 5 movies from the top of the dataframe to csv
movies_sheet1.head(5).to_csv("5movies.csv")
if __name__ == "__main__":
main()
| 4.34375
| 4
|
app/config/default_config.py
|
vonsago/service_platform
| 6
|
12781205
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/9/8 下午9:22
# @Author : Vassago
# @File : default_config.py
# @Software: PyCharm
import os
class DefaultConfig():
CONFIG_NAME = 'DEFAULT'
DEBUG = True
TEMPLATE_DIR = "."
STATIC_DIR = "."
class DevConfig():
CONFIG_NAME = 'PRO'
DEBUG = True
TEMPLATE_DIR = "./app"
STATIC_DIR = "./app"
| 1.671875
| 2
|
libs/coda/runtime/typeregistry.py
|
viridia/coda
| 1
|
12781206
|
<filename>libs/coda/runtime/typeregistry.py<gh_stars>1-10
'''Registry for extensions and subclasses.'''
class TypeRegistry:
def __init__(self):
self.__subtypes = {}
self.__extensions = {}
def addSubtype(self, subtype):
'''Register a type as being a subtype of a given base type.'''
assert subtype.getTypeId() is not None, subtype.getName()
base = subtype.getBaseType()
assert base is not None
while base.getBaseType():
base = base.getBaseType()
try:
subtypesForBase = self.__subtypes[id(base)]
except KeyError:
subtypesForBase = self.__subtypes[id(base)] = {}
if subtype.getTypeId() in subtypesForBase:
raise AssertionError(
"Error registering type {0}: subtype ID {1} already registered".\
format(subtype.getFullName(), subtype.getTypeId()))
subtypesForBase[subtype.getTypeId()] = subtype
return self
def getSubtype(self, base, typeId):
'''Retrieve a subtype of a base type by subtype ID.'''
subtypesForBase = self.__subtypes.get(id(base))
if subtypesForBase:
return subtypesForBase.get(typeId)
return None
def getSubtypes(self, base):
'''Retrieve all subtype of a base type.'''
return self.__subtypes.get(id(base), {})
def addFile(self, file):
'''Add all subtypes and extensions registered within a file.'''
def addStruct(struct):
if struct.getBaseType() is not None:
self.addSubtype(struct)
for st in struct.getStructs():
addStruct(st)
for struct in file.getStructs():
addStruct(struct)
def getExtension(self, struct, fieldId):
return self.__extensions.get(id(struct), {}).get(fieldId)
def addExtension(self, extField):
try:
extensionsForStruct = self.__extensions[id(extField.getExtends())]
except KeyError:
extensionsForStruct = self.__extensions[id(extField.getExtends())] = {}
assert extField.getId() not in extensionsForStruct, \
'Duplicate extension id for struct ' + extField.getExtends().getName()
extensionsForStruct[extField.getId()] = extField
INSTANCE = None
TypeRegistry.INSTANCE = TypeRegistry()
| 2.53125
| 3
|
swap_start/tf_train/special_train/test/begin1.py
|
yudongqiu/gomoku
| 3
|
12781207
|
<filename>swap_start/tf_train/special_train/test/begin1.py
# black white black
begin_lib = [[ ( 8, 8), ( 7, 9), (11,11)]]
| 1.382813
| 1
|
EulerProblem5.py
|
JonathanFox1993/PythonCode
| 0
|
12781208
|
# Greatest common divisor of 2 numbers (used below to build up the LCM of many)
def gcd(x, y): return y and gcd(y, x % y) or x
# Lowest common multiple of 2 integers
def lcm(x, y): return x * y // gcd(x, y)
# Loops through the numbers 1 to 20, folding each one into the lcm function to keep a
# running lowest common multiple. When the loop reaches 20 you have the smallest number
# that is evenly divisible by every number from 1 to 20.
n = 1
for i in range(1, 21):
n = lcm(n, i)
print(n)
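# Equivalent sketch (added for comparison, assuming Python 3.5+ for math.gcd):
# fold the lcm over 1..20 with the standard library instead of hand-rolled helpers.
import math
from functools import reduce
print(reduce(lambda acc, k: acc * k // math.gcd(acc, k), range(1, 21), 1))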
| 3.984375
| 4
|
Lib/site-packages/django_core/admin.py
|
fochoao/cpython
| 0
|
12781209
|
from django.contrib import admin
from .models import TokenAuthorization
class TokenAuthorizationAdmin(admin.ModelAdmin):
"""Model admin for the TokenAuthorization model."""
list_display = ('id', 'reason', 'user', 'token', 'email_address',
'created_user', 'expires')
readonly_fields = list_display + ('email_sent', 'text')
fields = readonly_fields
admin.site.register(TokenAuthorization, TokenAuthorizationAdmin)
| 2.109375
| 2
|
tests/functional/modules/ims_catalog/test_catalog_managed_acbs.py
|
thedoubl3j/ibm_zos_ims
| 7
|
12781210
|
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
import pytest
import re
from math import ceil
from pprint import pprint
from ibm_zos_ims.tests.functional.module_utils.ims_test_catalog_utils import CatalogInputParameters as cp # pylint: disable=import-error
from ibm_zos_ims.tests.functional.module_utils.ims_test_catalog_utils import load_catalog, purge_catalog # pylint: disable=import-error
__metaclass__ = type
BYTES_PER_TRK = 56664
BYTES_PER_CYL = BYTES_PER_TRK * 15
BYTES_PER_KB = 1024
BYTES_PER_MB = 1048576
# Scenario 2: Load mode, managed_acbs - setup=True
def test_catalog_load_managed_acbs(ansible_zos_module):
hosts = ansible_zos_module
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4533I",
control_statements={'managed_acbs': {"setup": True}})
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Scenario 3: Update mode, managed_acbs - stage options(save_acb=UNCOND and clean_staging_dataset=True)
# and update option(replace_acb=UNCOND)
def test_catalog_update_managed_acbs_stage_and_update(ansible_zos_module):
hosts = ansible_zos_module
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.UPDATEMODE,
validation_msg="DFS4536I",
control_statements={
'managed_acbs': {
'stage': {
'save_acb': "UNCOND",
'clean_staging_dataset': True
}
}
})
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.UPDATEMODE,
validation_msg="DFS4534I",
control_statements={'managed_acbs': {'update': {'replace_acb': "UNCOND"}}})
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Setup the Catalog while defining the bootstrap dataset.
def test_catalog_define_bootstrap(ansible_zos_module):
hosts = ansible_zos_module
# Delete the bootstrap dataset first
response = hosts.all.zos_data_set(name=cp.BSDS, state="absent")
for result in response.contacted.values():
assert result['message'] == ''
if result['changed'] is False:
response = hosts.all.zos_data_set(name=cp.BSDS, state="absent", volume="SCR03")
# Load catalog while defining the bootstrap dataset
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4533I",
bootstrap_dataset={
'dataset_name': cp.BSDS,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 350,
'volumes': ['222222']
},
control_statements={'managed_acbs': {"setup": True}})
# Verify the bootstrap dataset was created with the specified parameters
estimated_size_in_bytes = 0
response = hosts.all.command("dls -s " + cp.BSDS)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 350
# Purge the catalog
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Finally delete the bootstrap dataset again
response = hosts.all.zos_data_set(name=cp.BSDS, state="absent")
for result in response.contacted.values():
assert result['changed'] is True
assert result['message'] == ''
# Setup the Catalog while defining the staging dataset.
def test_catalog_define_staging(ansible_zos_module):
hosts = ansible_zos_module
# Delete the staging dataset first
response = hosts.all.zos_data_set(name=cp.STAGE, state="absent")
for result in response.contacted.values():
assert result['message'] == ''
if result['changed'] is False:
response = hosts.all.zos_data_set(name=cp.STAGE, state="absent", volume="SCR03")
# Load catalog while defining the staging dataset
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4533I",
directory_staging_dataset={
'dataset_name': cp.STAGE,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 300,
'volumes': ['222222']
},
control_statements={'managed_acbs': {"setup": True}})
# Verify the staging dataset was created with the specified parameters
estimated_size_in_bytes = 0
response = hosts.all.command("dls -s " + cp.STAGE)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
pprint("dls stdout: " + line)
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 300
# Purge the catalog
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Finally delete the staging dataset again
response = hosts.all.zos_data_set(name=cp.STAGE, state="absent")
for result in response.contacted.values():
assert result['changed'] is True
assert result['message'] == ''
# Setup the Catalog while defining the directory datasets.
def test_catalog_define_directory(ansible_zos_module):
hosts = ansible_zos_module
# Delete the directory datasets first
response = hosts.all.zos_data_set(batch=cp.DIR_BATCH)
for result in response.contacted.values():
assert result['message'] == ''
if result['changed'] is False:
response = hosts.all.zos_data_set(name=cp.DIR_BATCH, state="absent", volume="SCR03")
# Load catalog while defining the directory datasets
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4533I",
directory_datasets=[
{
'dataset_name': cp.DIR1,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 200,
'volumes': ['222222']
},
{
'dataset_name': cp.DIR2,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 200,
'volumes': ['222222']
},
],
control_statements={'managed_acbs': {"setup": True}})
# Verify the directory datasets were created with the specified parameters
estimated_size_in_bytes = 0
response = hosts.all.command("dls -s " + cp.DIR1)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 200
response = hosts.all.command("dls -s " + cp.DIR2)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 200
# Purge the catalog
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Finally delete the directory datasets again
response = hosts.all.zos_data_set(batch=cp.DIR_BATCH)
for result in response.contacted.values():
assert result['changed'] is True
assert result['message'] == ''
"""
Scenario 7: Test the creation of the temp_acb_dataset, which holds ACBs that reference
a GSAM database. Test the catalog in load mode with managed_acbs setup = true or with no
managed_acbs options specified. Specify the temp_acb_dataset fields. The temp_acb_dataset
can be named anything; I recommend sticking with your first two IMS library qualifiers,
with the third qualifier being whatever you want. Verify the temp acb dataset is created
with the specified values. Purge the catalog.
"""
def test_creation_of_temp_acb_dataset_with_managed_acbs(ansible_zos_module):
hosts = ansible_zos_module
# Delete TEMP_ACB data set before the test
response = hosts.all.zos_data_set(name=cp.TEMP_ACB, state="absent")
for result in response.contacted.values():
assert result['message'] == ''
temp_acb_data_set = {
'dataset_name': cp.TEMP_ACB,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 200,
'volumes': ['222222']
}
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
temp_acb_dataset=temp_acb_data_set,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4533I",
control_statements={
'managed_acbs': {
'setup': True
}
})
estimated_size_in_bytes = 0
response = hosts.all.command("dls -s " + cp.TEMP_ACB)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 200
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Delete TEMP_ACB data set after the test
response = hosts.all.zos_data_set(name=cp.TEMP_ACB, state="absent")
for result in response.contacted.values():
assert result['changed'] is True
assert result['message'] == ''
def test_creation_of_temp_acb_dataset_without_managed_acbs(ansible_zos_module):
hosts = ansible_zos_module
# Delete TEMP_ACB data set before the test
response = hosts.all.zos_data_set(name=cp.TEMP_ACB, state="absent")
for result in response.contacted.values():
assert result['message'] == ''
temp_acb_data_set = {
'dataset_name': cp.TEMP_ACB,
'disposition': 'NEW',
'normal_disposition': 'CATLG',
'primary': 200,
'volumes': ['222222']
}
load_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
acb_lib=cp.ACBLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
temp_acb_dataset=temp_acb_data_set,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.LOADMODE,
validation_msg="DFS4434I"
)
estimated_size_in_bytes = 0
response = hosts.all.command("dls -s " + cp.TEMP_ACB)
for result in response.contacted.values():
for line in result.get("stdout_lines", []):
lineList = line.split()
estimated_size_in_bytes = int(lineList[-1])
estimated_size_in_unit = bytes_to_unit(estimated_size_in_bytes, "TRK")
assert estimated_size_in_unit == 200
purge_catalog(hosts,
psb_lib=cp.PSBLIB,
dbd_lib=cp.DBDLIB,
steplib=cp.STEPLIB,
reslib=cp.RESLIB,
proclib=cp.PROCLIB,
primary_log_dataset=cp.PRIMARYLOG,
buffer_pool_param_dataset=cp.BUFFERPOOL,
mode=cp.PURGEMODE,
validation_msg="",
delete=cp.DELETES,
managed_acbs=True)
# Delete TEMP_ACB data set after the test
response = hosts.all.zos_data_set(name=cp.TEMP_ACB, state="absent")
for result in response.contacted.values():
assert result['changed'] is True
assert result['message'] == ''
def bytes_to_unit(number_of_bytes, unit):
size = 0
unit = unit.lower()
if number_of_bytes == 0:
number_of_bytes = 1
if unit == "cyl":
size = byte_to_cyl(number_of_bytes)
elif unit == "kb" or unit == "k":
size = byte_to_kilobyte(number_of_bytes)
elif unit == "mb" or unit == "m":
size = byte_to_megabyte(number_of_bytes)
else:
size = byte_to_trk(number_of_bytes)
return size
def byte_to_trk(number_of_bytes):
return ceil(number_of_bytes / BYTES_PER_TRK)
def byte_to_cyl(number_of_bytes):
return ceil(number_of_bytes / BYTES_PER_CYL)
def byte_to_kilobyte(number_of_bytes):
return ceil(number_of_bytes / BYTES_PER_KB)
def byte_to_megabyte(number_of_bytes):
return ceil(number_of_bytes / BYTES_PER_MB)
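# Rough usage sketch (comment only, added for illustration): with BYTES_PER_TRK = 56664,
# bytes_to_unit(56664, "TRK") == 1 and bytes_to_unit(56665, "TRK") == 2, because the
# helpers round up with ceil().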
| 1.710938
| 2
|
mne/viz/utils.py
|
TanayGahlot/mne-python
| 0
|
12781211
|
<gh_stars>0
"""Utility functions for plotting M/EEG data
"""
from __future__ import print_function
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: Simplified BSD
import math
from copy import deepcopy
from functools import partial
import difflib
import webbrowser
from warnings import warn
import tempfile
import numpy as np
from ..io import show_fiff
from ..utils import verbose
COLORS = ['b', 'g', 'r', 'c', 'm', 'y', 'k', '#473C8B', '#458B74',
'#CD7F32', '#FF4040', '#ADFF2F', '#8E2323', '#FF1493']
DEFAULTS = dict(color=dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='r',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k', exci='k', ias='k', syst='k',
seeg='k'),
units=dict(eeg='uV', grad='fT/cm', mag='fT', misc='AU',
seeg='uV'),
scalings=dict(eeg=1e6, grad=1e13, mag=1e15, misc=1.0,
seeg=1e4),
scalings_plot_raw=dict(mag=1e-12, grad=4e-11, eeg=20e-6,
eog=150e-6, ecg=5e-4, emg=1e-3,
ref_meg=1e-12, misc=1e-3,
stim=1, resp=1, chpi=1e-4, exci=1,
ias=1, syst=1, seeg=1e-5),
ylim=dict(mag=(-600., 600.), grad=(-200., 200.),
eeg=(-200., 200.), misc=(-5., 5.),
seeg=(-200., 200.)),
titles=dict(eeg='EEG', grad='Gradiometers',
mag='Magnetometers', misc='misc', seeg='sEEG'),
mask_params=dict(marker='o',
markerfacecolor='w',
markeredgecolor='k',
linewidth=0,
markeredgewidth=1,
markersize=4))
def _mutable_defaults(*mappings):
""" To avoid dicts as default keyword arguments
Use this function instead to resolve default dict values.
Example usage:
scalings, units = _mutable_defaults(('scalings', scalings,
'units', units))
"""
out = []
for k, v in mappings:
this_mapping = DEFAULTS[k]
if v is not None:
this_mapping = deepcopy(DEFAULTS[k])
this_mapping.update(v)
out += [this_mapping]
return out
def _setup_vmin_vmax(data, vmin, vmax):
"""Aux function to handle vmin and vamx parameters"""
if vmax is None and vmin is None:
vmax = np.abs(data).max()
vmin = -vmax
else:
if callable(vmin):
vmin = vmin(data)
elif vmin is None:
vmin = np.min(data)
if callable(vmax):
vmax = vmax(data)
elif vmax is None:
vmax = np.max(data)
return vmin, vmax
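# Illustration (comment only, added here): with both bounds unset,
# _setup_vmin_vmax(np.array([-3., 1., 2.]), None, None) returns the symmetric
# limits (-3.0, 3.0); passing callables lets callers derive either bound from the data.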
def tight_layout(pad=1.2, h_pad=None, w_pad=None, fig=None):
""" Adjust subplot parameters to give specified padding.
Note. For plotting please use this function instead of plt.tight_layout
Parameters
----------
pad : float
padding between the figure edge and the edges of subplots, as a
fraction of the font-size.
h_pad : float
Padding height between edges of adjacent subplots.
Defaults to `pad_inches`.
w_pad : float
Padding width between edges of adjacent subplots.
Defaults to `pad_inches`.
fig : instance of Figure
Figure to apply changes to.
"""
import matplotlib.pyplot as plt
if fig is None:
fig = plt.gcf()
try: # see https://github.com/matplotlib/matplotlib/issues/2654
fig.canvas.draw()
fig.tight_layout(pad=pad, h_pad=h_pad, w_pad=w_pad)
    except Exception:
        msg = ('Matplotlib function \'tight_layout\'%s.'
               ' Skipping subplot adjustment.')
if not hasattr(plt, 'tight_layout'):
case = ' is not available'
else:
case = (' is not supported by your backend: `%s`'
% plt.get_backend())
warn(msg % case)
def _check_delayed_ssp(container):
""" Aux function to be used for interactive SSP selection
"""
if container.proj is True or\
all([p['active'] for p in container.info['projs']]):
raise RuntimeError('Projs are already applied. Please initialize'
' the data with proj set to False.')
elif len(container.info['projs']) < 1:
raise RuntimeError('No projs found in evoked.')
def mne_analyze_colormap(limits=[5, 10, 15], format='mayavi'):
"""Return a colormap similar to that used by mne_analyze
Parameters
----------
limits : list (or array) of length 3
Bounds for the colormap.
format : str
Type of colormap to return. If 'matplotlib', will return a
matplotlib.colors.LinearSegmentedColormap. If 'mayavi', will
return an RGBA array of shape (256, 4).
Returns
-------
cmap : instance of matplotlib.pyplot.colormap | array
A teal->blue->gray->red->yellow colormap.
Notes
-----
    This returns a colormap that displays correctly for data that are
    scaled by the plotting function to span [-fmax, fmax].
Examples
--------
    The following code will plot an STC using standard MNE limits:
colormap = mne.viz.mne_analyze_colormap(limits=[5, 10, 15])
brain = stc.plot('fsaverage', 'inflated', 'rh', colormap)
brain.scale_data_colormap(fmin=-15, fmid=0, fmax=15, transparent=False)
"""
l = np.asarray(limits, dtype='float')
if len(l) != 3:
raise ValueError('limits must have 3 elements')
if any(l < 0):
raise ValueError('limits must all be positive')
if any(np.diff(l) <= 0):
raise ValueError('limits must be monotonically increasing')
if format == 'matplotlib':
from matplotlib import colors
l = (np.concatenate((-np.flipud(l), l)) + l[-1]) / (2 * l[-1])
cdict = {'red': ((l[0], 0.0, 0.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 1.0, 1.0),
(l[5], 1.0, 1.0)),
'green': ((l[0], 1.0, 1.0),
(l[1], 0.0, 0.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 1.0, 1.0)),
'blue': ((l[0], 1.0, 1.0),
(l[1], 1.0, 1.0),
(l[2], 0.5, 0.5),
(l[3], 0.5, 0.5),
(l[4], 0.0, 0.0),
(l[5], 0.0, 0.0))}
return colors.LinearSegmentedColormap('mne_analyze', cdict)
elif format == 'mayavi':
l = np.concatenate((-np.flipud(l), [0], l)) / l[-1]
r = np.array([0, 0, 0, 0, 1, 1, 1])
g = np.array([1, 0, 0, 0, 0, 0, 1])
b = np.array([1, 1, 1, 0, 0, 0, 0])
a = np.array([1, 1, 0, 0, 0, 1, 1])
xp = (np.arange(256) - 128) / 128.0
colormap = np.r_[[np.interp(xp, l, 255 * c) for c in [r, g, b, a]]].T
return colormap
else:
raise ValueError('format must be either matplotlib or mayavi')
def _toggle_options(event, params):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
if len(params['projs']) > 0:
if params['fig_opts'] is None:
_draw_proj_checkbox(event, params, draw_current_state=False)
else:
# turn off options dialog
plt.close(params['fig_opts'])
del params['proj_checks']
params['fig_opts'] = None
def _toggle_proj(event, params):
"""Operation to perform when proj boxes clicked"""
# read options if possible
if 'proj_checks' in params:
bools = [x[0].get_visible() for x in params['proj_checks'].lines]
for bi, (b, p) in enumerate(zip(bools, params['projs'])):
# see if they tried to deactivate an active one
if not b and p['active']:
bools[bi] = True
else:
bools = [True] * len(params['projs'])
compute_proj = False
    if 'proj_bools' not in params:
compute_proj = True
elif not np.array_equal(bools, params['proj_bools']):
compute_proj = True
# if projectors changed, update plots
if compute_proj is True:
params['plot_update_proj_callback'](params, bools)
def _prepare_trellis(n_cells, max_col):
"""Aux function
"""
import matplotlib.pyplot as plt
if n_cells == 1:
nrow = ncol = 1
elif n_cells <= max_col:
nrow, ncol = 1, n_cells
else:
nrow, ncol = int(math.ceil(n_cells / float(max_col))), max_col
fig, axes = plt.subplots(nrow, ncol, figsize=(7.4, 1.5 * nrow + 1))
axes = [axes] if ncol == nrow == 1 else axes.flatten()
for ax in axes[n_cells:]: # hide unused axes
ax.set_visible(False)
return fig, axes
def _draw_proj_checkbox(event, params, draw_current_state=True):
"""Toggle options (projectors) dialog"""
import matplotlib.pyplot as plt
import matplotlib as mpl
projs = params['projs']
# turn on options dialog
labels = [p['desc'] for p in projs]
actives = ([p['active'] for p in projs] if draw_current_state else
[True] * len(params['projs']))
width = max([len(p['desc']) for p in projs]) / 6.0 + 0.5
height = len(projs) / 6.0 + 0.5
fig_proj = figure_nobar(figsize=(width, height))
fig_proj.canvas.set_window_title('SSP projection vectors')
ax_temp = plt.axes((0, 0, 1, 1))
ax_temp.get_yaxis().set_visible(False)
ax_temp.get_xaxis().set_visible(False)
fig_proj.add_axes(ax_temp)
proj_checks = mpl.widgets.CheckButtons(ax_temp, labels=labels,
actives=actives)
# change already-applied projectors to red
for ii, p in enumerate(projs):
if p['active'] is True:
for x in proj_checks.lines[ii]:
x.set_color('r')
# make minimal size
# pass key presses from option dialog over
proj_checks.on_clicked(partial(_toggle_proj, params=params))
params['proj_checks'] = proj_checks
# this should work for non-test cases
try:
fig_proj.canvas.draw()
fig_proj.show()
except Exception:
pass
@verbose
def compare_fiff(fname_1, fname_2, fname_out=None, show=True, indent=' ',
read_limit=np.inf, max_str=30, verbose=None):
"""Compare the contents of two fiff files using diff and show_fiff
Parameters
----------
fname_1 : str
First file to compare.
fname_2 : str
Second file to compare.
fname_out : str | None
Filename to store the resulting diff. If None, a temporary
file will be created.
show : bool
If True, show the resulting diff in a new tab in a web browser.
indent : str
How to indent the lines.
read_limit : int
Max number of bytes of data to read from a tag. Can be np.inf
to always read all data (helps test read completion).
max_str : int
Max number of characters of string representation to print for
each tag's data.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fname_out : str
The filename used for storing the diff. Could be useful for
when a temporary file is used.
"""
file_1 = show_fiff(fname_1, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
file_2 = show_fiff(fname_2, output=list, indent=indent,
read_limit=read_limit, max_str=max_str)
diff = difflib.HtmlDiff().make_file(file_1, file_2, fname_1, fname_2)
if fname_out is not None:
f = open(fname_out, 'w')
else:
f = tempfile.NamedTemporaryFile('w', delete=False, suffix='.html')
fname_out = f.name
with f as fid:
fid.write(diff)
if show is True:
webbrowser.open_new_tab(fname_out)
return fname_out
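# Usage sketch for compare_fiff (hypothetical file names):
#     out_html = compare_fiff('sample_a-raw.fif', 'sample_b-raw.fif', show=False)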
def figure_nobar(*args, **kwargs):
"""Make matplotlib figure with no toolbar"""
import matplotlib.pyplot as plt
import matplotlib as mpl
old_val = mpl.rcParams['toolbar']
try:
mpl.rcParams['toolbar'] = 'none'
fig = plt.figure(*args, **kwargs)
# remove button press catchers (for toolbar)
cbs = list(fig.canvas.callbacks.callbacks['key_press_event'].keys())
for key in cbs:
fig.canvas.callbacks.disconnect(key)
except Exception as ex:
raise ex
finally:
mpl.rcParams['toolbar'] = old_val
return fig
| 2.1875
| 2
|
xrptipbotPy/xrptipbot.py
|
AJ58O/xrptipbotPy
| 0
|
12781212
|
import requests
class xrptipbot:
def __init__(self, token):
self.token = token
self.baseUrl = "https://www.xrptipbot.com/app/api"
def login(self, platform, model):
url = self.baseUrl + "/action:login/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token,
"platform":platform,
"model":model
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def unlink(self):
url = self.baseUrl + "/action:unlink/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def get_balance(self):
url = self.baseUrl + "/action:balance/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def tip(self, amount, to, existingDestination):
url = self.baseUrl + "/action:tip/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token,
"amount":amount,
"to":to,
"existingDestination":existingDestination
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def get_stats(self):
url = self.baseUrl + "/action:userinfo/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def get_contacts(self):
url = self.baseUrl + "/action:contacts/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def lookup_user(self, query):
url = self.baseUrl + "/action:lookup/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token,
"query":query
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def create_paper_wallet(self, note):
url = self.baseUrl + "/action:paper-proposal/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token,
"note":note
}
r = requests.post(url=url, json=payload, headers=headers)
return r
def bump(self, amount, aps=None, geo=None, nfc=None):
url = self.baseUrl + "/action:bump/"
headers = {"Content-Type":"application/json"}
payload = {
"token":self.token,
"amount":amount,
"aps":aps,
"geo":geo,
"nfc":nfc
}
r = requests.post(url=url, json=payload, headers=headers)
return r
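# Minimal usage sketch; the token below is a placeholder, not a real app token.
if __name__ == "__main__":
    bot = xrptipbot("YOUR-APP-TOKEN")
    balance = bot.get_balance()  # every method returns a raw requests.Response
    print(balance.status_code, balance.text)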
| 2.625
| 3
|
polygon/views.py
|
foreignbill/eoj3
| 0
|
12781213
|
from django.http import HttpResponse
from django.shortcuts import render, redirect, get_object_or_404
from django.urls import reverse
from django.views import View
from django.views.generic import ListView
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from polygon.base_views import PolygonBaseMixin
from polygon.models import Run
from polygon.rejudge import rejudge_submission, rejudge_all_submission_on_problem
from problem.models import Problem
from submission.models import Submission
from utils.permission import is_problem_manager, is_contest_manager
def authorization(user):
return False
# TODO: open polygon
# return get_accept_problem_count(user.id) >= 100
def home_view(request):
return render(request, 'polygon/home.jinja2', context={'polygon_authorized': authorization(request.user)})
def register_view(request):
template_name = 'polygon/register.jinja2'
if request.method == 'GET':
return render(request, template_name)
else:
if request.POST.get('terms') != 'on':
            return render(request, template_name, context={'register_error': 'You didn\'t accept terms of use.'})
if not authorization(request.user):
return render(request, template_name, context={'register_error': 'You are not authorized.'})
request.user.polygon_enabled = True
request.user.save(update_fields=['polygon_enabled'])
return redirect(reverse('polygon:home'))
class RejudgeSubmission(PolygonBaseMixin, APIView):
def dispatch(self, request, *args, **kwargs):
self.submission = get_object_or_404(Submission, pk=kwargs.get('sid'))
return super(RejudgeSubmission, self).dispatch(request, *args, **kwargs)
def test_func(self):
if is_problem_manager(self.request.user, self.submission.problem) or \
is_contest_manager(self.request.user, self.submission.contest):
return super(RejudgeSubmission, self).test_func()
return False
def post(self, request, sid):
rejudge_submission(self.submission)
return Response()
class RunsList(PolygonBaseMixin, ListView):
template_name = 'polygon/runs.jinja2'
paginate_by = 100
context_object_name = 'runs_list'
def get_queryset(self):
return Run.objects.filter(user=self.request.user).order_by("-pk").all()
class RunMessageView(PolygonBaseMixin, View):
def get(self, request, pk):
message = ''
try:
run = Run.objects.get(pk=pk, user=request.user)
message = run.message
except Run.DoesNotExist:
pass
return HttpResponse(message, content_type='text/plain')
| 2.03125
| 2
|
src/practices/practice/missing_number/script.py
|
rahul38888/coding_practice
| 1
|
12781214
|
<reponame>rahul38888/coding_practice
def missing_number(array, n):
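    # The sum of 1..n is n*(n+1)/2; subtracting the observed sum leaves the missing value.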
s = 0
for val in array:
s += val
return int(n*(n+1)/2 - s)
def scan_input():
n = int(input())
nsstr = input()
a = list(map(lambda x: int(x), nsstr.split()))
return a, n
if __name__ == '__main__':
t = int(input())
for i in range(t):
a, n = scan_input()
print(missing_number(a, n))
| 3.765625
| 4
|
analysis/baseline/s01_generate_features_alexnet.py
|
eduardojdiniz/Buzznauts
| 2
|
12781215
|
#!/usr/bin/env python
# coding=utf-8
import argparse
import os
import os.path as op
from Buzznauts.models.baseline.alexnet import load_alexnet
from Buzznauts.utils import set_seed, set_device
from Buzznauts.analysis.baseline import get_activations_and_save
from Buzznauts.analysis.baseline import do_PCA_and_save
def main():
buzz_root = '/home/<EMAIL>/proj/Buzznauts'
description = 'Feature Extraction from Alexnet and preprocessing using PCA'
parser = argparse.ArgumentParser(description=description)
parser.add_argument('-vdir', '--video_frames_dir',
help='video frames data directory',
default=op.join(buzz_root, 'data/stimuli/frames'),
type=str)
parser.add_argument('-sdir', '--save_dir',
help='saves processed features',
default=op.join(buzz_root, 'models/baseline'),
type=str)
args = vars(parser.parse_args())
save_dir = args['save_dir']
if not op.exists(save_dir):
os.makedirs(save_dir)
frames_dir = args['video_frames_dir']
# Call set_seed to ensure reproducibility
seed = set_seed()
    # Set computational device (cuda if GPU is available, else cpu)
device = set_device()
    # Pretrained Alexnet from:
# https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth
checkpoint_path = op.join(save_dir, "alexnet.pth")
url = "https://download.pytorch.org/models/alexnet-owt-4df8aa71.pth"
kwargs = {'ckpth_urls': {'alexnet': url}, 'ckpth': checkpoint_path}
# download pretrained model and save in the current directory
model = load_alexnet(pretrained=True, custom_keys=True, **kwargs)
model.to(device)
model.eval()
# get and save activations
activations_dir = op.join(save_dir, 'activations')
if not op.exists(activations_dir):
os.makedirs(activations_dir)
print("-------------------Saving activations ----------------------------")
imagenet_file = op.join(save_dir, 'imagenet_labels.txt')
    _ = get_activations_and_save(model, frames_dir, activations_dir,
                                 imagenet_file, device=device)
# preprocessing using PCA and save
pca_dir = op.join(activations_dir, 'pca_100')
print("-----------------------Performing PCA----------------------------")
do_PCA_and_save(activations_dir, pca_dir, seed=seed)
if __name__ == "__main__":
main()
| 2.296875
| 2
|
wflow-py/WflowDeltashell/addwflowtoolbar.py
|
edwinkost/wflow
| 0
|
12781216
|
<reponame>edwinkost/wflow
from WflowDeltashell.Shortcuts import *
from WflowDeltashell.plotcsv import *
def OpenDoc(url):
import Libraries.StandardFunctions as sf
from DelftTools.Utils import Url
murl = Url(url,url)
sf.OpenView(murl)
def notimplemented():
    print("Not implemented yet...")
name = "Web Documentation"
tabName = "Wflow-Tools"
groupName = "Internet"
CreateShortcutButton(name,groupName,tabName, lambda: OpenDoc("http://wflow.readthedocs.io/en/latest/"), None)
name = "Github"
tabName = "Wflow-Tools"
groupName = "Internet"
CreateShortcutButton(name,groupName,tabName, lambda: OpenDoc("http://github.com/openstreams/wflow"), None)
name = "Plotcsv"
tabName = "Wflow-Tools"
groupName = "Plots"
CreateShortcutButton(name,groupName,tabName, lambda: plotit(getcsvname()), None)
name = "Netcdf Input"
tabName = "Wflow-Tools"
groupName = "Conversion"
CreateShortcutButton(name,groupName,tabName, lambda: notimplemented(), None)
#RemoveShortcut(name,groupName,tabName)
#RemoveShortcutsTab(tabName)
| 2.421875
| 2
|
migration/new_fields.py
|
jmilosze/wfrp-hammergen
| 1
|
12781217
|
from pymongo import MongoClient
pswd = ""
db = "test"
conn_string = f"mongodb+srv://Jacek:{<EMAIL>/test?retryWrites=true&w=majority"
client = MongoClient(conn_string, 27017)
non_char_collections = ["career", "item", "mutation", "property", "skill", "spell", "talent"]
for element in non_char_collections:
print(f"processing {element}")
collection = client.__getattr__(db).__getattr__(element)
items = collection.find()
for item in items:
if not item.get("shared"):
collection.find_one_and_update({"_id": item["_id"]}, {"$set": {"shared": True}})
collection = client.__getattr__(db).character
items = collection.find()
for item in items:
query = {"$set": {}}
if not item.get("shared"):
query["$set"]["shared"] = False
if not item.get("spells"):
query["$set"]["spells"] = []
if not item.get("mutations"):
query["$set"]["mutations"] = []
if not item.get("sin"):
query["$set"]["sin"] = 0
if not item.get("corruption"):
query["$set"]["corruption"] = 0
if query["$set"]:
collection.find_one_and_update({"_id": item["_id"]}, query)
collection = client.__getattr__(db).user
items = collection.find()
for item in items:
query = {"$set": {}}
if not item.get("shared_accounts"):
query["$set"]["shared_accounts"] = []
if query["$set"]:
collection.find_one_and_update({"_id": item["_id"]}, query)
| 2.484375
| 2
|
django_personals/enums.py
|
sasriawesome/django_personals
| 2
|
12781218
|
import enum
from django.utils.translation import ugettext_lazy as _
class MaxLength(enum.Enum):
SHORT = 128
MEDIUM = 256
LONG = 512
XLONG = 1024
TEXT = 2048
RICHTEXT = 10000
class ActiveStatus(enum.Enum):
ACTIVE = 'ACT'
INACTIVE = 'INC'
CHOICES = (
(ACTIVE, _("active").title()),
(INACTIVE, _("inactive").title()),
)
class PrivacyStatus(enum.Enum):
ANYONE = 'anyone'
USERS = 'users'
FRIENDS = 'friends'
STUDENTS = 'students'
TEACHERS = 'teachers'
EMPLOYEES = 'employees'
MANAGERS = 'managers'
ME = 'me'
CHOICES = (
(ANYONE, _("anyone").title()),
(USERS, _('all users').title()),
(FRIENDS, _('all friends').title()),
(STUDENTS, _('all students').title()),
(TEACHERS, _('all teachers').title()),
(EMPLOYEES, _('all employees').title()),
(MANAGERS, _('all managers').title()),
(ME, _('only me').title())
)
class Gender(enum.Enum):
MALE = 'L'
FEMALE = 'P'
CHOICES = (
(MALE, _("male").title()),
(FEMALE, _("female").title()),
)
class AddressName(enum.Enum):
HOME = 'home'
OFFICE = 'office'
CHOICES = (
(HOME, _("home").title()),
(OFFICE, _("office").title()),
)
class EducationStatus(enum.Enum):
FINISHED = 'FNS'
ONGOING = 'ONG'
UNFINISHED = 'UNF'
CHOICES = (
(FINISHED, _("finished").title()),
(ONGOING, _("ongoing").title()),
(UNFINISHED, _("unfinished").title()),
)
class WorkingStatus(enum.Enum):
CONTRACT = 'CTR'
FIXED = 'FXD'
OUTSOURCE = 'OSR'
ELSE = 'ELS'
CHOICES = (
(CONTRACT, _("contract").title()),
(FIXED, _("fixed").title()),
(OUTSOURCE, _("outsource").title()),
(ELSE, _("else").title())
)
class FamilyRelation(enum.Enum):
FATHER = 1
MOTHER = 2
SIBLING = 3
CHILD = 4
HUSBAND = 5
WIFE = 6
OTHER = 99
CHOICES = (
(FATHER, _('father').title()),
(MOTHER, _('mother').title()),
(HUSBAND, _('husband').title()),
(WIFE, _('wife').title()),
(CHILD, _('children').title()),
(SIBLING, _('sibling').title()),
(OTHER, _('other').title()),
)
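# Sketch (hypothetical model field) of wiring one of these enums into a Django field;
# the field and model names are illustrative only:
#     gender = models.CharField(max_length=1, choices=Gender.CHOICES.value,
#                               default=Gender.MALE.value)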
| 2.140625
| 2
|
venv/Lib/site-packages/_TFL/Apply_All.py
|
nasir733/airbnb-clone
| 6
|
12781219
|
# -*- coding: utf-8 -*-
# Copyright (C) 2005-2013 Mag. <NAME>. All rights reserved
# Glasauergasse 32, A--1130 Wien, Austria. <EMAIL>
# ****************************************************************************
#
# This module is licensed under the terms of the BSD 3-Clause License
# <http://www.c-tanzer.at/license/bsd_3c.html>.
# ****************************************************************************
#
#++
# Name
# Apply_All
#
# Purpose
# Class transparently applying method calls to a set of objects
#
# Revision Dates
# 20-Feb-2005 (CT) Creation
# ««revision-date»»···
#--
from _TFL import TFL
import _TFL._Meta.Object
class Apply_All (TFL.Meta.Object) :
"""Class transparently applying method calls to a set of objects.
>>> l1 = list (range (5))
>>> l2 = ["f", "b", "c", "a"]
>>> all = Apply_All (l1, l2)
>>> all._receivers
([0, 1, 2, 3, 4], ['f', 'b', 'c', 'a'])
>>> all.sort ()
>>> all._receivers
([0, 1, 2, 3, 4], ['a', 'b', 'c', 'f'])
>>> all.count ("a")
[0, 1]
>>> all.reverse ()
>>> all._receivers
([4, 3, 2, 1, 0], ['f', 'c', 'b', 'a'])
>>> all.pop ()
[0, 'a']
>>> all._receivers
([4, 3, 2, 1], ['f', 'c', 'b'])
"""
def __init__ (self, * receivers) :
self._receivers = receivers
# end def __init__
def _apply (self, name, * args, ** kw) :
result = []
for r in self._receivers :
f = getattr (r, name)
r = f (* args, ** kw)
if r is not None :
result.append (r)
return result or None
# end def _apply
def __getattr__ (self, name) :
return lambda * args, ** kw : self._apply (name, * args, ** kw)
# end def __getattr__
# end class Apply_All
if __name__ != "__main__" :
TFL._Export ("*")
### __END__ Apply_All
| 2.21875
| 2
|
tests/test_prepifg_system_vs_python.py
|
uniomni/PyRate
| 1
|
12781220
|
<reponame>uniomni/PyRate
import shutil
import pytest
from pathlib import Path
import numpy as np
from pyrate.core import config as cf
from pyrate import conv2tif, prepifg
from pyrate.configuration import Configuration
from tests.common import (
assert_two_dirs_equal,
manipulate_test_conf,
TRAVIS,
PYTHON3P6,
PYTHON3P7,
)
@pytest.fixture(params=[1, 2, 3, 4])
def local_crop(request):
return request.param
@pytest.fixture()
def modified_config_short(tempdir, local_crop, get_lks, coh_mask):
orbfit_lks = 1
orbfit_method = 1
orbfit_degrees = 1
ref_est_method = 1
def modify_params(conf_file, parallel, output_conf_file):
tdir = Path(tempdir())
params = manipulate_test_conf(conf_file, tdir)
params[cf.COH_MASK] = coh_mask
params[cf.PARALLEL] = parallel
params[cf.PROCESSES] = 4
params[cf.APSEST] = 1
params[cf.IFG_LKSX], params[cf.IFG_LKSY] = get_lks, get_lks
params[cf.REFNX], params[cf.REFNY] = 4, 4
params[cf.IFG_CROP_OPT] = local_crop
params[cf.ORBITAL_FIT_LOOKS_X], params[cf.ORBITAL_FIT_LOOKS_Y] = orbfit_lks, orbfit_lks
params[cf.ORBITAL_FIT] = 1
params[cf.ORBITAL_FIT_METHOD] = orbfit_method
params[cf.ORBITAL_FIT_DEGREE] = orbfit_degrees
params[cf.REF_EST_METHOD] = ref_est_method
params["rows"], params["cols"] = 3, 2
params["notiles"] = params["rows"] * params["cols"] # number of tiles
print(params)
# write new temp config
output_conf = tdir.joinpath(output_conf_file)
cf.write_config_file(params=params, output_conf_file=output_conf)
return output_conf, params
return modify_params
@pytest.fixture
def create_mpi_files(modified_config_short):
def _create(conf):
mpi_conf, params = modified_config_short(conf, 0, 'mpi_conf.conf')
params = Configuration(mpi_conf).__dict__
conv2tif.main(params)
params = Configuration(mpi_conf).__dict__
prepifg.main(params)
        return params  # don't need the remaining params
return _create
@pytest.fixture()
def modified_config_largetifs(tempdir, local_crop, get_lks, coh_mask):
orbfit_lks = 1
orbfit_method = 1
orbfit_degrees = 1
ref_est_method = 1
def modify_params(conf_file, parallel, output_conf_file):
tdir = Path(tempdir())
params = manipulate_test_conf(conf_file, tdir)
params[cf.COH_MASK] = coh_mask
params[cf.LARGE_TIFS] = 1
params[cf.PARALLEL] = parallel
params[cf.PROCESSES] = 4
params[cf.APSEST] = 1
params[cf.IFG_LKSX], params[cf.IFG_LKSY] = get_lks, get_lks
params[cf.REFNX], params[cf.REFNY] = 4, 4
params[cf.IFG_CROP_OPT] = local_crop
params[cf.ORBITAL_FIT_LOOKS_X], params[cf.ORBITAL_FIT_LOOKS_Y] = orbfit_lks, orbfit_lks
params[cf.ORBITAL_FIT] = 1
params[cf.ORBITAL_FIT_METHOD] = orbfit_method
params[cf.ORBITAL_FIT_DEGREE] = orbfit_degrees
params[cf.REF_EST_METHOD] = ref_est_method
params["rows"], params["cols"] = 3, 2
params["notiles"] = params["rows"] * params["cols"] # number of tiles
print(params)
# write new temp config
output_conf = tdir.joinpath(output_conf_file)
cf.write_config_file(params=params, output_conf_file=output_conf)
return output_conf, params
return modify_params
@pytest.mark.slow
@pytest.mark.skipif(PYTHON3P6 or PYTHON3P7, reason="Only run in python 3.8")
def test_prepifg_largetifs_vs_python(modified_config_largetifs, gamma_conf, create_mpi_files):
print("\n\n")
print("===x==="*10)
if TRAVIS and np.random.randint(0, 1000) > 499: # skip 50% of tests randomly
pytest.skip("Randomly skipping as part of 50 percent")
params = create_mpi_files(gamma_conf)
sr_conf, params_p = modified_config_largetifs(gamma_conf, 1, 'parallel_conf.conf')
params_p = Configuration(sr_conf).__dict__
conv2tif.main(params_p)
params_p = Configuration(sr_conf).__dict__
prepifg.main(params_p)
params_p = Configuration(sr_conf).__dict__
# convert2tif tests, 17 interferograms
assert_two_dirs_equal(params[cf.OUT_DIR], params_p[cf.OUT_DIR], "*_unw_ifg.tif", 17)
# if coherence masking, compare coh files were converted
if params[cf.COH_MASK]:
assert_two_dirs_equal(params[cf.OUT_DIR], params_p[cf.OUT_DIR], "*_coh.tif", 17)
print("coherence files compared")
# 17 ifgs + 1 dem + 17 mlooked file
assert_two_dirs_equal(params[cf.OUT_DIR], params_p[cf.OUT_DIR], f"*{params[cf.IFG_CROP_OPT]}cr.tif", 35)
else:
# prepifg
# 17 ifgs + 1 dem
assert_two_dirs_equal(params[cf.OUT_DIR], params_p[cf.OUT_DIR], f"*{params[cf.IFG_CROP_OPT]}cr.tif", 18)
print("==========================xxx===========================")
shutil.rmtree(params[cf.OBS_DIR])
shutil.rmtree(params_p[cf.OBS_DIR])
| 1.875
| 2
|
Main_Code.py
|
Sax-Ted/Eye-Timer
| 1
|
12781221
|
<reponame>Sax-Ted/Eye-Timer
#---------import modules---------
from tkinter import *
import time
import math
import pygame
#---------init the window---------
root = Tk()
root.geometry("460x665")
root.resizable(False, False)
root.title("Computer Use Time")
root.iconbitmap('icon.ico')
#---------set the function to pass---------
def passss():
pass
root.bind("<Alt-F4>", passss)
#---------setup the countdown and lock window---------
def Start():
Start_Win = Toplevel(root)
Start_Win.iconbitmap('icon.ico')
Start_Win.geometry("300x300")
Start_Win.iconbitmap('icon.ico')
Start_Win.overrideredirect(True)
def passss():
pass
root.protocol("WM_DELETE_WINDOW", passss)
Start_Win.bind("<Alt-F4>", passss)
#---------get the time to countdown---------
U_Hr_Val = Hour.get()
U_Min_Val = Minute.get()
U_Sec_Val = Second.get()
U_Total_Sec = U_Hr_Val * 3600 + U_Min_Val * 60 + U_Sec_Val
B_Hr_Val = B_Hour.get()
B_Min_Val = B_Minute.get()
B_Total_Sec = B_Hr_Val * 3600 + B_Min_Val * 60
B_Total_ms = B_Total_Sec * 1000
#---------countdown---------
def countDown():
Countdown.config(bg = "black")
Countdown.config(fg = 'white')
Countdown.config(height = 3, font = "微軟正黑體 20")
for k in range(U_Total_Sec, 0, -1):
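            # Split the remaining k seconds into hours (kk), minutes (kkkk) and seconds (kkkkk)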
kk = math.floor(k / 3600)
kkk = k % 3600
kkkk = math.floor(kkk / 60)
kkkkk = kkk % 60
Countdown["text"] = kk, ":", kkkk, ":", kkkkk
Start_Win.update()
time.sleep(1)
#---------lock---------
Start_Win.overrideredirect(False)
Start_Win.attributes("-topmost", True)
Start_Win.attributes("-fullscreen", True)
Countdown.config(bg = 'black')
Countdown.config(fg = 'white')
Countdown["text"] = "It's time to take a break!"
Start_Win.overrideredirect(True)
#---------play beep sound---------
pygame.mixer.init()
pygame.mixer.music.load("End_Sound.mp3")
pygame.mixer.music.play()
#---------start the countdown window---------
Start_Win.title("Countdown")
Countdown = Label(Start_Win)
Countdown.pack(fill = BOTH, expand = 1)
countDown()
Start_Win.after(B_Total_ms, Start_Win.destroy)
#---------set the root window text---------
Space_1 = Label(root, text = " ", font = "微軟正黑體 10")
Space_1.pack()
Use_Time_text = Label(root, text = "Use Time", font = "微軟正黑體 15")
Use_Time_text.pack()
#---------set the root window scale---------
Hour = Scale(orient = HORIZONTAL, width = 15, length = 150)
Hour.config(from_ = 0, to = 5)
Hour.config(showvalue = 1, tickinterval = 1, resolution = 1)
Hour.config(label = " Hour(s)", font = "微軟正黑體 10")
Hour.set(0)
Hour.pack()
Minute = Scale(orient = HORIZONTAL, width = 15, length = 300)
Minute.config(from_ = 0, to = 60)
Minute.config(showvalue = 1, tickinterval = 5, resolution = 1)
Minute.config(label = " Minute(s)", font = "微軟正黑體 10")
Minute.set(0)
Minute.pack()
Second = Scale(orient = HORIZONTAL, width = 15, length = 300)
Second.config(from_ = 0, to = 60)
Second.config(showvalue = 1, tickinterval = 5, resolution = 1)
Second.config(label = " Second(s)", font = "微軟正黑體 10")
Second.set(0)
Second.pack()
Space_2 = Label(root, text = " ", font = "微軟正黑體 10")
Space_2.pack()
Break_Time_text = Label(root, text = "Break Time", font = "微軟正黑體 15")
Break_Time_text.pack()
B_Hour = Scale(orient = HORIZONTAL, width = 15, length = 150)
B_Hour.config(from_ = 0, to = 5)
B_Hour.config(showvalue = 1, tickinterval = 1, resolution = 1)
B_Hour.config(label = " Hour(s)", font = "微軟正黑體 10")
B_Hour.set(0)
B_Hour.pack()
B_Minute = Scale(orient = HORIZONTAL, width = 15, length = 300)
B_Minute.config(from_ = 10, to = 60)
B_Minute.config(showvalue = 1, tickinterval = 5, resolution = 1)
B_Minute.config(label = " Minute(s)", font = "微軟正黑體 10")
B_Minute.set(0)
B_Minute.pack()
Space_3 = Label(root, text = " ", font = "微軟正黑體 10")
Space_3.pack()
Space_4 = Label(root, text = " ", font = "微軟正黑體 10")
Space_4.pack()
Start_but = Button(root, text = "Start", font = "微軟正黑體 10", command = Start)
Start_but.pack()
#---------make the window run---------
root.mainloop()
| 2.9375
| 3
|
application/content/routes.py
|
nicolaskyejo/project-fuksit
| 0
|
12781222
|
<reponame>nicolaskyejo/project-fuksit<filename>application/content/routes.py
import redis
from flask import Blueprint, render_template, request, make_response, session, jsonify, redirect, url_for
from flask_login import login_required
from application.landing_page.forms import Quiz
content_bp = Blueprint('content_bp', __name__)
conn = redis.Redis('localhost', 6379, charset='utf-8', decode_responses=True)
leader_board = 'leaderboard'
@content_bp.route('/points', methods=['POST'])
@login_required
def points():
"""Adds one point to user's cumulative points and returns a JSON response indicating success or failure"""
if request.method == 'POST':
r = request.get_json()
mission_number = r.get('mission') # Mission which was completed
if mission_number in session['missions']: # If mission is valid
if not session['missions'][mission_number]: # If mission has not been completed before
session['points'] = session.get('points') + 1
session['missions'][mission_number] = True
conn.zadd(leader_board, {session['username']: 1}, incr=True) # updating our leaderboard with new points
return make_response(jsonify({'message': 'points received and updated'}), 200)
return make_response(jsonify({'message': 'mission done'}), 200)
return make_response(jsonify({'message': 'invalid mission'}), 200)
return make_response(jsonify({'error': 'invalid request'}), 400)
@content_bp.route('/leaderboard', methods=['GET'])
@login_required
def leaderboard():
"""Returns a sorted leaderboard in JSON format"""
top_10 = conn.zrevrangebyscore(leader_board, 15, 1, start=0, num=10, withscores=True)
return jsonify(top_10)
@content_bp.route('/story', methods=['GET'])
@login_required
def story():
"""The story and avatar"""
return render_template('story.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/about', methods=['GET'])
@login_required
def about():
return render_template('about.html')
@content_bp.route("/profile", methods=['GET'])
@login_required
def profile():
completed = session['missions'].values()
if False in completed:
return render_template('profile.html', username=session['username'][:12], xp=session.get('points'),
done=completed)
return render_template('profile.html', username=session['username'][:12], xp=session.get('points'),
done=completed, badge=True)
# Missions aka Content #
@content_bp.route('/mission_1', methods=['GET', 'POST'])
@login_required
def mission_1():
"""Mission 1 and its contents"""
return render_template('mission_1.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_2', methods=['GET', 'POST'])
@login_required
def mission_2():
"""Mission 2 and its contents"""
return render_template('mission_2.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_3', methods=['GET', 'POST'])
@login_required
def mission_3():
"""Mission 3 and its contents"""
return render_template('mission_3.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_4', methods=['GET', 'POST'])
@login_required
def mission_4():
"""Mission 4 and its contents"""
return render_template('mission_4.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_5', methods=['GET', 'POST'])
@login_required
def mission_5():
"""Mission 5 and its contents"""
return render_template('mission_5.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_6', methods=['GET', 'POST'])
@login_required
def mission_6():
"""Mission 6 and its contents"""
return render_template('mission_6.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/mission_7', methods=['GET', 'POST'])
@login_required
def mission_7():
"""Mission 7 and its contents"""
return render_template('mission_7.html', username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/sidemission', methods=['GET', 'POST'])
@login_required
def quiz():
"""Quiz route aka side mission"""
form = Quiz()
if request.method == 'POST':
if form.validate_on_submit():
if not session['missions']['special']: # If mission has not been completed before
session['points'] = session.get('points') + 6
session['missions']['special'] = True
conn.zadd(leader_board, {session['username']: 6}, incr=True)
if session['points'] >= 10:
return render_template('success.html', username=session['username'], xp=session.get('points'))
return redirect(url_for('content_bp.profile'))
return render_template('quiz.html', form=form, username=session['username'][:12], xp=session.get('points'),
done=session['missions'].values())
@content_bp.route('/links', methods=['GET'])
@login_required
def links():
"""Just some extra links for additional reading"""
return render_template('extra_read_allaboutit.html')
| 2.796875
| 3
|
pipeline.py
|
evansgroup/JournalOfBiomedicalOptics
| 0
|
12781223
|
<filename>pipeline.py<gh_stars>0
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import glob
from skimage.io import imread, imsave
from skimage.transform import hough_circle, hough_circle_peaks
from skimage.feature import canny
from skimage.draw import circle_perimeter
from skimage import color
def detect_circles_in_image(image):
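    # Canny edge map, then a circular Hough transform over radii 10-59 px; peaks closer
    # than 50 px in x/y are suppressed via min_xdistance/min_ydistance.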
edges = canny(image, sigma=3, low_threshold=10, high_threshold=50)
hough_radii = np.arange(10, 60, 1)
hough_res = hough_circle(edges, hough_radii)
accums, cx, cy, radii = hough_circle_peaks(hough_res, hough_radii, threshold=0.50,
min_xdistance=50, min_ydistance=50)
# Removes circles that are closer than 20px to any other circle
acr = [accums[0]]; cxr = [cx[0]]; cyr = [cy[0]]; radiir = [radii[0]]
for i in range(1, len(accums)): # For each point
closest_than_20_to_any = False
for j in range(0, len(radiir)): # For all already existing points
if np.sqrt((cxr[j]-cx[i])**2 + (cyr[j]-cy[i])**2 ) < 20:
closest_than_20_to_any = True
if closest_than_20_to_any == False:
acr.append(accums[i]); cxr.append(cx[i])
cyr.append(cy[i]) ; radiir.append(radii[i])
centers = np.transpose(np.array([cxr, cyr, radiir]))
return centers
def measure_average_grayscale_in_circles(image, circles):
x,y = np.meshgrid(range(0, image.shape[0]), range(0, image.shape[1]))
vals = []
if circles is not None:
for c in circles:
msk = (x-c[0])**2/(c[2]**2) + (y-c[1])**2/(c[2]**2) <= 1
mv = float(np.sum(image*msk))/np.sum(msk)
vals.append(mv)
return vals
def generate_detection_control_image(image, circles):
imagec = color.gray2rgb(image)
for c in circles:
center_y = c[1]
center_x = c[0]
radius = c[2]
for re in np.arange(-3,3,1):
circy, circx = circle_perimeter(int(center_y), int(center_x), int(radius+re))
circy[circy<=0] = 0
circx[circx<=0] = 0
circy[circy>=1023] = 1023
circx[circx>=1023] = 1023
imagec[circy, circx] = (220, 20, 20)
return imagec
def crop_image_at_circles(image, circles):
rl = []
if circles is not None:
for c in circles:
r = image[int(c[1])-64:int(c[1])+64,
int(c[0])-64:int(c[0])+64]
rl.append(r)
return rl
def create_montage(rl):
if rl is not None:
n_c = len(rl)
w = int(np.floor(np.sqrt(n_c)))
if w**2 == n_c:
h = w
elif w*(w+1) >= n_c:
h = w+1
else:
w = w+1
h = w
if len(rl[0].shape) == 2:
mtge = np.zeros((w*rl[0].shape[0], h*rl[0].shape[1]))
else:
mtge = np.zeros((w*rl[0].shape[0], h*rl[0].shape[1], rl[0].shape[2]))
for n_im, im in enumerate(rl):
i = int(np.floor(n_im/h))
j = n_im - i*h
mtge[i*im.shape[0]:(i+1)*im.shape[0], j*im.shape[1]:(j+1)*im.shape[1] ] = im
return mtge
    return None
img_dir = 'data/acquisitions/'
circ_dir = 'data/circles/'
qc_dir = 'data/qc/'
# Part 1 of the pipeline - detect circles in the image and measure mean intensity
# Loops through all the fields of view
for img_name in glob.glob(img_dir + "/*/*/*/*.png"):
e, _ = os.path.split(img_name)
# Checks that the output directory structure exists and recreates it if not
o_img_dir = e.replace(img_dir, circ_dir)
o_qc_dir = e.replace(img_dir, qc_dir)
for dd in [o_img_dir, o_qc_dir]:
if not os.path.exists(dd):
os.makedirs(dd)
# Sets up the output paths
circles_name = img_name.replace(img_dir, circ_dir).replace(".png", ".txt")
qc_img_name = img_name.replace(img_dir,qc_dir)
# If there is no output file, process the image
if not os.path.exists(circles_name):
print(img_name)
img = imread(img_name, flags=0)
circles = detect_circles_in_image(img)
vals = measure_average_grayscale_in_circles(img, circles)
if circles is not None and vals is not None:
circles = np.c_[circles, vals]
det_control_image = generate_detection_control_image(img, circles)
imsave(qc_img_name, det_control_image)
np.savetxt(circles_name, circles)
# Part 2 of the pipeline - generate experiment control montages of the experiments
# Loops through the experiments
for e in glob.glob(circ_dir + "/*/*/*"):
if not os.path.isdir(e):
continue
# Gets the subject directory
subject_directory, e_dir = os.path.split(e)
if not os.path.exists(subject_directory.replace(circ_dir, qc_dir)):
os.makedirs(subject_directory.replace(circ_dir, qc_dir))
o_img = e.replace(circ_dir, qc_dir) + ".png"
if not os.path.exists(o_img):
all_crops = []
for circ_name in glob.glob(e + "/*.txt"):
circles = np.loadtxt(circ_name)
if circles.shape[0] :
img = plt.imread(circ_name
.replace(circ_dir, img_dir)
.replace(".txt",".png"))
crops = crop_image_at_circles(img, circles)
all_crops.extend(crops)
mtg = create_montage(all_crops)
# print(o_img)
imsave(o_img, mtg)
# Part 3 of the pipeline - aggregate the experiments
data = []
# Loop through all circles detected in the fields of view
for circ_name in glob.glob(circ_dir + "/*/*/*/*.txt"):
print(circ_name)
circles = np.loadtxt(circ_name)
if circles is not None:
exp_path, fov = os.path.split(circ_name)
sub_path, exp = os.path.split(exp_path)
date_path, subject = os.path.split(sub_path)
_, date = os.path.split(date_path)
for i,c in enumerate(circles):
l = [date, subject, exp, fov, i]
l.extend(list(c))
data.append(l)
dt = pd.DataFrame(data, columns=['Date','Subject','Experiment','FOV',
'Circle','X','Y','R','Mean Intensity'])
dt.to_csv('data/experiments.csv', index=False)
| 2.453125
| 2
|
NJ_tree_analysis_manual_sobject_run.py
|
kcotar/Stellar_abudance_trees
| 0
|
12781224
|
from NJ_tree_analysis_functions import start_gui_explorer
# new in omegaCen?
objs = [
140305003201095, 140305003201103, 140305003201185, 140307002601128, 140307002601147, 140311006101253,
140314005201008, 140608002501266, 150211004701104, 150428002601118, 150703002101192
]
# new in NGC6774
objs = [
140707002601170, 140707002601363, 140806003501357, 151009001601071, 160522005601187, 170506006401032,
170506006401321, 170506006401334, 170506006401352, 170506006401367, 170506006401373, 170506006401374,
170506006401392, 170802004301085, 170906002601139, 170907002601241, 140708005301211,150703005601230,161013001601131,161109002601048,170506005401371,170506006401241,170506006401303,170907003101232,170907003101274,170910003101093,170506006401009, 170506006401032, 170506006401039, 170506006401063, 170506006401095, 170506006401189, 170506006401265, 170506006401281, 170506006401321, 170506006401331, 170506006401334, 170506006401345, 170506006401352, 170506006401367, 170506006401373, 170506006401374, 170506006401392
]
objs = [
140308001401117,140308001401346,151229004001035,151229004001161,160327002601047,160327002601054,160327002601078,160327002601137,160327002601145,160327002601160,160327002601181,160327002601229,160327002601258,160327002601299,160327002601314,160327002601391,170407002101038
]
objs = [str(o) for o in objs]
start_gui_explorer(objs,
manual=True, initial_only=False, loose=True,
kinematics_source='ucac5')
# start_gui_explorer(objs,
# manual=False, initial_only=False, loose=True,
# kinematics_source='ucac5')
| 0.957031
| 1
|
examples/case1/case2_est.py
|
JavierArroyoBastida/forecast-gen
| 1
|
12781225
|
<gh_stars>1-10
"""Parameter estimation in all FMUs used in Case 1"""
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import modestpy
# Paths
ms_file = os.path.join('examples', 'case1', 'measurements.csv')
fmu_dir = os.path.join('examples', 'case1', 'models')
res_dir = os.path.join('examples', 'case1', 'results', 'est')
if not os.path.exists(res_dir):
os.makedirs(res_dir)
# FMU list
fmus = os.listdir(fmu_dir)
# Training and validation periods
trn_t0 = 0
trn_t1 = trn_t0 + 5 * 86400
vld_t0 = trn_t0
vld_t1 = trn_t0 + 9 * 86400
# Read measurements
ms = pd.read_csv(ms_file)
ms['datetime'] = pd.to_datetime(ms['datetime'])
ms = ms.set_index('datetime')
# Resample
ms = ms.resample('1h').mean().ffill().bfill()
# Assign model inputs
inp = ms[['solrad', 'Tout', 'occ', 'dpos', 'vpos']]
inp['time'] = (inp.index - inp.index[0]).total_seconds() # ModestPy needs index in seconds
inp = inp.set_index('time') # ModestPy needs index named 'time'
inp.to_csv(os.path.join(res_dir, 'inp.csv'))
ax = inp.loc[trn_t0:trn_t1].plot(subplots=True)
fig = ax[0].get_figure()
fig.savefig(os.path.join(res_dir, 'inp_training.png'), dpi=200)
ax = inp.loc[vld_t0:vld_t1].plot(subplots=True)
fig = ax[0].get_figure()
fig.savefig(os.path.join(res_dir, 'inp_validation.png'), dpi=200)
# Assign model desired outputs
ideal = ms[['T']]
ideal['time'] = (ideal.index - ideal.index[0]).total_seconds() # ModestPy needs index in seconds
ideal = ideal.set_index('time') # ModestPy needs index named 'time'
ideal.to_csv(os.path.join(res_dir, 'ideal.csv'))
ax = ideal.loc[trn_t0:trn_t1].plot(subplots=True)
fig = ax[0].get_figure()
fig.savefig(os.path.join(res_dir, 'ideal_training.png'), dpi=200)
ax = ideal.loc[vld_t0:vld_t1].plot(subplots=True)
fig = ax[0].get_figure()
fig.savefig(os.path.join(res_dir, 'ideal_validation.png'), dpi=200)
# Parameters
known = {
'Vi': 139. * 3.5,
'maxHeat': 2689.,
'maxVent': 4800.,
'Tve': 21.
}
est = dict()
est['shgc'] = (1.0, 0.0, 10.0)
est['tmass'] = (5., 1., 50.)
est['RExt'] = (1., 0.5, 4.)
est['occheff'] = (1., 0.5, 3.0)
# Initial condition parameters:
ic_param = dict() # Empty, because MShoot needs to manipulate states directly
# Estimation
ga_opts = {'maxiter': 50, 'tol': 1e-7, 'lhs': True, 'pop_size': 40}
scipy_opts = {
'solver': 'L-BFGS-B',
'options': {'maxiter': 50, 'tol': 1e-12}
}
# Iterate over all FMUs
for fmu in fmus:
wdir = os.path.join(res_dir, fmu.split('.')[0])
fmu_file = os.path.join(fmu_dir, fmu)
if not os.path.exists(wdir):
os.makedirs(wdir)
session = modestpy.Estimation(wdir, fmu_file, inp, known, est, ideal,
lp_n = 1,
lp_len = trn_t1 - trn_t0,
lp_frame = (trn_t0, trn_t1),
vp = (vld_t0, vld_t1),
methods = ('GA', 'SCIPY'),
ga_opts = ga_opts,
scipy_opts = scipy_opts,
ic_param=ic_param,
ftype = 'RMSE',
seed = 12345)
estimates = session.estimate()
# Validation
vld = session.validate()
vld_err = vld[0]
vld_res = vld[1]
with open(os.path.join(wdir, 'vld_err.txt'), 'w') as f:
for k in vld_err:
f.write("{}: {:.5f}\n".format(k, vld_err[k]))
vld_res.to_csv(os.path.join(wdir, 'vld_res.csv'))
# Save all parameters (except IC parameters)
parameters = pd.DataFrame(index=[0])
for p in estimates:
parameters[p] = estimates[p]
for p in known:
parameters[p] = known[p]
for p in ic_param:
parameters = parameters.drop(p, axis=1)
parameters.to_csv(os.path.join(wdir, 'parameters.csv'), index=False)
# Check how the estimates are far from the bounds (relative estimates -> esrel)
esrel = pd.DataFrame(index=[0])
for p in estimates:
lb = est[p][1] # Lower bound
ub = est[p][2] # Upper bound
esrel[p] = (estimates[p] - lb) / (ub - lb)
esrel.to_csv(os.path.join(wdir, 'parameters_rel.csv'), index=False)
| 2.15625
| 2
|
mykde/main.py
|
warvariuc/mykde
| 5
|
12781226
|
<filename>mykde/main.py
__author__ = "<NAME> <<EMAIL>>"
import os
import sys
import html
from pkgutil import iter_modules
from PyQt4 import QtCore, QtGui, uic
from PyKDE4 import kdecore
import mykde
def walk_modules(path):
"""Loads a module and all its submodules from a the given module path and returns them.
If *any* module throws an exception while importing, that exception is thrown back.
For example: walk_modules('scrapy.utils')
"""
modules = []
module = __import__(path, {}, {}, [''])
modules.append(module)
if hasattr(module, '__path__'): # is a package
for _, subpath, ispkg in iter_modules(module.__path__):
fullpath = path + '.' + subpath
if ispkg:
modules += walk_modules(fullpath)
else:
submod = __import__(fullpath, {}, {}, [''])
modules.append(submod)
return modules
def iter_classes(module, klass):
"""Return an iterator over all klass subclasses defined in the given module.
"""
for obj in vars(module).values():
if isinstance(obj, type) and issubclass(obj, klass) and obj.__module__ == module.__name__:
yield obj
BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
os.chdir(BASE_DIR)
FormClass, BaseClass = uic.loadUiType(os.path.join(BASE_DIR, 'mykde', 'main_window.ui'))
assert BaseClass is QtGui.QMainWindow
class MainWindow(QtGui.QMainWindow, FormClass):
def __init__(self):
super().__init__()
# uic adds a function to our class called setupUi
# calling this creates all the widgets from the .ui file
self.setupUi(self)
self.setWindowIcon(QtGui.QIcon('mykde/icon_kde.svg'))
# open URL in the default KDE browser
self.textBrowser.setOpenExternalLinks(True)
self.print_html('<h3 style="color:#268BD2">Welcome to the KDE transformer!</h3>')
self.print_text('You are using KDE %s\n' % kdecore.versionString())
@QtCore.pyqtSlot(str)
def on_textBrowser_highlighted(self, url):
# show link URL in the status bar when mouse cursor is over it
self.statusBar().showMessage(url)
def _print_html(self, text):
text_browser = self.textBrowser
cursor = text_browser.textCursor()
cursor.movePosition(QtGui.QTextCursor.End)
text_browser.setTextCursor(cursor)
text_browser.insertHtml(text)
text_browser.ensureCursorVisible() # scroll to the new message
QtGui.QApplication.processEvents()
def print_text(self, text, end='\n'):
self._print_html(html.escape(text + end).replace('\n', '<br>'))
def print_html(self, text, end='<br>'):
self._print_html(text + end)
@QtCore.pyqtSlot()
def on_aboutButton_clicked(self):
self.print_html("""
<hr><h3 style="color:#268BD2">
"My KDE" transformer. Author <NAME>.<br>
<a href="https://github.com/warvariuc/mykde">Project page here.</a>
</h3><hr>
""")
@QtCore.pyqtSlot()
def on_proceedButton_clicked(self):
actions = []
for index in range(self.actionList.count()):
action_item = self.actionList.item(index)
if action_item.checkState() == QtCore.Qt.Checked:
action_class = action_item.data(QtCore.Qt.UserRole)
actions.append(action_class)
mykde.run_action_set(self, actions)
@QtCore.pyqtSlot(int)
def on_packageCombo_activated(self, index):
self.actionList.clear()
package_path = self.packageCombo.itemData(index)
all_actions = []
for module in walk_modules(package_path):
for action in iter_classes(module, mykde.BaseAction):
item = QtGui.QListWidgetItem(action.name)
all_actions.append(action)
item.setFlags(QtCore.Qt.ItemIsSelectable | QtCore.Qt.ItemIsUserCheckable |
QtCore.Qt.ItemIsEnabled)
item.setCheckState(QtCore.Qt.Checked)
item.setData(QtCore.Qt.UserRole, action)
self.actionList.addItem(item)
# enable all actions by default
self.allActionsCheckBox.setChecked(True)
def on_allActionsCheckBox_stateChanged(self, state):
if state == QtCore.Qt.PartiallyChecked:
return
for index in range(self.actionList.count()):
item = self.actionList.item(index)
item.setCheckState(state)
self.actionList.setCurrentItem(None) # reset selection
def on_actionList_itemChanged(self, item):
"""Item checked/unchecked.
"""
checked_action_count = 0
for index in range(self.actionList.count()):
action_item = self.actionList.item(index)
if action_item.checkState() == QtCore.Qt.Checked:
checked_action_count += 1
if checked_action_count == 0:
self.allActionsCheckBox.setCheckState(QtCore.Qt.Unchecked)
elif checked_action_count == self.actionList.count():
self.allActionsCheckBox.setCheckState(QtCore.Qt.Checked)
else:
self.allActionsCheckBox.setCheckState(QtCore.Qt.PartiallyChecked)
def on_actionList_doubleClicked(self, modelIndex):
"""Item double-clicked.
"""
for index in range(self.actionList.count()):
action_item = self.actionList.item(index)
check_state = QtCore.Qt.Checked if index == modelIndex.row() else QtCore.Qt.Unchecked
action_item.setCheckState(check_state)
def on_actionList_currentRowChanged(self, index):
if index == -1: # no row is selected
return
item = self.actionList.item(index)
action = item.data(QtCore.Qt.UserRole)
self.print_html('About action "<b>%s</b>":<blockquote>%s</blockquote>'
% (action.name, action.description.strip()))
def main(package_module):
app = QtGui.QApplication(sys.argv)
main_window = MainWindow()
package_module_name = package_module.__name__
for _, module_name, _ in iter_modules([package_module_name]):
main_window.packageCombo.addItem(module_name, package_module_name + '.' + module_name)
if main_window.packageCombo.count():
main_window.packageCombo.activated.emit(0)
main_window.show()
main_window.proceedButton.setFocus(True)
app.exec()
| 2.453125
| 2
|
pages/extensions/shortcodes/shortcode.py
|
uskay/docs
| 0
|
12781227
|
<gh_stars>0
# -*- coding: utf-8 -*-
import os
import uuid
from grow.templates import tags
import jinja2
import markdown
class Shortcode(object):
# The (tag)name for the shortcode
name = 'default'
# For all BBCode options see https://bbcode.readthedocs.io/en/latest/formatters.html#custom-tag-options
newline_closes = False
same_tag_closes = False
standalone = False
render_embedded = True
transform_newlines = False
escape_html = False
replace_links = False
replace_cosmetic = True
strip = False
swallow_trailing_newline = True
# Pod-relative path to the template if there is one to render
template = None
# Dictionary of variables that get passed into the template
context = {}
# If set to True markdown inside the shortcode will be rendered ahead
prerender_markdown = False
# Can be overwritten with a method that can be used to alter the value
# before rendering
transform = None
# Set to True to enable template rendering even with empty value
render_empty = False
def __init__(self, pod, extension):
self._pod = pod
self._extension = extension
def register(self, parser):
"""Adds a formatter for the shortcode to the BBCode parser"""
parser.add_formatter(
self.name,
self._render,
newline_closes=self.newline_closes,
same_tag_closes=self.same_tag_closes,
standalone=self.standalone,
render_embedded=self.render_embedded,
transform_newlines=self.transform_newlines,
escape_html=self.escape_html,
replace_links=self.replace_links,
replace_cosmetic=self.replace_cosmetic,
strip=self.strip,
swallow_trailing_newline=self.swallow_trailing_newline, )
self._pod.logger.info('Registered shortcode "{}"'.format(self.name))
def _render(self, tag_name, value, options, parent, context):
if self.prerender_markdown:
# Prerender markdown to have HTML
value = markdown.markdown(value)
if callable(self.transform):
# Give shortcode author the chance to manipulate the output
value = self.transform(value=value, options=options)
# Check if we still have a value to render
if not value and not self.render_empty:
return ''
if self.template:
value = self._render_template(
doc=context['doc'], value=value, options=options)
# Store rendered shortcode in extension for replacement on output
id = uuid.uuid4()
self._extension.values[id] = value.strip()
return '<!-- {} -->'.format(id)
def _render_template(self, doc, value, options):
# Check if template exists
template_path = '{}/{}'.format(self._pod.root, self.template)
if os.path.exists(template_path):
# Get pod's jinja2 environment for rendering
jinja = self._pod.get_jinja_env()
# Build context for rendering of template
context = self.context
context['value'] = value
context['options'] = options
# Bring default grow tags/variables into template
context['doc'] = doc
context['g'] = tags.create_builtin_tags(self._pod, doc)
with open(template_path) as template:
template = jinja.from_string(template.read())
return template.render(context)
| 2.59375
| 3
|
tests/test_profiles.py
|
nacknime-official/freelancehunt-api
| 3
|
12781228
|
#!/usr/bin/python3
"""#TODO: Write comments."""
from freelancehunt import Profiles
class Profiles:
def __init__(self, token=None, **kwargs):
pass
#property
def my_profile(self):
pass
def get_freelancers_list(self, country_id=None, city_id=None,
skill_id=None, login=None, pages=1):
pass
def get_employers_list(self, country_id=None, city_id=None, login=None,
pages=1):
pass
def get_freelancer_datails(self, profile_id):
pass
def get_employer_datails(self, profile_id):
pass
| 2.53125
| 3
|
apps/trails/migrations/0001_initial.py
|
schocco/mds-web
| 0
|
12781229
|
<filename>apps/trails/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.contrib.gis.db.models.fields
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Trail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100, verbose_name='name')),
('type', models.CharField(max_length=100, verbose_name='trail type', choices=[(b'unknown', 'unknown'), (b'uphill', 'uphill'), (b'downhill', 'downhill'), (b'xc', 'cross country')])),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('edited', models.DateTimeField(auto_now=True, verbose_name='last change')),
('description', models.CharField(max_length=500, verbose_name='description', blank=True)),
('waypoints', django.contrib.gis.db.models.fields.MultiLineStringField(srid=4326, dim=3, verbose_name='waypoints')),
('trail_length', models.IntegerField(help_text='in meters', null=True, verbose_name='length', blank=True)),
('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
]
| 1.78125
| 2
|
main.py
|
Borgotto/magnifico-rettore
| 0
|
12781230
|
import os
import asyncio
import discord
from discord.ext import commands
# Try to get the bot token from file, quit if it fails
try:
with open('token') as file:
token = file.readline()
except IOError:
print("Missing token file containing the bot's token")
quit()
# Create config and cog folders if they don't exist
if not os.path.exists('./config/'):
os.makedirs('./config/')
if not os.path.exists('./cogs/'):
os.makedirs('./cogs/')
# Create bot object
bot = commands.Bot(command_prefix="mhh",
strip_after_prefix=True,
owner_id=289887222310764545,
intents=discord.Intents.all())
# Load all .py files from 'cogs' directory
for filename in os.listdir('./cogs'):
if (filename.endswith('.py')):
asyncio.run(bot.load_extension(f'cogs.{filename[:-3]}'))
@bot.event
async def on_ready():
# Set the bot presence status
await bot.change_presence(status=discord.Status.online)
# Print a bunch of info about the bot
print ("\n--------------------------------\n")
print ("Bot Name:", bot.user.name)
print ("Bot ID:", bot.user.id)
print ("discord.py version:", discord.__version__)
print ("\n--------------------------------\n")
bot.run(token)
| 2.765625
| 3
|
test/test_file_generators.py
|
fluxtransport/hazel2
| 0
|
12781231
|
import hazel
import glob
import os
def test_file_generators():
tmp = hazel.tools.File_observation(mode='single')
tmp.set_size(n_lambda=128, n_pixel=1)
tmp.save('test')
tmp = hazel.tools.File_observation(mode='multi')
tmp.set_size(n_lambda=128, n_pixel=10)
tmp.save('test2')
tmp = hazel.tools.File_photosphere(mode='single')
tmp.set_default(n_pixel=1)
tmp.save('photosphere')
tmp = hazel.tools.File_photosphere(mode='multi')
tmp.set_default(n_pixel=10)
tmp.save('photosphere2')
tmp = hazel.tools.File_chromosphere(mode='single')
tmp.set_default(n_pixel=1)
tmp.save('chromosphere')
tmp = hazel.tools.File_chromosphere(mode='multi')
tmp.set_default(n_pixel=10)
tmp.save('chromosphere2')
    # Best-effort cleanup of the generated files; ignore files that are already gone
    for pattern in ('test*.*', 'photosphere*.*', 'chromosphere*.*'):
        for f in glob.glob(pattern):
            try:
                os.remove(f)
            except OSError:
                pass
| 2.21875
| 2
|
pyscripts/city_to_graph.py
|
ElvinLord12/cara_v2
| 0
|
12781232
|
import osmnx as ox
import networkx as nx
ox.config(use_cache=True, log_console=False)
graph = ox.graph_from_address('953 Danby Rd, Ithaca, New York', network_type='walk')
fig, ax = ox.plot_graph(graph)
| 2.40625
| 2
|
test/scripts/py/enrollment_summary_pyspark.py
|
joerg-schneider/airflow-bootstrap
| 23
|
12781233
|
import pyspark
import pyspark.sql.functions as f
from airtunnel import PySparkDataAsset, PySparkDataAssetIO
def rebuild_for_store(asset: PySparkDataAsset, airflow_context):
spark_session = pyspark.sql.SparkSession.builder.getOrCreate()
student = PySparkDataAsset(name="student_pyspark")
programme = PySparkDataAsset(name="programme_pyspark")
enrollment = PySparkDataAsset(name="enrollment_pyspark")
student_df = student.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
programme_df = programme.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
enrollment_df = enrollment.retrieve_from_store(
airflow_context=airflow_context,
consuming_asset=asset,
spark_session=spark_session,
)
enrollment_summary: pyspark.sql.DataFrame = enrollment_df.join(
other=student_df, on=student.declarations.key_columns
).join(other=programme_df, on=programme.declarations.key_columns)
enrollment_summary = (
enrollment_summary.select(["student_major", "programme_name", "student_id"])
.groupby(["student_major", "programme_name"])
.agg(f.count("*").alias("count"))
)
PySparkDataAssetIO.write_data_asset(asset=asset, data=enrollment_summary)
spark_session.stop()
| 2.75
| 3
|
Coin_Detection.py
|
Jarvis-BITS/Coin-Detection
| 2
|
12781234
|
import cv2
import numpy as np
import math
# Function to compute the Euclidean distance between two points:
def euc_dst(x1, y1, x2, y2):
pt_a = (x1 - x2)**2
pt_b = (y1 - y2)**2
return math.sqrt(pt_a + pt_b)
cap = cv2.VideoCapture(0)
while(True):
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
gray = cv2.medianBlur(gray, 5)
circles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, dp=1,
minDist=10, param1=100, param2=50, minRadius=0, maxRadius=500)
if circles is not None:
circles = np.uint16(np.around(circles))
x_cord = []
y_cord = []
rad = []
# Converting parameters of circle (center coordinates:x,y & radius)
for pt in circles[0, :]:
x, y, r = pt[0], pt[1], pt[2]
# Storing centers & radius of all circles
x_cord.append(x)
y_cord.append(y)
rad.append(r)
# Drawing outer circle
cv2.circle(frame, (x, y), r, (0, 255, 0), 2)
# Drawing circle center
cv2.circle(frame, (x, y), 1, (0, 0, 255), 3)
if len(rad) > 1:
for i in range(0, len(rad)):
x1 = x_cord[i]
y1 = y_cord[i]
for j in range(i+1, len(rad)):
x2 = x_cord[j]
y2 = y_cord[j]
cv2.line(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
mid_x = (x1+x2)/2
mid_y = (y1+y2)/2
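                    # Scale pixel coordinates to centimetres assuming roughly 25 px per cm
                    # (an uncalibrated guess implied by the /25 divisor below, not a measured value)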
dist = euc_dst(x1/25, y1/25, x2/25, y2/25)
cv2.putText(frame, "{:.1f}cm".format(dist), (int(mid_x), int(
mid_y - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.55, (0, 255, 0), 2)
cv2.imshow('video', frame)
if cv2.waitKey(1) == 27: # esc Key
break
cap.release()
cv2.destroyAllWindows()
| 2.765625
| 3
|
setup.py
|
kate-melnykova/authentication
| 1
|
12781235
|
<reponame>kate-melnykova/authentication
from setuptools import setup
from setuptools import find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='authentication',
version='1.1.0',
packages=find_packages(),
package_data={'authentication': ['templates/*']},
install_requires=['Flask >= 1.0', 'pycryptodome', 'wtforms', 'passlib', 'redis'],
url='',
license='MIT',
author='<NAME>',
author_email='<EMAIL>',
description='Learning authentication',
long_description=long_description,
long_description_content_type="text/markdown"
)
| 1.632813
| 2
|
tests/unit/test_simple_collectible.py
|
Sam44323/nft-mix-opensea
| 0
|
12781236
|
from scripts.utils.helpful_scripts import get_account, LOCAL_BLOCKCHAIN_ENVIRONMENTS
from scripts.simple_collectible.deploy_and_create import deploy_and_create
from brownie import network
import pytest
def network_checker():
if network.show_active() not in LOCAL_BLOCKCHAIN_ENVIRONMENTS:
pytest.skip()
def test_can_create__simple_collectible():
network_checker()
simple_collectible = deploy_and_create()
assert simple_collectible.ownerOf(0) == get_account()
| 1.882813
| 2
|
src/clims/migrations/0012_remove_extensibletype_category.py
|
withrocks/commonlims
| 4
|
12781237
|
<reponame>withrocks/commonlims<gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2020-01-09 13:21
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('clims', '0011_auto_20191106_1509'),
]
operations = [
migrations.RemoveField(
model_name='extensibletype',
name='category',
),
]
| 1.367188
| 1
|
payplug/test/test_real_http_query.py
|
SOGEXIS/payplug-python
| 0
|
12781238
|
<reponame>SOGEXIS/payplug-python<gh_stars>0
# -*- coding: utf-8 -*-
import sys
import pytest
from payplug import routes
from payplug.network import HttpClient, UrllibRequest, RequestsRequest
from payplug.test import TestBase
class TestRealHttpQuery(TestBase):
@pytest.mark.parametrize("api_version", [None, '2019-08-06'])
def test_http_query_requests(self, api_version):
http_client = HttpClient(token='a_secret_key', api_version=api_version, request_handler=RequestsRequest)
_, status = http_client._request('GET', routes.API_BASE_URL + '/test', authenticated=False)
assert status == 200
@pytest.mark.xfail(sys.version_info < (2, 7, 9), reason="Can't set ca_file easily with urllib.")
@pytest.mark.parametrize("api_version", [None, '2019-08-06'])
def test_http_query_urllib(self, api_version):
http_client = HttpClient(token='a_secret_key', api_version=api_version, request_handler=UrllibRequest)
_, status = http_client._request('GET', routes.API_BASE_URL + '/test', authenticated=False)
assert status == 200
| 2.375
| 2
|
tests/pytests/pyre/complex-facility.py
|
willic3/pythia
| 1
|
12781239
|
#!/usr/bin/env python
def simple():
from TestComponents import ComplexFacility
return ComplexFacility()
# End of file
| 1.335938
| 1
|
sds-az/Part 2 - Regression/Section 7 - Support Vector Regression (SVR)/SupportVectorRegression.py
|
coolmacmaniac/codeml
| 0
|
12781240
|
#!/usr/local/bin/python
# -*- coding: utf-8 -*-
"""
Created on : Mon Jun 4 23:17:56 2018
@author : Sourabh
"""
# %%
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import matplotlib.pyplot as plt
# ============================================================================ #
np.set_printoptions(threshold=np.inf)  # np.nan is rejected as a threshold by newer NumPy releases
# constant properties that need changes according to the actual problem
Data_File = 'Position_Salaries.csv'
Dependent_Variable_Column = 2
Test_Data_Size = 0.2
# import the dataset & extract the feature and the dependent variable vectors
dataset = pd.read_csv(Data_File)
X = dataset.iloc[:, 1:Dependent_Variable_Column].values
y = dataset.iloc[:, Dependent_Variable_Column].values
# feature scaling: SVR does not support it automatically, we need to do it here
sc_X = StandardScaler()
sc_y = StandardScaler()
X_scaled = sc_X.fit_transform(X.reshape(-1, 1))
y_scaled = sc_y.fit_transform(y.reshape(-1, 1))
# ============================================================================ #
# creating and fitting the SVR model to the dataset
# as we know that our training data set is not linear, we should not use linear
# kernel here, it's better we use any of Polynomial or Gaussian kernel.
regressor = SVR(kernel='rbf')
regressor.fit(X_scaled, y_scaled.ravel())  # SVR expects a 1-D target vector
# predicting a new result with SVR model
# the sample should also be a 1 x m matrix with m feature values
sampleValue = np.array([[6.5]])
y_pred = sc_y.inverse_transform(
    regressor.predict(
        sc_X.transform(sampleValue)
    ).reshape(-1, 1)  # StandardScaler.inverse_transform expects a 2-D array
)
# ============================================================================ #
# visualising the SVR results
stepSize = 0.1
X_grid = np.arange(start=min(X), stop=max(X)+stepSize, step=stepSize)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color='red', marker='o', label='Samples')
plt.plot(X_grid,
         sc_y.inverse_transform(
             regressor.predict(sc_X.transform(X_grid)).reshape(-1, 1)),
         color='blue',
         label='SVR Model')
plt.title('Truth or Bluff (SVR)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.legend(loc='best')
plt.show()
| 3.046875
| 3
|
337. House Robber III.py
|
JazzikPeng/Algorithm-in-Python
| 3
|
12781241
|
# Definition for a binary tree node (the standard LeetCode definition,
# uncommented here so the file runs standalone).
class TreeNode:
    def __init__(self, x):
        self.val = x
        self.left = None
        self.right = None
class Solution:
def __init__(self):
self.dic = {}
def rob(self, root: TreeNode) -> int:
if root is None:
return 0
if root.left is None and root.right is None:
return root.val
if root in self.dic:
return self.dic[root]
rob1 = self.rob(root.left) + self.rob(root.right)
rob2 = root.val
if root.left:
rob2 += self.rob(root.left.left) + self.rob(root.left.right)
if root.right:
rob2 += self.rob(root.right.right) + self.rob(root.right.left)
re = max(rob1, rob2)
self.dic[root] = re
return re
# State: f(root) is the maximum amount that can be robbed from the subtree rooted at root.
# Transition: f(root) = max(f(root.left) + f(root.right),
#                           root.val + f(root.left.left) + f(root.left.right)
#                                    + f(root.right.left) + f(root.right.right))
# Base cases: if root is None, return 0; if root is a leaf, return root.val.
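# Minimal usage sketch (hypothetical example tree, not part of the original solution):
# for the tree [3, 2, 3, None, 3, None, 1] the answer should be 7 (rob 3 + 3 + 1).
if __name__ == "__main__":
    root = TreeNode(3)
    root.left = TreeNode(2)
    root.left.right = TreeNode(3)
    root.right = TreeNode(3)
    root.right.right = TreeNode(1)
    print(Solution().rob(root))  # expected output: 7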
| 3.625
| 4
|
test_script.py
|
stu314159/pyGAS
| 0
|
12781242
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 10 17:36:05 2017
@author: stu
"""
import pyGAS as pg
helium = pg.He();
To = 273.15; # K
T1 = 800; #K
P = 101.3; # kPa
cp1 = helium.Cp(T1)
cv1 = helium.Cv(T1)
k1 = helium.k(T1)
print "Cp = %g \n"%cp1
print "Cv = %g \n"%cv1
print "k = %g \n"%k1
h0 = helium.enthalpy(To)
h1 = helium.enthalpy(T1)
s0 = helium.entropy(To,P)
s1 = helium.entropy(T1,P)
v1 = helium.v_TP(T1,P)
T_check = helium.T_vP(v1,P)
print "specific heat at %g K = %g kJ/kg-K \n"%(T1,cp1)
print "specific enthalpy at %g K = %g kJ/kg \n"%(T1,(h1))
print "specific entropy at %g K = %g kJ/kg-K \n"%(T1,(s1))
print "specific volume at %g K, %g kPa = %g m^3/kg \n"%(T1,P,v1)
print "Temperature at %g m^3/kg, %g kPa = %g K \n"%(v1,P,T_check)
CO2 = pg.CO2();
s1 = CO2.entropy(T1,P)
P2 = 10.*P
T2 = CO2.isentropicWork(T1,P,P2)
s2 = CO2.entropy(T2,P2)
h1 = CO2.enthalpy(T1)
print "specific enthalpy at %g K = %g kJ/kg \n"%(T1,(h1))
print "specific entropy at %g K = %g kJ/kg-K \n"%(T1,(s1))
print "Cp(CO2) at 250K = %g kJ/kg-K \n"%(CO2.Cp(250))
print "h(Air) at 650K = %g kJ/kg \n"%(pg.Air().Cp(650))
print "T2 = %g K \n"%T2
print "s(CO2) at %g K and %g kPa = %g kJ/kg-K \n"%(T2,P2,s2)
| 2.546875
| 3
|
drive/cars/admin.py
|
yousef-alramli/rest-framwork
| 0
|
12781243
|
from django.contrib import admin
from .models import Car
@admin.register(Car)
class CarAdmin(admin.ModelAdmin):
list_display = ['name', 'updated', 'user']
| 1.671875
| 2
|
euctr/crawl/base/test_config.py
|
jeekim/euctr-tracker-code
| 3
|
12781244
|
<gh_stars>1-10
from pathlib import Path
from .config import *
from .config import SCRAPY_SETTINGS
CACHE_SETTINGS = {
'HTTPCACHE_POLICY': 'scrapy.extensions.httpcache.DummyPolicy',
'HTTPCACHE_ENABLED': True,
'HTTPCACHE_DIR': Path(__file__).parent.parent / 'tests/fixtures'
}
SCRAPY_SETTINGS.update(CACHE_SETTINGS)
| 1.351563
| 1
|
tzcode/hook/issue.py
|
Lewinta/TZCode
| 0
|
12781245
|
<reponame>Lewinta/TZCode
import frappe
import requests
import json
from frappe import enqueue
@frappe.whitelist()
def create_issue(subject, status, customer, raised_by, remote_reference, priority=None, description="No Description"):
if not priority:
priority = "Medio"
if priority and not frappe.db.exists("Issue Priority", priority):
priority = "Medio"
exists = frappe.db.exists("Issue", {"remote_reference": remote_reference})
issue = frappe.get_doc("Issue", exists) if exists else frappe.new_doc("Issue")
issue.update({
"subject": subject,
"status": status,
"customer": customer,
"raised_by": raised_by,
"remote_reference": remote_reference,
"priority": priority,
"description": description,
})
    if exists:
        issue.db_update()
    else:
        issue.save(ignore_permissions=True)
return issue.as_dict()
def on_update(doc, method):
enqueue(close_issue, doc=doc)
def close_issue(doc):
if not doc.customer:
return
remote_method = "/api/resource/Issue/{}".format(doc.remote_reference)
customer = frappe.get_doc("Customer", doc.customer)
if not customer.api_key or not customer.api_secret:
frappe.throw("Please fill out integration section for customer {}".format(customer.customer_name))
headers = { "Authorization": 'token {}:{}'.format(customer.api_key, customer.api_secret)}
data = json.dumps({
"status": doc.status,
"resolution_details": doc.resolution_details,
"due_date": doc.due_date,
"assigned_to": doc.assigned_to,
})
endpoint = "{}{}".format(customer.host_url, remote_method)
response = requests.put(
url=endpoint,
data=data,
headers=headers
)
response.raise_for_status()
# print(response.text)
| 2.03125
| 2
|
qiling/qiling/loader/elf.py
|
mrTavas/owasp-fstm-auto
| 2
|
12781246
|
<gh_stars>1-10
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import os
from heapq import heappush, heappop
from elftools.elf.elffile import ELFFile
from elftools.elf.relocation import RelocationSection
from elftools.elf.sections import SymbolTableSection
from elftools.elf.descriptions import describe_reloc_type
from qiling.const import *
from qiling.exception import *
from .loader import QlLoader
from qiling.os.linux.function_hook import FunctionHook
from qiling.os.linux.syscall_nums import SYSCALL_NR
from qiling.os.linux.kernel_api.hook import *
from qiling.os.linux.kernel_api.kernel_api import hook_sys_open, hook_sys_read, hook_sys_write
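# ELF auxiliary vector (auxv) entry types, mirroring the AT_* constants from Linux <elf.h>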
AT_NULL = 0
AT_IGNORE = 1
AT_EXECFD = 2
AT_PHDR = 3
AT_PHENT = 4
AT_PHNUM = 5
AT_PAGESZ = 6
AT_BASE = 7
AT_FLAGS = 8
AT_ENTRY = 9
AT_NOTELF = 10
AT_UID = 11
AT_EUID = 12
AT_GID = 13
AT_EGID = 14
AT_PLATFORM = 15
AT_HWCAP = 16
AT_CLKTCK = 17
AT_SECURE = 23
AT_BASE_PLATFORM = 24
AT_RANDOM = 25
AT_HWCAP2 = 26
AT_EXECFN = 31
FILE_DES = []
# start area memory for API hooking
# we will reserve 0x1000 bytes for this (which contains multiple slots of 4/8 bytes, each for one api)
API_HOOK_MEM = 0x1000000
# SYSCALL_MEM = 0xffff880000000000
# memory for syscall table
SYSCALL_MEM = API_HOOK_MEM + 0x1000
class ELFParse():
def __init__(self, path, ql):
self.path = os.path.abspath(path)
self.ql = ql
self.f = open(path, "rb")
elfdata = self.f.read()
self.elffile = ELFFile(self.f)
self.elfdata = elfdata.ljust(52, b'\x00')
if self.elffile.e_ident_raw[: 4] != b'\x7fELF':
raise QlErrorELFFormat("ERROR: NOT an ELF")
self.elfhead = self.parse_header()
if self.elfhead['e_type'] == "ET_REL": # kernel driver
self.is_driver = True
else:
self.is_driver = False
    def getelfdata(self, offset, size):
        return self.elfdata[offset: offset + size]
def parse_header(self):
return dict(self.elffile.header)
def parse_sections(self):
return self.elffile.iter_sections()
def parse_segments(self):
return self.elffile.iter_segments()
def translate_segment_perm_to_uc_prot(self, perm):
"""
Unicorn define the following memory protection constants :
'Public Enum uc_prot
' UC_PROT_NONE = 0
' UC_PROT_READ = 1
' UC_PROT_WRITE = 2
' UC_PROT_EXEC = 4
' UC_PROT_ALL = 7
'End Enum
Elf segment permissions are the following
* bit 0 : X
* bit 1 : W
* bit 2 : R
"""
prot = 0
if perm & 0x1:
prot |= 4
if (perm >> 1) & 0x1:
prot |= 2
if (perm >> 2) & 0x1:
prot |= 1
return prot
class QlLoaderELF(QlLoader, ELFParse):
def __init__(self, ql):
super(QlLoaderELF, self).__init__(ql)
self.ql = ql
def run(self):
if self.ql.code:
self.ql.mem.map(self.ql.os.entry_point, self.ql.os.code_ram_size, info="[shellcode_stack]")
self.ql.os.entry_point = (self.ql.os.entry_point + 0x200000 - 0x1000)
self.ql.mem.write(self.ql.os.entry_point, self.ql.code)
self.ql.reg.arch_sp = self.ql.os.entry_point
return
if self.ql.archbit == 32:
stack_address = int(self.ql.os.profile.get("OS32", "stack_address"), 16)
stack_size = int(self.ql.os.profile.get("OS32", "stack_size"), 16)
elif self.ql.archbit == 64:
stack_address = int(self.ql.os.profile.get("OS64", "stack_address"), 16)
stack_size = int(self.ql.os.profile.get("OS64", "stack_size"), 16)
self.path = self.ql.path
ELFParse.__init__(self, self.path, self.ql)
self.interp_address = 0
self.mmap_address = 0
self.ql.mem.map(stack_address, stack_size, info="[stack]")
# if self.ql.ostype == QL_OS.FREEBSD:
# init_rbp = stack_address + 0x40
# init_rdi = stack_address
# self.ql.reg.rbp = init_rbp
# self.ql.reg.rdi = init_rdi
# self.ql.reg.r14 = init_rdi
if not self.is_driver:
self.load_with_ld(stack_address + stack_size, argv=self.argv, env=self.env)
else:
# Linux kernel driver
if self.load_driver(self.ql, stack_address + stack_size):
raise QlErrorFileType("Unsupported FileType")
# hook Linux kernel api
self.ql.hook_code(hook_kernel_api)
self.ql.reg.arch_sp = self.stack_address
self.ql.os.stack_address = self.stack_address
# No idea why.
if self.ql.ostype == QL_OS.FREEBSD:
self.ql.reg.rdi = self.stack_address
self.ql.reg.r14 = self.stack_address
# Copy strings to stack.
def copy_str(self, addr, strs):
l_addr = []
s_addr = addr
for s in strs:
bs = s.encode("utf-8") + b"\x00" if not isinstance(s, bytes) else s
s_addr = s_addr - len(bs)
self.ql.mem.write(s_addr, bs)
l_addr.append(s_addr)
return l_addr, s_addr
def alignment(self, val):
if self.ql.archbit == 64:
return (val // 8) * 8
elif self.ql.archbit == 32:
return (val // 4) * 4
def NEW_AUX_ENT(self, key, val):
return self.ql.pack(int(key)) + self.ql.pack(int(val))
def NullStr(self, s):
return s[: s.find(b'\x00')]
def pcalc(self, length, align):
tmp = length // align
if length % align:
tmp = tmp + 1
return tmp * align
def load_with_ld(self, stack_addr, load_address=-1, argv=[], env={}):
pagesize = 0x1000
_mem_e = 0
if load_address <= 0:
if self.ql.archbit == 64:
load_address = int(self.ql.os.profile.get("OS64", "load_address"), 16)
else:
load_address = int(self.ql.os.profile.get("OS32", "load_address"), 16)
elfhead = super().parse_header()
# Correct the load_address if needed
if elfhead['e_type'] == 'ET_EXEC':
load_address = 0
elif elfhead['e_type'] != 'ET_DYN':
self.ql.log.debug("Some error in head e_type: %i!", elfhead['e_type'])
return -1
# We need to sort the memory segments first, sometimes they are unordered
loadheap = []
for entry in super().parse_segments():
if entry['p_type'] == 'PT_LOAD' or entry['p_type'] == 'PT_INTERP':
paddr = entry['p_vaddr']
heappush(loadheap, (paddr, entry))
loaddb = [dict(heappop(loadheap)[1].header) for i in range(len(loadheap))]
# Determine the range of memory space opened up
mem_start = -1
mem_end = -1
interp_path = ''
for entry in loaddb:
if entry['p_type'] == 'PT_LOAD':
if mem_start > entry['p_vaddr'] or mem_start == -1:
mem_start = entry['p_vaddr']
if mem_end < entry['p_vaddr'] + entry['p_memsz'] or mem_end == -1:
mem_end = entry['p_vaddr'] + entry['p_memsz']
if entry['p_type'] == 'PT_INTERP':
interp_path = self.NullStr(super().getelfdata(entry['p_offset'], entry['p_filesz']))
mem_start = int(mem_start // 0x1000) * 0x1000
mem_end = int(mem_end // 0x1000 + 1) * 0x1000
# Now we calculate the segments based on page alignment
_load_segments = {}
_last_start = 0
_last_end = 0
_last_perm = 0
for entry in loaddb:
if entry['p_type'] == 'PT_LOAD':
_mem_start = ((load_address + entry["p_vaddr"]) // pagesize) * pagesize
_mem_len = entry['p_memsz']
_mem_end = self.pcalc(load_address + entry["p_vaddr"] + _mem_len, pagesize)
_perms = self.translate_segment_perm_to_uc_prot(entry["p_flags"])
if _last_end < _mem_start:
_load_segments[_mem_start] = _mem_end, _perms
_last_start = _mem_start
elif _perms == _last_perm:
_load_segments[_last_start] = _mem_end, _perms
elif _last_end == _mem_start:
_load_segments[_mem_start] = _mem_end, _perms
_last_start = _mem_start
elif _mem_start < _last_end:
    _load_segments[_last_start] = _mem_end, _perms
_last_end = _mem_end
_last_perm = _perms
# Let's map the memory first
_highestmapped_e = 0
for segment in _load_segments:
_mem_s = segment
_mem_e = _load_segments[segment][0]
_perms = _load_segments[segment][1] & 0xFF
try:
self.ql.mem.map(_mem_s, _mem_e - _mem_s, perms=_perms, info=self.path)
if _mem_e > _highestmapped_e:
_highestmapped_e = _mem_e
self.ql.log.debug("load 0x%x - 0x%x" % (_mem_s, _mem_e))
except Exception as e:
self.ql.log.debug("load 0x%x - 0x%x => %s" % (_mem_s, _mem_e, str(e)))
continue
# Now we write the segment data to the memory
for entry in loaddb:
if entry['p_type'] == 'PT_LOAD' and entry['p_filesz'] > 0:
try:
_mem_s = load_address + entry["p_vaddr"]
data = super().getelfdata(entry['p_offset'], entry['p_filesz'])
self.ql.mem.write(_mem_s, data)
except Exception as e:
self.ql.log.debug("segment data 0x%x - Length 0x%x => %s" % (_mem_s, len(data), str(e)))
continue
loaded_mem_end = load_address + mem_end
if loaded_mem_end > _mem_e:
self.ql.mem.map(_mem_e, loaded_mem_end - _mem_e, info=self.path)
self.ql.log.debug("load 0x%x - 0x%x" % (
_mem_e, loaded_mem_end)) # make sure we map all PT_LOAD tagged area
entry_point = elfhead['e_entry'] + load_address
self.ql.os.elf_mem_start = mem_start
self.ql.log.debug("mem_start: 0x%x mem_end: 0x%x" % (mem_start, mem_end))
self.brk_address = mem_end + load_address + 0x2000
# Load interpreter if there is an interpreter
if interp_path != '':
interp_path = str(interp_path, 'utf-8', errors="ignore")
interp = ELFParse(self.ql.rootfs + interp_path, self.ql)
interphead = interp.parse_header()
self.ql.log.debug("interp is : %s" % (self.ql.rootfs + interp_path))
interp_mem_size = -1
for i in interp.parse_segments():
i = dict(i.header)
if i['p_type'] == 'PT_LOAD':
if interp_mem_size < i['p_vaddr'] + i['p_memsz'] or interp_mem_size == -1:
interp_mem_size = i['p_vaddr'] + i['p_memsz']
interp_mem_size = (interp_mem_size // 0x1000 + 1) * 0x1000
self.ql.log.debug("interp_mem_size is : 0x%x" % int(interp_mem_size))
if self.ql.archbit == 64:
self.interp_address = int(self.ql.os.profile.get("OS64", "interp_address"), 16)
elif self.ql.archbit == 32:
self.interp_address = int(self.ql.os.profile.get("OS32", "interp_address"), 16)
self.ql.log.debug("interp_address is : 0x%x" % (self.interp_address))
self.ql.mem.map(self.interp_address, int(interp_mem_size),
info=os.path.abspath(self.ql.rootfs + interp_path))
for i in interp.parse_segments():
# i =dict(i.header)
if i['p_type'] == 'PT_LOAD':
self.ql.mem.write(self.interp_address + i['p_vaddr'],
interp.getelfdata(i['p_offset'], i['p_filesz']))
entry_point = interphead['e_entry'] + self.interp_address
# Set MMAP addr
if self.ql.archbit == 64:
self.mmap_address = int(self.ql.os.profile.get("OS64", "mmap_address"), 16)
else:
self.mmap_address = int(self.ql.os.profile.get("OS32", "mmap_address"), 16)
self.ql.log.debug("mmap_address is : 0x%x" % (self.mmap_address))
# Set elf table
elf_table = b''
new_stack = stack_addr
# Set argc
elf_table += self.ql.pack(len(argv))
# Set argv
if len(argv) != 0:
argv_addr, new_stack = self.copy_str(stack_addr, argv)
elf_table += b''.join([self.ql.pack(_) for _ in argv_addr])
elf_table += self.ql.pack(0)
# Set env
if len(env) != 0:
env_addr, new_stack = self.copy_str(new_stack, [key + '=' + value for key, value in env.items()])
elf_table += b''.join([self.ql.pack(_) for _ in env_addr])
elf_table += self.ql.pack(0)
new_stack = self.alignment(new_stack)
randstr = 'a' * 0x10
cpustr = 'i686'
(addr, new_stack) = self.copy_str(new_stack, [randstr, cpustr])
new_stack = self.alignment(new_stack)
# Set AUX
self.elf_phdr = (load_address + elfhead['e_phoff'])
self.elf_phent = (elfhead['e_phentsize'])
self.elf_phnum = (elfhead['e_phnum'])
self.elf_pagesz = 0x1000
self.elf_guid = self.ql.os.uid
self.elf_flags = 0
self.elf_entry = (load_address + elfhead['e_entry'])
self.randstraddr = addr[0]
self.cpustraddr = addr[1]
if self.ql.archbit == 64:
self.elf_hwcap = 0x078bfbfd
elif self.ql.archbit == 32:
self.elf_hwcap = 0x1fb8d7
if self.ql.archendian == QL_ENDIAN.EB:
self.elf_hwcap = 0xd7b81f
elf_table += self.NEW_AUX_ENT(AT_PHDR, self.elf_phdr + mem_start)
elf_table += self.NEW_AUX_ENT(AT_PHENT, self.elf_phent)
elf_table += self.NEW_AUX_ENT(AT_PHNUM, self.elf_phnum)
elf_table += self.NEW_AUX_ENT(AT_PAGESZ, self.elf_pagesz)
elf_table += self.NEW_AUX_ENT(AT_BASE, self.interp_address)
elf_table += self.NEW_AUX_ENT(AT_FLAGS, self.elf_flags)
elf_table += self.NEW_AUX_ENT(AT_ENTRY, self.elf_entry)
elf_table += self.NEW_AUX_ENT(AT_UID, self.elf_guid)
elf_table += self.NEW_AUX_ENT(AT_EUID, self.elf_guid)
elf_table += self.NEW_AUX_ENT(AT_GID, self.elf_guid)
elf_table += self.NEW_AUX_ENT(AT_EGID, self.elf_guid)
elf_table += self.NEW_AUX_ENT(AT_HWCAP, self.elf_hwcap)
elf_table += self.NEW_AUX_ENT(AT_CLKTCK, 100)
elf_table += self.NEW_AUX_ENT(AT_RANDOM, self.randstraddr)
elf_table += self.NEW_AUX_ENT(AT_PLATFORM, self.cpustraddr)
elf_table += self.NEW_AUX_ENT(AT_SECURE, 0)
elf_table += self.NEW_AUX_ENT(AT_NULL, 0)
elf_table += b'\x00' * (0x10 - (new_stack - len(elf_table)) & 0xf)
self.ql.mem.write(new_stack - len(elf_table), elf_table)
new_stack = new_stack - len(elf_table)
# self.ql.reg.write(UC_X86_REG_RDI, new_stack + 8)
# for i in range(120):
# buf = self.ql.mem.read(new_stack + i * 0x8, 8)
# self.ql.log.info("0x%08x : 0x%08x " % (new_stack + i * 0x4, self.ql.unpack64(buf)) + ' '.join(['%02x' % i for i in buf]) + ' ' + ''.join([chr(i) if i in string.printable[ : -5].encode('ascii') else '.' for i in buf]))
self.ql.os.entry_point = self.entry_point = entry_point
self.ql.os.elf_entry = self.elf_entry = load_address + elfhead['e_entry']
self.stack_address = new_stack
self.load_address = load_address
self.images.append(self.coverage_image(load_address, load_address + mem_end, self.path))
self.ql.os.function_hook = FunctionHook(self.ql, self.elf_phdr + mem_start, self.elf_phnum, self.elf_phent,
load_address, load_address + mem_end)
self.init_sp = self.ql.reg.arch_sp
# If there is a loader, we ignore exit
self.skip_exit_check = self.elf_entry != self.entry_point
# map vsyscall section for some specific needs
if self.ql.archtype == QL_ARCH.X8664 and self.ql.ostype == QL_OS.LINUX:
_vsyscall_addr = int(self.ql.os.profile.get("OS64", "vsyscall_address"), 16)
_vsyscall_size = int(self.ql.os.profile.get("OS64", "vsyscall_size"), 16)
if not self.ql.mem.is_mapped(_vsyscall_addr, _vsyscall_size):
# initialize with \xcc then insert syscall entry
# each syscall should be 1KiB(0x400 bytes) away
self.ql.mem.map(_vsyscall_addr, _vsyscall_size, info="[vsyscall]")
self.ql.mem.write(_vsyscall_addr, _vsyscall_size * b'\xcc')
assembler = self.ql.create_assembler()
def _compile(asm):
bs, _ = assembler.asm(asm)
return bytes(bs)
_vsyscall_entry_asm = ["mov rax, 0x60;", # syscall gettimeofday
"mov rax, 0xc9;", # syscall time
"mov rax, 0x135;", # syscall getcpu
]
for idx, val in enumerate(_vsyscall_entry_asm):
self.ql.mem.write(_vsyscall_addr + idx * 0x400, _compile(val + "; syscall; ret"))
# get file offset of init module function
def lkm_get_init(self, ql):
elffile = ELFFile(open(ql.path, 'rb'))
symbol_tables = [s for s in elffile.iter_sections() if isinstance(s, SymbolTableSection)]
for section in symbol_tables:
for nsym, symbol in enumerate(section.iter_symbols()):
if symbol.name == 'init_module':
addr = symbol.entry.st_value + elffile.get_section(symbol['st_shndx'])['sh_offset']
ql.log.info("init_module = 0x%x" % addr)
return addr
# not found. FIXME: report error on invalid module??
ql.log.warning("invalid module? symbol init_module not found")
return -1
def lkm_dynlinker(self, ql, mem_start):
def get_symbol(elffile, name):
section = elffile.get_section_by_name('.symtab')
for symbol in section.iter_symbols():
if symbol.name == name:
return symbol
return None
elffile = ELFFile(open(ql.path, 'rb'))
all_symbols = []
self.ql.os.hook_addr = API_HOOK_MEM
# map address to symbol name
self.import_symbols = {}
# reverse dictionary to map symbol name -> address
rev_reloc_symbols = {}
# dump_mem("XX Original code at 15a1 = ", ql.mem.read(0x15a1, 8))
_sections = list(elffile.iter_sections())
for section in _sections:
# only care about reloc section
if not isinstance(section, RelocationSection):
continue
# ignore reloc for module section
if section.name == ".rela.gnu.linkonce.this_module":
continue
dest_sec_idx = section.header.get('sh_info', None)
if dest_sec_idx is not None and dest_sec_idx < len(_sections):
dest_sec = _sections[dest_sec_idx]
if dest_sec.header['sh_flags'] & 2 == 0:
# The target section is not loaded into memory, so just continue
continue
# The symbol table section pointed to in sh_link
symtable = elffile.get_section(section['sh_link'])
for rel in section.iter_relocations():
if rel['r_info_sym'] == 0:
continue
symbol = symtable.get_symbol(rel['r_info_sym'])
# Some symbols have zero 'st_name', so instead what's used is
# the name of the section they point at.
if symbol['st_name'] == 0:
symsec = elffile.get_section(symbol['st_shndx']) # save sh_addr of this section
symbol_name = symsec.name
sym_offset = symsec['sh_offset']
# we need to do reverse lookup from symbol to address
rev_reloc_symbols[symbol_name] = sym_offset + mem_start
else:
symbol_name = symbol.name
# get info about related section to be patched
info_section = elffile.get_section(section['sh_info'])
sym_offset = info_section['sh_offset']
if not symbol_name in all_symbols:
_symbol = get_symbol(elffile, symbol_name)
if _symbol['st_shndx'] == 'SHN_UNDEF':
# external symbol
# only save symbols of APIs
all_symbols.append(symbol_name)
# we need to lookup from address to symbol, so we can find the right callback
# for sys_xxx handler for syscall, the address must be aligned to 8
if symbol_name.startswith('sys_'):
if self.ql.os.hook_addr % self.ql.pointersize != 0:
self.ql.os.hook_addr = (int(
self.ql.os.hook_addr / self.ql.pointersize) + 1) * self.ql.pointersize
# print("hook_addr = %x" %self.ql.os.hook_addr)
self.import_symbols[self.ql.os.hook_addr] = symbol_name
# ql.log.info(":: Demigod is hooking %s(), at slot %x" %(symbol_name, self.ql.os.hook_addr))
if symbol_name == "page_offset_base":
# FIXME: this is for rootkit to scan for syscall table from page_offset_base
# write address of syscall table to this slot,
# so syscall scanner can quickly find it
ql.mem.write(self.ql.os.hook_addr, self.ql.pack(SYSCALL_MEM))
# we also need to do reverse lookup from symbol to address
rev_reloc_symbols[symbol_name] = self.ql.os.hook_addr
sym_offset = self.ql.os.hook_addr - mem_start
self.ql.os.hook_addr += self.ql.pointersize
else:
# local symbol
all_symbols.append(symbol_name)
_section = elffile.get_section(_symbol['st_shndx'])
rev_reloc_symbols[symbol_name] = _section['sh_offset'] + _symbol['st_value'] + mem_start
# ql.log.info(":: Add reverse lookup for %s to %x (%x, %x)" %(symbol_name, rev_reloc_symbols[symbol_name], _section['sh_offset'], _symbol['st_value']))
# ql.log.info(":: Add reverse lookup for %s to %x" %(symbol_name, rev_reloc_symbols[symbol_name]))
else:
sym_offset = rev_reloc_symbols[symbol_name] - mem_start
# ql.log.info("Relocating symbol %s -> 0x%x" %(symbol_name, rev_reloc_symbols[symbol_name]))
loc = elffile.get_section(section['sh_info'])['sh_offset'] + rel['r_offset']
loc += mem_start
if describe_reloc_type(rel['r_info_type'], elffile) == 'R_X86_64_32S':
# patch this reloc
if rel['r_addend']:
val = sym_offset + rel['r_addend']
val += mem_start
# ql.log.info('R_X86_64_32S %s: [0x%x] = 0x%x' %(symbol_name, loc, val & 0xFFFFFFFF))
ql.mem.write(loc, ql.pack32(val & 0xFFFFFFFF))
else:
# print("sym_offset = %x, rel = %x" %(sym_offset, rel['r_addend']))
# ql.log.info('R_X86_64_32S %s: [0x%x] = 0x%x' %(symbol_name, loc, rev_reloc_symbols[symbol_name] & 0xFFFFFFFF))
ql.mem.write(loc, ql.pack32(rev_reloc_symbols[symbol_name] & 0xFFFFFFFF))
elif describe_reloc_type(rel['r_info_type'], elffile) == 'R_X86_64_64':
# patch this function?
val = sym_offset + rel['r_addend']
val += 0x2000000 # init_module position: FIXME
# finally patch this reloc
# ql.log.info('R_X86_64_64 %s: [0x%x] = 0x%x' %(symbol_name, loc, val))
ql.mem.write(loc, ql.pack64(val))
elif describe_reloc_type(rel['r_info_type'], elffile) == 'R_X86_64_PC32':
# patch branch address: X86 case
val = rel['r_addend'] - loc
val += rev_reloc_symbols[symbol_name]
# finally patch this reloc
# ql.log.info('R_X86_64_PC32 %s: [0x%x] = 0x%x' %(symbol_name, loc, val & 0xFFFFFFFF))
ql.mem.write(loc, ql.pack32(val & 0xFFFFFFFF))
elif describe_reloc_type(rel['r_info_type'], elffile) == 'R_386_PC32':
val = ql.unpack(ql.mem.read(loc, 4))
val = rev_reloc_symbols[symbol_name] + val - loc
ql.mem.write(loc, ql.pack32(val & 0xFFFFFFFF))
elif describe_reloc_type(rel['r_info_type'], elffile) in ('R_386_32', 'R_MIPS_32'):
val = ql.unpack(ql.mem.read(loc, 4))
val = rev_reloc_symbols[symbol_name] + val
ql.mem.write(loc, ql.pack32(val & 0xFFFFFFFF))
elif describe_reloc_type(rel['r_info_type'], elffile) == 'R_MIPS_HI16':
# actual relocation is done in R_MIPS_LO16
prev_mips_hi16_loc = loc
elif describe_reloc_type(rel['r_info_type'], elffile) == 'R_MIPS_LO16':
val = ql.unpack16(ql.mem.read(prev_mips_hi16_loc + 2, 2)) << 16 | ql.unpack16(ql.mem.read(loc + 2, 2))
val = rev_reloc_symbols[symbol_name] + val
# *(word)(mips_lo16_loc + 2) is treated as signed
if (val & 0xFFFF) >= 0x8000:
val += (1 << 16)
ql.mem.write(prev_mips_hi16_loc + 2, ql.pack16(val >> 16))
ql.mem.write(loc + 2, ql.pack16(val & 0xFFFF))
else:
raise QlErrorNotImplemented("Relocation type %s not implemented" % describe_reloc_type(rel['r_info_type'], elffile))
return rev_reloc_symbols
def load_driver(self, ql, stack_addr, loadbase=0):
elfhead = super().parse_header()
elfdata_mapping = self.get_elfdata_mapping()
# Determine the range of memory space opened up
mem_start = -1
mem_end = -1
# for i in super().parse_program_header(ql):
# if i['p_type'] == PT_LOAD:
# if mem_start > i['p_vaddr'] or mem_start == -1:
# mem_start = i['p_vaddr']
# if mem_end < i['p_vaddr'] + i['p_memsz'] or mem_end == -1:
# mem_end = i['p_vaddr'] + i['p_memsz']
# mem_start = int(mem_start // 0x1000) * 0x1000
# mem_end = int(mem_end // 0x1000 + 1) * 0x1000
# FIXME
mem_start = 0x1000
mem_end = mem_start + int(len(elfdata_mapping) / 0x1000 + 1) * 0x1000
# map some memory to intercept external functions of Linux kernel
ql.mem.map(API_HOOK_MEM, 0x1000, info="[api_mem]")
ql.log.info("loadbase: %x, mem_start: %x, mem_end: %x" % (loadbase, mem_start, mem_end))
ql.mem.map(loadbase + mem_start, mem_end - mem_start, info=ql.path)
ql.mem.write(loadbase + mem_start, elfdata_mapping)
entry_point = self.lkm_get_init(ql) + loadbase + mem_start
ql.brk_address = mem_end + loadbase
# Set MMAP addr
if self.ql.archbit == 64:
self.mmap_address = int(self.ql.os.profile.get("OS64", "mmap_address"), 16)
else:
self.mmap_address = int(self.ql.os.profile.get("OS32", "mmap_address"), 16)
ql.log.debug("mmap_address is : 0x%x" % (self.mmap_address))
new_stack = stack_addr
new_stack = self.alignment(new_stack)
# self.ql.os.elf_entry = self.elf_entry = loadbase + elfhead['e_entry']
self.ql.os.entry_point = self.entry_point = entry_point
self.elf_entry = self.ql.os.elf_entry = self.ql.os.entry_point
self.stack_address = new_stack
self.load_address = loadbase
rev_reloc_symbols = self.lkm_dynlinker(ql, mem_start + loadbase)
# remember address of syscall table, so external tools can access to it
ql.os.syscall_addr = SYSCALL_MEM
# setup syscall table
ql.mem.map(SYSCALL_MEM, 0x1000, info="[syscall_mem]")
# zero out syscall table memory
ql.mem.write(SYSCALL_MEM, b'\x00' * 0x1000)
# print("sys_close = %x" %rev_reloc_symbols['sys_close'])
# print(rev_reloc_symbols.keys())
for sc in rev_reloc_symbols.keys():
if sc != 'sys_call_table' and sc.startswith('sys_'):
tmp_sc = sc[4:]
if hasattr(SYSCALL_NR, tmp_sc):
syscall_id = getattr(SYSCALL_NR, tmp_sc).value
ql.log.debug("Writing syscall %s to [0x%x]" % (sc, SYSCALL_MEM + ql.pointersize * syscall_id))
ql.mem.write(SYSCALL_MEM + ql.pointersize * syscall_id, ql.pack(rev_reloc_symbols[sc]))
# write syscall addresses into syscall table
# ql.mem.write(SYSCALL_MEM + 0, struct.pack("<Q", hook_sys_read))
ql.mem.write(SYSCALL_MEM + 0, ql.pack(self.ql.os.hook_addr))
# ql.mem.write(SYSCALL_MEM + 1 * 8, struct.pack("<Q", hook_sys_write))
ql.mem.write(SYSCALL_MEM + 1 * ql.pointersize, ql.pack(self.ql.os.hook_addr + 1 * ql.pointersize))
# ql.mem.write(SYSCALL_MEM + 2 * 8, struct.pack("<Q", hook_sys_open))
ql.mem.write(SYSCALL_MEM + 2 * ql.pointersize, ql.pack(self.ql.os.hook_addr + 2 * ql.pointersize))
# setup hooks for read/write/open syscalls
self.import_symbols[self.ql.os.hook_addr] = hook_sys_read
self.import_symbols[self.ql.os.hook_addr + 1 * ql.pointersize] = hook_sys_write
self.import_symbols[self.ql.os.hook_addr + 2 * ql.pointersize] = hook_sys_open
def get_elfdata_mapping(self):
elfdata_mapping = bytearray()
elfdata_mapping.extend(self.getelfdata(0, self.elfhead['e_ehsize'])) #elf header
for section in self.parse_sections():
if section.header['sh_flags'] & 2: # alloc flag
sh_offset = section.header['sh_offset']
sh_size = section.header['sh_size']
# align section addr
elfdata_len = len(elfdata_mapping)
if elfdata_len < sh_offset:
elfdata_mapping.extend(b'\x00' * (sh_offset - elfdata_len))
if section.header['sh_type'] == 'SHT_NOBITS':
elfdata_mapping.extend(b'\x00' * sh_size)
else:
elfdata_mapping.extend(self.getelfdata(sh_offset, sh_size))
return bytes(elfdata_mapping)
| 1.882813
| 2
|
tests/unit/test_utils.py
|
shkumagai/python-ndb
| 137
|
12781247
|
<reponame>shkumagai/python-ndb
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
try:
from unittest import mock
except ImportError: # pragma: NO PY3 COVER
import mock
import pytest
from google.cloud.ndb import utils
class Test_asbool:
@staticmethod
def test_None():
assert utils.asbool(None) is False
@staticmethod
def test_bool():
assert utils.asbool(True) is True
assert utils.asbool(False) is False
@staticmethod
def test_truthy_int():
assert utils.asbool(0) is False
assert utils.asbool(1) is True
@staticmethod
def test_truthy_string():
assert utils.asbool("Y") is True
assert utils.asbool("f") is False
def test_code_info():
with pytest.raises(NotImplementedError):
utils.code_info()
def test_decorator():
with pytest.raises(NotImplementedError):
utils.decorator()
def test_frame_info():
with pytest.raises(NotImplementedError):
utils.frame_info()
def test_func_info():
with pytest.raises(NotImplementedError):
utils.func_info()
def test_gen_info():
with pytest.raises(NotImplementedError):
utils.gen_info()
def test_get_stack():
with pytest.raises(NotImplementedError):
utils.get_stack()
class Test_logging_debug:
@staticmethod
@mock.patch("google.cloud.ndb.utils.DEBUG", False)
def test_noop():
log = mock.Mock(spec=("debug",))
utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
log.debug.assert_not_called()
@staticmethod
@mock.patch("google.cloud.ndb.utils.DEBUG", True)
def test_log_it():
log = mock.Mock(spec=("debug",))
utils.logging_debug(log, "hello dad! {} {where}", "I'm", where="in jail")
log.debug.assert_called_once_with("hello dad! I'm in jail")
def test_positional():
@utils.positional(2)
def test_func(a=1, b=2, **kwargs):
return a, b
@utils.positional(1)
def test_func2(a=3, **kwargs):
return a
with pytest.raises(TypeError):
test_func(1, 2, 3)
with pytest.raises(TypeError):
test_func2(1, 2)
assert test_func(4, 5, x=0) == (4, 5)
assert test_func(6) == (6, 2)
assert test_func2(6) == 6
def test_keyword_only():
@utils.keyword_only(foo=1, bar=2, baz=3)
def test_kwonly(**kwargs):
return kwargs["foo"], kwargs["bar"], kwargs["baz"]
with pytest.raises(TypeError):
test_kwonly(faz=4)
assert test_kwonly() == (1, 2, 3)
assert test_kwonly(foo=3, bar=5, baz=7) == (3, 5, 7)
assert test_kwonly(baz=7) == (1, 2, 7)
def test_threading_local():
assert utils.threading_local is threading.local
def test_tweak_logging():
with pytest.raises(NotImplementedError):
utils.tweak_logging()
def test_wrapping():
with pytest.raises(NotImplementedError):
utils.wrapping()
| 2.03125
| 2
|
quark/network_strategy.py
|
Cerberus98/quark
| 0
|
12781248
|
<gh_stars>0
# Copyright 2013 Openstack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
quark_opts = [
cfg.StrOpt('default_net_strategy', default='{}',
help=_("Default network assignment strategy"))
]
CONF.register_opts(quark_opts, "QUARK")
class JSONStrategy(object):
def __init__(self, strategy=None):
self.subnet_strategy = {}
self.strategy = {}
self.load(strategy)
def load(self, strategy=None):
if not strategy:
self._compile_strategy(CONF.QUARK.default_net_strategy)
else:
self._compile_strategy(strategy)
def _compile_strategy(self, strategy):
self.strategy = json.loads(strategy)
for net_id, meta in self.strategy.iteritems():
for ip_version, subnet_id in meta["subnets"].iteritems():
self.subnet_strategy[subnet_id] = {"ip_version": ip_version,
"network_id": net_id}
def _split(self, func, resource_ids):
provider = []
tenant = []
for res_id in resource_ids:
if func(res_id):
provider.append(res_id)
else:
tenant.append(res_id)
return tenant, provider
def split_network_ids(self, net_ids):
return self._split(self.is_provider_network, net_ids)
def split_subnet_ids(self, subnet_ids):
return self._split(self.is_provider_subnet, subnet_ids)
def get_provider_networks(self):
return sorted(self.strategy.keys())
def get_provider_subnets(self):
return sorted(self.subnet_strategy.keys())
def get_provider_subnet_id(self, net_id, ip_version):
if net_id not in self.strategy:
return None
return self.strategy[net_id]["subnets"][str(ip_version)]
def get_network(self, net_id):
return self.strategy.get(net_id)
def is_provider_network(self, net_id):
return self.strategy.get(net_id) is not None
def is_provider_subnet(self, subnet_id):
return subnet_id in self.subnet_strategy
def subnet_ids_for_network(self, net_id):
if net_id in self.strategy:
subnets = self.strategy.get(net_id)["subnets"]
return [subnet_id for ip_version, subnet_id in subnets.iteritems()]
def get_network_for_subnet(self, subnet_id):
if subnet_id not in self.subnet_strategy:
return None
return self.subnet_strategy.get(subnet_id)["network_id"]
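# Example of the expected strategy JSON, inferred from _compile_strategy above
# (a hedged sketch, not an official sample configuration):
# '{"<provider-net-id>": {"subnets": {"4": "<ipv4-subnet-id>", "6": "<ipv6-subnet-id>"}}}'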
STRATEGY = JSONStrategy()
| 1.75
| 2
|
tests/test_level_output001.py
|
brianpm/level_maker
| 0
|
12781249
|
<gh_stars>0
from pathlib import Path
import json
from level_maker import makelev
def test_example_data_exists():
assert Path("../level_maker/cam3_levels_input.json").is_file()
def test_level_output001():
with open("../level_maker/cam3_levels_input.json") as f:
data = json.load(f)
        am, bm, ai, bi, lev, ilev = makelev(data['dps'], data['purmax'], data['regions'], print_out=False)
assert am.shape == bm.shape
| 2.4375
| 2
|
5-bit-manipulation/4-next-number/solution.py
|
andrenbrandao/cracking-the-coding-interview
| 0
|
12781250
|
"""
Problem:
5.4 Next Number: Given a positive integer, print the next smallest and the next largest number that
have the same number of 1 bits in their binary representation.
Hints: #147, #175, #242, #312, #339, #358, #375, #390
--
Questions:
- Are we considering only non-negative numbers? Or should we also consider negative ones?
Let's consider only non-negative
- Are we dealing with 32 or 64 bits?
64 bits
--
Algorithm:
01111...1111: largest number
00000...0001: smallest
00000...0000: zero
Examples:
1: 00000...0001
To get the largest number, we could shift the 1 to the left until we get the largest one.
The smallest is itself, because we cannot shift to the right without removing any ones.
63 0
010000.00000
We have to shift the one until the second last bit. The last bit would turn the number
into negative.
What are the max and min numbers we can have?
max = 011111.11111
MAX_NUMBER = 2^62+2^61+2^60..+2^0 = 2^63 - 1
MIN_NUMBER = 1
So, if the input is MAX_NUMBER, the answer would be (MAX_NUMBER, MAX_NUMBER)
If the input is MIN_NUMBER, the answer would be (2^62, MIN_NUMBER)
Let's think about another example:
5: 0000...0101
Here, instead of shifting the bits to the right, we actually would need to count
the numbers of ones, and then position them in the beginning. So, our previous
thought was incorrect.
Next option:
- Count the number of 1 bits
- Position them in the least significant bits to the get the minimum value
- Position them in the most significant bits (without the last) to get the max value
- Return [smallest, largest]
Time Complexity: O(n) being n the number of bits
-- How can we position the bits on the most significant bits?
We can use the min_number shifted to the left.
00000..00000
00000..00111 -> min_number
------------
01110..00000 -> max_number
Total: 64 bits
000011
011000
We want to shift total_bits - count_1s - 1.
0000000011
0110000000
total_bits = 10
count_1s = 2
shift = 7
So, shift min_number to the left (64 - count_1s - 1) times.
---
Exercise Statistics
Elapsed Time: 32min
TODO: The book talks about optimal solutions to the problem. Understand it.
"""
def next_number(number):
count_1s = count_1_bits(number)
min_number = 0
count = count_1s
while count > 0:
min_number = min_number << 1
min_number += 1
count -= 1
shift_times = 64 - count_1s - 1
max_number = min_number
while shift_times > 0:
max_number = max_number << 1
shift_times -= 1
return [min_number, max_number]
def count_1_bits(number):
count = 0
while number != 0:
if number & 1:
count += 1
number = number >> 1
return count
def test(number, expected_answer):
answer = next_number(number)
if answer != expected_answer:
raise Exception(
f"Answer {answer} is wrong. Expected answer is {expected_answer}"
)
if __name__ == "__main__":
test(1, [1, 2 ** 62])
# 0000...111
# 0111...000
test(7, [7, 2 ** 62 + 2 ** 61 + 2 ** 60])
# 0000...101 - 5
# 0000...011 - min
# 0110...000 - max
test(5, [3, 2 ** 62 + 2 ** 61])
print("All tests passed!")
| 3.53125
| 4
|