Dataset schema (column, dtype, min, max):
    max_stars_repo_path    stringlengths    3       269
    max_stars_repo_name    stringlengths    4       119
    max_stars_count        int64            0       191k
    id                     stringlengths    1       7
    content                stringlengths    6       1.05M
    score                  float64          0.23    5.13
    int_score              int64            0       5
kratos/mpi/tests/test_data_communicator_factory.py
lkusch/Kratos
778
12200
from KratosMultiphysics import ParallelEnvironment, IsDistributedRun if IsDistributedRun(): from KratosMultiphysics.mpi import DataCommunicatorFactory import KratosMultiphysics.KratosUnittest as UnitTest import math class TestDataCommunicatorFactory(UnitTest.TestCase): def setUp(self): self.registered_comms = [] self.default_data_communicator = ParallelEnvironment.GetDefaultDataCommunicator() self.original_default = ParallelEnvironment.GetDefaultDataCommunicatorName() def tearDown(self): if len(self.registered_comms) > 0: ParallelEnvironment.SetDefaultDataCommunicator(self.original_default) for comm_name in self.registered_comms: ParallelEnvironment.UnregisterDataCommunicator(comm_name) def markForCleanUp(self,comm_name): self.registered_comms.append(comm_name) @UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.") def testDataCommunicatorDuplication(self): duplicate_comm = DataCommunicatorFactory.DuplicateAndRegister(self.default_data_communicator, "Duplicate") self.markForCleanUp("Duplicate") # to clean up during tearDown self.assertEqual(duplicate_comm.Rank(), self.default_data_communicator.Rank()) self.assertEqual(duplicate_comm.Size(), self.default_data_communicator.Size()) @UnitTest.skipUnless(IsDistributedRun(), "Test is distributed.") def testDataCommunicatorSplit(self): rank = self.default_data_communicator.Rank() size = self.default_data_communicator.Size() split_comm = DataCommunicatorFactory.SplitAndRegister(self.default_data_communicator, rank % 2, 0, "EvenOdd") self.markForCleanUp("EvenOdd") # to clean up during tearDown expected_rank = rank // 2 if rank % 2 == 0: expected_size = math.ceil(size/2) else: expected_size = math.floor(size/2) self.assertEqual(split_comm.Rank(), expected_rank) self.assertEqual(split_comm.Size(), expected_size) @UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 1, "Test requires at least two ranks.") def testDataCommunicatorCreateFromRange(self): rank = self.default_data_communicator.Rank() size = self.default_data_communicator.Size() # Create a communicator using all ranks except the first ranks = [i for i in range(1,size)] range_comm = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, ranks, "AllExceptFirst") self.markForCleanUp("AllExceptFirst") # to clean up during tearDown if rank == 0: self.assertTrue(range_comm.IsNullOnThisRank()) self.assertFalse(range_comm.IsDefinedOnThisRank()) else: self.assertEqual(range_comm.Rank(), rank-1) self.assertEqual(range_comm.Size(), size-1) @UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.") def testDataCommunicatorCreateUnion(self): rank = self.default_data_communicator.Rank() size = self.default_data_communicator.Size() # Create a communicator using all ranks except the first all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst") self.markForCleanUp("AllExceptFirst") # to clean up during tearDown all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast") self.markForCleanUp("AllExceptLast") # to clean up during tearDown # Create union communicator (should contain all ranks) union_comm = DataCommunicatorFactory.CreateUnionAndRegister(all_except_first, all_except_last, self.default_data_communicator, "Union") self.markForCleanUp("Union") # to clean up during tearDown 
self.assertFalse(union_comm.IsNullOnThisRank()) self.assertEqual(union_comm.Rank(), rank) self.assertEqual(union_comm.Size(), size) @UnitTest.skipUnless(IsDistributedRun() and ParallelEnvironment.GetDefaultSize() > 2, "Test requires at least three ranks.") def testDataCommunicatorCreateIntersection(self): rank = self.default_data_communicator.Rank() size = self.default_data_communicator.Size() # Create a communicator using all ranks except the first all_except_first = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(1,size)], "AllExceptFirst") self.markForCleanUp("AllExceptFirst") # to clean up during tearDown all_except_last = DataCommunicatorFactory.CreateFromRanksAndRegister(self.default_data_communicator, [i for i in range(0,size-1)], "AllExceptLast") self.markForCleanUp("AllExceptLast") # to clean up during tearDown intersection_comm = DataCommunicatorFactory.CreateIntersectionAndRegister( all_except_first, all_except_last, self.default_data_communicator, "Intersection") self.markForCleanUp("Intersection") # to clean up during tearDown if rank == 0 or rank == size - 1: # The first and last ranks do not participate in the intersection communicator self.assertTrue(intersection_comm.IsNullOnThisRank()) else: self.assertEqual(intersection_comm.Rank(), rank - 1 ) self.assertEqual(intersection_comm.Size(), size - 2 ) if __name__ == "__main__": UnitTest.main()
2.46875
2
example/usage/example_kate.py
vodka2/vkaudiotoken-python
32
12201
from __future__ import print_function

try:
    import vkaudiotoken
except ImportError:
    import path_hack

from vkaudiotoken import supported_clients
import sys
import requests
import json

token = sys.argv[1]
user_agent = supported_clients.KATE.user_agent

sess = requests.session()
sess.headers.update({'User-Agent': user_agent})

def prettyprint(result):
    print(json.dumps(json.loads(result.content.decode('utf-8')), indent=2))

prettyprint(sess.get(
    "https://api.vk.com/method/audio.getById",
    params=[('access_token', token),
            ('audios', '371745461_456289486,-41489995_202246189'),
            ('v', '5.95')]
))
2.078125
2
create_order.py
behnam71/Crypto_P
0
12202
# -*- coding: utf-8 -*-

import os
import sys
from pprint import pprint

root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')

import ccxt  # noqa: E402

exchange = ccxt.binance({
    'apiKey': '<KEY>',
    'secret': '<KEY>',
    'enableRateLimit': True,
})

exchange.urls['api'] = exchange.urls['test']  # use the testnet

symbol = 'BTC/USDT'; type = 'market'  # or limit
amount = 0.01; price = None; side = 'buy'  # or sell

# extra params and overrides if needed
params = {
    'test': True,  # test if it's valid, but don't actually place it
}

order = exchange.create_order(symbol, type, side, amount, price)
pprint(order)
2.390625
2
tools/testrunner/outproc/message.py
LancerWang001/v8
20,995
12203
# Copyright 2018 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import itertools import os import re from . import base class OutProc(base.ExpectedOutProc): def __init__(self, expected_outcomes, basepath, expected_fail, expected_filename, regenerate_expected_files): super(OutProc, self).__init__(expected_outcomes, expected_filename, regenerate_expected_files) self._basepath = basepath self._expected_fail = expected_fail def _is_failure_output(self, output): fail = output.exit_code != 0 if fail != self._expected_fail: return True expected_lines = [] # Can't use utils.ReadLinesFrom() here because it strips whitespace. with open(self._basepath + '.out') as f: for line in f: if line.startswith("#") or not line.strip(): continue expected_lines.append(line) raw_lines = output.stdout.splitlines() actual_lines = [ s for s in raw_lines if not self._ignore_line(s) ] if len(expected_lines) != len(actual_lines): return True # Try .js first, and fall back to .mjs. # TODO(v8:9406): clean this up by never separating the path from # the extension in the first place. base_path = self._basepath + '.js' if not os.path.exists(base_path): base_path = self._basepath + '.mjs' env = { 'basename': os.path.basename(base_path), } for (expected, actual) in itertools.izip_longest( expected_lines, actual_lines, fillvalue=''): pattern = re.escape(expected.rstrip() % env) pattern = pattern.replace('\\*', '.*') pattern = pattern.replace('\\{NUMBER\\}', '\d+(?:\.\d*)?') pattern = '^%s$' % pattern if not re.match(pattern, actual): return True return False def _ignore_line(self, string): """Ignore empty lines, valgrind output, Android output.""" return ( not string or not string.strip() or string.startswith("==") or string.startswith("**") or string.startswith("ANDROID") or # Android linker warning. string.startswith('WARNING: linker:') )
2.125
2
gemtown/users/urls.py
doramong0926/gemtown
0
12204
from django.urls import path
from . import views

app_name = "users"

urlpatterns = [
    path("all/", view=views.UserList.as_view(), name="all_user"),
    path("<int:user_id>/password/", view=views.ChangePassword.as_view(), name="change password"),
    path("<int:user_id>/follow/", view=views.FollowUser.as_view(), name="follow user"),
    path("<int:user_id>/unfollow/", view=views.UnfollowUser.as_view(), name="unfollow user"),
    path("<int:user_id>/", view=views.UserFeed.as_view(), name="user_detail_infomation"),
    path("login/facebook/", view=views.FacebookLogin.as_view(), name="fb_login"),
]
1.960938
2
Packs/Thycotic/Integrations/Thycotic/Thycotic_test.py
diCagri/content
799
12205
import pytest
from Thycotic import Client, \
    secret_password_get_command, secret_username_get_command, \
    secret_get_command, secret_password_update_command, secret_checkout_command, secret_checkin_command, \
    secret_delete_command, folder_create_command, folder_delete_command, folder_update_command

from test_data.context import GET_PASSWORD_BY_ID_CONTEXT, GET_USERNAME_BY_ID_CONTENT, \
    SECRET_GET_CONTENT, SECRET_PASSWORD_UPDATE_CONTEXT, SECRET_CHECKOUT_CONTEXT, SECRET_CHECKIN_CONTEXT, \
    SECRET_DELETE_CONTEXT, FOLDER_CREATE_CONTEXT, FOLDER_DELETE_CONTEXT, FOLDER_UPDATE_CONTEXT
from test_data.http_responses import GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_RAW_RESPONSE, \
    SECRET_GET_RAW_RESPONSE, SECRET_PASSWORD_UPDATE_RAW_RESPONSE, SECRET_CHECKOUT_RAW_RESPONSE, \
    SECRET_CHECKIN_RAW_RESPONSE, SECRET_DELETE_RAW_RESPONSE, FOLDER_CREATE_RAW_RESPONSE, FOLDER_DELETE_RAW_RESPONSE, \
    FOLDER_UPDATE_RAW_RESPONSE

GET_PASSWORD_BY_ID_ARGS = {"secret_id": "4"}
GET_USERNAME_BY_ID_ARGS = {"secret_id": "4"}
SECRET_GET_ARGS = {"secret_id": "4"}
SECRET_PASSWORD_UPDATE_ARGS = {"secret_id": "4", "newpassword": "<PASSWORD>"}
SECRET_CHECKOUT_ARGS = {"secret_id": "4"}
SECRET_CHECKIN_ARGS = {"secret_id": "4"}
SECRET_DELETE_ARGS = {"id": "9"}
FOLDER_CREATE_ARGS = {"folderName": "xsoarFolderTest3", "folderTypeId": "1", "parentFolderId": "3"}
FOLDER_DELETE_ARGS = {"folder_id": "9"}
FOLDER_UPDATE_ARGS = {"id": "12", "folderName": "xsoarTF3New"}


@pytest.mark.parametrize('command, args, http_response, context', [
    (secret_password_get_command, GET_PASSWORD_BY_ID_ARGS, GET_PASSWORD_BY_ID_RAW_RESPONSE, GET_PASSWORD_BY_ID_CONTEXT),
    (secret_username_get_command, GET_USERNAME_BY_ID_ARGS, GET_USERNAME_BY_ID_RAW_RESPONSE, GET_USERNAME_BY_ID_CONTENT),
    (secret_get_command, SECRET_GET_ARGS, SECRET_GET_RAW_RESPONSE, SECRET_GET_CONTENT),
    (secret_password_update_command, SECRET_PASSWORD_UPDATE_ARGS, SECRET_PASSWORD_UPDATE_RAW_RESPONSE,
     SECRET_PASSWORD_UPDATE_CONTEXT),
    (secret_checkout_command, SECRET_CHECKOUT_ARGS, SECRET_CHECKOUT_RAW_RESPONSE, SECRET_CHECKOUT_CONTEXT),
    (secret_checkin_command, SECRET_CHECKIN_ARGS, SECRET_CHECKIN_RAW_RESPONSE, SECRET_CHECKIN_CONTEXT),
    (secret_delete_command, SECRET_DELETE_ARGS, SECRET_DELETE_RAW_RESPONSE, SECRET_DELETE_CONTEXT),
    (folder_create_command, FOLDER_CREATE_ARGS, FOLDER_CREATE_RAW_RESPONSE, FOLDER_CREATE_CONTEXT),
    (folder_delete_command, FOLDER_DELETE_ARGS, FOLDER_DELETE_RAW_RESPONSE, FOLDER_DELETE_CONTEXT),
    (folder_update_command, FOLDER_UPDATE_ARGS, FOLDER_UPDATE_RAW_RESPONSE, FOLDER_UPDATE_CONTEXT)
])
def test_thycotic_commands(command, args, http_response, context, mocker):
    mocker.patch.object(Client, '_generate_token')
    client = Client(server_url="https://thss.softwarium.net/SecretServer", username="xsoar1",
                    password="<PASSWORD>", proxy=False, verify=False)
    mocker.patch.object(Client, '_http_request', return_value=http_response)

    outputs = command(client, **args)
    results = outputs.to_context()

    assert results.get("EntryContext") == context
2.078125
2
xml_to_csv.py
bhavdeepsingh33/blood-cell-detection
0
12206
import os
import glob
import pandas as pd
import xml.etree.ElementTree as ET


def xml_to_csv(path):
    xml_list = []
    for xml_file in glob.glob(path + '/*.xml'):
        tree = ET.parse(xml_file)
        root = tree.getroot()
        for member in root.findall('object'):
            value = (root.find('filename').text,
                     member[0].text,
                     int(member[4][0].text),
                     int(member[4][2].text),
                     int(member[4][1].text),
                     int(member[4][3].text)
                     )
            xml_list.append(value)
    column_name = ['image_names', 'cell_type', 'xmin', 'xmax', 'ymin', 'ymax']
    xml_df = pd.DataFrame(xml_list, columns=column_name)
    return xml_df


def main():
    for folder in ['train', 'test']:
        image_path = os.path.join(os.getcwd(), ('images/' + folder))
        xml_df = xml_to_csv(image_path)
        xml_df.to_csv(('images/' + folder + '_labels.csv'), index=None)
        print('Successfully converted xml to csv.')


main()
3.234375
3
src/pynnet/test.py
RalphMao/kaldi-pynnet
0
12207
import _nnet
import numpy as np
import IPython

net = _nnet.Nnet()
net.read('/home/maohz12/online_50h_Tsinghua/exp_train_50h/lstm_karel_bak/nnet/nnet_iter14_learnrate7.8125e-07_tr1.2687_cv1.6941')

# Test1
blobs = net.layers[0].get_params()

x = blobs[1].data.flatten()
x_test = np.fromfile('test/1.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5

x = blobs[4].data.flatten()
x_test = np.fromfile('test/4.bin', 'f')
assert np.sum(abs(x-x_test)) < 1e-5

blobs[1].data[:] = np.arange(blobs[1].data.size).reshape(blobs[1].data.shape)
blobs[4].data[:] = np.arange(blobs[4].data.size).reshape(blobs[4].data.shape)
net.layers[0].set_params(blobs)
net.write('test/test_nnet', 0)

pointer, read_only_flag = blobs[1].data.__array_interface__['data']

# Test 2
data_copy = blobs[1].data.copy()
del net
pointer, read_only_flag = blobs[1].data.__array_interface__['data']
assert np.sum(abs(blobs[1].data - data_copy)) < 1e-5

# Test 3
net = _nnet.Nnet()
net.read('test/test_nnet')
blobs_new = net.layers[0].get_params()

x = blobs[1].data
x_test = blobs_new[1].data
assert np.sum(abs(x-x_test)) < 1e-5

x = blobs[4].data
x_test = blobs_new[4].data
assert np.sum(abs(x-x_test)) < 1e-5

print("Test passed")
2.140625
2
collegiate-explorer-admin/cc_admin/cc_admin/test.py
Chit-Chaat/Collegiate_Explorer_APP
3
12208
<gh_stars>1-10
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__date__ = '10/28/2020 4:52 PM'

# import re
#
#
# def format_qs_score(score_str):
#     """
#     help you generate a qs score
#     1 - 100 : 5
#     141-200 : 4
#     =100: 4
#     N/A 3
#     :param score_str:
#     :return:
#     """
#     score = 3
#     if not score_str or score_str != "N/A":
#         try:
#             parts = int(list(filter(lambda val: val,
#                                     list(re.split('-|=', score_str))))[0])
#         except:
#             return 3
#         score = 5 - int(parts / 100)
#         if score > 5 or score < 1:
#             return 3
#     return score
#
#
# print(format_qs_score("=100"))
# # print(list(filter(lambda val: val, re.split('-|=', "=100"))))

# import csv
# import numpy as np
# import requests
#
# with open('./college_explorer.csv', newline='', encoding='utf-8') as file:
#     data = list(csv.reader(file))
#     data = np.array(data)
#     img_list = data[1:, 33].tolist()
#     # img_list = list(filter(lambda url: url != 'N/A', img_list))
#
#
# for url in img_list:
#     response = requests.get(url)
#     if response.status_code == 200:
#         school_name = url.split('/')[-1].split('_')[0]
#         with open("./images/" + school_name + ".jpg", 'wb') as f:
#             f.write(response.content)
2.984375
3
djangocms_translations/utils.py
divio/djangocms-translations
3
12209
<reponame>divio/djangocms-translations # -*- coding: utf-8 -*- import json from itertools import chain from django.conf import settings from django.contrib.sites.models import Site from django.core.exceptions import ObjectDoesNotExist from django.db.models import BooleanField from django.forms import modelform_factory from django.utils.lru_cache import lru_cache from django.utils.safestring import mark_safe from django.utils.translation import get_language_info from djangocms_transfer.utils import get_plugin_class, get_plugin_model from pygments import highlight from pygments.formatters import HtmlFormatter from pygments.lexers import JsonLexer from yurl import URL from .conf import TRANSLATIONS_CONF try: from urllib.parse import urljoin except ImportError: from urlparse import urljoin USE_HTTPS = getattr(settings, 'URLS_USE_HTTPS', False) def get_plugin_form_class(plugin_type, fields): plugin_class = get_plugin_class(plugin_type) plugin_fields = chain( plugin_class.model._meta.concrete_fields, plugin_class.model._meta.private_fields, plugin_class.model._meta.many_to_many, ) plugin_fields_disabled = [ field.name for field in plugin_fields if not getattr(field, 'editable', False) ] plugin_form_class = modelform_factory( plugin_class.model, fields=fields, exclude=plugin_fields_disabled, ) return plugin_form_class def get_plugin_form(plugin_type, data): _data = data.copy() plugin_form_class = get_plugin_form_class(plugin_type, fields=data.keys()) multi_value_fields = [ (name, field) for name, field in plugin_form_class.base_fields.items() if hasattr(field.widget, 'decompress') and name in data ] for name, field in multi_value_fields: # The value used on the form data is compressed, # and the form contains multi-value fields which expect # a decompressed value. compressed = data[name] try: decompressed = field.widget.decompress(compressed) except ObjectDoesNotExist: break for pos, value in enumerate(decompressed): _data['{}_{}'.format(name, pos)] = value return plugin_form_class(_data) def add_domain(url, domain=None): # add the domain to this url. 
if domain is None: domain = Site.objects.get_current().domain url = URL(url) if USE_HTTPS: url = url.replace(scheme='https') else: url = url.replace(scheme='http') return str(url.replace(host=domain)) def pretty_data(data, LexerClass): formatter = HtmlFormatter(style='colorful') data = highlight(data, LexerClass(), formatter) style = '<style>' + formatter.get_style_defs() + '</style><br>' return mark_safe(style + data) def pretty_json(data): data = json.dumps(json.loads(data), sort_keys=True, indent=2) return pretty_data(data, JsonLexer) @lru_cache(maxsize=None) def get_translatable_fields(plugin_type): conf = TRANSLATIONS_CONF.get(plugin_type, {}) if 'fields' in conf: fields = conf['fields'] else: model = get_plugin_model(plugin_type) opts = model._meta.concrete_model._meta fields = opts.local_fields fields = [ field.name for field in fields if ( not field.is_relation and not field.primary_key and not field.choices and not isinstance(field, BooleanField) ) ] excluded = conf.get('excluded_fields', []) return set(fields).difference(set(excluded)) @lru_cache(maxsize=None) def get_text_field_child_label(plugin_type): return settings.DJANGOCMS_TRANSLATIONS_CONF.get(plugin_type, {}).get('text_field_child_label') def get_language_name(lang_code): info = get_language_info(lang_code) if info['code'] == lang_code: return info['name'] try: return dict(settings.LANGUAGES)[lang_code] except KeyError: # fallback to known name return info['name'] def get_page_url(page, language, is_https=False): return urljoin( 'http{}://{}'.format( 's' if is_https else '', page.node.site.domain, ), page.get_absolute_url(language=language), )
1.90625
2
bin/render_ingress.py
phplaboratory/madcore-ai
0
12210
import sys, os, json, jinja2, redis
from jinja2 import Template

r_server = redis.StrictRedis('127.0.0.1', db=2)
i_key = "owner-info"

json_data = r_server.get(i_key)
if json_data is not None:
    data = json.loads(json_data)
    main_domain = data['Hostname']
    fqdn = sys.argv[1] + ".ext." + main_domain

    config_template = open('/opt/madcore/bin/templates/ingress.template').read()
    template = Template(config_template)
    config = (template.render(HOST=fqdn,
                              SERVICE_NAME=sys.argv[2],
                              SERVICE_PORT=sys.argv[3],
                              NAMESPACE=sys.argv[4]))
    open("/opt/ingress/" + sys.argv[2] + ".yaml", "w").write(config)
2.203125
2
Linear_Insertion_Sort.py
toppassion/python-master-app
0
12211
def Linear_Search(Test_arr, val):
    index = 0
    for i in range(len(Test_arr)):
        if val > Test_arr[i]:
            index = i+1
    return index


def Insertion_Sort(Test_arr):
    for i in range(1, len(Test_arr)):
        val = Test_arr[i]
        j = Linear_Search(Test_arr[:i], val)
        Test_arr.pop(i)
        Test_arr.insert(j, val)
    return Test_arr


if __name__ == "__main__":
    Test_list = input("Enter the list of Numbers: ").split()
    Test_list = [int(i) for i in Test_list]
    print(f"Linear Insertion Sort: {Insertion_Sort(Test_list)}")
3.984375
4
scripts/tests/snapshots/snap_keywords_test.py
Duroktar/Wolf
105
12212
# -*- coding: utf-8 -*-
# snapshottest: v1 - https://goo.gl/zC4yUc
from __future__ import unicode_literals

from snapshottest import Snapshot


snapshots = Snapshot()

snapshots['test_keywords 1'] = '[{"lineno": 7, "source": [" a\\n"], "value": "1"}, {"lineno": 7, "source": [" a\\n"], "value": "2"}, {"lineno": 7, "source": [" a\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "0"}, {"lineno": 13, "source": [" i\\n"], "value": "1"}, {"lineno": 13, "source": [" i\\n"], "value": "2"}, {"lineno": 13, "source": [" i\\n"], "value": "3"}, {"lineno": 13, "source": [" i\\n"], "value": "4"}]'
1.992188
2
pyside/lesson_10_main.py
LueyEscargot/pyGuiTest
0
12213
<filename>pyside/lesson_10_main.py
import sys

from PySide2.QtWidgets import QApplication, QMainWindow
from PySide2.QtCore import QFile

from lesson_10_mainWidget import Ui_MainWindow


class MainWindow(QMainWindow):
    def __init__(self):
        super(MainWindow, self).__init__()
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)


if __name__ == "__main__":
    app = QApplication(sys.argv)

    window = MainWindow()
    window.show()

    sys.exit(app.exec_())
2.453125
2
test/test_everything.py
jameschapman19/Eigengame
0
12214
<reponame>jameschapman19/Eigengame<filename>test/test_everything.py
import jax.numpy as jnp
import numpy as np
from jax import random

from algorithms import Game, GHA, Oja, Krasulina, Numpy


def test_pca():
    """
    At the moment just checks they all run.

    Returns
    -------

    """
    n = 10
    p = 2
    n_components = 2
    batch_size = 2
    epochs = 10
    key = random.PRNGKey(0)
    X = random.normal(key, (n, p))
    X = X / jnp.linalg.norm(X, axis=0)
    numpy = Numpy(n_components=n_components).fit(X)
    game = Game(
        n_components=n_components, batch_size=batch_size, epochs=epochs
    ).fit(X)
    gha = GHA(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
        X
    )
    oja = Oja(n_components=n_components, batch_size=batch_size, epochs=epochs).fit(
        X
    )
    krasulina = Krasulina(
        n_components=n_components, batch_size=batch_size, epochs=epochs
    ).fit(X)
    assert (
        np.testing.assert_almost_equal(
            [
                game.score(X),
                gha.score(X),
                oja.score(X),
                krasulina.score(X),
            ],
            numpy.score(X),
            decimal=0,
        )
        is None
    )
2.5
2
script/python/result_get.py
yztong/LeNet_RTL
29
12215
import numpy as np
from radix import radixConvert

c = radixConvert()

a = np.load("../../data/5/layer4.npy")
print(a.shape)
a = a*128
a = np.around(a).astype(np.int16)
print(a)

a = np.load('../../data/6.npy')
a = a*128
a = np.around(a).astype(np.int8)
print(a.shape)
for i in range(84):
    print(i)
    print(a[i])

'''
a = a*128
print(a)
for i in range(a.shape[0]):
    for j in range(a.shape[1]):
        if a[i][j] > 127:
            a[i][j] = 127
a = np.around(a).astype(np.int8)
print(a)
print(a[4][17])

weight_file = open('f1_rom.coe', 'w')
weight_file.write('MEMORY_INITIALIZATION_RADIX=2;\n')
weight_file.write('MEMORY_INITIALIZATION_VECTOR=\n')
for i in range(32):
    for j in range(32):
        if(i < 2 or i > 29):
            weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
        elif(j < 2 or j > 29):
            weight_file.write(c.dec2Bincmpmt('0', 8)+';\n')
        else:
            weight_file.write(c.dec2Bincmpmt(str(a[i-2][j-2]), 8)+',\n')
'''
2.53125
3
anygraph/wrapper.py
gemerden/anygraph
10
12216
class Wrapper(object):

    wrapper_classes = {}

    @classmethod
    def wrap(cls, obj):
        return cls(obj)

    def __init__(self, wrapped):
        self.__dict__['wrapped'] = wrapped

    def __getattr__(self, name):
        return getattr(self.wrapped, name)

    def __setattr__(self, name, value):
        setattr(self.wrapped, name, value)

    def __delattr__(self, name):
        delattr(self.wrapped, name)

    def __str__(self):
        return str(self.wrapped)

    def __repr__(self):
        return repr(self.wrapped)
3.4375
3
saifooler/classifiers/image_net_classifier.py
sailab-code/SAIFooler
0
12217
from saifooler.classifiers.classifier import Classifier
import torch
import json
import os


class ImageNetClassifier(Classifier):
    def __init__(self, model, *args, **kwargs):
        super().__init__(model, *args, **kwargs)
        self.std = torch.tensor([0.229, 0.224, 0.225], device=self.device)
        self.mean = torch.tensor([0.485, 0.456, 0.406], device=self.device)

        class_index_path = os.path.join(
            os.path.dirname(__file__),
            "imagenet_class_index.json"
        )

        self.class_dict = {
            int(key): val[1]
            for key, val in json.load(open(class_index_path)).items()
        }

    def to(self, device):
        super().to(device)
        self.mean = self.mean.to(device)
        self.std = self.std.to(device)

    def get_class_label(self, class_id: int):
        return self.class_dict[class_id]

    def normalize_image(self, image):
        """
        :param image: tensor of shape (N, W, H, C)
        :return: image normalized for ImageNet and permuted in the shape (N, C, W, H)
            which is the shape used by torchvision models
        """
        image = (image - self.mean) / self.std
        image = image.permute(0, 3, 1, 2)
        return image
2.765625
3
bertsification-multi-lstm.py
linhd-postdata/alberti
0
12218
#!/usr/bin/env python # coding: utf-8 # conda install pytorch>=1.6 cudatoolkit=10.2 -c pytorch # wandb login XXX import json import logging import os import re import sklearn import time from itertools import product import numpy as np import pandas as pd import wandb #from IPython import get_ipython from keras.preprocessing.text import Tokenizer from keras.preprocessing.sequence import pad_sequences from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation from keras.layers import Bidirectional, GlobalMaxPool1D from keras.models import Model from keras import initializers, regularizers, constraints, optimizers, layers from simpletransformers.classification import MultiLabelClassificationModel from sklearn.model_selection import train_test_split truthy_values = ("true", "1", "y", "yes") TAG = os.environ.get("TAG", "bertsification") LANGS = [lang.strip() for lang in os.environ.get("LANGS", "es,ge,en,multi").lower().split(",")] MODELNAMES = os.environ.get("MODELNAMES") EVAL = os.environ.get("EVAL", "True").lower() in truthy_values OVERWRITE = os.environ.get("OVERWRITE", "False").lower() in truthy_values logging.basicConfig(level=logging.INFO, filename=time.strftime("models/{}-%Y-%m-%dT%H%M%S.log".format(TAG))) with open('pid', 'w') as pid: pid.write(str(os.getpid())) logging.info("Experiment '{}' on {}, (eval = {}, pid = {})".format( TAG, LANGS, str(EVAL), str(os.getpid()), )) # SimpleTransformers (based on HuggingFace/Transformers) for Multilingual Scansion # We will be using `simpletransformers`, a wrapper of `huggingface/transformers` to fine-tune different BERT-based and other architecture models with support for Spanish. # Utils def clean_text(string): output = string.strip() # replacements = (("“", '"'), ("”", '"'), ("//", ""), ("«", '"'), ("»",'"')) replacements = ( ("“", ''), ("”", ''), ("//", ""), ("«", ''), ("»",''), (",", ''), (";", ''), (".", ''), # ("?", ''), ("¿", ''), ("¡", ''), ("!", ''), ("-", ' '), ) for replacement in replacements: output = output.replace(*replacement) # Any sequence of two or more spaces should be converted into one space output = re.sub(r'(?is)\s+', ' ', output) return output.strip() def metric2binary(meter, pad=11): return ([1 if syllable == "+" else 0 for syllable in meter] + [0] * (11 - len(meter)))[:pad] def label2metric(label): return "".join("+" if l else "-" for l in label) def flat_accuracy(preds, labels): pred_flat = np.argmax(preds, axis=1).flatten() labels_flat = labels.flatten() return np.sum(pred_flat == labels_flat) / len(labels_flat) # Spanish # if not os.path.isfile("adso100.json"): # get_ipython().system("averell export adso100 --filename adso100.json") # if not os.path.isfile("adso.json"): # get_ipython().system("averell export adso --filename adso.json") es_test = (pd .read_json(open("adso100.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), length=lambda x: x["metrical_pattern"].str.len() ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter"}) ) es_test = es_test[es_test["length"] == 11] es = (pd .read_json(open("adso.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), length=lambda x: x["metrical_pattern"].str.len() ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter"}) ) es = es[~es["text"].isin(es_test["text"])][es["length"] == 11] es["labels"] = 
es.meter.apply(metric2binary) es_train, es_eval = train_test_split( es[["text", "labels"]], test_size=0.25, random_state=42) logging.info("Spanish") logging.info("- Lines: {} train, {} eval, {} test".format(es_train.shape[0], es_eval.shape[0], es_test.shape[0])) # English en_test = (pd .read_csv("4b4v_prosodic_meter.csv") .assign( text=lambda x: x["text"].apply(clean_text), length=lambda x: x["meter"].str.len() ) .drop_duplicates("text") .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"}) ) en_test = en_test.query("length in (5,6,7,8,9,10,11)") # if not os.path.isfile("ecpa.json"): # get_ipython().system("averell export ecpa --filename ecpa.json") en = (pd .read_json(open("ecpa.json")) .query("manually_checked == True")[["line_text", "metrical_pattern"]] .assign( line_text=lambda x: x["line_text"].apply(clean_text), metrical_pattern=lambda x: x["metrical_pattern"].str.replace("|", "").str.replace("(", "").str.replace(")", "") ) .assign( length=lambda x: x["metrical_pattern"].str.len(), ) .drop_duplicates("line_text") .rename(columns={"line_text": "text", "metrical_pattern": "meter", "prosodic_meter": "sota"}) ) en = en[~en["text"].isin(en_test["text"])].query("length in (5,6,7,8,9,10,11)") en["labels"] = en.meter.apply(metric2binary) en_train, en_eval = train_test_split( en[["text", "labels"]], test_size=0.25, random_state=42) logging.info("English") logging.info("- Lines: {} train, {} eval, {} test".format(en_train.shape[0], en_eval.shape[0], en_test.shape[0])) # sota en_sota = sum(en_test.meter == en_test.sota) / en_test.meter.size # German ge = (pd .read_csv("po-emo-metricalizer.csv") .rename(columns={"verse": "text", "annotated_pattern": "meter", "metricalizer_pattern": "sota"}) .assign( text=lambda x: x["text"].apply(clean_text), length=lambda x: x["meter"].str.len() ) .drop_duplicates("text") .query("length in (5, 6, 7, 8, 9, 10, 11)") ) ge["labels"] = ge.meter.apply(metric2binary) ge_train_eval, ge_test = train_test_split(ge, test_size=0.15, random_state=42) ge_train, ge_eval = train_test_split( ge_train_eval[["text", "labels"]], test_size=0.176, random_state=42) logging.info("German") logging.info("- Lines: {} train, {} eval, {} test".format(ge_train.shape[0], ge_eval.shape[0], ge_test.shape[0])) # sota ge_sota = sum(ge_test.meter == ge_test.sota) / ge_test.meter.size # training # Multilingual inputs # - bert bert-base-multilingual-cased # - distilbert distilbert-base-multilingual-cased # - xlmroberta, xlm-roberta-base # - xlmroberta, xlm-roberta-large # Only English # - roberta roberta-base # - roberta roberta-large # - albert albert-xxlarge-v2 # You can set class weights by using the optional weight argument models = ( # ("xlnet", "xlnet-base-cased"), ("bert", "bert-base-multilingual-cased"), ("distilbert", "distilbert-base-multilingual-cased"), ("roberta", "roberta-base"), ("roberta", "roberta-large"), ("xlmroberta", "xlm-roberta-base"), ("xlmroberta", "xlm-roberta-large"), ("electra", "google/electra-base-discriminator"), ("albert", "albert-base-v2"), ("albert", "albert-large-v2"), ) if MODELNAMES: models = [list(map(str.strip, modelname.split(","))) for modelname in MODELNAMES.split(";")] langs = LANGS or ("es", "ge", "en", "multi") for lang, (model_type, model_name) in product(langs, models): model_output = 'models/{}-{}-{}-{}'.format(TAG, lang, model_type, model_name.replace("/", "-")) if OVERWRITE is False and os.path.exists(model_output): logging.info("Skipping training of {} for {}".format(model_name, lang)) continue 
logging.info("Starting training of {} for {}".format(model_name, lang)) run = wandb.init(project=model_output.split("/")[-1], reinit=True) model = MultiLabelClassificationModel( model_type, model_name, num_labels=11, args={ 'output_dir': model_output, 'best_model_dir': '{}/best'.format(model_output), 'reprocess_input_data': True, 'overwrite_output_dir': True, 'use_cached_eval_features': True, 'num_train_epochs': 100, # For BERT, 2, 3, 4 'save_steps': 10000, 'early_stopping_patience': 5, 'evaluate_during_training': EVAL, #'early_stopping_metric': "accuracy_score", 'evaluate_during_training_steps': 1000, 'early_stopping_delta': 0.00001, 'manual_seed': 42, # 'learning_rate': 2e-5, # For BERT, 5e-5, 3e-5, 2e-5 # For BERT 16, 32. It could be 128, but with gradient_acc_steps set to 2 is equivalent 'train_batch_size': 16 if "large" in model_name else 32, 'eval_batch_size': 16 if "large" in model_name else 32, # Doubles train_batch_size, but gradients and wrights are calculated once every 2 steps 'gradient_accumulation_steps': 2 if "large" in model_name else 1, 'max_seq_length': 32, 'use_early_stopping': True, 'wandb_project': model_output.split("/")[-1], #'wandb_kwargs': {'reinit': True}, # "adam_epsilon": 3e-5, # 1e-8 "silent": False, "fp16": False, "n_gpu": 2, }) # train the model if lang == "multi": train_df = pd.concat([es_train, en_train, ge_train], ignore_index=True) eval_df = pd.concat([es_eval, en_eval, ge_eval], ignore_index=True) elif lang == "es": train_df = es_train eval_df = es_eval elif lang == "en": train_df = en_train eval_df = en_eval elif lang == "ge": train_df = ge_train eval_df = ge_eval if EVAL: model.train_model(train_df, eval_df=eval_df) # evaluate the model result, model_outputs, wrong_predictions = model.eval_model(eval_df) logging.info(str(result)) #logging.info(str(model_outputs)) else: train_eval_df = pd.concat([train_df, eval_df, ge_train], ignore_index=True) model.train_model(train_eval_df) if lang in ("es", "multi"): es_test["predicted"], *_ = model.predict(es_test.text.values) es_test["predicted"] = es_test["predicted"].apply(label2metric) es_test["pred"] = es_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) es_bert = sum(es_test.meter == es_test.pred) / es_test.meter.size logging.info("Accuracy [{}:es]: {} ({})".format(lang, es_bert, model_name)) wandb.log({"accuracy_es": es_bert}) if lang in ("en", "multi"): en_test["predicted"], *_ = model.predict(en_test.text.values) en_test["predicted"] = en_test["predicted"].apply(label2metric) en_test["pred"] = en_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) en_bert = sum(en_test.meter == en_test.pred) / en_test.meter.size logging.info("Accuracy [{}:en]: {} ({})".format(lang, en_bert, model_name)) wandb.log({"accuracy_en": en_bert}) if lang in ("ge", "multi"): ge_test["predicted"], *_ = model.predict(ge_test.text.values) ge_test["predicted"] = ge_test["predicted"].apply(label2metric) ge_test["pred"] = ge_test.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) ge_bert = sum(ge_test.meter == ge_test.pred) / ge_test.meter.size logging.info("Accuracy [{}:ge]: {} ({})".format(lang, ge_bert, model_name)) wandb.log({"accuracy_ge": ge_bert}) if lang in ("multi", ): test_df = pd.concat([es_test, en_test, ge_test], ignore_index=True) test_df["predicted"], *_ = model.predict(test_df.text.values) test_df["predicted"] = test_df["predicted"].apply(label2metric) test_df["pred"] = test_df.apply(lambda x: str(x.predicted)[:int(x.length)], axis=1) multi_bert = sum(test_df.meter == test_df.pred) / 
test_df.meter.size logging.info("Accuracy [{}:multi]: {} ({})".format(lang, multi_bert, model_name)) wandb.log({"accuracy_multi": multi_bert}) run.finish() logging.info("Done training '{}'".format(model_output)) # get_ipython().system("rm -rf `ls -dt models/{}-*/checkpoint*/ | awk 'NR>5'`".format(TAG)) logging.info("Done training")
2.1875
2
go_server_app/views.py
benjaminaaron/simple-go-server
1
12219
<gh_stars>1-10
from django.shortcuts import render
from .models import GameMeta


def index(request):
    return render(request, 'go_server_app/index.html')


def dashboard(request):
    return render(request, 'go_server_app/dashboard.html', {'games_list': GameMeta.objects.all()})


def game(request, game_id):
    game_meta = GameMeta.objects.get(game_id=game_id)
    return render(request, 'go_server_app/game.html', {'game_meta': game_meta})


def terminal(request):
    return render(request, 'go_server_app/terminal.html')
1.796875
2
test/settings/test_kafka_consumer_config.py
DebasishMaji/PI
0
12220
<gh_stars>0
import unittest


class TestKafkaConsumerConfig(unittest.TestCase):
    pass
1.164063
1
Pycraft/StartupAnimation.py
demirdogukan/InsiderPycraft
22
12221
if not __name__ == "__main__": print("Started <Pycraft_StartupAnimation>") class GenerateStartupScreen: def __init__(self): pass def Start(self): try: self.Display.fill(self.BackgroundCol) self.mod_Pygame__.display.flip() self.mod_Pygame__.display.set_caption(f"Pycraft: v{self.version}: Welcome") PresentsFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 35) PycraftFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 60) NameFont = self.mod_Pygame__.font.Font(self.mod_OS__.path.join(self.base_folder, ("Fonts\\Book Antiqua.ttf")), 45) NameText = NameFont.render("<NAME>", True, self.FontCol) NameTextWidth = NameText.get_width() NameTextHeight = NameText.get_height() PresentsText = PresentsFont.render("presents", True, self.FontCol) PycraftText = PycraftFont.render("Pycraft", True, self.FontCol) PycraftTextWidth = PycraftText.get_width() PycraftTextHeight = PycraftText.get_height() iteration = 0 clock = self.mod_Pygame__.time.Clock() if self.RunFullStartup == True: while iteration <= (60*3): self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size() self.Display.fill(self.BackgroundCol) self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2)) iteration += 1 if self.realWidth < 1280: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight) if self.realHeight < 720: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720) self.mod_Pygame__.display.flip() clock.tick(60) for event in self.mod_Pygame__.event.get(): if event.type == self.mod_Pygame__.QUIT: self.Stop_Thread_Event.set() self.Thread_StartLongThread.join() self.Thread_AdaptiveMode.join() self.Thread_StartLongThread.join() self.mod_Pygame__.quit() self.mod_Sys__.exit("Thanks for playing") quit() iteration = 0 while iteration <= (60*2): self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size() self.Display.fill(self.BackgroundCol) self.Display.blit(NameText, ((self.realWidth-NameTextWidth)/2, (self.realHeight-NameTextHeight)/2)) self.Display.blit(PresentsText, ((((self.realWidth-NameTextWidth)/2)+120), ((self.realHeight-NameTextHeight)/2)+30)) iteration += 1 if self.realWidth < 1280: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight) if self.realHeight < 720: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720) self.mod_Pygame__.display.flip() clock.tick(60) for event in self.mod_Pygame__.event.get(): if event.type == self.mod_Pygame__.QUIT: self.Stop_Thread_Event.set() self.Thread_StartLongThread.join() self.Thread_AdaptiveMode.join() self.Thread_StartLongThread.join() self.mod_Pygame__.quit() self.mod_Sys__.exit("Thanks for playing") quit() iteration = 0 while iteration <= (60*3): self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size() self.Display.fill(self.BackgroundCol) self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, (self.realHeight-PycraftTextHeight)/2)) iteration += 1 if self.realWidth < 1280: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight) if self.realHeight < 720: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720) self.mod_Pygame__.display.flip() clock.tick(60) for event in self.mod_Pygame__.event.get(): if event.type == self.mod_Pygame__.QUIT: self.Stop_Thread_Event.set() self.Thread_StartLongThread.join() 
self.Thread_AdaptiveMode.join() self.Thread_StartLongThread.join() self.mod_Pygame__.quit() self.mod_Sys__.exit("Thanks for playing") quit() y = 0 while True: self.realWidth, self.realHeight = self.mod_Pygame__.display.get_window_size() self.Display.fill(self.BackgroundCol) self.Display.blit(PycraftText, ((self.realWidth-PycraftTextWidth)/2, ((self.realHeight-PycraftTextHeight)/2)-y)) y += 2 if self.realWidth < 1280: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, 1280, self.SavedHeight) if self.realHeight < 720: self.mod_DisplayUtils__.DisplayUtils.GenerateMinDisplay(self, self.SavedWidth, 720) self.mod_Pygame__.display.flip() clock.tick(60) for event in self.mod_Pygame__.event.get(): if event.type == self.mod_Pygame__.QUIT: self.Stop_Thread_Event.set() self.Thread_StartLongThread.join() self.Thread_AdaptiveMode.join() self.Thread_StartLongThread.join() self.mod_Pygame__.quit() self.mod_Sys__.exit("Thanks for playing") quit() if ((self.realHeight-PycraftTextHeight)/2)-y <= 0: self.RunFullStartup = False return None except Exception as Message: self.RunFullStartup = False return Message else: print("You need to run this as part of Pycraft") import tkinter as tk from tkinter import messagebox root = tk.Tk() root.withdraw() messagebox.showerror("Startup Fail", "You need to run this as part of Pycraft, please run the 'main.py' file") quit()
2.484375
2
kattishunter/kattis/submission.py
ParksProjets/kattis-hunter
0
12222
<reponame>ParksProjets/kattis-hunter """ Submit files for a Kattis problem. Copyright (C) 2019, <NAME> This project is under the MIT license. """ import os.path as path import re from typing import Dict, List, Text import requests import logging from .login import login logger = logging.getLogger(__name__) # Base headers to use. HEADERS = { "Accept": "text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8", "Accept-Language": "en-US,en;q=0.5", } def retreive_csrf_token(config: Dict, pid: Text, retry = True): "Retreive CSRF token from the submit page." # Setup headers to send. headers = HEADERS.copy() headers["User-Agent"] = config["cache"]["user-agent"] # Make the GET request. url = config["url"]["submit"].format(pid=pid) cookies = config["cache"].get("cookies", {}) res = requests.get(url, headers=headers, cookies=cookies, allow_redirects=False) config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()} # Not logged, try to login first. if res.status_code != 200: if not retry: logger.critical("Can't retrieve submit page from Kattis.") login(config) return retreive_csrf_token(config, pid, False) # Find the CSRF token in response body. pattern = r"name=\"csrf_token\".*?value=\"([0-9a-z]+)\"" match = re.search(pattern, res.text) if match is None: logger.critical("Can't find CSRF token in submit page.") return match.group(1) def read_file(filename: Text): "Read a single file to send." with open(filename, "rb") as file: return file.read() def read_files(files: List[Text]): "Read files to send." return [( "sub_file[]", (path.basename(file), read_file(file), "application/octet-stream") ) for file in files] def submit_kattis(config: Dict, pid: Text, files: List[Text]): "Submit files to a Kattis problem." # Setup headers to send. headers = HEADERS.copy() headers["User-Agent"] = config["cache"]["user-agent"] # Setup data to send. data = { "csrf_token": retreive_csrf_token(config, pid), "type": "files", "sub_code": "", "problem": pid, "language": "C++", "submit": "Submit", "submit_ctr": 10 } # URL, files and cookies to use. url = config["url"]["submit"].format(pid=pid) files = read_files(files) cookies = config["cache"]["cookies"] # Make the POST request. logger.debug("Submitting %d files for '%s'.", len(files), pid) res = requests.post(url, data=data, files=files, headers=headers, cookies=cookies) config["cache"]["cookies"] = {**cookies, **res.cookies.get_dict()} # Find submisson ID. match = re.match(r"^.*/submissions/([0-9]+)$", res.url) if not match: logger.critical("Can't find submission ID from URL '%s'.", res.url) sid = match.group(1) logger.debug("Files sent to submission %s.", sid) return sid
2.609375
3
scripts/set_health_led.py
alanmitchell/mini-monitor
7
12223
#!/usr/bin/env python3
"""Script to do basic health checks of the system and turn on an LED
on BCM pin 12 (pin 32 on header) if they pass, turn Off otherwise.
"""
import time
import RPi.GPIO as GPIO
import subprocess

# The BCM pin number that the LED is wired to. When the pin
# is at 3.3V the LED is On.
LED_PIN = 12

GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
GPIO.setup(LED_PIN, GPIO.OUT)

# ----- Test for Internet availability.
# Try to ping for a minute before declaring that the Internet
# is not available
internet_available = False
for i in range(12):
    if subprocess.call('/bin/ping -q -c1 8.8.8.8', shell=True) == 0:
        internet_available = True
        break
    time.sleep(5)

# Set LED according to results of test
GPIO.output(LED_PIN, internet_available)
3.265625
3
vqa_txt_data/compare_experiment_results.py
billyang98/UNITER
0
12224
import json import numpy as np from tqdm import tqdm # Change these based on experiment #exp_dataset = 'mask_char_oov_test_set.db' #exp_name = 'results_test_mask_char' #exp_dataset = 'mask_2_oov_test_set.db' #exp_name = 'results_test_mask_2' #exp_dataset = 'mask_2_oov_test_set.db' #exp_name = 'results_test_synonyms_mask_2_ensemble_all_5' #exp_dataset = 'synonyms_mask_char_l03_oov_test_set.db' #exp_name = 'results_test_synonyms_mask_char_l03' #exp_dataset = 'synonyms_mask_char_03m_oov_test_set.db' #exp_name = 'results_test_synonyms_mask_char_03m' #exp_dataset = 'synonyms_mask_2_03l_oov_test_set.db' #exp_name = 'results_test_synonyms_mask_2_03l' exp_dataset = 'mask_2_oov_test_set.db' exp_name = 'results_test_synonyms_mask_2_fixed' q_list_file = '/scratch/cluster/billyang/vqa_dataset/txt_db/oov_datasets/{}/questions_changed.json'.format(exp_dataset) exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_trained/{}/results_3000_all.json'.format(exp_name) #exp_ans_file = '/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/{}/results_3000_all.json'.format(exp_name) q_list = json.load(open(q_list_file)) exp_ans_list = json.load(open(exp_ans_file)) baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_trained/results_test_normal_test/results_3000_all.json')) #baseline_ans_list = json.load(open('/scratch/cluster/billyang/uniter_image/vqa_joint_fixed_trained/results_test_normal_test_fixed/results_3000_all.json')) exp_ans = {o['question_id']: o['answer'] for o in exp_ans_list} baseline_ans = {o['question_id']: o['answer'] for o in baseline_ans_list} gt_ans = json.load(open('oov_test_full_answers.json')) results = {} results['num_questions'] = len(q_list) exp_tot_score = 0 bl_tot_score = 0 rtw = [] wtr = [] def getscore(answer, answers, scores): if answer in answers: return scores[answers.index(answer)] return 0 for qid in tqdm(q_list): exp_score = getscore(exp_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores']) exp_tot_score += exp_score bl_score = getscore(baseline_ans[qid], gt_ans[qid]['strings'], gt_ans[qid]['scores']) bl_tot_score += bl_score if exp_score > 0 and bl_score == 0: wtr.append(qid) if bl_score > 0 and exp_score == 0: rtw.append(qid) results['exp_score'] = exp_tot_score / len(q_list) results['bl_score'] = bl_tot_score / len(q_list) results['rtw'] = rtw results['wtr'] = wtr results['rtw_count'] = len(rtw) results['wtr_count'] = len(wtr) print("dumping") json.dump(results, open('{}.json'.format(exp_name), 'w')) # get new scores # find answers wrong to right # find answers right to wrong
1.984375
2
app/articles/forms.py
AlexRAV/flask-blog
0
12225
<filename>app/articles/forms.py
# -*- coding: utf-8 -*-
"""Article forms."""
from flask_wtf import Form, FlaskForm
from wtforms import PasswordField, StringField, TextAreaField
from wtforms.validators import DataRequired, Email, EqualTo, Length


class NewArticleForm(FlaskForm):
    title = StringField('Article title', validators=[DataRequired(), Length(min=5, max=200)])
    body = TextAreaField('Article body', validators=[DataRequired(), Length(min=50)])


class NewCommentForm(FlaskForm):
    body = TextAreaField('Comment', validators=[DataRequired()])
2.59375
3
dataanalysis.py
Rev-Jiang/Python
0
12226
<reponame>Rev-Jiang/Python<filename>dataanalysis.py #-*- coding: UTF-8 -*- #上句表示可用中文注释,否则默认ASCII码保存 # Filename : dataanalysis.py # author by : Rev_997 import numpy as np import pandas as pd import matplotlib.pyplot as plt def isiterable(obj): try: iter(obj) return True except TypeError:#not iterable return False #if it is not list or NumPy, transfer it if not isinstance(x,list) and isiterable(x): x=list(x) #is and is not are used to judge if the varible is None, as None is unique. a=None a is None import datetime dt=datetime(2011,10,29,20,30,21) dt.day dt.minute dt.date() dt.time() #datetime could be transfered to string by function striftime dt.strftime('%m/%d/%Y %H:%M') #string could be transfered to datetime by function strptime datetime.strptime('20091031','%Y%m%d') #substitute 0 for minutes and seconds dt.replace(minute=0,second=0) #the difference of two datetime objects produce a datetime.timedelta dt2=datetime(2011,11,15,22,30) delta=dt2-dt delta type(delta) #add a timedelta to a datetime -- get a now datetime dt+delta #if elif else if x: pass elif: pass else: pass #for for value in collection: #do something wuth value #continue #break for a,b,c in iterator: #do something #while x=256 total=0 while x>0: if total>500: break total+=x x=x//2 def attempt_float(x): try: return float(x) except: return x #once the float(x) is invalid, the except works def attempt_float(x): try: return float(x) except(TypeError,ValueError): return x #catch the abnormity #value=true-expr if condition else false-expr #same as ''' if condition: value=true-expr else: value=false-expr ''' #about tuple tup=4,5,6 tup #(4,5,6) #transfer to tuple tuple([4,0,2]) tuple('string') #tuple use + to generate longer tuple #tuple.append() #tuple.count() #list.append() #list.insert() #list.pop() #list.remove() #list.extend() #list.sort() import bisect c=[1,2,2,2,3,4,7] #find the suitable position bisect.bisect(c,2) #insert the new number bisect.insort(c,6) ###attention: bisect is suitable for ordered sequence #---------------------------------------------------------------- #some function of list #enumerate for i,value in enumerate(collection): #do something with value some_list=['foo','bar','baz'] mapping=dict((v,i) for i,v in enumerate(some_list)) mapping #sorted sorted([7,2,4,6,3,5,2]) sorted('horse race') #powerful with set sorted(set('this is just some string')) #zip seq1=['foo','bar','baz'] seq2=['one','two','three'] zip(seq1,seq2) seq3=[False,True] zip(seq1,seq2,seq3) #several arrays iterate together with zip for i,(a,b) in enumerate(zip(seq1,seq2)): print('%d: %s, %s' % (i,a,b)) #unzip pitchers=[('Nolan','Ryan'),('Roger','Clemens'),('Schilling','Curt')] first_names,last_names=zip(*pitchers)# * is meant zip(seq[0],seq[1],...,seq[len(seq)-1]) first_names last_names #reversed list(reversed(range(10))) #dictionary empty_dict={}d1={'a':'some value','b':[1,2,3,4]} d1 #delete del d1[5] #or ret=d1.pop('dummy') ret #get keys and values d1.keys() d1.values() #combine two dictionaries d1.update({'b':'foo','c':12}) d1 #match two list to be dictionary ''' mapping={} for key,value in zip(key_list,value_list): mapping[key]=value ''' mapping=dict(zip(range(5),reversed(range(5)))) mapping #brief way to express circulation by dict ''' if key in some_dict: value=some_dict[key] else: value=default_value ''' value=some_dict.get(key,default_values) #the vlaue of dictionary is set as other list ''' words=['apple','bat','bar','atom','book'] by_letter={} for word in words: letter=word[0] if letter not in by_letter: by_letter[letter]=[word] 
else: by_letter[letter].append(word) by_letter ''' by_letter.setdefault(letter,[]).append(word) #or use defaultdict class in Module collections from collections import defaultdict by_letter=defaultdict(list) for word in words: by_letter[word[0]].append(word) #the key of dictionary should be of hashability--unchangable hash('string') hash((1,2,(2,3))) hash((1,2,[3,4]))#no hashability as list is changable #to change a list to tuple is the easiest way to make it a key d={} d[tuple([1,2,3])]=5 d #set set([2,2,2,1,3,3]) {2,2,2,1,3,3} a={1,2,3,4,5} b={3,4,5,6,7,8} #intersection a|b #union a&b #difference a-b #symmetric difference a^b #if is subset a_set={1,2,3,4,5} {1,2,3}.issubset(a_set) a_set.issuperset({1,2,3}) #set could use the == to judge if the same {1,2,3}=={3,2,1} #the operation of the sets a.add(x) a.remove(x) a.union(b) a.intersection(b) a.difference(b) a.symmetric_difference(b) a.issubset(b) a.issuperset(b) a.isdisjoint(b) #the derivative of list&set&dictionary ''' [expr for val in collection if condition] is the same as result=[] for val in collection: if condition: result.append(expr) ''' #list #[expr for val in collection if condition] strings=['a','as','bat','car','dove','python'] [x.upper() for x in strings if len(x)>2] #dicrionary #dict_comp={key-expr:value-expr for value in collection if condition} loc_mapping={val:index for index, val in enumerate(string)} loc_mapping #or loc_mapping=dict((val,idx) for idx, val in enumerate(string)) #set #set_comp={expr for value in collection if condition} unique_lengths={len(x) for x in strings} unique_lengths #list nesting derivative all_data=[['Tom','Billy','Jeffery','Andrew','Wesley','Steven','Joe'], ['Susie','Casey','Jill','Ana','Eva','Jennifer','Stephanie']] #find the names with two 'e' and put them in a new list names_of_interest=[] for name in all_data: enough_es=[name for name in names if name.count('e')>2] names_of_interest.extend(enough_es) #which could be shorten as below: result=[name for names in all_data for name in names if name.count('e')>=2] result #flat a list consist of tuples some_tuples=[(1,2,3),(4,5,6),(7,8,9)] flattened=[x for tup in some_tuples for x in tup] flattened ''' flattened=[] for tup in some_tuples: for x in tup: flattened.append(x) ''' #which is different from: [[x for x in tup] for tup in some_tuples] #clean function import re def clean_strings(strings): result=[] for value in strings: value=value.strip() value=re.sub('[!#?]','',value) #Remove punctuation marks value=value.title() result.append(value) return result states=[' Alabama ','Georgia!','Georgia','georgia','FlOrIda','south carolina##','West virginia?'] clean_strings(states) #or def remove_punctuation(value): return re.sub('[!#?]','',value) clean_ops=[str.strip,remove_punctuation,str.title] def clean_strings(strings,ops): result=[] for value in strings: for function in ops: value=function(value) result.append(value) return result clean_strings(states,clean_ops) #anonymous function #lambda [arg1[, arg2, ... 
argN]]: expression #exmaple 1 #use def define function def add( x, y ): return x + y #use lambda expression lambda x, y: x + y #lambda permits default parameter lambda x, y = 2: x + y lambda *z: z #call lambda function a = lambda x, y: x + y a( 1, 3 ) b = lambda x, y = 2: x + y b( 1 ) b( 1, 3 ) c = lambda *z: z c( 10, 'test') #example2 #use def define function def add( x, y ): return x + y #use lambda expression lambda x, y: x + y #lambda permits default parameter lambda x, y = 2: x + y lambda *z: z #call lambda function a = lambda x, y: x + y a( 1, 3 ) b = lambda x, y = 2: x + y b( 1 ) b( 1, 3 ) c = lambda *z: z c( 10, 'test') #example 3 def apply_to_list(some_list,f): return [f(x) for x in some_list] ints=[4,0,1,5,6] apply_to_list(ints,lambda x:x*2) #example 4 strings=['foo','card','bar','aaaa','abab'] strings.sort(key=lambda x: len(set(list(x)))) strings #currying ''' def add_numbers(x,y): return x+y add_five=lambda y:add_numbers(5,y) ''' #partial function is to simplify the process from functools import partial add_five=partial(add_numbers,5) #generator expression gen=(x**2 for x in xxrange(100)) gen #the same: def _make_gen(): for x in xrange(100): yield x**2 gen=_make_gen() #generator expression could be used in any python function acceptable of generator sum(x**2 for x in xrange(100)) dict((i,i**2) for i in xrange(5)) #itertools module import itertools first_letter=lambda x:x[0] names=['Alan','Adam','Wes','Will','Albert','Steven'] for letter,names in itertools.groupby(names,first_letter): print letter,list(names) #names is a genetator #some functions in itertools imap(func,*iterables) ifilter(func,iterable) combinations(iterable,k) permutations(iterable,k) groupby(iterable[,keyfunc]) #documents and operation system path='xxx.txt' f=open(path) for line in f: pass #remove EOL of every line lines=[x.rstrip() for x in open(path)] lines #set a empty-lineproof doc with open('tmp.txt','w') as handle: handle.writelines(x for x in open(path) if len(x)>1) open('tmp.txt').readlines() #some function to construct documents read([size]) readlines([size]) write(str) close() flush() seek(pos) tell() closed
3.03125
3
show_model_info.py
panovr/Brain-Tumor-Segmentation
0
12227
import bts.model as model
import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
BATCH_SIZE = 6
FILTER_LIST = [16,32,64,128,256]

unet_model = model.DynamicUNet(FILTER_LIST)
unet_model.summary(batch_size=BATCH_SIZE, device=device)
2.203125
2
docs/source/conf.py
andriis/bravado
600
12228
# -*- coding: utf-8 -*- import sphinx_rtd_theme # -- General configuration ----------------------------------------------- extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'bravado' copyright = u'2013, Digium, Inc.; 2014-2015, Yelp, Inc' exclude_patterns = [] pygments_style = 'sphinx' autoclass_content = 'both' # -- Options for HTML output --------------------------------------------- html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] html_static_path = ['_static'] htmlhelp_basename = 'bravado-pydoc' intersphinx_mapping = { 'python': ('http://docs.python.org/', None), 'bravado-core': ('https://bravado-core.readthedocs.io/en/latest/', None), }
1.5625
2
edgedb/_testbase.py
Fogapod/edgedb-python
0
12229
<reponame>Fogapod/edgedb-python<gh_stars>0 # # This source file is part of the EdgeDB open source project. # # Copyright 2016-present MagicStack Inc. and the EdgeDB authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import asyncio import atexit import contextlib import functools import inspect import json import logging import os import re import unittest import edgedb from edgedb import _cluster as edgedb_cluster @contextlib.contextmanager def silence_asyncio_long_exec_warning(): def flt(log_record): msg = log_record.getMessage() return not msg.startswith('Executing ') logger = logging.getLogger('asyncio') logger.addFilter(flt) try: yield finally: logger.removeFilter(flt) _default_cluster = None def _init_cluster(data_dir=None, *, cleanup_atexit=True): if (not os.environ.get('EDGEDB_DEBUG_SERVER') and not os.environ.get('EDGEDB_LOG_LEVEL')): _env = {'EDGEDB_LOG_LEVEL': 'silent'} else: _env = {} if data_dir is None: cluster = edgedb_cluster.TempCluster(env=_env, testmode=True) destroy = True else: cluster = edgedb_cluster.Cluster(data_dir=data_dir, env=_env) destroy = False if cluster.get_status() == 'not-initialized': cluster.init() cluster.start(port='dynamic') cluster.set_superuser_password('<PASSWORD>') if cleanup_atexit: atexit.register(_shutdown_cluster, cluster, destroy=destroy) return cluster def _start_cluster(*, cleanup_atexit=True): global _default_cluster if _default_cluster is None: cluster_addr = os.environ.get('EDGEDB_TEST_CLUSTER_ADDR') if cluster_addr: conn_spec = json.loads(cluster_addr) _default_cluster = edgedb_cluster.RunningCluster(**conn_spec) else: data_dir = os.environ.get('EDGEDB_TEST_DATA_DIR') _default_cluster = _init_cluster( data_dir=data_dir, cleanup_atexit=cleanup_atexit) return _default_cluster def _shutdown_cluster(cluster, *, destroy=True): cluster.stop() if destroy: cluster.destroy() class TestCaseMeta(type(unittest.TestCase)): _database_names = set() @staticmethod def _iter_methods(bases, ns): for base in bases: for methname in dir(base): if not methname.startswith('test_'): continue meth = getattr(base, methname) if not inspect.iscoroutinefunction(meth): continue yield methname, meth for methname, meth in ns.items(): if not methname.startswith('test_'): continue if not inspect.iscoroutinefunction(meth): continue yield methname, meth @classmethod def wrap(mcls, meth): @functools.wraps(meth) def wrapper(self, *args, __meth__=meth, **kwargs): try_no = 1 while True: try: # There might be unobvious serializability # anomalies across the test suite, so, rather # than hunting them down every time, simply # retry the test. 
self.loop.run_until_complete( __meth__(self, *args, **kwargs)) except edgedb.TransactionSerializationError: if try_no == 3: raise else: self.loop.run_until_complete(self.con.execute( 'ROLLBACK;' )) try_no += 1 else: break return wrapper @classmethod def add_method(mcls, methname, ns, meth): ns[methname] = mcls.wrap(meth) def __new__(mcls, name, bases, ns): for methname, meth in mcls._iter_methods(bases, ns.copy()): if methname in ns: del ns[methname] mcls.add_method(methname, ns, meth) cls = super().__new__(mcls, name, bases, ns) if not ns.get('BASE_TEST_CLASS') and hasattr(cls, 'get_database_name'): dbname = cls.get_database_name() if name in mcls._database_names: raise TypeError( f'{name} wants duplicate database name: {dbname}') mcls._database_names.add(name) return cls class TestCase(unittest.TestCase, metaclass=TestCaseMeta): @classmethod def setUpClass(cls): loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) cls.loop = loop @classmethod def tearDownClass(cls): cls.loop.close() asyncio.set_event_loop(None) def add_fail_notes(self, **kwargs): if not hasattr(self, 'fail_notes'): self.fail_notes = {} self.fail_notes.update(kwargs) @contextlib.contextmanager def annotate(self, **kwargs): # Annotate the test in case the nested block of code fails. try: yield except Exception: self.add_fail_notes(**kwargs) raise @contextlib.contextmanager def assertRaisesRegex(self, exception, regex, msg=None, **kwargs): with super().assertRaisesRegex(exception, regex, msg=msg): try: yield except BaseException as e: if isinstance(e, exception): for attr_name, expected_val in kwargs.items(): val = getattr(e, attr_name) if val != expected_val: raise self.failureException( f'{exception.__name__} context attribute ' f'{attr_name!r} is {val} (expected ' f'{expected_val!r})') from e raise class ClusterTestCase(TestCase): BASE_TEST_CLASS = True @classmethod def setUpClass(cls): super().setUpClass() cls.cluster = _start_cluster(cleanup_atexit=True) class ConnectedTestCaseMixin: @classmethod async def connect(cls, *, cluster=None, database='edgedb', user='edgedb', password='<PASSWORD>'): conargs = cls.get_connect_args( cluster=cluster, database=database, user=user, password=password) return await edgedb.async_connect(**conargs) @classmethod def get_connect_args(cls, *, cluster=None, database='edgedb', user='edgedb', password='<PASSWORD>'): if cluster is None: cluster = cls.cluster conargs = cluster.get_connect_args().copy() conargs.update(dict(user=user, password=password, database=database)) return conargs class DatabaseTestCase(ClusterTestCase, ConnectedTestCaseMixin): SETUP = None TEARDOWN = None SCHEMA = None SETUP_METHOD = None TEARDOWN_METHOD = None # Some tests may want to manage transactions manually, # in which case ISOLATED_METHODS will be False. ISOLATED_METHODS = True # Turns on "EdgeDB developer" mode which allows using restricted # syntax like FROM SQL and similar. It allows modifying standard # library (e.g. declaring casts). 
INTERNAL_TESTMODE = True BASE_TEST_CLASS = True def setUp(self): if self.INTERNAL_TESTMODE: self.loop.run_until_complete( self.con.execute( 'CONFIGURE SESSION SET __internal_testmode := true;')) if self.ISOLATED_METHODS: self.xact = self.con.transaction() self.loop.run_until_complete(self.xact.start()) if self.SETUP_METHOD: self.loop.run_until_complete( self.con.execute(self.SETUP_METHOD)) super().setUp() def tearDown(self): try: if self.TEARDOWN_METHOD: self.loop.run_until_complete( self.con.execute(self.TEARDOWN_METHOD)) finally: try: if self.ISOLATED_METHODS: self.loop.run_until_complete(self.xact.rollback()) del self.xact if self.con.is_in_transaction(): self.loop.run_until_complete( self.con.execute('ROLLBACK')) raise AssertionError( 'test connection is still in transaction ' '*after* the test') if not self.ISOLATED_METHODS: self.loop.run_until_complete( self.con.execute('RESET ALIAS *;')) finally: super().tearDown() @classmethod def setUpClass(cls): super().setUpClass() dbname = cls.get_database_name() cls.admin_conn = None cls.con = None class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP') # Only open an extra admin connection if necessary. if not class_set_up: script = f'CREATE DATABASE {dbname};' cls.admin_conn = cls.loop.run_until_complete(cls.connect()) cls.loop.run_until_complete(cls.admin_conn.execute(script)) cls.con = cls.loop.run_until_complete(cls.connect(database=dbname)) if not class_set_up: script = cls.get_setup_script() if script: # The setup is expected to contain a CREATE MIGRATION, # which needs to be wrapped in a transaction. tx = cls.con.transaction() cls.loop.run_until_complete(tx.start()) cls.loop.run_until_complete(cls.con.execute(script)) cls.loop.run_until_complete(tx.commit()) del tx @classmethod def get_database_name(cls): if cls.__name__.startswith('TestEdgeQL'): dbname = cls.__name__[len('TestEdgeQL'):] elif cls.__name__.startswith('Test'): dbname = cls.__name__[len('Test'):] else: dbname = cls.__name__ return dbname.lower() @classmethod def get_setup_script(cls): script = '' # Look at all SCHEMA entries and potentially create multiple # modules, but always create the 'test' module. schema = ['\nmodule test {}'] for name, val in cls.__dict__.items(): m = re.match(r'^SCHEMA(?:_(\w+))?', name) if m: module_name = (m.group(1) or 'test').lower().replace( '__', '.') with open(val, 'r') as sf: module = sf.read() schema.append(f'\nmodule {module_name} {{ {module} }}') # Don't wrap the script into a transaction here, so that # potentially it's easier to stitch multiple such scripts # together in a fashion similar to what `edb inittestdb` does. 
script += f'\nSTART MIGRATION TO {{ {"".join(schema)} }};' script += f'\nPOPULATE MIGRATION; \nCOMMIT MIGRATION;' if cls.SETUP: if not isinstance(cls.SETUP, (list, tuple)): scripts = [cls.SETUP] else: scripts = cls.SETUP for scr in scripts: if '\n' not in scr and os.path.exists(scr): with open(scr, 'rt') as f: setup = f.read() else: setup = scr script += '\n' + setup return script.strip(' \n') @classmethod def tearDownClass(cls): script = '' class_set_up = os.environ.get('EDGEDB_TEST_CASES_SET_UP') if cls.TEARDOWN and not class_set_up: script = cls.TEARDOWN.strip() try: if script: cls.loop.run_until_complete( cls.con.execute(script)) finally: try: cls.loop.run_until_complete(cls.con.aclose()) if not class_set_up: dbname = cls.get_database_name() script = f'DROP DATABASE {dbname};' cls.loop.run_until_complete( cls.admin_conn.execute(script)) finally: try: if cls.admin_conn is not None: cls.loop.run_until_complete( cls.admin_conn.aclose()) finally: super().tearDownClass() class AsyncQueryTestCase(DatabaseTestCase): BASE_TEST_CLASS = True class SyncQueryTestCase(DatabaseTestCase): BASE_TEST_CLASS = True def setUp(self): super().setUp() cls = type(self) cls.async_con = cls.con conargs = cls.get_connect_args().copy() conargs.update(dict(database=cls.async_con.dbname)) cls.con = edgedb.connect(**conargs) def tearDown(self): cls = type(self) cls.con.close() cls.con = cls.async_con del cls.async_con _lock_cnt = 0 def gen_lock_key(): global _lock_cnt _lock_cnt += 1 return os.getpid() * 1000 + _lock_cnt
1.882813
2
konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py
mlshenkai/KGQA
0
12230
<filename>konwledge_extraction/ner/bert_crf_ner/losses/focal_loss.py
# -*- coding: utf-8 -*-
# @Author: <NAME>
# @Created Time: 2022/2/23 10:14 AM
# @Organization: YQN
# @Email: <EMAIL>
import torch
import torch.nn as nn
import torch.nn.functional as F


class FocalLoss(nn.Module):
    def __init__(self, gamma=2, weight=None, ignore_index=-100):
        super(FocalLoss, self).__init__()
        self.gamma = gamma
        self.weight = weight
        self.ignore_index = ignore_index

    def forward(self, output, target):
        """
        :param output: [N, CLASS]
        :param target: [N,]
        :return:
        """
        # nll_loss expects log-probabilities, so work in log-space; pt is the
        # model's probability for each class and (1 - pt) ** gamma down-weights
        # easy, high-confidence examples.
        logpt = F.log_softmax(output, dim=1)  # [N, CLASS]
        pt = torch.exp(logpt)  # [N, CLASS]
        logpt = (1 - pt) ** self.gamma * logpt  # [N, CLASS]
        loss = F.nll_loss(logpt, target, self.weight, ignore_index=self.ignore_index)
        return loss
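# Illustrative usage sketch (values and shapes are hypothetical, not from the project):
# exercising the FocalLoss module defined above with random tensors.
if __name__ == "__main__":
    criterion = FocalLoss(gamma=2)
    logits = torch.randn(8, 5, requires_grad=True)  # [N, CLASS] raw model outputs
    targets = torch.randint(0, 5, (8,))             # [N] gold class indices
    loss = criterion(logits, targets)
    loss.backward()                                 # gradients flow back to the logits
    print(loss.item())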
2.4375
2
tests/biology/test_join_fasta.py
shandou/pyjanitor
1
12231
import importlib import os import pytest from helpers import running_on_ci import janitor.biology # noqa: F403, F401 # Skip all tests if Biopython not installed pytestmark = pytest.mark.skipif( (importlib.util.find_spec("Bio") is None) & ~running_on_ci(), reason="Biology tests relying on Biopython only required for CI", ) @pytest.mark.biology def test_join_fasta(biodf): """Test adding sequence from FASTA file in ``sequence`` column.""" df = biodf.join_fasta( filename=os.path.join(pytest.TEST_DATA_DIR, "sequences.fasta"), id_col="sequence_accession", column_name="sequence", ) assert "sequence" in df.columns
2.234375
2
test/test_tilepyramid.py
ungarj/tilematrix
16
12232
"""TilePyramid creation.""" import pytest from shapely.geometry import Point from shapely.ops import unary_union from types import GeneratorType from tilematrix import TilePyramid, snap_bounds def test_init(): """Initialize TilePyramids.""" for tptype in ["geodetic", "mercator"]: assert TilePyramid(tptype) with pytest.raises(ValueError): TilePyramid("invalid") with pytest.raises(ValueError): TilePyramid() assert hash(TilePyramid(tptype)) def test_metatiling(): """Metatiling setting.""" for metatiling in [1, 2, 4, 8, 16]: assert TilePyramid("geodetic", metatiling=metatiling) try: TilePyramid("geodetic", metatiling=5) raise Exception() except ValueError: pass def test_tile_size(): """Tile sizes.""" for tile_size in [128, 256, 512, 1024]: tp = TilePyramid("geodetic", tile_size=tile_size) assert tp.tile_size == tile_size def test_intersect(): """Get intersecting Tiles.""" # same metatiling tp = TilePyramid("geodetic") intersect_tile = TilePyramid("geodetic").tile(5, 1, 1) control = {(5, 1, 1)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # smaller metatiling tp = TilePyramid("geodetic") intersect_tile = TilePyramid("geodetic", metatiling=2).tile(5, 1, 1) control = {(5, 2, 2), (5, 2, 3), (5, 3, 3), (5, 3, 2)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # bigger metatiling tp = TilePyramid("geodetic", metatiling=2) intersect_tile = TilePyramid("geodetic").tile(5, 1, 1) control = {(5, 0, 0)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles intersect_tile = TilePyramid("geodetic").tile(4, 12, 31) control = {(4, 6, 15)} test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} assert control == test_tiles # different CRSes tp = TilePyramid("geodetic") intersect_tile = TilePyramid("mercator").tile(5, 1, 1) try: test_tiles = {tile.id for tile in tp.intersecting(intersect_tile)} raise Exception() except ValueError: pass def test_tilepyramid_compare(grid_definition_proj, grid_definition_epsg): """Comparison operators.""" gproj, gepsg = grid_definition_proj, grid_definition_epsg # predefined assert TilePyramid("geodetic") == TilePyramid("geodetic") assert TilePyramid("geodetic") != TilePyramid("geodetic", metatiling=2) assert TilePyramid("geodetic") != TilePyramid("geodetic", tile_size=512) assert TilePyramid("mercator") == TilePyramid("mercator") assert TilePyramid("mercator") != TilePyramid("mercator", metatiling=2) assert TilePyramid("mercator") != TilePyramid("mercator", tile_size=512) # epsg based assert TilePyramid(gepsg) == TilePyramid(gepsg) assert TilePyramid(gepsg) != TilePyramid(gepsg, metatiling=2) assert TilePyramid(gepsg) != TilePyramid(gepsg, tile_size=512) # proj based assert TilePyramid(gproj) == TilePyramid(gproj) assert TilePyramid(gproj) != TilePyramid(gproj, metatiling=2) assert TilePyramid(gproj) != TilePyramid(gproj, tile_size=512) # altered bounds abounds = dict(**gproj) abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0)) assert TilePyramid(abounds) == TilePyramid(abounds) assert TilePyramid(gproj) != TilePyramid(abounds) # other type assert TilePyramid("geodetic") != "string" def test_grid_compare(grid_definition_proj, grid_definition_epsg): """Comparison operators.""" gproj, gepsg = grid_definition_proj, grid_definition_epsg # predefined assert TilePyramid("geodetic").grid == TilePyramid("geodetic").grid assert TilePyramid("geodetic").grid == TilePyramid("geodetic", metatiling=2).grid assert 
TilePyramid("geodetic").grid == TilePyramid("geodetic", tile_size=512).grid assert TilePyramid("mercator").grid == TilePyramid("mercator").grid assert TilePyramid("mercator").grid == TilePyramid("mercator", metatiling=2).grid assert TilePyramid("mercator").grid == TilePyramid("mercator", tile_size=512).grid # epsg based assert TilePyramid(gepsg).grid == TilePyramid(gepsg).grid assert TilePyramid(gepsg).grid == TilePyramid(gepsg, metatiling=2).grid assert TilePyramid(gepsg).grid == TilePyramid(gepsg, tile_size=512).grid # proj based assert TilePyramid(gproj).grid == TilePyramid(gproj).grid assert TilePyramid(gproj).grid == TilePyramid(gproj, metatiling=2).grid assert TilePyramid(gproj).grid == TilePyramid(gproj, tile_size=512).grid # altered bounds abounds = dict(**gproj) abounds.update(bounds=(-5000000.0, -5000000.0, 5000000.0, 5000000.0)) assert TilePyramid(abounds).grid == TilePyramid(abounds).grid assert TilePyramid(gproj).grid != TilePyramid(abounds).grid def test_tile_from_xy(): tp = TilePyramid("geodetic") zoom = 5 # point inside tile p_in = (0.5, 0.5, zoom) control_in = [ ((5, 15, 32), "rb"), ((5, 15, 32), "lb"), ((5, 15, 32), "rt"), ((5, 15, 32), "lt"), ] for tile_id, on_edge_use in control_in: tile = tp.tile_from_xy(*p_in, on_edge_use=on_edge_use) assert tile.id == tile_id assert Point(p_in[0], p_in[1]).within(tile.bbox()) # point is on tile edge p_edge = (0, 0, zoom) control_edge = [ ((5, 16, 32), "rb"), ((5, 16, 31), "lb"), ((5, 15, 32), "rt"), ((5, 15, 31), "lt"), ] for tile_id, on_edge_use in control_edge: tile = tp.tile_from_xy(*p_edge, on_edge_use=on_edge_use) assert tile.id == tile_id assert Point(p_edge[0], p_edge[1]).touches(tile.bbox()) with pytest.raises(ValueError): tp.tile_from_xy(180, -90, zoom, on_edge_use="rb") with pytest.raises(ValueError): tp.tile_from_xy(180, -90, zoom, on_edge_use="lb") tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="rt") assert tile.id == (5, 31, 0) tile = tp.tile_from_xy(180, -90, zoom, on_edge_use="lt") assert tile.id == (5, 31, 63) with pytest.raises(TypeError): tp.tile_from_xy(-180, 90, zoom, on_edge_use="lt") with pytest.raises(TypeError): tp.tile_from_xy(-180, 90, zoom, on_edge_use="rt") tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="rb") assert tile.id == (5, 0, 0) tile = tp.tile_from_xy(-180, 90, zoom, on_edge_use="lb") assert tile.id == (5, 0, 63) with pytest.raises(ValueError): tp.tile_from_xy(-180, 90, zoom, on_edge_use="invalid") def test_tiles_from_bounds(grid_definition_proj): # global pyramids tp = TilePyramid("geodetic") parent = tp.tile(8, 5, 5) from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)]) children = set([t.id for t in parent.get_children()]) assert from_bounds == children # non-global pyramids tp = TilePyramid(grid_definition_proj) parent = tp.tile(8, 0, 0) from_bounds = set([t.id for t in tp.tiles_from_bounds(parent.bounds(), 9)]) children = set([t.id for t in parent.get_children()]) assert from_bounds == children def test_tiles_from_bounds_batch_by_row(): tp = TilePyramid("geodetic") bounds = (0, 0, 90, 90) zoom = 8 tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row") assert isinstance(tiles, GeneratorType) assert list(tiles) previous_row = None tiles = 0 for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"): assert isinstance(tile_row, GeneratorType) previous_tile = None for tile in tile_row: tiles += 1 if previous_row is None: if previous_tile is not None: assert tile.col == previous_tile.col + 1 else: if previous_tile is not None: assert tile.col == 
previous_tile.col + 1 assert tile.row == previous_tile.row assert tile.row == previous_row + 1 previous_tile = tile previous_row = tile.row assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom))) def test_tiles_from_bounds_batch_by_column(): tp = TilePyramid("geodetic") bounds = (0, 0, 90, 90) zoom = 8 tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="column") assert isinstance(tiles, GeneratorType) assert list(tiles) previous_column = None tiles = 0 for tile_column in tp.tiles_from_bounds(bounds, zoom, batch_by="column"): assert isinstance(tile_column, GeneratorType) previous_tile = None for tile in tile_column: tiles += 1 if previous_column is None: if previous_tile is not None: assert tile.row == previous_tile.row + 1 else: if previous_tile is not None: assert tile.row == previous_tile.row + 1 assert tile.col == previous_tile.col assert tile.col == previous_column + 1 previous_tile = tile previous_column = tile.col assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom))) def test_tiles_from_bounds_batch_by_row_antimeridian_bounds(): tp = TilePyramid("geodetic") bounds = (0, 0, 185, 95) zoom = 8 tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row") assert isinstance(tiles, GeneratorType) assert list(tiles) previous_row = None tiles = 0 for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"): assert isinstance(tile_row, GeneratorType) previous_tile = None for tile in tile_row: tiles += 1 if previous_row is None: if previous_tile is not None: assert tile.col > previous_tile.col else: if previous_tile is not None: assert tile.col > previous_tile.col assert tile.row == previous_tile.row assert tile.row > previous_row previous_tile = tile previous_row = tile.row assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom))) def test_tiles_from_bounds_batch_by_row_both_antimeridian_bounds(): tp = TilePyramid("geodetic") bounds = (-185, 0, 185, 95) zoom = 8 tiles = tp.tiles_from_bounds(bounds, zoom, batch_by="row") assert isinstance(tiles, GeneratorType) assert list(tiles) previous_row = None tiles = 0 for tile_row in tp.tiles_from_bounds(bounds, zoom, batch_by="row"): assert isinstance(tile_row, GeneratorType) previous_tile = None for tile in tile_row: tiles += 1 if previous_row is None: if previous_tile is not None: assert tile.col == previous_tile.col + 1 else: if previous_tile is not None: assert tile.col == previous_tile.col + 1 assert tile.row == previous_tile.row assert tile.row == previous_row + 1 previous_tile = tile previous_row = tile.row assert tiles == len(list(tp.tiles_from_bounds(bounds, zoom))) def test_snap_bounds(): bounds = (0, 1, 2, 3) tp = TilePyramid("geodetic") zoom = 8 snapped = snap_bounds(bounds=bounds, tile_pyramid=tp, zoom=zoom) control = unary_union( [tile.bbox() for tile in tp.tiles_from_bounds(bounds, zoom)] ).bounds assert snapped == control pixelbuffer = 10 snapped = snap_bounds( bounds=bounds, tile_pyramid=tp, zoom=zoom, pixelbuffer=pixelbuffer ) control = unary_union( [tile.bbox(pixelbuffer) for tile in tp.tiles_from_bounds(bounds, zoom)] ).bounds assert snapped == control def test_deprecated(): tp = TilePyramid("geodetic") assert tp.type assert tp.srid assert tp.tile_x_size(0) assert tp.tile_y_size(0) assert tp.tile_height(0) assert tp.tile_width(0)
2.359375
2
setup.py
giovannicuriel/report_builder
0
12233
# -*- coding: utf-8 -*-
"""
setup.py script
"""
import io
from collections import OrderedDict
from setuptools import setup, find_packages

with io.open('README.md', 'rt', encoding='utf8') as f:
    README = f.read()

setup(
    name='reportbuilder',
    version='0.0.1',
    url='http://github.com/giovannicuriel/report-builder',
    project_urls=OrderedDict((
        ('Code', 'https://github.com/giovannicuriel/report-builder.git'),
        ('Issue tracker', 'https://github.com/giovannicuriel/report-builder/issues'),
    )),
    license='BSD-2-Clause',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Sample package for Python training courses',
    long_description=README,
    packages=["reportbuilder"],
    include_package_data=True,
    zip_safe=False,
    platforms=["any"],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'flask==1.1.1'
    ],
    entry_points={
        'console_scripts': [
            'report-builder = reportbuilder.app:main'
        ]
    }
)
1.328125
1
pyfunds/option.py
lucaruzzola/pyfunds
6
12234
<reponame>lucaruzzola/pyfunds from __future__ import annotations from abc import ABC, abstractmethod from typing import Callable, Generic, TypeVar T = TypeVar("T") U = TypeVar("U") class NoElement(Exception): pass class Option(ABC, Generic[T]): def __init__(self): super().__init__() @staticmethod def apply(value: T): return Some(value) if value is not None else Nothing() @abstractmethod def _is_empty(self) -> bool: pass @abstractmethod def get(self) -> T: pass def get_or_else(self, default: T) -> T: return default if self._is_empty() else self.get() @staticmethod def when(condition: bool, value: T) -> Option[T]: return Some(value) if condition else Nothing() def map(self, f: Callable[[T], U]) -> Option[U]: return Some(f(self.get())) if not self._is_empty() else self def flat_map(self, f: Callable[[T], Option[U]]) -> Option[U]: return f(self.get()) if not self._is_empty() else self def fold(self, default: U, fs: Callable[[T], U]) -> U: return default if self._is_empty() else fs(self.get()) def __str__(self) -> str: return f"Option is {'Some' if not self._is_empty() else 'Nothing'}" + ( f", with value: {self.get().__repr__()} of type {type(self.get())}" if not self._is_empty() else "" ) def __repr__(self) -> str: return "pyfunds.Option" def __eq__(self, other: Option[T]) -> bool: if self._is_empty(): return other._is_empty() elif other._is_empty(): return False else: return self.get() == other.get() def __ne__(self, other: Option[T]) -> bool: return not self == other class Some(Option[T]): def __init__(self, value: T): super().__init__() self._value = value def _is_empty(self) -> bool: return False def get(self) -> T: return self._value def __repr__(self) -> str: return f"pyfunds.Some({self.get()})" class Nothing(Option[T]): def __init__(self): super().__init__() def _is_empty(self) -> bool: return True def get(self) -> T: raise NoElement def __repr__(self) -> str: return "pyfunds.Nothing"
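# Illustrative usage sketch (the values are hypothetical): the Option API defined
# above, showing apply, map, get_or_else and when.
if __name__ == "__main__":
    maybe_port = Option.apply(8080)                         # Some(8080)
    print(maybe_port.map(lambda p: p + 1).get_or_else(80))  # 8081
    missing = Option.apply(None)                            # Nothing()
    print(missing.get_or_else("default"))                   # default
    print(Option.when(2 > 1, "ok") == Some("ok"))           # True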
2.84375
3
generate_dataset/visualize_mask.py
Kaju-Bubanja/PoseCNN
20
12235
<reponame>Kaju-Bubanja/PoseCNN<filename>generate_dataset/visualize_mask.py<gh_stars>10-100 import cv2 import rosbag import rospy from cv_bridge import CvBridge def main(): # bag = rosbag.Bag("/home/satco/PycharmProjects/PoseCNN/bag/dataset_one_box.bag") bag = rosbag.Bag("/home/satco/PycharmProjects/PoseCNN/bag/test.bag") # topics = ["/camera1/color/image_raw", "/camera2/color/image_raw"] topics = ["/camera/color/image_raw"] # counter = -20 counter = 0 bridge = CvBridge() for topic, msg, t in bag.read_messages(topics=topics, start_time=rospy.Time(1537799716, 30952)): print(msg.header.stamp) # if topic == "/camera1/color/image_raw": if topic == "/camera/color/image_raw": # print(msg.header.stamp) if counter < 0: counter += 1 continue # print("Showing image " + str(counter)) image = bridge.imgmsg_to_cv2(msg, "bgr8") mask_name = "data/images/cube" + str(counter) + ".png" mask = cv2.imread(mask_name) alpha = 0.5 image_with_mask = cv2.addWeighted(mask, alpha, image, 1 - alpha, 0) cv2.imshow("Image with mask", image_with_mask) cv2.waitKey(5000) counter += 1 if __name__ == "__main__": main()
2.578125
3
itembase/core/urls/location_urls.py
wedwardbeck/ibase
0
12236
from django.urls import path from itembase.core.views.location_views import LocationAddressCreateView, LocationAddressDetailView, \ LocationAddressUpdateView, LocationCreateView, LocationDeleteView, LocationDetailView, LocationListView, \ LocationUpdateView app_name = "locations" urlpatterns = [ path("", LocationListView.as_view(), name="list"), path("new/", LocationCreateView.as_view(), name="new"), path("edit/<int:pk>/", LocationUpdateView.as_view(), name="edit"), path("delete/<int:pk>/", LocationDeleteView.as_view(), name="delete"), path("<int:pk>/", LocationDetailView.as_view(), name="view"), path('<int:pk>/address-new/', LocationAddressCreateView.as_view(), name='address-new'), path('address/<int:pk>', LocationAddressDetailView.as_view(), name='address-view'), path('address/edit/<int:pk>', LocationAddressUpdateView.as_view(), name='address-edit'), ]
1.984375
2
web/addons/product_margin/wizard/product_margin.py
diogocs1/comps
1
12237
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## import time from openerp.osv import fields, osv from openerp.tools.translate import _ class product_margin(osv.osv_memory): _name = 'product.margin' _description = 'Product Margin' _columns = { 'from_date': fields.date('From'), 'to_date': fields.date('To'), 'invoice_state': fields.selection([ ('paid', 'Paid'), ('open_paid', 'Open and Paid'), ('draft_open_paid', 'Draft, Open and Paid'), ], 'Invoice State', select=True, required=True), } _defaults = { 'from_date': time.strftime('%Y-01-01'), 'to_date': time.strftime('%Y-12-31'), 'invoice_state': "open_paid", } def action_open_window(self, cr, uid, ids, context=None): """ @param cr: the current row, from the database cursor, @param uid: the current user’s ID for security checks, @param ids: the ID or list of IDs if we want more than one @return: """ context = dict(context or {}) def ref(module, xml_id): proxy = self.pool.get('ir.model.data') return proxy.get_object_reference(cr, uid, module, xml_id) model, search_view_id = ref('product', 'product_search_form_view') model, graph_view_id = ref('product_margin', 'view_product_margin_graph') model, form_view_id = ref('product_margin', 'view_product_margin_form') model, tree_view_id = ref('product_margin', 'view_product_margin_tree') #get the current product.margin object to obtain the values from it records = self.browse(cr, uid, ids, context=context) record = records[0] context.update(invoice_state=record.invoice_state) if record.from_date: context.update(date_from=record.from_date) if record.to_date: context.update(date_to=record.to_date) views = [ (tree_view_id, 'tree'), (form_view_id, 'form'), (graph_view_id, 'graph') ] return { 'name': _('Product Margins'), 'context': context, 'view_type': 'form', "view_mode": 'tree,form,graph', 'res_model': 'product.product', 'type': 'ir.actions.act_window', 'views': views, 'view_id': False, 'search_view_id': search_view_id, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
1.726563
2
scripts/analysis_one.py
VikkiMba/Programmable-matter
0
12238
<filename>scripts/analysis_one.py name = input('Enter file name: ') lst=list() lst2=list() with open(name) as f: for line in f: #print(line) blops=line.rstrip() blop=blops.split() #for val in blop: my_lst = [float(val) for val in blop]#list_comprehension for num in my_lst: if num <= 3.5: lst.append(num) if num >=4: lst2.append(num) #num = float(val) #print(num) #text = f.read() #print(text) #print(type(text)) #print(type(line)) #print(blop) #print(type(blop)) #print(lst) #print(lst2) import itertools import matplotlib.pyplot as plt import seaborn as sns #for (f, b) in zip(lst2 ,lst): #print (f, b) #print(type(my_lst)) with open('neu_sam_4b.csv', 'w') as fh: for (f, b) in zip(lst, lst2): print(f,',',b, file=fh) ext=lst force=lst2 plt.plot(ext, force) plt.xlabel('Extension') plt.ylabel('Force') plt.title('sample with 0.25wt%') plt.tight_layout() plt.show() #for digit in lst: #print(digit, file=fh)
3.203125
3
account/models.py
Hasanozzaman-Khan/Django-User-Authentication
0
12239
<filename>account/models.py<gh_stars>0 from django.db import models from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin, BaseUserManager from PIL import Image # Create your models here. class Home(models.Model): pass class CustomUserManager(BaseUserManager): """Manager for user profiles""" def create_user(self, email, first_name, last_name, password=None): """Create a new user profile""" if not email: raise ValueError("User must have an email address.") email = self.normalize_email(email) user = self.model(email=email, first_name=first_name, last_name=last_name) user.set_password(password) user.save(using=self._db) return user def create_superuser(self, email, first_name, last_name, password): """Create and save a new superuser with given details""" user = self.create_user(email, first_name, last_name, password) user.is_superuser = True user.is_staff = True user.save(using=self._db) return user class CustomRegisterModel(AbstractBaseUser, PermissionsMixin): """ Database model for users in the system """ email = models.EmailField(max_length=255, unique=True) first_name = models.CharField(max_length=255) last_name = models.CharField(max_length=255) is_active = models.BooleanField(default=True) is_staff = models.BooleanField(default=False) is_email_verified = models.BooleanField(default=False) objects = CustomUserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['first_name', 'last_name'] def get_full_name(self): """Retrieve full name of user""" return self.first_name + " " + self.last_name def get_short_name(self): """Retrieve short name of user""" return self.first_name def __str__(self): """Return string representation of our user""" return self.email class ProfileModel(models.Model): user = models.OneToOneField(CustomRegisterModel, on_delete=models.CASCADE) image = models.ImageField(default='default.jpg', upload_to='profile_picture') def __str__(self): return f"{self.user.first_name}'s profile" def save(self, *args, **kwargs): super().save(*args, **kwargs) img = Image.open(self.image.path) if img.height > 300 or img.width > 300: output_size = (300, 300) img.thumbnail(output_size) img.save(self.image.path)
2.65625
3
Numeric Patterns/numericpattern37.py
vaidehisinha1/Python-PatternHouse
0
12240
<reponame>vaidehisinha1/Python-PatternHouse
height = int(input())
for i in range(1, height+1):
    for j in range(1, i+1):
        m = i*j
        if (m <= 9):
            print("", m, end=" ")
        else:
            print(m, end=" ")
    print()

# Sample Input :- 5
# Output :-
# 1
# 2 4
# 3 6 9
# 4 8 12 16
# 5 10 15 20 25
3.625
4
reviewboard/search/testing.py
pombredanne/reviewboard
0
12241
"""Search-related testing utilities.""" import tempfile import time from contextlib import contextmanager import haystack from django.conf import settings from django.core.management import call_command from djblets.siteconfig.models import SiteConfiguration from reviewboard.admin.siteconfig import load_site_config def reindex_search(): """Rebuild the search index.""" call_command('rebuild_index', interactive=False) # On Whoosh, the above is asynchronous, and we can end up trying to read # before we end up writing, occasionally breaking tests. We need to # introduce just a bit of a delay. # # Yeah, this is still sketchy, but we can't turn off the async behavior # or receive notification that the write has completed. time.sleep(0.1) @contextmanager def search_enabled(on_the_fly_indexing=False, backend_id='whoosh'): """Temporarily enable indexed search. Args: on_the_fly_indexing (bool, optional): Whether or not to enable on-the-fly indexing. backend_id (unicode, optional): The search backend to enable. Valid options are "whoosh" (default) and "elasticsearch". """ siteconfig = SiteConfiguration.objects.get_current() old_backend_id = siteconfig.get('search_backend_id') old_backend_settings = siteconfig.get('search_backend_settings') if backend_id == 'whoosh': backend_settings = { 'PATH': tempfile.mkdtemp(suffix='search-index', dir=settings.SITE_DATA_DIR), 'STORAGE': 'file', } elif backend_id == 'elasticsearch': backend_settings = { 'INDEX_NAME': 'reviewboard-tests', 'URL': 'http://es.example.com:9200/', } else: raise NotImplementedError('Unexpected backend ID "%s"' % backend_id) siteconfig.settings.update({ 'search_enable': True, 'search_backend_id': backend_id, 'search_backend_settings': { backend_id: backend_settings, }, 'search_on_the_fly_indexing': on_the_fly_indexing, }) siteconfig.save(update_fields=('settings',)) load_site_config() try: yield haystack.connections['default'].reset_sessions() finally: siteconfig.settings.update({ 'search_enable': False, 'search_backend_id': old_backend_id, 'search_backend_settings': old_backend_settings, 'search_on_the_fly_indexing': False, }) siteconfig.save(update_fields=('settings',)) load_site_config()
2.203125
2
pydron/config/config.py
DelphianCalamity/pydron
5
12242
# Copyright (C) 2015 <NAME> import json import os.path from remoot import pythonstarter, smartstarter import anycall from pydron.backend import worker from pydron.interpreter import scheduler, strategies from twisted.internet import defer preload_packages = [] def load_config(configfile=None): if not configfile: candidates = [] if "PYDRON_CONF" in os.environ: candidates.append(os.environ["PYDRON_CONF"]) candidates.append(os.path.abspath("pydron.conf")) candidates.append(os.path.expanduser("~/pydron.conf")) candidates.append("/etc/pydron.conf") for candidate in candidates: if os.path.exists(candidate): configfile = candidate break else: raise ValueError("Config file could not be found. Looked for %s" % repr(candidates)) with open(configfile, 'r') as f: cfg = json.load(f) def convert(obj): if isinstance(obj, dict): return {k:convert(v) for k,v in obj.iteritems()} elif isinstance(obj, list): return [convert(v) for v in obj] elif isinstance(obj, unicode): return str(obj) else: return obj cfg = convert(cfg) return cfg def create_scheduler(config, pool): if "scheduler" not in config: strategy_name = "trivial" else: strategy_name = config["scheduler"] if strategy_name == "trivial": strategy = strategies.TrivialSchedulingStrategy(pool) strategy = strategies.VerifySchedulingStrategy(strategy) else: raise ValueError("Unsupported scheduler: %s" % strategy_name) return scheduler.Scheduler(pool, strategy) def create_pool(config, rpcsystem, error_handler): """ starts workers and returns a pool of them. Returns two callbacks: * The first callbacks with the pool as soon as there is one worker. Errbacks if all starters failed to create a worker. * The second calls back once all workers have been started. This one can be cancelled. The given `error_handler` is invoked for every failed start. """ starters = [] for starter_conf in config["workers"]: starters.extend(_create_starters(starter_conf, rpcsystem)) pool = worker.Pool() ds = [] for i, starter in enumerate(starters): d = starter.start() def success(worker, i, starter): worker.nicename = "#%s" % i pool.add_worker(worker) def fail(failure): error_handler(failure) return failure d.addCallback(success, i, starter) ds.append(d) d = defer.DeferredList(ds, fireOnOneErrback=True, consumeErrors=True) def on_success(result): return pool def on_fail(firsterror): return firsterror.value.subFailure d.addCallbacks(on_success, on_fail) return d def create_rpc_system(conf): port_range = _parse_port_range(conf.get("data_ports", 0)) return anycall.create_tcp_rpc_system(port_range = port_range) def _create_starters(conf, rpcsystem): global preload_packages import pydron data_ports = _parse_port_range(conf.get("data_ports", 0)) preconnect = conf.get("preconnect", True) if 0 in data_ports: # use automatically selected ports. 
this is not compatible # with preconnect preconnect = False data_ports = [0] if data_ports != [0] and len(data_ports) <= conf["cores"]: if 0 not in data_ports: raise ValueError("Not enough ports configured for %r" % conf) starters = [] for i in range(conf["cores"]): starter_type = conf["type"] if starter_type == "multicore": starter = _multicore_starter(conf, rpcsystem) elif starter_type == "ssh": starter = _ssh_starter(conf, rpcsystem) elif starter_type == "cloud": starter = _ec2_starter(conf, rpcsystem) else: raise ValueError("Not supported worker type %s" % repr(starter_type)) if data_ports == [0]: port = 0 else: port = data_ports[i] smart = smartstarter.SmartStarter(starter, rpcsystem, anycall.create_tcp_rpc_system, list(preload_packages)+[pydron], preconnect = preconnect, data_port = port) starters.append(worker.WorkerStarter(smart)) return starters def _multicore_starter(conf, rpcsystem): return pythonstarter.LocalStarter() def _ssh_starter(conf, rpcsystem): starter = pythonstarter.SSHStarter(conf["hostname"], username=conf["username"], password=conf.get("password", None), private_key_files=conf.get("private_key_files", []), private_keys=conf.get("private_keys", []), tmp_dir=conf.get("tmp_dir", "/tmp")) return starter def _ec2_starter(conf, rpcsystem): starter = pythonstarter.EC2Starter(username=conf["username"], provider=conf["provider"], provider_keyid=conf["accesskeyid"], provider_key=conf["accesskey"], image_id=conf["imageid"], size_id=conf["sizeid"], public_key_file=conf["publickey"], private_key_file=conf["privatekey"], tmp_dir=conf.get("tmp_dir", "/tmp")) return starter def _parse_port_range(ports): try: return [int(ports)] except ValueError: pass if isinstance(ports, list): return [int(x) for x in ports] min_port, max_port = str(ports).split('-', 1) min_port = int(min_port) max_port = int(max_port) return range(min_port, max_port + 1)
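# Illustrative sketch (hypothetical values, not part of the project): the rough shape
# of a pydron.conf that load_config()/create_pool() above would accept; the hostname,
# key path and port range below are placeholders.
EXAMPLE_CONFIG = {
    "scheduler": "trivial",
    "data_ports": "10000-10100",
    "workers": [
        {"type": "multicore", "cores": 4},
        {
            "type": "ssh",
            "cores": 8,
            "hostname": "node1.example.com",
            "username": "pydron",
            "private_key_files": ["~/.ssh/id_rsa"],
        },
    ],
}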
1.9375
2
astropy/tests/plugins/display.py
guntbert/astropy
0
12243
# Licensed under a 3-clause BSD style license - see LICENSE.rst """ This plugin provides customization of the header displayed by pytest for reporting purposes. """ import os import sys import datetime import locale import math from collections import OrderedDict from astropy.tests.helper import ignore_warnings from astropy.utils.introspection import resolve_name PYTEST_HEADER_MODULES = OrderedDict([('Numpy', 'numpy'), ('Scipy', 'scipy'), ('Matplotlib', 'matplotlib'), ('h5py', 'h5py'), ('Pandas', 'pandas')]) # This always returns with Astropy's version from astropy import __version__ TESTED_VERSIONS = OrderedDict([('Astropy', __version__)]) def pytest_report_header(config): try: stdoutencoding = sys.stdout.encoding or 'ascii' except AttributeError: stdoutencoding = 'ascii' args = config.args # TESTED_VERSIONS can contain the affiliated package version, too if len(TESTED_VERSIONS) > 1: for pkg, version in TESTED_VERSIONS.items(): if pkg not in ['Astropy', 'astropy_helpers']: s = "\nRunning tests with {} version {}.\n".format( pkg, version) else: s = "\nRunning tests with Astropy version {}.\n".format( TESTED_VERSIONS['Astropy']) # Per https://github.com/astropy/astropy/pull/4204, strip the rootdir from # each directory argument if hasattr(config, 'rootdir'): rootdir = str(config.rootdir) if not rootdir.endswith(os.sep): rootdir += os.sep dirs = [arg[len(rootdir):] if arg.startswith(rootdir) else arg for arg in args] else: dirs = args s += "Running tests in {}.\n\n".format(" ".join(dirs)) s += "Date: {}\n\n".format(datetime.datetime.now().isoformat()[:19]) from platform import platform plat = platform() if isinstance(plat, bytes): plat = plat.decode(stdoutencoding, 'replace') s += f"Platform: {plat}\n\n" s += f"Executable: {sys.executable}\n\n" s += f"Full Python Version: \n{sys.version}\n\n" s += "encodings: sys: {}, locale: {}, filesystem: {}".format( sys.getdefaultencoding(), locale.getpreferredencoding(), sys.getfilesystemencoding()) s += '\n' s += f"byteorder: {sys.byteorder}\n" s += "float info: dig: {0.dig}, mant_dig: {0.dig}\n\n".format( sys.float_info) for module_display, module_name in PYTEST_HEADER_MODULES.items(): try: with ignore_warnings(DeprecationWarning): module = resolve_name(module_name) except ImportError: s += f"{module_display}: not available\n" else: try: version = module.__version__ except AttributeError: version = 'unknown (no __version__ attribute)' s += f"{module_display}: {version}\n" # Helpers version if 'astropy_helpers' in TESTED_VERSIONS: astropy_helpers_version = TESTED_VERSIONS['astropy_helpers'] else: try: from astropy.version import astropy_helpers_version except ImportError: astropy_helpers_version = None if astropy_helpers_version: s += f"astropy_helpers: {astropy_helpers_version}\n" special_opts = ["remote_data", "pep8"] opts = [] for op in special_opts: op_value = getattr(config.option, op, None) if op_value: if isinstance(op_value, str): op = ': '.join((op, op_value)) opts.append(op) if opts: s += "Using Astropy options: {}.\n".format(", ".join(opts)) return s def pytest_terminal_summary(terminalreporter): """Output a warning to IPython users in case any tests failed.""" try: get_ipython() except NameError: return if not terminalreporter.stats.get('failed'): # Only issue the warning when there are actually failures return terminalreporter.ensure_newline() terminalreporter.write_line( 'Some tests are known to fail when run from the IPython prompt; ' 'especially, but not limited to tests involving logging and warning ' 'handling. 
Unless you are certain as to the cause of the failure, ' 'please check that the failure occurs outside IPython as well. See ' 'http://docs.astropy.org/en/stable/known_issues.html#failing-logging-' 'tests-when-running-the-tests-in-ipython for more information.', yellow=True, bold=True)
2.1875
2
packages/api-server/api_server/routes/lifts.py
Sald-for-Communication-and-IT/rmf-web
0
12244
<reponame>Sald-for-Communication-and-IT/rmf-web<gh_stars>0 from typing import Any, List, cast from fastapi import Depends from rx import operators as rxops from api_server.base_app import BaseApp from api_server.fast_io import FastIORouter, WatchRequest from api_server.models import Lift, LiftHealth, LiftRequest, LiftState from api_server.repositories import RmfRepository from .utils import rx_watcher class LiftsRouter(FastIORouter): def __init__(self, app: BaseApp): super().__init__(tags=["Lifts"]) @self.get("", response_model=List[Lift]) async def get_lifts(rmf_repo: RmfRepository = Depends(app.rmf_repo)): return await rmf_repo.get_lifts() @self.get("/{lift_name}/state", response_model=LiftState) async def get_lift_state( lift_name: str, rmf_repo: RmfRepository = Depends(app.rmf_repo) ): """ Available in socket.io """ return await rmf_repo.get_lift_state(lift_name) @self.watch("/{lift_name}/state") async def watch_lift_state(req: WatchRequest, lift_name: str): lift_state = await get_lift_state(lift_name, RmfRepository(req.user)) if lift_state is not None: await req.emit(lift_state.dict()) rx_watcher( req, app.rmf_events().lift_states.pipe( rxops.filter(lambda x: cast(LiftState, x).lift_name == lift_name), rxops.map(cast(Any, lambda x: cast(LiftState, x).dict())), ), ) @self.get("/{lift_name}/health", response_model=LiftHealth) async def get_lift_health( lift_name: str, rmf_repo: RmfRepository = Depends(app.rmf_repo) ): """ Available in socket.io """ return await rmf_repo.get_lift_health(lift_name) @self.watch("/{lift_name}/health") async def watch_lift_health(req: WatchRequest, lift_name: str): health = await get_lift_health(lift_name, RmfRepository(req.user)) if health is not None: await req.emit(health.dict()) rx_watcher( req, app.rmf_events().lift_health.pipe( rxops.filter(lambda x: cast(LiftHealth, x).id_ == lift_name), rxops.map(cast(Any, lambda x: cast(LiftHealth, x).dict())), ), ) @self.post("/{lift_name}/request") def _post_lift_request( lift_name: str, lift_request: LiftRequest, ): app.rmf_gateway().request_lift( lift_name, lift_request.destination, lift_request.request_type, lift_request.door_mode, )
2.171875
2
src/opnsense/scripts/suricata/queryAlertLog.py
ass-a2s/opnsense-core
2
12245
#!/usr/local/bin/python3.6 """ Copyright (c) 2015-2019 <NAME> <<EMAIL>> All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------------- query suricata alert log """ import sys import os.path import re import sre_constants import shlex import ujson sys.path.insert(0, "/usr/local/opnsense/site-python") from log_helper import reverse_log_reader from params import update_params from lib import suricata_alert_log if __name__ == '__main__': # handle parameters parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''} update_params(parameters) # choose logfile by number if parameters['fileid'].isdigit(): suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid'])) else: suricata_log = suricata_alert_log if parameters['limit'].isdigit(): limit = int(parameters['limit']) else: limit = 0 if parameters['offset'].isdigit(): offset = int(parameters['offset']) else: offset = 0 data_filters = {} data_filters_comp = {} for filter_txt in shlex.split(parameters['filter']): filterField = filter_txt.split('/')[0] if filter_txt.find('/') > -1: data_filters[filterField] = '/'.join(filter_txt.split('/')[1:]) filter_regexp = data_filters[filterField] filter_regexp = filter_regexp.replace('*', '.*') filter_regexp = filter_regexp.lower() try: data_filters_comp[filterField] = re.compile(filter_regexp) except sre_constants.error: # remove illegal expression # del data_filters[filterField] data_filters_comp[filterField] = re.compile('.*') # filter one specific log line if 'filepos' in data_filters and data_filters['filepos'].isdigit(): log_start_pos = int(data_filters['filepos']) else: log_start_pos = None # query suricata eve log result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]} if os.path.exists(suricata_log): for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos): try: record = ujson.loads(line['line']) except ValueError: # can not handle line record = {} # only process valid alert items if 'alert' in record: # add position in file record['filepos'] = line['pos'] record['fileid'] = parameters['fileid'] # flatten structure record['alert_sid'] = record['alert']['signature_id'] record['alert_action'] = record['alert']['action'] record['alert'] = record['alert']['signature'] # use filters on data (using regular expressions) 
do_output = True for filterKeys in data_filters: filter_hit = False for filterKey in filterKeys.split(','): if filterKey in record and data_filters_comp[filterKeys].match( ('%s' % record[filterKey]).lower()): filter_hit = True if not filter_hit: do_output = False if do_output: result['total_rows'] += 1 if (len(result['rows']) < limit or limit == 0) and result['total_rows'] >= offset: result['rows'].append(record) elif result['total_rows'] > offset + limit: # do not fetch data until end of file... break # only try to fetch one line when filepos is given if log_start_pos is not None: break # output results print(ujson.dumps(result))
1.476563
1
cairis/gui/DictionaryListCtrl.py
RachelLar/cairis_update
0
12246
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import wx from cairis.core.armid import * from DictionaryEntryDialog import DictionaryEntryDialog class DictionaryListCtrl(wx.ListCtrl): def __init__(self,parent): wx.ListCtrl.__init__(self,parent,PROJECTSETTINGS_LISTDICTIONARY_ID,size=wx.DefaultSize,style=wx.LC_REPORT | wx.LC_SORT_ASCENDING) self.keys = [] self.InsertColumn(0,'Name') self.SetColumnWidth(0,150) self.InsertColumn(1,'Definition') self.SetColumnWidth(1,300) self.theSelectedIdx = -1 self.theMenu = wx.Menu() self.theMenu.Append(DICTIONARYLISTCTRL_MENUADD_ID,'Add') self.theMenu.Append(DICTIONARYLISTCTRL_MENUDELETE_ID,'Delete') self.Bind(wx.EVT_LIST_ITEM_RIGHT_CLICK,self.OnRightDown) self.Bind(wx.EVT_LIST_ITEM_SELECTED,self.OnItemSelected) self.Bind(wx.EVT_LIST_ITEM_DESELECTED,self.OnItemDeselected) self.Bind(wx.EVT_LIST_ITEM_ACTIVATED,self.onEntryActivated) wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUADD_ID,self.onAddEntry) wx.EVT_MENU(self.theMenu,DICTIONARYLISTCTRL_MENUDELETE_ID,self.onDeleteEntry) def OnItemSelected(self,evt): self.theSelectedIdx = evt.GetIndex() def OnItemDeselected(self,evt): self.theSelectedIdx = -1 def OnRightDown(self,evt): self.PopupMenu(self.theMenu) def onAddEntry(self,evt): dlg = DictionaryEntryDialog(self) if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID): name = dlg.name() definition = dlg.definition() idx = self.GetItemCount() self.InsertStringItem(idx,name) self.SetStringItem(idx,1,definition) def onDeleteEntry(self,evt): if (self.theSelectedIdx == -1): errorText = 'No entry selected' errorLabel = 'Delete definition' dlg = wx.MessageDialog(self,errorText,errorLabel,wx.OK) dlg.ShowModal() dlg.Destroy() else: selectedValue = self.GetItemText(self.theSelectedIdx) self.DeleteItem(self.theSelectedIdx) def onEntryActivated(self,evt): self.theSelectedIdx = evt.GetIndex() name = self.GetItemText(self.theSelectedIdx) definition = self.GetItem(self.theSelectedIdx,1) dlg = DictionaryEntryDialog(self,name,definition.GetText()) if (dlg.ShowModal() == DICTIONARYENTRY_BUTTONCOMMIT_ID): self.SetStringItem(self.theSelectedIdx,0,dlg.name()) self.SetStringItem(self.theSelectedIdx,1,dlg.definition()) def load(self,entries): self.keys = entries.keys() self.keys.sort() for name in self.keys: idx = self.GetItemCount() self.InsertStringItem(idx,name) self.SetStringItem(idx,1,entries[name]) def dimensions(self): entries = [] for x in range(self.GetItemCount()): name = self.GetItemText(x) definition = self.GetItem(x,1) entries.append((name,definition.GetText())) return entries
1.492188
1
old_test/test-large.py
briandobbins/pynio
0
12247
from __future__ import print_function, division import numpy as np import Nio import time, os # # Creating a file # init_time = time.clock() ncfile = 'test-large.nc' if (os.path.exists(ncfile)): os.system("/bin/rm -f " + ncfile) opt = Nio.options() opt.Format = "LargeFile" opt.PreFill = False file = Nio.open_file(ncfile, 'w', options=opt) file.title = "Testing large files and dimensions" file.create_dimension('big', 2500000000) bigvar = file.create_variable('bigvar', "b", ('big',)) print("created bigvar") # note it is incredibly slow to write a scalar to a large file variable # so create an temporary variable x that will get assigned in steps x = np.empty(1000000,dtype = 'int8') #print x x[:] = 42 t = list(range(0,2500000000,1000000)) ii = 0 for i in t: if (i == 0): continue print(t[ii],i) bigvar[t[ii]:i] = x[:] ii += 1 x[:] = 84 bigvar[2499000000:2500000000] = x[:] bigvar[-1] = 84 bigvar.units = "big var units" #print bigvar[-1] print(bigvar.dimensions) # check unlimited status for dim in list(file.dimensions.keys()): print(dim, " unlimited: ",file.unlimited(dim)) print(file) print("closing file") print('elapsed time: ',time.clock() - init_time) file.close() #quit() # # Reading a file # print('opening file for read') print('elapsed time: ',time.clock() - init_time) file = Nio.open_file(ncfile, 'r') print('file is open') print('elapsed time: ',time.clock() - init_time) print(file.dimensions) print(list(file.variables.keys())) print(file) print("reading variable") print('elapsed time: ',time.clock() - init_time) x = file.variables['bigvar'] print(x[0],x[1000000],x[249000000],x[2499999999]) print("max and min") min = x[:].min() max = x[:].max() print(min, max) print('elapsed time: ',time.clock() - init_time) # check unlimited status for dim in list(file.dimensions.keys()): print(dim, " unlimited: ",file.unlimited(dim)) print("closing file") print('elapsed time: ',time.clock() - init_time) file.close()
2.890625
3
eeauditor/auditors/aws/Amazon_ECS_Auditor.py
kbhagi/ElectricEye
442
12248
#This file is part of ElectricEye. #SPDX-License-Identifier: Apache-2.0 #Licensed to the Apache Software Foundation (ASF) under one #or more contributor license agreements. See the NOTICE file #distributed with this work for additional information #regarding copyright ownership. The ASF licenses this file #to you under the Apache License, Version 2.0 (the #"License"); you may not use this file except in compliance #with the License. You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, #software distributed under the License is distributed on an #"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #KIND, either express or implied. See the License for the #specific language governing permissions and limitations #under the License. import boto3 import datetime from check_register import CheckRegister registry = CheckRegister() # import boto3 clients ecs = boto3.client("ecs") # loop through ECS Clusters def list_clusters(cache): response = cache.get("list_clusters") if response: return response cache["list_clusters"] = ecs.list_clusters() return cache["list_clusters"] @registry.register_check("ecs") def ecs_cluster_container_insights_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECS.1] ECS clusters should have container insights enabled""" response = list_clusters(cache) myEcsClusters = response["clusterArns"] for clusters in myEcsClusters: clusterArn = str(clusters) try: response = ecs.describe_clusters(clusters=[clusterArn]) for clusterinfo in response["clusters"]: clusterName = str(clusterinfo["clusterName"]) ecsClusterArn = str(clusterinfo["clusterArn"]) for settings in clusterinfo["settings"]: contInsightsCheck = str(settings["value"]) # ISO Time iso8601Time = ( datetime.datetime.utcnow() .replace(tzinfo=datetime.timezone.utc) .isoformat() ) if contInsightsCheck == "disabled": finding = { "SchemaVersion": "2018-10-08", "Id": ecsClusterArn + "/ecs-cluster-container-insights-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": ecsClusterArn, "AwsAccountId": awsAccountId, "Types": [ "Software and Configuration Checks/AWS Security Best Practices" ], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "LOW"}, "Confidence": 99, "Title": "[ECS.1] ECS clusters should have container insights enabled", "Description": "ECS cluster " + clusterName + " does not have container insights enabled. 
Refer to the remediation instructions to remediate this behavior", "Remediation": { "Recommendation": { "Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide", "Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsCluster", "Id": ecsClusterArn, "Partition": awsPartition, "Region": awsRegion, "Details": {"Other": {"ClusterName": clusterName}}, } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF DE.AE-3", "NIST SP 800-53 AU-6", "NIST SP 800-53 CA-7", "NIST SP 800-53 IR-4", "NIST SP 800-53 IR-5", "NIST SP 800-53 IR-8", "NIST SP 800-53 SI-4", "AICPA TSC CC7.2", "ISO 27001:2013 A.12.4.1", "ISO 27001:2013 A.16.1.7", ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE", } yield finding else: finding = { "SchemaVersion": "2018-10-08", "Id": ecsClusterArn + "/ecs-cluster-container-insights-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": ecsClusterArn, "AwsAccountId": awsAccountId, "Types": [ "Software and Configuration Checks/AWS Security Best Practices" ], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[ECS.1] ECS clusters should have container insights enabled", "Description": "ECS cluster " + clusterName + " has container insights enabled.", "Remediation": { "Recommendation": { "Text": "For information on configuring Container Insights for your cluster refer to the Setting Up Container Insights on Amazon ECS for Cluster- and Service-Level Metrics section of the Amazon CloudWatch User Guide", "Url": "https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/deploy-container-insights-ECS-cluster.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsCluster", "Id": ecsClusterArn, "Partition": awsPartition, "Region": awsRegion, "Details": {"Other": {"ClusterName": clusterName}}, } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF DE.AE-3", "NIST SP 800-53 AU-6", "NIST SP 800-53 CA-7", "NIST SP 800-53 IR-4", "NIST SP 800-53 IR-5", "NIST SP 800-53 IR-8", "NIST SP 800-53 SI-4", "AICPA TSC CC7.2", "ISO 27001:2013 A.12.4.1", "ISO 27001:2013 A.16.1.7", ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED", } yield finding except Exception as e: print(e) @registry.register_check("ecs") def ecs_cluster_default_provider_strategy_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured""" response = list_clusters(cache) myEcsClusters = response["clusterArns"] for clusters in myEcsClusters: clusterArn = str(clusters) try: response = ecs.describe_clusters(clusters=[clusterArn]) for clusterinfo in response["clusters"]: clusterName = str(clusterinfo["clusterName"]) ecsClusterArn = str(clusterinfo["clusterArn"]) defaultProviderStratCheck = str(clusterinfo["defaultCapacityProviderStrategy"]) # ISO Time iso8601Time = ( datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat() ) if defaultProviderStratCheck == "[]": finding = { "SchemaVersion": "2018-10-08", "Id": ecsClusterArn + 
"/ecs-cluster-default-provider-strategy-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": ecsClusterArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured", "Description": "ECS cluster " + clusterName + " does not have a default provider strategy configured. Refer to the remediation instructions to remediate this behavior", "Remediation": { "Recommendation": { "Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsCluster", "Id": ecsClusterArn, "Partition": awsPartition, "Region": awsRegion, "Details": {"Other": {"ClusterName": clusterName}}, } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF ID.AM-2", "NIST SP 800-53 CM-8", "NIST SP 800-53 PM-5", "AICPA TSC CC3.2", "AICPA TSC CC6.1", "ISO 27001:2013 A.8.1.1", "ISO 27001:2013 A.8.1.2", "ISO 27001:2013 A.12.5.1", ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE", } yield finding else: finding = { "SchemaVersion": "2018-10-08", "Id": ecsClusterArn + "/ecs-cluster-default-provider-strategy-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": ecsClusterArn, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[ECS.2] ECS clusters should have a default cluster capacity provider strategy configured", "Description": "ECS cluster " + clusterName + " has a default provider strategy configured.", "Remediation": { "Recommendation": { "Text": "For information on cluster capacity provider strategies for your cluster refer to the Amazon ECS Cluster Capacity Providers section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsCluster", "Id": ecsClusterArn, "Partition": awsPartition, "Region": awsRegion, "Details": {"Other": {"ClusterName": clusterName}}, } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF ID.AM-2", "NIST SP 800-53 CM-8", "NIST SP 800-53 PM-5", "AICPA TSC CC3.2", "AICPA TSC CC6.1", "ISO 27001:2013 A.8.1.1", "ISO 27001:2013 A.8.1.2", "ISO 27001:2013 A.12.5.1", ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED", } yield finding except Exception as e: print(e) @registry.register_check("ecs") def ecs_task_definition_privileged_container_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECS.3] ECS Task Definitions should not run privileged containers if not required""" for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']: try: response = 
ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"] taskDefinitionArn = str(response['taskDefinitionArn']) tdefFamily = str(response["family"]) # Loop container definitions for cdef in response["containerDefinitions"]: # ISO Time iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()) cdefName = str(cdef["name"]) # We are going to assume that if there is not a privileged flag...that it is ;) try: privCheck = str(cdef["privileged"]) except: privCheck = 'UNKNOWN' if privCheck != 'False': finding = { "SchemaVersion": "2018-10-08", "Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": taskDefinitionArn + "/" + cdefName, "AwsAccountId": awsAccountId, "Types": [ "Software and Configuration Checks/AWS Security Best Practices", "TTPs/Privilege Escalation" ], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "MEDIUM"}, "Confidence": 99, "Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required", "Description": "ECS Container Definition " + cdefName + " in Task Definition " + taskDefinitionArn + " has defined a Privileged container, which should be avoided unless absolutely necessary. Refer to the remediation instructions to remediate this behavior", "Remediation": { "Recommendation": { "Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsTaskDefinition", "Id": taskDefinitionArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "Other": { "Family": tdefFamily, "ContainerDefinitionName": cdefName } } } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF PR.AC-1", "NIST SP 800-53 AC-1", "NIST SP 800-53 AC-2", "NIST SP 800-53 IA-1", "NIST SP 800-53 IA-2", "NIST SP 800-53 IA-3", "NIST SP 800-53 IA-4", "NIST SP 800-53 IA-5", "NIST SP 800-53 IA-6", "NIST SP 800-53 IA-7", "NIST SP 800-53 IA-8", "NIST SP 800-53 IA-9", "NIST SP 800-53 IA-10", "NIST SP 800-53 IA-11", "AICPA TSC CC6.1", "AICPA TSC CC6.2", "ISO 27001:2013 A.9.2.1", "ISO 27001:2013 A.9.2.2", "ISO 27001:2013 A.9.2.3", "ISO 27001:2013 A.9.2.4", "ISO 27001:2013 A.9.2.6", "ISO 27001:2013 A.9.3.1", "ISO 27001:2013 A.9.4.2", "ISO 27001:2013 A.9.4.3", ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE", } yield finding else: finding = { "SchemaVersion": "2018-10-08", "Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-privileged-container-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": taskDefinitionArn + "/" + cdefName, "AwsAccountId": awsAccountId, "Types": [ "Software and Configuration Checks/AWS Security Best Practices", "TTPs/Privilege Escalation" ], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[ECS.3] ECS Task Definitions should not run privileged containers if not required", "Description": "ECS Container Definition " + cdefName + 
" in Task Definition " + taskDefinitionArn + " has not defined a Privileged container.", "Remediation": { "Recommendation": { "Text": "Containers running as Privileged will have Root permissions, this should be avoided if not needed. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions", } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsTaskDefinition", "Id": taskDefinitionArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "Other": { "Family": tdefFamily, "ContainerDefinitionName": cdefName } } } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF PR.AC-1", "NIST SP 800-53 AC-1", "NIST SP 800-53 AC-2", "NIST SP 800-53 IA-1", "NIST SP 800-53 IA-2", "NIST SP 800-53 IA-3", "NIST SP 800-53 IA-4", "NIST SP 800-53 IA-5", "NIST SP 800-53 IA-6", "NIST SP 800-53 IA-7", "NIST SP 800-53 IA-8", "NIST SP 800-53 IA-9", "NIST SP 800-53 IA-10", "NIST SP 800-53 IA-11", "AICPA TSC CC6.1", "AICPA TSC CC6.2", "ISO 27001:2013 A.9.2.1", "ISO 27001:2013 A.9.2.2", "ISO 27001:2013 A.9.2.3", "ISO 27001:2013 A.9.2.4", "ISO 27001:2013 A.9.2.6", "ISO 27001:2013 A.9.3.1", "ISO 27001:2013 A.9.4.2", "ISO 27001:2013 A.9.4.3", ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED", } yield finding except Exception as e: print(e) @registry.register_check("ecs") def ecs_task_definition_security_labels_check(cache: dict, awsAccountId: str, awsRegion: str, awsPartition: str) -> dict: """[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured""" for taskdef in ecs.list_task_definitions(status='ACTIVE')['taskDefinitionArns']: try: response = ecs.describe_task_definition(taskDefinition=taskdef)["taskDefinition"] taskDefinitionArn = str(response["taskDefinitionArn"]) tdefFamily = str(response["family"]) # If there is a network mode of "awsvpc" it is likely a Fargate task - even though EC2 compute can run with that... # time for some funky edge cases, keep that in mind before you yeet an issue at me, please ;) if str(response["networkMode"]) == 'awsvpc': continue else: # Loop container definitions for cdef in response["containerDefinitions"]: # ISO Time iso8601Time = (datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc).isoformat()) cdefName = str(cdef["name"]) try: # This is a passing check secOpts = str(cdef["dockerSecurityOptions"]) finding = { "SchemaVersion": "2018-10-08", "Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": taskDefinitionArn + "/" + cdefName, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "INFORMATIONAL"}, "Confidence": 99, "Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured", "Description": "ECS Container Definition " + cdefName + " in Task Definition " + taskDefinitionArn + " has Docker Security Options configured.", "Remediation": { "Recommendation": { "Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. 
Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsTaskDefinition", "Id": taskDefinitionArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "Other": { "Family": tdefFamily, "ContainerDefinitionName": cdefName, 'DockerSecurityOptions': secOpts } } } ], "Compliance": { "Status": "PASSED", "RelatedRequirements": [ "NIST CSF PR.IP-1", "NIST SP 800-53 CM-2", "NIST SP 800-53 CM-3", "NIST SP 800-53 CM-4", "NIST SP 800-53 CM-5", "NIST SP 800-53 CM-6", "NIST SP 800-53 CM-7", "NIST SP 800-53 CM-9", "NIST SP 800-53 SA-10", "AICPA TSC A1.3", "AICPA TSC CC1.4", "AICPA TSC CC5.3", "AICPA TSC CC6.2", "AICPA TSC CC7.1", "AICPA TSC CC7.3", "AICPA TSC CC7.4", "ISO 27001:2013 A.12.1.2", "ISO 27001:2013 A.12.5.1", "ISO 27001:2013 A.12.6.2", "ISO 27001:2013 A.14.2.2", "ISO 27001:2013 A.14.2.3", "ISO 27001:2013 A.14.2.4", ], }, "Workflow": {"Status": "RESOLVED"}, "RecordState": "ARCHIVED" } yield finding except: secOpts = str('["NO_OPTIONS"]') finding = { "SchemaVersion": "2018-10-08", "Id": taskDefinitionArn + "/" + cdefName + "/ecs-task-definition-security-labels-check", "ProductArn": f"arn:{awsPartition}:securityhub:{awsRegion}:{awsAccountId}:product/{awsAccountId}/default", "GeneratorId": taskDefinitionArn + "/" + cdefName, "AwsAccountId": awsAccountId, "Types": ["Software and Configuration Checks/AWS Security Best Practices"], "FirstObservedAt": iso8601Time, "CreatedAt": iso8601Time, "UpdatedAt": iso8601Time, "Severity": {"Label": "HIGH"}, "Confidence": 99, "Title": "[ECS.4] ECS Task Definitions for EC2 should have Docker Security Options (SELinux or AppArmor) configured", "Description": "ECS Container Definition " + cdefName + " in Task Definition " + taskDefinitionArn + " does not have any Docker Security Options configured. Refer to the remediation instructions to remediate this behavior", "Remediation": { "Recommendation": { "Text": "Containers running on EC2 Compute-types should have Docker Security Options configured. Refer to the Task definition parameters Security section of the Amazon Elastic Container Service Developer Guide", "Url": "https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html#container_definitions" } }, "ProductFields": {"Product Name": "ElectricEye"}, "Resources": [ { "Type": "AwsEcsTaskDefinition", "Id": taskDefinitionArn, "Partition": awsPartition, "Region": awsRegion, "Details": { "Other": { "Family": tdefFamily, "ContainerDefinitionName": cdefName, 'DockerSecurityOptions': secOpts } } } ], "Compliance": { "Status": "FAILED", "RelatedRequirements": [ "NIST CSF PR.IP-1", "NIST SP 800-53 CM-2", "NIST SP 800-53 CM-3", "NIST SP 800-53 CM-4", "NIST SP 800-53 CM-5", "NIST SP 800-53 CM-6", "NIST SP 800-53 CM-7", "NIST SP 800-53 CM-9", "NIST SP 800-53 SA-10", "AICPA TSC A1.3", "AICPA TSC CC1.4", "AICPA TSC CC5.3", "AICPA TSC CC6.2", "AICPA TSC CC7.1", "AICPA TSC CC7.3", "AICPA TSC CC7.4", "ISO 27001:2013 A.12.1.2", "ISO 27001:2013 A.12.5.1", "ISO 27001:2013 A.12.6.2", "ISO 27001:2013 A.14.2.2", "ISO 27001:2013 A.14.2.3", "ISO 27001:2013 A.14.2.4", ], }, "Workflow": {"Status": "NEW"}, "RecordState": "ACTIVE" } yield finding except Exception as e: print(e)
2.140625
2
Mask/Interpolate slider without prepolate.py
typedev/RoboFont-1
1
12249
""" This slider controls interpolation between foreground and mask layers. Initial position for slider is at 1.0 (current foreground outline) Sliding left to 0.0 interpolates to mask Sliding right to 3.0 extrapolates away from mask. NOTE: Running this script opens an observer on the current glyph in the Glyph View window. The slider window must then be closed before it can be used on another glyph. """ from fontTools.misc.transform import Transform from vanilla import * g = CurrentGlyph() g.prepareUndo('interpolate with mask') ################### PREPOLATION ################################### ## Auto contour order and startpoints for foreground: #g.autoContourOrder() #for c in g: # c.autoStartSegment() ## Auto contour order and startpoints for mask: g.flipLayers("foreground", "mask") #g.autoContourOrder() #for c in g: # c.autoStartSegment() ## Gather point info for mask layer: maskpoints = [] for i in range(len(g)): maskpoints.append([]) for j in range(len(g[i])): maskpoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y)) ## Gather point info for foreground layer: g.flipLayers("mask", "foreground") forepoints = [] for i in range(len(g)): forepoints.append([]) for j in range(len(g[i])): forepoints[i].append((g[i][j].onCurve.x,g[i][j].onCurve.y)) ## Compare length of each contour in mask and foreground: n = 0 print '-------------------------------' print 'Checking ' + str(g.name) + ' without auto ordering' def gradient(point1, point2): grad = (point2[1] - point1[1])/(point2[0] - point1[0] + 0.9) return grad mismatched = [] if len(maskpoints) == len(forepoints): for i in range(len(forepoints)): print '-------------------------------' if len(forepoints[i]) == len(maskpoints[i]): print 'Contour ' + str(i) + ' matches' else: n = n + 1 print 'Contour ' + str(i) + ':' print str(len(forepoints[i])) + ' points in foreground' print str(len(maskpoints[i])) + ' points in mask' print '-------------------------------' if len(forepoints[i]) > len(maskpoints[i]): count = len(maskpoints[i]) prob = 'mask' else: count = len(forepoints[i]) prob = 'foreground' for j in range(-1,count - 1): def foregradient(a,b): foregrad = gradient(forepoints[a][b],forepoints[a][b+1]) return foregrad def maskgradient(a,b): maskgrad = gradient(maskpoints[a][b],maskpoints[a][b+1]) return maskgrad foregrad = foregradient(i,j) maskgrad = maskgradient(i,j) if foregrad > 20: foregrad = 100 if maskgrad > 20: maskgrad = 100 if foregrad < -20: foregrad = -100 if maskgrad < -20: maskgrad = -100 if abs(foregrad - maskgrad) > 0.4: mismatched.append(j+1) mismatched = [mismatched[0]] ## Find second problem: if prob == 'foreground': foregrad = foregradient(i,j) maskgrad = maskgradient(i,j+1) else: foregrad = foregradient(i,j+1) maskgrad = maskgradient(i,j) if foregrad > 20: foregrad = 100 if maskgrad > 20: maskgrad = 100 if foregrad < -20: foregrad = -100 if maskgrad < -20: maskgrad = -100 if abs(foregrad - maskgrad) > 0.4: mismatched.append(j+1) if abs(len(forepoints[i]) - len(maskpoints[i])) == 1: if len(mismatched) == 1: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) else: print 'Check amongst the last few points' else: if len(mismatched) == 2: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) print 'Check between points ' + str(mismatched[1]) + ' and ' + str(mismatched[1] + 1) elif len(mismatched) == 1: print 'Check between points ' + str(mismatched[0]) + ' and ' + str(mismatched[0] + 1) print 'Check amongst the last few points' else: print 'Check amongst the 
last few points' else: print '-------------------------------' print 'Foreground has ' + str(len(forepoints)) + ' contours' print 'Mask has ' + str(len(maskpoints)) + ' contours' print '-------------------------------' ################### INTERP SLIDER ################################### ## Collect mask points: g.flipLayers("foreground", "mask") all_mask_points = [] all_mask_points_length = [] for i in range(len(g)): all_mask_points.append([]) for j in range(len(g[i].points)): all_mask_points[i].append((g[i].points[j].x, g[i].points[j].y)) all_mask_points_length.append(j) ## Collect initial foreground points: g.flipLayers("mask", "foreground") all_fore_points = [] all_fore_points_length = [] for i in range(len(g)): all_fore_points.append([]) for j in range(len(g[i].points)): all_fore_points[i].append((g[i].points[j].x, g[i].points[j].y)) all_fore_points_length.append(j) ## Check for compatibility: if n > 0: pass else: ## if compatible, interpolate: def interp_fore(Glif, int_val): for i in range(len(Glif)): for j in range(len(Glif[i].points)): fore_point = all_fore_points[i][j] mask_point = all_mask_points[i][j] Glif[i].points[j].x = mask_point[0] + ((fore_point[0] - mask_point[0]) * int_val) Glif[i].points[j].y = mask_point[1] + ((fore_point[1] - mask_point[1]) * int_val) class InterpWithMaskWindow: def __init__(self, glyph): if glyph is None: print "There should be a glyph window selected." return self.glyph = glyph self.w = Window((600, 36),"Interpolate Foreground with Mask (no AutoOrder):") self.w.int = Slider((10, 6, -10, 22), value=1, maxValue=3, minValue=0, callback=self.adjust) self.w.open() def adjust(self, sender): int_val = self.w.int.get() print round(int_val, 2) Glif = self.glyph interp_fore(Glif, int_val) Glif.update() OpenWindow(InterpWithMaskWindow, CurrentGlyph()) g.update() g.performUndo() t = Transform().translate(0, 0) g.transform(t, doComponents=True) g.update()
2.765625
3
ex062.py
paulo-caixeta/Exercicios_Curso_Python
0
12250
# Continuação do ex061 (Termos de PA) print('Gerador de PA') print('-=' * 10) primeiro = int(input('Primeiro termo: ')) razão = int(input('Razão: ')) i = 0 n = 10 novos = 10 total = 0 while novos != 0: total = total + novos while i < total: termo = primeiro + razão * i i += 1 print(termo, end=' -> ') print('PAUSA') novos = int(input('Deseja mostrar mais termos? Quantos? ')) print('FIM')
3.875
4
order/tests.py
DanLivassan/bookstore
0
12251
<gh_stars>0 from random import randint from django.contrib.auth import get_user_model from django.test import TestCase from django.urls import reverse from order.serializers import OrderSerializer from product.models import Product from order.models import Order from rest_framework import status from rest_framework.test import APIClient MAX_PER_PAGE = 5 def sample_user(email='<EMAIL>', password='<PASSWORD>'): """Create a sample user""" return get_user_model().objects.create_user(email, password) def sample_product(title="My Product", price=1000, description="My product description"): raw_product = { 'title': title, 'price': price, 'description': description, } return Product.objects.create(**raw_product) def sample_order(user, products): raw_order = { # 'products': [product, product], 'user': user } order = Order.objects.create(**raw_order) for product in products: order.products.add(product) return order class ModelTests(TestCase): def test_order_creation_and_str(self): """Test that an order is created and its str representation is shown properly""" product1 = sample_product() product2 = sample_product() user = sample_user() order = sample_order(user, [product1, product2]) self.assertEqual(2, len(order.products.all())) self.assertIn(user.email, str(order)) self.assertIn("2", str(order)) class SerializerTests(TestCase): def test_order_serializer(self): """Test the order serializer""" product1 = sample_product() product2 = sample_product() user = sample_user() order = sample_order(user, [product1, product2]) serialized_data = OrderSerializer(order) data = serialized_data.data self.assertEqual(len(data['products']), 2) self.assertEqual(data['total'], 2000) class PublicApiTests(TestCase): def setUp(self) -> None: self.client = APIClient() def test_that_write_methods_fails(self): """Test that write methods fail for an unauthenticated user""" url = reverse('product-list', args=['v1']) post_response = self.client.post(url, {}) put_response = self.client.put(url, {}) patch_response = self.client.patch(url, {}) self.assertEqual(status.HTTP_403_FORBIDDEN, post_response.status_code) self.assertEqual(status.HTTP_403_FORBIDDEN, put_response.status_code) self.assertEqual(status.HTTP_403_FORBIDDEN, patch_response.status_code) def test_get_order_list(self): """Test that orders are retrieved properly and that the order total matches the sum of the product prices""" url = reverse('order-list', args=['v1']) total = 0 products = [] for i in range(5): price = randint(1000, 2000) total += price products.append(sample_product(price=price)) user = sample_user() sample_order(user=user, products=products) response = self.client.get(url) api_order = response.data['results'][0] self.assertEqual(api_order['total'], total) self.assertEqual(len(api_order['products']), 5) class PrivateApiTest(TestCase): def setUp(self) -> None: self.user = sample_user(email='<EMAIL>') self.client = APIClient() self.client.force_authenticate(self.user) def test_create_a_new_order(self): """Test that an order is created properly""" url = reverse('order-list', args=['v1']) sample_product() sample_product() products = Product.objects.all() user = sample_user() order_payload = { 'user': user.id, 'products_ids': [p.id for p in products] } response = self.client.post(url, order_payload) orders = Order.objects.all() self.assertEqual(response.status_code, status.HTTP_201_CREATED) self.assertEqual(orders.count(), 1) self.assertEqual(orders[0].products.count(), 2)
2.671875
3
sam-app/tests/unit/test_apns.py
mgacy/Adequate-Backend
1
12252
import unittest from .mocks import BotoSessionMock from push_notification import apns class APNSTestCase(unittest.TestCase): def_apns_category = 'MGDailyDealCategory' # def setUp(self): # def tearDown(self): # push_notification # push_background # make_new_deal_message # make_delta_message def test_make_delta_comment_1(self): deal_id = 'a6k5A000000kP9LQAU' delta_type = 'commentCount' delta_value = 5 message = { 'id': deal_id, 'delta_type': delta_type, 'delta_value': delta_value } expected = ( '{"aps": {"content-available": 1}, ' '"deal-id": "a6k5A000000kP9LQAU", ' '"delta-type": "commentCount", ' '"delta-value": 5}' ) result = apns.make_delta_message(message) self.assertEqual(result, expected) def test_make_delta_status_1(self): deal_id = 'a6k5A000000kP9LQAU' delta_type = 'launchStatus' delta_value = 'launch' message = { 'id': deal_id, 'delta_type': delta_type, 'delta_value': delta_value } expected = ( '{"aps": {"content-available": 1}, ' '"deal-id": "a6k5A000000kP9LQAU", ' '"delta-type": "launchStatus", ' '"delta-value": "launch"}' ) result = apns.make_delta_message(message) self.assertEqual(result, expected) # publish_message def test_publish_delta_status_prod(self): message = ( '{"aps": {"content-available": 1}, ' '"deal-id": "a6k5A000000kP9LQAU", ' '"delta-type": "launchStatus", ' '"delta-value": "launch"}' ) # deal_id = 'a6k5A000000kP9LQAU' # delta_type = 'launchStatus' # delta_value = 'launch' # message = ( # '{"aps": {"content-available": 1}, ' # f'"deal-id": "{deal_id}", ' # f'"delta-type": "{delta_type}", ' # f'"delta-value": "{delta_value}"' # '}' # ) session = BotoSessionMock() default_message='default message' apns_server = 'prod' apns.publish_message(session, topic_arn='fake_topic_arn', apns_server=apns_server, apns_message=message, default_message=default_message) expected = ( '{' '"default": "default message", ' '"APNS": "{' '\\"aps\\": {' '\\"content-available\\": 1' '}, ' '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", ' '\\"delta-type\\": \\"launchStatus\\", ' '\\"delta-value\\": \\"launch\\"' '}"' '}' ) result = session.client.message self.assertEqual(result, expected) def test_publish_delta_status_dev(self): message = ( '{"aps": {"content-available": 1}, ' '"deal-id": "a6k5A000000kP9LQAU", ' '"delta-type": "launchStatus", ' '"delta-value": "launch"}' ) session = BotoSessionMock() default_message='default message' apns_server = 'dev' apns.publish_message(session, topic_arn='fake_topic_arn', apns_server=apns_server, apns_message=message, default_message=default_message) expected = ( '{' '"default": "default message", ' '"APNS_SANDBOX": "{' '\\"aps\\": {' '\\"content-available\\": 1' '}, ' '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", ' '\\"delta-type\\": \\"launchStatus\\", ' '\\"delta-value\\": \\"launch\\"' '}"' '}' ) result = session.client.message self.assertEqual(result, expected) def test_publish_delta_status_both(self): message = ( '{"aps": {"content-available": 1}, ' '"deal-id": "a6k5A000000kP9LQAU", ' '"delta-type": "launchStatus", ' '"delta-value": "launch"}' ) session = BotoSessionMock() default_message='default message' apns_server = 'both' apns.publish_message(session, topic_arn='fake_topic_arn', apns_server=apns_server, apns_message=message, default_message=default_message) expected = ( '{' '"default": "default message", ' '"APNS": "{' '\\"aps\\": {' '\\"content-available\\": 1' '}, ' '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", ' '\\"delta-type\\": \\"launchStatus\\", ' '\\"delta-value\\": \\"launch\\"' '}", ' '"APNS_SANDBOX": "{' '\\"aps\\": {' 
'\\"content-available\\": 1' '}, ' '\\"deal-id\\": \\"a6k5A000000kP9LQAU\\", ' '\\"delta-type\\": \\"launchStatus\\", ' '\\"delta-value\\": \\"launch\\"' '}"' '}' ) result = session.client.message self.assertEqual(result, expected) def test_publish_invalid_server(self): session = BotoSessionMock() topic_arn='fake_topic_arn' apns_server = 'meh' apns_message ='{"aps": {"content-available": 1}' default_message='default message' self.assertRaises( ValueError, apns.publish_message, session, topic_arn, apns_server, apns_message, default_message) # _make_background_notification def test_make_background_notification_no_additional(self): additional = None expected = { 'aps': { 'content-available': 1 } } result = apns._make_background_notification(additional) self.assertEqual(result, expected) def test_make_background_notification_with_additional(self): deal_id = 'a6k5A000000kP9LQAU' delta_type = 'commentCount' delta_value = 5 additional = { 'id': deal_id, 'delta_type': delta_type, 'delta_value': delta_value } expected = { 'aps': { 'content-available': 1 }, 'id': deal_id, 'delta_type': delta_type, 'delta_value': delta_value } result = apns._make_background_notification(additional) self.assertDictEqual(result, expected) # _make_notification # def test_make_notification_1(self): # raise_for_status
2.703125
3
scrapets/extract.py
ownport/scrapets
2
12253
<gh_stars>1-10 # -*- coding: utf-8 -*- from HTMLParser import HTMLParser # ------------------------------------------------------- # # LinkExtractor: extract links from html page # class BaseExtractor(HTMLParser): def __init__(self): HTMLParser.__init__(self) self._links = [] @property def links(self): return self._links class LinkExtractor(BaseExtractor): def handle_starttag(self, tag, attrs): if tag == 'a': links = [v for k,v in attrs if k == 'href' and v not in self._links] self._links.extend(links) class ImageLinkExtractor(BaseExtractor): def handle_starttag(self, tag, attrs): if tag == 'img': links = [v for k,v in attrs if k == 'src' and v not in self._links] self._links.extend(links)
3
3
tour/forms.py
superdev0505/mtp-web
0
12254
<gh_stars>0 ## Django Packages from django import forms from django_select2 import forms as s2forms ## App packages from .models import * from datetime import datetime from bootstrap_datepicker_plus import DatePickerInput, TimePickerInput, DateTimePickerInput, MonthPickerInput, YearPickerInput from tags_input import fields from lib.classes import CustomTagsInputField ############################################################################ ############################################################################ class TourForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['name'] = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'data-validation': 'required'}), required=False) self.fields['description'] = forms.CharField( widget=forms.Textarea(attrs={'class': 'form-control', 'rows': 4, 'data-validation': 'required'}), required=False) self.fields['tour_tag'] = CustomTagsInputField( TourTag.objects.filter(is_actived=True), create_missing=True, required=False, ) # class Meta: model = Tour fields = ( 'name', 'description', 'tour_tag' ) class TourSearchForm(forms.Form): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['username'] = forms.CharField( label='Username', widget=forms.TextInput(attrs={'class': 'form-control'}), required=False ) self.fields['name'] = forms.CharField( label='Tour Name', widget=forms.TextInput(attrs={'class': 'form-control'}), required=False ) self.fields['tour_tag'] = CustomTagsInputField( TourTag.objects.filter(is_actived=True), create_missing=False, required=False, ) def _my(self, username): self.fields['username'] = forms.CharField( label='', widget=forms.TextInput(attrs={'class': 'form-control d-none', 'value': username}), required=False )
2.296875
2
bots/test_analyseGithub.py
RSE2-D2/RSE2-D2
3
12255
import analyseGithub def test_containsGithubURL_empty(): assert not analyseGithub.containsGitHubURL("") def test_containsGithubURL_noUrl(): assert not analyseGithub.containsGitHubURL("Some test tweet") def test_containsGithubURL_url(): repo = "https://github.com/git/git" assert analyseGithub.containsGitHubURL(repo) def test_extractGitHubLink(): repo = "https://github.com/git/git" assert analyseGithub.extractGitHubLink(f"{repo} more tweet") == "git/git"
2.828125
3
tests/unit/utils/filebuffer_test.py
gotcha/salt
2
12256
<reponame>gotcha/salt # -*- coding: utf-8 -*- ''' tests.unit.utils.filebuffer_test ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :codeauthor: :email:`<NAME> (<EMAIL>)` :copyright: © 2012 by the SaltStack Team, see AUTHORS for more details. :license: Apache 2.0, see LICENSE for more details. ''' # Import salt libs from saltunittest import TestCase, TestLoader, TextTestRunner from salt.utils.filebuffer import BufferedReader, InvalidFileMode class TestFileBuffer(TestCase): def test_read_only_mode(self): with self.assertRaises(InvalidFileMode): BufferedReader('/tmp/foo', mode='a') with self.assertRaises(InvalidFileMode): BufferedReader('/tmp/foo', mode='ab') with self.assertRaises(InvalidFileMode): BufferedReader('/tmp/foo', mode='w') with self.assertRaises(InvalidFileMode): BufferedReader('/tmp/foo', mode='wb') if __name__ == "__main__": loader = TestLoader() tests = loader.loadTestsFromTestCase(TestFileBuffer) TextTestRunner(verbosity=1).run(tests)
2.3125
2
ranking_baselines/ARCII/rank_metrics.py
dileep1996/mnsrf_ranking_suggestion
1
12257
############################################################################### # Author: <NAME> # Project: ARC-II: Convolutional Matching Model # Date Created: 7/18/2017 # # File Description: This script contains ranking evaluation functions. ############################################################################### import torch, numpy def mean_average_precision(logits, target): """ Compute mean average precision. :param logits: 2d tensor [batch_size x num_clicks_per_query] :param target: 2d tensor [batch_size x num_clicks_per_query] :return: mean average precision [a float value] """ assert logits.size() == target.size() sorted, indices = torch.sort(logits, 1, descending=True) map = 0 for i in range(indices.size(0)): average_precision = 0 num_rel = 0 for j in range(indices.size(1)): if target[i, indices[i, j].data[0]].data[0] == 1: num_rel += 1 average_precision += num_rel / (j + 1) average_precision = average_precision / num_rel map += average_precision return map / indices.size(0) def NDCG(logits, target, k): """ Compute normalized discounted cumulative gain. :param logits: 2d tensor [batch_size x rel_docs_per_query] :param target: 2d tensor [batch_size x rel_docs_per_query] :param k: cutoff rank at which the gain is computed :return: normalized discounted cumulative gain at k [a float value] """ assert logits.size() == target.size() assert logits.size(1) >= k, 'NDCG@K cannot be computed, invalid value of K.' sorted, indices = torch.sort(logits, 1, descending=True) NDCG = 0 for i in range(indices.size(0)): DCG_ref = 0 num_rel_docs = torch.nonzero(target[i].data).size(0) for j in range(indices.size(1)): if j == k: break if target[i, indices[i, j].data[0]].data[0] == 1: DCG_ref += 1 / numpy.log2(j + 2) DCG_gt = 0 for j in range(num_rel_docs): if j == k: break DCG_gt += 1 / numpy.log2(j + 2) NDCG += DCG_ref / DCG_gt return NDCG / indices.size(0) def MRR(logits, target): """ Compute mean reciprocal rank. :param logits: 2d tensor [batch_size x rel_docs_per_query] :param target: 2d tensor [batch_size x rel_docs_per_query] :return: mean reciprocal rank [a float value] """ assert logits.size() == target.size() sorted, indices = torch.sort(logits, 1, descending=True) total_reciprocal_rank = 0 for i in range(indices.size(0)): for j in range(indices.size(1)): if target[i, indices[i, j].data[0]].data[0] == 1: total_reciprocal_rank += 1.0 / (j + 1) break return total_reciprocal_rank / logits.size(0)
2.359375
2
src/wwucs/bot/__init__.py
reillysiemens/wwucs-bot
0
12258
<filename>src/wwucs/bot/__init__.py """WWUCS Bot module.""" __all__ = [ "__author__", "__email__", "__version__", ] __author__ = "<NAME>" __email__ = "<EMAIL>" __version__ = "0.1.0"
1.21875
1
deploy/python/det_keypoint_unite_infer.py
Amanda-Barbara/PaddleDetection
0
12259
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import json import cv2 import math import numpy as np import paddle import yaml from det_keypoint_unite_utils import argsparser from preprocess import decode_image from infer import Detector, DetectorPicoDet, PredictConfig, print_arguments, get_test_images, bench_log from keypoint_infer import KeyPointDetector, PredictConfig_KeyPoint from visualize import visualize_pose from benchmark_utils import PaddleInferBenchmark from utils import get_current_memory_mb from keypoint_postprocess import translate_to_ori_images KEYPOINT_SUPPORT_MODELS = { 'HigherHRNet': 'keypoint_bottomup', 'HRNet': 'keypoint_topdown' } def predict_with_given_det(image, det_res, keypoint_detector, keypoint_batch_size, run_benchmark): rec_images, records, det_rects = keypoint_detector.get_person_from_rect( image, det_res) keypoint_vector = [] score_vector = [] rect_vector = det_rects keypoint_results = keypoint_detector.predict_image( rec_images, run_benchmark, repeats=10, visual=False) keypoint_vector, score_vector = translate_to_ori_images(keypoint_results, np.array(records)) keypoint_res = {} keypoint_res['keypoint'] = [ keypoint_vector.tolist(), score_vector.tolist() ] if len(keypoint_vector) > 0 else [[], []] keypoint_res['bbox'] = rect_vector return keypoint_res def topdown_unite_predict(detector, topdown_keypoint_detector, image_list, keypoint_batch_size=1, save_res=False): det_timer = detector.get_timer() store_res = [] for i, img_file in enumerate(image_list): # Decode image in advance in det + pose prediction det_timer.preprocess_time_s.start() image, _ = decode_image(img_file, {}) det_timer.preprocess_time_s.end() if FLAGS.run_benchmark: results = detector.predict_image( [image], run_benchmark=True, repeats=10) cm, gm, gu = get_current_memory_mb() detector.cpu_mem += cm detector.gpu_mem += gm detector.gpu_util += gu else: results = detector.predict_image([image], visual=False) results = detector.filter_box(results, FLAGS.det_threshold) if results['boxes_num'] > 0: keypoint_res = predict_with_given_det( image, results, topdown_keypoint_detector, keypoint_batch_size, FLAGS.run_benchmark) if save_res: save_name = img_file if isinstance(img_file, str) else i store_res.append([ save_name, keypoint_res['bbox'], [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]] ]) else: results["keypoint"] = [[], []] keypoint_res = results if FLAGS.run_benchmark: cm, gm, gu = get_current_memory_mb() topdown_keypoint_detector.cpu_mem += cm topdown_keypoint_detector.gpu_mem += gm topdown_keypoint_detector.gpu_util += gu else: if not os.path.exists(FLAGS.output_dir): os.makedirs(FLAGS.output_dir) visualize_pose( img_file, keypoint_res, visual_thresh=FLAGS.keypoint_threshold, save_dir=FLAGS.output_dir) if save_res: """ 1) store_res: a list of image_data 2) image_data: [imageid, rects, [keypoints, scores]] 3) rects: list of rect [xmin, ymin, xmax, ymax] 4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list 5) 
scores: mean of all joint conf """ with open("det_keypoint_unite_image_results.json", 'w') as wf: json.dump(store_res, wf, indent=4) def topdown_unite_predict_video(detector, topdown_keypoint_detector, camera_id, keypoint_batch_size=1, save_res=False): video_name = 'output.mp4' if camera_id != -1: capture = cv2.VideoCapture(camera_id) else: capture = cv2.VideoCapture(FLAGS.video_file) video_name = os.path.split(FLAGS.video_file)[-1] # Get Video info : resolution, fps, frame count width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) fps = int(capture.get(cv2.CAP_PROP_FPS)) frame_count = int(capture.get(cv2.CAP_PROP_FRAME_COUNT)) print("fps: %d, frame_count: %d" % (fps, frame_count)) if not os.path.exists(FLAGS.output_dir): os.makedirs(FLAGS.output_dir) out_path = os.path.join(FLAGS.output_dir, video_name) fourcc = cv2.VideoWriter_fourcc(* 'mp4v') writer = cv2.VideoWriter(out_path, fourcc, fps, (width, height)) index = 0 store_res = [] while (1): ret, frame = capture.read() if not ret: break index += 1 print('detect frame: %d' % (index)) frame2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) results = detector.predict_image([frame2], visual=False) results = detector.filter_box(results, FLAGS.det_threshold) if results['boxes_num'] == 0: writer.write(frame) continue keypoint_res = predict_with_given_det( frame2, results, topdown_keypoint_detector, keypoint_batch_size, FLAGS.run_benchmark) im = visualize_pose( frame, keypoint_res, visual_thresh=FLAGS.keypoint_threshold, returnimg=True) if save_res: store_res.append([ index, keypoint_res['bbox'], [keypoint_res['keypoint'][0], keypoint_res['keypoint'][1]] ]) writer.write(im) if camera_id != -1: cv2.imshow('Mask Detection', im) if cv2.waitKey(1) & 0xFF == ord('q'): break writer.release() print('output_video saved to: {}'.format(out_path)) if save_res: """ 1) store_res: a list of frame_data 2) frame_data: [frameid, rects, [keypoints, scores]] 3) rects: list of rect [xmin, ymin, xmax, ymax] 4) keypoints: 17(joint numbers)*[x, y, conf], total 51 data in list 5) scores: mean of all joint conf """ with open("det_keypoint_unite_video_results.json", 'w') as wf: json.dump(store_res, wf, indent=4) def main(): deploy_file = os.path.join(FLAGS.det_model_dir, 'infer_cfg.yml') with open(deploy_file) as f: yml_conf = yaml.safe_load(f) arch = yml_conf['arch'] detector_func = 'Detector' if arch == 'PicoDet': detector_func = 'DetectorPicoDet' detector = eval(detector_func)(FLAGS.det_model_dir, device=FLAGS.device, run_mode=FLAGS.run_mode, trt_min_shape=FLAGS.trt_min_shape, trt_max_shape=FLAGS.trt_max_shape, trt_opt_shape=FLAGS.trt_opt_shape, trt_calib_mode=FLAGS.trt_calib_mode, cpu_threads=FLAGS.cpu_threads, enable_mkldnn=FLAGS.enable_mkldnn, threshold=FLAGS.det_threshold) topdown_keypoint_detector = KeyPointDetector( FLAGS.keypoint_model_dir, device=FLAGS.device, run_mode=FLAGS.run_mode, batch_size=FLAGS.keypoint_batch_size, trt_min_shape=FLAGS.trt_min_shape, trt_max_shape=FLAGS.trt_max_shape, trt_opt_shape=FLAGS.trt_opt_shape, trt_calib_mode=FLAGS.trt_calib_mode, cpu_threads=FLAGS.cpu_threads, enable_mkldnn=FLAGS.enable_mkldnn, use_dark=FLAGS.use_dark) keypoint_arch = topdown_keypoint_detector.pred_config.arch assert KEYPOINT_SUPPORT_MODELS[ keypoint_arch] == 'keypoint_topdown', 'Detection-Keypoint unite inference only supports topdown models.' 
# predict from video file or camera video stream if FLAGS.video_file is not None or FLAGS.camera_id != -1: topdown_unite_predict_video(detector, topdown_keypoint_detector, FLAGS.camera_id, FLAGS.keypoint_batch_size, FLAGS.save_res) else: # predict from image img_list = get_test_images(FLAGS.image_dir, FLAGS.image_file) topdown_unite_predict(detector, topdown_keypoint_detector, img_list, FLAGS.keypoint_batch_size, FLAGS.save_res) if not FLAGS.run_benchmark: detector.det_times.info(average=True) topdown_keypoint_detector.det_times.info(average=True) else: mode = FLAGS.run_mode det_model_dir = FLAGS.det_model_dir det_model_info = { 'model_name': det_model_dir.strip('/').split('/')[-1], 'precision': mode.split('_')[-1] } bench_log(detector, img_list, det_model_info, name='Det') keypoint_model_dir = FLAGS.keypoint_model_dir keypoint_model_info = { 'model_name': keypoint_model_dir.strip('/').split('/')[-1], 'precision': mode.split('_')[-1] } bench_log(topdown_keypoint_detector, img_list, keypoint_model_info, FLAGS.keypoint_batch_size, 'KeyPoint') if __name__ == '__main__': paddle.enable_static() parser = argsparser() FLAGS = parser.parse_args() print_arguments(FLAGS) FLAGS.device = FLAGS.device.upper() assert FLAGS.device in ['CPU', 'GPU', 'XPU' ], "device should be CPU, GPU or XPU" main()
1.984375
2
plugins/google_cloud_compute/komand_google_cloud_compute/actions/disk_detach/action.py
lukaszlaszuk/insightconnect-plugins
46
12260
<reponame>lukaszlaszuk/insightconnect-plugins import insightconnect_plugin_runtime from .schema import DiskDetachInput, DiskDetachOutput, Input, Component class DiskDetach(insightconnect_plugin_runtime.Action): def __init__(self): super(self.__class__, self).__init__( name="disk_detach", description=Component.DESCRIPTION, input=DiskDetachInput(), output=DiskDetachOutput() ) def run(self, params={}): return self.connection.client.disk_detach( params.get(Input.ZONE), params.get(Input.INSTANCE), params.get(Input.DEVICENAME) )
2.0625
2
troupon/payment/serializers.py
andela/troupon
14
12261
"""Serializers for the payment app.""" from rest_framework import serializers from models import Purchases class TransactionSerializer(serializers.ModelSerializer): """Serializer for Transaction instances. """ class Meta: model = Purchases fields = ('id', 'item', 'price', 'quantity', 'title', 'description', 'stripe_transaction_id', 'stripe_transaction_status')
2.359375
2
mla/kmeans.py
anshulg5/MLAlgorithms
1
12262
import random import seaborn as sns import matplotlib.pyplot as plt import numpy as np from mla.base import BaseEstimator from mla.metrics.distance import euclidean_distance random.seed(1111) class KMeans(BaseEstimator): """Partition a dataset into K clusters. Finds clusters by repeatedly assigning each data point to the cluster with the nearest centroid and iterating until the assignments converge (meaning they don't change during an iteration) or the maximum number of iterations is reached. Parameters ---------- K : int The number of clusters into which the dataset is partitioned. max_iters: int The maximum iterations of assigning points to the nearest cluster. Short-circuited by the assignments converging on their own. init: str, default 'random' The name of the method used to initialize the first clustering. 'random' - Randomly select values from the dataset as the K centroids. '++' - Select a random first centroid from the dataset, then select K - 1 more centroids by choosing values from the dataset with a probability distribution proportional to the squared distance from each point's closest existing cluster. Attempts to create larger distances between initial clusters to improve convergence rates and avoid degenerate cases. """ y_required = False def __init__(self, K=5, max_iters=100, init='random'): self.K = K self.max_iters = max_iters self.clusters = [[] for _ in range(self.K)] self.centroids = [] self.init = init def _initialize_cetroids(self, init): """Set the initial centroids.""" if init == 'random': self.centroids = [self.X[x] for x in random.sample(range(self.n_samples), self.K)] elif init == '++': self.centroids = [random.choice(self.X)] while len(self.centroids) < self.K: self.centroids.append(self._choose_next_center()) else: raise ValueError('Unknown type of init parameter') def _predict(self, X=None): """Perform the clustering on the dataset.""" self._initialize_cetroids(self.init) centroids = self.centroids for _ in range(self.max_iters): self._assign(centroids) centroids_old = centroids centroids = [self._get_centroid(cluster) for cluster in self.clusters] if self._is_converged(centroids_old, centroids): break self.centroids = centroids return self._get_predictions() def _get_predictions(self): predictions = np.empty(self.n_samples) for i, cluster in enumerate(self.clusters): for index in cluster: predictions[index] = i return predictions def _assign(self, centroids): for row in range(self.n_samples): for i, cluster in enumerate(self.clusters): if row in cluster: self.clusters[i].remove(row) break closest = self._closest(row, centroids) self.clusters[closest].append(row) def _closest(self, fpoint, centroids): closest_index = None closest_distance = None for i, point in enumerate(centroids): dist = euclidean_distance(self.X[fpoint], point) if closest_index is None or dist < closest_distance: closest_index = i closest_distance = dist return closest_index def _get_centroid(self, cluster): """Get values by indices and take the mean.""" return [np.mean(np.take(self.X[:, i], cluster)) for i in range(self.n_features)] def _dist_from_centers(self): return np.array([min([euclidean_distance(x, c) for c in self.centroids]) for x in self.X]) def _choose_next_center(self): distances = self._dist_from_centers() probs = distances / distances.sum() cumprobs = probs.cumsum() r = random.random() ind = np.where(cumprobs >= r)[0][0] return self.X[ind] def _is_converged(self, centroids_old, centroids): return True if sum([euclidean_distance(centroids_old[i], centroids[i]) for i in 
range(self.K)]) == 0 else False def plot(self, data=None): sns.set(style="white") if data is None: data = self.X for i, index in enumerate(self.clusters): point = np.array(data[index]).T plt.scatter(*point, c=sns.color_palette("hls", self.K + 1)[i]) for point in self.centroids: plt.scatter(*point, marker='x', linewidths=10) plt.show()
3.5625
4
train_classifier.py
justusmattern/dist-embeds
0
12263
import os import sys import argparse import time import random import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim from torch.autograd import Variable # from sru import * import dataloader import modules class Model(nn.Module): def __init__(self, embedding, hidden_size=150, depth=1, dropout=0.3, cnn=False, nclasses=2, args=None): super(Model, self).__init__() self.cnn = cnn self.drop = nn.Dropout(dropout) self.args = args self.emb_layer = modules.EmbeddingLayer( embs = dataloader.load_embedding(embedding), dist_embeds = self.args.dist_embeds ) self.word2id = self.emb_layer.word2id if cnn: self.encoder = modules.CNN_Text( self.emb_layer.n_d, widths = [3,4,5], filters=hidden_size ) d_out = 3*hidden_size else: self.encoder = nn.LSTM( self.emb_layer.n_d, hidden_size//2, depth, dropout = dropout, # batch_first=True, bidirectional=True ) d_out = hidden_size # else: # self.encoder = SRU( # emb_layer.n_d, # args.d, # args.depth, # dropout = args.dropout, # ) # d_out = args.d self.out = nn.Linear(d_out, nclasses) def forward(self, input): if self.cnn: input = input.t() if self.args.dist_embeds: emb, kl_loss = self.emb_layer(input) else: emb = self.emb_layer(input) emb = self.drop(emb) if self.cnn: output = self.encoder(emb) else: output, hidden = self.encoder(emb) # output = output[-1] output = torch.max(output, dim=0)[0].squeeze() output = self.drop(output) if self.args.dist_embeds: return self.out(output), kl_loss else: return self.out(output) def text_pred(self, text, batch_size=32): batches_x = dataloader.create_batches_x( text, batch_size, ##TODO self.word2id ) outs = [] with torch.no_grad(): for x in batches_x: x = Variable(x) if self.cnn: x = x.t() emb = self.emb_layer(x) if self.cnn: output = self.encoder(emb) else: output, hidden = self.encoder(emb) # output = output[-1] output = torch.max(output, dim=0)[0] outs.append(F.softmax(self.out(output), dim=-1)) return torch.cat(outs, dim=0) def eval_model(niter, model, input_x, input_y): model.eval() # N = len(valid_x) # criterion = nn.CrossEntropyLoss() correct = 0.0 cnt = 0. 
# total_loss = 0.0 with torch.no_grad(): for x, y in zip(input_x, input_y): x, y = Variable(x, volatile=True), Variable(y) if model.args.dist_embeds: output, kl_loss = model(x) else: output = model(x) # loss = criterion(output, y) # total_loss += loss.item()*x.size(1) pred = output.data.max(1)[1] correct += pred.eq(y.data).cpu().sum() cnt += y.numel() model.train() return correct.item()/cnt def train_model(epoch, model, optimizer, train_x, train_y, test_x, test_y, best_test, save_path): model.train() niter = epoch*len(train_x) criterion = nn.CrossEntropyLoss() cnt = 0 for x, y in zip(train_x, train_y): niter += 1 cnt += 1 model.zero_grad() x, y = Variable(x), Variable(y) if model.args.dist_embeds: output, kl_loss = model(x) ce_loss = criterion(output, y) loss = ce_loss + model.args.kl_weight*kl_loss else: output = model(x) loss = criterion(output, y) loss.backward() optimizer.step() test_acc = eval_model(niter, model, test_x, test_y) if model.args.dist_embeds: sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss_class={:.6f} train_loss_kl={:.6f} train_loss_ovr = {:.6f} test_err={:.6f}\n".format( epoch, niter, optimizer.param_groups[0]['lr'], ce_loss.item(), kl_loss.item(), loss.item(), test_acc )) else: sys.stdout.write("Epoch={} iter={} lr={:.6f} train_loss = {:.6f} test_err={:.6f}\n".format( epoch, niter, optimizer.param_groups[0]['lr'], loss.item(), test_acc )) if test_acc > best_test: best_test = test_acc if save_path: torch.save(model.state_dict(), save_path) # test_err = eval_model(niter, model, test_x, test_y) sys.stdout.write("\n") return best_test def save_data(data, labels, path, type='train'): with open(os.path.join(path, type+'.txt'), 'w') as ofile: for text, label in zip(data, labels): ofile.write('{} {}\n'.format(label, ' '.join(text))) def main(args): if args.dataset == 'mr': # data, label = dataloader.read_MR(args.path) # train_x, train_y, test_x, test_y = dataloader.cv_split2( # data, label, # nfold=10, # valid_id=args.cv # ) # # if args.save_data_split: # save_data(train_x, train_y, args.path, 'train') # save_data(test_x, test_y, args.path, 'test') train_x, train_y = dataloader.read_corpus('data/mr/train.txt') test_x, test_y = dataloader.read_corpus('data/mr/test.txt') elif args.dataset == 'imdb': train_x, train_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb', 'train_tok.csv'), clean=False, MR=True, shuffle=True) test_x, test_y = dataloader.read_corpus(os.path.join('/data/medg/misc/jindi/nlp/datasets/imdb', 'test_tok.csv'), clean=False, MR=True, shuffle=True) else: train_x, train_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/' 'train_tok.csv'.format(args.dataset), clean=False, MR=False, shuffle=True) test_x, test_y = dataloader.read_corpus('/afs/csail.mit.edu/u/z/zhijing/proj/to_di/data/{}/' 'test_tok.csv'.format(args.dataset), clean=False, MR=False, shuffle=True) nclasses = max(train_y) + 1 # elif args.dataset == 'subj': # data, label = dataloader.read_SUBJ(args.path) # elif args.dataset == 'cr': # data, label = dataloader.read_CR(args.path) # elif args.dataset == 'mpqa': # data, label = dataloader.read_MPQA(args.path) # elif args.dataset == 'trec': # train_x, train_y, test_x, test_y = dataloader.read_TREC(args.path) # data = train_x + test_x # label = None # elif args.dataset == 'sst': # train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.read_SST(args.path) # data = train_x + valid_x + test_x # label = None # else: # raise Exception("unknown dataset: {}".format(args.dataset)) # if 
args.dataset == 'trec': # elif args.dataset != 'sst': # train_x, train_y, valid_x, valid_y, test_x, test_y = dataloader.cv_split( # data, label, # nfold = 10, # test_id = args.cv # ) model = Model(args.embedding, args.d, args.depth, args.dropout, args.cnn, nclasses, args=args).cuda() need_grad = lambda x: x.requires_grad optimizer = optim.Adam( filter(need_grad, model.parameters()), lr = args.lr ) train_x, train_y = dataloader.create_batches( train_x, train_y, args.batch_size, model.word2id, ) # valid_x, valid_y = dataloader.create_batches( # valid_x, valid_y, # args.batch_size, # emb_layer.word2id, # ) test_x, test_y = dataloader.create_batches( test_x, test_y, args.batch_size, model.word2id, ) best_test = 0 # test_err = 1e+8 for epoch in range(args.max_epoch): best_test = train_model(epoch, model, optimizer, train_x, train_y, # valid_x, valid_y, test_x, test_y, best_test, args.save_path ) if args.lr_decay>0: optimizer.param_groups[0]['lr'] *= args.lr_decay # sys.stdout.write("best_valid: {:.6f}\n".format( # best_valid # )) sys.stdout.write("test_err: {:.6f}\n".format( best_test )) if __name__ == "__main__": argparser = argparse.ArgumentParser(sys.argv[0], conflict_handler='resolve') argparser.add_argument("--cnn", action='store_true', help="whether to use cnn") argparser.add_argument("--lstm", action='store_true', help="whether to use lstm") argparser.add_argument("--dataset", type=str, default="mr", help="which dataset") argparser.add_argument("--embedding", type=str, required=True, help="word vectors") argparser.add_argument("--batch_size", "--batch", type=int, default=32) argparser.add_argument("--max_epoch", type=int, default=70) argparser.add_argument("--d", type=int, default=150) argparser.add_argument("--dropout", type=float, default=0.3) argparser.add_argument("--depth", type=int, default=1) argparser.add_argument("--lr", type=float, default=0.001) argparser.add_argument("--lr_decay", type=float, default=0) argparser.add_argument("--cv", type=int, default=0) argparser.add_argument("--save_path", type=str, default='') argparser.add_argument("--save_data_split", action='store_true', help="whether to save train/test split") argparser.add_argument("--gpu_id", type=int, default=0) argparser.add_argument("--kl_weight", type=float, default = 0.001) argparser.add_argument("--dist_embeds", action='store_true') args = argparser.parse_args() # args.save_path = os.path.join(args.save_path, args.dataset) print (args) torch.cuda.set_device(args.gpu_id) main(args)
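The pooling step in Model.forward, torch.max(output, dim=0)[0], is a max-over-time reduction of the LSTM outputs. A small standalone illustration, assuming PyTorch is installed and using made-up tensor sizes:

import torch

# LSTM output arrives as (seq_len, batch, hidden); the classifier keeps, for each
# feature, its maximum value across the time dimension before the linear layer.
seq_len, batch, hidden = 7, 4, 6
output = torch.randn(seq_len, batch, hidden)

pooled = torch.max(output, dim=0)[0]   # shape: (batch, hidden)
print(pooled.shape)                    # torch.Size([4, 6])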
2.515625
3
app/request/queue.py
infrared5/massroute-pi
0
12264
<filename>app/request/queue.py import logging from time import sleep logger = logging.getLogger(__name__) class StopRequestQueue: cursor = 0 queue = None service = None current_request = None request_delay = 0 # seconds def __init__(self, service, request_delay=10): self.queue = [] self.service = service self.request_delay = request_delay def add_request(self, request): self.queue.append(request) logger.info("Request added for %r. Queue length at %d" % (request.stop_id, len(self.queue))) def success(self, data): logger.debug("Success returned") # could become none upon stop(), considered inactive if self.current_request is not None: for component in self.current_request.components: component.data(data) sleep(self.request_delay) self.next() def failure(self, error): logger.debug("Failure returned") # could become none upon stop(), considered inactive if self.current_request is not None: for component in self.current_request.components: component.error(error) sleep(self.request_delay) self.next() def next(self, increment=True): logger.info('next()') self.cursor = self.cursor + 1 if increment else self.cursor if self.cursor < len(self.queue): self.current_request = self.queue[self.cursor] self.service.access(self.current_request.stop_id, self) """ Not allowing wrapped cursor. :next() is run through, then this queue is exited and the service availability is checked again, starting the sequence again. """ # self.cursor = 0 if self.cursor == len(self.queue) - 1 else self.cursor + 1 def start(self): logger.info('start()') self.cursor = 0 self.next(False) logger.info('start() - out') def stop(self): del self.queue[:] self.current_request = None
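A minimal synchronous usage sketch of the queue, assuming the StopRequestQueue class above is in scope; the Dummy* classes and stop ids below are placeholders standing in for the real service, request, and component objects:

import logging
logging.basicConfig(level=logging.INFO)

class DummyComponent:
    def data(self, data):
        print("got data:", data)
    def error(self, error):
        print("got error:", error)

class DummyRequest:
    def __init__(self, stop_id):
        self.stop_id = stop_id
        self.components = [DummyComponent()]

class DummyService:
    def access(self, stop_id, queue):
        # call straight back into the queue, as a real transit client would on success
        queue.success({"stop": stop_id, "arrivals": [3, 9]})

queue = StopRequestQueue(DummyService(), request_delay=0)
queue.add_request(DummyRequest("place-davis"))
queue.add_request(DummyRequest("place-portr"))
queue.start()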
2.921875
3
trimap_module.py
lnugraha/trimap_generator
168
12265
<reponame>lnugraha/trimap_generator<filename>trimap_module.py<gh_stars>100-1000 #!/usr/bin/env python import cv2, os, sys import numpy as np def extractImage(path): # error handller if the intended path is not found image = cv2.imread(path, cv2.IMREAD_GRAYSCALE) return image def checkImage(image): """ Args: image: input image to be checked Returns: binary image Raises: RGB image, grayscale image, all-black, and all-white image """ if len(image.shape) > 2: print("ERROR: non-binary image (RGB)"); sys.exit(); smallest = image.min(axis=0).min(axis=0) # lowest pixel value: 0 (black) largest = image.max(axis=0).max(axis=0) # highest pixel value: 1 (white) if (smallest == 0 and largest == 0): print("ERROR: non-binary image (all black)"); sys.exit() elif (smallest == 255 and largest == 255): print("ERROR: non-binary image (all white)"); sys.exit() elif (smallest > 0 or largest < 255 ): print("ERROR: non-binary image (grayscale)"); sys.exit() else: return True class Toolbox: def __init__(self, image): self.image = image @property def printImage(self): """ Print image into a file for checking purpose unitTest = Toolbox(image); unitTest.printImage(image); """ f = open("image_results.dat", "w+") for i in range(0, self.image.shape[0]): for j in range(0, self.image.shape[1]): f.write("%d " %self.image[i,j]) f.write("\n") f.close() @property def displayImage(self): """ Display the image on a window Press any key to exit """ cv2.imshow('Displayed Image', self.image) cv2.waitKey(0) cv2.destroyAllWindows() def saveImage(self, title, extension): """ Save as a specific image format (bmp, png, or jpeg) """ cv2.imwrite("{}.{}".format(title,extension), self.image) def morph_open(self, image, kernel): """ Remove all white noises or speckles outside images Need to tune the kernel size Instruction: unit01 = Toolbox(image); kernel = np.ones( (9,9), np.uint8 ); morph = unit01.morph_open(input_image, kernel); """ bin_open = cv2.morphologyEx(self.image, cv2.MORPH_OPEN, kernel) return bin_open def morph_close(self, image, kernel): """ Remove all black noises or speckles inside images Need to tune the kernel size Instruction: unit01 = Toolbox(image); kernel = np.ones( (11,11)_, np.uint8 ); morph = unit01.morph_close(input_image, kernel); """ bin_close = cv2.morphologyEx(self.image, cv2.MORPH_CLOSE, kernel) return bin_close def trimap(image, name, size, number, erosion=False): """ This function creates a trimap based on simple dilation algorithm Inputs [4]: a binary image (black & white only), name of the image, dilation pixels the last argument is optional; i.e., how many iterations will the image get eroded Output : a trimap """ checkImage(image) row = image.shape[0] col = image.shape[1] pixels = 2*size + 1 ## Double and plus 1 to have an odd-sized kernel kernel = np.ones((pixels,pixels),np.uint8) ## Pixel of extension I get if erosion is not False: erosion = int(erosion) erosion_kernel = np.ones((3,3), np.uint8) ## Design an odd-sized erosion kernel image = cv2.erode(image, erosion_kernel, iterations=erosion) ## How many erosion do you expect image = np.where(image > 0, 255, image) ## Any gray-clored pixel becomes white (smoothing) # Error-handler to prevent entire foreground annihilation if cv2.countNonZero(image) == 0: print("ERROR: foreground has been entirely eroded") sys.exit() dilation = cv2.dilate(image, kernel, iterations = 1) dilation = np.where(dilation == 255, 127, dilation) ## WHITE to GRAY remake = np.where(dilation != 127, 0, dilation) ## Smoothing remake = np.where(image > 127, 200, dilation) ## mark the 
tumor inside GRAY remake = np.where(remake < 127, 0, remake) ## Embelishment remake = np.where(remake > 200, 0, remake) ## Embelishment remake = np.where(remake == 200, 255, remake) ## GRAY to WHITE ############################################# # Ensures only three pixel values available # # TODO: Optimization with Cython # ############################################# for i in range(0,row): for j in range (0,col): if (remake[i,j] != 0 and remake[i,j] != 255): remake[i,j] = 127 path = "./images/results/" ## Change the directory new_name = '{}px_'.format(size) + name + '_{}.png'.format(number) cv2.imwrite(os.path.join(path, new_name) , remake) ############################################# ### TESTING SECTION ### ############################################# if __name__ == '__main__': path = "./images/test_images/test_image_11.png" image = extractImage(path) size = 10 number = path[-5] title = "test_image" unit01 = Toolbox(image); kernel1 = np.ones( (11,11), np.uint8 ) unit01.displayImage opening = unit01.morph_close(image,kernel1) trimap(opening, title, size, number, erosion=False) unit02 = Toolbox(opening) unit02.displayImage ######################################################## ## Default instruction (no binary opening or closing ## ## trimap(image, title, size, number, erosion=False); ## ########################################################
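The core of the trimap construction is a single dilation that turns the band around the foreground into "unknown" (127) pixels. A reduced standalone sketch with an invented toy mask; it skips the erosion, smoothing, and file output that trimap() above also handles:

import numpy as np
import cv2

# Toy binary mask: a white square on a black background (values 0 and 255 only).
mask = np.zeros((64, 64), np.uint8)
mask[20:44, 20:44] = 255

size = 5
kernel = np.ones((2 * size + 1, 2 * size + 1), np.uint8)   # odd-sized kernel

dilated = cv2.dilate(mask, kernel, iterations=1)
trimap = np.where(dilated == 255, 127, dilated).astype(np.uint8)  # unknown band
trimap[mask == 255] = 255                                         # sure foreground

print(sorted(np.unique(trimap)))   # [0, 127, 255]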
2.890625
3
integration/keeper_secrets_manager_ansible/tests/keeper_init.py
Keeper-Security/secrets-manager
9
12266
<gh_stars>1-10 import unittest from unittest.mock import patch import os from .ansible_test_framework import AnsibleTestFramework, RecordMaker import keeper_secrets_manager_ansible.plugins import tempfile records = { "TRd_567FkHy-CeGsAzs8aA": RecordMaker.make_record( uid="TRd_567FkHy-CeGsAzs8aA", title="JW-F1-R1", fields={ "password": "<PASSWORD>" } ), "A_7YpGBUgRTeDEQLhVRo0Q": RecordMaker.make_file( uid="A_7YpGBUgRTeDEQLhVRo0Q", title="JW-F1-R2-File", files=[ {"name": "nailing it.mp4", "type": "video/mp4", "url": "http://localhost/abc", "data": "ABC123"}, {"name": "video_file.mp4", "type": "video/mp4", "url": "http://localhost/xzy", "data": "XYZ123"}, ] ) } def mocked_get_secrets(*args): if len(args) > 0: uid = args[0][0] ret = [records[uid]] else: ret = [records[x] for x in records] return ret class KeeperInitTest(unittest.TestCase): def setUp(self): self.yml_file_name = "test_keeper.yml" self.json_file_name = "test_keeper.json" # Add in addition Python libs. This includes the base # module for Keeper Ansible and the Keeper SDK. self.base_dir = os.path.dirname(os.path.realpath(__file__)) self.ansible_base_dir = os.path.join(self.base_dir, "ansible_example") self.yml_file = os.path.join(os.path.join(self.ansible_base_dir, self.yml_file_name)) self.json_file = os.path.join(os.path.join(self.ansible_base_dir, self.json_file_name)) for file in [self.yml_file, self.json_file]: if os.path.exists(file) is True: os.unlink(file) def tearDown(self): for file in [self.yml_file, self.json_file]: if os.path.exists(file) is True: os.unlink(file) def _common(self): with tempfile.TemporaryDirectory() as temp_dir: a = AnsibleTestFramework( base_dir=self.ansible_base_dir, playbook=os.path.join("playbooks", "keeper_init.yml"), inventory=os.path.join("inventory", "all"), plugin_base_dir=os.path.join(os.path.dirname(keeper_secrets_manager_ansible.plugins.__file__)), vars={ "keeper_token": "<KEY>", "keeper_config_file": self.yml_file_name, "show_config": True } ) r, out, err = a.run() result = r[0]["localhost"] self.assertEqual(result["ok"], 2, "1 things didn't happen") self.assertEqual(result["failures"], 0, "failures was not 0") self.assertEqual(result["changed"], 0, "0 things didn't change") self.assertTrue(os.path.exists(self.yml_file), "test_keeper.yml does not exist") a = AnsibleTestFramework( base_dir=self.ansible_base_dir, playbook=os.path.join("playbooks", "keeper_init.yml"), inventory=os.path.join("inventory", "all"), plugin_base_dir=os.path.join(os.path.dirname(keeper_secrets_manager_ansible.plugins.__file__)), vars={ "keeper_token": "<KEY>", "keeper_config_file": self.json_file_name, "show_config": False } ) r, out, err = a.run() result = r[0]["localhost"] self.assertEqual(result["ok"], 2, "1 things didn't happen") self.assertEqual(result["failures"], 0, "failures was not 0") self.assertEqual(result["changed"], 0, "0 things didn't change") self.assertTrue(os.path.exists(self.json_file), "test_keeper.json does not exist") # @unittest.skip @patch("keeper_secrets_manager_core.core.SecretsManager.get_secrets", side_effect=mocked_get_secrets) def test_keeper_get_mock(self, _): self._common() @unittest.skip def test_keeper_get_live(self): self._common()
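The test above relies on patching with a side_effect callable so the mocked SDK returns canned records instead of hitting the network. A self-contained illustration of the same pattern, built around an invented fetch_secret helper:

import unittest
from unittest.mock import patch


def fetch_secret(uid):
    raise RuntimeError("would hit the network")  # never called once patched


class SideEffectExample(unittest.TestCase):
    @patch(__name__ + ".fetch_secret", side_effect=lambda uid: {"uid": uid})
    def test_mocked(self, mocked_fetch):
        # the global name now resolves to the mock, which runs the side_effect
        self.assertEqual(fetch_secret("abc"), {"uid": "abc"})
        mocked_fetch.assert_called_once_with("abc")


if __name__ == "__main__":
    unittest.main()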
2.15625
2
wonambi/attr/__init__.py
wonambi-python/wonambi
63
12267
<reponame>wonambi-python/wonambi<filename>wonambi/attr/__init__.py<gh_stars>10-100 """Packages containing all the possible attributes to recordings, such as - channels (module "chan") with class: - Chan - anatomical info (module "anat") with class: - Surf - annotations and sleep scores (module "annotations") with class: - Annotations Possibly include forward and inverse models. These attributes are only "attached" to the DataType, there should not be any consistency check when you load them. The risk is that attributes do not refer to the correct datatype, but the advantage is that we cannot keep track of all the possible inconsistencies (f.e. if the channel names are not the same between the actual channels and those stored in the Channels class). In addition, these classes are often used in isolation, even without a dataset, so do not assume that any of the classes in the module can call the main dataset. In other words, these classes shouldn't have methods calling the datatype, but there can be functions in the modules that use both the dataset and the classes below. """ from .chan import Channels from .anat import Brain, Surf, Freesurfer from .annotations import Annotations, create_empty_annotations
2.46875
2
Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py
LCS2-IIITD/summarization_bias
1
12268
<filename>Corpus/Pyramid Score/PyrEval/Pyramid/parameters.py """ =========== What is Matter Parameters =================== """ #tups = [(125.0, 1.0), (125.0, 1.5), (125.0, 2.0), (125.0, 2.5), (125.0, 3.0), (150.0, 1.0), (150.0, 1.5), (150.0, 2.0), (150.0, 2.5), (150.0, 3.0), (175.0, 1.0), (175.0, 1.5), (175.0, 2.0), (175.0, 2.5), (175.0, 3.0), (200.0, 1.0), (200.0, 1.5), (200.0, 2.0), (200.0, 2.5), (200.0, 3.0), (225.0, 1.0), (225.0, 1.5), (225.0, 2.0), (225.0, 2.5), (225.0, 3.0), (250.0, 1.0), (250.0, 1.5), (250.0, 2.0), (250.0, 2.5), (250.0, 3.0)] """ =========== DUC Data ========== """ #tups = [(64.0, 1.0), (64.0, 1.5), (64.0, 2.0), (64.0, 2.5), (70.0, 1.0), (70.0, 1.5), (70.0, 2.0), (70.0, 2.5), (76.0, 1.0), (76.0, 1.5), (76.0, 2.0), (76.0, 2.5), (82.0, 1.0), (82.0, 1.5), (82.0, 2.0), (82.0, 2.5), (88.0, 1.0), (88.0, 1.5), (88.0, 2.0), (88.0, 2.5), (96.0, 1.0), (96.0, 1.5), (96.0, 2.0), (96.0, 2.5), (100.0, 1.0), (100.0, 1.5), (100.0, 2.0), (100.0, 2.5)] #b = [1.0,1.5,2.0,2.5,3.0] # alpha should be from [10,40] #a = range(len(segpool)+10,len(segpool)+60,10) #tups = list(itertools.product(a,b)) #print "Alll combinations ", tups #tups = [(125, 1.0), (125, 1.5), (125, 2.0), (125, 2.5), (125, 3.0), (135, 1.0), (135, 1.5), (135, 2.0), (135, 2.5), (135, 3.0), (145, 1.0), (145, 1.5), (145, 2.0), (145, 2.5), (145, 3.0), (155, 1.0), (155, 1.5), (155, 2.0), (155, 2.5), (155, 3.0), (165, 1.0), (165, 1.5), (165, 2.0), (165, 2.5), (165, 3.0)] #thresholds = [83]
1.703125
2
deploy/terraform/tasks.py
kinecosystem/blockchain-ops
15
12269
<filename>deploy/terraform/tasks.py<gh_stars>10-100 """Call various Terraform actions.""" import os import os.path from invoke import task import jinja2 import yaml TERRAFORM_VERSION = '0.11.7' @task def install(c, ostype='linux', version=TERRAFORM_VERSION): """Download a local version of Terraform.""" if ostype == 'mac': ostype = 'darwin' file = f'terraform_{version}_{ostype}_amd64.zip' if os.path.exists('terraform'): print('Terraform file found') return print(f'Downloading Terraform {version}') c.run(f'wget -q https://releases.hashicorp.com/terraform/{version}/{file}') c.run(f'unzip {file}') c.run(f'rm {file}') MAIN_TF_FILE = 'stellar-network.tf' @task def template(c, vars_file='vars.yml'): """Process Terraform file taht require templating. Terraform and HCL has limitations that can be easily solved using template languages like Jinja. For example, avoiding redundancy when calling a module multiple times with just a single different variable value every time. """ print('generating terraform files from templates') with open(vars_file) as f: variables = yaml.load(f) for root, _, files in os.walk("."): for file in files: stripped_file, ext = os.path.splitext(file) if ext != '.j2': continue out_file = f'{root}/{stripped_file}' print(f'processing file {root}/{file}') with open(f'{root}/{file}') as f: tmplate = jinja2.Template(f.read(), extensions=['jinja2.ext.do']) out = tmplate.render(variables, env_vars=os.environ) with open(out_file, 'w') as f: f.write(out) c.run(f'./terraform fmt {out_file}') @task(template) def init(c): """Call terraform init.""" print('initializing') c.run('./terraform init') @task(init) def new_workspace(c, vars_file='vars.yml'): """Set terraform workspace.""" print('setting workspace') with open(vars_file) as f: variables = yaml.load(f) workspace = variables['stellar']['network_name'] c.run(f'./terraform workspace new {workspace}') @task(init) def workspace(c, vars_file='vars.yml'): """Set terraform workspace.""" print('setting workspace') with open(vars_file) as f: variables = yaml.load(f) workspace = variables['stellar']['network_name'] c.run(f'./terraform workspace select {workspace}') @task(workspace) def modules(c): """Call terraform get.""" print('getting modules') c.run('./terraform get') @task(modules) def plan(c, destroy=False): """Call terraform plan.""" print('planning') c.run('./terraform plan {}'.format('-destroy' if destroy else '')) @task(modules) def apply(c, yes=False): """Call terraform destroy.""" print('applying') c.run('./terraform apply {}'.format('-auto-approve' if yes else '')) @task(modules) def destroy(c, yes=False): """Call terraform destroy.""" print('destroying') c.run('./terraform destroy {}'.format('-auto-approve' if yes else '')) @task(modules) def output(c): """Call terraform output.""" print('printing output') c.run('./terraform output')
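The template() task boils down to rendering each *.j2 file with Jinja2, passing the YAML variables plus the process environment. A small standalone sketch with an inline template and invented variable values:

import os
import jinja2

# Render a tiny inline template the same way the task renders *.j2 files on disk.
tmpl = jinja2.Template(
    'region = "{{ stellar.region }}"  # deployed by {{ env_vars.get("USER", "unknown") }}',
    extensions=['jinja2.ext.do'],
)
variables = {"stellar": {"region": "us-east-1"}}   # stands in for vars.yml
print(tmpl.render(variables, env_vars=os.environ))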
2.46875
2
gdal/swig/python/scripts/gdal2xyz.py
Sokigo-GLS/gdal
0
12270
<filename>gdal/swig/python/scripts/gdal2xyz.py
import sys

# import osgeo.utils.gdal2xyz as a convenience to use as a script
from osgeo.utils.gdal2xyz import *  # noqa
from osgeo.utils.gdal2xyz import main
from osgeo.gdal import deprecation_warn

deprecation_warn('gdal2xyz', 'utils')

sys.exit(main(sys.argv))
1.476563
1
inferencia/task/person_reid/body_reid/model/body_reid_model_factory.py
yuya-mochimaru-np/inferencia
0
12271
from .body_reid_model_name import BodyReidModelName


class BodyReidModelFactory:
    @staticmethod
    def create(model_name, model_path, model_precision):
        if model_name == BodyReidModelName.osnet_x0_25.value:
            from .model.osnet.osnet_x0_25 import OSNetX025
            return OSNetX025(model_path, model_precision)
        else:
            msg = "{} is not implemented. Choose from {}.".format(
                model_name, BodyReidModelName.names()
            )
            raise NotImplementedError(msg)
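The factory is a simple name-to-class dispatch. A self-contained sketch of the same idea with an invented registry and placeholder model class (the real OSNetX025 and its weight paths are not reproduced here):

class _DemoModel:
    def __init__(self, model_path, model_precision):
        self.model_path = model_path
        self.model_precision = model_precision


_REGISTRY = {"osnet_x0_25": _DemoModel}


def create(model_name, model_path, model_precision):
    try:
        return _REGISTRY[model_name](model_path, model_precision)
    except KeyError:
        raise NotImplementedError(
            "{} is not implemented. Choose from {}.".format(model_name, list(_REGISTRY))
        )


model = create("osnet_x0_25", "weights/osnet_x0_25.pth", "FP32")
print(model.model_path, model.model_precision)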
2.140625
2
cheatingbee/twitter.py
exoskellyman/cheatingbee
0
12272
import datetime import io import os import tweepy from dotenv import load_dotenv from PIL import Image, ImageDraw, ImageFont class Twitter: """ A class used to manage the connection with the Twitter API ... Methods ------- post_tweet(solver_answers, nyt_answers, pangrams) Creates the tweet text and posts a picture with todays answers """ def __init__(self): load_dotenv() api_key = os.environ.get('TWITTER_API') api_key_secret = os.environ.get('TWITTER_API_SECRET') access_token = os.environ.get('TWITTER_ACCESS') access_token_secret = os.environ.get('TWITTER_ACCESS_SECRET') auth = tweepy.OAuthHandler(api_key, api_key_secret) auth.set_access_token(access_token, access_token_secret) self.api = tweepy.API(auth) def post_tweet(self, solver_answers, nyt_answers, pangrams): """Composes the tweet text and posts a picture with todays answers marked as NSFW to avoid spoilers Parameters ---------- solver_answers: list, required The answers returned by the solver nyt_answers: list, required The answers of todays New York Times Spelling Bee pangrams: list, required The pangrams in the answers of todays New York Times Spelling Bee """ pangrams.sort() nyt_answers.sort() text = ("Pangram(s):\n" + self.__make_rows(pangrams) + '\n\nAnswers:\n' + self.__make_rows(nyt_answers)) pic = self.__create_pic(text) media = self.api.media_upload( filename=str(datetime.date.today()), file=pic, possibly_sensitive=True) if len(solver_answers) == len(nyt_answers): tweet = "Cheating Bee got all {} answers on todays #SpellingBee!🐝🎓" tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!" tweet = tweet.format(len(nyt_answers)) else: tweet = "Cheating Bee got {}/{} answers on todays #SpellingBee!🐝" tweet = tweet + "\n\nNeed help with todays puzzle? Click the image below!" tweet = tweet.format(len(solver_answers), len(nyt_answers)) self.api.update_status(status=tweet, media_ids=[media.media_id]) def __make_rows(self, word_list): """Formats a list of words into a string with rows five words long Parameters ---------- word_list: list, required A list of words Returns ------- str The word list composed to a string with rows of five words """ text = '' for i in range(0, len(word_list), 5): if i + 5 < len(word_list): text = text + ', '.join(word_list[i:i+5]) + ',\n' else: text = text + ', '.join(word_list[i:len(word_list)]) return text def __create_pic(self, text): """Creates an image with and fills it with the text provided Parameters ---------- text: str, required The text string to be drawn on the picture Returns ------- file The picture as a file object """ font_size = 20 # number of lines plus 3 for padding height = (text.count('\n') + 3) * font_size # longest line in string length times font size at a ratio of .65 width = int( max([len(x) for x in text.splitlines()]) * font_size * 0.65) pic = Image.new("RGB", (width, height), (255, 255, 255)) font = ImageFont.truetype("Pillow/Tests/fonts/FreeMono.ttf", font_size) drawing = ImageDraw.Draw(pic) drawing.multiline_text((10, 10), text, font=font, fill=(0, 0, 0)) b = io.BytesIO() pic.save(b, 'png') b.seek(0) return b
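The private __make_rows helper just joins the word list five items per line; an equivalent standalone version for reference (sample words invented):

def make_rows(words, per_row=5):
    # same idea as Twitter.__make_rows above: comma-join in groups of five per line
    lines = [", ".join(words[i:i + per_row]) for i in range(0, len(words), per_row)]
    return ",\n".join(lines)

print(make_rows(["apple", "bee", "comb", "drone", "hive", "honey", "queen"]))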
3.140625
3
variables.py
MuhweziDeo/python_refresher
0
12273
x = 2
print(x)

# multiple assignment
a, b, c, d = (1, 2, 5, 9)
print(a, b, c, d)
print(type(str(a)))
3.46875
3
examples/authentication/demo_auth.py
jordiyeh/safrs
0
12274
<reponame>jordiyeh/safrs #!/usr/bin/env python # # This is a demo application to demonstrate the functionality of the safrs_rest REST API with authentication # # you will have to install the requirements: # pip3 install passlib flask_httpauth flask_login # # This script can be ran standalone like this: # python3 demo_auth.py [Listener-IP] # This will run the example on http://Listener-Ip:5000 # # - A database is created and a item is added # - User is created and the User endpoint is protected by user:admin & pass: <PASSWORD> # - swagger2 documentation is generated # import sys import os import logging import builtins from functools import wraps from flask import Flask, redirect, jsonify, make_response from flask import abort, request, g, url_for from flask_sqlalchemy import SQLAlchemy from sqlalchemy import Column, Integer, String from safrs import SAFRSBase, SAFRSJSONEncoder, Api, jsonapi_rpc from flask_swagger_ui import get_swaggerui_blueprint from flask_sqlalchemy import SQLAlchemy from flask_httpauth import HTTPBasicAuth from passlib.apps import custom_app_context as pwd_context from itsdangerous import (TimedJSONWebSignatureSerializer as Serializer, BadSignature, SignatureExpired) from flask.ext.login import LoginManager, UserMixin, \ login_required, login_user, logout_user db = SQLAlchemy() auth = HTTPBasicAuth() # Example sqla database object class Item(SAFRSBase, db.Model): ''' description: Item description ''' __tablename__ = 'items' id = Column(String, primary_key=True) name = Column(String, default = '') class User(SAFRSBase, db.Model): ''' description: User description ''' __tablename__ = 'users' id = db.Column(String, primary_key=True) username = db.Column(db.String(32), index=True) password_hash = db.Column(db.String(64)) custom_decorators = [auth.login_required] @jsonapi_rpc(http_methods = ['POST']) def hash_password(self, password): self.password_hash = pwd_context.encrypt(password) @jsonapi_rpc(http_methods = ['POST']) def verify_password(self, password): return pwd_context.verify(password, self.password_hash) @jsonapi_rpc(http_methods = ['POST']) def generate_auth_token(self, expiration=600): s = Serializer(app.config['SECRET_KEY'], expires_in=expiration) return s.dumps({'id': self.id}) @staticmethod @jsonapi_rpc(http_methods = ['POST']) def verify_auth_token(token): s = Serializer(app.config['SECRET_KEY']) try: data = s.loads(token) except SignatureExpired: return None # valid token, but expired except BadSignature: return None # invalid token user = User.query.get(data['id']) return user def start_app(app): api = Api(app, api_spec_url = '/api/swagger', host = '{}:{}'.format(HOST,PORT), schemes = [ "http" ] ) item = Item(name='test',email='em@il') user = User(username='admin') user.hash_password('<PASSWORD>') api.expose_object(Item) api.expose_object(User) # Set the JSON encoder used for object to json marshalling app.json_encoder = SAFRSJSONEncoder # Register the API at /api/docs swaggerui_blueprint = get_swaggerui_blueprint('/api', '/api/swagger.json') app.register_blueprint(swaggerui_blueprint, url_prefix='/api') print('Starting API: http://{}:{}/api'.format(HOST,PORT)) app.run(host=HOST, port = PORT) # # APP Initialization # app = Flask('demo_app') app.config.update( SQLALCHEMY_DATABASE_URI = 'sqlite://', SQLALCHEMY_TRACK_MODIFICATIONS = False, SECRET_KEY = b'<KEY>', DEBUG = True) HOST = sys.argv[1] if len(sys.argv) > 1 else '0.0.0.0' PORT = 5000 db.init_app(app) # # Authentication and custom routes # @auth.verify_password def verify_password(username_or_token, 
password): user = User.verify_auth_token(username_or_token) if not user: # try to authenticate with username/password user = User.query.filter_by(username=username_or_token).first() if not user or not user.verify_password(password): return False print('Authentication Successful for "{}"'.format(user.username)) return True @app.route('/') def goto_api(): return redirect('/api') @app.teardown_appcontext def shutdown_session(exception=None): '''cfr. http://flask.pocoo.org/docs/0.12/patterns/sqlalchemy/''' db.session.remove() # Start the application with app.app_context(): db.create_all() start_app(app)
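The hash_password / verify_password RPCs above wrap passlib's application context. A standalone sketch of those two calls, using pwd_context.hash (the non-deprecated spelling of the encrypt call used above); the sample password is invented:

from passlib.apps import custom_app_context as pwd_context

# hashing stores a salted digest; verify checks a candidate password against it
password_hash = pwd_context.hash("correct horse battery staple")
print(pwd_context.verify("correct horse battery staple", password_hash))  # True
print(pwd_context.verify("wrong guess", password_hash))                   # False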
2.609375
3
ev3/sensors/color.py
NewThingsCo/ev3-controller
1
12275
<reponame>NewThingsCo/ev3-controller import goless import time from sys import platform if platform == "linux" or platform == "linux2": import brickpi3 def start_color_sensor(brick, port, channel): print("start color sensor") setup_sensor(brick, port) goless.go(run_color_sensor, brick, port, channel) print("color sensor started") def setup_sensor(brick, port): brick.set_sensor_type(port, brick.SENSOR_TYPE.EV3_COLOR_REFLECTED) error = True while error: time.sleep(0.1) try: brick.get_sensor(port) error = False except brickpi3.SensorError as error: error = True def run_color_sensor(brick, port, channel): # sensor_modes = [(brick.SENSOR_TYPE.EV3_COLOR_REFLECTED, 'reflect'), # (brick.SENSOR_TYPE.EV3_COLOR_AMBIENT, 'ambiet'), # (brick.SENSOR_TYPE.EV3_COLOR_COLOR, 'color'), # (brick.SENSOR_TYPE.EV3_COLOR_COLOR_COMPONENTS, 'color_components')] sensor_modes = [(brick.SENSOR_TYPE.EV3_COLOR_COLOR, 'color')] while True: for sensor_mode in sensor_modes: time.sleep(0.01) sensor_value = read_sensor(brick, port, sensor_mode[0]) if sensor_value: channel.send((sensor_mode[1], sensor_value)) if isinstance(sensor_value, brickpi3.SensorError): break def read_sensor(brick, port, sensor_type): try: brick.set_sensor_type(port, sensor_type) time.sleep(0.01) return brick.get_sensor(port) except brickpi3.SensorError as error: print("error color", error) return error if __name__ == '__main__': print('for local testing read 100 color readings from port 1') brick = brickpi3.BrickPi3() readings = goless.chan() start_color_sensor(brick, brick.PORT_3, readings) for i in range(100): case, val = goless.select([goless.rcase(readings)]) print(case, val) print('100 reading are done, time to clean and exit') brick.reset_all()
2.625
3
model/BPE.py
djmhunt/TTpy
0
12276
# -*- coding: utf-8 -*- """ :Author: <NAME> """ import logging import numpy as np import scipy as sp import collections import itertools from model.modelTemplate import Model class BPE(Model): """The Bayesian predictor model Attributes ---------- Name : string The name of the class used when recording what has been used. Parameters ---------- alpha : float, optional Learning rate parameter epsilon : float, optional Noise parameter. The larger it is the less likely the model is to choose the highest expected reward number_actions : integer, optional The maximum number of valid actions the model can expect to receive. Default 2. number_cues : integer, optional The initial maximum number of stimuli the model can expect to receive. Default 1. number_critics : integer, optional The number of different reaction learning sets. Default number_actions*number_cues validRewards : list,np.ndarray, optional The different reward values that can occur in the task. Default ``array([0, 1])`` action_codes : dict with string or int as keys and int values, optional A dictionary used to convert between the action references used by the task or dataset and references used in the models to describe the order in which the action information is stored. dirichletInit : float, optional The initial values for values of the dirichlet distribution. Normally 0, 1/2 or 1. Default 1 prior : array of floats in ``[0, 1]``, optional Ignored in this case stimFunc : function, optional The function that transforms the stimulus into a form the model can understand and a string to identify it later. Default is blankStim rewFunc : function, optional The function that transforms the reward into a form the model can understand. Default is blankRew decFunc : function, optional The function that takes the internal values of the model and turns them in to a decision. Default is model.decision.discrete.weightProb See Also -------- model.BP : This model is heavily based on that one """ def __init__(self, alpha=0.3, epsilon=0.1, dirichletInit=1, validRewards=np.array([0, 1]), **kwargs): super(BPE, self).__init__(**kwargs) self.alpha = alpha self.epsilon = epsilon self.validRew = validRewards self.rewLoc = collections.OrderedDict(((k, v) for k, v in itertools.izip(self.validRew, range(len(self.validRew))))) self.dirichletVals = np.ones((self.number_actions, self.number_cues, len(self.validRew))) * dirichletInit self.expectations = self.updateExpectations(self.dirichletVals) self.parameters["epsilon"] = self.epsilon self.parameters["alpha"] = self.alpha self.parameters["dirichletInit"] = dirichletInit # Recorded information self.recDirichletVals = [] def returnTaskState(self): """ Returns all the relevant data for this model Returns ------- results : dict The dictionary contains a series of keys including Name, Probabilities, Actions and Events. 
""" results = self.standardResultOutput() results["dirichletVals"] = np.array(self.recDirichletVals) return results def storeState(self): """ Stores the state of all the important variables so that they can be accessed later """ self.storeStandardResults() self.recDirichletVals.append(self.dirichletVals.copy()) def rewardExpectation(self, observation): """Calculate the estimated reward based on the action and stimuli This contains parts that are task dependent Parameters ---------- observation : {int | float | tuple} The set of stimuli Returns ------- actionExpectations : array of floats The expected rewards for each action stimuli : list of floats The processed observations activeStimuli : list of [0, 1] mapping to [False, True] A list of the stimuli that were or were not present """ activeStimuli, stimuli = self.stimulus_shaper.processStimulus(observation) actionExpectations = self._actExpectations(self.dirichletVals, stimuli) return actionExpectations, stimuli, activeStimuli def delta(self, reward, expectation, action, stimuli): """ Calculates the comparison between the reward and the expectation Parameters ---------- reward : float The reward value expectation : float The expected reward value action : int The chosen action stimuli : {int | float | tuple | None} The stimuli received Returns ------- delta """ modReward = self.reward_shaper.processFeedback(reward, action, stimuli) return modReward def updateModel(self, delta, action, stimuli, stimuliFilter): """ Parameters ---------- delta : float The difference between the reward and the expected reward action : int The action chosen by the model in this trialstep stimuli : list of float The weights of the different stimuli in this trialstep stimuliFilter : list of bool A list describing if a stimulus cue is present in this trialstep """ # Find the new activities self._newExpect(action, delta, stimuli) # Calculate the new probabilities # We need to combine the expectations before calculating the probabilities actionExpectations = self._actExpectations(self.dirichletVals, stimuli) self.probabilities = self.calcProbabilities(actionExpectations) def _newExpect(self, action, delta, stimuli): self.dirichletVals[action, :, self.rewLoc[delta]] += self.alpha * stimuli/np.sum(stimuli) self.expectations = self.updateExpectations(self.dirichletVals) def _actExpectations(self, dirichletVals, stimuli): # If there are multiple possible stimuli, filter by active stimuli and calculate # calculate the expectations associated with each action. if self.number_cues > 1: actionExpectations = self.calcActExpectations(self.actStimMerge(dirichletVals, stimuli)) else: actionExpectations = self.calcActExpectations(dirichletVals[:, 0, :]) return actionExpectations def calcProbabilities(self, actionValues): # type: (np.ndarray) -> np.ndarray """ Calculate the probabilities associated with the actions Parameters ---------- actionValues : 1D ndArray of floats Returns ------- probArray : 1D ndArray of floats The probabilities associated with the actionValues """ cbest = actionValues == max(actionValues) deltaEpsilon = self.epsilon * (1 / self.number_actions) bestEpsilon = (1 - self.epsilon) / np.sum(cbest) + deltaEpsilon probArray = bestEpsilon * cbest + deltaEpsilon * (1 - cbest) return probArray def actorStimulusProbs(self): """ Calculates in the model-appropriate way the probability of each action. 
Returns ------- probabilities : 1D ndArray of floats The probabilities associated with the action choices """ probabilities = self.calcProbabilities(self.expectedRewards) return probabilities def actStimMerge(self, dirichletVals, stimuli): dirVals = dirichletVals * np.expand_dims(np.repeat([stimuli], self.number_actions, axis=0), 2) actDirVals = np.sum(dirVals, 1) return actDirVals def calcActExpectations(self, dirichletVals): actExpect = np.fromiter((np.sum(sp.stats.dirichlet(d).mean() * self.validRew) for d in dirichletVals), float, count=self.number_actions) return actExpect def updateExpectations(self, dirichletVals): def meanFunc(p, r=[]): return np.sum(sp.stats.dirichlet(p).mean() * r) expectations = np.apply_along_axis(meanFunc, 2, dirichletVals, r=self.validRew) return expectations
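calcActExpectations above reduces to taking the mean of a Dirichlet distribution and dotting it with the possible reward values. A standalone numerical sketch with invented pseudo-counts:

import numpy as np
import scipy.stats as sp_stats

valid_rewards = np.array([0, 1])
dirichlet_params = np.array([1.0, 3.0])    # pseudo-counts for reward 0 and reward 1

# expected reward = dot(mean of the Dirichlet, possible reward values)
mean = sp_stats.dirichlet(dirichlet_params).mean()
print(mean)                                # [0.25 0.75]
print(np.sum(mean * valid_rewards))        # 0.75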
2.78125
3
affiliates/banners/tests/__init__.py
glogiotatidis/affiliates
15
12277
from django.db.models.signals import post_init from factory import DjangoModelFactory, Sequence, SubFactory from factory.django import mute_signals from affiliates.banners import models class CategoryFactory(DjangoModelFactory): FACTORY_FOR = models.Category name = Sequence(lambda n: 'test{0}'.format(n)) class BannerFactory(DjangoModelFactory): ABSTRACT_FACTORY = True category = SubFactory(CategoryFactory) name = Sequence(lambda n: 'test{0}'.format(n)) destination = 'https://mozilla.org/' visible = True class ImageBannerFactory(BannerFactory): FACTORY_FOR = models.ImageBanner @mute_signals(post_init) class ImageVariationFactory(DjangoModelFactory): ABSTRACT_FACTORY = True color = 'Blue' locale = 'en-us' image = 'uploads/image_banners/test.png' class ImageBannerVariationFactory(ImageVariationFactory): FACTORY_FOR = models.ImageBannerVariation banner = SubFactory(ImageBannerFactory) class TextBannerFactory(BannerFactory): FACTORY_FOR = models.TextBanner class TextBannerVariationFactory(DjangoModelFactory): FACTORY_FOR = models.TextBannerVariation banner = SubFactory(TextBannerFactory) locale = 'en-us' text = Sequence(lambda n: 'test{0}'.format(n)) class FirefoxUpgradeBannerFactory(BannerFactory): FACTORY_FOR = models.FirefoxUpgradeBanner @mute_signals(post_init) class FirefoxUpgradeBannerVariationFactory(ImageVariationFactory): FACTORY_FOR = models.FirefoxUpgradeBannerVariation banner = SubFactory(FirefoxUpgradeBannerFactory) image = 'uploads/firefox_upgrade_banners/test.png' upgrade_image = 'uploads/firefox_upgrade_banners/test_upgrade.png'
1.929688
2
cradlepy/framework/http.py
cblanquera/cradlepy
0
12278
from .request import Request from .response import Response class HttpRequestCookieTrait: 'Designed for the Request Object; Adds methods to store COOKIE data' def get_cookies(self, *args): 'Returns COOKIE given name or all COOKIE' return self.get('cookie', *args) def remove_cookies(self, *args): 'Removes COOKIE given name or all COOKIE' return self.remove('cookie', *args) def has_cookies(self, *args): 'Returns true if has COOKIE given name or if COOKIE is set' return self.exists('cookie', *args) def set_cookies(self, data, *args): 'Sets COOKIE' if isinstance(data, (list, dict, tuple)): return self.set('cookie', data) if len(args) == 0: return self return self.set('cookie', data, *args) class HttpRequestGetTrait: 'Designed for the Request Object; Adds methods to store GET data' def get_get(self, *args): 'Returns GET given name or all GET' return self.get('get', *args) def remove_get(self, *args): 'Removes GET given name or all GET' return self.remove('get', *args) def has_get(self, *args): 'Returns true if has GET given name or if GET is set' return self.exists('get', *args) def set_get(self, data, *args): 'Sets GET' if isinstance(data, (list, dict, tuple)): return self.set('get', data) if len(args) == 0: return self return self.set('get', data, *args) class HttpRequestPostTrait: 'Designed for the Request Object; Adds methods to store POST data' def get_post(self, *args): 'Returns POST given name or all POST' return self.get('post', *args) def remove_post(self, *args): 'Removes POST given name or all POST' return self.remove('post', *args) def has_post(self, *args): 'Returns true if has POST given name or if POST is set' return self.exists('post', *args) def set_post(self, data, *args): 'Sets POST' if isinstance(data, (list, dict, tuple)): return self.set('post', data) if len(args) == 0: return self return self.set('post', data, *args) class HttpRequestServerTrait: 'Designed for the Request Object; Adds methods to store SERVER data' def get_method(self): 'Returns method if set' pass def get_path(self, name = None): 'Returns path data given name or all path data' pass def get_query(self): 'Returns string query if set' pass def get_server(self, name = None): 'Returns SERVER data given name or all SERVER data' pass def has_server(self, name = None): 'Returns SERVER data given name or all SERVER data' pass def is_method(self, method): 'Returns true if method is the one given' pass def set_method(self, method): 'Sets request method' pass def set_path(self, path): 'Sets path given in string or array form' pass def set_query(self, query): 'Sets query string' pass def set_server(self, server): 'Sets SERVER' pass class HttpRequestSessionTrait: 'Designed for the Request Object; Adds methods to store SESSION data' def get_session(self, *args): 'Returns SESSION given name or all SESSION' return self.get('session', *args) def remove_session(self, *args): 'Removes SESSION given name or all SESSION' self.remove('session', *args) #TODO: link session object return self def has_session(self, *args): 'Returns true if has SESSION given name or if SESSION is set' return self.exists('session', *args) def set_session(self, data, *args): 'Sets SESSION' if isinstance(data, (list, dict, tuple)): return self.set('session', data) if len(args) == 0: return self self.set('session', data, *args) #TODO: link session object return self class HttpRequest( Request, HttpRequestCookieTrait, HttpRequestGetTrait, HttpRequestPostTrait, HttpRequestServerTrait, HttpRequestSessionTrait ): 'Http Request Object' def load(self): 'Loads 
default data given by WSGI' pass class HttpResponseHeaderTrait: 'Designed for the Response Object; Adds methods to process headers' def add_header(self, name, value = None): 'Adds a header parameter' pass def get_headers(self, name = None): 'Returns either the header value given the name or the all headers' pass def remove_header(self, name): 'Removes a header parameter' pass class HttpResponsePageTrait: 'Designed for the Response Object; Adds methods to process REST type responses' def add_meta(self, name, content): 'Adds a page meta item' pass def get_flash(self): 'Returns flash data' pass def get_meta(self, *args): 'Returns meta given path or all meta data' pass def get_page(self, *args): 'Returns page data given path or all page data' pass def has_page(self, *args): 'Returns true if theres any page data' pass def remove_page(self, *args): 'Removes arbitrary page data' pass def set_flash(self, message, type = 'info'): 'Sets a Page flash' pass def set_page(self, *args): 'Sets arbitrary page data' pass def set_title(self, title): 'Sets a Page title' pass class HttpResponseStatusTrait: 'Designed for the Response Object; Adds methods to process status codes' def get_status(self): 'Returns the status code' pass def set_status(self, code, status): 'Sets a status code' pass class HttpResponse( Response, HttpResponseHeaderTrait, HttpResponsePageTrait, HttpResponseStatusTrait ): 'Http Response Object' def load(self): 'Loads default data' pass class HttpRouterTrait: 'Designed for the HttpHandler we are parting this out to lessen the confusion' def all(self, path, callback): 'Adds routing middleware for all methods' pass def delete(self, path, callback): 'Adds routing middleware for delete method' pass def get(self, path, callback): 'Adds routing middleware for get method' pass def get_router(self): 'Returns a router object' pass def post(self, path, callback): 'Adds routing middleware for post method' pass def put(self, path, callback): 'Adds routing middleware for put method' pass def route(self, method, path, callback): 'Adds routing middleware' pass def set_router(self, router): 'Sets the router to use' pass def trigger_route(self, method, path, *args): 'Manually trigger a route' pass class HttpRouterInterface: 'Handles method-path matching and routing' def __init__(self, handler = None): 'Allow to pass a custom EventHandler' pass def process(self, request, *args): 'Process routes' pass def route(self, method, pattern, callback): 'Adds routing middleware' pass class HttpRouter(HttpRouterInterface): 'Handles method-path matching and routing' def __init__(self, handler = None): 'Allow to pass a custom EventHandler' pass def process(self, request, *args): 'Process routes' pass def route(self, method, pattern, callback): 'Adds routing middleware' pass class HttpDispatcher: pass class HttpHandler: pass class HttpDispatcherTrait: pass class HttpTrait: pass
2.921875
3
src/plugins/command/main.py
AlexCaranha/MyLauncher
0
12279
<filename>src/plugins/command/main.py
import pluggy

hookimpl = pluggy.HookimplMarker('mylauncher')


def get_class():
    return CommandPlugin()


class CommandPlugin:
    @hookimpl
    def setup(self):
        print("Setup ...")

    @hookimpl
    def get_alias(self):
        return "command"

    @hookimpl
    def execute(self, input: str):
        return None
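A usage sketch of how a host application might drive this plugin with pluggy, assuming the CommandPlugin class above is importable; the hookspec shown is a guess at what 'mylauncher' defines, not taken from the project:

import pluggy

hookspec = pluggy.HookspecMarker('mylauncher')


class LauncherSpec:
    @hookspec
    def get_alias(self):
        """Return the keyword that triggers this plugin."""


pm = pluggy.PluginManager('mylauncher')
pm.add_hookspecs(LauncherSpec)
pm.register(CommandPlugin())       # the plugin class defined above
print(pm.hook.get_alias())         # ['command']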
2.4375
2
setup.py
kmike/UnbalancedDataset
6
12280
#! /usr/bin/env python """Toolbox for unbalanced dataset in machine learning.""" from setuptools import setup, find_packages import os import sys import setuptools from distutils.command.build_py import build_py if sys.version_info[0] < 3: import __builtin__ as builtins else: import builtins descr = """Toolbox for unbalanced dataset in machine learning.""" DISTNAME = 'unbalanced_dataset' DESCRIPTION = 'Toolbox for unbalanced dataset in machine learning.' LONG_DESCRIPTION = descr MAINTAINER = '<NAME>, <NAME>' MAINTAINER_EMAIL = '<EMAIL>, <EMAIL>' URL = 'https://github.com/fmfn/UnbalancedDataset' LICENSE = 'new BSD' DOWNLOAD_URL = 'https://github.com/fmfn/UnbalancedDataset' # This is a bit (!) hackish: we are setting a global variable so that the main # skimage __init__ can detect if it is being loaded by the setup routine, to # avoid attempting to load components that aren't built yet: # the numpy distutils extensions that are used by UnbalancedDataset to # recursively build the compiled extensions in sub-packages is based on # the Python import machinery. builtins.__UNBALANCED_DATASET_SETUP__ = True with open('unbalanced_dataset/__init__.py') as fid: for line in fid: if line.startswith('__version__'): VERSION = line.strip().split()[-1][1:-1] break with open('requirements.txt') as fid: INSTALL_REQUIRES = [l.strip() for l in fid.readlines() if l] # requirements for those browsing PyPI REQUIRES = [r.replace('>=', ' (>= ') + ')' for r in INSTALL_REQUIRES] REQUIRES = [r.replace('==', ' (== ') for r in REQUIRES] REQUIRES = [r.replace('[array]', '') for r in REQUIRES] def configuration(parent_package='', top_path=None): if os.path.exists('MANIFEST'): os.remove('MANIFEST') from numpy.distutils.misc_util import Configuration config = Configuration(None, parent_package, top_path) config.set_options( ignore_setup_xxx_py=True, assume_default_configuration=True, delegate_options_to_subpackages=True, quiet=True) config.add_subpackage('unbalanced_dataset') return config if __name__ == "__main__": try: from numpy.distutils.core import setup extra = {'configuration': configuration} # Do not try and upgrade larger dependencies for lib in ['scipy', 'numpy', 'matplotlib']: try: __import__(lib) INSTALL_REQUIRES = [i for i in INSTALL_REQUIRES if lib not in i] except ImportError: pass except ImportError: if len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or sys.argv[1] in ('--help-commands', '--version', 'clean')): # For these actions, NumPy is not required. # # They are required to succeed without Numpy for example when # pip is used to install UnbalancedDataset when Numpy is not yet # present in the system. from setuptools import setup extra = {} else: print('To install UnbalancedDataset from source, you need numpy.' 
+ 'Install numpy with pip:\n' + 'pip install numpy\n' 'Or use your operating system package manager.') sys.exit(1) setup( name=DISTNAME, description=DESCRIPTION, long_description=LONG_DESCRIPTION, maintainer=MAINTAINER, maintainer_email=MAINTAINER_EMAIL, url=URL, license=LICENSE, download_url=DOWNLOAD_URL, version=VERSION, classifiers=['Intended Audience :: Science/Research', 'Intended Audience :: Developers', 'License :: OSI Approved', 'Programming Language :: Python', 'Topic :: Software Development', 'Topic :: Scientific/Engineering', 'Operating System :: Microsoft :: Windows', 'Operating System :: POSIX', 'Operating System :: Unix', 'Operating System :: MacOS', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', ], install_requires=INSTALL_REQUIRES, requires=REQUIRES, packages=setuptools.find_packages(exclude=['doc']), include_package_data=True, zip_safe=False, # the package can run out of an .egg file cmdclass={'build_py': build_py}, **extra )
1.757813
2
ores/scoring/models/__init__.py
elukey/ores
69
12281
<reponame>elukey/ores
from .rev_id_scorer import RevIdScorer

__all__ = ['RevIdScorer']
1.0625
1
conjuntos.py
Tiesco789/guppe
0
12282
""" Conjuntos — Conjunto em qualquer linguagem de programação, estamos fazendo referência à teoria de conjuntos da matemática — Aqui no Python, os conjuntos são chamados de sets Dito isto, da mesma forma que na matemática: — Sets (conjuntos) não possuem valores duplicados; — Sets (conjuntos) não possuem valores ordenados; — Elementos não são acessados via índice, ou seja, conjuntos não são indexados; Conjuntos são bons para se utilizar quando precisamos armazenar elementos mas não nos importamos com a ordenaçào deles. Quando não precisamos se preocupar com chaves, valores e itens duplicados Os conjuntos (sets) são referenciados em python com chaves {} Diferença entre conjutnos (sets) e mapas (dicionários) em python: — Um dicionário tem chave/valor — Um conjunto tem apenas valor # Definindo um conjunto # Forma 1 s = set({1, 2, 3, 4, 5, 6, 7, 2, 3}) # Repare que temos valores repetidos print(s) print(type(s)) # OBS: Ao criar uim conjunto, caso seja adicionado um valor já existente, o mesmo será ignorado sem gerar error e nào fará parde do conjunto # Forma 2 s = {1, 2, 3, 4, 5, 5} print(s) print(type(s)) # Podemos verificar se um determinado valor está contido em um conjunto if 3 in s: print('Encontrei o valor 3') else: print('Não encontrei o valor 3') # Importante lembrar que, alem de não termos valores duplicados, os valores não são ordenados dados = 99, 2, 34, 23, 2, 12, 1, 44, 5, 34 # Listas aceitam valores duplicados, então temos 10 elementos lista = list(dados) print(f"Lista: {lista} com {len(lista)} elementos") # Tuplas aceitam valores duplicados, então temos 10 elementos tupla = tuple(dados) print(f"Tupla: {tupla} com {len(tupla)} elementos") # Dicionários não aceitam chaves duplicadas, então temos 8 elementos dicionario = {}.fromkeys(dados, 'dict') print(f"Dicionário: {dicionario} com {len(dicionario)} elementos") # Conjuntos não aceitam valores duplicados, então temos 8 elementos conjunto = set(dados) print(f"Conjunto: {conjunto} com {len(conjunto)} elementos") # Assim como os outros conjuntos python, podemos colocar tipos de dados misturados em Sets s = {1, 'b', True, 1.23, 44} print(s) print(type(s)) # Podemos iterar em um set normalmente for valor in s: print(valor) # Usos interessantes com sets # Imagine que fizemos um formulário de cadastro de visitantes em uma feira ou museu, # os visitantes informam manualmente a cidade de onde vieram # Nós adicionamos cada cidade em uma lista Python, já que em uma lista podemos adicionar novos elmentos e ter repetições cidades = ['Belo Horizante', 'São Paulo', 'Campo Grande', 'Cuiaba', 'Campo Grande', 'São Paulo', 'Cuiaba'] print(cidades) print(len(cidades)) # Agora precisamos saber quantas cidades distintas, ou seja, únicas, temos. # O que você faria? Faria um loop na lista? 
# Podemos utilizar o set para isso print(len(set(cidades))) s = {1, 2, 3} s.add(4) print(s) s = {1, 2, 3} s.remove(3) print(s) s.discard(2) print(s) # Copiando um conjunto para outro # Forma 1 - Deep Copy novo = s.copy() print(novo) novo.add(4) print(novo) print(s) # Forma 2 - Shallow Copy novo = s novo.add(4) print(novo) print(s) s = {1, 2, 3} print(s) s.clear() print(s) # Precisamos gerar qum conjunto com nomes de estudantes únicos # Forma 1 - Utilizando union # unicos1 = estudantes_python.union(estudantes_java) # print(unicos1) # Forma 2 - Utilizando o | pipe unicos2 = estudantes_python | estudantes_java print(unicos2) # Gerar um conjunto de estudantes que estão em ambos os cursos # Forma 1 - Utilizando union ambos1 = estudantes_python.intersection(estudantes_java) print(ambos1) # Forma 2 - utilizando o & ambos2 = estudantes_python & estudantes_java print(ambos2) # Métodos matemáticos de conjuntos # Imagine que temos dois conjuntos: um contendo estudantes do curso Python e um # Contendo estudantes do curso Java estudantes_python = {'Pedro', 'Maria', 'Cláudia', 'João', 'Marcos', 'Patricia'} estudantes_java = {'Ana', 'Maria', 'Cláudia', 'João', 'Marcos', 'Patricia'} # Veja que alguns alins que estudam python também estudam java. # Gerar um conjunto de estudantes que não estão no outro curso so_python = estudantes_python.difference(estudantes_java) print(so_python) so_java = estudantes_java.difference(estudantes_python) print(so_java) """
4.25
4
tests/test_gpreg.py
cdgreenidge/gdec
0
12283
<filename>tests/test_gpreg.py
"""Test gpreg.py."""
from typing import Tuple

import numpy as np
import pytest

from gdec import gpreg, npgp


@pytest.fixture(scope="module")
def dataset() -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
    np.random.seed(42)
    amplitude = 1.0
    lengthscale = 12
    sigma = 0.5
    n = 128

    def spectrum_fn(w: np.ndarray) -> np.ndarray:
        return npgp.rbf_spectrum(w, amplitude, lengthscale)

    basis, freqs = npgp.real_fourier_basis(n)
    coef_vars = npgp.rbf_spectrum(freqs, amplitude, lengthscale)
    z = np.arange(n)
    w = np.sqrt(coef_vars) * np.random.randn(n)
    f = basis @ w
    x = z.repeat(1)
    f_x = f.repeat(1)
    y = sigma * np.random.randn(*f_x.shape) + f_x
    return x[:, None], y, z[:, None], f


def test_you_can_train_periodic_gp_regression_on_the_synthetic_dataset(dataset):
    X, y, z, f = dataset
    grid_size = np.unique(X).size
    model = gpreg.PeriodicGPRegression()
    model.fit(X, y, grid_size=grid_size)
    f_est = model.predict(z)
    error = np.max(np.abs(f - f_est))
    assert error < 0.3


def test_training_pid_on_float_dataset_raises_value_error(dataset):
    X, y, _, _ = dataset
    X = X.astype(np.float32)
    grid_size = np.unique(X).size
    model = gpreg.PeriodicGPRegression()
    with pytest.raises(ValueError):
        model.fit(X, y, grid_size=grid_size)
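For intuition, the fixture's synthetic f is a draw from a smooth Gaussian process. A standalone sketch that samples a comparable function directly from an RBF kernel, bypassing npgp's Fourier-basis construction; the jitter value is an assumption added for numerical stability:

import numpy as np

np.random.seed(0)
n, amplitude, lengthscale, jitter = 128, 1.0, 12.0, 1e-6
z = np.arange(n)
# squared-exponential (RBF) covariance on a 1-D grid
K = amplitude * np.exp(-0.5 * (z[:, None] - z[None, :]) ** 2 / lengthscale ** 2)
L = np.linalg.cholesky(K + jitter * np.eye(n))   # jitter keeps the factorization stable
f = L @ np.random.randn(n)                       # one smooth sample path
print(f.shape)   # (128,)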
2.265625
2
gizer/all_schema_engines.py
racker/gizer
0
12284
#!/usr/bin/env python __author__ = "<NAME>" __copyright__ = "Copyright 2016, Rackspace Inc." __email__ = "<EMAIL>" from mongo_schema import schema_engine import os def get_schema_files(schemas_dirpath): """ get list of js / json files resided in dirpath param. """ res = [] for fname in os.listdir(schemas_dirpath): if fname.endswith('json') or fname.endswith('js'): res.append(fname) res.sort() return res def get_schema_engines_as_dict(schemas_dirpath): """ Load schema engines into dict. Basename of schema file should be the name of collection""" js_schema_files = get_schema_files(schemas_dirpath) schemas = {} for fname in js_schema_files: collection_name = os.path.splitext(os.path.basename(fname))[0] schema_path = os.path.join(schemas_dirpath, fname) schemas[collection_name] = \ schema_engine.create_schema_engine(collection_name, schema_path) return schemas
2.53125
3
WeatherPy/config.py.py
Brownc03/python-api-challenge
0
12285
<reponame>Brownc03/python-api-challenge # OpenWeatherMap API Key weather_api_key = "ae41fcf95db0d612b74e2b509abe9684" # Google API Key g_key = "<KEY>"
1.484375
1
bin/runinterpret.py
christine-liu/somaticCNVpipeline
0
12286
#!usr/bin/python import os import numpy as np import common from interpret import qcfile, funcfile, analyzefiles def runAll(args): print('\n\n\nYou have requested to analyze CNV call data') print('\tWARNING:') print('\t\tIF USING ANY REFERENCES OTHER THAN THOSE I PROVIDE I CANNOT GUARANTEE RESULT ACCURACY') print('\n') #Set up environment# args.AnalysisDirectory = common.fixDirName(args.AnalysisDirectory) folderDict = {'LowessBinCounts': args.lowess, 'Segments': args.segments, 'PipelineStats': args.countstats} for i in list(folderDict.keys()): if not folderDict[i]: folderDict[i] = args.AnalysisDirectory + i + '/' else: folderDict[i] = common.fixDirName(folderDict[i]) QCdir = args.AnalysisDirectory + 'QC/' CNVdir = args.AnalysisDirectory + 'CNVlists/' summaryDir = args.AnalysisDirectory + 'SummaryFiles/' PloidyPlotDir = args.AnalysisDirectory + 'PloidyDeterminationPlots/' CNplotDir = args.AnalysisDirectory + 'CopyNumberProfilePlots/' ChromPlotDir = args.AnalysisDirectory + 'ChromosomeCopyNumberPlots/' for i in [args.AnalysisDirectory, QCdir, CNVdir, summaryDir, PloidyPlotDir, CNplotDir, ChromPlotDir]:# common.makeDir(i) #get list of samples to process #will involve checking infofile (if present) and whether required input files exist sampleFiles = common.getSampleList(folderDict['Segments'], args.samples, 'segments') sampleNames = [x.split('/')[-1].split('.')[0] for x in sampleFiles] # info = common.importInfoFile(args.infofile, args.columns, 'interpret') # if args.infofile: # refArray = info # else: # thisDtype = info # refArray = np.array( # [ (x, 1, 'unk',) for x in sampleNames], # dtype=thisDtype) #QC assessment# # qcfile.runQCone(sampleNames[0], args.species, folderDict['PipelineStats'], folderDict['LowessBinCounts'], folderDict['Segments'], QCdir, PloidyPlotDir) argList = [(x, args.species, folderDict['PipelineStats'], folderDict['LowessBinCounts'], folderDict['Segments'], QCdir, PloidyPlotDir) for x in sampleNames] common.daemon(qcfile.runQCone, argList, 'assess sample quality') analysisSamples = [] ploidyDict = {} genderDict = {} mergeQCfile = summaryDir + 'QCmetrics.txt' OUT = open(mergeQCfile, 'w') OUT.write('Name\tReads\tMAPD\tCS\tPloidy\tGender\tPASS\n') for i in sampleNames: IN = open(QCdir + i + '.qcTEMP.txt', 'r') data = IN.readline() OUT.write(data) data = data.rstrip().split('\t') if data[-1] == 'True': analysisSamples.append(i) ploidyDict[i] = float(data[4]) genderDict[i] = data[-2] IN.close() os.remove(QCdir + i + '.qcTEMP.txt') OUT.close() os.rmdir(QCdir) #FUnC: CNV filtering# if args.nofilter: print '\nFURTHER CODE IS ONLY DEVELOPED FOR WHEN FUnC IS IMPLEMENTED, EXITING NOW\n\n\n' raise SystemExit # funcfile.FUnCone(analysisSamples[0], args.species, folderDict['Segments'], CNVdir, # ploidyDict[analysisSamples[0]], genderDict[analysisSamples[0]]) argList = [(x, args.species, folderDict['Segments'], CNVdir, ploidyDict[x], genderDict[x]) for x in analysisSamples] common.daemon(funcfile.FUnCone, argList, 'remove unreliable CNV calls') #CNV analysis# # summaryStats = analyzefiles.analyzeOne(analysisSamples[0], args.species, CNVdir, folderDict['LowessBinCounts'], CNplotDir, ChromPlotDir, ploidyDict[analysisSamples[0]], genderDict[analysisSamples[0]]) # summaryStats = [summaryStats] argList = [(x, args.species, CNVdir, folderDict['LowessBinCounts'], CNplotDir, ChromPlotDir, ploidyDict[x], genderDict[x]) for x in analysisSamples] summaryStats = common.daemon(analyzefiles.analyzeOne, argList, 'create summary files') cellStatsFile = summaryDir + 'CellStats.txt' chromAmpFile = 
summaryDir + 'ChromosomeAmplifiedPercent.txt' chromDelFile = summaryDir + 'ChromosomeDeletedPercent.txt' #write summary statistics files# with open(cellStatsFile, 'w') as CELL, open(chromAmpFile, 'w') as AMP, open(chromDelFile, 'w') as DEL: CELL.write('Sample\tDeletionNumber\tAmplificationNumber\tTotalCNVnumber\tDeletedMB\tAmplifiedMB\tNetDNAalterdMB\n') chromHeader = 'Sample\t' + '\t'.join(summaryStats[0]['chroms']) + '\n' AMP.write(chromHeader) DEL.write(chromHeader) for i,j in enumerate(analysisSamples): CELL.write(str(j + '\t')) cellOut = [summaryStats[i]['cellStats']['delCount'], summaryStats[i]['cellStats']['ampCount'], summaryStats[i]['cellStats']['delCount'] + summaryStats[i]['cellStats']['ampCount'], np.round(summaryStats[i]['cellStats']['delMB'], 3), np.round(summaryStats[i]['cellStats']['ampMB'], 3), np.round(summaryStats[i]['cellStats']['ampMB'] - summaryStats[i]['cellStats']['delMB'], 3)] cellOut = '\t'.join(map(str, cellOut)) + '\n' CELL.write(cellOut) AMP.write(str(j + '\t')) ampOut = [np.round(summaryStats[i]['chromAmp'][x], 3) for x in summaryStats[0]['chroms']] ampOut = '\t'.join(map(str, ampOut)) + '\n' AMP.write(ampOut) DEL.write(str(j + '\t')) delOut = [np.round(summaryStats[i]['chromDel'][x], 3) for x in summaryStats[0]['chroms']] delOut = '\t'.join(map(str, delOut)) + '\n' DEL.write(delOut) print('\nCNV analysis complete\n\n\n')
2.421875
2
mbio/EM/mrc.py
wzmao/mbio
2
12287
# -*- coding: utf-8 -*- """This module contains the MRC file class. """ __author__ = '<NAME>' __all__ = ['MRC'] class MRCHeader(): """A header class for mrc file.""" def __init__(self, filename=None, **kwargs): """Provide the filename to parse or set it later.""" self.nx = self.ny = self.nz = None self.mode = None self.nxstart = self.nystart = self.nzstart = None self.mx = self.my = self.mz = None self.cella = [None] * 3 self.cellb = [None] * 3 self.mapc = None self.mapr = None self.maps = None self.dmin = self.dmax = self.dmean = None self.ispg = None self.nsymbt = None self.extra = None self.origin = [None] * 3 self.map = None self.machst = None self.rms = None self.nlabels = None self.label = [None] * 10 self.symdata = None self.xstart = self.ystart = self.zstart = None if filename: from os.path import exists, isfile if exists(filename) and isfile(filename): from .Cmrc import readHeader compress = 1 if filename.lower().endswith('.gz') else 0 temp = readHeader( filename=filename, header=self, compress=compress) if isinstance(temp, tuple): from ..IO.output import printError if temp[0] == None: printError(temp[1]) else: printError("Couldn't parse the Error information.") return None else: from numpy import array, argsort self = temp for i in xrange(10): self.label[i] = self.label[i][:80] if self.label[i].find('\0') != -1: self.label[i] = self.label[i][ :self.label[i].find("\0")] elif self.label[i] == ' ' * 80: self.label[i] = '' self.label[i] = self.label[i].rstrip() if self.symdata: self.symdata = self.symdata[:80] if self.symdata.find('\0') != -1: self.symdata = self.symdata[ :self.symdata.find('\0')] if self.extra: self.extra = self.extra[:80] if self.extra.find('\0') != -1: self.extra = self.extra[:self.extra.find('\0')] if self.origin == [0, 0, 0]: self.xstart, self.ystart, self.zstart = array( [self.nxstart * self.cella[0] / self.mx, self.nystart * self.cella[1] / self.my, self.nzstart * self.cella[2] / self.mz])[argsort([self.mapc, self.mapr, self.maps])] self.origin = list(array([self.xstart, self.ystart, self.zstart])[ [self.mapc - 1, self.mapr - 1, self.maps - 1]]) self.nxstart = self.nystart = self.nzstart = 0 else: self.nxstart = self.nystart = self.nzstart = 0 self.xstart, self.ystart, self.zstart = array( self.origin)[argsort([self.mapc, self.mapr, self.maps])] else: from ..IO.output import printError printError("The file doesn't exists or is not a file.") def parseHeader(self, filename=None, **kwargs): """Parse the MRC header information from the given file.""" if filename: from os.path import exists, isfile if exists(filename) and isfile(filename): from .Cmrc import readHeader compress = 1 if filename.lower().endswith('.gz') else 0 temp = readHeader( filename=filename, header=self, compress=compress) if isinstance(temp, tuple): from ..IO.output import printError if temp[0] == None: printError(temp[1]) else: printError("Couldn't parse the Error information.") return None else: from numpy import array, argsort self = temp for i in xrange(10): self.label[i] = self.label[i][:80] if self.label[i].find('\0') != -1: self.label[i] = self.label[i][ :self.label[i].find("\0")] elif self.label[i] == ' ' * 80: self.label[i] = '' self.label[i] = self.label[i].rstrip() if self.symdata: self.symdata = self.symdata[:80] if self.symdata.find('\0') != -1: self.symdata = self.symdata[ :self.symdata.find('\0')] if self.extra: self.extra = self.extra[:80] if self.extra.find('\0') != -1: self.extra = self.extra[:self.extra.find('\0')] if self.origin == [0, 0, 0]: self.xstart, self.ystart, 
self.zstart = array( [self.nxstart * self.cella[0] / self.mx, self.nystart * self.cella[1] / self.my, self.nzstart * self.cella[2] / self.mz])[argsort([self.mapc, self.mapr, self.maps])] self.origin = list(array([self.xstart, self.ystart, self.zstart])[ [self.mapc - 1, self.mapr - 1, self.maps - 1]]) self.nxstart = self.nystart = self.nzstart = 0 else: self.nxstart = self.nystart = self.nzstart = 0 self.xstart, self.ystart, self.zstart = array( self.origin)[argsort([self.mapc, self.mapr, self.maps])] else: from ..IO.output import printError printError("The file doesn't exists or is not a file.") else: from ..IO.output import printError printError("The filename must be provided.") def printInformation(self, **kwargs): """Print the information from the header.""" from ..IO.output import printInfo as p p("Num of columns, rows and sections: {0} {1} {2}".format( self.nx, self.ny, self.nz)) p("Mode: {0}".format(self.mode)) p("Num of First column, row, section: {0} {1} {2}".format( self.nxstart, self.nystart, self.nzstart)) p("Num of intervals along x, y, z: {0} {1} {2}".format( self.mx, self.my, self.mz)) p("Cell dimensions in angstroms: {0:.2f} {1:.2f} {2:.2f}".format( self.cella[0], self.cella[1], self.cella[2])) p("Cell angles in degrees: {0:.2f} {1:.2f} {2:.2f}".format( self.cellb[0], self.cellb[1], self.cellb[2])) p("Axis for cols, rows, sections: {0} {1} {2}".format( self.mapc, self.mapr, self.maps)) p("Min, max, mean density value: {0:.6f} {1:.6f} {2:.6f}".format( self.dmin, self.dmax, self.dmean)) p("Space group number: {0}".format(self.ispg)) p("Origin in X,Y,Z: {0:.4f} {1:.4f} {2:.4f}".format( self.origin[0], self.origin[1], self.origin[2])) p("Machine stamp: {0}".format(self.machst)) p("rms deviationfrom mean density: {0}".format(self.rms)) p("Num of labels being used: {0}".format(self.nlabels)) if self.nlabels != 0: p("Labels:") for i in self.label: if i != "": p("\t{0}".format(i)) p("Num of bytes for symmetry data: {0}".format(self.nsymbt)) if self.nsymbt != 0: p("\t{0}".format(self.symdata)) def getMatrixShape(self, **kwargs): """Get the data shape from the header information. Caution: it could be different with the data array.""" if (isinstance(self.nx, int) and isinstance(self.ny, int) and isinstance(self.nz, int)): return (self.nx, self.ny, self.nz) else: from ..IO.output import printError printError("There is no header information here.") return None def __repr__(self): return "MRCHeader" def setValue(self, label, value=None, **kwargs): """Set the value for a label.""" setattr(self, label, value) def getValue(self, label, default=None, **kwargs): """Get the value for a label.""" getattr(self, label, default) class MRC(): """This is a class to read and write MRC file. 
The data will always been store as x,y,z oreder.""" def __init__(self, filename=None, **kwargs): """Parse data from the given file.""" self.header = MRCHeader() self.data = None if filename: self.parseData(filename=filename, **kwargs) def __getattr__(self, name, **kwargs): if name in ['data', 'header']: return getattr(self, name) else: try: return getattr(self.header, name) except: return None def __setattr__(self, name, value, **kwargs): if name == 'data': self.__dict__[name] = value elif name == 'header': self.__dict__[name] = value else: if name in self.header.__dict__.keys(): setattr(self.header, name, value) elif name in self.__dict__.keys(): setattr(self, name, value) else: pass def __repr__(self): return "MRC" def __str__(self): return "MRC" def __dir__(self, **kwargs): return self.__dict__.keys() + self.header.__dict__.keys() def parseHeader(self, filename=None, **kwargs): """Parse the header only from a given file. If the data will be parsed in the future, the header will be overwrited by the new data file's header.""" if filename: from os.path import exists, isfile if exists(filename) and isfile(filename): self.header = MRCHeader(filename=filename) else: from ..IO.output import printError printError("The file doesn't exists or is not a file.") else: from ..IO.output import printError printError("The filename must be provided.") def parseData(self, filename=None, **kwargs): """Parse the data and header from a given file. If the header or data have already exists, all will be overwrited.""" if filename: from os.path import exists, isfile if exists(filename) and isfile(filename): from .Cmrc import readData from numpy import zeros, int8, int16, float32, uint8, uint16 from ..IO.output import printInfo, printError, printUpdateInfo if getattr(self, 'header', None): del self.header if kwargs.get('output', True): printUpdateInfo( "Parsing the Header from file {0}.".format(filename)) self.header = MRCHeader(filename=filename) if getattr(self, 'data', None): printInfo("Some data exists already, overwrite it.") del self.data if self.header.mode in [3, 4]: printError( "Sorry, we don't support the complex format yet.") del self.data self.data = None return None else: if self.header.mode == 0: self.data = zeros( (self.header.nz, self.header.ny, self.header.nx), dtype=int8) elif self.header.mode == 1: self.data = zeros( (self.header.nz, self.header.ny, self.header.nx), dtype=int16) elif self.header.mode == 2: self.data = zeros( (self.header.nz, self.header.ny, self.header.nx), dtype=float32) elif self.header.mode == 5: self.data = zeros( (self.header.nz, self.header.ny, self.header.nx), dtype=uint8) elif self.header.mode == 6: self.data = zeros( (self.header.nz, self.header.ny, self.header.nx), dtype=uint16) else: printError( "Couldn't understand the mode {0}".format(self.header.mode)) del self.data self.data = None return None if kwargs.get('output', True): printUpdateInfo( "Parsing the Data from file {0}.".format(filename)) self.data = self.data - 1 compress = 1 if filename.lower().endswith('.gz') else 0 temp = readData( filename=filename, nsymbt=self.header.nsymbt, datamode=self.header.mode, data=self.data, size=self.header.nz * self.header.ny * self.header.nx, compress=compress) if isinstance(temp, tuple): del self.data self.data = None if temp[0] == None: printError(temp[1]) else: printError("Couldn't parse the Error information.") return None else: from numpy import transpose, argsort if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]): printError( "The MRC 
header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)") printError("Keep the data as it.") self.data = temp return None else: temporder = [ self.header.maps, self.header.mapr, self.header.mapc] self.data = transpose(temp, argsort(temporder)) del temp if self.header.transend: self.data.byteswap(True) else: printError("The file doesn't exists or is not a file.") return None else: printError("The filename must be provided.") return None def writeData(self, filename, skipupdate=False, force=False, **kwargs): """Write the MRC file into file. The header and data format will automaticly update. You could skip the update using `skipupdate` option. You could force it to overwrite files with `force` option.""" from ..IO.output import printInfo, printError from os.path import exists, isfile from numpy import transpose, array if filename: if exists(filename): if not isfile(filename): printError("The path is not a file.") return None else: if not force: back = raw_input( "* File {0} exists, do you want to overwrite it?(y/n)".format(filename)) while back.strip().lower() not in ['y', 'n']: back = raw_input( "* File {0} exists, do you want to overwrite it?(y/n)".format(filename)) if back.strip().lower() == 'n': printInfo("File not write.") return None else: printError("The filename must be provided.") return None if isinstance(self.data, type(None)): printError("No data to write.") return None find = False for i in xrange(10): if self.label[i].startswith("Written by mbio"): find = True from time import ctime from .. import __version__ self.label[i] = "Written by mbio {0} {1}".format( __version__, ctime()) self.label = self.label[:i] + \ self.label[i + 1:] + [self.label[i]] self.label = [j for j in self.label if j != ""] self.label = self.label + [""] * (10 - len(self.label)) break if not find: if self.nlabels != 10: from time import ctime from .. import __version__ self.label[self.nlabels] = "Written by mbio {0} {1}".format( __version__, ctime()) self.nlabels += 1 if not skipupdate: self.update() from .Cmrc import writeData if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]): printError( "The MRC header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)") printError("Change it automaticly.") self.header.mapc, self.header.mapr, self.header.maps = 1, 2, 3 self.header.nxstart, self.header.nystart, self.header.nzstart = array( [self.header.nxstart, self.header.nystart, self.header.nzstart])[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]] if kwargs.get('output', True): printInfo("Writing MRC to {0}".format(filename)) compress = 1 if filename.lower().endswith('.gz') else 0 temp = writeData(header=self.header, data=transpose( self.data, (self.header.maps - 1, self.header.mapr - 1, self.header.mapc - 1)), filename=filename, compress=compress) if isinstance(temp, tuple): if temp[0] == None: print temp printError(temp[1]) else: printError("Couldn't parse the Error information.") return None elif temp == 0: return None else: printError("Couldn't parse the Error information.") def update(self, **kwargs): """Update the MRC header information from the data array. Update the MRC data format based on the `header.mode` Include: nx, ny, nz, dmin, dmax, dmean, rms, nsymbt, nlabels and sort label nxstart, nystart, nzstart, xstart, ystart, zstart, map. 
Correct mapc, mapr and maps automaticly.""" from numpy import array, int8, int16, float32, uint8, uint16, argsort from ..IO.output import printError from platform import architecture if set([self.header.mapc, self.header.mapr, self.header.maps]) != set([1, 2, 3]): printError( "The MRC header contains no clear axis.(mapc, mapr and maps must cotain all 1,2,3.)") printError("Change it automaticly.") self.header.mapc, self.header.mapr, self.header.maps = 1, 2, 3 self.header.nx, self.header.ny, self.header.nz = array( self.data.shape)[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]] if self.header.origin != [0., 0., 0.]: self.header.nxstart = self.header.nystart = self.header.nzstart = 0 self.header.xstart, self.header.ystart, self.header.zstart = array( self.header.origin)[argsort([self.header.mapc, self.header.mapr, self.header.maps])] elif self.header.nxstart != 0 or self.header.nystart != 0 or self.header.nzstart != 0: self.header.xstart, self.header.ystart, self.header.zstart = array( [self.header.nxstart * self.header.cella[0] / self.header.mx, self.header.nystart * self.header.cella[1] / self.header.my, self.header.nzstart * self.header.cella[2] / self.header.mz])[argsort([self.header.mapc, self.header.mapr, self.header.maps])] # self.header.nxstart, self.header.nystart, self.header.nzstart = array( # [self.header.nxstart, self.header.nystart, self.header.nzstart])[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]] else: self.header.xstart, self.header.ystart, self.header.zstart = 0., 0., 0. self.header.dmin = self.data.min() self.header.dmax = self.data.max() self.header.dmean = self.data.mean() self.header.rms = (((self.data - self.data.mean()) ** 2).mean()) ** .5 # if architecture()[0].find('32')!=-1: # temp1=0. # temp2=0. # temp3=0. # for i in self.data: # for j in i: # for k in j: # temp1+=k**2 # temp2+=k # temp3+=1 # self.header.rms = (temp1/temp3-(temp2/temp3)**2)**.5 # else: # self.header.rms = (((self.data - self.data.mean())**2).mean())**.5 if self.header.symdata: self.header.nsymbt = 80 self.header.symdata = self.header.symdata[:80] else: self.header.nsymbt = 0 self.header.symdata = None self.header.nlabels = sum( [1 if i != "" else 0 for i in self.header.label]) self.header.label = [i[:80] for i in self.header.label if i != ""] self.header.label = self.header.label + \ [""] * (10 - len(self.header.label)) self.header.map = "MAP " if {0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode] != self.data.dtype: self.data = array(self.data, dtype={0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode]) def truncMatrix(self, index=[None, None, None, None, None, None], **kwargs): """Trunc the matrix by index. Related values will change accordingly. You need provide the start and end index(will be included) of x,y and z. Exapmle: MRC.truncMatrix([xstart, xend, ystart, yend, zstart, zend]) You could use *None* to indicate start from begin or to the end. 
""" from ..IO.output import printError, printInfo from numpy import array if len(index) != 6: printError("Must provide 6 indeces.") return None if index == [None] * 6: printInfo("Nothing changed.") return None xstart, xend, ystart, yend, zstart, zend = index if xstart == None: xstart = 0 if ystart == None: ystart = 0 if zstart == None: zstart = 0 if xend == None: xend = self.data.shape[0] + 1 else: xend += 1 if yend == None: yend = self.data.shape[1] + 1 else: yend += 1 if zend == None: zend = self.data.shape[2] + 1 else: zend += 1 if not 0 <= xstart <= self.data.shape[0]: printError("xstart is not in the range of x.") return None if not 0 <= xend <= self.data.shape[0]: printError("xend is not in the range of x.") return None if not xstart < xend: printError("xstart must less than xend.") return None if not 0 <= ystart <= self.data.shape[1]: printError("ystart is not in the range of y.") return None if not 0 <= yend <= self.data.shape[1]: printError("yend is not in the range of y.") return None if not ystart < yend: printError("ystart must less than yend.") return None if not 0 <= zstart <= self.data.shape[2]: printError("zstart is not in the range of z.") return None if not 0 <= zend <= self.data.shape[2]: printError("zend is not in the range of z.") return None if not zstart < zend: printError("zstart must less than zend.") return None self.data = self.data[xstart:xend, ystart:yend, zstart:zend] xstep, ystep, zstep = array( self.header.cella) * 1.0 / array([self.header.mx, self.header.my, self.header.mz]) self.header.xstart += xstart * xstep self.header.ystart += ystart * ystep self.header.zstart += zstart * zstep if self.header.origin == [0, 0, 0]: self.header.nxstart += xstart self.header.nystart += ystart self.header.nzstart += zstart else: self.header.nxstart = 0 self.header.nystart = 0 self.header.nzstart = 0 self.header.origin = list(array([self.header.xstart, self.header.ystart, self.header.zstart])[ [self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]]) self.header.nx, self.header.ny, self.header.nz = array( self.data.shape)[[self.header.mapc - 1, self.header.mapr - 1, self.header.maps - 1]] # def getMatrixShape(self, **kwargs): # """Get the data shape from the header information. 
# Caution: it could be different with the data array.""" # if (isinstance(self.header.nx, int) and # isinstance(self.header.ny, int) and isinstance(self.header.nz, int)): # return (self.header.nx, self.header.ny, self.header.nz) # else: # from ..IO.output import printError # printError("There is no header information here.") # return None def getGridCoords(self, **kwargs): """Return the x, y and z coordinate for the whole grid.""" from numpy import array, arange, argsort xstep, ystep, zstep = array( self.header.cella) * 1.0 / array([self.header.mx, self.header.my, self.header.mz]) if self.header.origin == [0, 0, 0]: xcoor = (self.header.nxstart + arange(self.header.nx)) * xstep ycoor = (self.header.nystart + arange(self.header.ny)) * ystep zcoor = (self.header.nzstart + arange(self.header.nz)) * zstep coor = array([xcoor, ycoor, zcoor])[ argsort([self.header.mapc, self.header.mapr, self.header.maps])] return list(coor) else: xcoor = arange(self.header.nx) * xstep + self.header.origin[0] ycoor = arange(self.header.ny) * ystep + self.header.origin[1] zcoor = arange(self.header.nz) * zstep + self.header.origin[2] coor = array([xcoor, ycoor, zcoor])[ argsort([self.header.mapc, self.header.mapr, self.header.maps])] return list(coor) def getGridSteps(self, **kwargs): """Return the x, y and z coordinate steps.""" from numpy import array, arange, argsort step = array(array(self.header.cella) * 1.0 / array([self.header.mx, self.header.my, self.header.mz])) step = step[ argsort([self.header.mapc, self.header.mapr, self.header.maps])] return step def getArray(self, **kwargs): """Get the data from the MRC class""" return self.data def setMode(self, mode=2, **kwargs): """Set the data format for the data. The data will be change the format accordingly. Data type : 0 image : signed 8-bit bytes range -128 to 127 1 image : 16-bit halfwords 2 image : 32-bit reals 3 transform : complex 16-bit integers (not support now) 4 transform : complex 32-bit reals (not support now) 5 image : unsigned 8-bit range 0 to 255 6 image : unsigned 16-bit range 0 to 65535""" from numpy import array, int8, int16, float32, uint8, uint16 from ..IO.output import printError if mode not in xrange(7): printError("Mode must be 0,1,2,3,4,5,6.") elif mode in [3, 4]: printError("Sorry, the complex format is not supported now.") self.header.mode = mode if {0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode] != self.data.dtype: self.data = array(self.data, dtype={0: int8, 1: int16, 2: float32, 5: uint8, 6: uint16}[self.header.mode]) def printInformation(self, **kwargs): """Print the information from the header.""" self.header.printInformation() def __del__(self): del self.data del self
2.5625
3
links/management/commands/seed_data.py
darth-dodo/hackernews-backend
3
12288
from random import randint from django.core.management.base import BaseCommand from django.db import transaction from faker import Faker from hn_users.models import HNUser, User from links.models import Link, Vote faker = Faker() class Command(BaseCommand): help = "Generate Links from a small user subset" def add_arguments(self, parser): parser.add_argument("no_of_users", type=int, nargs="?", default=4) parser.add_argument("no_of_links", type=int, nargs="?", default=20) @transaction.atomic() def handle(self, *args, **options): no_of_users = options.get("no_of_users") no_of_links = options.get("no_of_links") for user in range(no_of_users): user = self._create_user() hn_user = self._create_hn_user(django_user=user) for link in range(no_of_links): generated_link = self._create_link() generated_link.refresh_from_db() self.stdout.write( self.style.SUCCESS( f"Link {generated_link.url} generated with {generated_link.link_votes.count()} votes" ) ) def _create_link(self): all_users_count = HNUser.objects.count() number_of_users_who_voted = randint(1, all_users_count) # nosec randomly_ordered_users = HNUser.objects.all().order_by("?") # nosec random_users = randomly_ordered_users[:number_of_users_who_voted] hn_user = HNUser.objects.all().order_by("?").first() link = Link() link.posted_by = hn_user link.url = faker.url() link.description = faker.text() link.save() for random_user in random_users: vote = Vote() vote.link = link vote.user = random_user vote.save() return link def _create_user(self): simple_profile = faker.simple_profile() user = User() user.email = simple_profile["mail"] user.username = simple_profile["username"] user.first_name = simple_profile["name"].split(" ")[0] user.last_name = simple_profile["name"].split(" ")[-1] user.set_password(faker.password()) user.save() return user def _create_hn_user(self, django_user): hn_user = HNUser() hn_user.bio = faker.text() hn_user.django_user = django_user hn_user.save()
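# Hypothetical usage sketch (the invocations below are assumptions, not part of the original
# file): Django derives the command name from this file name, so with the app installed the
# seeder could be run as, for example:
#
#   python manage.py seed_data            # defaults: 4 users, 20 links
#   python manage.py seed_data 10 50      # 10 users, 50 links
#
# or from code:
#
#   from django.core.management import call_command
#   call_command("seed_data", 10, 50)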
2.359375
2
locations/spiders/cenex.py
mfjackson/alltheplaces
0
12289
# -*- coding: utf-8 -*- import scrapy import json from locations.items import GeojsonPointItem class CenexSpider(scrapy.Spider): name = "cenex" item_attributes = {"brand": "Cenex", "brand_wikidata": "Q5011381"} allowed_domains = ["www.cenex.com"] def start_requests(self): yield scrapy.http.JsonRequest( "https://www.cenex.com/Common/Services/InteractiveMap.svc/GetLocations", method="POST", data={ "SearchRequest": { "Metadata": {"MapId": "", "Categories": []}, "Query": { "SearchLat": 0, "SearchLong": 0, "LocationTypes": [1, 16, 15], "Amenities": [], "Organizations": ["28e93e82-edfa-418e-90aa-7ded057a0c68"], "NELat": 90, "NELong": 180, "SWLat": -90, "SWLong": -180, }, }, "MapItemId": "40381d43-1c05-43e0-8477-78737b9974df", "AllOrganizationIds": [ "b4ed9d2c-cc3b-4ce0-b642-79d75eac11fa", "cb27078e-9b6a-4f4d-ac81-eb1d163a5ff6", "68be9e56-ff49-4724-baf0-90fc833fb459", "28e93e82-edfa-418e-90aa-7ded057a0c68", ], "ServiceUrl": "https://locatorservice.chsinc.ds/api/search", }, ) def parse(self, response): result = json.loads(response.body_as_unicode()) for store in result["SearchResponse"]["Locations"]: amenities = "|".join([a["Name"] for a in store["Amenities"]]) yield GeojsonPointItem( lon=store["Long"], lat=store["Lat"], ref=store["LocationId"], name=store["Name"], addr_full=" ".join([store["Address1"], store["Address2"]]).strip(), city=store["City"], state=store["State"], postcode=store["Zip"], country="US", phone=store["Phone"], website=store["WebsiteUrl"], opening_hours="24/7" if "24-Hour" in amenities else None, extras={ "amenity:fuel": True, "atm": "ATM" in amenities, "car_wash": "Car Wash" in amenities, "fuel:biodiesel": "Biodiesel" in amenities or None, "fuel:diesel": "Diesel" in amenities or None, "fuel:e85": "Flex Fuels" in amenities or None, "fuel:HGV_diesel": "Truck Stop" in amenities or None, "fuel:propane": "Propane" in amenities or None, "hgv": "Truck Stop" in amenities or None, "shop": "convenience" if "Convenience Store" in amenities else None, }, )
2.53125
3
core/dbt/flags.py
tskleonard/dbt-core
0
12290
import os import multiprocessing if os.name != "nt": # https://bugs.python.org/issue41567 import multiprocessing.popen_spawn_posix # type: ignore from pathlib import Path from typing import Optional # PROFILES_DIR must be set before the other flags # It also gets set in main.py and in set_from_args because the rpc server # doesn't go through exactly the same main arg processing. DEFAULT_PROFILES_DIR = os.path.join(os.path.expanduser("~"), ".dbt") PROFILES_DIR = os.path.expanduser(os.getenv("DBT_PROFILES_DIR", DEFAULT_PROFILES_DIR)) STRICT_MODE = False # Only here for backwards compatibility FULL_REFRESH = False # subcommand STORE_FAILURES = False # subcommand # Global CLI commands USE_EXPERIMENTAL_PARSER = None STATIC_PARSER = None WARN_ERROR = None WRITE_JSON = None PARTIAL_PARSE = None USE_COLORS = None DEBUG = None LOG_FORMAT = None VERSION_CHECK = None FAIL_FAST = None SEND_ANONYMOUS_USAGE_STATS = None PRINTER_WIDTH = 80 WHICH = None INDIRECT_SELECTION = None LOG_CACHE_EVENTS = None EVENT_BUFFER_SIZE = 100000 QUIET = None # Global CLI defaults. These flags are set from three places: # CLI args, environment variables, and user_config (profiles.yml). # Environment variables use the pattern 'DBT_{flag name}', like DBT_PROFILES_DIR flag_defaults = { "USE_EXPERIMENTAL_PARSER": False, "STATIC_PARSER": True, "WARN_ERROR": False, "WRITE_JSON": True, "PARTIAL_PARSE": True, "USE_COLORS": True, "PROFILES_DIR": DEFAULT_PROFILES_DIR, "DEBUG": False, "LOG_FORMAT": None, "VERSION_CHECK": True, "FAIL_FAST": False, "SEND_ANONYMOUS_USAGE_STATS": True, "PRINTER_WIDTH": 80, "INDIRECT_SELECTION": "eager", "LOG_CACHE_EVENTS": False, "EVENT_BUFFER_SIZE": 100000, "QUIET": False, } def env_set_truthy(key: str) -> Optional[str]: """Return the value if it was set to a "truthy" string value, or None otherwise. """ value = os.getenv(key) if not value or value.lower() in ("0", "false", "f"): return None return value def env_set_bool(env_value): if env_value in ("1", "t", "true", "y", "yes"): return True return False def env_set_path(key: str) -> Optional[Path]: value = os.getenv(key) if value is None: return value else: return Path(value) MACRO_DEBUGGING = env_set_truthy("DBT_MACRO_DEBUGGING") DEFER_MODE = env_set_truthy("DBT_DEFER_TO_STATE") ARTIFACT_STATE_PATH = env_set_path("DBT_ARTIFACT_STATE_PATH") ENABLE_LEGACY_LOGGER = env_set_truthy("DBT_ENABLE_LEGACY_LOGGER") def _get_context(): # TODO: change this back to use fork() on linux when we have made that safe return multiprocessing.get_context("spawn") # This is not a flag, it's a place to store the lock MP_CONTEXT = _get_context() def set_from_args(args, user_config): # N.B. Multiple `globals` are purely for line length. 
# Because `global` is a parser directive (as opposed to a language construct) # black insists in putting them all on one line global STRICT_MODE, FULL_REFRESH, WARN_ERROR, USE_EXPERIMENTAL_PARSER, STATIC_PARSER global WRITE_JSON, PARTIAL_PARSE, USE_COLORS, STORE_FAILURES, PROFILES_DIR, DEBUG, LOG_FORMAT global INDIRECT_SELECTION, VERSION_CHECK, FAIL_FAST, SEND_ANONYMOUS_USAGE_STATS global PRINTER_WIDTH, WHICH, LOG_CACHE_EVENTS, EVENT_BUFFER_SIZE, QUIET STRICT_MODE = False # backwards compatibility # cli args without user_config or env var option FULL_REFRESH = getattr(args, "full_refresh", FULL_REFRESH) STORE_FAILURES = getattr(args, "store_failures", STORE_FAILURES) WHICH = getattr(args, "which", WHICH) # global cli flags with env var and user_config alternatives USE_EXPERIMENTAL_PARSER = get_flag_value("USE_EXPERIMENTAL_PARSER", args, user_config) STATIC_PARSER = get_flag_value("STATIC_PARSER", args, user_config) WARN_ERROR = get_flag_value("WARN_ERROR", args, user_config) WRITE_JSON = get_flag_value("WRITE_JSON", args, user_config) PARTIAL_PARSE = get_flag_value("PARTIAL_PARSE", args, user_config) USE_COLORS = get_flag_value("USE_COLORS", args, user_config) PROFILES_DIR = get_flag_value("PROFILES_DIR", args, user_config) DEBUG = get_flag_value("DEBUG", args, user_config) LOG_FORMAT = get_flag_value("LOG_FORMAT", args, user_config) VERSION_CHECK = get_flag_value("VERSION_CHECK", args, user_config) FAIL_FAST = get_flag_value("FAIL_FAST", args, user_config) SEND_ANONYMOUS_USAGE_STATS = get_flag_value("SEND_ANONYMOUS_USAGE_STATS", args, user_config) PRINTER_WIDTH = get_flag_value("PRINTER_WIDTH", args, user_config) INDIRECT_SELECTION = get_flag_value("INDIRECT_SELECTION", args, user_config) LOG_CACHE_EVENTS = get_flag_value("LOG_CACHE_EVENTS", args, user_config) EVENT_BUFFER_SIZE = get_flag_value("EVENT_BUFFER_SIZE", args, user_config) QUIET = get_flag_value("QUIET", args, user_config) def get_flag_value(flag, args, user_config): lc_flag = flag.lower() flag_value = getattr(args, lc_flag, None) if flag_value is None: # Environment variables use pattern 'DBT_{flag name}' env_flag = f"DBT_{flag}" env_value = os.getenv(env_flag) if env_value is not None and env_value != "": env_value = env_value.lower() # non Boolean values if flag in [ "LOG_FORMAT", "PRINTER_WIDTH", "PROFILES_DIR", "INDIRECT_SELECTION", "EVENT_BUFFER_SIZE", ]: flag_value = env_value else: flag_value = env_set_bool(env_value) elif user_config is not None and getattr(user_config, lc_flag, None) is not None: flag_value = getattr(user_config, lc_flag) else: flag_value = flag_defaults[flag] if flag in ["PRINTER_WIDTH", "EVENT_BUFFER_SIZE"]: # must be ints flag_value = int(flag_value) if flag == "PROFILES_DIR": flag_value = os.path.abspath(flag_value) return flag_value def get_flag_dict(): return { "use_experimental_parser": USE_EXPERIMENTAL_PARSER, "static_parser": STATIC_PARSER, "warn_error": WARN_ERROR, "write_json": WRITE_JSON, "partial_parse": PARTIAL_PARSE, "use_colors": USE_COLORS, "profiles_dir": PROFILES_DIR, "debug": DEBUG, "log_format": LOG_FORMAT, "version_check": VERSION_CHECK, "fail_fast": FAIL_FAST, "send_anonymous_usage_stats": SEND_ANONYMOUS_USAGE_STATS, "printer_width": PRINTER_WIDTH, "indirect_selection": INDIRECT_SELECTION, "log_cache_events": LOG_CACHE_EVENTS, "event_buffer_size": EVENT_BUFFER_SIZE, "quiet": QUIET, }
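# A hedged sketch of the precedence implemented above (CLI arg > DBT_* environment variable >
# user_config > flag_defaults). The __main__ demo and the SimpleNamespace "args" are
# illustrative assumptions, not part of dbt itself.
if __name__ == "__main__":
    from types import SimpleNamespace

    os.environ["DBT_PRINTER_WIDTH"] = "120"

    args = SimpleNamespace()                       # no CLI value supplied
    print(get_flag_value("PRINTER_WIDTH", args, user_config=None))   # -> 120 (from the env var)

    args = SimpleNamespace(printer_width=100)      # a CLI value wins over the env var
    print(get_flag_value("PRINTER_WIDTH", args, user_config=None))   # -> 100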
1.90625
2
DataShine/DataShine.py
monk-after-90s/DataShine
0
12291
<gh_stars>0 import asyncio import functools from copy import deepcopy from ensureTaskCanceled import ensureTaskCanceled def _no_closed(method): ''' Can not be run when closed. :return: ''' @functools.wraps(method) def wrapper(*args, **kwargs): self = args[0] if self._closed: raise RuntimeError(f'{repr(self)} is already closed.') return method(*args, **kwargs) return wrapper class DataShine: def __init__(self): self._unlocked = asyncio.Event() self._unlocked.set() self._period_change_event = asyncio.Event() self._data_container = None self._q = asyncio.Queue() self._q_hanler_task = asyncio.create_task(self._q_hanler()) self._closed = False async def close(self): ''' Close the DataShine instance. :return: ''' await ensureTaskCanceled(self._q_hanler_task) self._closed = True async def _q_hanler(self): while True: new_data = await self._q.get() self._q.task_done() self._data_container = new_data self._period_change_event.clear() self._period_change_event.set() self._period_change_event.clear() await asyncio.sleep(0) @_no_closed async def push_data(self, data): ''' Set the lamp to carry a data to be taken, and shine the data to notify monitors new data coming. :param data: :return: ''' self._q.put_nowait(data) @property def data(self): ''' Query the data last pushed. :return: ''' return self._data_container @_no_closed async def wait_data_shine(self): ''' Wait the shined data. If you wait too later, you will lose the chance to get the data. If you can not wait the data in time every time but have to handle all the data, you can cache data in a instance of asyncio.Queue. :return: ''' await self._period_change_event.wait() return deepcopy(self._data_container) if __name__ == '__main__': async def test(): pass asyncio.create_task(test())
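# A hedged usage sketch for the class above (not part of the original module; names are
# illustrative only): one coroutine pushes values while another waits for each "shine".
async def _demo():
    shine = DataShine()

    async def consumer():
        for _ in range(3):
            value = await shine.wait_data_shine()
            print("received:", value)

    consumer_task = asyncio.create_task(consumer())
    for i in range(3):
        await shine.push_data(i)
        await asyncio.sleep(0.1)   # give the internal handler task a chance to broadcast
    await consumer_task
    await shine.close()

# asyncio.run(_demo())  # uncomment to run the sketch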
2.46875
2
src/python/deepseq2.py
yotamfr/prot2vec
8
12292
import os # os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # os.environ["CUDA_VISIBLE_DEVICES"] = "1" from src.python.baselines import * from pymongo import MongoClient from tqdm import tqdm import tensorflow as tf ### Keras from keras import optimizers from keras.models import Model from keras.layers import Input, Dense, Embedding, Activation from keras.layers import Conv2D, Conv1D from keras.layers import Dropout, BatchNormalization from keras.layers import MaxPooling1D, MaxPooling2D, GlobalMaxPooling1D, GlobalAveragePooling1D from keras.layers import Concatenate, Flatten, Reshape from keras.callbacks import Callback, EarlyStopping, ModelCheckpoint, LambdaCallback, LearningRateScheduler # from keras.losses import hinge, binary_crossentropy from keras import backend as K from sklearn.metrics import log_loss import math import argparse sess = tf.Session() K.set_session(sess) LR = 0.001 BATCH_SIZE = 32 LONG_EXPOSURE = True t0 = datetime(2014, 1, 1, 0, 0) t1 = datetime(2014, 9, 1, 0, 0) MAX_LENGTH = 2000 MIN_LENGTH = 30 def get_classes(db, onto, start=t0, end=t1): q1 = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$lte": start}, 'Aspect': ASPECT} q2 = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$gt": start, "$lte": end}, 'Aspect': ASPECT} def helper(q): seq2go, _ = GoAnnotationCollectionLoader( db.goa_uniprot.find(q), db.goa_uniprot.count(q), ASPECT).load() for i, (k, v) in enumerate(seq2go.items()): sys.stdout.write("\r{0:.0f}%".format(100.0 * i / len(seq2go))) seq2go[k] = onto.propagate(v) return reduce(lambda x, y: set(x) | set(y), seq2go.values(), set()) return onto.sort(helper(q1) | helper(q2)) def get_training_and_validation_streams(db, limit=None): q_train = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$lte": t0}, 'Aspect': ASPECT} seq2go_trn, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_train), db.goa_uniprot.count(q_train), ASPECT).load() query = {"_id": {"$in": unique(list(seq2go_trn.keys())).tolist()}} count = limit if limit else db.uniprot.count(query) source = db.uniprot.find(query).batch_size(10) if limit: source = source.limit(limit) stream_trn = DataStream(source, count, seq2go_trn) q_valid = {'DB': 'UniProtKB', 'Evidence': {'$in': exp_codes}, 'Date': {"$gt": t0, "$lte": t1}, 'Aspect': ASPECT} seq2go_tst, _ = GoAnnotationCollectionLoader(db.goa_uniprot.find(q_valid), db.goa_uniprot.count(q_valid), ASPECT).load() query = {"_id": {"$in": unique(list(seq2go_tst.keys())).tolist()}} count = limit if limit else db.uniprot.count(query) source = db.uniprot.find(query).batch_size(10) if limit: source = source.limit(limit) stream_tst = DataStream(source, count, seq2go_tst) return stream_trn, stream_tst class DataStream(object): def __init__(self, source, count, seq2go): self._count = count self._source = source self._seq2go = seq2go def __iter__(self): count = self._count source = self._source seq2go = self._seq2go for k, seq in UniprotCollectionLoader(source, count): if not MIN_LENGTH <= len(seq) <= MAX_LENGTH: continue x = [AA.aa2index[aa] for aa in seq] yield k, x, seq2go[k] def __len__(self): return self._count def step_decay(epoch): initial_lrate = LR drop = 0.5 epochs_drop = 1.0 lrate = max(0.0001, initial_lrate * math.pow(drop, math.floor((1 + epoch) / epochs_drop))) return lrate def OriginalIception(inpt, num_channels=64): # tower_0 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 3, 
padding='same', activation='relu')(tower_1) tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_2) # tower_3 = MaxPooling1D(3, padding='same')(inpt) # tower_3 = Conv1D(num_channels, 1, padding='same')(tower_3) return Concatenate(axis=2)([tower_1, tower_2,]) def LargeInception(inpt, num_channels=64): tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(inpt) tower_1 = BatchNormalization()(tower_1) tower_1 = Conv1D(num_channels, 6, padding='same', activation='relu')(tower_1) tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(inpt) tower_2 = BatchNormalization()(tower_2) tower_2 = Conv1D(num_channels, 10, padding='same', activation='relu')(tower_2) return Concatenate(axis=2)([tower_1, tower_2]) def SmallInception(inpt, num_channels=150): tower_1 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(num_channels, 5, padding='same', activation='relu')(tower_1) # tower_1 = BatchNormalization()(tower_1) tower_2 = Conv1D(num_channels, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(num_channels, 15, padding='same', activation='relu')(tower_2) # tower_2 = BatchNormalization()(tower_2) return Concatenate(axis=2)([tower_1, tower_2]) def Classifier(inp1d, classes): out = Dense(len(classes))(inp1d) out = BatchNormalization()(out) out = Activation('sigmoid')(out) return out def MotifNet(classes, opt): inpt = Input(shape=(None,)) out = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) out = Conv1D(250, 15, activation='relu', padding='valid')(out) out = Dropout(0.2)(out) out = Conv1D(100, 15, activation='relu', padding='valid')(out) out = SmallInception(out) out = Dropout(0.2)(out) out = SmallInception(out) out = Dropout(0.2)(out) out = Conv1D(250, 5, activation='relu', padding='valid')(out) out = Dropout(0.2)(out) out = Classifier(GlobalMaxPooling1D()(out), classes) model = Model(inputs=[inpt], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def Inception(inpt, tower1=6, tower2=10): tower_1 = Conv1D(64, 1, padding='same', activation='relu')(inpt) tower_1 = Conv1D(64, tower1, padding='same', activation='relu')(tower_1) tower_2 = Conv1D(64, 1, padding='same', activation='relu')(inpt) tower_2 = Conv1D(64, tower2, padding='same', activation='relu')(tower_2) # tower_3 = MaxPooling1D(3, strides=1, padding='same')(inpt) # tower_3 = Conv1D(64, 1, padding='same', activation='relu')(tower_3) return Concatenate(axis=2)([tower_1, tower_2]) def ProteinInception(classes, opt): inpt = Input(shape=(None,)) img = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) feats = Inception(Inception(img)) out = Classifier(GlobalMaxPooling1D()(feats), classes) model = Model(inputs=[inpt], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def Features(inpt): feats = Embedding(input_dim=26, output_dim=23, embeddings_initializer='uniform')(inpt) feats = Conv1D(250, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(100, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(100, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = Conv1D(250, 15, activation='relu', padding='valid')(feats) feats = Dropout(0.3)(feats) feats = GlobalMaxPooling1D()(feats) return feats def DeeperSeq(classes, opt): inp = Input(shape=(None,)) out = 
Classifier(Features(inp), classes) model = Model(inputs=[inp], outputs=[out]) model.compile(loss='binary_crossentropy', optimizer=opt) return model def batch_generator(stream, onto, classes): s_cls = set(classes) data = dict() def labels2vec(lbl): y = np.zeros(len(classes)) for go in onto.propagate(lbl, include_root=False): if go not in s_cls: continue y[classes.index(go)] = 1 return y def pad_seq(seq, max_length=MAX_LENGTH): delta = max_length - len(seq) left = [PAD for _ in range(delta // 2)] right = [PAD for _ in range(delta - delta // 2)] seq = left + seq + right return np.asarray(seq) def prepare_batch(sequences, labels): b = max(map(len, sequences)) + 100 Y = np.asarray([labels2vec(lbl) for lbl in labels]) X = np.asarray([pad_seq(seq, b) for seq in sequences]) return X, Y for k, x, y in stream: lx = len(x) if lx in data: data[lx].append([k, x, y]) ids, seqs, lbls = zip(*data[lx]) if len(seqs) == BATCH_SIZE: yield ids, prepare_batch(seqs, lbls) del data[lx] else: data[lx] = [[k, x, y]] for packet in data.values(): ids, seqs, lbls = zip(*packet) yield ids, prepare_batch(seqs, lbls) class LossHistory(Callback): def __init__(self): self.losses = [] def on_batch_end(self, batch, logs={}): self.losses.append(logs.get('loss')) def train(model, gen_xy, length_xy, epoch, num_epochs, history=LossHistory(), lrate=LearningRateScheduler(step_decay)): pbar = tqdm(total=length_xy) for _, (X, Y) in gen_xy: model.fit(x=X, y=Y, batch_size=BATCH_SIZE, epochs=num_epochs if LONG_EXPOSURE else epoch + 1, verbose=0, validation_data=None, initial_epoch=epoch, callbacks=[history]) pbar.set_description("Training Loss:%.5f" % np.mean(history.losses)) pbar.update(len(Y)) pbar.close() def zeroone2oneminusone(vec): return np.add(np.multiply(np.array(vec), 2), -1) def oneminusone2zeroone(vec): return np.divide(np.add(np.array(vec), 1), 2) def calc_loss(y_true, y_pred): return np.mean([log_loss(y, y_hat) for y, y_hat in zip(y_true, y_pred) if np.any(y)]) def predict(model, gen_xy, length_xy, classes): pbar = tqdm(total=length_xy, desc="Predicting...") i, m, n = 0, length_xy, len(classes) ids = list() y_pred, y_true = np.zeros((m, n)), np.zeros((m, n)) for i, (keys, (X, Y)) in enumerate(gen_xy): k = len(Y) ids.extend(keys) y_hat, y = model.predict(X), Y y_pred[i:i + k, ], y_true[i:i + k, ] = y_hat, y pbar.update(k) pbar.close() return ids, y_true, y_pred def evaluate(y_true, y_pred, classes): y_pred = y_pred[~np.all(y_pred == 0, axis=1)] y_true = y_true[~np.all(y_true == 0, axis=1)] prs, rcs, f1s = performance(y_pred, y_true, classes) return calc_loss(y_true, y_pred), prs, rcs, f1s def add_arguments(parser): parser.add_argument("--mongo_url", type=str, default='mongodb://localhost:27017/', help="Supply the URL of MongoDB"), parser.add_argument("--aspect", type=str, choices=['F', 'P', 'C'], default="F", help="Specify the ontology aspect.") parser.add_argument("--init_epoch", type=int, default=0, help="Which epoch to start training the model?") parser.add_argument("--arch", type=str, choices=['deepseq', 'motifnet', 'inception'], default="deepseq", help="Specify the model arch.") parser.add_argument('-r', '--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)') if __name__ == "__main__": parser = argparse.ArgumentParser() add_arguments(parser) args = parser.parse_args() ASPECT = args.aspect # default: Molecular Function client = MongoClient(args.mongo_url) db = client['prot2vec'] print("Loading Ontology...") onto = get_ontology(ASPECT) # classes = get_classes(db, onto) classes 
= onto.classes classes.remove(onto.root) assert onto.root not in classes opt = optimizers.Adam(lr=LR, beta_1=0.9, beta_2=0.999, epsilon=1e-8) if args.arch == 'inception': model = ProteinInception(classes, opt) LONG_EXPOSURE = False num_epochs = 200 elif args.arch == 'deepseq': model = DeeperSeq(classes, opt) LONG_EXPOSURE = True num_epochs = 20 elif args.arch == 'motifnet': model = MotifNet(classes, opt) LONG_EXPOSURE = False num_epochs = 200 else: print('Unknown model arch') exit(0) if args.resume: model.load_weights(args.resume) print("Loaded model from disk") model.summary() for epoch in range(args.init_epoch, num_epochs): trn_stream, tst_stream = get_training_and_validation_streams(db) train(model, batch_generator(trn_stream, onto, classes), len(trn_stream), epoch, num_epochs) _, y_true, y_pred = predict(model, batch_generator(tst_stream, onto, classes), len(tst_stream), classes) loss, prs, rcs, f1s = evaluate(y_true, y_pred, classes) i = np.argmax(f1s) f_max = f1s[i] print("[Epoch %d/%d] (Validation Loss: %.5f, F_max: %.3f, precision: %.3f, recall: %.3f)" % (epoch + 1, num_epochs, loss, f1s[i], prs[i], rcs[i])) model_str = '%s-%d-%.5f-%.2f' % (args.arch, epoch + 1, loss, f_max) model.save_weights("checkpoints/%s.hdf5" % model_str) with open("checkpoints/%s.json" % model_str, "w+") as f: f.write(model.to_json()) np.save("checkpoints/%s.npy" % model_str, np.asarray(classes))
1.75
2
inbima.py
SkoltechAI/inbima
0
12293
import matplotlib.pyplot as plt import openpyxl import sys from fs import FS from journals import Journals from utils import load_sheet from utils import log from word import Word YEARS = [2017, 2018, 2019, 2020, 2021] class InBiMa(): def __init__(self, is_new_folder=False): self.fs = FS(is_new_folder) if is_new_folder: return self.wb = openpyxl.load_workbook(self.fs.get_path('cait.xlsx')) log('Excel file is opened', 'res') self.team = load_sheet(self.wb['team']) self.grants = load_sheet(self.wb['grants']) self.papers = load_sheet(self.wb['papers']) self.journals = Journals(load_sheet(self.wb['journals'])) # self.journals_ref = self.load_journals_ref() self.task = { 'authors': ['#cichocki'], 'grants': ['#megagrant1'], } log('Excel file is parsed', 'res') # log('Journal info is loaded', 'res') for uid in self.team.keys(): self.task['authors'] = [uid] self.export_word_cv() self.export_grant_papers() self.export_stat() def export_word_cv(self): if len(self.task.get('authors', [])) != 1: text = 'export_word_cv (task should contain only one author)' log(text, 'err') return person = self.team.get(self.task['authors'][0]) if person is None: text = 'export_word_cv (invalid team member uid in task)' log(text, 'err') return uid = person['id'] stat = self.get_papers_stat(uid, YEARS) photo_logo = self.fs.download_photo_logo() photo_person = self.fs.download_photo(uid[1:], person.get('photo')) self.word = Word(YEARS, self.get_papers) self.word.add_person_info(person, photo_person, photo_logo) self.word.add_person_stat(stat) self.word.add_note(is_grant=True) self.word.add_break() self.word.add_paper_list(stat, author=person['id']) fname = 'CAIT_' + person['surname'] + '_' + person['name'] + '.docx' fpath = self.fs.get_path(fname) self.word.save(fpath) log(f'Document "{fpath}" is saved', 'res') def export_grant_papers(self): if len(self.task.get('grants', [])) != 1: text = 'export_grant_papers (task should contain only one grant)' log(text, 'err') return grant = self.grants.get(self.task['grants'][0]) if grant is None: text = 'export_grant_papers (invalid grant uid in task)' log(text, 'err') return uid = grant['id'] stat = self.get_papers_stat(years=YEARS, grant=uid) photo_logo = self.fs.download_photo_logo() head = grant.get('head', '') head = self.team[head] self.word = Word(YEARS, self.get_papers) self.word.add_grant_info(grant, head, photo_logo) self.word.add_note(is_grant=True) self.word.add_break() self.word.add_paper_list(stat, grant=uid, with_links=True) fname = 'CAIT_' + uid[1:] + '.docx' fpath = self.fs.get_path(fname) self.word.save(fpath) log(f'Document "{fpath}" is saved', 'res') def export_stat(self): stats = {} for uid in self.team.keys(): if self.team[uid].get('active') != 'Yes': continue if self.team[uid].get('lead') != 'Yes': continue stats[uid] = self.get_papers_stat(uid, YEARS) for uid, stat in stats.items(): x = YEARS y = [stat[y]['total'] for y in YEARS] plt.plot(x, y, marker='o', label=uid) plt.legend(loc='best') fpath = self.fs.get_path('plot.png') plt.savefig(fpath) log(f'Figure "{fpath}" is saved', 'res') def get_papers(self, author=None, year=None, q=None, grant=None): res = {} for title, paper in self.papers.items(): if year and int(year) != int(paper['year']): continue if author and not author in paper['authors_parsed']: continue if grant and not grant in paper.get('grant', ''): continue if q is not None: journal = self.journals.data[paper['journal']] q1 = journal.get('sjr_q1', '') q2 = journal.get('sjr_q2', '') if q == 1 and len(q1) < 2: continue if q == 2 and (len(q1) >= 2 
or len(q2) < 2): continue if q == 0 and (len(q1) >= 2 or len(q2) >= 2): continue res[title] = paper res[title]['journal_object'] = self.journals.data[paper['journal']] return res def get_papers_stat(self, author=None, years=[], grant=None): res = {} for year in years: res[year] = { 'q1': len(self.get_papers(author, year, q=1, grant=grant)), 'q2': len(self.get_papers(author, year, q=2, grant=grant)), 'q0': len(self.get_papers(author, year, q=0, grant=grant)), 'total': len(self.get_papers(author, year, grant=grant)) } res['total'] = { 'q1': sum(res[year]['q1'] for year in years), 'q2': sum(res[year]['q2'] for year in years), 'q0': sum(res[year]['q0'] for year in years), 'total': sum(res[year]['total'] for year in years), } return res if __name__ == '__main__': args = sys.argv[1:] if len(args) == 0: ibm = InBiMa() elif len(args) == 1 and args[0] == '-f': ibm = InBiMa(is_new_folder=True) elif len(args) == 2 and args[0] == '-j': journals = Journals() journals.load_ref() journals.log_ref(title=args[1]) else: raise ValueError('Invalid arguments for script')
2.484375
2
UkDatabaseAPI/UkDatabaseAPI/database/mongo_db.py
kplachkov/UkDatabase
0
12294
<reponame>kplachkov/UkDatabase import pymongo from bson.json_util import dumps from pymongo import MongoClient from UkDatabaseAPI.database.database import Database from UkDatabaseAPI.database.query_builder.mongo_query_builder import MongoQueryBuilder MONGO_URI = "mongodb://localhost:27017" """str: The MongoDB URI.""" class MongoDB(Database): def __init__(self): """Client for a MongoDB instance.""" # Opening db connection. self.__client = MongoClient(MONGO_URI) self.__db = self.__client.UkDatabase def __del__(self): """Close the connection.""" self.close_connection() def crate_collection_text_index(self): """Create a text index for the collection.""" self.__db.posts.create_index([('TEXT', pymongo.TEXT)], name='text', default_language='english') def close_connection(self): """Close the connection.""" self.__client.close() def find_posts(self, text: str, post_pub_date: str, number_of_results: int) -> str: """Find posts containing text or/and within a time range. Args: text: The text search criterion, from the URL argument. post_pub_date: The date or time range search criterion, from the URL argument. number_of_results: The number of results to return, from the URL argument. Returns: The posts containing the text or/and within a time range. """ queries = {} if text: queries.update(MongoQueryBuilder .get_query_for_search_by_text(text)) if post_pub_date: queries.update(MongoQueryBuilder .get_query_for_search_by_post_date(post_pub_date)) result = self.__db.posts.find({"$and": [queries]}, {"score": {"$meta": "textScore"}}) if number_of_results: # If int argument provided by the URL, the results are limited and sorted. result = result.sort([("score", {"$meta": "textScore"})]).limit(number_of_results) else: # Return all matched results sorted. result = result.sort([("score", {"$meta": "textScore"})]) return dumps(result)
2.9375
3
fb/forms.py
pure-python/brainmate
0
12295
from django.forms import (
    Form,
    CharField,
    Textarea,
    PasswordInput,
    ChoiceField,
    DateField,
    ImageField,
    BooleanField,
    IntegerField,
    MultipleChoiceField
)
from django import forms

from fb.models import UserProfile


class UserPostForm(Form):
    text = CharField(widget=Textarea(
        attrs={'rows': 1, 'cols': 40, 'class': 'form-control', 'placeholder': "What's on your mind?"}))


class UserPostCommentForm(Form):
    text = CharField(widget=Textarea(
        attrs={'rows': 1, 'cols': 50, 'class': 'form-control', 'placeholder': "Write a comment..."}))


class UserLogin(Form):
    username = CharField(max_length=30)
    password = CharField(widget=PasswordInput)


class UserProfileForm(Form):
    first_name = CharField(max_length=100, required=False)
    last_name = CharField(max_length=100, required=False)
    gender = ChoiceField(choices=UserProfile.GENDERS, required=False)
    date_of_birth = DateField(required=False)
    avatar = ImageField(required=False)

    OPTIONS = (
        ("Cars", "Cars"),
        ("Dogs", "Dogs"),
        ("Sports", "Sports"),
    )
    interests = MultipleChoiceField(widget=forms.CheckboxSelectMultiple, choices=OPTIONS, required=False)


class QuestionFrom(Form):
    question_description = CharField(max_length=300)
    points = IntegerField()


class AddAnswerForm(Form):
    answer_description = CharField(max_length=30)
    correct_answer = BooleanField(required=False)
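# --- Illustrative usage sketch (not part of the original module) ---
# Shows how one of the plain forms above can be bound and validated, e.g. in a
# view or a Django shell; the credential values are made up for demonstration.
#
#   form = UserLogin(data={'username': 'alice', 'password': 'secret'})
#   if form.is_valid():
#       username = form.cleaned_data['username']   # validated, cleaned input
#   else:
#       errors = form.errors                       # dict of field -> error messages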
2.46875
2
Galaxy_Invander/user23_fTVPDKIDhRdCfUp.py
triump0870/Interactive_Programming_Python
1
12296
<reponame>triump0870/Interactive_Programming_Python # Simple implementation of GalaxyInvanders game # <NAME> (India) - 3 Nov 2013 # www.codeskulptor.org/#user23_fTVPDKIDhRdCfUp VER = "1.0" # "add various aliens" import simplegui, math, random, time #Global const FIELD_WIDTH = 850 FIELD_HEIGHT = 500 TOP_MARGIN = 75 LEFT_MARGIN = 25 ALIEN_WIDTH = 48 ALIEN_HEIGHT = 55 PLAYER_SPEED = 10 BULLET_SPEED = 10 BULLET_POWER = 1 BONUS_SPEED = 10 ALIEN_SPEED = [3, 5] # Images: pImage = simplegui.load_image('https://dl.dropbox.com/s/zhnjucatewcmfs4/player.png') aImages = [] for i in range(7): aImages.append([]) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/0cck7w6r0mt8pzz/alien_1_1.png')) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/j0kubnhzajbdngu/alien_1_2.png')) aImages[0].append(simplegui.load_image('https://dl.dropbox.com/s/zkeu6hqh9bakj25/alien_1_3.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/e75mkcylat70lnd/alien_2_1.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/pgjvaxg0z6rhco9/alien_2_2.png')) aImages[1].append(simplegui.load_image('https://dl.dropbox.com/s/en0hycfsi3cuzuo/alien_2_3.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/fu9weoll70acs8f/alien_3_1.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/b2rxru2nt5q2r1u/alien_3_2.png')) aImages[2].append(simplegui.load_image('https://dl.dropbox.com/s/x66vgj9fc2jlg53/alien_3_3.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/7o04ljg52kniyac/alien_4_1.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/b3v6tvami0rvl6r/alien_4_2.png')) aImages[3].append(simplegui.load_image('https://dl.dropbox.com/s/j451arcevsag36h/alien_4_3.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/jlhdigkm79nncnm/alien_5_1.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/wvlvjsa8yl6gka3/alien_5_2.png')) aImages[4].append(simplegui.load_image('https://dl.dropbox.com/s/rrg4y1tnsbrh04r/alien_5_3.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/oufyfy590tzf7cx/alien_6_1.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/p4ehd9f6mo2xfzc/alien_6_2.png')) aImages[5].append(simplegui.load_image('https://dl.dropbox.com/s/815gq3xyh6wmc0t/alien_6_3.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/bv4ycocuomsvj50/alien_7_1.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/krs2gtvdxxve79z/alien_7_2.png')) aImages[6].append(simplegui.load_image('https://dl.dropbox.com/s/v2wczi8lxwczq87/alien_7_3.png')) #backgrounds bckg = [] bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ibfu2t9vrh4bhxd/back01.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/pcl8vzby25ovis8/back02.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/g8nwo1t9s4i9usg/back03.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ee8oilluf7pe98h/back04.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/7jfgjoxinzwwlx4/back05.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/wh01g2q3607snvz/back06.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/b72ltp2xii9utnr/back07.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/av73jek8egezs1w/back08.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/ik54ttfklv3x3ai/back09.jpg")) 
bckg.append(simplegui.load_image("https://dl.dropbox.com/s/e9e6kpyg3yuoenc/back10.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/zrabwnnvlwvn7it/back11.jpg")) bckg.append(simplegui.load_image("https://dl.dropbox.com/s/a2infkx0rmn8b8m/back12.jpg")) # sounds sndPlayer = simplegui.load_sound('https://dl.dropbox.com/s/vl3as0o2m2wvlwu/player_shoot.wav') sndAlien = simplegui.load_sound('https://dl.dropbox.com/s/m4x0tldpze29hcr/alien_shoot.wav') sndPlayerExplosion = simplegui.load_sound('https://dl.dropbox.com/s/10fn2wh7kk7uoxh/explosion%2001.wav') sndAlienHit = simplegui.load_sound('https://dl.dropbox.com/s/80qdvup27n8j6r1/alien_hit.wav') sndAlienExplosion = simplegui.load_sound('https://dl.dropbox.com/s/qxm3je9vdlb469g/explosion_02.wav') sndBonus = simplegui.load_sound('https://dl.dropbox.com/s/tzp7e20e5v19l01/bonus.wav') sndPause = simplegui.load_sound('https://dl.dropbox.com/s/uzs9nixpd22asno/pause.wav') sndTheme = simplegui.load_sound('https://dl.dropbox.com/s/52zo892uemfkuzm/theme_01.mp3') sounds = [sndPlayer, sndAlien, sndPlayerExplosion, sndAlienExplosion, \ sndBonus, sndPause, sndTheme, sndAlienHit] #Global variables GameRunning = False GameEnded = False player_speed = 0 mes = "" timer_counter = 0 lives = 0 level = 1 scores = 0 killed = 0 current_back = 0 paused = False shoot_count = 0 level_time = [] ready, go = False, False #player = [FIELD_WIDTH //2, FIELD_HEIGHT - 30 + TOP_MARGIN] #game objects user_bullet = [] weapon_level = 1 weapon_speed = BULLET_SPEED alien_bullets = [] alien_fleet = None player = None frame = None aTimer = None dTimer = None bonuses = [] dCounter = 0 back = False bonus_count = [0, 0, 0, 0] player_killed = False player_killed_at = 0 level_map = [] for i in range(7): level_map.append([]) level_map[0] = [ 0, 0, 0, 0] level_map[1] = [129, 0, 0, 0] level_map[2] = [195, 129, 0, 0] level_map[3] = [255, 195, 60, 0] level_map[4] = [255, 231, 195, 195] level_map[5] = [255, 255, 231, 195] level_map[6] = [255, 255, 255, 231] def draw_text(canvas, text, point, size, delta, color): canvas.draw_text(text, point, size, color[0]) canvas.draw_text(text, [point[0]-delta[0], \ point[1]-delta[1]], size, color[1]) class Bonus: def __init__ (self, kind, point): self.kind = kind self.x = point[0] self.y = point[1] self.v = BONUS_SPEED #velocity self.width = 36 self.height = 36 return self def move(self): self.y += self.v return self def draw(self, canvas): if self.kind == 0: #speed of bullet canvas.draw_circle([self.x, self.y], 15, 3, "LightBlue") canvas.draw_text("WS", [self.x-12, self.y+5], self.width //2, "LightBlue") elif self.kind == 1: #weapon level canvas.draw_circle([self.x, self.y], 15, 3, "Red") canvas.draw_text("WL", [self.x-12, self.y+5], self.width //2, "Red") elif self.kind == 2: #life canvas.draw_circle([self.x, self.y], 15, 3, "LightGreen") canvas.draw_text("LF", [self.x-12, self.y+5], self.width //2, "LightGreen") elif self.kind == 3: #weapon power canvas.draw_circle([self.x, self.y], 15, 3, "8010df") canvas.draw_text("WP", [self.x-12, self.y+5], self.width //2, "8010df") return self def execute(self): global weapon_speed, weapon_level, player, scores, bonus_count bonus_count[self.kind] += 1 if self.kind == 0: #speed of bullet weapon_speed += 1 delta = round(math.pow(20, (1 + (1.0*level-1)/32))*5) scores = scores + delta elif self.kind == 1: #weapon level weapon_level += 1 delta = round(math.pow(30, (1 + (1.0*level-1)/32))*5) scores = scores + delta elif self.kind == 2: #life player.lives += 1 delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5) 
scores = scores + delta elif self.kind == 3: #weapon power player.power += 0.1 delta = round(math.pow(100, (1 + (1.0*level-1)/32))*5) scores = scores + delta sndBonus.play() return self def dHandler(): global dCounter, back, player_killed dCounter += 1 if dCounter % 10 == 0: if back: frame.set_canvas_background("Red") else: frame.set_canvas_background("black") back = not back; if dCounter > 50: dCounter = 0 player_killed = False dTimer.stop() frame.set_canvas_background("black") class Bullet: def __init__ (self, point, color, velocity): self.x = point[0] self.y = point[1] self.color = color self.v = velocity self.width = 1 self.height = 1 def draw(self, canvas): canvas.draw_line([self.x, self.y-5], [self.x, self.y+5], 3, self.color) def move(self): self.y += self.v class Alien: def __init__(self, point, kind): self.x = point[0] self.y = point[1] self.kind = kind self.flying = False self.vy = 0 self.vx = 0 self.health = self.get_max_health() self.width = 20 self.height = 20 def get_max_health(self): return 1+0.6 * self.kind[1] def shoot(self): if len(alien_bullets)<level*2: bullet = Bullet([self.x, self.y], "LightRed", BULLET_SPEED) alien_bullets.append(bullet) sndAlien.play() def move(self, point): if self.flying: koef = 1.5 self.y += (self.vy / koef) if self.x>player.x: self.x -= (self.vx / koef) else: self.x += (self.vx / koef) if self.vx<ALIEN_SPEED[0]: self.vx += 1 if self.vy<ALIEN_SPEED[1]: self.vy += 1 else: self.x = point[0] self.y = point[1] def draw(self, canvas): if aImages[self.kind[1]][self.kind[0]].get_width()==0: w = 15 h = 15 canvas.draw_circle([self.x, self.y], 15, 5, "Red") else: # img = aImages[self.kind[1]][self.kind[0]] img = aImages[self.kind[1]][self.kind[0]] self.width = w = img.get_width() self.height = h = img.get_height() canvas.draw_image(img, (w//2, h//2), (w, h), (self.x, self.y), (w, h)) if self.health<>self.get_max_health(): ratio = w * (self.health*1.0) / self.get_max_health() canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x+w//2, self.y-h//2-3], 4, "red") canvas.draw_line([self.x-w//2, self.y-h//2-3], [self.x-w//2+ratio, self.y-h//2-3], 4, "green") return canvas class AliensFleet: def __init__ (self, point): def is_high_level(place): map_ = (level-1)%7 row = level_map[map_][place[1]] #255 - 0 return (row & (1 << place[0]))<>0 self.x = point[0] self.y = point[1] self.aliens = [] self.pattern = [255, 255, 255, 255] self.y_velocity = ALIEN_HEIGHT//3 + 1 self.x_velocity = - ALIEN_WIDTH//3 + 1 for i in range(self.get_aliens_count()): point = self.get_alien_position(i) place = self.get_alien_place(i) alien_level = (level-1)//7 + is_high_level(place) alien = Alien(point, [random.randrange(3), alien_level]) self.aliens.append(alien) def get_aliens_count(self): c = 0 for i in range(4): for j in range(8): if (self.pattern[i] & (1 << j))<>0: c+=1 return c def get_alien_position(self, n): #returns a screen x, y of alien with number n point = self.get_alien_place(n) x = point[0]*(ALIEN_WIDTH + 3) + self.x y = point[1]*(ALIEN_HEIGHT + 3) +self.y point = [x, y] return point def get_alien_place(self, n): #returns a fleet x, y of alien with number n x, y, c = 0, 0, 0 for i in range(4): for j in range(8): if (self.pattern[i] & (1 << j))<>0: if c==n: x, y = j, i c+=1 point = [x, y] return point def move_aliens(self): i = 0 for alien in self.aliens: point = self.get_alien_position(i) alien.move(point) i += 1 return self def move_down(self): self.y += self.y_velocity if self.y>400: player.explode() self.y = 100 self.move_aliens() def move_side(self): self.x -= 
self.x_velocity # check borders of fleet: left = 8 right = -1 for i in range(len(self.aliens)): point = self.get_alien_place(i) if point[0]<left: left = point[0] if point[0]>right: right = point[0] if (self.x+(left+1)*60 < LEFT_MARGIN + 10) or (self.x + (right+1)*45>FIELD_WIDTH-LEFT_MARGIN-60): self.x_velocity = -self.x_velocity self.move_aliens() def draw(self, canvas): for alien in self.aliens: alien.draw(canvas) def make_shoot(self): for alien in self.aliens: if len(alien_bullets) < level * 3 + 1: if random.randrange(101)<2: # alien.shoot() return self def alien_fly(self): i = 0 for alien in self.aliens: if alien.flying: i += 1 if (i<1+level) and (random.randrange(1000)<3) and (time.time()-level_time[len(level_time)-1]>60): alien.flying=True def check_death(self): global scores, killed, player i = 0 for bullet in user_bullet: for i in range(len(self.aliens)): alien = self.aliens[i] if isBulletHit(bullet, alien): if alien.health-player.power<=0: point = self.get_alien_place(i) sndAlienExplosion.play() self.aliens.remove(alien) x = ~int((1 << point[0])) self.pattern[point[1]] = self.pattern[point[1]] & x user_bullet.remove(bullet) delta = round(math.pow(5, (1 + (1.0*level-1)/32))*5) scores = scores + delta killed += 1 x = random.randrange(1000) if x<5: bonus = Bonus(3, [alien.x, alien.y]) bonuses.append(bonus) elif x<50: bonus = Bonus(2, [alien.x, alien.y]) bonuses.append(bonus) elif x<120: bonus = Bonus(1, [alien.x, alien.y]) bonuses.append(bonus) elif x<200: bonus = Bonus(0, [alien.x, alien.y]) bonuses.append(bonus) if killed % 500 == 0: player.lives += 1 sndBonus.play() break else: user_bullet.remove(bullet) alien.health -= player.power sndAlienHit.play() i += 1 class Player: def __init__(self, point, lives): self.x = point[0] self.y = point[1] self.lives = 3 self.speed = player_speed self.power = BULLET_POWER self.width = 20 self.height = 20 def draw(self, canvas): draw_user_image(canvas, [self.x, self.y]) def move(self): self.x += player_speed if self.x<LEFT_MARGIN*2: self.x = LEFT_MARGIN*2 if self.x>FIELD_WIDTH: self.x=FIELD_WIDTH def draw_lives_counter(self, canvas): if self.lives < 5: for i in range(self.lives): draw_user_image(canvas, [150+i*35, 15]) else: draw_user_image(canvas, [150, 15]) canvas.draw_text(" x "+str(int(self.lives)), [170, 25], 25, "Yellow") def explode(self): global dTimer, alien_bullets, user_bullet, weapon_level, weapon_speed global alien_fleet, player_killed_at, player_killed, player_speed player_speed = 0 player_killed_at = time.time() sndPlayerExplosion.play() for alien in alien_fleet.aliens: alien.flying = False player_killed = True alien_bullets = [] user_bullet = [] bonuses = [] weapon_level = level // 10 + 1 weapon_speed = BULLET_SPEED self.lives -= 1 if self.lives<0: stop_game() dTimer = simplegui.create_timer(25, dHandler) dTimer.start() #helper functions def dummy(key): return key def pause(): global paused paused = not paused sndPause.play() def draw_user_image(canvas, point): # draw a image of user ship # global player if pImage.get_width()==0: canvas.draw_circle(point, 12, 5, "Yellow") else: canvas.draw_image(pImage, (25, 36), (49, 72), point, (34, 50)) player.width = pImage.get_width() player.height = pImage.get_height() return canvas def draw_lives(canvas): # draw lives counter canvas.draw_text("Lives : ", [30, 25], 25, "Red") if player<>None: player.draw_lives_counter(canvas) return canvas def draw_weapons(canvas): canvas.draw_text("Weapon : ", [30, 60], 25, "Red") canvas.draw_text("Rocket lvl: "+str(int(weapon_level)), [135, 60], 25, 
"Yellow") canvas.draw_text("WS:"+str(weapon_speed/10.0), [280, 48], 10, "00c5fe") canvas.draw_text("WP:"+str(player.power), [280, 61], 10, "00c5fe") return canvas def draw_level(canvas): canvas.draw_text("Level : ", [FIELD_WIDTH-200, 50], 50, "Red") canvas.draw_text(str(level), [FIELD_WIDTH-50, 50], 50, "Yellow") return canvas def draw_scores(canvas): canvas.draw_text(str(int(scores)), [400, 50], 50, "LightBlue") return canvas def draw_screen(canvas): # border of board canvas.draw_image(bckg[current_back], (425, 250), (850, 500), \ (LEFT_MARGIN+FIELD_WIDTH//2, TOP_MARGIN+FIELD_HEIGHT//2),\ (FIELD_WIDTH, FIELD_HEIGHT)) canvas.draw_polygon([[LEFT_MARGIN, TOP_MARGIN], [LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN], [FIELD_WIDTH+LEFT_MARGIN, FIELD_HEIGHT+TOP_MARGIN], [FIELD_WIDTH+LEFT_MARGIN, TOP_MARGIN]], 2, 'Orange') return canvas def draw_start_screen(canvas): img_count = 1 + len(aImages)*(len(aImages[0])) + len(bckg) loaded_img_count = 0 if pImage.get_width()<>0: loaded_img_count += 1 for bImage in bckg: if bImage.get_width()<>0: loaded_img_count += 1 for aImg in aImages: for img in aImg: if img.get_width()<>0: loaded_img_count += 1 loaded_sounds = 0 for snd in sounds: if snd <> None: loaded_sounds += 1 draw_text(canvas, "SPACE INVANDERS", [220, 150], 50, [3, 3], ["blue", "yellow"]) canvas.draw_text("ver. - "+VER, [600, 170], 20, "yellow") canvas.draw_text("03 nov. 2013", [600, 190], 20, "yellow") draw_text(canvas, "CONTROLS:", [110, 210], 24, [2, 2], ["green", "yellow"]) draw_text(canvas, "Arrows - to left and right, space - to fire, P to pause game", [110, 240], 24, [2, 2], ["green", "yellow"]) draw_text(canvas, "Bonuses: ", [110, 280], 24, [2, 2], ["green", "yellow"]) b = Bonus(0, [125, 310]) b.draw(canvas) draw_text(canvas, " - increase user's bullet speed", [150, 320], 24, [2, 2], ["green", "yellow"]) b = Bonus(1, [125, 350]) b.draw(canvas) draw_text(canvas, " - increase user's bullet number", [150, 360], 24, [2, 2], ["green", "yellow"]) b = Bonus(2, [125, 390]) b.draw(canvas) draw_text(canvas, " - add life", [150, 400], 24, [2, 2], ["green", "yellow"]) b = Bonus(3, [125, 430]) b.draw(canvas) draw_text(canvas, " - increase weapon power", [150, 440], 24, [2, 2], ["green", "yellow"]) if loaded_img_count<img_count: draw_text(canvas, "Please, wait for loading...", [280, 500], 40, [3, 3], ["Blue", "Yellow"]) s = "Loaded "+str(loaded_img_count)+" images of "+str(img_count) draw_text(canvas, s, [110, 550], 20, [2, 2], ["Blue", "yellow"]) s = "Loaded "+str(loaded_sounds)+" sounds of "+str(len(sounds)) draw_text(canvas, s, [510, 550], 20, [2, 2], ["Blue", "yellow"]) else: draw_text(canvas, "Click to start game", [300, 500], 40, [3, 3], ["Blue", "yellow"]) frame.set_mouseclick_handler(click_handler) return canvas def draw_end_screen(canvas): draw_text(canvas, "Game over!", [350, 180], 50, [2, 2], ["Blue", "Yellow"]) draw_text(canvas, "Your score is "+str(int(scores)), [330, 240], 35, [2, 2], ["blue", "Yellow"]) draw_text(canvas, "You shoot "+str(int(shoot_count))+" times", [150, 320], 24, [2, 2], ["blue", "Yellow"]) draw_text(canvas, "You kill a "+str(killed)+" aliens", [150, 360], 24, [2, 2], ["blue", "Yellow"]) if shoot_count == 0: s = "0" else: s = str(int(10000*float(killed)/shoot_count)/100.0) draw_text(canvas, "Your accuracy is "+s+"%", [150, 400], 24, [2, 2], ["blue", "Yellow"]) i = 0 for bc in bonus_count: b = Bonus(i, [505, 310 + 40*i]) b.draw(canvas) draw_text(canvas, " - used "+str(bonus_count[i])+" times", [530, 320+40*i], 24, [2, 2], ["blue", "yellow"]) i += 1 draw_text(canvas, "Click to 
start new game", [300, 500], 40, [2, 2], ["blue", "Yellow"]) canvas.draw_text("ver. - "+VER, [600, 540], 15, "yellow"); return canvas def draw_game_objects(canvas): player.draw(canvas) #draw_user_image(canvas, Player) for bullet in alien_bullets: bullet.draw(canvas) for bullet in user_bullet: bullet.draw(canvas) for bonus in bonuses: bonus.draw(canvas) alien_fleet.draw(canvas) readyGo() if paused: draw_text(canvas, "P A U S E", [380, 350], 50, [2, 2], ["Green", "Yellow"]) if ready: draw_text(canvas, "R E A D Y", [380, 350], 50, [2, 2], ["Green", "Yellow"]) if go: draw_text(canvas, "G O ! ! !", [380, 350], 50, [2, 2], ["Green", "Yellow"]) sndTheme.play() return canvas def moving_objects(): global timer_counter if not GameRunning: return None if paused or ready or go or player_killed: return None timer_counter += 1 player.move() for alien in alien_fleet.aliens: if alien.flying: alien.move([0,0]) if isBulletHit(alien, player): player.explode() if alien.y>FIELD_HEIGHT + TOP_MARGIN+20: alien.y = TOP_MARGIN for bonus in bonuses: bonus.move(); if bonus.y > FIELD_HEIGHT + TOP_MARGIN+20: bonuses.remove(bonus) if isBulletHit(bonus, player): bonus.execute() bonuses.remove(bonus) for bullet in user_bullet: bullet.move() alien_fleet.check_death() for bullet in user_bullet: if bullet.y<TOP_MARGIN+25: user_bullet.remove(bullet) # for bullet in alien_bullets: bullets_to_delete = [] for bullet in list(alien_bullets): bullet.move() if bullet.y > FIELD_HEIGHT + TOP_MARGIN -10: bullets_to_delete.append(bullet) if isBulletHit(bullet, player): player.explode() for bullet in bullets_to_delete: if bullet in alien_bullets: alien_bullets.remove(bullet) alien_fleet.make_shoot() alien_fleet.alien_fly() if level<30: x = 60 - level else: x = 1 if timer_counter % x == 0: alien_fleet.move_side() if timer_counter % (100 + x) == 0: alien_fleet.move_down() if alien_fleet.get_aliens_count() == 0: new_level() # Handler to draw on canvas def draw(canvas): draw_screen(canvas) canvas.draw_text(mes, [250, 250], 40, "Yellow") ###################### #check a begin of game # if GameEnded: draw_end_screen(canvas) elif not GameRunning: draw_start_screen(canvas) else: ################## # game info draw_lives(canvas) draw_weapons(canvas) draw_level(canvas) draw_scores(canvas) draw_game_objects(canvas) return canvas def readyGo(): global ready, go ready = time.time()-level_time[len(level_time)-1]<0.7 go = (not ready) and time.time()-level_time[len(level_time)-1]<1.5 player_killed = time.time() - player_killed_at < 1.2 #Initialization and start of game def start_game(): global GameRunning, alien_fleet, player, GameEnded global scores, killed, level, level_time, bonus_count scores = 0 bonus_count = [0, 0, 0, 0] killed = 0 level = 0 GameEnded = False GameRunning = True new_level() player = Player([FIELD_WIDTH //2, FIELD_HEIGHT + TOP_MARGIN-20], 3) return None def stop_game(): global GameRunning, GameEnded # aTimer.stop() GameEnded = True GameRunning = False level_time.append(time.time()) frame.set_keydown_handler(dummy) frame.set_keyup_handler(dummy) return None # Handler for mouse click def click_handler(position): if not GameRunning: start_game() #else: # stop_game() return position #### keydown_handler def keydown(key): global keypressed, mes, shoot_count, player_speed keypressed = key if (key == simplegui.KEY_MAP['p']) or \ (key == simplegui.KEY_MAP['P']): pause() else: if (key == simplegui.KEY_MAP['right']): #player.move('right') player_speed = PLAYER_SPEED elif (key == simplegui.KEY_MAP['left']): # player.move('left') player_speed = 
-PLAYER_SPEED if (key == simplegui.KEY_MAP['space'])and(GameRunning): if len(user_bullet) < weapon_level: b = Bullet([player.x, player.y], "LightBlue", -weapon_speed) user_bullet.append(b) sndPlayer.play() shoot_count += 1 return #### keyup_handler to stop keydown def keyup(key): global player_speed #if keytimer.is_running(): # keytimer.stop() if (key == simplegui.KEY_MAP['right'])or(key == simplegui.KEY_MAP['left']): player_speed = 0 return def isBulletHit(bullet, obj): if (bullet.y+bullet.height//2+2 > obj.y-obj.height // 2) and (bullet.y-bullet.height//2-2<obj.y+obj.height//2): if (bullet.x+bullet.width//2 +2> obj.x - obj.width//2) and (bullet.x-bullet.width//2 -2< obj.x + obj.width//2): return True else: return False else: return False def new_level(): global level, alien_fleet, user_bullet, alien_bullets, current_back, player global level_time, player_speed level_time.append(time.time()) current_back = random.randrange(12) level += 1 player_speed = 0 user_bullet = [] alien_bullets = [] alien_fleet = AliensFleet([250, 100]) if level % 10 == 0: player.lives += 1 sndBonus.play() # Create a frame and assign callbacks to event handlers frame = simplegui.create_frame("Galaxian", 900, 600, 0) frame.set_draw_handler(draw) frame.set_keydown_handler(keydown) frame.set_keyup_handler(keyup) aTimer = simplegui.create_timer(60, moving_objects) aTimer.start() # Start the frame animation frame.start()
3.015625
3
components/py-flask-wa/app.py
ajayns/amoc-project
26
12297
import base64

from flask import Flask, jsonify, request, render_template, redirect
from flask_pymongo import PyMongo
from werkzeug.utils import secure_filename  # secure_filename lives in werkzeug.utils in current Werkzeug releases

app = Flask(__name__)
app.config['MONGO_DBNAME'] = 'restdb'
app.config['MONGO_URI'] = 'mongodb://localhost:27017/restdb'

mongo = PyMongo(app)


@app.route('/')
def index():
    return render_template("index.html")


@app.route('/w')
def webcam():
    return render_template("webcam.html")


@app.route('/img')
def img():
    # The raw query string carries a base64-encoded image.
    i = request.query_string.decode('utf-8')
    with open('a.png', 'wb') as f:
        f.write(base64.b64decode(i))
    return "success <img src='" + i + "'>"


@app.route('/hello')
def hello():
    return "hello world"


@app.route('/star', methods=['GET'])
def get_all_stars():
    star = mongo.db.stars
    output = []
    for s in star.find():
        output.append({'name': s['name'], 'distance': s['distance']})
    return jsonify(output)


@app.route('/star/<name>', methods=['GET'])
def get_one_star(name):
    star = mongo.db.stars
    s = star.find_one({'name': name})
    if s:
        output = {'name': s['name'], 'distance': s['distance']}
    else:
        output = "No such name"
    return jsonify(output)


@app.route('/star', methods=['POST'])
def add_star():
    star = mongo.db.stars
    name = request.json['name']
    distance = request.json['distance']
    star_id = star.insert({'name': name, 'distance': distance})
    new_star = star.find_one({'_id': star_id})
    output = {'name': new_star['name'], 'distance': new_star['distance']}
    return jsonify(output)


@app.route('/uploader', methods=['POST'])
def upload_file():
    f = request.files['file']
    f.save(secure_filename('1'))
    return "uploaded"


if __name__ == '__main__':
    app.run(debug=True)
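# --- Illustrative client sketch (not part of the original app) ---
# Demonstrates how the /star endpoints above could be exercised with the
# `requests` library once the app runs locally; the host, port and star values
# are assumptions, not part of the original code.
#
#   import requests
#   base = 'http://127.0.0.1:5000'
#   requests.post(base + '/star', json={'name': 'Sirius', 'distance': 8.6})
#   print(requests.get(base + '/star/Sirius').json())
#   print(requests.get(base + '/star').json())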
2.71875
3
wc_lang/util.py
KarrLab/wc_lang
7
12298
""" Utilities :Author: <NAME> <<EMAIL>> :Date: 2016-11-10 :Copyright: 2016, Karr Lab :License: MIT """ from obj_tables import get_models as base_get_models from wc_lang import core from wc_lang import io from wc_utils.util import git def get_model_size(model): """ Get numbers of model components Args: model (:obj:`core.Model`): model Returns: :obj:`dict`: dictionary with numbers of each type of model component """ return { "submodels": len(model.get_submodels()), "compartments": len(model.get_compartments()), "species_types": len(model.get_species_types()), "species": len(model.get_species()), "parameters": len(model.get_parameters()), "references": len(model.get_references()), "reactions": len(model.get_reactions()), } def get_model_summary(model): """ Get textual summary of a model Args: model (:obj:`core.Model`): model Returns: :obj:`str`: textual summary of the model """ return "Model with:" \ + "\n{:d} submodels".format(len(model.get_submodels())) \ + "\n{:d} compartments".format(len(model.get_compartments())) \ + "\n{:d} species types".format(len(model.get_species_types())) \ + "\n{:d} species".format(len(model.get_species())) \ + "\n{:d} parameters".format(len(model.get_parameters())) \ + "\n{:d} references".format(len(model.get_references())) \ + "\n{:d} dFBA objective reactions".format(len(model.get_dfba_obj_reactions())) \ + "\n{:d} reactions".format(len(model.get_reactions())) \ + "\n{:d} rate laws".format(len(model.get_rate_laws())) def get_models(inline=True): """ Get list of models Args: inline (:obj:`bool`, optional): if true, return inline models Returns: :obj:`list` of :obj:`class`: list of models """ return base_get_models(module=core, inline=inline) def gen_ids(model): """ Generate ids for model objects Args: model (:obj:`core.Model`): model """ for obj in model.get_related(): if hasattr(obj, 'gen_id'): obj.id = obj.gen_id()
2.09375
2
synapse/tests/test_tools_autodoc.py
kcreyts/synapse
1
12299
import synapse.common as s_common

import synapse.tests.utils as s_t_utils

import synapse.tools.autodoc as s_autodoc


class TestAutoDoc(s_t_utils.SynTest):

    async def test_tools_autodoc_docmodel(self):

        with self.getTestDir() as path:

            argv = ['--doc-model', '--savedir', path]

            outp = self.getTestOutp()
            self.eq(await s_autodoc.main(argv, outp=outp), 0)

            with s_common.genfile(path, 'datamodel_types.rst') as fd:
                buf = fd.read()
            s = buf.decode()

            self.isin('Base types are defined via Python classes.', s)
            self.isin('synapse.models.inet.Addr', s)
            self.isin('Regular types are derived from BaseTypes.', s)
            self.isin(r'inet\:server', s)

            with s_common.genfile(path, 'datamodel_forms.rst') as fd:
                buf = fd.read()
            s = buf.decode()

            self.isin('Forms are derived from types, or base types. Forms represent node types in the graph.', s)
            self.isin(r'inet\:ipv4', s)
            self.notin(r'file\:bytes:.created', s)

            self.isin('Universal props are system level properties which may be present on every node.', s)
            self.isin('.created', s)
            self.notin('..created\n', s)

            self.isin('An example of ``inet:dns:a``\\:', s)

    async def test_tools_autodoc_confdefs(self):

        with self.getTestDir() as path:

            argv = ['--savedir', path, '--doc-conf',
                    'synapse.tests.test_lib_stormsvc.StormvarServiceCell']

            outp = self.getTestOutp()
            self.eq(await s_autodoc.main(argv, outp=outp), 0)

            with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
                buf = fd.read()
            s = buf.decode()

            self.isin('autodoc-stormvarservicecell-conf', s)
            self.isin('StormvarServiceCell Configuration Options', s)
            self.isin('See :ref:`devops-cell-config` for', s)
            self.isin('auth\\:passwd', s)
            self.isin('Environment Variable\n ``SYN_STORMVARSERVICECELL_AUTH_PASSWD``', s)
            self.isin('``--auth-passwd``', s)

            argv.append('--doc-conf-reflink')
            argv.append('`Configuring a Cell Service <https://synapse.docs.vertex.link/en/latest/synapse/devguides/devops_cell.html>`_')

            # truncate the current file
            with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
                fd.truncate()

            outp = self.getTestOutp()
            self.eq(await s_autodoc.main(argv, outp=outp), 0)

            with s_common.genfile(path, 'conf_stormvarservicecell.rst') as fd:
                buf = fd.read()
            s = buf.decode()

            self.isin('StormvarServiceCell Configuration Options', s)
            self.isin('See `Configuring a Cell Service <https://synapse', s)

    async def test_tools_autodoc_stormsvc(self):

        with self.getTestDir() as path:

            argv = ['--savedir', path, '--doc-storm',
                    'synapse.tests.test_lib_stormsvc.StormvarServiceCell']

            outp = self.getTestOutp()
            self.eq(await s_autodoc.main(argv, outp=outp), 0)

            with s_common.genfile(path, 'stormsvc_stormvarservicecell.rst') as fd:
                buf = fd.read()
            s = buf.decode()

            self.isin('StormvarServiceCell Storm Service', s)
            self.isin('This documentation is generated for version 0.0.1 of the service.', s)
            self.isin('Storm Package\\: stormvar', s)

            self.isin('.. _stormcmd-stormvar-magic:\n', s)
            self.isin('magic\n-----', s)
            self.isin('Test stormvar support', s)

            self.isin('forms as input nodes', s)
            self.isin('``test:str``', s)
            self.isin('nodes in the graph', s)
            self.isin('``test:comp``', s)
            self.isin('nodedata with the following keys', s)
            self.isin('``foo`` on ``inet:ipv4``', s)

    async def test_tools_autodoc_stormtypes(self):

        with self.getTestDir() as path:

            argv = ['--savedir', path, '--doc-stormtypes']
            outp = self.getTestOutp()
            self.eq(await s_autodoc.main(argv, outp=outp), 0)

            with s_common.genfile(path, 'stormtypes_libs.rst') as fd:
                libbuf = fd.read()
            libtext = libbuf.decode()

            self.isin('.. _stormlibs-lib-print:\n\n$lib.print(mesg, \\*\\*kwargs)\n============================',
                      libtext)
            self.isin('Print a message to the runtime.', libtext)
            self.isin('\\*\\*kwargs (any): Keyword arguments to substitute into the mesg.', libtext)
            self.isin('.. _stormlibs-lib-time:\n\n*********\n$lib.time\n*********', libtext)
            self.isin('A Storm Library for interacting with timestamps.', libtext)

            with s_common.genfile(path, 'stormtypes_prims.rst') as fd:
                primbuf = fd.read()
            primstext = primbuf.decode()

            self.isin('.. _stormprims-storm-auth-user:\n\n*****************\nstorm\\:auth\\:user\n*****************',
                      primstext)
            self.isin('iden\n====\n\nThe User iden.', primstext)
2.15625
2