Dataset columns (name, dtype, observed range or number of distinct values):

| column | dtype | range / values |
|---|---|---|
| hexsha | stringlengths | 40–40 |
| size | int64 | 5 – 2.06M |
| ext | stringclasses | 11 values |
| lang | stringclasses | 1 value |
| max_stars_repo_path | stringlengths | 3–251 |
| max_stars_repo_name | stringlengths | 4–130 |
| max_stars_repo_head_hexsha | stringlengths | 40–78 |
| max_stars_repo_licenses | listlengths | 1–10 |
| max_stars_count | int64 | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | stringlengths | 24–24 |
| max_stars_repo_stars_event_max_datetime | stringlengths | 24–24 |
| max_issues_repo_path | stringlengths | 3–251 |
| max_issues_repo_name | stringlengths | 4–130 |
| max_issues_repo_head_hexsha | stringlengths | 40–78 |
| max_issues_repo_licenses | listlengths | 1–10 |
| max_issues_count | int64 | 1 – 116k |
| max_issues_repo_issues_event_min_datetime | stringlengths | 24–24 |
| max_issues_repo_issues_event_max_datetime | stringlengths | 24–24 |
| max_forks_repo_path | stringlengths | 3–251 |
| max_forks_repo_name | stringlengths | 4–130 |
| max_forks_repo_head_hexsha | stringlengths | 40–78 |
| max_forks_repo_licenses | listlengths | 1–10 |
| max_forks_count | int64 | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | stringlengths | 24–24 |
| max_forks_repo_forks_event_max_datetime | stringlengths | 24–24 |
| content | stringlengths | 1 – 1.05M |
| avg_line_length | float64 | 1 – 1.02M |
| max_line_length | int64 | 3 – 1.04M |
| alphanum_fraction | float64 | 0 – 1 |
hexsha: 5338514dfbd12161e51e2cea2de687a8253338f8
size: 28,526
ext: py
lang: Python
max_stars_repo_path: PyDSTool/PyCont/BifPoint.py
max_stars_repo_name: mdlama/pydstool
max_stars_repo_head_hexsha: 3d298e908ff55340cd3612078508be0c791f63a8
max_stars_repo_licenses: [ "Python-2.0", "OLDAP-2.7" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2021-02-04T15:01:31.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-25T16:08:43.000Z
max_issues_repo_path: PyDSTool/PyCont/BifPoint.py
max_issues_repo_name: mdlama/pydstool
max_issues_repo_head_hexsha: 3d298e908ff55340cd3612078508be0c791f63a8
max_issues_repo_licenses: [ "Python-2.0", "OLDAP-2.7" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: PyDSTool/PyCont/BifPoint.py
max_forks_repo_name: mdlama/pydstool
max_forks_repo_head_hexsha: 3d298e908ff55340cd3612078508be0c791f63a8
max_forks_repo_licenses: [ "Python-2.0", "OLDAP-2.7" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2021-02-25T14:43:36.000Z
max_forks_repo_forks_event_max_datetime: 2021-02-25T14:43:36.000Z
content:
```python
""" Bifurcation point classes. Each class locates and processes
bifurcation points.

    * _BranchPointFold is a version based on BranchPoint location algorithms
    * BranchPoint: Branch process is broken (can't find alternate branch
      -- see MATCONT notes)

    Drew LaMar, March 2006
"""

from __future__ import absolute_import, print_function

from .misc import *
from PyDSTool.common import args
from .TestFunc import DiscreteMap, FixedPointMap

from numpy import Inf, NaN, isfinite, r_, c_, sign, mod, \
    subtract, divide, transpose, eye, real, imag, \
    conjugate, average
from scipy import optimize, linalg
from numpy import dot as matrixmultiply
from numpy import array, float, complex, int, float64, complex64, int32, \
    zeros, divide, subtract, reshape, argsort, nonzero

#####
_classes = ['BifPoint', 'BPoint', 'BranchPoint', 'FoldPoint', 'HopfPoint',
            'BTPoint', 'ZHPoint', 'CPPoint',
            'BranchPointFold', '_BranchPointFold', 'DHPoint',
            'GHPoint', 'LPCPoint', 'PDPoint', 'NSPoint', 'SPoint']

__all__ = _classes
#####

# Codimension-2 bifurcations

# Discrete maps
```
avg_line_length: 32.638444
max_line_length: 120
alphanum_fraction: 0.534179
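The BifPoint classes above locate points where a scalar "test function" (for example, a determinant of the Jacobian for a fold) crosses zero along a continuation branch. A minimal, generic sketch of that locate step follows; the names and the bisection strategy are illustrative, not PyCont's actual interface.

```python
# Generic bifurcation-point location: bisect a scalar test function on an
# interval where it changes sign. Illustrative only, not PyCont's API.
def locate_zero(testfunc, s0, s1, tol=1e-8, maxiter=100):
    """Bisect testfunc on [s0, s1]; assumes testfunc(s0), testfunc(s1) differ in sign."""
    f0 = testfunc(s0)
    for _ in range(maxiter):
        mid = 0.5 * (s0 + s1)
        fmid = testfunc(mid)
        if abs(fmid) < tol or (s1 - s0) < tol:
            return mid
        if f0 * fmid < 0:
            s1 = mid          # zero lies in the left half
        else:
            s0, f0 = mid, fmid  # zero lies in the right half
    return 0.5 * (s0 + s1)

# Example: the test function s**2 - 2 changes sign on [0, 2]; the located
# point is sqrt(2) ~= 1.41421.
print(locate_zero(lambda s: s * s - 2.0, 0.0, 2.0))
```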
hexsha: 533866f2077fc08488ebf544ff7c3db315b050b5
size: 283
ext: py
lang: Python
max_stars_repo_path: src/marion/marion/urls/__init__.py
max_stars_repo_name: OmenApps/marion
max_stars_repo_head_hexsha: f501674cafbd91f0bbad7454e4dcf3527cf4445e
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 7
max_stars_repo_stars_event_min_datetime: 2021-04-06T20:33:31.000Z
max_stars_repo_stars_event_max_datetime: 2021-09-30T23:29:24.000Z
max_issues_repo_path: src/marion/marion/urls/__init__.py
max_issues_repo_name: OmenApps/marion
max_issues_repo_head_hexsha: f501674cafbd91f0bbad7454e4dcf3527cf4445e
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 23
max_issues_repo_issues_event_min_datetime: 2020-09-09T15:01:50.000Z
max_issues_repo_issues_event_max_datetime: 2022-01-03T08:58:36.000Z
max_forks_repo_path: src/marion/marion/urls/__init__.py
max_forks_repo_name: OmenApps/marion
max_forks_repo_head_hexsha: f501674cafbd91f0bbad7454e4dcf3527cf4445e
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 2
max_forks_repo_forks_event_min_datetime: 2020-12-14T10:07:07.000Z
max_forks_repo_forks_event_max_datetime: 2021-06-29T00:20:43.000Z
content:
```python
"""Urls for the marion application"""

from django.urls import include, path
from rest_framework import routers

from .. import views

router = routers.DefaultRouter()
router.register(r"requests", views.DocumentRequestViewSet)

urlpatterns = [
    path("", include(router.urls)),
]
```
avg_line_length: 18.866667
max_line_length: 58
alphanum_fraction: 0.749117
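The `DefaultRouter` above generates the standard DRF list/detail routes for the registered viewset. A minimal sketch of mounting this urlconf in a host project; the `"marion/"` prefix is an arbitrary assumption.

```python
# Hypothetical project-level urls.py; any prefix works in place of "marion/".
from django.urls import include, path

urlpatterns = [
    # Yields marion/requests/ (list/create), marion/requests/<pk>/
    # (retrieve/update/destroy), and the DRF api-root view.
    path("marion/", include("marion.urls")),
]
```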
hexsha: 533987c4f01c2ae25c35913b042954d6d704d9b2
size: 1,736
ext: py
lang: Python
max_stars_repo_path: setup.py
max_stars_repo_name: TanKingsley/pyxll-jupyter
max_stars_repo_head_hexsha: 4f7b3eb361079b74683d89340dfff9576fb2ff41
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-12-28T10:40:38.000Z
max_stars_repo_stars_event_max_datetime: 2020-12-28T10:40:38.000Z
max_issues_repo_path: setup.py
max_issues_repo_name: TanKingsley/pyxll-jupyter
max_issues_repo_head_hexsha: 4f7b3eb361079b74683d89340dfff9576fb2ff41
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py
max_forks_repo_name: TanKingsley/pyxll-jupyter
max_forks_repo_head_hexsha: 4f7b3eb361079b74683d89340dfff9576fb2ff41
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
"""
PyXLL-Jupyter

This package integrates Jupyter notebooks into Microsoft Excel.

To install it, first install PyXLL (see https://www.pyxll.com).

Briefly, to install PyXLL do the following::

    pip install pyxll
    pyxll install

Once PyXLL is installed then installing this package will add a
button to the PyXLL ribbon toolbar that will start a Jupyter
notebook browser as a custom task pane in Excel.

To install this package use::

    pip install pyxll_jupyter
"""
from setuptools import setup, find_packages
from os import path

this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="pyxll_jupyter",
    description="Adds Jupyter notebooks to Microsoft Excel using PyXLL.",
    long_description=long_description,
    long_description_content_type='text/markdown',
    version="0.1.11",
    packages=find_packages(),
    include_package_data=True,
    package_data={
        "pyxll_jupyter": [
            "pyxll_jupyter/resources/ribbon.xml",
            "pyxll_jupyter/resources/jupyter.png",
        ]
    },
    project_urls={
        "Source": "https://github.com/pyxll/pyxll-jupyter",
        "Tracker": "https://github.com/pyxll/pyxll-jupyter/issues",
    },
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: Microsoft :: Windows"
    ],
    entry_points={
        "pyxll": [
            "modules = pyxll_jupyter.pyxll:modules",
            "ribbon = pyxll_jupyter.pyxll:ribbon"
        ]
    },
    install_requires=[
        "pyxll >= 5.0.0",
        "jupyter >= 1.0.0",
        "PySide2"
    ]
)
```
avg_line_length: 26.707692
max_line_length: 73
alphanum_fraction: 0.657258
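The `entry_points` block above is how PyXLL discovers this add-in: it declares `modules` and `ribbon` hooks under the `pyxll` group. A generic standard-library sketch of how such entry points can be enumerated at runtime (Python 3.10+ selection API); this is illustrative, not PyXLL's actual loader.

```python
# Enumerate entry points declared under the "pyxll" group.
from importlib.metadata import entry_points

for ep in entry_points().select(group="pyxll"):
    print(ep.name, "->", ep.value)  # e.g. "modules -> pyxll_jupyter.pyxll:modules"
    hook = ep.load()                # imports the module and returns the named attribute
```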
hexsha: 5339fd0b2f57b238565a16867e9a32da801ab240
size: 5,630
ext: py
lang: Python
max_stars_repo_path: board/views.py
max_stars_repo_name: albi23/Pyra
max_stars_repo_head_hexsha: 1c1ceece15d55cd0e0ecf41d7224683b93b72555
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: board/views.py
max_issues_repo_name: albi23/Pyra
max_issues_repo_head_hexsha: 1c1ceece15d55cd0e0ecf41d7224683b93b72555
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 6
max_issues_repo_issues_event_min_datetime: 2021-03-19T01:58:04.000Z
max_issues_repo_issues_event_max_datetime: 2021-09-22T18:53:15.000Z
max_forks_repo_path: board/views.py
max_forks_repo_name: albi23/Pyra
max_forks_repo_head_hexsha: 1c1ceece15d55cd0e0ecf41d7224683b93b72555
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2020-06-29T18:16:29.000Z
max_forks_repo_forks_event_max_datetime: 2020-06-29T18:16:29.000Z
content (class bodies are truncated in this record):
```python
from typing import List

from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic, View

from board.forms import SignUpForm
from .const import BOARD_VIEW_COLUMN_COUNT
from .models import Board, Priority, Membership, Contribution
from .models import Task


class SignUp(generic.CreateView):
    form_class = SignUpForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'


class CreateBoard(View):

class CreateTask(View):

class CreateBoardMembership(View):

def parse_priority(value: str):
    choices = Priority.choices
    for i in range(0, len(choices)):
        if value == choices[i][1].lower():
            return choices[i][0]
```
avg_line_length: 28.291457
max_line_length: 101
alphanum_fraction: 0.610302
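`parse_priority` maps a lowercase label back to its stored value by scanning `Priority.choices`, which in Django enumeration types is a list of `(value, label)` pairs. The same lookup written as a precomputed dict; the choice values below are stand-ins, not the app's real `Priority`.

```python
# Stand-in for Priority.choices: (value, label) pairs as in Django enums.
choices = [(1, "Low"), (2, "Medium"), (3, "High")]

# Build the reverse mapping once instead of scanning on every call.
label_to_value = {label.lower(): value for value, label in choices}

assert label_to_value.get("high") == 3
assert label_to_value.get("unknown") is None  # same None fall-through as the loop
```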
hexsha: 533acfb3888fb78753274cc7a4925350317c5e43
size: 1,008
ext: py
lang: Python
max_stars_repo_path: setup.py
max_stars_repo_name: lazmond3/pylib-instagram-type
max_stars_repo_head_hexsha: 9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
max_stars_repo_licenses: [ "BSD-2-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: setup.py
max_issues_repo_name: lazmond3/pylib-instagram-type
max_issues_repo_head_hexsha: 9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
max_issues_repo_licenses: [ "BSD-2-Clause" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: setup.py
max_forks_repo_name: lazmond3/pylib-instagram-type
max_forks_repo_head_hexsha: 9683a7fb1dad9b1a770a3f98317f1cde1085f0a7
max_forks_repo_licenses: [ "BSD-2-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
# -*- coding: utf-8 -*-

# Learn more: https://github.com/kennethreitz/setup.py

from setuptools import setup, find_packages
import os

with open('README.md') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='lazmond3-pylib-instagram-type',
    version='1.0.8',
    description='update from 1.0.8: hasattr: 1.0.7: medias , str get multiple + init.py',
    long_description=readme,
    author='lazmond3',
    author_email='moikilo00@gmail.com',
    url='https://github.com/lazmond3/pylib-instagram-type.git',
    install_requires=["lazmond3-pylib-debug"],
    license=license,
    packages=find_packages(exclude=('tests', 'docs')),
    test_suite='tests'
)
```
avg_line_length: 25.2
max_line_length: 92
alphanum_fraction: 0.667659
hexsha: 533adf5d76752741ed41f712b78044d29524e61c
size: 2,455
ext: py
lang: Python
max_stars_repo_path: tbx/core/migrations/0111_move_sign_up_form_into_new_app.py
max_stars_repo_name: arush15june/wagtail-torchbox
max_stars_repo_head_hexsha: c4d06e096c72bd8007975dc016133024f9d27fab
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tbx/core/migrations/0111_move_sign_up_form_into_new_app.py
max_issues_repo_name: arush15june/wagtail-torchbox
max_issues_repo_head_hexsha: c4d06e096c72bd8007975dc016133024f9d27fab
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tbx/core/migrations/0111_move_sign_up_form_into_new_app.py
max_forks_repo_name: arush15june/wagtail-torchbox
max_forks_repo_head_hexsha: c4d06e096c72bd8007975dc016133024f9d27fab
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the migration class itself is truncated in this record):
```python
# -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2019-01-15 22:49
from __future__ import unicode_literals

from django.db import migrations
```
avg_line_length: 31.474359
max_line_length: 100
alphanum_fraction: 0.619959
hexsha: 533c689999c368cfd2824982d040a984df189702
size: 3,909
ext: py
lang: Python
max_stars_repo_path: tests/test_webframe.py
max_stars_repo_name: zsolt-beringer/osm-gimmisn
max_stars_repo_head_hexsha: b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_webframe.py
max_issues_repo_name: zsolt-beringer/osm-gimmisn
max_issues_repo_head_hexsha: b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_webframe.py
max_forks_repo_name: zsolt-beringer/osm-gimmisn
max_forks_repo_head_hexsha: b0cbf2e88c1846ef49e33fd32aeb6b4ecabea4c0
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (test cases are truncated in this record):
```python
#!/usr/bin/env python3
#
# Copyright (c) 2019 Miklos Vajna and contributors.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#

"""The test_webframe module covers the webframe module."""

from typing import List
from typing import TYPE_CHECKING
from typing import Tuple
from typing import cast
import configparser
import datetime
import os
import unittest
import unittest.mock
import time

# pylint: disable=unused-import
import yattag

import webframe

if TYPE_CHECKING:
    # pylint: disable=no-name-in-module,import-error,unused-import
    from wsgiref.types import StartResponse  # noqa: F401


if __name__ == '__main__':
    unittest.main()
```
avg_line_length: 33.991304
max_line_length: 89
alphanum_fraction: 0.643643
hexsha: 533cc101e9c7e5be34bb424dc7dd27a2b33a585a
size: 6,533
ext: py
lang: Python
max_stars_repo_path: spotify.py
max_stars_repo_name: nimatest1234/telegram_spotify_downloader_bot
max_stars_repo_head_hexsha: 7e0a9ba32ee219752582b917867600653337f3d1
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: spotify.py
max_issues_repo_name: nimatest1234/telegram_spotify_downloader_bot
max_issues_repo_head_hexsha: 7e0a9ba32ee219752582b917867600653337f3d1
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: spotify.py
max_forks_repo_name: nimatest1234/telegram_spotify_downloader_bot
max_forks_repo_head_hexsha: 7e0a9ba32ee219752582b917867600653337f3d1
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
from __future__ import unicode_literals

import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
import requests
from youtube_search import YoutubeSearch
import youtube_dl
import eyed3.id3
import eyed3
import lyricsgenius
import telepot

spotifyy = spotipy.Spotify(
    client_credentials_manager=SpotifyClientCredentials(
        client_id='a145db3dcd564b9592dacf10649e4ed5',
        client_secret='389614e1ec874f17b8c99511c7baa2f6'))

genius = lyricsgenius.Genius('biZZReO7F98mji5oz3cE0FiIG73Hh07qoXSIzYSGNN3GBsnY-eUrPAVSdJk_0_de')

token = 'token bot'
bot = telepot.Bot(token)
```
avg_line_length: 34.026042
max_line_length: 106
alphanum_fraction: 0.590693
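The rest of this bot (truncated here) presumably resolves Spotify tracks and hands them to a YouTube search for downloading. A minimal sketch of that step, continuing from the `spotifyy` client built above; the query string and field access are illustrative.

```python
# Look up one track via the client-credentials Spotify client, then search
# YouTube for a matching video. Query text is a placeholder.
results = spotifyy.search(q="track name artist", type="track", limit=1)
track = results["tracks"]["items"][0]
query = "{} {}".format(track["name"], track["artists"][0]["name"])

# youtube_search returns candidate videos as a plain dict for the download step.
videos = YoutubeSearch(query, max_results=1).to_dict()
```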
hexsha: 533d2e7d7e3bbba4894560b223b684f968b2d464
size: 5,512
ext: py
lang: Python
max_stars_repo_path: tests/test_atomdict.py
max_stars_repo_name: Tillsten/atom
max_stars_repo_head_hexsha: 19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
max_stars_repo_licenses: [ "BSD-3-Clause-Clear" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test_atomdict.py
max_issues_repo_name: Tillsten/atom
max_issues_repo_head_hexsha: 19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
max_issues_repo_licenses: [ "BSD-3-Clause-Clear" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test_atomdict.py
max_forks_repo_name: Tillsten/atom
max_forks_repo_head_hexsha: 19b6291f7d3c9b3828dcd73e900b8dcbc2ddf92d
max_forks_repo_licenses: [ "BSD-3-Clause-Clear" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
#------------------------------------------------------------------------------
# Copyright (c) 2018-2019, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file LICENSE, distributed with this software.
#------------------------------------------------------------------------------
"""Test the typed dictionary.

"""
import sys

import pytest

from atom.api import Atom, Dict, Int, atomdict

MEMBERS = ['untyped', 'keytyped', 'valuetyped', 'fullytyped',
           'untyped_default', 'keytyped_default', 'valuetyped_default',
           'fullytyped_default']


def test_setitem(atom_dict):
    """Test setting items.

    """
    atom_dict.untyped[''] = 1
    assert atom_dict.untyped[''] == 1
    atom_dict.keytyped[1] = ''
    assert atom_dict.keytyped[1] == ''
    with pytest.raises(TypeError):
        atom_dict.keytyped[''] = 1
    atom_dict.valuetyped[1] = 1
    assert atom_dict.valuetyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.valuetyped[''] = ''
    atom_dict.fullytyped[1] = 1
    assert atom_dict.fullytyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped[''] = 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped[1] = ''


def test_setdefault(atom_dict):
    """Test using setdefault.

    """
    assert atom_dict.untyped.setdefault('', 1) == 1
    assert atom_dict.untyped.setdefault('', 2) == 1
    assert atom_dict.untyped[''] == 1
    assert atom_dict.keytyped.setdefault(1, '') == ''
    assert atom_dict.keytyped[1] == ''
    with pytest.raises(TypeError):
        atom_dict.keytyped.setdefault('', 1)
    assert atom_dict.valuetyped.setdefault(1, 1) == 1
    assert atom_dict.valuetyped.setdefault(1, '') == 1
    assert atom_dict.valuetyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.valuetyped.setdefault(2, '')
    assert atom_dict.fullytyped.setdefault(1, 1) == 1
    assert atom_dict.fullytyped.setdefault(1, '') == 1
    assert atom_dict.fullytyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped.setdefault('', 1)
    with pytest.raises(TypeError):
        atom_dict.fullytyped.setdefault(2, '')


def test_update(atom_dict):
    """Test update a dict.

    """
    atom_dict.untyped.update({'': 1})
    assert atom_dict.untyped[''] == 1
    atom_dict.untyped.update([('1', 1)])
    assert atom_dict.untyped['1'] == 1
    atom_dict.keytyped.update({1: 1})
    assert atom_dict.keytyped[1] == 1
    atom_dict.keytyped.update([(2, 1)])
    assert atom_dict.keytyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.keytyped.update({'': 1})
    atom_dict.valuetyped.update({1: 1})
    assert atom_dict.valuetyped[1] == 1
    atom_dict.valuetyped.update([(2, 1)])
    assert atom_dict.valuetyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.valuetyped.update({'': ''})
    atom_dict.fullytyped.update({1: 1})
    assert atom_dict.fullytyped[1] == 1
    atom_dict.fullytyped.update([(2, 1)])
    assert atom_dict.fullytyped[1] == 1
    with pytest.raises(TypeError):
        atom_dict.fullytyped.update({'': 1})
    with pytest.raises(TypeError):
        atom_dict.fullytyped.update({'': ''})
```
avg_line_length: 28.559585
max_line_length: 79
alphanum_fraction: 0.622097
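These tests rely on an `atom_dict` fixture that is not part of this record (it lives in the project's conftest). A plausible, hypothetical fixture matching the four typed-dict flavors exercised above, using `atom.api.Dict(key, value)` validators:

```python
# Hypothetical conftest fixture for the tests above; the real project's
# fixture may differ (e.g. it also covers the *_default members).
import pytest
from atom.api import Atom, Dict, Int


class DictAtom(Atom):
    untyped = Dict()                  # no validation
    keytyped = Dict(Int())            # int keys only
    valuetyped = Dict(value=Int())    # int values only
    fullytyped = Dict(Int(), Int())   # int keys and int values


@pytest.fixture
def atom_dict():
    return DictAtom()
```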
hexsha: 533df513d0a6e1b0c3e33a0160d65ef452f32685
size: 433
ext: py
lang: Python
max_stars_repo_path: dippy/core/timestamp.py
max_stars_repo_name: eggveloper/dippy.core
max_stars_repo_head_hexsha: 8ad613a50bcbf52132de1ece889e22fa4aba3a44
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 4
max_stars_repo_stars_event_min_datetime: 2021-04-23T10:26:28.000Z
max_stars_repo_stars_event_max_datetime: 2021-08-29T15:34:46.000Z
max_issues_repo_path: dippy/core/timestamp.py
max_issues_repo_name: eggveloper/dippy.core
max_issues_repo_head_hexsha: 8ad613a50bcbf52132de1ece889e22fa4aba3a44
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 23
max_issues_repo_issues_event_min_datetime: 2021-05-27T13:48:32.000Z
max_issues_repo_issues_event_max_datetime: 2021-12-15T15:41:28.000Z
max_forks_repo_path: dippy/core/timestamp.py
max_forks_repo_name: eggveloper/dippy.core
max_forks_repo_head_hexsha: 8ad613a50bcbf52132de1ece889e22fa4aba3a44
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2021-05-22T17:16:57.000Z
max_forks_repo_forks_event_max_datetime: 2021-12-15T15:19:12.000Z
content (the module body is truncated in this record):
```python
from datetime import datetime
```
avg_line_length: 24.055556
max_line_length: 74
alphanum_fraction: 0.625866
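Only the import survives in this record. For Discord-oriented libraries such as dippy.core, a timestamp module usually decodes a snowflake ID into a `datetime`. A generic sketch of that well-known conversion, not necessarily dippy's actual implementation:

```python
# Generic Discord snowflake -> datetime conversion (assumption: this is the
# kind of helper the truncated module provides).
from datetime import datetime, timezone

DISCORD_EPOCH_MS = 1_420_070_400_000  # 2015-01-01T00:00:00Z in milliseconds


def snowflake_to_datetime(snowflake: int) -> datetime:
    ms = (snowflake >> 22) + DISCORD_EPOCH_MS  # top 42 bits are the timestamp
    return datetime.fromtimestamp(ms / 1000, tz=timezone.utc)


# Example snowflake from the Discord API docs -> 2016-04-30 11:18:25 UTC
print(snowflake_to_datetime(175928847299117063))
```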
hexsha: 533dfb21c5c2375616c84f6be95236ba70b32b20
size: 701
ext: py
lang: Python
max_stars_repo_path: bible/admin.py
max_stars_repo_name: tushortz/biblelover
max_stars_repo_head_hexsha: 8ef4980d7f68e4037874373fb0ecde12d2d63d76
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: bible/admin.py
max_issues_repo_name: tushortz/biblelover
max_issues_repo_head_hexsha: 8ef4980d7f68e4037874373fb0ecde12d2d63d76
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: bible/admin.py
max_forks_repo_name: tushortz/biblelover
max_forks_repo_head_hexsha: 8ef4980d7f68e4037874373fb0ecde12d2d63d76
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the registration code is truncated in this record):
```python
from django.contrib import admin

from bible.models import Bible, VerseOfTheDay
```
avg_line_length: 26.961538
max_line_length: 70
alphanum_fraction: 0.694722
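The registrations themselves are truncated in this record; the minimal pattern for exposing the two imported models in the Django admin would be:

```python
# Minimal sketch; the real admin.py may use ModelAdmin subclasses instead.
from django.contrib import admin

from bible.models import Bible, VerseOfTheDay

admin.site.register(Bible)
admin.site.register(VerseOfTheDay)
```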
hexsha: 533f6cd5ce74f507059e39e73891411c50d53556
size: 15,158
ext: py
lang: Python
max_stars_repo_path: typy/nodes.py
max_stars_repo_name: Procrat/typy
max_stars_repo_head_hexsha: 668cedb7f929256a09f565af9ee43c02889bec3f
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2016-03-08T09:55:20.000Z
max_stars_repo_stars_event_max_datetime: 2016-09-09T12:54:12.000Z
max_issues_repo_path: typy/nodes.py
max_issues_repo_name: Procrat/typy
max_issues_repo_head_hexsha: 668cedb7f929256a09f565af9ee43c02889bec3f
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: typy/nodes.py
max_forks_repo_name: Procrat/typy
max_forks_repo_head_hexsha: 668cedb7f929256a09f565af9ee43c02889bec3f
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the Node class hierarchy is truncated in this record):
```python
"""
Our own implementation of an abstract syntax tree (AST).

The convert function recursively converts a Python AST (from the module
`ast`) to our own AST (of the class `Node`).
"""

import ast
from logging import debug

from typy.builtin import data_types
from typy.exceptions import NotYetSupported, NoSuchAttribute, NotIterable
from typy import types


def _assign(target, value, type_map):
    value_type = value.check()

    if isinstance(target, Name):
        target_type = target.check()
        type_map.add_variable(target_type.id, value_type)
    elif isinstance(target, Attribute):
        target_type, attr = target.check()
        target_type.set_attribute(attr, value_type)
    else:
        raise NotYetSupported('assignment to', target)


def convert(type_map, node):
    class_name = node.__class__.__name__
    try:
        # Try to convert to a node
        class_ = globals()[class_name]
        return class_(type_map, node)
    except KeyError:
        try:
            # Try to convert to a builtin type
            class_ = getattr(data_types, class_name)
            return class_()
        except AttributeError:
            raise NotYetSupported('node', node)
```
avg_line_length: 29.547758
max_line_length: 79
alphanum_fraction: 0.605159
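`convert` dispatches on the ast node's class name via a `globals()` lookup, falling back to `typy.builtin.data_types`. A self-contained illustration of that dispatch idiom (the `Assign` stand-in and `convert_demo` are hypothetical, not typy's classes):

```python
# Name-based dispatch: map an ast node's class name to a handler class found
# in the current namespace.
import ast


class Assign:
    """Stand-in for a typy node wrapping ast.Assign."""
    def __init__(self, node):
        self.node = node


def convert_demo(node):
    class_ = globals().get(node.__class__.__name__)
    if class_ is None:
        raise NotImplementedError(node.__class__.__name__)
    return class_(node)


stmt = ast.parse("x = 1").body[0]
print(type(convert_demo(stmt)).__name__)  # -> Assign
```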
hexsha: 533f744d195d3508a544cf4533f2224861641646
size: 2,310
ext: py
lang: Python
max_stars_repo_path: anonlink-entity-service/backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
max_stars_repo_name: Sam-Gresh/linkage-agent-tools
max_stars_repo_head_hexsha: f405c7efe3fa82d99bc047f130c0fac6f3f5bf82
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-05-19T07:29:31.000Z
max_stars_repo_stars_event_max_datetime: 2020-05-19T07:29:31.000Z
max_issues_repo_path: backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
max_issues_repo_name: hardbyte/anonlink-entity-service
max_issues_repo_head_hexsha: 3c1815473bc8169ca571532c18e0913a45c704de
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: backend/entityservice/integrationtests/objectstoretests/test_objectstore.py
max_forks_repo_name: hardbyte/anonlink-entity-service
max_forks_repo_head_hexsha: 3c1815473bc8169ca571532c18e0913a45c704de
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the test cases are truncated in this record):
```python
"""
Testing:
- uploading over existing files
- using deleted credentials
- using expired credentials
"""
import io

import minio
from minio import Minio
import pytest
from minio.credentials import AssumeRoleProvider, Credentials

from entityservice.object_store import connect_to_object_store, connect_to_upload_object_store
from entityservice.settings import Config

restricted_upload_policy = """{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Action": [
        "s3:PutObject"
      ],
      "Effect": "Allow",
      "Resource": [
        "arn:aws:s3:::uploads/2020/*"
      ],
      "Sid": "Upload-access-to-specific-bucket-only"
    }
  ]
}
"""
```
avg_line_length: 32.535211
max_line_length: 117
alphanum_fraction: 0.685281
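The policy above restricts `s3:PutObject` to `uploads/2020/*`, so the tests presumably exercise uploads against that prefix. A minimal sketch of the upload call shape with the MinIO client; the endpoint and credentials are placeholders:

```python
# Upload a small object with minio-py; put_object takes a readable stream
# plus its length. Endpoint and keys are placeholders for a local server.
import io

from minio import Minio

client = Minio("localhost:9000",
               access_key="minio", secret_key="minio123", secure=False)

data = b"example payload"
client.put_object("uploads", "2020/test.bin", io.BytesIO(data), len(data))
```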
hexsha: 533f98ae9a010e07bbde7ba6ae8dc19383efacaf
size: 1,731
ext: py
lang: Python
max_stars_repo_path: soil/build/lib/soil/db/sqlalchemy/api.py
max_stars_repo_name: JackDan9/soil
max_stars_repo_head_hexsha: ae612a4634634aace834491fbdefbc69e6167674
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-08-06T11:58:35.000Z
max_stars_repo_stars_event_max_datetime: 2020-08-06T11:58:35.000Z
max_issues_repo_path: soil/build/lib/soil/db/sqlalchemy/api.py
max_issues_repo_name: JackDan9/soil
max_issues_repo_head_hexsha: ae612a4634634aace834491fbdefbc69e6167674
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 4
max_issues_repo_issues_event_min_datetime: 2019-12-13T11:27:28.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-27T11:58:38.000Z
max_forks_repo_path: soil/soil/db/sqlalchemy/api.py
max_forks_repo_name: JackDan9/soil
max_forks_repo_head_hexsha: ae612a4634634aace834491fbdefbc69e6167674
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the backend functions are truncated in this record):
```python
# Copyright 2020 Soil, Inc.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""Implementation of SQLAlchemy backend."""

import collections
import copy
import datetime
import functools
import inspect
import sys
import threading

from oslo_db.sqlalchemy import session as db_session
from oslo_log import log as logging

import soil.conf
from soil.i18n import _

CONF = soil.conf.CONF

LOG = logging.getLogger(__name__)

_LOCK = threading.Lock()
_FACADE = None
```
avg_line_length: 25.086957
max_line_length: 78
alphanum_fraction: 0.716927
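The `_LOCK`/`_FACADE` globals are the usual oslo.db lazy engine-facade pattern: build the facade once, under a lock, on first use. A typical completion looks like the sketch below; soil's actual helpers (truncated in this record) may differ.

```python
# Sketch of the standard oslo.db lazy-facade helpers these globals support.
def _create_facade_lazily():
    global _FACADE
    if _FACADE is None:
        with _LOCK:
            # Re-check inside the lock so only one thread builds the facade.
            if _FACADE is None:
                _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE


def get_engine():
    return _create_facade_lazily().get_engine()


def get_session(**kwargs):
    return _create_facade_lazily().get_session(**kwargs)
```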
hexsha: 533fd29f460114d34ef9d86ca150f4d2360ad787
size: 26,237
ext: py
lang: Python
max_stars_repo_path: tests/test_models.py
max_stars_repo_name: kykrueger/redash
max_stars_repo_head_hexsha: 5fd78fdb2324a7c194e8a99c13deb5a57268866c
max_stars_repo_licenses: [ "BSD-2-Clause" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2019-11-19T06:10:22.000Z
max_stars_repo_stars_event_max_datetime: 2019-11-19T06:10:22.000Z
max_issues_repo_path: tests/test_models.py
max_issues_repo_name: kykrueger/redash
max_issues_repo_head_hexsha: 5fd78fdb2324a7c194e8a99c13deb5a57268866c
max_issues_repo_licenses: [ "BSD-2-Clause" ]
max_issues_count: 3
max_issues_repo_issues_event_min_datetime: 2022-02-14T01:15:27.000Z
max_issues_repo_issues_event_max_datetime: 2022-02-27T11:21:50.000Z
max_forks_repo_path: tests/test_models.py
max_forks_repo_name: kykrueger/redash
max_forks_repo_head_hexsha: 5fd78fdb2324a7c194e8a99c13deb5a57268866c
max_forks_repo_licenses: [ "BSD-2-Clause" ]
max_forks_count: 1
max_forks_repo_forks_event_min_datetime: 2019-12-06T08:30:35.000Z
max_forks_repo_forks_event_max_datetime: 2019-12-06T08:30:35.000Z
content (the TestCase classes are truncated in this record):
```python
import calendar
import datetime
from unittest import TestCase

import pytz
from dateutil.parser import parse as date_parse

from tests import BaseTestCase
from redash import models, redis_connection
from redash.models import db, types
from redash.utils import gen_query_hash, utcnow


def _set_up_dashboard_test(d):
    d.g1 = d.factory.create_group(name='First', permissions=['create', 'view'])
    d.g2 = d.factory.create_group(name='Second', permissions=['create', 'view'])
    d.ds1 = d.factory.create_data_source()
    d.ds2 = d.factory.create_data_source()
    db.session.flush()
    d.u1 = d.factory.create_user(group_ids=[d.g1.id])
    d.u2 = d.factory.create_user(group_ids=[d.g2.id])

    db.session.add_all([
        models.DataSourceGroup(group=d.g1, data_source=d.ds1),
        models.DataSourceGroup(group=d.g2, data_source=d.ds2)
    ])

    d.q1 = d.factory.create_query(data_source=d.ds1)
    d.q2 = d.factory.create_query(data_source=d.ds2)
    d.v1 = d.factory.create_visualization(query_rel=d.q1)
    d.v2 = d.factory.create_visualization(query_rel=d.q2)
    d.w1 = d.factory.create_widget(visualization=d.v1)
    d.w2 = d.factory.create_widget(visualization=d.v2)
    d.w3 = d.factory.create_widget(visualization=d.v2, dashboard=d.w2.dashboard)
    d.w4 = d.factory.create_widget(visualization=d.v2)
    d.w5 = d.factory.create_widget(visualization=d.v1, dashboard=d.w4.dashboard)
    d.w1.dashboard.is_draft = False
    d.w2.dashboard.is_draft = False
    d.w4.dashboard.is_draft = False
```
avg_line_length: 46.355124
max_line_length: 149
alphanum_fraction: 0.675192
hexsha: 534038a8e2bcedd293d3b518dec4b55832e33688
size: 407
ext: py
lang: Python
max_stars_repo_path: .history/List of Capstone Projects/FibonacciSequence_20200516134123.py
max_stars_repo_name: EvanthiosPapadopoulos/Python3
max_stars_repo_head_hexsha: ab773fd458e365c1510f98ecac65965234c881e8
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 1
max_stars_repo_stars_event_min_datetime: 2020-05-18T17:50:00.000Z
max_stars_repo_stars_event_max_datetime: 2020-05-18T17:50:00.000Z
max_issues_repo_path: .history/List of Capstone Projects/FibonacciSequence_20200516134123.py
max_issues_repo_name: EvanthiosPapadopoulos/Python3
max_issues_repo_head_hexsha: ab773fd458e365c1510f98ecac65965234c881e8
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: .history/List of Capstone Projects/FibonacciSequence_20200516134123.py
max_forks_repo_name: EvanthiosPapadopoulos/Python3
max_forks_repo_head_hexsha: ab773fd458e365c1510f98ecac65965234c881e8
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
'''
Fibonacci Sequence
'''

import HeaderOfFiles

while True:
    try:
        f = int(input("Enter a number for Fibonacci: "))
        break
    except:
        print("Give me a number please!")

fibonacciSeq(f)
```
avg_line_length: 15.074074
max_line_length: 56
alphanum_fraction: 0.547912
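`fibonacciSeq` is referenced but not defined in this snapshot (it presumably lived in the imported `HeaderOfFiles` module or an earlier revision). A working definition consistent with the call above:

```python
# One plausible definition for the missing helper: print the first n
# Fibonacci numbers.
def fibonacciSeq(n):
    a, b = 0, 1
    for _ in range(n):
        print(a)
        a, b = b, a + b


fibonacciSeq(7)  # 0 1 1 2 3 5 8
```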
hexsha: 53424970ea429b3e768870aa46951fcf76807afa
size: 463
ext: py
lang: Python
max_stars_repo_path: composer/algorithms/mixup/__init__.py
max_stars_repo_name: jacobfulano/composer
max_stars_repo_head_hexsha: 4ad81df2d2ca6e5f0b4922bb2db750cd76ba34e8
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 2
max_stars_repo_stars_event_min_datetime: 2022-03-17T04:48:04.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-20T09:06:19.000Z
max_issues_repo_path: composer/algorithms/mixup/__init__.py
max_issues_repo_name: jacobfulano/composer
max_issues_repo_head_hexsha: 4ad81df2d2ca6e5f0b4922bb2db750cd76ba34e8
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: composer/algorithms/mixup/__init__.py
max_forks_repo_name: jacobfulano/composer
max_forks_repo_head_hexsha: 4ad81df2d2ca6e5f0b4922bb2db750cd76ba34e8
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
# Copyright 2021 MosaicML. All Rights Reserved.

from composer.algorithms.mixup.mixup import MixUp as MixUp
from composer.algorithms.mixup.mixup import MixUpHparams as MixUpHparams
from composer.algorithms.mixup.mixup import mixup_batch as mixup_batch

_name = 'MixUp'
_class_name = 'MixUp'
_functional = 'mixup_batch'
_tldr = 'Blends pairs of examples and labels'
_attribution = '(Zhang et al, 2017)'
_link = 'https://arxiv.org/abs/1710.09412'
_method_card = ''
```
avg_line_length: 33.071429
max_line_length: 72
alphanum_fraction: 0.786177
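The `mixup_batch` function re-exported above implements MixUp (Zhang et al., 2017): convex-combine a batch with a shuffled copy of itself, with mixing weight lambda drawn from Beta(alpha, alpha). A generic sketch of the operation itself; this is the technique, not MosaicML's exact signature.

```python
# Generic MixUp: x' = lam * x + (1 - lam) * x[perm]; the loss is then
# lam * loss(y) + (1 - lam) * loss(y[perm]).
import numpy as np


def mixup(x, y, alpha=0.2, rng=None):
    rng = rng or np.random.default_rng()
    lam = rng.beta(alpha, alpha)
    perm = rng.permutation(len(x))
    x_mixed = lam * x + (1.0 - lam) * x[perm]
    return x_mixed, y, y[perm], lam


x = np.random.rand(8, 3, 32, 32)                  # a toy image batch
y = np.eye(10)[np.random.randint(0, 10, size=8)]  # one-hot labels
x_mixed, y_a, y_b, lam = mixup(x, y)
```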
hexsha: 5342baca137d0ce393a0884db4bee3c92fc045d0
size: 1,503
ext: py
lang: Python
max_stars_repo_path: tests/simple_gan_test.py
max_stars_repo_name: alanpeixinho/NiftyNet
max_stars_repo_head_hexsha: 9a17022a71985974f9e5ca992c765d55860fdd7d
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/simple_gan_test.py
max_issues_repo_name: alanpeixinho/NiftyNet
max_issues_repo_head_hexsha: 9a17022a71985974f9e5ca992c765d55860fdd7d
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/simple_gan_test.py
max_forks_repo_name: alanpeixinho/NiftyNet
max_forks_repo_head_hexsha: 9a17022a71985974f9e5ca992c765d55860fdd7d
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (the test cases are truncated in this record):
```python
from __future__ import absolute_import, print_function

import unittest
import os

import tensorflow as tf
from tensorflow.keras import regularizers

from niftynet.network.simple_gan import SimpleGAN
from tests.niftynet_testcase import NiftyNetTestCase

if __name__ == "__main__":
    tf.test.main()
```
avg_line_length: 30.673469
max_line_length: 65
alphanum_fraction: 0.642715
hexsha: 53432a332241c8c7299ada338e326f5385523550
size: 2,537
ext: py
lang: Python
max_stars_repo_path: tests/test.py
max_stars_repo_name: N4S4/thingspeak_wrapper
max_stars_repo_head_hexsha: f5c26e52c09124b85cc6056782d766d145e65a31
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: tests/test.py
max_issues_repo_name: N4S4/thingspeak_wrapper
max_issues_repo_head_hexsha: f5c26e52c09124b85cc6056782d766d145e65a31
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: null
max_issues_repo_issues_event_min_datetime: null
max_issues_repo_issues_event_max_datetime: null
max_forks_repo_path: tests/test.py
max_forks_repo_name: N4S4/thingspeak_wrapper
max_forks_repo_head_hexsha: f5c26e52c09124b85cc6056782d766d145e65a31
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content:
```python
import time

import thingspeak_wrapper as tsw

# Initiate the class ThingWrapper with (CHANNEL_ID, WRITE_API__KEY, READ_API_KEY)
# if is a public channel just pass the CHANNEL_ID argument, api_key defaults are None
my_channel = tsw.wrapper.ThingWrapper(501309, '6TQDNWJQ44FA0GAQ', '10EVD2N6YIHI5O7Z')

# all set of functions are:
# my_channel.sender()
# my_channel.multiple_sender()
# my_channel.get_json_feeds()
# my_channel.get_json_feeds_from()
# my_channel.get_xml_feeds()
# my_channel.get_xml_feeds_from()
# my_channel.get_csv_feeds()
# my_channel.get_csv_feeds_from()

# ---------------------------
# Now you can use all the possible functions

# Send a value to a single field
my_channel.sender(1, 4)
# this delay is due to limitation of thingspeak free account which allow you to post data every 15 sec minimum
time.sleep(15)

# ---------------------------
# Send data to multiple field
# It take 2 input as lists ([..], [..])
# Create lists of fields and values
fields = [1, 2, 3]
values = [22.0, 1029, 700]
# pass them to the function
my_channel.multiple_sender(fields, values)

# ---------------------------
# Get data functions returns data as json, xml, csv
# optionally csv can be returned as Pandas Data frame
# pass arguments to the function (field, data_quantity)
# default values are ( fields='feeds', results_quantity=None)
# you will get all fields and all values (max 8000)
json_field1 = my_channel.get_json_feeds(1, 300)
print(json_field1)

# get xml data pass same values as previous function
xml_field1 = my_channel.get_xml_feeds(1, 300)
print(xml_field1)

# get csv data
# this function requires to specify (field, pandas_format=True, result_quantity=None)
# defaults are (fields='feeds', pandas_format=True, result_quantity=None)
csv_field1 = my_channel.get_csv_feeds(1, pandas_format=True, results_quantity=300)
print(csv_field1)

# data without pandas_format
csv_no_pandas = my_channel.get_csv_feeds(1, pandas_format=False, results_quantity=300)
print(csv_no_pandas)

# there is the possibility to request data from and to specific dates
# set date and time as strings YYYY-MM-DD HH:NN:SS
start_date, start_time = '2018-05-21', '12:00:00'
stop_date, stop_time = '2018-05-21', '23:59:59'
# pass values to the function
# defaults are (start_date, start_time, stop_date=None, stop_time=None, fields='feeds')
values_from_date = my_channel.get_json_feeds_from(stop_date, start_time, stop_date, stop_time, 1)
print(values_from_date)
```
avg_line_length: 35.236111
max_line_length: 110
alphanum_fraction: 0.727237
hexsha: 5343c13e8e474004e5dc969475dce6a87967180c
size: 6,798
ext: py
lang: Python
max_stars_repo_path: neptunecontrib/monitoring/skopt.py
max_stars_repo_name: neptune-ai/neptune-contrib
max_stars_repo_head_hexsha: fe5c6853128020aaaa59b440cc5203b940dcd39a
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 22
max_stars_repo_stars_event_min_datetime: 2020-02-23T21:25:34.000Z
max_stars_repo_stars_event_max_datetime: 2021-06-11T16:34:27.000Z
max_issues_repo_path: neptunecontrib/monitoring/skopt.py
max_issues_repo_name: neptune-ai/neptune-contrib
max_issues_repo_head_hexsha: fe5c6853128020aaaa59b440cc5203b940dcd39a
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 29
max_issues_repo_issues_event_min_datetime: 2020-02-11T11:10:22.000Z
max_issues_repo_issues_event_max_datetime: 2021-10-03T09:01:28.000Z
max_forks_repo_path: neptunecontrib/monitoring/skopt.py
max_forks_repo_name: neptune-ai/neptune-contrib
max_forks_repo_head_hexsha: fe5c6853128020aaaa59b440cc5203b940dcd39a
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 7
max_forks_repo_forks_event_min_datetime: 2020-05-10T06:59:53.000Z
max_forks_repo_forks_event_max_datetime: 2021-06-11T16:34:32.000Z
content (the private _log_* helpers are truncated in this record):
```python
#
# Copyright (c) 2019, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings

import matplotlib.pyplot as plt
import neptune
import numpy as np
import skopt.plots as sk_plots
from skopt.utils import dump

from neptunecontrib.monitoring.utils import axes2fig, expect_not_a_run


def log_results(results, experiment=None, log_plots=True, log_pickle=True):
    """Logs runs results and parameters to neptune.

    Logs all hyperparameter optimization results to Neptune. Those include
    best score ('best_score' metric), best parameters ('best_parameters'
    property), convergence plot ('diagnostics' log), evaluations plot
    ('diagnostics' log), and objective plot ('diagnostics' log).

    Args:
        results('scipy.optimize.OptimizeResult'): Results object that is typically an output
            of the function like `skopt.forest_minimize(...)`
        experiment(`neptune.experiments.Experiment`): Neptune experiment. Default is None.
        log_plots: ('bool'): If True skopt plots will be logged to Neptune.
        log_pickle: ('bool'): if True pickled skopt results object will be logged to Neptune.

    Examples:
        Run skopt training::

            ...
            results = skopt.forest_minimize(objective, space,
                                            base_estimator='ET',
                                            n_calls=100, n_random_starts=10)

        Initialize Neptune::

            import neptune

            neptune.init(api_token='ANONYMOUS',
                         project_qualified_name='shared/showroom')
            neptune.create_experiment(name='optuna sweep')

        Send best parameters to Neptune::

            import neptunecontrib.monitoring.skopt as sk_utils

            sk_utils.log_results(results)

        You can explore an example experiment in Neptune:
        https://ui.neptune.ai/o/shared/org/showroom/e/SHOW-1065/logs
    """
    _exp = experiment if experiment else neptune

    expect_not_a_run(_exp)
    _log_best_score(results, _exp)
    _log_best_parameters(results, _exp)

    if log_plots:
        _log_plot_convergence(results, _exp)
        _log_plot_evaluations(results, _exp)
        _log_plot_regret(results, _exp)
        _log_plot_objective(results, _exp)

    if log_pickle:
        _log_results_object(results, _exp)
```
avg_line_length: 33.653465
max_line_length: 109
alphanum_fraction: 0.699912
hexsha: 534549f2acefb6ab739ca7a1aa4369dcb66352ae
size: 5,771
ext: py
lang: Python
max_stars_repo_path: snoopy/server/transforms/Maltego.py
max_stars_repo_name: aiddenkeli/Snoopy
max_stars_repo_head_hexsha: dd76180145981b3574b419edce39dbb060bd8c8c
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 432
max_stars_repo_stars_event_min_datetime: 2015-01-07T09:56:32.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-28T12:15:42.000Z
max_issues_repo_path: snoopy/server/transforms/Maltego.py
max_issues_repo_name: aiddenkeli/Snoopy
max_issues_repo_head_hexsha: dd76180145981b3574b419edce39dbb060bd8c8c
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 9
max_issues_repo_issues_event_min_datetime: 2015-01-31T10:07:28.000Z
max_issues_repo_issues_event_max_datetime: 2021-09-10T08:13:47.000Z
max_forks_repo_path: snoopy/server/transforms/Maltego.py
max_forks_repo_name: aiddenkeli/Snoopy
max_forks_repo_head_hexsha: dd76180145981b3574b419edce39dbb060bd8c8c
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 135
max_forks_repo_forks_event_min_datetime: 2015-01-07T15:06:35.000Z
max_forks_repo_forks_event_max_datetime: 2022-01-24T02:19:55.000Z
content (the transform classes are truncated in this record):
```python
#!/usr/bin/python
#
# This might be horrible code...
# ...but it works
# Feel free to re-write in a better way
# And if you want to - send it to us, we'll update ;)
# maltego@paterva.com (2010/10/18)
#

import sys
from xml.dom import minidom
```
avg_line_length: 28.428571
max_line_length: 234
alphanum_fraction: 0.665916
hexsha: 53472d85e82afcf0ecb7050477e184968c938897
size: 1,017
ext: py
lang: Python
max_stars_repo_path: metadeploy/api/migrations/0050_add_clickthrough_agreement.py
max_stars_repo_name: sfdc-qbranch/MetaDeploy
max_stars_repo_head_hexsha: d22547b3814dbec6aefa4d86b9f81c6f175c1b67
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: 33
max_stars_repo_stars_event_min_datetime: 2019-03-20T15:34:39.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-30T15:59:40.000Z
max_issues_repo_path: metadeploy/api/migrations/0050_add_clickthrough_agreement.py
max_issues_repo_name: sfdc-qbranch/MetaDeploy
max_issues_repo_head_hexsha: d22547b3814dbec6aefa4d86b9f81c6f175c1b67
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: 2,718
max_issues_repo_issues_event_min_datetime: 2019-02-27T19:46:07.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-11T23:18:09.000Z
max_forks_repo_path: metadeploy/api/migrations/0050_add_clickthrough_agreement.py
max_forks_repo_name: sfdc-qbranch/MetaDeploy
max_forks_repo_head_hexsha: d22547b3814dbec6aefa4d86b9f81c6f175c1b67
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: 28
max_forks_repo_forks_event_min_datetime: 2019-03-28T04:57:16.000Z
max_forks_repo_forks_event_max_datetime: 2022-02-04T16:49:25.000Z
content (the migration class itself is truncated in this record):
```python
# Generated by Django 2.1.5 on 2019-02-12 21:18

import django.db.models.deletion
from django.db import migrations, models
```
avg_line_length: 27.486486
max_line_length: 63
alphanum_fraction: 0.474926
hexsha: 5347385bc4a5e5ee6c5f4719e3b7b90b80842cc8
size: 813
ext: py
lang: Python
max_stars_repo_path: invenio_iiif/config.py
max_stars_repo_name: dfdan/invenio-iiif
max_stars_repo_head_hexsha: 2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
max_stars_repo_licenses: [ "MIT" ]
max_stars_count: 3
max_stars_repo_stars_event_min_datetime: 2019-07-25T16:25:22.000Z
max_stars_repo_stars_event_max_datetime: 2021-02-04T16:51:55.000Z
max_issues_repo_path: invenio_iiif/config.py
max_issues_repo_name: dfdan/invenio-iiif
max_issues_repo_head_hexsha: 2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
max_issues_repo_licenses: [ "MIT" ]
max_issues_count: 26
max_issues_repo_issues_event_min_datetime: 2018-04-10T14:46:34.000Z
max_issues_repo_issues_event_max_datetime: 2021-06-16T08:51:09.000Z
max_forks_repo_path: invenio_iiif/config.py
max_forks_repo_name: dfdan/invenio-iiif
max_forks_repo_head_hexsha: 2ea2747fd29ab03b1d38e0ca6d2a9c1506aa8cbc
max_forks_repo_licenses: [ "MIT" ]
max_forks_count: 22
max_forks_repo_forks_event_min_datetime: 2018-04-04T09:41:38.000Z
max_forks_repo_forks_event_max_datetime: 2021-11-25T09:33:40.000Z
content:
```python
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.

"""IIIF API for Invenio."""

IIIF_API_PREFIX = '/iiif/'
"""URL prefix to IIIF API."""

IIIF_UI_URL = '/api{}'.format(IIIF_API_PREFIX)
"""URL to IIIF API endpoint (allow hostname)."""

IIIF_PREVIEWER_PARAMS = {
    'size': '750,'
}
"""Parameters for IIIF image previewer extension."""

IIIF_PREVIEW_TEMPLATE = 'invenio_iiif/preview.html'
"""Template for IIIF image preview."""

IIIF_API_DECORATOR_HANDLER = 'invenio_iiif.handlers:protect_api'
"""Image opener handler decorator."""

IIIF_IMAGE_OPENER_HANDLER = 'invenio_iiif.handlers:image_opener'
"""Image opener handler function."""
```
avg_line_length: 27.1
max_line_length: 72
alphanum_fraction: 0.724477
hexsha: 5347c20f1c46f18f1b8fe3023d484d25d1b6adf3
size: 14,306
ext: py
lang: Python
max_stars_repo_path: pub_ingest.py
max_stars_repo_name: mconlon17/vivo-pub-ingest
max_stars_repo_head_hexsha: 7c03ecdd6dc5418121a6b92de1572d1cc63f5cb5
max_stars_repo_licenses: [ "BSD-3-Clause" ]
max_stars_count: null
max_stars_repo_stars_event_min_datetime: null
max_stars_repo_stars_event_max_datetime: null
max_issues_repo_path: pub_ingest.py
max_issues_repo_name: mconlon17/vivo-pub-ingest
max_issues_repo_head_hexsha: 7c03ecdd6dc5418121a6b92de1572d1cc63f5cb5
max_issues_repo_licenses: [ "BSD-3-Clause" ]
max_issues_count: 1
max_issues_repo_issues_event_min_datetime: 2015-04-04T01:38:51.000Z
max_issues_repo_issues_event_max_datetime: 2015-04-04T01:38:51.000Z
max_forks_repo_path: pubs/pub_ingest.py
max_forks_repo_name: mconlon17/vivo-1.5-improvement
max_forks_repo_head_hexsha: 44d8335eb7bbe518374a53c0e1f9f39014023ee7
max_forks_repo_licenses: [ "BSD-3-Clause" ]
max_forks_count: null
max_forks_repo_forks_event_min_datetime: null
max_forks_repo_forks_event_max_datetime: null
content (Python 2 source, reproduced as extracted):
#!/user/bin/env/python """ pub_ingest.py -- Read a bibtex file and make VIVO RDF The following objects will be made as needed: -- publisher -- journal -- information resource -- timestamp for the information resource -- people -- authorships -- concepts The resulting ADD and SUB RDF file can then be read into VIVO To Do -- Complete refactor as an update process. Create resuable parts so that a publication can be created from bibtex, doi or pmid -- Improve DateTimeValue accuracy. Currently all publications are entered as yearMonth precision. Sometimes we have more information, sometimes we have less. We should use the information as presented by the publisher, not overstate (yearMonth when there is only year) and not understate (yearMonth when we know the day). -- Reuse date objects -- only create dates when the appropriate date entity is not already in VIVO -- Update for VIVO-ISF -- Update or vivofoundation and vivopubs """ __author__ = "Michael Conlon" __copyright__ = "Copyright 2014, University of Florida" __license__ = "BSD 3-Clause license" __version__ = "1.3" import sys from datetime import datetime, date from pybtex.database.input import bibtex import tempita import vivotools MAX_AUTHORS = 50 publisher_report = {} journal_report = {} title_report = {} author_report = {} disambiguation_report = {} dictionaries = [] journal_dictionary = {} publisher_dictionary = {} title_dictionary = {} def open_files(bibtex_file_name): """ Give the name of the bibitex file to be used as input, generate the file names for rdf, rpt and lst. Return the open file handles """ base = bibtex_file_name[:bibtex_file_name.find('.')] rpt_file = open(base+'.rpt', 'w') lst_file = open(base+'.lst', 'w') rdf_file = open(base+'.rdf', 'w') return [rdf_file, rpt_file, lst_file] def update_disambiguation_report(authors, publication_uri): """ Given the authors structure and thte publication_uri, add to the report if any of the authors need to be disambiguated """ for value in authors.values(): if value[8] == "Disambig": if publication_uri in disambiguation_report: result = disambiguation_report[publication_uri] result[len(result.keys())+1] = value disambiguation_report[publication_uri] = result else: disambiguation_report[publication_uri] = {1:value} return # start here. Create a parser for bibtex and use it to read the file of # bibtex entries. open the output files print datetime.now(), "Read the BibTex" bibtex_file_name = sys.argv[1] [rdf_file, rpt_file, lst_file] = open_files(bibtex_file_name) parser = bibtex.Parser() bib_data = parser.parse_file(bibtex_file_name) bib_sorted = sorted(bib_data.entries.items(), key=lambda x: x[1].fields['title']) print >>rdf_file, "<!--", len(bib_data.entries.keys()),\ "publications to be processed -->" print datetime.now(), len(bib_data.entries.keys()),\ "publications to be processed." 
# make dictionaries for people, papers, publishers, journals, concepts print datetime.now(), "Creating the dictionaries" print datetime.now(), "Publishers" publisher_dictionary = vivotools.make_publisher_dictionary() print datetime.now(), "Journals" journal_dictionary = vivotools.make_journal_dictionary() print datetime.now(), "People" dictionaries = make_people_dictionaries() print datetime.now(), "Titles" title_dictionary = vivotools.make_title_dictionary() print datetime.now(), "Concepts" vivotools.make_concept_dictionary() # process the papers print >>rdf_file, vivotools.rdf_header() for key, value in bib_sorted: try: title = value.fields['title'].title() + " " except: title_report["No title"] = ["No Title", None, 1] print >>rdf_file, "<!-- No title found. No RDF necessary -->" continue title = abbrev_to_words(title) title = title[0:-1] if title in title_report: print >>rdf_file, "<!-- Title", title,\ "handled previously. No RDF necessary -->" title_report[title][2] = title_report[title][2] + 1 continue else: print >>rdf_file, "<!-- Begin RDF for " + title + " -->" print datetime.now(), "<!-- Begin RDF for " + title + " -->" document = {} document['title'] = title title_report[title] = ["Start", None, 1] [found, uri] = vivotools.find_title(title, title_dictionary) if not found: title_report[title][0] = "Create" # Create # Authors [author_rdf, authors] = make_author_rdf(value) document['authors'] = make_document_authors(authors) if count_uf_authors(authors) == 0: print >>rdf_file, "<!-- End RDF. No UF authors for " +\ title + " No RDF necessary -->" title_report[title][0] = "No UF Auth" continue update_author_report(authors) # Datetime [datetime_rdf, datetime_uri] = make_datetime_rdf(value, title) # Publisher [journal_create, journal_name, journal_uri] =\ make_journal_uri(value) [publisher_create, publisher, publisher_uri, publisher_rdf] =\ make_publisher_rdf(value) # Journal [journal_rdf, journal_uri] = make_journal_rdf(value,\ journal_create, journal_name, journal_uri) # Publisher/Journal bi-directional links publisher_journal_rdf = "" if journal_uri != "" and publisher_uri != "" and\ (journal_create or publisher_create): publisher_journal_rdf = \ make_publisher_journal_rdf(publisher_uri, journal_uri) # Authorships publication_uri = vivotools.get_vivo_uri() title_report[title][1] = publication_uri [authorship_rdf, authorship_uris] = make_authorship_rdf(authors,\ publication_uri) # AuthorInAuthorships author_in_authorship_rdf = make_author_in_authorship_rdf(authors,\ authorship_uris) # Journal/Publication bi-directional links if journal_uri != "" and publication_uri != "": journal_publication_rdf = \ make_journal_publication_rdf(journal_uri, publication_uri) # PubMed values pubmed_rdf = "" if 'doi' in value.fields: [pubmed_rdf, sub] = vivotools.update_pubmed(publication_uri,\ value.fields['doi']) if sub != "": raise Exception("Non empty subtraction RDF"+\ "for Update PubMed") # Publication publication_rdf = make_publication_rdf(value,\ title,publication_uri,datetime_uri,authorship_uris) print >>rdf_file, datetime_rdf, publisher_rdf, journal_rdf,\ publisher_journal_rdf, author_rdf, authorship_rdf,\ author_in_authorship_rdf, journal_publication_rdf,\ publication_rdf, pubmed_rdf print >>rdf_file, "<!-- End RDF for " + title + " -->" print >>lst_file, vivotools.string_from_document(document),\ 'VIVO uri', publication_uri, '\n' update_disambiguation_report(authors, publication_uri) else: title_report[title][0] = "Found" title_report[title][1] = uri print >>rdf_file, "<!-- Found: " + title + 
" No RDF necessary -->" print >>rdf_file, vivotools.rdf_footer() # # Reports # print >>rpt_file,""" Publisher Report Lists the publishers that appear in the bibtex file in alphabetical order. For each publisher, show the improved name, the number of papers in journals of this publisher, the action to be taken for the publisher and the VIVO URI -- the URI is the new URI to be created if Action is Create, otherwise it is the URI of the found publisher in VIVO. Publisher Papers Action VIVO URI ---------------------------------------------------------------------------------""" publisher_count = 0 actions = {} for publisher in sorted(publisher_report.keys()): publisher_count = publisher_count + 1 [create,uri,count] = publisher_report[publisher] if create: result = "Create" else: result = "Found " actions[result] = actions.get(result,0) + 1 print >>rpt_file, "{0:40}".format(publisher[0:40]),"{0:>3}".format(count),result,uri print >>rpt_file,"" print >>rpt_file, "Publisher count by action" print >>rpt_file, "" for action in sorted(actions): print >>rpt_file, action,actions[action] print >>rpt_file, publisher_count,"publisher(s)" print >>rpt_file, """ Journal Report Lists the journals that appear in the bibtex file in alphabetical order. For each journal, show the improved name, the number of papers t be linked to the journal, the action to be taken for the journal and the VIVO URI -- the URI is the new URI to be created if Action is Create, otherwise it is the URI of the found journal in VIVO. Journal Papers Action VIVO URI ---------------------------------------------------------------------------------""" journal_count = 0 actions = {} for journal in sorted(journal_report.keys()): journal_count = journal_count + 1 [create,uri,count] = journal_report[journal] if create: result = "Create" else: result = "Found " actions[result] = actions.get(result,0) + 1 print >>rpt_file, "{0:40}".format(journal[0:40]),"{0:>3}".format(count),result,uri print >>rpt_file, "" print >>rpt_file, "Journal count by action" print >>rpt_file, "" for action in sorted(actions): print >>rpt_file, action,actions[action] print >>rpt_file, journal_count,"journal(s)" print >>rpt_file, """ Title Report Lists the titles that appear in the bibtex file in alphabetical order. For each title, show the action to be taken, the number of times the title appears in the bibtex, the improved title and the VIVO URI of the publication -- the URI is the new URI to be created if action is Create, otherwise it is the URI of the found publication in VIVO. Action # Title and VIVO URI ---------------------------------------------------------------------------------""" title_count = 0 actions = {} for title in sorted(title_report.keys()): title_count = title_count +1 [action,uri,count] = title_report[title] actions[action] = actions.get(action,0) + 1 print >>rpt_file, "{0:>10}".format(action),title,uri print >>rpt_file, "" print >>rpt_file, "Title count by action" print >>rpt_file, "" for action in sorted(actions): print >>rpt_file, action,actions[action] print >>rpt_file, title_count,"title(s)" print >>rpt_file, """ Author Report For each author found in the bibtex file, show the author's name followed by the number of papers for the author in the bibtex to be entered, followed by a pair of results for each time the author appears on a paper in the bibtex. The result pair contains an action and a URI. The action is "non UF" if a non-UF author stub will be be created, the URI is the URI of the new author stub. 
Action "Make UF" if a new UF author stub will be created with the URI of the new author stub. "Found UF" indicate the author was found at the URI. "Disambig" if multiple UF people were found with the given name. The URI is the URI of one of the found people. Follow-up is needed to determine if correct and reassign author if not correct. Author Action URI Action URI ----------------------------------------------------------------------------------------------""" author_count = 0 actions = {} for author in sorted(author_report.keys()): author_count = author_count + 1 results = "" papers = len(author_report[author]) action = author_report[author][1][8] # 1st report, 8th value is action actions[action] = actions.get(action,0) + 1 for key in author_report[author].keys(): value = author_report[author][key] results = results + value[8] + " " + "{0:45}".format(value[9]) print >>rpt_file, "{0:25}".format(author),"{0:>3}".format(papers),results print >>rpt_file, "" print >>rpt_file, "Author count by action" print >>rpt_file, "" for action in sorted(actions): print >>rpt_file, action,actions[action] print >>rpt_file, author_count,"authors(s)" print >>rpt_file, """ Disambiguation Report For each publication with one or more authors to disambiguate, list the paper, and then the authors in question with each of the possible URIs to be disambiguated, show the URI of the paper, and then for each author that needs to be disambiguated on the paper, show the last name, first name and middle initial and the all the URIs in VIVO for UF persons with the same names. """ for uri in disambiguation_report.keys(): print >>rpt_file,"The publication at",uri,"has one or more authors in question" for key,value in disambiguation_report[uri].items(): uris = value[9].split(";") print >>rpt_file," ",value[4],value[5],value[6],":" for u in uris: person = vivotools.get_person(u) if 'last_name' not in person: person['last_name'] = "No last name" if 'middle_name' not in person: person['middle_name'] = "No middle name" if 'first_name' not in person: person['first_name'] = "No first name" if 'home_department_name' not in person: person['home_department_name'] = "No home department" npubs = len(person['authorship_uris']) print >>rpt_file," ",u,person['last_name'], \ person['first_name'],person['middle_name'], \ person['home_department_name'],"Number of pubs = ",npubs print >>rpt_file print >>rpt_file # # Close the files, we're done # rpt_file.close() rdf_file.close() lst_file.close()
avg_line_length: 37.255208
max_line_length: 97
alphanum_fraction: 0.636306
hexsha: 5348118dbe9a56351f72fa4c704f5f49e6815a7c
size: 63,527
ext: py
lang: Python
max_stars_repo_path: port/platform/common/automation/u_utils.py
max_stars_repo_name: u-blox/ubxlib
max_stars_repo_head_hexsha: 4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
max_stars_repo_licenses: [ "Apache-2.0" ]
max_stars_count: 91
max_stars_repo_stars_event_min_datetime: 2020-12-21T13:10:19.000Z
max_stars_repo_stars_event_max_datetime: 2022-03-24T23:27:13.000Z
max_issues_repo_path: port/platform/common/automation/u_utils.py
max_issues_repo_name: u-blox/ubxlib
max_issues_repo_head_hexsha: 4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
max_issues_repo_licenses: [ "Apache-2.0" ]
max_issues_count: 42
max_issues_repo_issues_event_min_datetime: 2021-01-04T13:35:18.000Z
max_issues_repo_issues_event_max_datetime: 2022-03-25T08:57:45.000Z
max_forks_repo_path: port/platform/common/automation/u_utils.py
max_forks_repo_name: u-blox/ubxlib
max_forks_repo_head_hexsha: 4dc1b16e6f12354b601cb1c9d799c10f4e2afb54
max_forks_repo_licenses: [ "Apache-2.0" ]
max_forks_count: 25
max_forks_repo_forks_event_min_datetime: 2021-01-02T12:37:34.000Z
max_forks_repo_forks_event_max_datetime: 2022-03-31T01:53:37.000Z
content (reproduced as extracted; truncated at the end of this section):
#!/usr/bin/env python '''Generally useful bits and bobs.''' import queue # For PrintThread and exe_run from time import sleep, time, gmtime, strftime # For lock timeout, exe_run timeout and logging from multiprocessing import RLock from copy import copy import threading # For PrintThread import sys import os # For ChangeDir, has_admin import stat # To help deltree out from collections import deque # For storing a window of debug from telnetlib import Telnet # For talking to JLink server import socket import shutil # To delete a directory tree import signal # For CTRL_C_EVENT import subprocess import platform # Figure out current OS import re # Regular Expression import serial # Pyserial (make sure to do pip install pyserial) import psutil # For killing things (make sure to do pip install psutil) import requests # For HTTP comms with a KMTronic box (do pip install requests) import u_settings # Since this function is used by the global variables below it needs # to be placed here. def is_linux(): '''Returns True when system is Linux''' return platform.system() == 'Linux' # Since this function is used by the global variables below it needs # to be placed here. def pick_by_os(linux=None, other=None): ''' This is a convenience function for selecting a value based on platform. As an example the line below will print out "Linux" when running on a Linux platform and "Not Linux" when running on some other platform: print( u_utils.pick_by_os(linux="Linux", other="Not Linux") ) ''' if is_linux(): return linux return other # The port that this agent service runs on # Deliberately NOT a setting, we need to be sure # everyone uses the same value AGENT_SERVICE_PORT = 17003 # The maximum number of characters that an agent will # use from controller_name when constructing a directory # name for a ubxlib branch to be checked out into AGENT_WORKING_SUBDIR_CONTROLLER_NAME_MAX_LENGTH = 4 # How long to wait for an install lock in seconds INSTALL_LOCK_WAIT_SECONDS = u_settings.INSTALL_LOCK_WAIT_SECONDS #(60 * 60) # The URL for Unity, the unit test framework UNITY_URL = u_settings.UNITY_URL #"https://github.com/ThrowTheSwitch/Unity" # The sub-directory that Unity is usually put in # (off the working directory) UNITY_SUBDIR = u_settings.UNITY_SUBDIR #"Unity" # The path to DevCon, a Windows tool that allows # USB devices to be reset, amongst other things DEVCON_PATH = u_settings.DEVCON_PATH #"devcon.exe" # The path to jlink.exe (or just the name 'cos it's on the path) JLINK_PATH = u_settings.JLINK_PATH #"jlink.exe" # The port number for SWO trace capture out of JLink JLINK_SWO_PORT = u_settings.JLINK_SWO_PORT #19021 # The port number for GDB control of ST-LINK GDB server STLINK_GDB_PORT = u_settings.STLINK_GDB_PORT #61200 # The port number for SWO trace capture out of ST-LINK GDB server STLINK_SWO_PORT = u_settings.STLINK_SWO_PORT #61300 # The format string passed to strftime() # for logging prints TIME_FORMAT = u_settings.TIME_FORMAT #"%Y-%m-%d_%H:%M:%S" # The default guard time waiting for a platform lock in seconds PLATFORM_LOCK_GUARD_TIME_SECONDS = u_settings.PLATFORM_LOCK_GUARD_TIME_SECONDS #60 * 60 # The default guard time for downloading to a target in seconds DOWNLOAD_GUARD_TIME_SECONDS = u_settings.DOWNLOAD_GUARD_TIME_SECONDS #60 # The default guard time for running tests in seconds RUN_GUARD_TIME_SECONDS = u_settings.RUN_GUARD_TIME_SECONDS #60 * 60 # The default inactivity timer for running tests in seconds RUN_INACTIVITY_TIME_SECONDS = u_settings.RUN_INACTIVITY_TIME_SECONDS #60 * 5 # The name of the 
#define that forms the filter string # for which tests to run FILTER_MACRO_NAME = u_settings.FILTER_MACRO_NAME #"U_CFG_APP_FILTER" # The name of the environment variable that indicates we're running under automation ENV_UBXLIB_AUTO = "U_UBXLIB_AUTO" # The time for which to wait for something from the # queue in exe_run(). If this is too short, in a # multiprocessing world or on a slow machine, it is # possible to miss things as the task putting things # on the queue may be blocked from doing so until # we've decided the queue has been completely emptied # and moved on EXE_RUN_QUEUE_WAIT_SECONDS = u_settings.EXE_RUN_QUEUE_WAIT_SECONDS #1 # The number of seconds a USB cutter and the bit positions of # a KMTronic box are switched off for HW_RESET_DURATION_SECONDS = u_settings.HW_RESET_DURATION_SECONDS # e.g. 5 # Executable file extension. This will be "" for Linux # and ".exe" for Windows EXE_EXT = pick_by_os(linux="", other=".exe") def keep_going(flag, printer=None, prompt=None): '''Check a keep_going flag''' do_not_stop = True if flag is not None and not flag.is_set(): do_not_stop = False if printer and prompt: printer.string("{}aborting as requested.".format(prompt)) return do_not_stop # subprocess arguments behaves a little differently on Linux and Windows # depending if a shell is used or not, which can be read here: # https://stackoverflow.com/a/15109975 # This function will compensate for these deviations def subprocess_osify(cmd, shell=True): ''' expects an array of strings being [command, param, ...] ''' if is_linux() and shell: line = '' for item in cmd: # Put everything in a single string and quote args containing spaces if ' ' in item: line += '\"{}\" '.format(item) else: line += '{} '.format(item) cmd = line return cmd def split_command_line_args(cmd_line): ''' Will split a command line string into a list of arguments. Quoted arguments will be preserved as one argument ''' return [p for p in re.split("( |\\\".*?\\\"|'.*?')", cmd_line) if p.strip()] def get_actual_path(path): '''Given a drive number return real path if it is a subst''' actual_path = path if is_linux(): return actual_path if os.name == 'nt': # Get a list of substs text = subprocess.check_output("subst", stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): # Lines should look like this: # Z:\: => C:\projects\ubxlib_priv # So, in this example, if we were given z:\blah # then the actual path should be C:\projects\ubxlib_priv\blah text = line.decode() bits = text.rsplit(": => ") if (len(bits) > 1) and (len(path) > 1) and \ (bits[0].lower()[0:2] == path[0:2].lower()): actual_path = bits[1] + path[2:] break return actual_path def get_instance_text(instance): '''Return the instance as a text string''' instance_text = "" for idx, item in enumerate(instance): if idx == 0: instance_text += str(item) else: instance_text += "." + str(item) return instance_text # Get a list of instances as a text string separated # by spaces. 
def get_instances_text(instances): '''Return the instances as a text string''' instances_text = "" for instance in instances: if instance: instances_text += " {}".format(get_instance_text(instance)) return instances_text def remove_readonly(func, path, exec_info): '''Help deltree out''' del exec_info os.chmod(path, stat.S_IWRITE) func(path) def deltree(directory, printer, prompt): '''Remove an entire directory tree''' tries = 3 success = False if os.path.isdir(directory): # Retry this as sometimes Windows complains # that the directory is not empty when it # it really should be, some sort of internal # Windows race condition while not success and (tries > 0): try: # Need the onerror bit on Winders, see # this Stack Overflow post: # https://stackoverflow.com/questions/1889597/deleting-directory-in-python shutil.rmtree(directory, onerror=remove_readonly) success = True except OSError as ex: if printer and prompt: printer.string("{}ERROR unable to delete \"{}\" {}: \"{}\"". format(prompt, directory, ex.errno, ex.strerror)) sleep(1) tries -= 1 else: success = True return success # Some list types aren't quite list types: for instance, # the lists returned by RPyC look like lists but they # aren't of type list and so "in", for instance, will fail. # This converts an instance list (i.e. a list-like object # containing items that are each another list-like object) # into a plain-old two-level list. def copy_two_level_list(instances_in): '''Convert instances_in into a true list''' instances_out = [] if instances_in: for item1 in instances_in: instances_out1 = [] for item2 in item1: instances_out1.append(item2) instances_out.append(copy(instances_out1)) return instances_out # Check if admin privileges are available, from: # https://stackoverflow.com/questions/2946746/python-checking-if-a-user-has-administrator-privileges def has_admin(): '''Check for administrator privileges''' admin = False if os.name == 'nt': try: # only Windows users with admin privileges can read the C:\windows\temp if os.listdir(os.sep.join([os.environ.get("SystemRoot", "C:\\windows"), "temp"])): admin = True except PermissionError: pass else: # Pylint will complain about the following line but # that's OK, it is only executed if we're NOT on Windows # and there the geteuid() method will exist if "SUDO_USER" in os.environ and os.geteuid() == 0: admin = True return admin # Reset a USB port with the given Device Description def usb_reset(device_description, printer, prompt): ''' Reset a device''' instance_id = None found = False success = False try: # Run devcon and parse the output to find the given device printer.string("{}running {} to look for \"{}\"...". \ format(prompt, DEVCON_PATH, device_description)) cmd = [DEVCON_PATH, "hwids", "=ports"] text = subprocess.check_output(subprocess_osify(cmd), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): # The format of a devcon entry is this: # # USB\VID_1366&PID_1015&MI_00\6&38E81674&0&0000 # Name: JLink CDC UART Port (COM45) # Hardware IDs: # USB\VID_1366&PID_1015&REV_0100&MI_00 # USB\VID_1366&PID_1015&MI_00 # Compatible IDs: # USB\Class_02&SubClass_02&Prot_00 # USB\Class_02&SubClass_02 # USB\Class_02 # # Grab what we hope is the instance ID line = line.decode() if line.startswith("USB"): instance_id = line else: # If the next line is the Name we want then we're done if instance_id and ("Name: " + device_description in line): found = True printer.string("{}\"{}\" found with instance ID \"{}\"". 
\ format(prompt, device_description, instance_id)) break instance_id = None if found: # Now run devcon to reset the device printer.string("{}running {} to reset device \"{}\"...". \ format(prompt, DEVCON_PATH, instance_id)) cmd = [DEVCON_PATH, "restart", "@" + instance_id] text = subprocess.check_output(subprocess_osify(cmd), stderr=subprocess.STDOUT, shell=False) # Has to be False or devcon won't work for line in text.splitlines(): printer.string("{}{}".format(prompt, line.decode())) success = True else: printer.string("{}device with description \"{}\" not found.". \ format(prompt, device_description)) except subprocess.CalledProcessError: printer.string("{} unable to find and reset device.".format(prompt)) return success # Open the required serial port. def open_serial(serial_name, speed, printer, prompt): '''Open serial port''' serial_handle = None text = "{}: trying to open \"{}\" as a serial port...". \ format(prompt, serial_name) try: return_value = serial.Serial(serial_name, speed, timeout=0.05) serial_handle = return_value printer.string("{} opened.".format(text)) except (ValueError, serial.SerialException) as ex: printer.string("{}{} while accessing port {}: {}.". format(prompt, type(ex).__name__, serial_handle.name, str(ex))) return serial_handle def open_telnet(port_number, printer, prompt): '''Open telnet port on localhost''' telnet_handle = None text = "{}trying to open \"{}\" as a telnet port on localhost...". \ format(prompt, port_number) try: telnet_handle = Telnet("localhost", int(port_number), timeout=5) if telnet_handle is not None: printer.string("{} opened.".format(text)) else: printer.string("{} failed.".format(text)) except (socket.error, socket.timeout, ValueError) as ex: printer.string("{}{} failed to open telnet {}: {}.". format(prompt, type(ex).__name__, port_number, str(ex))) return telnet_handle def install_lock_acquire(install_lock, printer, prompt, keep_going_flag=None): '''Attempt to acquire install lock''' timeout_seconds = INSTALL_LOCK_WAIT_SECONDS success = False if install_lock: printer.string("{}waiting for install lock...".format(prompt)) while not install_lock.acquire(False) and (timeout_seconds > 0) and \ keep_going(keep_going_flag, printer, prompt): sleep(1) timeout_seconds -= 1 if timeout_seconds > 0: printer.string("{}got install lock.".format(prompt)) success = True else: printer.string("{}failed to aquire install lock.".format(prompt)) else: printer.string("{}warning, there is no install lock.".format(prompt)) return success def install_lock_release(install_lock, printer, prompt): '''Release install lock''' if install_lock: install_lock.release() printer.string("{}install lock released.".format(prompt)) def fetch_repo(url, directory, branch, printer, prompt, submodule_init=True, force=False): '''Fetch a repo: directory can be relative or absolute, branch can be a hash''' got_code = False success = False dir_text = directory if dir_text == ".": dir_text = "this directory" if printer and prompt: printer.string("{}in directory {}, fetching" " {} to {}.".format(prompt, os.getcwd(), url, dir_text)) if not branch: branch = "master" if os.path.isdir(directory): # Update existing code with ChangeDir(directory): if printer and prompt: printer.string("{}updating code in {}...". 
format(prompt, dir_text)) target = branch if branch.startswith("#"): # Actually been given a branch, lose the # preceding # target = branch[1:len(branch)] # Try this once and, if it fails and force is set, # do a git reset --hard and try again tries = 1 if force: tries += 1 while tries > 0: try: call_list = [] call_list.append("git") call_list.append("fetch") call_list.append("origin") call_list.append(target) if printer and prompt: text = "" for item in call_list: if text: text += " " text += item printer.string("{}in {} calling {}...". format(prompt, os.getcwd(), text)) # Try to pull the code text = subprocess.check_output(subprocess_osify(call_list), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): if printer and prompt: printer.string("{}{}".format(prompt, line.decode())) got_code = True except subprocess.CalledProcessError as error: if printer and prompt: printer.string("{}git returned error {}: \"{}\"". format(prompt, error.returncode, error.output)) if got_code: tries = 0 else: if force: # git reset --hard printer.string("{}in directory {} calling git reset --hard...". \ format(prompt, os.getcwd())) try: text = subprocess.check_output(subprocess_osify(["git", "reset", "--hard"]), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): if printer and prompt: printer.string("{}{}".format(prompt, line.decode())) except subprocess.CalledProcessError as error: if printer and prompt: printer.string("{}git returned error {}: \"{}\"". format(prompt, error.returncode, error.output)) force = False tries -= 1 if not got_code: # If we still haven't got the code, delete the # directory for a true clean start deltree(directory, printer, prompt) if not os.path.isdir(directory): # Clone the repo if printer and prompt: printer.string("{}cloning from {} into {}...". format(prompt, url, dir_text)) try: text = subprocess.check_output(subprocess_osify(["git", "clone", "-q", url, directory]), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): if printer and prompt: printer.string("{}{}".format(prompt, line.decode())) got_code = True except subprocess.CalledProcessError as error: if printer and prompt: printer.string("{}git returned error {}: \"{}\"". format(prompt, error.returncode, error.output)) if got_code and os.path.isdir(directory): # Check out the correct branch and recurse submodules with ChangeDir(directory): target = "origin/" + branch if branch.startswith("#"): # Actually been given a branch, so lose the # "origin/" and the preceding # target = branch[1:len(branch)] if printer and prompt: printer.string("{}checking out {}...". format(prompt, target)) try: call_list = ["git", "-c", "advice.detachedHead=false", "checkout", "--no-progress"] if submodule_init: call_list.append("--recurse-submodules") printer.string("{}also recursing sub-modules (can take some time" \ " and gives no feedback).".format(prompt)) call_list.append(target) if printer and prompt: text = "" for item in call_list: if text: text += " " text += item printer.string("{}in {} calling {}...". format(prompt, os.getcwd(), text)) text = subprocess.check_output(subprocess_osify(call_list), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): if printer and prompt: printer.string("{}{}".format(prompt, line.decode())) success = True except subprocess.CalledProcessError as error: if printer and prompt: printer.string("{}git returned error {}: \"{}\"". 
format(prompt, error.returncode, error.output)) return success def exe_where(exe_name, help_text, printer, prompt): '''Find an executable using where.exe or which on linux''' success = False try: printer.string("{}looking for \"{}\"...". \ format(prompt, exe_name)) # See here: # https://stackoverflow.com/questions/14928860/passing-double-quote-shell-commands-in-python-to-subprocess-popen # ...for why the construction "".join() is necessary when # passing things which might have spaces in them. # It is the only thing that works. if is_linux(): cmd = ["which {}".format(exe_name.replace(":", "/"))] printer.string("{}detected linux, calling \"{}\"...".format(prompt, cmd)) else: cmd = ["where", "".join(exe_name)] printer.string("{}detected nonlinux, calling \"{}\"...".format(prompt, cmd)) text = subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): printer.string("{}{} found in {}".format(prompt, exe_name, line.decode())) success = True except subprocess.CalledProcessError: if help_text: printer.string("{}ERROR {} not found: {}". \ format(prompt, exe_name, help_text)) else: printer.string("{}ERROR {} not found". \ format(prompt, exe_name)) return success def exe_version(exe_name, version_switch, printer, prompt): '''Print the version of a given executable''' success = False if not version_switch: version_switch = "--version" try: text = subprocess.check_output(subprocess_osify(["".join(exe_name), version_switch]), stderr=subprocess.STDOUT, shell=True) # Jenkins hangs without this for line in text.splitlines(): printer.string("{}{}".format(prompt, line.decode())) success = True except subprocess.CalledProcessError: printer.string("{}ERROR {} either not found or didn't like {}". \ format(prompt, exe_name, version_switch)) return success def exe_terminate(process_pid): '''Jonathan's killer''' process = psutil.Process(process_pid) for proc in process.children(recursive=True): proc.terminate() process.terminate() def read_from_process_and_queue(process, read_queue): '''Read from a process, non-blocking''' while process.poll() is None: string = process.stdout.readline().decode() if string and string != "": read_queue.put(string) else: sleep(0.1) def queue_get_no_exception(the_queue, block=True, timeout=None): '''A version of queue.get() that doesn't throw an Empty exception''' thing = None try: thing = the_queue.get(block=block, timeout=timeout) except queue.Empty: pass return thing def capture_env_var(line, env, printer, prompt): '''A bit of exe_run that needs to be called from two places''' # Find a KEY=VALUE bit in the line, # parse it out and put it in the dictionary # we were given pair = line.split('=', 1) if len(pair) == 2: env[pair[0]] = pair[1].rstrip() else: printer.string("{}WARNING: not an environment variable: \"{}\"". format(prompt, line)) # Note: if returned_env is given then "set" # will be executed after the exe and the environment # variables will be returned in it. The down-side # of this is that the return value of the exe is, # of course, lost. 
def exe_run(call_list, guard_time_seconds=None, printer=None, prompt=None,
            shell_cmd=False, set_env=None, returned_env=None,
            bash_cmd=False, keep_going_flag=None):
    '''Call an executable, printing out what it does'''
    success = False
    start_time = time()
    flibbling = False
    kill_time = None
    read_time = start_time

    if returned_env is not None:
        # The caller wants the environment after the
        # command has run, so, from this post:
        # https://stackoverflow.com/questions/1214496/how-to-get-environment-from-a-subprocess
        # append a tag that we can detect
        # to the command and then call set,
        # from which we can parse the environment
        call_list.append("&&")
        call_list.append("echo")
        call_list.append("flibble")
        call_list.append("&&")
        if is_linux():
            call_list.append("env")
            bash_cmd = True
        else:
            call_list.append("set")
        # I've seen output from set get lost,
        # possibly because the process ending
        # is asynchronous with stdout,
        # so add a delay here as well
        call_list.append("&&")
        call_list.append("sleep")
        call_list.append("2")

    try:
        popen_keywords = {
            'stdout': subprocess.PIPE,
            'stderr': subprocess.STDOUT,
            'shell': shell_cmd,
            'env': set_env,
            # Must be an absolute path: a relative "bin/bash" would only
            # resolve if the current directory happened to contain it
            'executable': "/bin/bash" if bash_cmd else None
        }
        # Call the thang
        # Note: used to have bufsize=1 here but it turns out
        # that is ignored 'cos the output is considered
        # binary.  Seems to work in any case, I guess
        # Winders, at least, is in any case line-buffered.
        process = subprocess.Popen(subprocess_osify(call_list, shell=shell_cmd),
                                   **popen_keywords)

        if printer:
            printer.string("{}{}, pid {} started with guard time {} second(s)". \
                           format(prompt, call_list[0], process.pid,
                                  guard_time_seconds))
        # This is over complex but, unfortunately, necessary.
        # At least one thing that we try to run, nrfjprog, can
        # crash silently: just hangs and sends no output.  However
        # it also doesn't flush and close stdout and so read(1)
        # will hang, meaning we can't read its output as a means
        # to check that it has hung.
        # So, here we poll for the return value, which is normally
        # how things will end, and we start another thread which
        # reads from the process's stdout.  If the thread sees
        # nothing for guard_time_seconds then we terminate the
        # process.
        read_queue = queue.Queue()
        read_thread = threading.Thread(target=read_from_process_and_queue,
                                       args=(process, read_queue))
        read_thread.start()
        while process.poll() is None:
            if keep_going_flag is None or keep_going(keep_going_flag, printer, prompt):
                if guard_time_seconds and (kill_time is None) and \
                   ((time() - start_time > guard_time_seconds) or
                    (time() - read_time > guard_time_seconds)):
                    kill_time = time()
                    if printer:
                        printer.string("{}guard time of {} second(s)." \
                                       " expired, stopping {}...".
                                       format(prompt, guard_time_seconds,
                                              call_list[0]))
                    exe_terminate(process.pid)
            else:
                exe_terminate(process.pid)
            line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
            read_time = time()
            while line is not None:
                line = line.rstrip()
                if flibbling:
                    capture_env_var(line, returned_env, printer, prompt)
                else:
                    if returned_env is not None and "flibble" in line:
                        flibbling = True
                    else:
                        printer.string("{}{}".format(prompt, line))
                line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS)
                read_time = time()
            sleep(0.1)

        # Can't join() read_thread here as it might have
        # blocked on a read() (if nrfjprog has anything to
        # do with it).  It will be tidied up when this process
        # exits.
# There may still be stuff on the queue, read it out here line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS) while line is not None: line = line.rstrip() if flibbling: capture_env_var(line, returned_env, printer, prompt) else: if returned_env is not None and "flibble" in line: flibbling = True else: printer.string("{}{}".format(prompt, line)) line = queue_get_no_exception(read_queue, True, EXE_RUN_QUEUE_WAIT_SECONDS) # There may still be stuff in the buffer after # the application has finished running so flush that # out here line = process.stdout.readline().decode() while line: line = line.rstrip() if flibbling: capture_env_var(line, returned_env, printer, prompt) else: if returned_env is not None and "flibble" in line: flibbling = True else: printer.string("{}{}".format(prompt, line)) line = process.stdout.readline().decode() if (process.poll() == 0) and kill_time is None: success = True if printer: printer.string("{}{}, pid {} ended with return value {}.". \ format(prompt, call_list[0], process.pid, process.poll())) except ValueError as ex: if printer: printer.string("{}failed: {} while trying to execute {}.". \ format(prompt, type(ex).__name__, str(ex))) except KeyboardInterrupt as ex: process.kill() raise KeyboardInterrupt from ex return success def set_process_prio_high(): '''Set the priority of the current process to high''' if is_linux(): print("Setting process priority currently not supported for Linux") # It should be possible to set prio with: # psutil.Process().nice(-10) # However we get "[Errno 13] Permission denied" even when run as root else: psutil.Process().nice(psutil.HIGH_PRIORITY_CLASS) def set_process_prio_normal(): '''Set the priority of the current process to normal''' if is_linux(): print("Setting process priority currently not supported for Linux") # It should be possible to set prio with: # psutil.Process().nice(0) # However we get "[Errno 13] Permission denied" even when run as root else: psutil.Process().nice(psutil.NORMAL_PRIORITY_CLASS) # Simple SWO decoder: only handles single bytes of application # data at a time, i.e. what ITM_SendChar() sends. # This stolen from here: # https://stackoverflow.com/questions/431684/how-do-i-change-the-working-directory-in-python def wait_for_completion(_list, purpose, guard_time_seconds, printer, prompt, keep_going_flag): '''Wait for a completion list to empty''' completed = False if len(_list) > 0: timeout_seconds = guard_time_seconds printer.string("{}waiting up to {} second(s)" \ " for {} completion...". \ format(prompt, guard_time_seconds, purpose)) count = 0 while (len(_list) > 0) and \ ((guard_time_seconds == 0) or (timeout_seconds > 0)) and \ keep_going(keep_going_flag, printer, prompt): sleep(1) timeout_seconds -= 1 count += 1 if count == 30: list_text = "" for item in _list: if list_text: list_text += ", " list_text += str(item) printer.string("{}still waiting {} second(s)" \ " for {} to complete (waiting" \ " for {}).". 
\ format(prompt, timeout_seconds, purpose, list_text)) count = 0 if len(_list) == 0: completed = True printer.string("{}{} completed.".format(prompt, purpose)) return completed def reset_nrf_target(connection, printer, prompt): '''Reset a Nordic NRFxxx target''' call_list = [] printer.string("{}resetting target...".format(prompt)) # Assemble the call list call_list.append("nrfjprog") call_list.append("--reset") if connection and "debugger" in connection and connection["debugger"]: call_list.append("-s") call_list.append(connection["debugger"]) # Print what we're gonna do tmp = "" for item in call_list: tmp += " " + item printer.string("{}in directory {} calling{}". \ format(prompt, os.getcwd(), tmp)) # Call it return exe_run(call_list, 60, printer, prompt) def usb_cutter_reset(usb_cutter_id_strs, printer, prompt): '''Cut and then un-cut USB cables using Cleware USB cutters''' # First switch the USB cutters off action = "1" count = 0 call_list_root = ["usbswitchcmd"] call_list_root.append("-s") call_list_root.append("-n") while count < 2: for usb_cutter_id_str in usb_cutter_id_strs: call_list = call_list_root.copy() call_list.append(usb_cutter_id_str) call_list.append(action) # Print what we're gonna do tmp = "" for item in call_list: tmp += " " + item if printer: printer.string("{}in directory {} calling{}". \ format(prompt, os.getcwd(), tmp)) # Set shell to keep Jenkins happy exe_run(call_list, 0, printer, prompt, shell_cmd=True) # Wait 5ish seconds if printer: printer.string("{}waiting {} second(s)...". \ format(prompt, HW_RESET_DURATION_SECONDS)) sleep(HW_RESET_DURATION_SECONDS) # "0" to switch the USB cutters on again action = "0" count += 1 def kmtronic_reset(ip_address, hex_bitmap, printer, prompt): '''Cut and then un-cut power using a KMTronic box''' # KMTronic is a web relay box which will be controlling # power to, for instance, EVKs The last byte of the URL # is a hex bitmap of the outputs where 0 sets off and 1 # sets on # Take only the last two digits of the hex bitmap hex_bitmap_len = len(hex_bitmap) hex_bitmap = hex_bitmap[hex_bitmap_len - 2:hex_bitmap_len] kmtronic_off = "http://" + ip_address + "FFE0" + hex_bitmap kmtronic_on = "http://" + ip_address + "FFE0" + "{0:x}".format(int(hex_bitmap, 16) ^ 0xFF) try: # First switch the given bit positions off if printer: printer.string("{}sending {}". \ format(prompt, kmtronic_off)) response = requests.get(kmtronic_off) # Wait 5ish seconds if printer: printer.string("{}...received response {}, waiting {} second(s)...". \ format(prompt, response.status_code, HW_RESET_DURATION_SECONDS)) sleep(HW_RESET_DURATION_SECONDS) # Switch the given bit positions on if printer: printer.string("{}sending {}".format(prompt, kmtronic_on)) response = requests.get(kmtronic_on) if printer: printer.string("{}...received response {}.". \ format(prompt, response.status_code)) except requests.ConnectionError: if printer: printer.string("{}unable to connect to KMTronic box at {}.". \ format(prompt, ip_address)) # Look for a single line anywhere in message # beginning with "test: ". This must be followed by # "x.y.z a.b.c m.n.o" (i.e. instance IDs space separated) # and then an optional "blah" filter string, or just "*" # and an optional "blah" filter string or "None". # Valid examples are: # # test: 1 # test: 1 3 7 # test: 1.0.3 3 7.0 # test: 1 2 example # test: 1.1 8 portInit # test: * # test: * port # test: none # # Filter strings must NOT begin with a digit. # There cannot be more than one * or a * with any other instance. 
# There can only be one filter string. # Only whitespace is expected after this on the line. # Anything else is ignored. # Populates instances with the "0 4.5 13.5.1" bit as instance # entries [[0], [4, 5], [13, 5, 1]] and returns the filter # string, if any. def commit_message_parse(message, instances, printer=None, prompt=None): '''Find stuff in a commit message''' instances_all = False instances_local = [] filter_string_local = None found = False if message: # Search through message for a line beginning # with "test:" if printer: printer.string("{}### parsing message to see if it contains a test directive...". \ format(prompt)) lines = message.split("\\n") for idx1, line in enumerate(lines): if printer: printer.string("{}text line {}: \"{}\"".format(prompt, idx1 + 1, line)) if line.lower().startswith("test:"): found = True instances_all = False # Pick through what follows parts = line[5:].split() for part in parts: if instances_all and (part[0].isdigit() or part == "*" or part.lower() == "none"): # If we've had a "*" and this is another one # or it begins with a digit then this is # obviously not a "test:" line, # leave the loop and try again. instances_local = [] filter_string_local = None if printer: printer.string("{}...badly formed test directive, ignoring.". \ format(prompt)) found = False break if filter_string_local: # If we've had a filter string then nothing # must follow so this is not a "test:" line, # leave the loop and try again. instances_local = [] filter_string_local = None if printer: printer.string("{}...extraneous characters after test directive," \ " ignoring.".format(prompt)) found = False break if part[0].isdigit(): # If this part begins with a digit it could # be an instance containing numbers instance = [] bad = False for item in part.split("."): try: instance.append(int(item)) except ValueError: # Some rubbish, not a test line so # leave the loop and try the next # line bad = True break if bad: instances_local = [] filter_string_local = None if printer: printer.string("{}...badly formed test directive, ignoring.". \ format(prompt)) found = False break if instance: instances_local.append(instance[:]) elif part == "*": if instances_local: # If we've already had any instances # this is obviously not a test line, # leave the loop and try again instances_local = [] filter_string_local = None if printer: printer.string("{}...badly formed test directive, ignoring.". \ format(prompt)) found = False break # If we haven't had any instances and # this is a * then it means "all" instances_local.append(part) instances_all = True elif part.lower() == "none": if instances_local: # If we've already had any instances # this is obviously not a test line, # leave the loop and try again if printer: printer.string("{}...badly formed test directive, ignoring.". \ format(prompt)) found = False instances_local = [] filter_string_local = None break elif instances_local and not part == "*": # If we've had an instance and this # is not a "*" then this must be a # filter string filter_string_local = part else: # Found some rubbish, not a "test:" # line after all, leave the loop # and try the next line instances_local = [] filter_string_local = None if printer: printer.string("{}...badly formed test directive, ignoring.". 
\ format(prompt)) found = False break if found: text = "found test directive with" if instances_local: text += " instance(s)" + get_instances_text(instances_local) if filter_string_local: text += " and filter \"" + filter_string_local + "\"" else: text += " instances \"None\"" if printer: printer.string("{}{}.".format(prompt, text)) break if printer: printer.string("{}no test directive found".format(prompt)) if found and instances_local: instances.extend(instances_local[:]) return found, filter_string_local
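
# The block below is an illustrative, hedged usage sketch of
# commit_message_parse() above; it is not part of the original module.
# MinimalPrinter is a hypothetical stand-in for whatever printer object
# the surrounding scripts normally pass in (assumed here to need only
# a .string() method, based on how printer is used throughout this file).

class MinimalPrinter:
    '''Tiny printer stand-in exposing the .string() method used above'''
    def string(self, text):
        print(text)

if __name__ == "__main__":
    # A commit message whose second line carries a test directive; note
    # the literal "\n" (backslash-n) separators, matching the
    # message.split("\\n") call in commit_message_parse()
    message = "Fix GNSS init\\ntest: 1.0.3 3 7.0 portInit\\nmore detail"
    instances = []
    found, filter_string = commit_message_parse(message, instances,
                                                MinimalPrinter(), "EXAMPLE: ")
    # Expected, per the parsing rules described above: found is True,
    # instances is [[1, 0, 3], [3], [7, 0]] and filter_string is "portInit"
    print(found, instances, filter_string)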
42.865722
120
0.533332
53481a8ce6431996b6ceac97f012f4f1b1b0f592
8,765
py
Python
faigler_mazeh.py
tcjansen/beer
c6421371b6506cef1adf88cefa9a55db2f04e2dc
[ "MIT" ]
null
null
null
faigler_mazeh.py
tcjansen/beer
c6421371b6506cef1adf88cefa9a55db2f04e2dc
[ "MIT" ]
null
null
null
faigler_mazeh.py
tcjansen/beer
c6421371b6506cef1adf88cefa9a55db2f04e2dc
[ "MIT" ]
null
null
null
import numpy as np import astropy.modeling.blackbody as bb import astropy.constants as const from astropy.io import fits from scipy.interpolate import interp2d def get_a(P, M_star, M_p): """ Use Kepler's third law to derive the star-planet separation. """ return (P ** 2 * const.G.value * (M_star + M_p) / (4 * np.pi ** 2)) ** (1/3)
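
# Illustrative, hedged usage sketch of get_a() above; not part of the
# original file. The parameters below are made-up hot-Jupiter-like values
# chosen only to show the expected units (SI throughout, as implied by
# the use of const.G.value), and the block relies on the
# astropy.constants import at the top of this file.

if __name__ == "__main__":
    P = 3.0 * 86400.0           # assumed orbital period: 3 days, in seconds
    M_star = const.M_sun.value  # assumed one-solar-mass host star, in kg
    M_p = const.M_jup.value     # assumed one-Jupiter-mass planet, in kg
    a = get_a(P, M_star, M_p)
    # Roughly 6.1e9 m, i.e. about 0.04 au, for these assumed values
    print("a = {:.3e} m = {:.3f} au".format(a, a / const.au.value))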
31.989051
89
0.664803
534843a13bac167037ca6701e9e5332c6dec3235
2,986
py
Python
src/vanilla_pytorch/prune_model.py
f2010126/LTH_Master
709472e7e7962fbf3a56a620c536fb03d359734f
[ "MIT" ]
null
null
null
src/vanilla_pytorch/prune_model.py
f2010126/LTH_Master
709472e7e7962fbf3a56a620c536fb03d359734f
[ "MIT" ]
1
2021-06-30T13:35:32.000Z
2021-06-30T13:35:32.000Z
src/vanilla_pytorch/prune_model.py
f2010126/LTH_Master
709472e7e7962fbf3a56a620c536fb03d359734f
[ "MIT" ]
1
2021-06-30T13:22:15.000Z
2021-06-30T13:22:15.000Z
import torch.nn.utils.prune as prune
import torch
from src.vanilla_pytorch.utils import count_rem_weights
from src.vanilla_pytorch.models.linearnets import LeNet, init_weights
from src.vanilla_pytorch.models.resnets import Resnets

def get_masks(model, prune_amts=None):
    """
    Prune the lowest p% weights by magnitude per layer

    :param model: model to prune
    :param prune_amts: dictionary of per-layer-type prune rates;
                       defaults to the paper's rate of 0.2 for each layer type
    :return: the created masks. model has served its purpose.
    """
    # TODO: Adjust pruning with output layer
    if prune_amts is None:
        # ie dict is empty, use the default prune rate = 0.2
        prune_amts = {"linear": 0.2, "conv": 0.2, "last": 0.2}

    for i, (name, module) in enumerate(model.named_modules()):
        # prune 20% of connections in all 2D-conv layers
        if isinstance(module, torch.nn.Conv2d):
            module = prune.l1_unstructured(module, name='weight', amount=prune_amts['conv'])
        # prune 20% of connections in all linear layers
        elif isinstance(module, torch.nn.Linear):
            module = prune.l1_unstructured(module, name='weight', amount=prune_amts['linear'])

    masks = list(model.named_buffers())
    remove_pruning(model)
    return masks

if __name__ == '__main__':
    net = Resnets(in_channels=3)
    net.apply(init_weights)
    prune_rate = 0.8
    prune_custom = {"linear": 0.2, "conv": 0.2, "last": 0.1}
    for i in range(3):
        masks = get_masks(net, prune_amts=prune_custom)
        print(f"Count zero : {count_rem_weights(net)}")
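
# Hedged, self-contained sketch (not part of the original module): shows
# the same torch.nn.utils.prune.l1_unstructured call used in get_masks()
# on a standalone layer, so the mask/zeroed-weight effect can be inspected
# without the repo's models. The layer size here is arbitrary.

def _prune_demo():
    layer = torch.nn.Linear(10, 10)
    prune.l1_unstructured(layer, name='weight', amount=0.2)
    # l1_unstructured registers a 'weight_mask' buffer and reparametrizes
    # .weight as weight_orig * weight_mask, zeroing the smallest 20%
    zero_fraction = float((layer.weight == 0).float().mean())
    print(f"fraction of zeroed weights: {zero_fraction:.2f}")  # ~0.20
    prune.remove(layer, 'weight')  # make the pruning permanent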
38.779221
101
0.662425
534844fa3b3f1c68231a812a9b687424b61ad180
13,681
py
Python
Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py
CitrusAqua/mol-infer
6d5411a2cdc7feda418f9413153b1b66b45a2e96
[ "MIT" ]
null
null
null
Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py
CitrusAqua/mol-infer
6d5411a2cdc7feda418f9413153b1b66b45a2e96
[ "MIT" ]
null
null
null
Grid-neighbor-search/GNS/read_instance_2layer_2LMM_L.py
CitrusAqua/mol-infer
6d5411a2cdc7feda418f9413153b1b66b45a2e96
[ "MIT" ]
null
null
null
""" read_instance_BH-cyclic.py """ ''' [seed graph] V_C : "V_C" E_C : "E_C" [core specification] ell_LB : "\ell_{\rm LB}" ell_UB : "\ell_{\rm UB}" cs_LB : "\textsc{cs}_{\rm LB}" cs_UB : "\textsc{cs}_{\rm UB}" ''' import sys # prepare a set of chemical rooted tree if __name__=="__main__": V_C, E_C, \ E_ge_two, E_ge_one, E_zero_one, E_equal_one, \ I_ge_two, I_ge_one, I_zero_one, I_equal_one, \ ell_LB, ell_UB, n_LB_int, n_UB_int, \ n_LB, n_star, rho, \ ch_LB, ch_UB, bl_LB, bl_UB, \ Lambda, Lambda_dg_int, Gamma_int_ac, Gamma_int, \ Lambda_star, na_LB, na_UB, Lambda_int, \ na_LB_int, na_UB_int, ns_LB_int, ns_UB_int, \ ac_LB_int, ac_UB_int, ec_LB_int, ec_UB_int, \ bd2_LB, bd2_UB, bd3_LB, bd3_UB, dg_LB, dg_UB = read_seed_graph(sys.argv[1]) set_F, psi_epsilon, Code_F, n_psi, deg_r, \ beta_r, atom_r, ht, Lambda_ex = prepare_fringe_trees(sys.argv[2]) # print(V_C) # print(E_C) # print(E_ge_two) # print(E_ge_one) # print(E_zero_one) # print(E_equal_one) # print(ell_LB) # print(ell_UB) # print(bl_UB) for psi in (set_F + [psi_epsilon]): print(str(Code_F[psi]) + " " + str(n_psi[Code_F[psi]]) + " " + \ str(ht[Code_F[psi]]) + " " + str(atom_r[Code_F[psi]]) + " " + \ str(deg_r[Code_F[psi]]) + " " + str(beta_r[Code_F[psi]])) # print(Lambda_ex) # set_F_v = {v : set_F for v in V_C} # set_F_E = set_F # n_C = max(psi.numVertex - 1 for v in V_C for psi in set_F_v[v]) # n_T = max(psi.numVertex - 1 for psi in set_F_E) # n_F = max(psi.numVertex - 1 for psi in set_F_E) # print(str(n_C) + " " + str(n_T) + " " + str(n_F)) MAX_VAL = 4 val = {"C": 4, "O": 2, "N": 3} n_H = dict() na_alpha_ex = {ele : {i + 1 : 0} for i in range(len(set_F)) for ele in Lambda_ex} for i, psi in enumerate(set_F): n_H_tmp = {d : 0 for d in range(MAX_VAL)} na_ex_tmp = {ele : 0 for ele in Lambda_ex} for u, (ele, dep) in enumerate(psi.vertex[1:]): beta_tmp = 0 na_ex_tmp[ele] += 1 for v in psi.adj[u + 1]: beta_tmp += psi.beta[u + 1][v] d_tmp = val[ele] - beta_tmp n_H_tmp[d_tmp] += 1 for ele, d in na_alpha_ex.items(): d[i + 1] = na_ex_tmp[ele] n_H[i + 1] = n_H_tmp print(n_H) print(na_alpha_ex)
29.421505
133
0.493166
53485f957146c431dd0276a23463daf3fd0b5d3c
527
py
Python
gamma/system_input.py
ArtBIT/gamma
4ec03251fcd46cd7ae7b5123ad101064b0f9bdd1
[ "MIT" ]
15
2021-07-19T05:54:00.000Z
2022-03-16T10:24:30.000Z
gamma/system_input.py
ArtBIT/gamma
4ec03251fcd46cd7ae7b5123ad101064b0f9bdd1
[ "MIT" ]
1
2022-02-07T21:27:42.000Z
2022-02-09T12:32:28.000Z
gamma/system_input.py
ArtBIT/gamma
4ec03251fcd46cd7ae7b5123ad101064b0f9bdd1
[ "MIT" ]
3
2022-01-13T10:12:02.000Z
2022-02-08T11:55:29.000Z
from .system import * from .colours import *
23.954545
65
0.620493
53487b0b2e562895d1a372a23c376324cd33f385
3,484
py
Python
tensorflow_federated/python/research/utils/checkpoint_utils_test.py
mcognetta/federated
fa0c1a00b5d77768bc2f38f503f3ef1a65693945
[ "Apache-2.0" ]
null
null
null
tensorflow_federated/python/research/utils/checkpoint_utils_test.py
mcognetta/federated
fa0c1a00b5d77768bc2f38f503f3ef1a65693945
[ "Apache-2.0" ]
null
null
null
tensorflow_federated/python/research/utils/checkpoint_utils_test.py
mcognetta/federated
fa0c1a00b5d77768bc2f38f503f3ef1a65693945
[ "Apache-2.0" ]
null
null
null
# Lint as: python3 # Copyright 2019, The TensorFlow Federated Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for ServerState save.""" import functools import os import attr import tensorflow as tf import tensorflow_federated as tff from tensorflow_federated.python.examples.mnist import models from tensorflow_federated.python.research.utils import checkpoint_utils if __name__ == '__main__': tf.compat.v1.enable_v2_behavior() tf.test.main()
35.55102
77
0.751148
534883bea976b0a78d54a9c4ba718667cfc4884f
2,923
py
Python
website/models/user.py
alexli0707/pyforum
4f5ea4a0b07e094e24410ae699016590b9c20d59
[ "Apache-2.0" ]
4
2016-10-13T02:03:55.000Z
2017-04-05T03:21:46.000Z
website/models/user.py
alexli0707/pyforum
4f5ea4a0b07e094e24410ae699016590b9c20d59
[ "Apache-2.0" ]
null
null
null
website/models/user.py
alexli0707/pyforum
4f5ea4a0b07e094e24410ae699016590b9c20d59
[ "Apache-2.0" ]
1
2019-01-01T09:36:28.000Z
2019-01-01T09:36:28.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- import peewee from flask import current_app,abort from flask.ext.login import AnonymousUserMixin, UserMixin from itsdangerous import TimedJSONWebSignatureSerializer as Serializer from peewee import Model, IntegerField, CharField,PrimaryKeyField from website.app import db_wrapper, login_manager from website.http.main_exception import MainException from werkzeug.security import check_password_hash,generate_password_hash """ """ login_manager.anonymous_user = AnonymousUser
27.317757
99
0.63599
5349c9c6e7fa65e3aff751e7538d0e98f7e22725
32
py
Python
FlaskApp/__init__.py
robertavram/project5
12a2816b84be994b561f2f693cf34c0fa4f0ca19
[ "Apache-2.0" ]
7
2015-09-20T22:11:01.000Z
2021-11-09T18:04:47.000Z
FlaskApp/__init__.py
robertavram/project5
12a2816b84be994b561f2f693cf34c0fa4f0ca19
[ "Apache-2.0" ]
9
2020-03-24T15:24:59.000Z
2022-03-11T23:13:00.000Z
FlaskApp/__init__.py
robertavram/project5
12a2816b84be994b561f2f693cf34c0fa4f0ca19
[ "Apache-2.0" ]
2
2016-03-06T00:30:54.000Z
2017-04-06T10:15:06.000Z
# application import application
16
18
0.875
534a04c3322e1ecaccb87a17247c9e86ecb95e59
2,039
py
Python
sim2net/speed/constant.py
harikuts/dsr_optimization
796e58da578f7841a060233a8981eb69d92b798b
[ "MIT" ]
12
2018-06-17T05:29:35.000Z
2022-03-20T23:55:49.000Z
sim2net/speed/constant.py
harikuts/dsr_optimization
796e58da578f7841a060233a8981eb69d92b798b
[ "MIT" ]
2
2020-05-02T16:36:34.000Z
2021-03-12T17:40:02.000Z
sim2net/speed/constant.py
harikuts/dsr_optimization
796e58da578f7841a060233a8981eb69d92b798b
[ "MIT" ]
6
2015-09-09T00:00:22.000Z
2020-05-29T20:18:31.000Z
#!/usr/bin/env python # -*- coding: utf-8 -*- # (c) 2012 Michal Kalewski <mkalewski at cs.put.poznan.pl> # # This file is a part of the Simple Network Simulator (sim2net) project. # USE, MODIFICATION, COPYING AND DISTRIBUTION OF THIS SOFTWARE IS SUBJECT TO # THE TERMS AND CONDITIONS OF THE MIT LICENSE. YOU SHOULD HAVE RECEIVED A COPY # OF THE MIT LICENSE ALONG WITH THIS SOFTWARE; IF NOT, YOU CAN DOWNLOAD A COPY # FROM HTTP://WWW.OPENSOURCE.ORG/. # # For bug reports, feature and support requests please visit # <https://github.com/mkalewski/sim2net/issues>. """ Provides an implementation of a constant node speed. In this case a speed of a node is constant at a given value. """ from math import fabs from sim2net.speed._speed import Speed from sim2net.utility.validation import check_argument_type __docformat__ = 'reStructuredText'
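
# Hedged, stand-alone sketch of the "constant speed" idea described in the
# module docstring above; this is NOT the module's own (elided) class, and
# the method names below are hypothetical. The real implementation derives
# from sim2net.speed._speed.Speed and validates its argument with
# check_argument_type(); this toy version only mirrors the behaviour.

class _ConstantSpeedSketch(object):
    '''Toy constant-speed object: the speed never changes once set'''
    def __init__(self, speed):
        # Speeds are taken to be non-negative, hence fabs()
        self.__speed = float(fabs(speed))

    @property
    def current(self):
        '''Current speed of a node (constant by construction)'''
        return self.__speed

    def get_new(self):
        '''Return the (unchanging) speed for the next simulation step'''
        return self.__speed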
26.828947
79
0.60667
534aebd1f9c4e46d72dc93169bc74d5b8daf04ea
2,088
py
Python
nexula/nexula_utility/utility_extract_func.py
haryoa/nexula
cc3b5a9b8dd8294bdc47150a1971cb49c4dde225
[ "MIT" ]
3
2020-05-06T08:53:22.000Z
2020-09-24T07:45:38.000Z
nexula/nexula_utility/utility_extract_func.py
haryoa/nexula
cc3b5a9b8dd8294bdc47150a1971cb49c4dde225
[ "MIT" ]
null
null
null
nexula/nexula_utility/utility_extract_func.py
haryoa/nexula
cc3b5a9b8dd8294bdc47150a1971cb49c4dde225
[ "MIT" ]
null
null
null
from nexula.nexula_utility.utility_import_var import import_class
29.408451
77
0.594828
534b03a80c1d26fa6e09b0d0a301670d7c14eb1d
717
py
Python
marbas/preprocessing.py
MJ-Jang/Marbas
0a144e4f2ae868604ed4d3b7ae892a53fdebf388
[ "Apache-2.0" ]
null
null
null
marbas/preprocessing.py
MJ-Jang/Marbas
0a144e4f2ae868604ed4d3b7ae892a53fdebf388
[ "Apache-2.0" ]
null
null
null
marbas/preprocessing.py
MJ-Jang/Marbas
0a144e4f2ae868604ed4d3b7ae892a53fdebf388
[ "Apache-2.0" ]
null
null
null
import os from configparser import ConfigParser cfg = ConfigParser() #PATH_CUR = os.getcwd() + '/pynori' PATH_CUR = os.path.dirname(__file__) cfg.read(PATH_CUR+'/config.ini') # PREPROCESSING ENG_LOWER = cfg.getboolean('PREPROCESSING', 'ENG_LOWER')
18.868421
56
0.707113
534bd05a5abb5701233fcbfbd1621011b3753745
2,106
py
Python
pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/ServiceNowQueryTile.py
amvasudeva/rapidata
7b6e984d24866f5cf474847cf462ac628427cf48
[ "Apache-2.0" ]
null
null
null
pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/ServiceNowQueryTile.py
amvasudeva/rapidata
7b6e984d24866f5cf474847cf462ac628427cf48
[ "Apache-2.0" ]
7
2020-06-30T23:14:35.000Z
2021-08-02T17:08:05.000Z
pravash/servicenowplugin/xlr-servicenow-plugin-master/src/main/resources/servicenow/ServiceNowQueryTile.py
amvasudeva/rapidata
7b6e984d24866f5cf474847cf462ac628427cf48
[ "Apache-2.0" ]
null
null
null
#
# THIS CODE AND INFORMATION ARE PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND/OR FITNESS
# FOR A PARTICULAR PURPOSE. THIS CODE AND INFORMATION ARE NOT SUPPORTED BY XEBIALABS.
#

import sys  # needed for the sys.exit() call below
import com.xhaus.jyson.JysonCodec as json

if not servicenowServer:
    raise Exception("ServiceNow server ID must be provided")

if not username:
    username = servicenowServer["username"]

if not password:
    password = servicenowServer["password"]

servicenowUrl = servicenowServer['url']
credentials = CredentialsFallback(servicenowServer, username, password).getCredentials()

content = None
RESPONSE_OK_STATUS = 200

print "Sending content %s" % content

servicenowAPIUrl = servicenowUrl + '/api/now/v1/table/%s?sysparm_display_value=true&sysparm_limit=1000&sysparm_query=%s' % (tableName, query)

servicenowResponse = XLRequest(servicenowAPIUrl, 'GET', content,
                               credentials['username'],
                               credentials['password'], 'application/json').send()

if servicenowResponse.status == RESPONSE_OK_STATUS:
    json_data = json.loads(servicenowResponse.read())
    rows = {}
    for item in json_data['result']:
        row = item['number']
        rows[row] = get_row_data(item)
    data = rows
else:
    error = json.loads(servicenowResponse.read())
    if 'Invalid table' in error['error']['message']:
        print "Invalid Table Name"
        data = {"Invalid table name"}
        servicenowResponse.errorDump()
    else:
        print "Failed to run query in Service Now"
        servicenowResponse.errorDump()
        sys.exit(1)

39
141
0.702279
534bea3e62a4def92bc88bd345a7c584ed8fc91f
272
py
Python
bc/recruitment/migrations/0022_merge_20200331_1633.py
Buckinghamshire-Digital-Service/buckinghamshire-council
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
[ "BSD-3-Clause" ]
1
2021-02-27T07:27:17.000Z
2021-02-27T07:27:17.000Z
bc/recruitment/migrations/0022_merge_20200331_1633.py
Buckinghamshire-Digital-Service/buckinghamshire-council
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
[ "BSD-3-Clause" ]
null
null
null
bc/recruitment/migrations/0022_merge_20200331_1633.py
Buckinghamshire-Digital-Service/buckinghamshire-council
bbbdb52b515bcdfc79a2bd9198dfa4828405370e
[ "BSD-3-Clause" ]
1
2021-06-09T15:56:54.000Z
2021-06-09T15:56:54.000Z
# Generated by Django 2.2.10 on 2020-03-31 15:33 from django.db import migrations
19.428571
52
0.665441
534d47fdc7a25cba8b55b44734cb77c92e4d9b0f
1,580
py
Python
Stage_3/Task11_Graph/depth_first_search.py
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
08642357df60d48cb185b5487150204b42764260
[ "MIT" ]
null
null
null
Stage_3/Task11_Graph/depth_first_search.py
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
08642357df60d48cb185b5487150204b42764260
[ "MIT" ]
null
null
null
Stage_3/Task11_Graph/depth_first_search.py
Pyabecedarian/Algorithms-and-Data-Structures-using-Python
08642357df60d48cb185b5487150204b42764260
[ "MIT" ]
null
null
null
""" The Depth First Search (DFS) The goal of a dfs is to search as deeply as possible, connecting as many nodes in the graph as possible and branching where necessary. Think of the BFS that builds a search tree one level at a time, whereas the DFS creates a search tree by exploring one branch of the tree as deeply as possible. As with bfs the dfs makes use of `predecessor` links to construct the tree. In addition, the dfs will make use of two additional instance variables in the Vertex class, `discovery` and `finish_time`. predecessor : same as bfs discovery : tracks the number of steps in the algorithm before a vertex is first encountered; finish_time : is the number of steps before a vertex is colored black """ from datastruct.graph import Vertex, Graph
30.384615
111
0.623418
534d9ad6eb702336fba7d16987e60cced1fa2979
250
py
Python
salt/_modules/freebsd_common.py
rbtcollins/rusty_rail
6ab8a95247b42a81add03500a75ce6678ede5d58
[ "Apache-2.0" ]
16
2017-08-23T20:04:36.000Z
2020-03-09T19:12:30.000Z
salt/_modules/freebsd_common.py
rbtcollins/rusty_rail
6ab8a95247b42a81add03500a75ce6678ede5d58
[ "Apache-2.0" ]
null
null
null
salt/_modules/freebsd_common.py
rbtcollins/rusty_rail
6ab8a95247b42a81add03500a75ce6678ede5d58
[ "Apache-2.0" ]
2
2018-08-18T04:59:26.000Z
2021-02-20T05:22:57.000Z
def sysrc(value): """Call sysrc. CLI Example: .. code-block:: bash salt '*' freebsd_common.sysrc sshd_enable=YES salt '*' freebsd_common.sysrc static_routes """ return __salt__['cmd.run_all']("sysrc %s" % value)
22.727273
54
0.612
534f1909a1ab524bb2f051997a7532e48c09b6dd
1,429
py
Python
auth0/v3/management/blacklists.py
jhunken/auth0-python
af5d863ffe75a4a7cd729c9d084cad6b37bd632e
[ "MIT" ]
null
null
null
auth0/v3/management/blacklists.py
jhunken/auth0-python
af5d863ffe75a4a7cd729c9d084cad6b37bd632e
[ "MIT" ]
null
null
null
auth0/v3/management/blacklists.py
jhunken/auth0-python
af5d863ffe75a4a7cd729c9d084cad6b37bd632e
[ "MIT" ]
null
null
null
from .rest import RestClient
28.58
83
0.588523
534f4e03ba246b728b20809e5d71ee70468b20fb
2,886
py
Python
test_backtest/simplebacktest.py
qzm/QUANTAXIS
055fdc16d67670fb4770e7097865336199e55f3e
[ "MIT" ]
1
2021-05-20T12:33:46.000Z
2021-05-20T12:33:46.000Z
test_backtest/simplebacktest.py
qzm/QUANTAXIS
055fdc16d67670fb4770e7097865336199e55f3e
[ "MIT" ]
null
null
null
test_backtest/simplebacktest.py
qzm/QUANTAXIS
055fdc16d67670fb4770e7097865336199e55f3e
[ "MIT" ]
null
null
null
# coding=utf-8 # # The MIT License (MIT) # # Copyright (c) 2016-2018 yutiansut/QUANTAXIS # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import QUANTAXIS as QA import random """ """ B = QA.QA_BacktestBroker() AC = QA.QA_Account() """ # AC.reset_assets(assets) # Order=AC.send_order(code='000001',amount=1000,time='2018-03-21',towards=QA.ORDER_DIRECTION.BUY,price=0,order_model=QA.ORDER_MODEL.MARKET,amount_model=QA.AMOUNT_MODEL.BY_AMOUNT) # dealmes=B.receive_order(QA.QA_Event(order=Order,market_data=data)) # AC.receive_deal(dealmes) # risk=QA.QA_Risk(AC) """ AC.reset_assets(20000000) # simple_backtest(AC, QA.QA_fetch_stock_block_adv( ).code[0:10], '2017-01-01', '2018-01-31') print(AC.message) AC.save() risk = QA.QA_Risk(AC) print(risk.message) risk.save()
37.480519
200
0.711365
53500afefcda695385af9237df24a3052bec880e
4,010
py
Python
artview/components/field.py
jjhelmus/artview
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
[ "BSD-3-Clause" ]
null
null
null
artview/components/field.py
jjhelmus/artview
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
[ "BSD-3-Clause" ]
null
null
null
artview/components/field.py
jjhelmus/artview
2af5ccad8d509d11ef6da7c97bee0f7b255b6879
[ "BSD-3-Clause" ]
null
null
null
""" field.py Class instance used for modifying field via Display window. """ # Load the needed packages from functools import partial from ..core import Variable, Component, QtGui, QtCore
32.868852
77
0.605486
5350847a4e985147242bdddaf7eae8ed5d884139
4,101
py
Python
rest_framework_mongoengine/fields.py
Careerleaf/django-rest-framework-mongoengine
fc28dbf7af760528f6f7247e567328df46458799
[ "MIT" ]
null
null
null
rest_framework_mongoengine/fields.py
Careerleaf/django-rest-framework-mongoengine
fc28dbf7af760528f6f7247e567328df46458799
[ "MIT" ]
null
null
null
rest_framework_mongoengine/fields.py
Careerleaf/django-rest-framework-mongoengine
fc28dbf7af760528f6f7247e567328df46458799
[ "MIT" ]
null
null
null
from bson.errors import InvalidId from django.core.exceptions import ValidationError from django.utils.encoding import smart_str from mongoengine import dereference from mongoengine.base.document import BaseDocument from mongoengine.document import Document from rest_framework import serializers from mongoengine.fields import ObjectId import sys if sys.version_info[0] >= 3:
29.717391
84
0.641795
5351727e655c86d144b817ff63881c17c72740b2
1,657
py
Python
tests/conftest.py
bbhunter/fuzz-lightyear
75c1318d2f747a4fac6b55a46649c944528769ba
[ "Apache-2.0" ]
169
2019-11-06T20:30:16.000Z
2022-01-22T15:55:19.000Z
tests/conftest.py
bbhunter/fuzz-lightyear
75c1318d2f747a4fac6b55a46649c944528769ba
[ "Apache-2.0" ]
29
2019-09-24T19:44:03.000Z
2021-10-01T09:29:30.000Z
tests/conftest.py
bbhunter/fuzz-lightyear
75c1318d2f747a4fac6b55a46649c944528769ba
[ "Apache-2.0" ]
27
2019-12-27T19:57:28.000Z
2021-12-08T05:38:10.000Z
import pytest from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_OPERATION from fuzz_lightyear.datastore import _ALL_POST_FUZZ_HOOKS_BY_TAG from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_OPERATION from fuzz_lightyear.datastore import _RERUN_POST_FUZZ_HOOKS_BY_TAG from fuzz_lightyear.datastore import get_excluded_operations from fuzz_lightyear.datastore import get_included_tags from fuzz_lightyear.datastore import get_non_vulnerable_operations from fuzz_lightyear.datastore import get_user_defined_mapping from fuzz_lightyear.plugins import get_enabled_plugins from fuzz_lightyear.request import get_victim_session_factory from fuzz_lightyear.supplements.abstraction import get_abstraction
37.659091
72
0.834037
5351910933f7e53efc48e359df0170e503cf6959
8,375
py
Python
src/diepvries/field.py
michael-the1/diepvries
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
[ "MIT" ]
67
2021-08-20T14:30:49.000Z
2022-03-22T23:37:08.000Z
src/diepvries/field.py
michael-the1/diepvries
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
[ "MIT" ]
1
2022-01-22T08:19:38.000Z
2022-02-02T08:48:34.000Z
src/diepvries/field.py
michael-the1/diepvries
ddba9c91ee5fb2014dc576ffb74faa40c3d0d04f
[ "MIT" ]
6
2021-09-03T17:21:16.000Z
2021-12-22T12:11:51.000Z
"""Module for a Data Vault field.""" from typing import Optional from . import ( FIELD_PREFIX, FIELD_SUFFIX, METADATA_FIELDS, TABLE_PREFIXES, UNKNOWN, FieldDataType, FieldRole, TableType, )
34.465021
88
0.612537
5351c8767281abfc0e99352875444fb190e31a5e
5,702
py
Python
mmdet/datasets/deepscoresV2.py
tuggeluk/mmdetection
669a535c944628a3ab43330cae5c77b643e13a4b
[ "Apache-2.0" ]
1
2020-01-22T15:25:20.000Z
2020-01-22T15:25:20.000Z
mmdet/datasets/deepscoresV2.py
tuggeluk/mmdetection
669a535c944628a3ab43330cae5c77b643e13a4b
[ "Apache-2.0" ]
2
2019-12-16T10:51:41.000Z
2020-10-06T13:46:25.000Z
mmdet/datasets/deepscoresV2.py
tuggeluk/mmdetection
669a535c944628a3ab43330cae5c77b643e13a4b
[ "Apache-2.0" ]
2
2020-04-20T08:58:40.000Z
2021-05-08T07:55:54.000Z
"""DEEPSCORESV2 Provides access to the DEEPSCORESV2 database with a COCO-like interface. The only changes made compared to the coco.py file are the class labels. Author: Lukas Tuggener <tugg@zhaw.ch> Yvan Satyawan <y_satyawan@hotmail.com> Created on: November 23, 2019 """ from .coco import * import os import json from obb_anns import OBBAnns
36.318471
118
0.590144
5351dc5962b2184cb179f5f6f4ba10be7538464e
81,840
py
Python
tests/go_cd_configurator_test.py
agsmorodin/gomatic
e6ae871ffc2d027823f6b7a5755e0ac65c724538
[ "MIT" ]
null
null
null
tests/go_cd_configurator_test.py
agsmorodin/gomatic
e6ae871ffc2d027823f6b7a5755e0ac65c724538
[ "MIT" ]
null
null
null
tests/go_cd_configurator_test.py
agsmorodin/gomatic
e6ae871ffc2d027823f6b7a5755e0ac65c724538
[ "MIT" ]
null
null
null
#!/usr/bin/env python import unittest from xml.dom.minidom import parseString import xml.etree.ElementTree as ET from decimal import Decimal from gomatic import GoCdConfigurator, FetchArtifactDir, RakeTask, ExecTask, ScriptExecutorTask, FetchArtifactTask, \ FetchArtifactFile, Tab, GitMaterial, PipelineMaterial, Pipeline from gomatic.fake import FakeHostRestClient, empty_config_xml, config, empty_config from gomatic.gocd.pipelines import DEFAULT_LABEL_TEMPLATE from gomatic.gocd.artifacts import Artifact from gomatic.xml_operations import prettify def simplified(s): return s.strip().replace("\t", "").replace("\n", "").replace("\\", "").replace(" ", "") def sneakily_converted_to_xml(pipeline): if pipeline.is_template: return ET.tostring(pipeline.element) else: return ET.tostring(pipeline.parent.element)
49.271523
208
0.697849
53527ec6ef2428da3e1c97ac08275c75fd6e2545
1,628
py
Python
gui/wellplot/settings/style/wellplotstylehandler.py
adriangrepo/qreservoir
20fba1b1fd1a42add223d9e8af2d267665bec493
[ "MIT" ]
2
2019-10-04T13:54:51.000Z
2021-05-21T19:36:15.000Z
gui/wellplot/settings/style/wellplotstylehandler.py
adriangrepo/qreservoir
20fba1b1fd1a42add223d9e8af2d267665bec493
[ "MIT" ]
3
2019-11-19T17:06:09.000Z
2020-01-18T20:39:54.000Z
gui/wellplot/settings/style/wellplotstylehandler.py
adriangrepo/qreservoir
20fba1b1fd1a42add223d9e8af2d267665bec493
[ "MIT" ]
2
2020-07-02T13:20:48.000Z
2020-11-11T00:18:51.000Z
import logging from qrutilities.imageutils import ImageUtils from PyQt4.QtGui import QColor logger = logging.getLogger('console')
45.222222
103
0.72973
5353098565b09d0a6b37ec215ad6356db9a8d2af
599
py
Python
utm_messages/urls.py
geoffreynyaga/ANGA-UTM
8371a51ad27c85d2479bb34d8c4e02ea28465941
[ "Apache-2.0" ]
7
2020-01-18T16:53:41.000Z
2021-12-21T07:02:43.000Z
utm_messages/urls.py
geoffreynyaga/ANGA-UTM
8371a51ad27c85d2479bb34d8c4e02ea28465941
[ "Apache-2.0" ]
28
2020-01-06T18:36:54.000Z
2022-02-10T10:03:55.000Z
utm_messages/urls.py
geoffreynyaga/ANGA-UTM
8371a51ad27c85d2479bb34d8c4e02ea28465941
[ "Apache-2.0" ]
3
2020-01-18T16:53:54.000Z
2020-10-26T11:21:41.000Z
from django.conf.urls import url from . import views app_name = "messages" urlpatterns = [ url(r'^$', views.InboxListView.as_view(), name='inbox'), url(r'^sent/$', views.SentMessagesListView.as_view(), name='sent'), url(r'^compose/$', views.MessagesCreateView.as_view(), name='compose'), # url(r'^compose-all/$', views.SendToAll.as_view(), name='compose_to_all'), url(r'^(?P<pk>\d+)/$', views.MessageDetailView.as_view(), name='message_detail'), url(r'^calendar/$', views.CalendarView.as_view(), name='calendar'), ]
29.95
90
0.60601
53548465e12a42f5d8a5f01db8be119e8fba5d7d
24,159
py
Python
server/apps/datablock/tests/test_create_worker.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
server/apps/datablock/tests/test_create_worker.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
server/apps/datablock/tests/test_create_worker.py
iotile/iotile_cloud
9dc65ac86d3a730bba42108ed7d9bbb963d22ba6
[ "MIT" ]
null
null
null
import datetime import json import dateutil.parser from django.contrib.auth import get_user_model from django.test import Client, TestCase from django.utils import timezone from apps.devicelocation.models import DeviceLocation from apps.physicaldevice.models import Device from apps.property.models import GenericProperty from apps.report.models import GeneratedUserReport from apps.sqsworker.exceptions import WorkerActionHardError from apps.stream.models import StreamId, StreamVariable from apps.streamdata.models import StreamData from apps.streamevent.models import StreamEventData from apps.streamfilter.models import * from apps.streamnote.models import StreamNote from apps.utils.data_mask.mask_utils import get_data_mask_event, set_data_mask from apps.utils.gid.convert import * from apps.utils.test_util import TestMixin from ..models import * from ..worker.archive_device_data import ArchiveDeviceDataAction user_model = get_user_model()
41.439108
120
0.634919
5354d3bcbb084eaac2e9dc5457335c7f402533a9
12,221
py
Python
nova/policies/servers.py
maya2250/nova
e483ca1cd9a5db5856f87fc69ca07c42d2be5def
[ "Apache-2.0" ]
null
null
null
nova/policies/servers.py
maya2250/nova
e483ca1cd9a5db5856f87fc69ca07c42d2be5def
[ "Apache-2.0" ]
1
2020-11-05T17:42:24.000Z
2020-11-05T17:42:24.000Z
nova/policies/servers.py
Mattlk13/nova
5b13eb59540aaf535a53920e783964d106de2620
[ "Apache-2.0" ]
1
2020-07-22T22:14:40.000Z
2020-07-22T22:14:40.000Z
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from oslo_policy import policy

from nova.policies import base


RULE_AOO = base.RULE_ADMIN_OR_OWNER
SERVERS = 'os_compute_api:servers:%s'
NETWORK_ATTACH_EXTERNAL = 'network:attach_external_network'
ZERO_DISK_FLAVOR = SERVERS % 'create:zero_disk_flavor'
REQUESTED_DESTINATION = 'compute:servers:create:requested_destination'
CROSS_CELL_RESIZE = 'compute:servers:resize:cross_cell'

rules = [
    policy.DocumentedRuleDefault(
        SERVERS % 'index',
        RULE_AOO,
        "List all servers",
        [
            {
                'method': 'GET',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'detail',
        RULE_AOO,
        "List all servers with detailed information",
        [
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'index:get_all_tenants',
        base.RULE_ADMIN_API,
        "List all servers for all projects",
        [
            {
                'method': 'GET',
                'path': '/servers'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'detail:get_all_tenants',
        base.RULE_ADMIN_API,
        "List all servers with detailed information for all projects",
        [
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'allow_all_filters',
        base.RULE_ADMIN_API,
        "Allow all filters when listing servers",
        [
            {
                'method': 'GET',
                'path': '/servers'
            },
            {
                'method': 'GET',
                'path': '/servers/detail'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'show',
        RULE_AOO,
        "Show a server",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}'
            }
        ]),
    # the details in host_status are pretty sensitive, only admins
    # should do that by default.
    policy.DocumentedRuleDefault(
        SERVERS % 'show:host_status',
        base.RULE_ADMIN_API,
        """
Show a server with additional host status information.

This means host_status will be shown irrespective of status value. If showing
only host_status UNKNOWN is desired, use the
``os_compute_api:servers:show:host_status:unknown-only`` policy rule.

Microversion 2.75 added the ``host_status`` attribute in the
``PUT /servers/{server_id}`` and ``POST /servers/{server_id}/action (rebuild)``
API responses which are also controlled by this policy rule, like the
``GET /servers*`` APIs.
""",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}'
            },
            {
                'method': 'GET',
                'path': '/servers/detail'
            },
            {
                'method': 'PUT',
                'path': '/servers/{server_id}'
            },
            {
                'method': 'POST',
                'path': '/servers/{server_id}/action (rebuild)'
            }
        ]),
    policy.DocumentedRuleDefault(
        SERVERS % 'show:host_status:unknown-only',
        base.RULE_ADMIN_API,
        """
Show a server with additional host status information, only if host status is
UNKNOWN. This policy rule will only be enforced when the
``os_compute_api:servers:show:host_status`` policy rule does not pass for the
request. An example policy configuration could be where the
``os_compute_api:servers:show:host_status`` rule is set to allow admin-only
and the ``os_compute_api:servers:show:host_status:unknown-only`` rule is set
to allow everyone.
""", [ { 'method': 'GET', 'path': '/servers/{server_id}' }, { 'method': 'GET', 'path': '/servers/detail' } ]), policy.DocumentedRuleDefault( SERVERS % 'create', RULE_AOO, "Create a server", [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( SERVERS % 'create:forced_host', base.RULE_ADMIN_API, """ Create a server on the specified host and/or node. In this case, the server is forced to launch on the specified host and/or node by bypassing the scheduler filters unlike the ``compute:servers:create:requested_destination`` rule. """, [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( REQUESTED_DESTINATION, base.RULE_ADMIN_API, """ Create a server on the requested compute service host and/or hypervisor_hostname. In this case, the requested host and/or hypervisor_hostname is validated by the scheduler filters unlike the ``os_compute_api:servers:create:forced_host`` rule. """, [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( SERVERS % 'create:attach_volume', RULE_AOO, "Create a server with the requested volume attached to it", [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( SERVERS % 'create:attach_network', RULE_AOO, "Create a server with the requested network attached to it", [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( SERVERS % 'create:trusted_certs', RULE_AOO, "Create a server with trusted image certificate IDs", [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( ZERO_DISK_FLAVOR, base.RULE_ADMIN_API, """ This rule controls the compute API validation behavior of creating a server with a flavor that has 0 disk, indicating the server should be volume-backed. For a flavor with disk=0, the root disk will be set to exactly the size of the image used to deploy the instance. However, in this case the filter_scheduler cannot select the compute host based on the virtual image size. Therefore, 0 should only be used for volume booted instances or for testing purposes. WARNING: It is a potential security exposure to enable this policy rule if users can upload their own images since repeated attempts to create a disk=0 flavor instance with a large image can exhaust the local disk of the compute (or shared storage cluster). See bug https://bugs.launchpad.net/nova/+bug/1739646 for details. """, [ { 'method': 'POST', 'path': '/servers' } ]), policy.DocumentedRuleDefault( NETWORK_ATTACH_EXTERNAL, 'is_admin:True', "Attach an unshared external network to a server", [ # Create a server with a requested network or port. { 'method': 'POST', 'path': '/servers' }, # Attach a network or port to an existing server. 
{ 'method': 'POST', 'path': '/servers/{server_id}/os-interface' } ]), policy.DocumentedRuleDefault( SERVERS % 'delete', RULE_AOO, "Delete a server", [ { 'method': 'DELETE', 'path': '/servers/{server_id}' } ]), policy.DocumentedRuleDefault( SERVERS % 'update', RULE_AOO, "Update a server", [ { 'method': 'PUT', 'path': '/servers/{server_id}' } ]), policy.DocumentedRuleDefault( SERVERS % 'confirm_resize', RULE_AOO, "Confirm a server resize", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (confirmResize)' } ]), policy.DocumentedRuleDefault( SERVERS % 'revert_resize', RULE_AOO, "Revert a server resize", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (revertResize)' } ]), policy.DocumentedRuleDefault( SERVERS % 'reboot', RULE_AOO, "Reboot a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (reboot)' } ]), policy.DocumentedRuleDefault( SERVERS % 'resize', RULE_AOO, "Resize a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (resize)' } ]), policy.DocumentedRuleDefault( CROSS_CELL_RESIZE, base.RULE_NOBODY, "Resize a server across cells. By default, this is disabled for all " "users and recommended to be tested in a deployment for admin users " "before opening it up to non-admin users. Resizing within a cell is " "the default preferred behavior even if this is enabled. ", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (resize)' } ]), policy.DocumentedRuleDefault( SERVERS % 'rebuild', RULE_AOO, "Rebuild a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ]), policy.DocumentedRuleDefault( SERVERS % 'rebuild:trusted_certs', RULE_AOO, "Rebuild a server with trusted image certificate IDs", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (rebuild)' } ]), policy.DocumentedRuleDefault( SERVERS % 'create_image', RULE_AOO, "Create an image from a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (createImage)' } ]), policy.DocumentedRuleDefault( SERVERS % 'create_image:allow_volume_backed', RULE_AOO, "Create an image from a volume backed server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (createImage)' } ]), policy.DocumentedRuleDefault( SERVERS % 'start', RULE_AOO, "Start a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-start)' } ]), policy.DocumentedRuleDefault( SERVERS % 'stop', RULE_AOO, "Stop a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (os-stop)' } ]), policy.DocumentedRuleDefault( SERVERS % 'trigger_crash_dump', RULE_AOO, "Trigger crash dump in a server", [ { 'method': 'POST', 'path': '/servers/{server_id}/action (trigger_crash_dump)' } ]), ]
29.734793
79
0.524262
5355dedf12aa8e15115b8c77564d80f57eb0ec2a
1,577
py
Python
set-env.py
sajaldebnath/vrops-custom-group-creation
e3c821336832445e93706ad29afe216867660123
[ "MIT" ]
1
2017-08-14T07:51:42.000Z
2017-08-14T07:51:42.000Z
set-env.py
sajaldebnath/vrops-custom-group-creation
e3c821336832445e93706ad29afe216867660123
[ "MIT" ]
null
null
null
set-env.py
sajaldebnath/vrops-custom-group-creation
e3c821336832445e93706ad29afe216867660123
[ "MIT" ]
null
null
null
# !/usr/bin python
"""
#
# set-env - a small python program to setup the configuration environment for data-push.py
# data-push.py contains the python program to push attribute values to vROps
# Author Sajal Debnath <sdebnath@vmware.com>
#
"""

# Importing the required modules
import json
import base64
import os,sys

# Getting the absolute path from where the script is being run

# Getting the inputs from user

# Getting the path where env.json file should be kept
path = get_script_path()
fullpath = path+"/"+"env.json"

# Getting the data for the env.json file
final_data = get_the_inputs()

# Saving the data to env.json file
with open(fullpath, 'w') as outfile:
    json.dump(final_data, outfile, sort_keys = True, indent = 2, separators=(',', ':'), ensure_ascii=False)
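# Editor's note: get_script_path() and get_the_inputs() are called above but
# were stripped from this record. A minimal sketch of what they must provide;
# the prompts and returned fields are assumptions, not the original code:
import getpass

def get_script_path():
    # Absolute directory from which the script is being run.
    return os.path.dirname(os.path.realpath(sys.argv[0]))

def get_the_inputs():
    # Hypothetical reconstruction: collect vROps connection details and
    # base64-encode the password before it is written to env.json.
    host = input("vROps host: ")
    user = input("User: ")
    password = base64.b64encode(getpass.getpass("Password: ").encode()).decode()
    return {"host": host, "user": user, "password": password}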
28.672727
107
0.689918
53564fa8ddf1d013bfaf1e0a0630a501757ce124
1,504
py
Python
week02/day08.py
gtadeus/LeetCodeChallenge2009
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
[ "MIT" ]
null
null
null
week02/day08.py
gtadeus/LeetCodeChallenge2009
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
[ "MIT" ]
null
null
null
week02/day08.py
gtadeus/LeetCodeChallenge2009
81d3fae205fb9071d7a98260df9bbeb1c8c8ffe0
[ "MIT" ]
null
null
null
import unittest

# Definition for a binary tree node.

if __name__ == "__main__":
    unittest.main()
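# Editor's note: the node class and test case behind the comment were
# stripped from this record. A minimal sketch of the conventional LeetCode
# binary-tree definition it refers to; the test below is illustrative, not
# the original:
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

class TestDay08(unittest.TestCase):
    def test_tree_construction(self):
        root = TreeNode(1, TreeNode(2), TreeNode(3))
        self.assertEqual((root.left.val, root.right.val), (2, 3))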
28.923077
146
0.475399
53577342e6db4b3427645ab2e05fe5d3ca60a280
118
py
Python
config.py
tiuD/cross-prom
8b987138ec32e0ac64ca6ffe13d0e1cd0d18aef3
[ "MIT" ]
null
null
null
config.py
tiuD/cross-prom
8b987138ec32e0ac64ca6ffe13d0e1cd0d18aef3
[ "MIT" ]
null
null
null
config.py
tiuD/cross-prom
8b987138ec32e0ac64ca6ffe13d0e1cd0d18aef3
[ "MIT" ]
null
null
null
TOKEN = "1876415562:AAEsX_c9k3Fot2IT0BYRqkCCQ5vFEHQDLDQ" CHAT_ID = [957539786] # e.g. [1234567, 2233445, 3466123...]
29.5
59
0.754237
5357c161a07eb9258b2e704a13f4eda6f6ab93d0
63
py
Python
buchschloss/gui2/__init__.py
mik2k2/buchschloss
8a9d17de5847ccab48a0de48aa4b60af2a7cc045
[ "MIT" ]
1
2020-01-13T18:52:51.000Z
2020-01-13T18:52:51.000Z
buchschloss/gui2/__init__.py
mik2k2/buchschloss
8a9d17de5847ccab48a0de48aa4b60af2a7cc045
[ "MIT" ]
48
2020-01-13T10:32:22.000Z
2021-06-16T16:10:19.000Z
buchschloss/gui2/__init__.py
mik2k2/buchschloss
8a9d17de5847ccab48a0de48aa4b60af2a7cc045
[ "MIT" ]
1
2020-01-19T11:07:12.000Z
2020-01-19T11:07:12.000Z
"""entry point""" from . import main start = main.app.launch
10.5
23
0.666667
5358824aa89abe42dc1e1bfd86a2b5480905c96d
411
py
Python
src/tests/test_stop_at_task.py
francesco-p/FACIL
e719deebb6d2acb5778b60759294c23ea5e2b454
[ "MIT" ]
243
2020-09-22T11:26:34.000Z
2022-03-31T13:16:21.000Z
src/tests/test_stop_at_task.py
francesco-p/FACIL
e719deebb6d2acb5778b60759294c23ea5e2b454
[ "MIT" ]
15
2021-05-09T08:48:15.000Z
2022-03-28T16:07:45.000Z
src/tests/test_stop_at_task.py
francesco-p/FACIL
e719deebb6d2acb5778b60759294c23ea5e2b454
[ "MIT" ]
52
2021-03-01T15:08:29.000Z
2022-03-28T19:53:14.000Z
from tests import run_main_and_assert

FAST_LOCAL_TEST_ARGS = "--exp-name local_test --datasets mnist" \
                       " --network LeNet --num-tasks 5 --seed 1 --batch-size 32" \
                       " --nepochs 2 --num-workers 0 --stop-at-task 3"
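# Editor's note: the test function itself was stripped from this record.
# A minimal sketch in the style of FACIL's test suite, assuming
# run_main_and_assert accepts the argument string; the appended approach
# flag is an assumption:
def test_finetuning_stop_at_task():
    args_line = FAST_LOCAL_TEST_ARGS + " --approach finetuning"
    run_main_and_assert(args_line)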
34.25
82
0.647202
53591d67014d7a8167c868c3b270950bcf55cca8
375
py
Python
Python/contains-duplicate.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
388
2020-06-29T08:41:27.000Z
2022-03-31T22:55:05.000Z
Python/contains-duplicate.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
178
2020-07-16T17:15:28.000Z
2022-03-09T21:01:50.000Z
Python/contains-duplicate.py
shreyventure/LeetCode-Solutions
74423d65702b78974e390f17c9d6365d17e6eed5
[ "MIT" ]
263
2020-07-13T18:33:20.000Z
2022-03-28T13:54:10.000Z
# Author: Anuj Sharma (@optider)
# Github Profile: https://github.com/Optider/
# Problem Link: https://leetcode.com/problems/contains-duplicate/
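# Editor's note: the solution body was stripped from this record. A minimal
# sketch of the standard set-based approach to this problem; the class and
# method names follow LeetCode's stub convention and are assumed here:
class Solution:
    def containsDuplicate(self, nums) -> bool:
        seen = set()
        for n in nums:
            if n in seen:
                return True
            seen.add(n)
        return False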
26.785714
65
0.581333
5359c8fd0dd897c7cd9afb3870d3437688b42ddc
8,824
py
Python
build/android/gyp/dex.py
google-ar/chromium
2441c86a5fd975f09a6c30cddb57dfb7fc239699
[ "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
2,151
2020-04-18T07:31:17.000Z
2022-03-31T08:39:18.000Z
build/android/gyp/dex.py
harrymarkovskiy/WebARonARCore
2441c86a5fd975f09a6c30cddb57dfb7fc239699
[ "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
395
2020-04-18T08:22:18.000Z
2021-12-08T13:04:49.000Z
build/android/gyp/dex.py
harrymarkovskiy/WebARonARCore
2441c86a5fd975f09a6c30cddb57dfb7fc239699
[ "Apache-2.0", "BSD-3-Clause-No-Nuclear-License-2014", "BSD-3-Clause" ]
338
2020-04-18T08:03:10.000Z
2022-03-29T12:33:22.000Z
#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import json
import logging
import optparse
import os
import sys
import tempfile
import zipfile

from util import build_utils

if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
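# Editor's note: main() is invoked above but was stripped from this record
# (in the real file it precedes the __main__ guard). A minimal stub showing
# the shape the entry point expects; the option below is illustrative, not
# Chromium's actual flag set:
def main(args):
  parser = optparse.OptionParser()
  parser.add_option('--dex-path', help='Dex output path.')
  options, paths = parser.parse_args(args)
  logging.info('would dex %s into %s', paths, options.dex_path)
  return 0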
35.580645
80
0.68563
535aa0f5c23f246944ffe8092713608d551e77e5
11,199
py
Python
apps/views.py
Edwardhgj/meiduo
38796f5caf54676eb5620f50ade5474ee8700ad8
[ "MIT" ]
null
null
null
apps/views.py
Edwardhgj/meiduo
38796f5caf54676eb5620f50ade5474ee8700ad8
[ "MIT" ]
6
2020-06-05T23:02:49.000Z
2022-02-11T03:43:22.000Z
apps/views.py
Edwardhgj/meiduo
38796f5caf54676eb5620f50ade5474ee8700ad8
[ "MIT" ]
null
null
null
from django.shortcuts import render
from django.http import HttpResponse
from django.contrib.auth.hashers import check_password, make_password
from django.views import View
from utils.response_code import RET, error_map
from rest_framework.views import APIView
from rest_framework.response import Response
from apps.serializers import *
from datetime import datetime

# Create your views here.
# import json


def reg(request):
    password = make_password('123')
    admin = Sadmin(username='admin', password=password, is_admin=True)
    admin.save()
    return HttpResponse('ok')


def addCate(request):
    # cate = Cate.objects.filter(pid=0).all()
    id = request.GET.get('id')
    try:
        one_cate = Cate.objects.get(id=id)
        print(one_cate)
    except:
        id = ""
    return render(request, "admin/add_cate.html", locals())


from day01.settings import UPLOADFILES
import os


def deleteCate(request):
    id = request.GET.get('id')
    Cate.objects.get(id=id).delete()
    return render(request, "admin/cate_list.html")


def deleteTag(request):
    id = request.GET.get('id')
    Cate.objects.get(id=id).delete()
    return render(request, "admin/tag_list.html")


def deleteGoods(request):
    id = request.GET.get('id')
    Goods.objects.get(id=id).delete()
    return render(request, "admin/goods_list.html")


def deleteNews(request):
    id = request.GET.get('id')
    News.objects.get(id=id).delete()
    return render(request, "admin/news_list.html")
21.331429
70
0.544156
535ab0b00b5e6dd49d2816d9ac5192041774bc04
4,283
py
Python
learnedevolution/targets/covariance/amalgam_covariance.py
realtwister/LearnedEvolution
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
[ "MIT" ]
null
null
null
learnedevolution/targets/covariance/amalgam_covariance.py
realtwister/LearnedEvolution
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
[ "MIT" ]
null
null
null
learnedevolution/targets/covariance/amalgam_covariance.py
realtwister/LearnedEvolution
2ec49b50a49acae9693cfb05ac114dfbcc4aa337
[ "MIT" ]
null
null
null
import numpy as np

from .covariance_target import CovarianceTarget
30.592857
87
0.585337
535b6a1790a4b33142e1922aac85ef30e05ce452
1,487
gyp
Python
binding.gyp
terrorizer1980/fs-admin
e21216161c56def4ca76a3ef4e71844e2ba26074
[ "MIT" ]
25
2017-10-14T22:54:00.000Z
2022-02-28T16:45:44.000Z
binding.gyp
icecream17/fs-admin
e21216161c56def4ca76a3ef4e71844e2ba26074
[ "MIT" ]
46
2019-02-22T15:17:32.000Z
2022-03-15T16:04:38.000Z
binding.gyp
icecream17/fs-admin
e21216161c56def4ca76a3ef4e71844e2ba26074
[ "MIT" ]
19
2018-01-04T00:52:17.000Z
2022-02-05T17:18:17.000Z
{
  'target_defaults': {
    'win_delay_load_hook': 'false',
    'conditions': [
      ['OS=="win"', {
        'msvs_disabled_warnings': [
          4530,  # C++ exception handler used, but unwind semantics are not enabled
          4506,  # no definition for inline function
        ],
      }],
    ],
  },

  'targets': [
    {
      'target_name': 'fs_admin',
      'defines': [
        "NAPI_VERSION=<(napi_build_version)",
      ],
      'cflags!': [ '-fno-exceptions' ],
      'cflags_cc!': [ '-fno-exceptions' ],
      'xcode_settings': {
        'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
        'CLANG_CXX_LIBRARY': 'libc++',
        'MACOSX_DEPLOYMENT_TARGET': '10.7',
      },
      'msvs_settings': {
        'VCCLCompilerTool': { 'ExceptionHandling': 1 },
      },
      'sources': [
        'src/main.cc',
      ],
      'include_dirs': [
        '<!(node -p "require(\'node-addon-api\').include_dir")',
      ],
      'conditions': [
        ['OS=="win"', {
          'sources': [
            'src/fs-admin-win.cc',
          ],
          'libraries': [
            '-lole32.lib',
            '-lshell32.lib',
          ],
        }],
        ['OS=="mac"', {
          'sources': [
            'src/fs-admin-darwin.cc',
          ],
          'libraries': [
            '$(SDKROOT)/System/Library/Frameworks/Security.framework',
          ],
        }],
        ['OS=="linux"', {
          'sources': [
            'src/fs-admin-linux.cc',
          ],
        }],
      ],
    }
  ]
}
24.377049
83
0.438467
535e247a8a58c1477770025f4437600400e0562a
2,917
py
Python
src/botwtracker/settings.py
emoritzx/botw-tracker
9c096e62825f2ba2f0f66167b646eaf5a1b5b50a
[ "MIT" ]
7
2017-04-25T10:16:20.000Z
2022-02-25T05:58:16.000Z
src/botwtracker/settings.py
emoritzx/botw-tracker
9c096e62825f2ba2f0f66167b646eaf5a1b5b50a
[ "MIT" ]
1
2020-01-02T20:40:35.000Z
2020-01-02T21:10:51.000Z
src/botwtracker/settings.py
emoritzx/botw-tracker
9c096e62825f2ba2f0f66167b646eaf5a1b5b50a
[ "MIT" ]
3
2020-01-02T21:11:21.000Z
2021-12-11T13:49:35.000Z
"""Django settings for botwtracker project. Copyright (c) 2017, Evan Moritz. botw-tracker is an open source software project released under the MIT License. See the accompanying LICENSE file for terms. """ import os from .config_local import * # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) DATA_DIR = os.path.join(BASE_DIR, '..', 'data') # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'quests.apps.QuestsConfig', 'user.apps.UserConfig', ] if USE_SIGNUP: INSTALLED_APPS.append('signup.apps.SignupConfig') MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'botwtracker.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [ os.path.join(BASE_DIR, 'templates') ], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'botwtracker.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(DATA_DIR, 'sqlite3.db'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/' STATICFILES_DIRS = [ os.path.join(BASE_DIR, "..", "static") ]
26.044643
91
0.683579
535e920c95d9b042b1a45ee54769faf051d34c56
1,013
py
Python
app/domains/users/views.py
Geo-Gabriel/eccomerce_nestle_mongodb
97bf5dbdc7bee20a9ca2f7cad98afc6e8f11bd3e
[ "MIT" ]
3
2020-06-21T15:51:25.000Z
2021-01-24T21:19:27.000Z
app/domains/users/views.py
Geo-Gabriel/eccomerce_nestle_mongodb
97bf5dbdc7bee20a9ca2f7cad98afc6e8f11bd3e
[ "MIT" ]
null
null
null
app/domains/users/views.py
Geo-Gabriel/eccomerce_nestle_mongodb
97bf5dbdc7bee20a9ca2f7cad98afc6e8f11bd3e
[ "MIT" ]
null
null
null
from flask import Blueprint, request, jsonify

from app.domains.users.actions import get_all_users, insert_user, get_user_by_id, update_user, delete_user

app_users = Blueprint('app.users', __name__)
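# Editor's note: the route handlers were stripped from this record. A minimal
# sketch of one GET/POST pair wired to the imported actions; the URL shape
# and the serialize() method are assumptions:
@app_users.route('/users', methods=['GET'])
def get_users():
    return jsonify([user.serialize() for user in get_all_users()]), 200

@app_users.route('/users', methods=['POST'])
def post_user():
    return jsonify(insert_user(request.get_json()).serialize()), 201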
27.378378
106
0.698914
535eb0ebfd076b333a6d2c988712739a93360c70
8,513
py
Python
legacy_code/tf_cnn_siamese/model.py
PerryXDeng/project_punyslayer
79529b020ca56a5473dbb85ac7155bc03dc5023a
[ "MIT" ]
2
2019-10-25T04:57:03.000Z
2020-06-16T00:34:18.000Z
legacy_code/tf_cnn_siamese/model.py
PerryXDeng/project_punyslayer
79529b020ca56a5473dbb85ac7155bc03dc5023a
[ "MIT" ]
null
null
null
legacy_code/tf_cnn_siamese/model.py
PerryXDeng/project_punyslayer
79529b020ca56a5473dbb85ac7155bc03dc5023a
[ "MIT" ]
1
2020-06-25T14:54:24.000Z
2020-06-25T14:54:24.000Z
import legacy_code.tf_cnn_siamese.configurations as conf
import tensorflow as tf
import numpy as np


def construct_cnn(x, conv_weights, conv_biases, fc_weights, fc_biases,
                  dropout = False):
  """
  constructs the convolution graph for one image
  :param x: input node
  :param conv_weights: convolution weights
  :param conv_biases: relu biases for each convolution
  :param fc_weights: fully connected weights, only one set should be used here
  :param fc_biases: fully connected biases, only one set should be used here
  :param dropout: whether to add a dropout layer for the fully connected layer
  :return: output node
  """
  k = conf.NUM_POOL
  for i in range(conf.NUM_CONVS):
    x = tf.nn.conv2d(x, conv_weights[i], strides=[1, 1, 1, 1], padding='SAME',
                     data_format=conf.DATA_FORMAT)
    x = tf.nn.relu(tf.nn.bias_add(x, conv_biases[i],
                                  data_format=conf.DATA_FORMAT))
    if k > 0:
      x = tf.nn.max_pool(x, ksize=conf.POOL_KDIM, strides=conf.POOL_KDIM,
                         padding='VALID', data_format=conf.DATA_FORMAT)
      k -= 1
  # Reshape the feature map cuboids into vectors for fc layers
  features_shape = x.get_shape().as_list()
  n = features_shape[0]
  m = features_shape[1] * features_shape[2] * features_shape[3]
  features = tf.reshape(x, [n, m])
  # last fc_weights determine output dimensions
  fc = tf.nn.sigmoid(tf.matmul(features, fc_weights[0]) + fc_biases[0])
  # for actual training
  if dropout:
    fc = tf.nn.dropout(fc, conf.DROP_RATE)
  return fc


def construct_logits_model(x_1, x_2, conv_weights, conv_biases, fc_weights,
                           fc_biases, dropout=False):
  """
  constructs the logit node before the final sigmoid activation
  :param x_1: input image node 1
  :param x_2: input image node 2
  :param conv_weights: nodes for convolution weights
  :param conv_biases: nodes for convolution relu biases
  :param fc_weights: nodes for fully connected weights
  :param fc_biases: nodes for fully connected biases
  :param dropout: whether to include dropout layers
  :return: logit node
  """
  with tf.name_scope("twin_1"):
    twin_1 = construct_cnn(x_1, conv_weights, conv_biases,
                           fc_weights, fc_biases, dropout)
  with tf.name_scope("twin_2"):
    twin_2 = construct_cnn(x_2, conv_weights, conv_biases,
                           fc_weights, fc_biases, dropout)
  # logits on squared difference
  sq_diff = tf.squared_difference(twin_1, twin_2)
  logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
  return logits


def construct_full_model(x_1, x_2, conv_weights, conv_biases,
                         fc_weights, fc_biases):
  """
  constructs the graph for the neural network without loss node or optimizer
  :param x_1: input image node 1
  :param x_2: input image node 2
  :param conv_weights: nodes for convolution weights
  :param conv_biases: nodes for convolution relu biases
  :param fc_weights: nodes for fully connected weights
  :param fc_biases: nodes for fully connected biases
  :return: sigmoid output node
  """
  logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
                                  fc_weights, fc_biases, dropout=False)
  return tf.nn.sigmoid(logits)


def construct_loss_optimizer(x_1, x_2, labels, conv_weights, conv_biases,
                             fc_weights, fc_biases, dropout=False,
                             lagrange=False):
  """
  constructs the neural network graph with the loss and optimizer node
  :param x_1: input image node 1
  :param x_2: input image node 2
  :param labels: expected output
  :param conv_weights: nodes for convolution weights
  :param conv_biases: nodes for convolution relu biases
  :param fc_weights: nodes for fully connected weights
  :param fc_biases: nodes for fully connected biases
  :param dropout: whether to use dropout
  :param lagrange: whether to apply constraints
  :return: the node for the optimizer as well as the loss
  """
  logits = construct_logits_model(x_1, x_2, conv_weights, conv_biases,
                                  fc_weights, fc_biases, dropout)
  # cross entropy loss on sigmoids of joined output and labels
  loss_vec = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels,
                                                     logits=logits)
  loss = tf.reduce_mean(loss_vec)
  if lagrange:
    # constraints on sigmoid layers
    regularizers = (tf.nn.l2_loss(fc_weights[0]) + tf.nn.l2_loss(fc_biases[0])
                    + tf.nn.l2_loss(fc_weights[1]) + tf.nn.l2_loss(fc_biases[1]))
    loss += conf.LAMBDA * regularizers
  # setting up the optimization
  batch = tf.Variable(0, dtype=conf.DTYPE)
  # vanilla momentum optimizer
  # accumulation = momentum * accumulation + gradient
  # every epoch: variable -= learning_rate * accumulation
  # batch_total = labels.shape[0]
  # learning_rate = tf.train.exponential_decay(
  #     conf.BASE_LEARNING_RATE,
  #     batch * conf.BATCH_SIZE,  # Current index into the dataset.
  #     batch_total,
  #     conf.DECAY_RATE,  # Decay rate.
  #     staircase=True)
  # trainer = tf.train.MomentumOptimizer(learning_rate, conf.MOMENTUM)\
  #     .minimize(loss, global_step=batch)
  # adaptive momentum estimation optimizer
  # default params: learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08
  trainer = tf.train.AdamOptimizer().minimize(loss, global_step=batch)
  return trainer, loss


def construct_joined_model(twin_1, twin_2, fc_weights, fc_biases):
  """
  constructs joined model for two sets of extracted features
  :param twin_1: features node extracted from first image
  :param twin_2: features node extracted from second image
  :param fc_weights: nodes for fully connected weights
  :param fc_biases: nodes for fully connected biases
  :return: logit node
  """
  # logits on squared difference
  sq_diff = tf.squared_difference(twin_1, twin_2)
  logits = tf.matmul(sq_diff, fc_weights[1]) + fc_biases[1]
  return tf.nn.sigmoid(logits)


def initialize_weights():
  """
  initializes the variable tensors to be trained in the neural network,
  decides network dimensions
  :return: nodes for the variables
  """
  # twin network convolution and pooling variables
  conv_weights = []
  conv_biases = []
  fc_weights = []
  fc_biases = []
  for i in range(conf.NUM_CONVS):
    if i == 0:
      inp = conf.NUM_CHANNELS
    else:
      inp = conf.NUM_FILTERS[i - 1]
    out = conf.NUM_FILTERS[i]
    conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
    weight_name = "twin_conv" + str(i + 1) + "_weights"
    bias_name = "twin_conv" + str(i + 1) + "_biases"
    conv_weights.append(tf.Variable(tf.truncated_normal(conv_dim, stddev=0.1,
                                                        seed=conf.SEED,
                                                        dtype=conf.DTYPE),
                                    name=weight_name))
    conv_biases.append(tf.Variable(tf.zeros([out], dtype=conf.DTYPE),
                                   name=bias_name))
  # twin network fully connected variables
  inp = conf.FEATURE_MAP_SIZE
  out = conf.NUM_FC_NEURONS
  fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
                                                    seed=conf.SEED,
                                                    dtype=conf.DTYPE),
                                name="twin_fc_weights"))
  fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
                               name="twin_fc_biases"))
  # joined network fully connected variables
  inp = conf.NUM_FC_NEURONS
  out = 1
  fc_weights.append(tf.Variable(tf.truncated_normal([inp, out], stddev=0.1,
                                                    seed=conf.SEED,
                                                    dtype=conf.DTYPE),
                                name="joined_fc_weights"))
  fc_biases.append(tf.Variable(tf.constant(0.1, shape=[out], dtype=conf.DTYPE),
                               name="joined_fc_biases"))
  return conv_weights, conv_biases, fc_weights, fc_biases


def num_params():
  """
  calculates the number of parameters in the model
  :return: m, number of parameters
  """
  m = 0
  for i in range(conf.NUM_CONVS):
    if i == 0:
      inp = conf.NUM_CHANNELS
    else:
      inp = conf.NUM_FILTERS[i - 1]
    out = conf.NUM_FILTERS[i]
    conv_dim = [conf.FILTER_LEN, conf.FILTER_LEN, inp, out]
    m += np.prod(conv_dim) + np.prod(out)
  inp = conf.FEATURE_MAP_SIZE
  out = conf.NUM_FC_NEURONS
  m += inp * out + out
  inp = conf.NUM_FC_NEURONS
  out = 1
  m += inp * out + out
  return m


if __name__ == "__main__":
  print("Number of Parameters: " + str(num_params()))
39.412037
80
0.670504
535f56691aa062ae2f47bfd3027e86cffdb80581
3,591
py
Python
tests/test_utils_log.py
FingerCrunch/scrapy
3225de725720bba246ba8c9845fe4b84bc0c82e7
[ "BSD-3-Clause" ]
41,267
2015-01-01T07:39:25.000Z
2022-03-31T20:09:40.000Z
tests/test_utils_log.py
FingerCrunch/scrapy
3225de725720bba246ba8c9845fe4b84bc0c82e7
[ "BSD-3-Clause" ]
4,420
2015-01-02T09:35:38.000Z
2022-03-31T22:53:32.000Z
tests/test_utils_log.py
FingerCrunch/scrapy
3225de725720bba246ba8c9845fe4b84bc0c82e7
[ "BSD-3-Clause" ]
11,080
2015-01-01T18:11:30.000Z
2022-03-31T15:33:19.000Z
import sys
import logging
import unittest

from testfixtures import LogCapture
from twisted.python.failure import Failure

from scrapy.utils.log import (failure_to_exc_info, TopLevelFormatter,
                              LogCounterHandler, StreamLogger)
from scrapy.utils.test import get_crawler
from scrapy.extensions import telnet
32.944954
77
0.662768
53620a02b1382e7015ce77097767be27a037d2cd
2,329
py
Python
astar.py
jeff012345/clue-part-duo
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
[ "MIT" ]
null
null
null
astar.py
jeff012345/clue-part-duo
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
[ "MIT" ]
null
null
null
astar.py
jeff012345/clue-part-duo
bd9ccd2ccdbc2fe358a696b31644b93e70ff874b
[ "MIT" ]
null
null
null
import heapq
from typing import List
from definitions import RoomPosition, Position
import random
import sys
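# Editor's note: the search classes were stripped from this record. A minimal
# sketch of the heapq-driven relaxation step a textbook A* over these
# position types would use; all parameter names here are assumptions:
def astar_step(open_heap, g_score, neighbors, heuristic, goal):
    # Pop the most promising node, then push any neighbor whose path improved.
    _f, current = heapq.heappop(open_heap)
    for n in neighbors(current):
        tentative = g_score[current] + 1
        if tentative < g_score.get(n, float("inf")):
            g_score[n] = tentative
            heapq.heappush(open_heap, (tentative + heuristic(n, goal), n))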
26.465909
92
0.592529
53620e1797375b57cbec5b488715571deabfedc5
19,152
py
Python
src/py_scripts/fc_phasing.py
pb-jchin/FALCON_unzip
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
[ "BSD-3-Clause-Clear" ]
2
2016-06-23T03:20:22.000Z
2016-10-07T23:45:26.000Z
src/py_scripts/fc_phasing.py
pb-jchin/FALCON_unzip
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
[ "BSD-3-Clause-Clear" ]
null
null
null
src/py_scripts/fc_phasing.py
pb-jchin/FALCON_unzip
21b1df3491e3bb7b9d8ecd13fc0c9c1a45b6393f
[ "BSD-3-Clause-Clear" ]
null
null
null
from pypeflow.common import *
from pypeflow.data import PypeLocalFile, makePypeLocalFile, fn
from pypeflow.task import PypeTask, PypeThreadTaskBase, PypeTaskBase
from pypeflow.controller import PypeWorkflow, PypeThreadWorkflow
from falcon_kit.FastaReader import FastaReader
import subprocess, shlex
import os, re

cigar_re = r"(\d+)([MIDNSHP=X])"

if __name__ == "__main__":
    import argparse
    import re
    parser = argparse.ArgumentParser(description='phasing variants and reads from a bam file')
    # we can run this in parallel mode in the future
    #parser.add_argument('--n_core', type=int, default=4,
    #                    help='number of processes used for generating consensus')
    parser.add_argument('--bam', type=str, help='path to sorted bam file', required=True)
    parser.add_argument('--fasta', type=str, help='path to the fasta file containing the contig', required=True)
    parser.add_argument('--ctg_id', type=str, help='contig identifier in the bam file', required=True)
    parser.add_argument('--base_dir', type=str, default="./", help='the output base_dir, default to current working directory')
    args = parser.parse_args()

    bam_fn = args.bam
    fasta_fn = args.fasta
    ctg_id = args.ctg_id
    base_dir = args.base_dir

    ref_seq = ""
    for r in FastaReader(fasta_fn):
        rid = r.name.split()[0]
        if rid != ctg_id:
            continue
        ref_seq = r.sequence.upper()

    PypeThreadWorkflow.setNumThreadAllowed(1, 1)
    wf = PypeThreadWorkflow()

    bam_file = makePypeLocalFile(bam_fn)
    vmap_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_map") )
    vpos_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "variant_pos") )
    q_id_map_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "q_id_map") )

    parameters = {}
    parameters["ctg_id"] = ctg_id
    parameters["ref_seq"] = ref_seq
    parameters["base_dir"] = base_dir

    # NOTE: the task functions wrapped below (make_het_call,
    # generate_association_table, get_phased_blocks, get_phased_reads)
    # were stripped from this record.
    make_het_call_task = PypeTask( inputs = { "bam_file": bam_file },
                                   outputs = { "vmap_file": vmap_file,
                                               "vpos_file": vpos_file,
                                               "q_id_map_file": q_id_map_file },
                                   parameters = parameters,
                                   TaskType = PypeThreadTaskBase,
                                   URL = "task://localhost/het_call") (make_het_call)

    wf.addTasks([make_het_call_task])

    atable_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "atable") )
    parameters = {}
    parameters["ctg_id"] = ctg_id
    parameters["base_dir"] = base_dir
    generate_association_table_task = PypeTask( inputs = { "vmap_file": vmap_file },
                                                outputs = { "atable_file": atable_file },
                                                parameters = parameters,
                                                TaskType = PypeThreadTaskBase,
                                                URL = "task://localhost/g_atable") (generate_association_table)

    wf.addTasks([generate_association_table_task])

    phased_variant_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_variants") )
    get_phased_blocks_task = PypeTask( inputs = { "vmap_file": vmap_file,
                                                  "atable_file": atable_file },
                                       outputs = { "phased_variant_file": phased_variant_file },
                                       TaskType = PypeThreadTaskBase,
                                       URL = "task://localhost/get_phased_blocks") (get_phased_blocks)
    wf.addTasks([get_phased_blocks_task])

    phased_read_file = makePypeLocalFile( os.path.join(base_dir, ctg_id, "phased_reads") )
    get_phased_reads_task = PypeTask( inputs = { "vmap_file": vmap_file,
                                                 "q_id_map_file": q_id_map_file,
                                                 "phased_variant_file": phased_variant_file },
                                      outputs = { "phased_read_file": phased_read_file },
                                      parameters = {"ctg_id": ctg_id},
                                      TaskType = PypeThreadTaskBase,
                                      URL = "task://localhost/get_phased_reads") (get_phased_reads)
    wf.addTasks([get_phased_reads_task])

    wf.refreshTargets()

    #with open("fc_phasing_wf.dot", "w") as f:
    #    print >>f, wf.graphvizDot
33.897345
155
0.477339
5362c554ddeabe0765b25a2b55000d5493c91742
8,490
py
Python
augmentation/combineds/wgan_gp_straight.py
pabloduque0/cnn_deconv_viz
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
[ "MIT" ]
null
null
null
augmentation/combineds/wgan_gp_straight.py
pabloduque0/cnn_deconv_viz
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
[ "MIT" ]
null
null
null
augmentation/combineds/wgan_gp_straight.py
pabloduque0/cnn_deconv_viz
3fc3d8a9dbad8e8e28d4df4023bdb438e4c9cf85
[ "MIT" ]
null
null
null
from keras.datasets import mnist
from keras.layers.merge import _Merge
from keras.layers import Input, Dense, Reshape, Flatten, Dropout
from keras.layers import BatchNormalization, Activation, ZeroPadding2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import RMSprop
from functools import partial

from augmentation.discriminators import wasserstein_discriminator
from augmentation.generators import wasserstein_generator

import keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
37.566372
99
0.593404
53630922cfad6e358b0a44487fb3bc1d5337ec0e
231
py
Python
Core/Block_C/RC480_Factory.py
BernardoB95/Extrator_SPEDFiscal
10b4697833c561d24654251da5f22d044f03fc16
[ "MIT" ]
1
2021-04-25T13:53:20.000Z
2021-04-25T13:53:20.000Z
Core/Block_C/RC480_Factory.py
BernardoB95/Extrator_SPEDFiscal
10b4697833c561d24654251da5f22d044f03fc16
[ "MIT" ]
null
null
null
Core/Block_C/RC480_Factory.py
BernardoB95/Extrator_SPEDFiscal
10b4697833c561d24654251da5f22d044f03fc16
[ "MIT" ]
null
null
null
from Core.IFactory import IFactory
from Regs.Block_C import RC480
21
40
0.69697
5363e8818d7ff22e74bce67b8f3e1086a76a24c5
1,594
py
Python
keras2onnx/proto/__init__.py
mgoldchild/keras-onnx
8e700572b89a907ca21a3096556f64b62b7aa76c
[ "MIT" ]
null
null
null
keras2onnx/proto/__init__.py
mgoldchild/keras-onnx
8e700572b89a907ca21a3096556f64b62b7aa76c
[ "MIT" ]
null
null
null
keras2onnx/proto/__init__.py
mgoldchild/keras-onnx
8e700572b89a907ca21a3096556f64b62b7aa76c
[ "MIT" ]
null
null
null
###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import os
import onnx
from distutils.version import StrictVersion

# Rather than using ONNX protobuf definition throughout our codebase, we import
# ONNX protobuf definition here so that we can conduct quick fixes by
# overwriting ONNX functions without changing any lines elsewhere.
from onnx import onnx_pb as onnx_proto
from onnx import helper

_check_onnx_version()

is_tf_keras = False
if os.environ.get('TF_KERAS', '0') != '0':
    is_tf_keras = True

if is_tf_keras:
    from tensorflow.python import keras
else:
    try:
        import keras
    except ImportError:
        is_tf_keras = True
        from tensorflow.python import keras
33.914894
117
0.69197
536436e3554ba3a4da46ab96d890765a1f73000c
554
py
Python
tests/test_load.py
ocefpaf/xroms
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
[ "MIT" ]
4
2020-01-21T21:24:17.000Z
2020-10-02T03:09:32.000Z
tests/test_load.py
ocefpaf/xroms
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
[ "MIT" ]
1
2020-04-08T00:11:39.000Z
2020-04-25T08:03:45.000Z
tests/test_load.py
ocefpaf/xroms
763d6e678e28fe074e0aaab26fecd2b74e51a8b0
[ "MIT" ]
1
2020-04-06T06:42:36.000Z
2020-04-06T06:42:36.000Z
'''Test package.'''

import xroms
from glob import glob
import os


def test_open_netcdf():
    '''Test xroms.open_netcdf().'''

    base = os.path.join(xroms.__path__[0], '..', 'tests', 'input')
    files = glob('%s/ocean_his_000?.nc' % base)
    ds = xroms.open_netcdf(files)
    assert ds


def test_open_zarr():
    '''Test xroms.open_zarr().'''

    base = os.path.join(xroms.__path__[0], '..', 'tests', 'input')
    files = glob('%s/ocean_his_000?' % base)
    ds = xroms.open_zarr(files, chunks={'ocean_time': 2})
    assert ds
21.307692
63
0.597473
53646f201e13a30e6efd94fa6ebf56d02fafc4af
1,381
py
Python
demoproject/demoproject/urls.py
alvnary18/django-nvd3
4b7dffb1107b8202698212b99c26d1d0097afd1d
[ "MIT" ]
302
2015-01-06T14:38:22.000Z
2022-01-11T15:28:07.000Z
demoproject/demoproject/urls.py
alvnary18/django-nvd3
4b7dffb1107b8202698212b99c26d1d0097afd1d
[ "MIT" ]
63
2015-01-03T14:39:29.000Z
2021-04-19T09:29:15.000Z
demoproject/demoproject/urls.py
alvnary18/django-nvd3
4b7dffb1107b8202698212b99c26d1d0097afd1d
[ "MIT" ]
104
2015-01-07T21:40:53.000Z
2021-02-22T08:21:02.000Z
from django.conf.urls import url

from . import views

urlpatterns = [
    url(r'^$', views.home, name='home'),
    url(r'^piechart/', views.demo_piechart, name='demo_piechart'),
    url(r'^linechart/', views.demo_linechart, name='demo_linechart'),
    url(r'^linechart_without_date/', views.demo_linechart_without_date, name='demo_linechart_without_date'),
    url(r'^linewithfocuschart/', views.demo_linewithfocuschart, name='demo_linewithfocuschart'),
    url(r'^multibarchart/', views.demo_multibarchart, name='demo_multibarchart'),
    url(r'^stackedareachart/', views.demo_stackedareachart, name='demo_stackedareachart'),
    url(r'^multibarhorizontalchart/', views.demo_multibarhorizontalchart, name='demo_multibarhorizontalchart'),
    url(r'^lineplusbarchart/', views.demo_lineplusbarchart, name='demo_lineplusbarchart'),
    url(r'^cumulativelinechart/', views.demo_cumulativelinechart, name='demo_cumulativelinechart'),
    url(r'^discretebarchart/', views.demo_discretebarchart, name='demo_discretebarchart'),
    url(r'^discretebarchart_with_date/', views.demo_discretebarchart_with_date, name='demo_discretebarchart_date'),
    url(r'^scatterchart/', views.demo_scatterchart, name='demo_scatterchart'),
    url(r'^linechart_with_ampm/', views.demo_linechart_with_ampm, name='demo_linechart_with_ampm'),
    # url(r'^demoproject/', include('demoproject.foo.urls')),
]
62.772727
115
0.766836
536501345147bcbb0b1035da0ccdac716533b14a
2,557
py
Python
wired_version/mcs_wired.py
Harri-Renney/Mind_Control_Synth
5a892a81a3f37444ef154f29a62d44fa1476bfbd
[ "MIT" ]
1
2020-12-20T09:53:20.000Z
2020-12-20T09:53:20.000Z
wired_version/mcs_wired.py
Harri-Renney/Mind_Control_Synth
5a892a81a3f37444ef154f29a62d44fa1476bfbd
[ "MIT" ]
null
null
null
wired_version/mcs_wired.py
Harri-Renney/Mind_Control_Synth
5a892a81a3f37444ef154f29a62d44fa1476bfbd
[ "MIT" ]
null
null
null
import time
import mido
from pinaps.piNapsController import PiNapsController
from NeuroParser import NeuroParser

"""
Equation of motion used to modify vibrato.
"""
CTRL_LFO_PITCH = 26
CTRL_LFO_RATE = 29
MIDI_MESSAGE_PERIOD = 1

vibratoPos = 0
vibratoVel = 0
vibratoAcc = 4

if __name__ == '__main__':
    main()
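# Editor's note: main() and the update loop were stripped from this record.
# A minimal sketch of the equation-of-motion step the docstring refers to,
# integrating the module-level state above; the semi-implicit Euler form is
# an assumption:
def step_vibrato(dt=MIDI_MESSAGE_PERIOD):
    global vibratoPos, vibratoVel
    vibratoVel += vibratoAcc * dt
    vibratoPos += vibratoVel * dt
    return vibratoPos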
35.027397
134
0.658193
5366c96f79a37fc8c50479d35ab11dc62e0b3949
15,109
py
Python
pipeline/visualization/single_tab.py
windblood/kafka_stock
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
[ "Apache-2.0" ]
45
2019-08-06T09:06:58.000Z
2022-03-14T06:13:33.000Z
pipeline/visualization/single_tab.py
windblood/kafka_stock
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
[ "Apache-2.0" ]
2
2021-05-10T09:23:12.000Z
2021-12-20T07:06:54.000Z
pipeline/visualization/single_tab.py
windblood/kafka_stock
8dbe4a1cf5c367b3c210683d4027bbfaf955ed41
[ "Apache-2.0" ]
14
2020-03-19T04:38:25.000Z
2022-03-16T06:37:04.000Z
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 31 11:47:47 2019

@author: yanyanyu
"""

"""
Tab1-plot1: candlestick
"""
import json
import datetime
import pandas as pd
from math import pi
from random import choice
from pytz import timezone
from bokeh.plotting import figure, show
from bokeh.palettes import all_palettes, Set3
from bokeh.models import ColumnDataSource, Select, HoverTool, LinearAxis, LabelSet, Range1d, PreText, Div
from warehouse import CassandraStorage
from util.util import pandas_factory, symbol_list, splitTextToTriplet, prev_weekday
from util.config import path, timeZone
48.117834
144
0.53253
53683ad065e876599c6cda203cf6ca253e4f6885
7,499
py
Python
traffic_predict/model.py
Wangjw6/project
daae9de42fe7bf7ff29c20246e1164b62b7cef4a
[ "MIT" ]
null
null
null
traffic_predict/model.py
Wangjw6/project
daae9de42fe7bf7ff29c20246e1164b62b7cef4a
[ "MIT" ]
null
null
null
traffic_predict/model.py
Wangjw6/project
daae9de42fe7bf7ff29c20246e1164b62b7cef4a
[ "MIT" ]
null
null
null
# -*- coding:utf-8 -*-
import tensorflow as tf
43.346821
119
0.642619
536933e136a1518afd79a7f6f89175f2c5e084a2
2,755
py
Python
VirtualMouse-mediapipe.py
SanLiWuXun/Virtual-Control
c3b38d4e2df201af851ca70a90de1fdc770158e4
[ "MIT" ]
null
null
null
VirtualMouse-mediapipe.py
SanLiWuXun/Virtual-Control
c3b38d4e2df201af851ca70a90de1fdc770158e4
[ "MIT" ]
null
null
null
VirtualMouse-mediapipe.py
SanLiWuXun/Virtual-Control
c3b38d4e2df201af851ca70a90de1fdc770158e4
[ "MIT" ]
null
null
null
import cv2
import mediapipe as mp
from time import sleep
import numpy as np
import autopy
import pynput

wCam, hCam = 1280, 720
wScr, hScr = autopy.screen.size()

cap = cv2.VideoCapture(0)
cap.set(3, wCam)
cap.set(4, hCam)

mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands

mouse = pynput.mouse.Controller()

with mp_hands.Hands(
        min_detection_confidence=0.8,
        min_tracking_confidence=0.5) as hands:
    while cap.isOpened():
        success, image = cap.read()
        if not success:
            print("Ignoring empty camera frame.")
            # If loading a video, use 'break' instead of 'continue'.
            continue

        # Flip the image horizontally for a later selfie-view display, and convert
        # the BGR image to RGB.
        image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
        # To improve performance, optionally mark the image as not writeable to
        # pass by reference.
        image.flags.writeable = False
        results = hands.process(image)

        # Draw the hand annotations on the image.
        image.flags.writeable = True
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        if results.multi_hand_landmarks:
            for hand_landmarks in results.multi_hand_landmarks:
                mp_drawing.draw_landmarks(
                    image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
                #cx, cy = int(hand_landmarks.landmark[8].x*wCam), int(hand_landmarks.landmark[8].y*hCam)
                targetX, targetY = int(hand_landmarks.landmark[8].x*wScr), int(hand_landmarks.landmark[8].y*hScr)
                mouse.position = (targetX, targetY)

                xy_dis_8_12, z_dis_8_12 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 8, 12)
                xy_dis_12_16, z_dis_12_16 = findNodeDistance(hCam, wCam, hand_landmarks.landmark, 12, 16)

                if xy_dis_8_12 < 40 and z_dis_8_12 < 20:
                    mouse.click(pynput.mouse.Button.left)
                    sleep(0.3)
                if xy_dis_12_16 < 40 and z_dis_12_16 < 20:
                    mouse.click(pynput.mouse.Button.left, 2)
                    sleep(0.3)

        cv2.imshow('MediaPipe Hands', image)
        if cv2.waitKey(5) & 0xFF == 27:
            break

cap.release()
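# Editor's note: findNodeDistance() is called above but was stripped from
# this record (in the real file it precedes the capture loop). A minimal
# sketch consistent with the call sites; the depth scaling is an assumption:
def findNodeDistance(h, w, landmarks, i, j):
    # XY distance in pixels between landmarks i and j, plus the scaled
    # difference of their relative depths.
    dx = (landmarks[i].x - landmarks[j].x) * w
    dy = (landmarks[i].y - landmarks[j].y) * h
    xy_dis = (dx ** 2 + dy ** 2) ** 0.5
    z_dis = abs(landmarks[i].z - landmarks[j].z) * w
    return xy_dis, z_dis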
35.320513
113
0.622142
536a166e562f305f44e421c35ddf14c30aa9d207
2,804
py
Python
util/tools/split_train_val.py
JochenZoellner/tf_neiss-1
c91019e5bce6d3c7512237eec5ea997fd95304ac
[ "Apache-2.0" ]
null
null
null
util/tools/split_train_val.py
JochenZoellner/tf_neiss-1
c91019e5bce6d3c7512237eec5ea997fd95304ac
[ "Apache-2.0" ]
1
2020-08-07T13:04:43.000Z
2020-08-10T12:32:46.000Z
util/tools/split_train_val.py
JochenZoellner/tf_neiss-1
c91019e5bce6d3c7512237eec5ea997fd95304ac
[ "Apache-2.0" ]
1
2019-12-16T15:46:45.000Z
2019-12-16T15:46:45.000Z
import glob
import logging
import os
import shutil
import sys

"""script to divide a folder with generated/training data into a train and val folder
- val folder contains 500 Samples if not changed in source code
- DOES NOT work if images structured in subfolders, see below
- if there is no dir in the given folder -> split this folder
- if there are dir/s in the folder -> perform split on each folder
- split on sorted list -> repeated runs should give the same result
"""

if __name__ == '__main__':
    main(sys.argv)
40.057143
117
0.579529
536b5c354fdb15e9bd9be57f477eacb913ce0e22
4,793
py
Python
Chapter10/neuroevolution/distributed_helpers.py
KonstantinKlepikov/Hands-on-Neuroevolution-with-Python
cdd35fa21f2a091d176c140427ab1644d9ecd1f2
[ "MIT" ]
51
2019-06-03T12:45:13.000Z
2022-02-16T15:48:28.000Z
Chapter10/neuroevolution/distributed_helpers.py
123mitnik/Hands-on-Neuroevolution-with-Python
b65c7dee49303c296ae22f2d82422614bdf7a168
[ "MIT" ]
3
2020-02-20T08:13:34.000Z
2020-09-16T10:11:52.000Z
Chapter10/neuroevolution/distributed_helpers.py
123mitnik/Hands-on-Neuroevolution-with-Python
b65c7dee49303c296ae22f2d82422614bdf7a168
[ "MIT" ]
30
2019-05-24T02:02:47.000Z
2022-03-03T22:48:22.000Z
import threading
from queue import Queue
from multiprocessing.pool import ApplyResult

import tabular_logger as tlogger


# The class that owned this method was stripped from the record; only its
# close() survived.
def close(self):
    self.available_workers.put(None)
    self.input_queue.put(None)
    self.done_buffer.put(None)


class AsyncTaskHub(object):
    pass
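# Editor's note: a minimal sketch of the worker-hub wiring that the surviving
# close() implies -- three queues coordinating idle workers, pending tasks and
# finished results. Everything beyond the attribute names used in close() is
# an assumption:
class WorkerHub(object):
    def __init__(self, workers):
        self.available_workers = Queue()
        self.input_queue = Queue()
        self.done_buffer = Queue()
        for w in workers:
            self.available_workers.put(w)

    def close(self):
        self.available_workers.put(None)
        self.input_queue.put(None)
        self.done_buffer.put(None)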
32.828767
119
0.595869
536b943feabc16b11630bbf2f1fc6f9c7d3d5261
557
py
Python
make/platform/registry.py
tompis/casual
d838716c7052a906af8a19e945a496acdc7899a2
[ "MIT" ]
null
null
null
make/platform/registry.py
tompis/casual
d838716c7052a906af8a19e945a496acdc7899a2
[ "MIT" ]
null
null
null
make/platform/registry.py
tompis/casual
d838716c7052a906af8a19e945a496acdc7899a2
[ "MIT" ]
null
null
null
import os

registry = {}
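# Editor's note: only the module-level dict survived in this record. A
# minimal sketch of the decorator such platform registries usually pair with
# it; the key convention is an assumption:
def register(name):
    def decorator(cls):
        registry[name] = cls
        return cls
    return decorator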
16.878788
57
0.563734
536bfa0db6a83d2b284796ec230b11252da51887
553
py
Python
mailer/admin.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
5
2019-07-01T17:50:44.000Z
2022-02-20T02:44:42.000Z
mailer/admin.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
3
2020-06-05T21:44:33.000Z
2021-06-10T21:39:26.000Z
mailer/admin.py
everyvoter/everyvoter
65d9b8bdf9b5c64057135c279f6e03b6c207e0fa
[ "MIT" ]
1
2021-12-09T06:32:40.000Z
2021-12-09T06:32:40.000Z
"""Django Admin Panels for App""" from django.contrib import admin from mailer import models
29.105263
73
0.672694
536e0e040e307bf9f906164571d5de4002db0a3c
492
py
Python
tests/settings.py
systemallica/django-belt
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
[ "MIT" ]
2
2019-10-08T08:56:46.000Z
2020-10-10T08:29:43.000Z
tests/settings.py
systemallica/django-belt
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
[ "MIT" ]
null
null
null
tests/settings.py
systemallica/django-belt
3035a8bad26a108d9c78daaccb81ab8a9a9ebd41
[ "MIT" ]
2
2019-10-08T08:59:54.000Z
2021-03-18T18:15:38.000Z
DEBUG = True
USE_TZ = True
SECRET_KEY = "dummy"

DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}

INSTALLED_APPS = [
    "django.contrib.auth",
    "django.contrib.contenttypes",
    "django.contrib.sites",
    "rest_framework",
    "django_filters",
    "belt",
    "tests.app",
]

SITE_ID = 1

ROOT_URLCONF = "tests.app.urls"

MIDDLEWARE = ()

REST_FRAMEWORK = {
    "DEFAULT_FILTER_BACKENDS": ("django_filters.rest_framework.DjangoFilterBackend",)
}
18.222222
85
0.668699
72559fed2c0dfd5ae9506264bf674d020588c5b0
26,482
py
Python
Cell_Generation/fabric_CMC_NMOS.py
ALIGN-analoglayout/2018-01-ALIGN
931263cec2efc05d58657af9ecca88ae0040c3a5
[ "BSD-3-Clause" ]
8
2019-01-10T06:34:26.000Z
2021-06-30T05:44:49.000Z
Cell_Generation/fabric_CMC_NMOS.py
ALIGN-analoglayout/2018-01-ALIGN
931263cec2efc05d58657af9ecca88ae0040c3a5
[ "BSD-3-Clause" ]
null
null
null
Cell_Generation/fabric_CMC_NMOS.py
ALIGN-analoglayout/2018-01-ALIGN
931263cec2efc05d58657af9ecca88ae0040c3a5
[ "BSD-3-Clause" ]
2
2019-01-09T19:58:28.000Z
2019-03-08T22:58:50.000Z
import sys
import json
import transformation

if __name__ == "__main__":
    fin_u1 = int(sys.argv[1])
    x_cells = int(sys.argv[2])
    y_cells = int(sys.argv[3])

    assert (x_cells % 2) == 0

    gate_u = 2
    if fin_u1 % 2 != 0:
        fin_u = fin_u1 + 1
    else:
        fin_u = fin_u1

    uc = UnitCell()

    for (x, y) in ((x, y) for x in range(x_cells) for y in range(y_cells)):
        uc.unit(x, y)

    uc.computeBbox()

    with open("./mydesign_dr_globalrouting.json", "wt") as fp:
        data = {'bbox': uc.bbox.toList(),
                'globalRoutes': [],
                'globalRouteGrid': [],
                'terminals': uc.terminals}
        fp.write(json.dumps(data, indent=2) + '\n')
55.987315
323
0.551809
725600e7a0a1963a9922b5936348396ff3c1bd52
13,457
py
Python
docs/testcases/all_in_one.py
tiramtaramta/conduit
ae4ca8e64fe64c2b6702d803d799e380fda84a92
[ "MIT" ]
null
null
null
docs/testcases/all_in_one.py
tiramtaramta/conduit
ae4ca8e64fe64c2b6702d803d799e380fda84a92
[ "MIT" ]
null
null
null
docs/testcases/all_in_one.py
tiramtaramta/conduit
ae4ca8e64fe64c2b6702d803d799e380fda84a92
[ "MIT" ]
null
null
null
from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
import os
import time
import csv
from webdriver_manager.chrome import ChromeDriverManager
import math

from basic_function import basic_login, find_element
37.48468
120
0.630081
72562c1a74abbba1475bce520bf08235d1927c9c
116
py
Python
config.py
amalshaji/python-playground
bd3a88a493f36230958613d60a9d70d64f971dba
[ "MIT" ]
14
2021-07-03T10:45:04.000Z
2022-03-15T18:42:29.000Z
config.py
amalshaji/python-playground
bd3a88a493f36230958613d60a9d70d64f971dba
[ "MIT" ]
null
null
null
config.py
amalshaji/python-playground
bd3a88a493f36230958613d60a9d70d64f971dba
[ "MIT" ]
1
2021-08-19T12:06:51.000Z
2021-08-19T12:06:51.000Z
from pydantic import BaseSettings

settings = Settings()
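# Editor's note: the Settings class was stripped from this record (it must
# precede the instantiation above in the real file). A minimal sketch of the
# pydantic pattern that line expects; the field and env file are assumptions:
class Settings(BaseSettings):
    debug: bool = False

    class Config:
        env_file = ".env"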
12.888889
33
0.775862
725642bee9a909399840fd99543e731184a069ea
819
py
Python
IV_semester/os/configs.py
dainiusjocas/labs
25aa0ae2032681dbaf0afd83f3d80bedddea6407
[ "Beerware" ]
1
2019-04-16T22:05:42.000Z
2019-04-16T22:05:42.000Z
IV_semester/os/configs.py
dainiusjocas/labs
25aa0ae2032681dbaf0afd83f3d80bedddea6407
[ "Beerware" ]
null
null
null
IV_semester/os/configs.py
dainiusjocas/labs
25aa0ae2032681dbaf0afd83f3d80bedddea6407
[ "Beerware" ]
null
null
null
#!/usr/bin/env python
'''
This module provides configuration options for OS project. No more magic numbers!
'''

BLOCK_SIZE = 16  # words
WORD_SIZE = 4  # bytes

# length of RS in blocks
RESTRICTED_LENGTH = 1

# length of DS in blocks
DS_LENGTH = 6

# timer value
TIMER_VALUE = 10

# buffer size
BUFFER_SIZE = 16

# number of blocks in HD
HD_BLOCKS_SIZE = 500

# default priorities
ROOT_PRIORITY = 40
VM_PRIORITY = 50
LOADER_PRIORITY = 60
INTERRUPT_PRIORITY = 70
PRINT_PRIORITY = 70

# Process states
RUNNING_STATE = 'running'
READY_STATE = 'ready'
BLOCKED_STATE = 'blocked'

# Page tables
PAGE_TABLE_STARTING_BLOCK = 0
PAGE_TABLE_ENDING_BLOCK = 14

# Shared memory
SH_MEMEORY_STARTING_BLOCK = 15
SH_MEMORY_ENDING_BLOCK = 31

# blocks dedicated for user tasks are from
USER_STARTING_BLOCK = 32
USER_ENDING_BLOCK = 255
18.2
89
0.764347
7256bed763fbd51245f430291a65885eb6f4534d
3,022
py
Python
roboticstoolbox/models/URDF/Puma560.py
Russ76/robotics-toolbox-python
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
[ "MIT" ]
null
null
null
roboticstoolbox/models/URDF/Puma560.py
Russ76/robotics-toolbox-python
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
[ "MIT" ]
null
null
null
roboticstoolbox/models/URDF/Puma560.py
Russ76/robotics-toolbox-python
4b3e82a6522757ffde1f83aef8d05b3ad475e9de
[ "MIT" ]
null
null
null
#!/usr/bin/env python

import numpy as np
from roboticstoolbox.robot.ERobot import ERobot
from math import pi


if __name__ == "__main__":   # pragma nocover

    robot = Puma560()
    print(robot)
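# Editor's note: the Puma560 class was stripped from this record (it precedes
# the __main__ guard in the real file). A minimal sketch of the ERobot-subclass
# pattern the toolbox's URDF models follow; the URDF path, the URDF_read return
# shape and the manufacturer string are assumptions:
class Puma560(ERobot):
    def __init__(self):
        links, name = self.URDF_read("puma560_description/urdf/puma560_robot.urdf")
        super().__init__(links, name=name, manufacturer="Unimation")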
31.154639
130
0.610192
7258cd5e14cfcac3370c20a51efc82ed53ffd2ed
26,052
py
Python
functest/tests/unit/odl/test_odl.py
hashnfv/hashnfv-functest
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
[ "Apache-2.0" ]
null
null
null
functest/tests/unit/odl/test_odl.py
hashnfv/hashnfv-functest
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
[ "Apache-2.0" ]
null
null
null
functest/tests/unit/odl/test_odl.py
hashnfv/hashnfv-functest
ff34df7ec7be6cd5fcf0f7557b393bd5d6266047
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python

# Copyright (c) 2016 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0

"""Define the classes required to fully cover odl."""

import errno
import logging
import os
import unittest

from keystoneauth1.exceptions import auth_plugins
import mock
from robot.errors import DataError, RobotError
from robot.result import model
from robot.utils.robottime import timestamp_to_secs
import six
from six.moves import urllib

from functest.core import testcase
from functest.opnfv_tests.sdn.odl import odl

__author__ = "Cedric Ollivier <cedric.ollivier@orange.com>"


# The TestCase class that owned this method was stripped from the record;
# only this test survived.
def test_set_vars_auth1(self):
    self._test_set_vars(
        "@{AUTH1} foo bar",
        "@{AUTH1} foo bar")


if __name__ == "__main__":
    logging.disable(logging.CRITICAL)
    unittest.main(verbosity=2)
40.642746
79
0.609857
7259d1c0671ff1b759aee401e67ca154f987dcca
5,059
py
Python
ntpclients/ntptrace.py
OptimalRanging/NTPsec
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
[ "CC-BY-4.0", "BSD-2-Clause", "NTP", "MIT", "BSD-3-Clause" ]
null
null
null
ntpclients/ntptrace.py
OptimalRanging/NTPsec
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
[ "CC-BY-4.0", "BSD-2-Clause", "NTP", "MIT", "BSD-3-Clause" ]
null
null
null
ntpclients/ntptrace.py
OptimalRanging/NTPsec
7fa9b38c3e91f96b173ffa02bafa29cf81173cf7
[ "CC-BY-4.0", "BSD-2-Clause", "NTP", "MIT", "BSD-3-Clause" ]
1
2021-09-24T18:19:49.000Z
2021-09-24T18:19:49.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
ntptrace - trace peers of an NTP server

Usage: ntptrace [-n | --numeric] [-m number | --max-hosts=number]
       [-r hostname | --host=hostname] [--help | --more-help] hostname

See the manual page for details.
"""
# SPDX-License-Identifier: BSD-2-Clause

from __future__ import print_function

import getopt
import re
import subprocess
import sys

try:
    import ntp.util
except ImportError as e:
    sys.stderr.write(
        "ntptrace: can't find Python NTP library.\n")
    sys.stderr.write("%s\n" % e)
    sys.exit(1)


usage = r"""ntptrace - trace peers of an NTP server
USAGE: ntptrace [-<flag> [<val>] | --<name>[{=| }<val>]]... [host]

    -n, --numeric      Print IP addresses instead of hostnames
    -m, --max-hosts=num  Maximum number of peers to trace
    -r, --host=str     Single remote host
    -?, --help         Display usage information and exit
        --more-help    Pass the extended usage text through a pager

Options are specified by doubled hyphens and their name or by a single
hyphen and the flag character.""" + "\n"

try:
    (options, arguments) = getopt.getopt(
        sys.argv[1:], "m:nr:?",
        ["help", "host=", "max-hosts=", "more-help", "numeric"])
except getopt.GetoptError as err:
    sys.stderr.write(str(err) + "\n")
    raise SystemExit(1)

numeric = False
maxhosts = 99
host = '127.0.0.1'

for (switch, val) in options:
    if switch == "-m" or switch == "--max-hosts":
        errmsg = "Error: -m parameter '%s' not a number\n"
        maxhosts = ntp.util.safeargcast(val, int, errmsg, usage)
    elif switch == "-n" or switch == "--numeric":
        numeric = True
    elif switch == "-r" or switch == "--host":
        host = val
    elif switch == "-?" or switch == "--help" or switch == "--more-help":
        print(usage, file=sys.stderr)
        raise SystemExit(0)

if len(arguments):
    host = arguments[0]

hostcount = 0

while True:
    hostcount += 1

    info = get_info(host)

    if info is None:
        break

    if not numeric:
        host = ntp.util.canonicalize_dns(host)

    print("%s: stratum %d, offset %f, synch distance %f" %
          (host, int(info['stratum']), info['offset'], info['syncdistance']),
          end='')
    if int(info['stratum']) == 1:
        print(", refid '%s'" % info['refid'], end='')
    print()

    if (int(info['stratum']) == 0 or int(info['stratum']) == 1 or
            int(info['stratum']) == 16):
        break

    if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', info['refid']):
        break

    if hostcount == maxhosts:
        break

    next_host = get_next_host(info['peer'], host)

    if next_host is None:
        break
    if re.search(r'^127\.127\.\d{1,3}\.\d{1,3}$', next_host):
        break

    host = next_host
27.796703
77
0.555841
7259dd2585e473152b9b222537cbe864940bc023
7,529
py
Python
lbrynet/wallet/server/block_processor.py
abueide/lbry
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
[ "MIT" ]
null
null
null
lbrynet/wallet/server/block_processor.py
abueide/lbry
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
[ "MIT" ]
null
null
null
lbrynet/wallet/server/block_processor.py
abueide/lbry
7f5deaf6c80422a30b3714d4bf12e028756ed9fe
[ "MIT" ]
null
null
null
import struct

import msgpack

from lbrynet.wallet.transaction import Transaction, Output
from torba.server.hash import hash_to_hex_str
from torba.server.block_processor import BlockProcessor

from lbrynet.schema.claim import Claim

from lbrynet.wallet.server.model import ClaimInfo
48.574194
119
0.663036
725bc15a18df21922924af39352a1db6548ae9d5
5,450
py
Python
ProjectEuler.Problem.013.py
jihunroh/ProjectEuler-Python
2fceaf5c3dd61038004b6128c5d9ee7a76142bca
[ "MIT" ]
null
null
null
ProjectEuler.Problem.013.py
jihunroh/ProjectEuler-Python
2fceaf5c3dd61038004b6128c5d9ee7a76142bca
[ "MIT" ]
null
null
null
ProjectEuler.Problem.013.py
jihunroh/ProjectEuler-Python
2fceaf5c3dd61038004b6128c5d9ee7a76142bca
[ "MIT" ]
null
null
null
from ProjectEulerCommons.Base import *

numbers_list = """37107287533902102798797998220837590246510135740250
46376937677490009712648124896970078050417018260538
74324986199524741059474233309513058123726617309629
91942213363574161572522430563301811072406154908250
23067588207539346171171980310421047513778063246676
89261670696623633820136378418383684178734361726757
28112879812849979408065481931592621691275889832738
44274228917432520321923589422876796487670272189318
47451445736001306439091167216856844588711603153276
70386486105843025439939619828917593665686757934951
62176457141856560629502157223196586755079324193331
64906352462741904929101432445813822663347944758178
92575867718337217661963751590579239728245598838407
58203565325359399008402633568948830189458628227828
80181199384826282014278194139940567587151170094390
35398664372827112653829987240784473053190104293586
86515506006295864861532075273371959191420517255829
71693888707715466499115593487603532921714970056938
54370070576826684624621495650076471787294438377604
53282654108756828443191190634694037855217779295145
36123272525000296071075082563815656710885258350721
45876576172410976447339110607218265236877223636045
17423706905851860660448207621209813287860733969412
81142660418086830619328460811191061556940512689692
51934325451728388641918047049293215058642563049483
62467221648435076201727918039944693004732956340691
15732444386908125794514089057706229429197107928209
55037687525678773091862540744969844508330393682126
18336384825330154686196124348767681297534375946515
80386287592878490201521685554828717201219257766954
78182833757993103614740356856449095527097864797581
16726320100436897842553539920931837441497806860984
48403098129077791799088218795327364475675590848030
87086987551392711854517078544161852424320693150332
59959406895756536782107074926966537676326235447210
69793950679652694742597709739166693763042633987085
41052684708299085211399427365734116182760315001271
65378607361501080857009149939512557028198746004375
35829035317434717326932123578154982629742552737307
94953759765105305946966067683156574377167401875275
88902802571733229619176668713819931811048770190271
25267680276078003013678680992525463401061632866526
36270218540497705585629946580636237993140746255962
24074486908231174977792365466257246923322810917141
91430288197103288597806669760892938638285025333403
34413065578016127815921815005561868836468420090470
23053081172816430487623791969842487255036638784583
11487696932154902810424020138335124462181441773470
63783299490636259666498587618221225225512486764533
67720186971698544312419572409913959008952310058822
95548255300263520781532296796249481641953868218774
76085327132285723110424803456124867697064507995236
37774242535411291684276865538926205024910326572967
23701913275725675285653248258265463092207058596522
29798860272258331913126375147341994889534765745501
18495701454879288984856827726077713721403798879715
38298203783031473527721580348144513491373226651381
34829543829199918180278916522431027392251122869539
40957953066405232632538044100059654939159879593635
29746152185502371307642255121183693803580388584903
41698116222072977186158236678424689157993532961922
62467957194401269043877107275048102390895523597457
23189706772547915061505504953922979530901129967519
86188088225875314529584099251203829009407770775672
11306739708304724483816533873502340845647058077308
82959174767140363198008187129011875491310547126581
97623331044818386269515456334926366572897563400500
42846280183517070527831839425882145521227251250327
55121603546981200581762165212827652751691296897789
32238195734329339946437501907836945765883352399886
75506164965184775180738168837861091527357929701337
62177842752192623401942399639168044983993173312731
32924185707147349566916674687634660915035914677504
99518671430235219628894890102423325116913619626622
73267460800591547471830798392868535206946944540724
76841822524674417161514036427982273348055556214818
97142617910342598647204516893989422179826088076852
87783646182799346313767754307809363333018982642090
10848802521674670883215120185883543223812876952786
71329612474782464538636993009049310363619763878039
62184073572399794223406235393808339651327408011116
66627891981488087797941876876144230030984490851411
60661826293682836764744779239180335110989069790714
85786944089552990653640447425576083659976645795096
66024396409905389607120198219976047599490197230297
64913982680032973156037120041377903785566085089252
16730939319872750275468906903707539413042652315011
94809377245048795150954100921645863754710598436791
78639167021187492431995700641917969777599028300699
15368713711936614952811305876380278410754449733078
40789923115535562561142322423255033685442488917353
44889911501440648020369068063960672322193204149535
41503128880339536053299340368006977710650566631954
81234880673210146739058568557934581403627822703280
82616570773948327592232845941706525094512325230608
22918802058777319719839450180888072429661980811197
77158542502016545090413245809786882778948721859617
72107838435069186155435662884062257473692284509516
20849603980134001723930671666823555245252804609722
53503534226472524250874054075591789781264330331690""".splitlines()

Answer(
    str(sum([int(line) for line in numbers_list]))[0:10]
)

"""
------------------------------------------------
ProjectEuler.Problem.013.py

The Answer is: 5537376230
Time Elasped: 0.005984783172607422sec
------------------------------------------------
"""
47.391304
68
0.949725
725c4a78b42553c5dfa61cb7be78dad147ba621d
4,584
py
Python
api/app/endpoints/datasets.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
api/app/endpoints/datasets.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
api/app/endpoints/datasets.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
"""Endpoint for the manipulation of datasets """ import hashlib from flask import Response from flask_restx import Namespace, Resource, abort from app.common import client from app.common import datasets as datasets_fcts from app.common import path api = Namespace("datasets", description="Datasets related endpoints") def add_openaire_links(datasets): for dataset in datasets: shared_id = dataset.get("shared_id") if not shared_id: dataset["openaireLink"] = "https://enermaps.openaire.eu/" else: shared_id_hash = hashlib.md5(shared_id.encode()) # nosec dataset["openaireLink"] = ( "https://enermaps.openaire.eu/search/dataset?datasetId=enermaps____::{}" .format(shared_id_hash.hexdigest()) )
28.830189
88
0.630017
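The add_openaire_links helper in the record above mutates each dataset dict in place. A hedged sketch of how it might be wired into a flask_restx endpoint; DatasetsResource and client.get_datasets are illustrative names, not confirmed by the record:

@api.route("/")
class DatasetsResource(Resource):
    def get(self):
        datasets = client.get_datasets()  # assumed helper in app.common.client
        add_openaire_links(datasets)      # decorate each dataset with its OpenAIRE link
        return datasets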
725ca51ad9d691720ab9b25d90f6e0eda40f5f1a
59
py
Python
mlgorithms/knn/__init__.py
doycode/mlgorithms
b187efad474acdc9b7c6defe4761f101530bd1a3
[ "Apache-2.0" ]
9
2019-08-22T08:02:31.000Z
2019-12-08T07:14:41.000Z
mlgorithms/knn/__init__.py
shashashuai/mlgorithms
f9f30c109083213dc3c62b30d74121ad1ebd2835
[ "Apache-2.0" ]
null
null
null
mlgorithms/knn/__init__.py
shashashuai/mlgorithms
f9f30c109083213dc3c62b30d74121ad1ebd2835
[ "Apache-2.0" ]
3
2019-12-06T15:08:24.000Z
2020-12-21T16:44:13.000Z
from .knn import KNNClassifier

__all__ = ['KNNClassifier']
19.666667
30
0.779661
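A hedged usage sketch for the re-exported KNNClassifier; the constructor arguments and the fit/predict signature follow common scikit-learn-style conventions and are assumptions, not confirmed by this record:

from mlgorithms.knn import KNNClassifier

X_train = [[0, 0], [1, 1], [5, 5], [6, 6]]
y_train = [0, 0, 1, 1]

model = KNNClassifier()      # constructor arguments (e.g. the number of neighbours k) are assumed
model.fit(X_train, y_train)
print(model.predict([[5, 4]]))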
725ce8235488dcfac8f6ef5c1aeb63ee7251e649
571
py
Python
apps/configuration/fields.py
sotkonstantinidis/testcircle
448aa2148fbc2c969e60f0b33ce112d4740a8861
[ "Apache-2.0" ]
3
2019-02-24T14:24:43.000Z
2019-10-24T18:51:32.000Z
apps/configuration/fields.py
sotkonstantinidis/testcircle
448aa2148fbc2c969e60f0b33ce112d4740a8861
[ "Apache-2.0" ]
17
2017-03-14T10:55:56.000Z
2022-03-11T23:20:19.000Z
apps/configuration/fields.py
sotkonstantinidis/testcircle
448aa2148fbc2c969e60f0b33ce112d4740a8861
[ "Apache-2.0" ]
2
2016-02-01T06:32:40.000Z
2019-09-06T04:33:50.000Z
import unicodedata

from django.forms import fields
27.190476
79
0.644483
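The pairing of unicodedata with django.forms.fields suggests this module defines custom form fields that normalize user input. A hypothetical sketch (the class name and normalization form are assumptions):

class NFKCCharField(fields.CharField):
    """Hypothetical CharField that normalizes submitted text to NFKC form."""

    def to_python(self, value):
        value = super().to_python(value)
        if value:
            value = unicodedata.normalize("NFKC", value)
        return value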
725e0b8e42aaad734ba9a21ce1eb2b48fbf8f5f0
2,575
py
Python
ademo.py
erikdelange/MicroPython-HTTP-Server
54bda9d55ac65b9a6bbf2189098a788add52b344
[ "MIT" ]
null
null
null
ademo.py
erikdelange/MicroPython-HTTP-Server
54bda9d55ac65b9a6bbf2189098a788add52b344
[ "MIT" ]
null
null
null
ademo.py
erikdelange/MicroPython-HTTP-Server
54bda9d55ac65b9a6bbf2189098a788add52b344
[ "MIT" ]
null
null
null
import sys
import time

import uasyncio as asyncio

from ahttpserver import sendfile, Server

app = Server()


# @app.route("GET", "/")  # if uncommented raises route already declared exception
# async def also_root(reader, writer, request):
#     return


async def hello():
    """ For demo purposes show system is still alive """
    count = 0
    while True:
        print("hello", count)
        count += 1
        await asyncio.sleep(60)


if __name__ == "__main__":
    try:
        set_global_exception_handler()
        asyncio.create_task(hello())
        asyncio.run(app.start())  # must be last, does not return
    except KeyboardInterrupt:
        pass
    finally:
        asyncio.run(app.stop())
        asyncio.new_event_loop()
28.611111
87
0.626796
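The demo calls set_global_exception_handler(), whose definition is not part of this record. A minimal sketch of the usual MicroPython/uasyncio pattern (an assumption, not the author's confirmed code):

def set_global_exception_handler():
    def _handle_exception(loop, context):
        # Surface tracebacks from failed tasks instead of letting them die silently.
        sys.print_exception(context["exception"])  # MicroPython-specific helper
        sys.exit()

    loop = asyncio.get_event_loop()
    loop.set_exception_handler(_handle_exception)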
725f0a434de0934431956914cb716614971e97cb
3,851
py
Python
models/audio_net.py
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
d52695be31a1552d0785f3b6634bde6ef9276a90
[ "MIT" ]
null
null
null
models/audio_net.py
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
d52695be31a1552d0785f3b6634bde6ef9276a90
[ "MIT" ]
null
null
null
models/audio_net.py
vipulSharma18/Deep-Self-Supervised-Audio-Video-Cosegmentation-with-Adaptive-Noise-Cancellation
d52695be31a1552d0785f3b6634bde6ef9276a90
[ "MIT" ]
null
null
null
import torch
import torch.nn as nn
import torch.nn.functional as F


# Defines the submodule with skip connection.
# X -------------------identity---------------------- X
#   |-- downsampling -- |submodule| -- upsampling --|
35.657407
75
0.539081
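The ASCII diagram in the record describes a U-Net-style block: the input follows an identity path and, in parallel, a downsampling -> submodule -> upsampling path, and the two are joined. A minimal sketch under that reading; the layer choices (kernel size, stride, channel counts) are assumptions:

class SkipConnectionBlock(nn.Module):
    """Downsample -> submodule -> upsample, concatenated with the identity path."""

    def __init__(self, outer_channels, inner_channels, submodule=None):
        super().__init__()
        self.down = nn.Conv2d(outer_channels, inner_channels, kernel_size=4, stride=2, padding=1)
        self.submodule = submodule if submodule is not None else nn.Identity()
        self.up = nn.ConvTranspose2d(inner_channels, outer_channels, kernel_size=4, stride=2, padding=1)

    def forward(self, x):
        # Concatenate the identity path with the processed path along the channel dim.
        return torch.cat([x, self.up(self.submodule(self.down(x)))], dim=1)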
726015e732db272b6ddb3ba0b812c3994b6a974f
6,117
py
Python
tests/test_core.py
cnschema/kgtool
599e23a9e8a856625143b171f9c36eb5b00623f6
[ "Apache-2.0" ]
7
2018-08-22T01:09:40.000Z
2022-03-31T18:03:33.000Z
tests/test_core.py
cnschema/kgtool
599e23a9e8a856625143b171f9c36eb5b00623f6
[ "Apache-2.0" ]
2
2020-05-09T12:01:15.000Z
2021-06-01T22:17:12.000Z
tests/test_core.py
cnschema/kgtool
599e23a9e8a856625143b171f9c36eb5b00623f6
[ "Apache-2.0" ]
8
2018-03-07T01:28:32.000Z
2020-09-06T18:27:27.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Path hack
import os
import sys
sys.path.insert(0, os.path.abspath('..'))

try:
    import unittest2 as unittest
except ImportError:
    import unittest

from kgtool.core import *  # noqa

if __name__ == '__main__':
    unittest.main()
29.839024
101
0.560896
7261ec9ea850db246c5fb7cbfebb2bc01fe9250d
903
py
Python
ui/Rhino/AGS/dev/AGS_toolbar_display_cmd.py
ricardoavelino/compas_ags
1c9e496bc4b72b11adc80ea97288ddc27e92c08e
[ "MIT" ]
1
2021-08-14T11:14:52.000Z
2021-08-14T11:14:52.000Z
ui/Rhino/AGS/dev/AGS_toolbar_display_cmd.py
ricardoavelino/compas_ags
1c9e496bc4b72b11adc80ea97288ddc27e92c08e
[ "MIT" ]
null
null
null
ui/Rhino/AGS/dev/AGS_toolbar_display_cmd.py
ricardoavelino/compas_ags
1c9e496bc4b72b11adc80ea97288ddc27e92c08e
[ "MIT" ]
null
null
null
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division

import scriptcontext as sc

import compas_rhino

from compas_ags.rhino import SettingsForm
from compas_ags.rhino import FormObject
from compas_ags.rhino import ForceObject

__commandname__ = "AGS_toolbar_display"


# ==============================================================================
# Main
# ==============================================================================

if __name__ == '__main__':
    RunCommand(True)
25.083333
99
0.615725
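RunCommand is invoked at the bottom of the record but its body is elided. Sibling compas_ags Rhino command scripts follow roughly this shape; the sc.sticky key and the SettingsForm call are assumptions here:

def RunCommand(is_interactive):
    if "AGS" not in sc.sticky:
        compas_rhino.display_message("AGS has not been initialised yet.")
        return
    scene = sc.sticky["AGS"]["scene"]
    SettingsForm.from_scene(scene, object_types=[FormObject, ForceObject])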
726209920117b9b9ebcbf40bbfd0a7a9d4bd3f25
10,312
py
Python
lpp/evaluator.py
VidoniJorge/c-interprete
4f026d093b26289d3f692cd64d52069fdd1d954c
[ "Apache-2.0" ]
null
null
null
lpp/evaluator.py
VidoniJorge/c-interprete
4f026d093b26289d3f692cd64d52069fdd1d954c
[ "Apache-2.0" ]
null
null
null
lpp/evaluator.py
VidoniJorge/c-interprete
4f026d093b26289d3f692cd64d52069fdd1d954c
[ "Apache-2.0" ]
null
null
null
from typing import (
    Any,
    cast,
    List,
    Optional,
    Type
)

import lpp.ast as ast
from lpp.builtins import BUILTINS
from lpp.object import (
    Boolean,
    Builtin,
    Environment,
    Error,
    Function,
    Integer,
    Null,
    Object,
    ObjectType,
    String,
    Return
)

TRUE = Boolean(True)
FALSE = Boolean(False)
NULL = Null()

_NOT_A_FUNCTION = 'No es una funcion: {}'
_TYPE_MISMATCH = 'Discrepancia de tipos: {} {} {}'
_UNKNOWN_PREFIX_OPERATOR = 'Operador desconocido: {}{}'
_UNKNOWN_INFIX_OPERATOR = 'Operador desconocido: {} {} {}'
_UNKNOWN_IDENTIFIER = 'Identificador no encontrado: {}'
33.264516
113
0.650698
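The record stops at the module-level constants; the evaluator itself is elided. A minimal sketch of how a tree-walking evaluator of this (Monkey-style) design typically dispatches on node types; the exact node names handled below are assumptions, not the project's confirmed code:

def evaluate(node: ast.ASTNode, env: Environment) -> Optional[Object]:
    node_type: Type = type(node)

    if node_type == ast.Program:
        result: Optional[Object] = None
        for statement in node.statements:
            result = evaluate(statement, env)
            if type(result) == Return:
                return cast(Return, result).value
            elif type(result) == Error:
                return result
        return result
    elif node_type == ast.Integer:
        return Integer(node.value)
    elif node_type == ast.Boolean:
        return TRUE if node.value else FALSE

    return NULL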
726246040afca77178e3293325a2bcbc9ed6e53e
121,132
py
Python
cli/tests/pcluster/config/test_validators.py
QPC-database/aws-parallelcluster
8c2e9595ca171340df21695c27d85dc00f19d3e4
[ "Apache-2.0" ]
1
2021-07-10T13:59:46.000Z
2021-07-10T13:59:46.000Z
cli/tests/pcluster/config/test_validators.py
QPC-database/aws-parallelcluster
8c2e9595ca171340df21695c27d85dc00f19d3e4
[ "Apache-2.0" ]
null
null
null
cli/tests/pcluster/config/test_validators.py
QPC-database/aws-parallelcluster
8c2e9595ca171340df21695c27d85dc00f19d3e4
[ "Apache-2.0" ]
null
null
null
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance
# with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import os
import re

import configparser
import pytest
from assertpy import assert_that

import tests.pcluster.config.utils as utils
from pcluster.config.cfn_param_types import CfnParam, CfnSection
from pcluster.config.mappings import ALLOWED_VALUES, FSX
from pcluster.config.validators import (
    DCV_MESSAGES,
    EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS,
    FSX_MESSAGES,
    FSX_SUPPORTED_ARCHITECTURES_OSES,
    LOGFILE_LOGGER,
    architecture_os_validator,
    check_usage_class,
    cluster_type_validator,
    compute_resource_validator,
    disable_hyperthreading_architecture_validator,
    efa_gdr_validator,
    efa_os_arch_validator,
    fsx_ignored_parameters_validator,
    instances_architecture_compatibility_validator,
    intel_hpc_architecture_validator,
    queue_compute_type_validator,
    queue_validator,
    region_validator,
    s3_bucket_region_validator,
    settings_validator,
)
from pcluster.constants import FSX_HDD_THROUGHPUT, FSX_SSD_THROUGHPUT
from tests.common import MockedBoto3Request
from tests.pcluster.config.defaults import DefaultDict


def test_ec2_key_pair_validator(mocker, boto3_stubber):
    describe_key_pairs_response = {
        "KeyPairs": [
            {"KeyFingerprint": "12:bf:7c:56:6c:dd:4f:8c:24:45:75:f1:1b:16:54:89:82:09:a4:26", "KeyName": "key1"}
        ]
    }
    mocked_requests = [
        MockedBoto3Request(
            method="describe_key_pairs", response=describe_key_pairs_response, expected_params={"KeyNames": ["key1"]}
        )
    ]
    boto3_stubber("ec2", mocked_requests)

    # TODO test with invalid key
    config_parser_dict = {"cluster default": {"key_name": "key1"}}
    utils.assert_param_validator(mocker, config_parser_dict)


def test_ec2_volume_validator(mocker, boto3_stubber):
    describe_volumes_response = {
        "Volumes": [
            {
                "AvailabilityZone": "us-east-1a",
                "Attachments": [
                    {
                        "AttachTime": "2013-12-18T22:35:00.000Z",
                        "InstanceId": "i-1234567890abcdef0",
                        "VolumeId": "vol-12345678",
                        "State": "attached",
                        "DeleteOnTermination": True,
                        "Device": "/dev/sda1",
                    }
                ],
                "Encrypted": False,
                "VolumeType": "gp2",
                "VolumeId": "vol-049df61146c4d7901",
                "State": "available",  # TODO add test with "in-use"
                "SnapshotId": "snap-1234567890abcdef0",
                "CreateTime": "2013-12-18T22:35:00.084Z",
                "Size": 8,
            }
        ]
    }
    mocked_requests = [
        MockedBoto3Request(
            method="describe_volumes",
            response=describe_volumes_response,
            expected_params={"VolumeIds": ["vol-12345678"]},
        )
    ]
    boto3_stubber("ec2", mocked_requests)

    # TODO test with invalid key
    config_parser_dict = {
        "cluster default": {"ebs_settings": "default"},
        "ebs default": {"shared_dir": "test", "ebs_volume_id": "vol-12345678"},
    }
    utils.assert_param_validator(mocker, config_parser_dict)


def test_ec2_vpc_id_validator(mocker, boto3_stubber):
    mocked_requests = []

    # mock describe_vpc boto3 call
    describe_vpc_response = {
        "Vpcs": [
            {
                "VpcId": "vpc-12345678",
                "InstanceTenancy": "default",
                "Tags": [{"Value": "Default VPC", "Key": "Name"}],
                "State": "available",
                "DhcpOptionsId": "dopt-4ef69c2a",
                "CidrBlock": "172.31.0.0/16",
                "IsDefault": True,
            }
        ]
    }
    mocked_requests.append(
        MockedBoto3Request(
            method="describe_vpcs",
            response=describe_vpc_response, expected_params={"VpcIds": ["vpc-12345678"]}
        )
    )

    # mock describe_vpc_attribute boto3 call
    describe_vpc_attribute_response = {
        "VpcId": "vpc-12345678",
        "EnableDnsSupport": {"Value": True},
        "EnableDnsHostnames": {"Value": True},
    }
    mocked_requests.append(
        MockedBoto3Request(
            method="describe_vpc_attribute",
            response=describe_vpc_attribute_response,
            expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsSupport"},
        )
    )
    mocked_requests.append(
        MockedBoto3Request(
            method="describe_vpc_attribute",
            response=describe_vpc_attribute_response,
            expected_params={"VpcId": "vpc-12345678", "Attribute": "enableDnsHostnames"},
        )
    )
    boto3_stubber("ec2", mocked_requests)

    # TODO mock and test invalid vpc-id
    for vpc_id, expected_message in [("vpc-12345678", None)]:
        config_parser_dict = {"cluster default": {"vpc_settings": "default"}, "vpc default": {"vpc_id": vpc_id}}
        utils.assert_param_validator(mocker, config_parser_dict, expected_message)


def _kms_key_stubber(mocker, boto3_stubber, kms_key_id, expected_message, num_calls):
    describe_key_response = {
        "KeyMetadata": {
            "AWSAccountId": "1234567890",
            "Arn": "arn:aws:kms:us-east-1:1234567890:key/{0}".format(kms_key_id),
            "CreationDate": datetime.datetime(2019, 1, 10, 11, 25, 59, 128000),
            "Description": "",
            "Enabled": True,
            "KeyId": kms_key_id,
            "KeyManager": "CUSTOMER",
            "KeyState": "Enabled",
            "KeyUsage": "ENCRYPT_DECRYPT",
            "Origin": "AWS_KMS",
        }
    }
    mocked_requests = [
        MockedBoto3Request(
            method="describe_key",
            response=expected_message if expected_message else describe_key_response,
            expected_params={"KeyId": kms_key_id},
            generate_error=True if expected_message else False,
        )
    ] * num_calls
    boto3_stubber("kms", mocked_requests)


def _head_bucket_stubber(mocker, boto3_stubber, bucket, num_calls):
    head_bucket_response = {
        "ResponseMetadata": {
            "AcceptRanges": "bytes",
            "ContentType": "text/html",
            "LastModified": "Thu, 16 Apr 2015 18:19:14 GMT",
            "ContentLength": 77,
            "VersionId": "null",
            "ETag": '"30a6ec7e1a9ad79c203d05a589c8b400"',
            "Metadata": {},
        }
    }
    mocked_requests = [
        MockedBoto3Request(method="head_bucket", response=head_bucket_response, expected_params=bucket)
    ] * num_calls
    boto3_stubber("s3", mocked_requests)
    mocker.patch("pcluster.config.validators.urllib.request.urlopen")


#########
#
# architecture validator tests
#
# Two things make it difficult to test validators that key on architecture in the same way the other
# validators are tested:
# 1) architecture is a derived parameter and cannot be configured directly via the config file
# 2) many validators key on the architecture, which makes it impossible to test some combinations of
#    parameters for validators that run later than others, because those run earlier will have
#    already raised exceptions.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls those functions directly (as opposed to patching functions and instantiating a config
# as would be done when running `pcluster create/update`).
#
#########


def get_default_pcluster_sections_dict():
    """Return a dict similar in structure to that of a cluster config file."""
    default_pcluster_sections_dict = {}
    for section_default_dict in DefaultDict:
        if section_default_dict.name == "pcluster":
            # Get rid of the extra layer in this case
            default_pcluster_sections_dict["cluster"] = section_default_dict.value.get("cluster")
        else:
            default_pcluster_sections_dict[section_default_dict.name] = section_default_dict.value
    return default_pcluster_sections_dict


def make_pcluster_config_mock(mocker, config_dict):
    """Mock the calls that are made on a pcluster_config by validator functions."""
    cluster_config_dict = get_default_pcluster_sections_dict()
    for section_key in config_dict:
        cluster_config_dict = utils.merge_dicts(cluster_config_dict.get(section_key), config_dict.get(section_key))

    section_to_mocks = {}
    for section_key, section_dict in config_dict.items():
        section_mock = mocker.MagicMock()
        section_mock.get_param_value.side_effect = lambda param: section_dict.get(param)
        section_to_mocks[section_key] = section_mock

    pcluster_config_mock = mocker.MagicMock()
    pcluster_config_mock.get_section.side_effect = lambda section: section_to_mocks.get(section)

    return pcluster_config_mock


def run_architecture_validator_test(
    mocker,
    config,
    constrained_param_section,
    constrained_param_name,
    param_name,
    param_val,
    validator,
    expected_warnings,
    expected_errors,
):
    """Run a test for a validator that's concerned with the architecture param."""
    mocked_pcluster_config = make_pcluster_config_mock(mocker, config)
    errors, warnings = validator(param_name, param_val, mocked_pcluster_config)

    mocked_pcluster_config.get_section.assert_called_once_with(constrained_param_section)
    mocked_pcluster_config.get_section.side_effect(constrained_param_section).get_param_value.assert_called_with(
        constrained_param_name
    )

    assert_that(len(warnings)).is_equal_to(len(expected_warnings))
    for warnings, expected_warnings in zip(warnings, expected_warnings):
        assert_that(warnings).matches(re.escape(expected_warnings))

    assert_that(len(errors)).is_equal_to(len(expected_errors))
    for errors, expected_errors in zip(errors, expected_errors):
        assert_that(errors).matches(re.escape(expected_errors))


#########
#
# ignored FSx params validator test
#
# Testing a validator that requires the fsx_fs_id parameter to be specified requires a lot of
# boto3 stubbing due to the complexity contained in the fsx_id_validator.
#
# Thus, the following code mocks the pcluster_config object passed to the validator functions
# and calls the validator directly.
#
#########


def test_ebs_allowed_values_all_have_volume_size_bounds():
    """Ensure that all known EBS volume types are accounted for by the volume size validator."""
    allowed_values_all_have_volume_size_bounds = set(ALLOWED_VALUES["volume_types"]) <= set(
        EBS_VOLUME_TYPE_TO_VOLUME_SIZE_BOUNDS.keys()
    )
    assert_that(allowed_values_all_have_volume_size_bounds).is_true()
39.780624
120
0.588383
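A hedged example of how run_architecture_validator_test from the record above is typically driven (the real file parametrizes many cases via pytest; the configuration values below are illustrative only, not taken from the source):

def test_intel_hpc_architecture_validator_example(mocker):
    run_architecture_validator_test(
        mocker,
        config={"cluster": {"enable_intel_hpc_platform": True, "architecture": "x86_64"}},
        constrained_param_section="cluster",
        constrained_param_name="architecture",
        param_name="enable_intel_hpc_platform",
        param_val=True,
        validator=intel_hpc_architecture_validator,
        expected_warnings=[],
        expected_errors=[],
    )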
72626018c04a7d52d09238e8d7436046a0c6a071
652
py
Python
flask_app.py
takamatsu-shyo/yolo-microservice
b8ab03b98c0939ab1849d0da938d0878b0ec441f
[ "MIT" ]
null
null
null
flask_app.py
takamatsu-shyo/yolo-microservice
b8ab03b98c0939ab1849d0da938d0878b0ec441f
[ "MIT" ]
null
null
null
flask_app.py
takamatsu-shyo/yolo-microservice
b8ab03b98c0939ab1849d0da938d0878b0ec441f
[ "MIT" ]
null
null
null
from flask import Flask
from flask import request
from flask import Response

from resources import resourcePing, resourceResolution
from message_protocol.resolution_input import parseResolutionInput

import json

app = Flask(__name__)
29.636364
66
0.759202
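The record ends right after the Flask app is created; the route handlers are elided. A hedged sketch of how the imported helpers might be wired up (the endpoint paths and the resource call signatures are assumptions, not part of the record):

@app.route("/ping", methods=["GET"])
def ping():
    return Response(json.dumps(resourcePing.get()), mimetype="application/json")


@app.route("/resolution", methods=["POST"])
def resolution():
    payload = parseResolutionInput(request.get_json())
    return Response(json.dumps(resourceResolution.get(payload)), mimetype="application/json")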