hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fbfd2e30f614a3b655c2662e8c6213275af8c2ac | 4,378 | py | Python | touch.py | mendelmaker/dipn | a4871ecf2e4eeb40ff3b7945150c255802694609 | [
"BSD-2-Clause"
] | 8 | 2020-11-17T16:55:34.000Z | 2021-04-28T09:24:37.000Z | touch.py | mendelmaker/dipn | a4871ecf2e4eeb40ff3b7945150c255802694609 | [
"BSD-2-Clause"
] | null | null | null | touch.py | mendelmaker/dipn | a4871ecf2e4eeb40ff3b7945150c255802694609 | [
"BSD-2-Clause"
] | 8 | 2021-07-05T05:10:17.000Z | 2022-03-02T12:10:25.000Z | #!/usr/bin/env python
import matplotlib.pyplot as plt
import numpy as np
import time
import cv2
from real.camera import Camera
from robot import Robot
from subprocess import Popen, PIPE
# User options (change me)
# --------------- Setup options ---------------
tcp_host_ip = '100.127.7.223' # IP and port to robot arm as TCP client (UR5)
tcp_host_ip = "172.19.97.157"
tcp_port = 30002
rtc_host_ip = '100.127.7.223' # IP and port to robot arm as real-time client (UR5)
rtc_host_ip = "172.19.97.157"
rtc_port = 30003
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]])
workspace_limits = np.asarray([[-0.237, 0.211], [-0.683, -0.235], [0.18, 0.4]])
# workspace_limits = np.asarray([[-0.224, 0.224], [-0.674, -0.226], [0.18, 0.4]])
# Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
tool_orientation = [2.22, -2.22, 0]
tool_orientation = [0, -3.14, 0]
# ---------------------------------------------
# Move robot to home pose
robot = Robot(False, None, None, workspace_limits,
tcp_host_ip, tcp_port, rtc_host_ip, rtc_port,
False, None, None)
robot.open_gripper()
transformation_matrix = get_camera_to_robot_transformation(robot.camera)
# Slow down robot
robot.joint_acc = 1.4
robot.joint_vel = 1.05
# Callback function for clicking on OpenCV window
click_point_pix = ()
camera_color_img, camera_depth_img = robot.get_camera_data()
# Show color and depth frames
cv2.namedWindow('color')
cv2.setMouseCallback('color', mouseclick_callback)
cv2.namedWindow('depth')
while True:
camera_color_img, camera_depth_img = robot.get_camera_data()
bgr_data = cv2.cvtColor(camera_color_img, cv2.COLOR_RGB2BGR)
if len(click_point_pix) != 0:
bgr_data = cv2.circle(bgr_data, click_point_pix, 7, (0, 0, 255), 2)
cv2.imshow('color', bgr_data)
camera_depth_img[camera_depth_img < 0.19] = 0
cv2.imshow('depth', camera_depth_img)
if cv2.waitKey(1) == ord('c'):
break
cv2.destroyAllWindows()
| 35.885246 | 97 | 0.653952 |
fbfd539734cc022db7b79f3a3d092f8d88fe0ee4 | 3,708 | py | Python | py/WB-Klein/5/5.4_cc.py | kassbohm/wb-snippets | f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe | [
"MIT"
] | null | null | null | py/WB-Klein/5/5.4_cc.py | kassbohm/wb-snippets | f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe | [
"MIT"
] | null | null | null | py/WB-Klein/5/5.4_cc.py | kassbohm/wb-snippets | f1ac5194e9f60a9260d096ba5ed1ce40b844a3fe | [
"MIT"
] | null | null | null | # Header starts here.
from sympy.physics.units import *
from sympy import *
# Rounding:
import decimal
from decimal import Decimal as DX
from copy import deepcopy
# LateX:
kwargs = {}
kwargs["mat_str"] = "bmatrix"
kwargs["mat_delim"] = ""
# kwargs["symbol_names"] = {FB: "F^{\mathsf B}", }
# Units:
(k, M, G ) = ( 10**3, 10**6, 10**9 )
(mm, cm) = ( m/1000, m/100 )
Newton = kg*m/s**2
Pa = Newton/m**2
MPa = M*Pa
GPa = G*Pa
kN = k*Newton
deg = pi/180
half = S(1)/2
# Header ends here.
#
EA, l, F1, F2 = var("EA, l, F1, F2")
sub_list = [
( EA, 2 *Pa*m**2 ),
( l, 1 *m ),
( F1, 1 *Newton /2 ), # due to symmetry
( F2, 2 *Newton /2 ), # due to symmetry
]
def k(phi):
""" element stiffness matrix """
# phi is angle between:
# 1. vector along global x axis
# 2. vector along 1-2-axis of truss
# phi is counted positively about z.
# pprint("phi / deg:")
# pprint(N(deg(phi),3))
(c, s) = ( cos(phi), sin(phi) )
(cc, ss, sc) = ( c*c, s*s, s*c)
return Matrix(
[
[ cc, sc, -cc, -sc],
[ sc, ss, -sc, -ss],
[-cc, -sc, cc, sc],
[-sc, -ss, sc, ss],
])
(p1, p2, p3) = (315*pi/180, 0 *pi/180, 45 *pi/180)
# k2 uses only 1/2 A due to symmetry:
(k1, k2, k3) = (EA/l*k(p1), EA/2/l*k(p2), EA/l*k(p3))
pprint("\nk1 / (EA / l): ")
pprint(k1 / (EA/l) )
pprint("\nk2 / (EA / l): ")
pprint(k2 / (EA/l) )
pprint("\nk3 / (EA / l): ")
pprint(k3 / (EA/l) )
K = EA/l*Matrix([
[ 1 , -S(1)/2 ],
[ -S(1)/2, 1 ]
])
u2x, u3x = var("u2x, u3x")
u = Matrix([u2x , u3x ])
f = Matrix([F1 , F2 ])
u2x, u3x = var("u2x, u3x")
eq = Eq(K*u , f)
sol = solve(eq, [u2x, u3x])
pprint("\nSolution:")
pprint(sol)
u2x, u3x = sol[u2x], sol[u3x]
pprint("\nu2x / m:")
tmp = u2x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nu3x / m:")
tmp = u3x.subs(sub_list)
tmp /= m
pprint(tmp)
pprint("\nF1x / N:")
tmp = - EA/l * u2x/2
tmp = tmp.subs(sub_list)
tmp /= Newton
pprint(tmp)
# k1 / (EA / l):
# 1/2 -1/2 -1/2 1/2
#
# -1/2 1/2 1/2 -1/2
#
# -1/2 1/2 1/2 -1/2
#
# 1/2 -1/2 -1/2 1/2
#
# k2 / (EA / l):
# 1/2 0 -1/2 0
#
# 0 0 0 0
#
# -1/2 0 1/2 0
#
# 0 0 0 0
#
# k3 / (EA / l):
# 1/2 1/2 -1/2 -1/2
#
# 1/2 1/2 -1/2 -1/2
#
# -1/2 -1/2 1/2 1/2
#
# -1/2 -1/2 1/2 1/2
#
# Solution:
# 2l(2F + F) 2l(F + 2F)
# u2x: , u3x:
# 3EA 3EA
#
# u2x / m:
# 2/3
#
# u3x / m:
# 5/6
#
# F1x / N:
# -2/3
| 21.433526 | 66 | 0.432848 |
fbfd7d6f8721ebc2b678a03f3cc15caf4d6fced6 | 870 | py | Python | ucc_csv_create.py | MasonDMitchell/HackNC-2019 | 4656f9dcc15ee86c66885267006ed9f4f5b935e7 | [
"MIT"
] | null | null | null | ucc_csv_create.py | MasonDMitchell/HackNC-2019 | 4656f9dcc15ee86c66885267006ed9f4f5b935e7 | [
"MIT"
] | null | null | null | ucc_csv_create.py | MasonDMitchell/HackNC-2019 | 4656f9dcc15ee86c66885267006ed9f4f5b935e7 | [
"MIT"
] | 1 | 2019-10-12T15:09:06.000Z | 2019-10-12T15:09:06.000Z | #!/usr/bin/python3
import csv
ucc_dictionary_file_list = [
'./downloads/diary08/diary08/uccd08.txt',
'./downloads/diary09/diary09/uccd09.txt',
'./downloads/diary11/diary11/uccd11.txt',
'./downloads/diary10/diary10/uccd10.txt',
]
cleaned_ucc_dictionary = dict()
for dictionary in ucc_dictionary_file_list:
with open(dictionary) as file:
line_list = file.read().splitlines()
for line in line_list:
ucc_tuple = tuple(line.split(" ", 1))
cleaned_ucc_dictionary[int(ucc_tuple[0])] = ucc_tuple[1]
with open('cleaned_ucc_dictionary.csv', 'w', newline='') as csvfile:
ucc_writer = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for key, value in cleaned_ucc_dictionary.items():
ucc_writer.writerow([key, value])
# print(len(cleaned_ucc_dictionary.keys()))
# print(line_list) | 33.461538 | 78 | 0.688506 |
fbff8e3dec4d22f8cf3a2af319e44b94680c5703 | 30,937 | py | Python | eventsourcing/system/ray.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/system/ray.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | eventsourcing/system/ray.py | gerbyzation/eventsourcing | a9e9ecf123af658762832cf97a9f00f8f7064393 | [
"BSD-3-Clause"
] | null | null | null | import datetime
import os
import traceback
from inspect import ismethod
from queue import Empty, Queue
from threading import Event, Lock, Thread
from time import sleep
from typing import Dict, Optional, Tuple, Type
import ray
from eventsourcing.application.process import ProcessApplication
from eventsourcing.application.simple import (
ApplicationWithConcreteInfrastructure,
Prompt,
PromptToPull,
is_prompt_to_pull,
)
from eventsourcing.domain.model.decorators import retry
from eventsourcing.domain.model.events import subscribe, unsubscribe
from eventsourcing.exceptions import (
EventSourcingError,
ExceptionWrapper,
OperationalError,
ProgrammingError,
RecordConflictError,
)
from eventsourcing.infrastructure.base import (
DEFAULT_PIPELINE_ID,
RecordManagerWithNotifications,
)
from eventsourcing.system.definition import (
AbstractSystemRunner,
System,
TProcessApplication,
)
from eventsourcing.system.rayhelpers import RayDbJob, RayPrompt
from eventsourcing.system.raysettings import ray_init_kwargs
from eventsourcing.system.runner import DEFAULT_POLL_INTERVAL
ray.init(**ray_init_kwargs)
MAX_QUEUE_SIZE = 1
PAGE_SIZE = 20
MICROSLEEP = 0.000
PROMPT_WITH_NOTIFICATION_IDS = False
PROMPT_WITH_NOTIFICATION_OBJS = False
GREEDY_PULL_NOTIFICATIONS = True
| 41.030504 | 95 | 0.580761 |
fbff951b3453445a7ed046dfadb09ce047c59a21 | 1,766 | py | Python | authentik/stages/password/migrations/0007_app_password.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 15 | 2020-01-05T09:09:57.000Z | 2020-11-28T05:27:39.000Z | authentik/stages/password/migrations/0007_app_password.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 302 | 2020-01-21T08:03:59.000Z | 2020-12-04T05:04:57.000Z | authentik/stages/password/migrations/0007_app_password.py | BeryJu/passbook | 350f0d836580f4411524614f361a76c4f27b8a2d | [
"MIT"
] | 3 | 2020-03-04T08:21:59.000Z | 2020-08-01T20:37:18.000Z | # Generated by Django 3.2.6 on 2021-08-23 14:34
import django.contrib.postgres.fields
from django.apps.registry import Apps
from django.db import migrations, models
from django.db.backends.base.schema import BaseDatabaseSchemaEditor
from authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT
| 36.040816 | 100 | 0.625708 |
220006b165652d33b27e971f916a5a800cf16e0a | 1,211 | py | Python | article/tests/test_models.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | article/tests/test_models.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | article/tests/test_models.py | asb29/Redundant | ee816fd41f9217610bd11f757cf9175288723c70 | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth.models import User
from article.models import Article, Category
| 29.536585 | 77 | 0.6218 |
2200800f734e84798d40a112ef14379650a7d44d | 145 | py | Python | tests/test_import.py | GoodManWEN/typehints_checker | 36e2b2f27b4c392543972e8e466f8e48dfeff274 | [
"MIT"
] | null | null | null | tests/test_import.py | GoodManWEN/typehints_checker | 36e2b2f27b4c392543972e8e466f8e48dfeff274 | [
"MIT"
] | null | null | null | tests/test_import.py | GoodManWEN/typehints_checker | 36e2b2f27b4c392543972e8e466f8e48dfeff274 | [
"MIT"
] | null | null | null | import os , sys
sys.path.append(os.getcwd())
import pytest
from typehints_checker import * | 18.125 | 31 | 0.737931 |
2200a38582a5987a8032f11e6758387289477471 | 2,240 | py | Python | models/FlagAttachment.py | jeffg2k/RootTheBox | 1bb971f98da96f66c868f5786c2405321b0be976 | [
"Apache-2.0"
] | 1 | 2020-02-28T16:23:12.000Z | 2020-02-28T16:23:12.000Z | models/FlagAttachment.py | Warlockk/RootTheBox | e24f3e0350aec1b65be81cdc71ff09a5e1b8e587 | [
"Apache-2.0"
] | null | null | null | models/FlagAttachment.py | Warlockk/RootTheBox | e24f3e0350aec1b65be81cdc71ff09a5e1b8e587 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Nov 24, 2014
@author: moloch
Copyright 2014 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from uuid import uuid4
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Unicode, String, Integer
from models.BaseModels import DatabaseObject
from libs.StringCoding import encode, decode
from builtins import str
from tornado.options import options
| 31.111111 | 88 | 0.672768 |
22016594c64927e9cac7fbe2989ffcfcf16a646f | 1,278 | py | Python | connman_dispatcher/detect.py | a-sk/connman-dispatcher | 2561ae87ffd26d0f98bb1ab2b430e181be3d01c1 | [
"0BSD"
] | 4 | 2015-01-04T19:26:01.000Z | 2017-06-06T21:04:01.000Z | connman_dispatcher/detect.py | a-sk/connman-dispatcher | 2561ae87ffd26d0f98bb1ab2b430e181be3d01c1 | [
"0BSD"
] | 1 | 2015-04-04T13:19:15.000Z | 2015-04-04T13:19:15.000Z | connman_dispatcher/detect.py | a-sk/connman-dispatcher | 2561ae87ffd26d0f98bb1ab2b430e181be3d01c1 | [
"0BSD"
] | null | null | null | import glib
import dbus
from dbus.mainloop.glib import DBusGMainLoop
from pyee import EventEmitter
import logbook
logger = logbook.Logger('connman-dispatcher')
__all__ = ['detector']
detector = EventEmitter()
DBusGMainLoop(set_as_default=True)
bus = dbus.SystemBus()
bus.add_match_string_non_blocking("interface='net.connman.Manager'")
bus.add_message_filter(property_changed)
manager = dbus.Interface(bus.get_object('net.connman', "/"), 'net.connman.Manager')
detector.run = run
| 25.058824 | 83 | 0.661972 |
2202355dec4485d79be0734da044b8e85dc7a3dc | 4,294 | py | Python | integration/test/test_profile_overflow.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | integration/test/test_profile_overflow.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | integration/test/test_profile_overflow.py | avilcheslopez/geopm | 35ad0af3f17f42baa009c97ed45eca24333daf33 | [
"MIT",
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2015 - 2022, Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause
#
"""
Runs an application with a large number of short regions and checks
that the controller successfully runs.
"""
import sys
import unittest
import os
import subprocess
import glob
import geopmpy.io
import geopmpy.agent
import geopmdpy.error
import geopmdpy.topo
from integration.test import geopm_test_launcher
from integration.test import check_trace
def test_short_region_count(self):
'''
Test that the count for MPI_Barrier is as expected.
'''
report = geopmpy.io.RawReport(self._report_path)
hosts = report.host_names()
for hh in hosts:
region_data = report.raw_region(hh, 'MPI_Barrier')
count = region_data['count']
self.assertEqual(count, 10000000)
def test_sample_rate(self):
'''
Test that the sample rate is regular.
'''
traces = glob.glob(self._trace_path + "*")
if len(traces) == 0:
raise RuntimeError("No traces found with prefix: {}".format(self._trace_path_prefix))
for tt in traces:
check_trace.check_sample_rate(tt, 0.005)
if __name__ == '__main__':
unittest.main()
| 32.778626 | 97 | 0.614578 |
2203367508cf03902b996dcb408a29b0ce2106d4 | 13,627 | py | Python | Objects/optAlignRNA.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | Objects/optAlignRNA.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | Objects/optAlignRNA.py | MooersLab/jupyterlabpymolpysnipsplus | b886750d63372434df53d4d6d7cdad6cb02ae4e7 | [
"MIT"
] | null | null | null | # Description: OptiAlign.py by Jason Vertree modified for aligning multiple RNA structures.
# Source: Generated while helping Miranda Adams at U of Saint Louis.
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: Jason Vertrees')
cmd.do('# @COPYRIGHT: Jason Vertrees (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do('# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.')
cmd.do('# Jan. 29, 2020 ')
cmd.do('# Blaine Mooers, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(' optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.')
cmd.do(' Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'')
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1'"')
cmd.do(' sel2 += " and N. C1'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(' # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!')
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(' # the rotation. V and Wt are orthonormal, so their det's')
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
"""
cmd.do('python')
cmd.do(' ##############################################################################')
cmd.do('#')
cmd.do('# @SUMMARY: -- QKabsch.py. A python implementation of the optimal superposition')
cmd.do('# of two sets of vectors as proposed by Kabsch 1976 & 1978.')
cmd.do('#')
cmd.do('# @AUTHOR: Jason Vertrees')
cmd.do('# @COPYRIGHT: Jason Vertrees (C), 2005-2007')
cmd.do('# @LICENSE: Released under GPL:')
cmd.do('# This program is free software; you can redistribute it and/or modify')
cmd.do('# it under the terms of the GNU General Public License as published by')
cmd.do('# the Free Software Foundation; either version 2 of the License, or')
cmd.do('# (at your option) any later version.')
cmd.do('# This program is distributed in the hope that it will be useful, but WITHOUT')
cmd.do('# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS')
cmd.do('# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.')
cmd.do('#')
cmd.do('# You should have received a copy of the GNU General Public License along with')
cmd.do('# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin')
cmd.do('# Street, Fifth Floor, Boston, MA 02110-1301, USA ')
cmd.do('#')
cmd.do('# DATE : 2007-01-01')
cmd.do('# REV : 2')
cmd.do('# REQUIREMENTS: numpy')
cmd.do('#')
cmd.do('#')
cmd.do('# Modified optAlign.py to use C1' carbon atoms of RNA for alignment.')
cmd.do('# Jan. 29, 2020 ')
cmd.do('# Blaine Mooers, PhD')
cmd.do('# Univ. of Oklahoma Health Sciences Center')
cmd.do('#')
cmd.do('#############################################################################')
cmd.do('from array import *')
cmd.do(' ')
cmd.do('# system stuff')
cmd.do('import os')
cmd.do('import copy')
cmd.do(' ')
cmd.do('# pretty printing')
cmd.do('import pprint')
cmd.do(' ')
cmd.do('# for importing as a plugin into PyMol')
cmd.do('from pymol import cmd')
cmd.do('from pymol import stored')
cmd.do('from pymol import selector')
cmd.do(' ')
cmd.do('# using numpy for linear algebra')
cmd.do('import numpy')
cmd.do(' ')
cmd.do('def optAlignRNA( sel1, sel2 ):')
cmd.do(' """')
cmd.do(' optAlignRNA performs the Kabsch alignment algorithm upon the C1' carbons of two selections.')
cmd.do(' Example: optAlignRNA 1JU7 and i. 1-16 and n. C1', 1CLL and i. 4-146 and n. C1'')
cmd.do(' ')
cmd.do(' Two RMSDs are returned. One comes from the Kabsch algorithm and the other from')
cmd.do(' PyMOL based upon your selections.')
cmd.do(' ')
cmd.do(' This function can be run in a for loop to fit multiple structures with a common prefix name:')
cmd.do(' ')
cmd.do(' for x in cmd.get_names(): optAlignRNA(x, "1JU7_0001")')
cmd.do(' ')
cmd.do(' or get the rmsds for all combinations, do the following:')
cmd.do(' ')
cmd.do(' [[optAlignRNA(x, y) for x in cmd.get_names()] for y in cmd.get_names()]')
cmd.do('')
cmd.do(' """')
cmd.do(' cmd.reset()')
cmd.do(' ')
cmd.do(' # make the lists for holding coordinates')
cmd.do(' # partial lists')
cmd.do(' stored.sel1 = []')
cmd.do(' stored.sel2 = []')
cmd.do(' # full lists')
cmd.do(' stored.mol1 = []')
cmd.do(' stored.mol2 = []')
cmd.do(' ')
cmd.do(' # -- CUT HERE')
cmd.do(' sel1 += " and N. C1'"')
cmd.do(' sel2 += " and N. C1'"')
cmd.do(' # -- CUT HERE')
cmd.do(' ')
cmd.do(' # Get the selected coordinates. We')
cmd.do(' # align these coords.')
cmd.do(' cmd.iterate_state(1, selector.process(sel1), "stored.sel1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, selector.process(sel2), "stored.sel2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # get molecule name')
cmd.do(' mol1 = cmd.identify(sel1,1)[0][0]')
cmd.do(' mol2 = cmd.identify(sel2,1)[0][0]')
cmd.do(' ')
cmd.do(' # Get all molecule coords. We do this because')
cmd.do(' # we have to rotate the whole molcule, not just')
cmd.do(' # the aligned selection')
cmd.do(' cmd.iterate_state(1, mol1, "stored.mol1.append([x,y,z])")')
cmd.do(' cmd.iterate_state(1, mol2, "stored.mol2.append([x,y,z])")')
cmd.do(' ')
cmd.do(' # check for consistency')
cmd.do(' assert len(stored.sel1) == len(stored.sel2)')
cmd.do(' L = len(stored.sel1)')
cmd.do(' assert L > 0')
cmd.do(' ')
cmd.do(' # must alway center the two proteins to avoid')
cmd.do(' # affine transformations. Center the two proteins')
cmd.do(' # to their selections.')
cmd.do(' COM1 = numpy.sum(stored.sel1,axis=0) / float(L)')
cmd.do(' COM2 = numpy.sum(stored.sel2,axis=0) / float(L)')
cmd.do(' stored.sel1 -= COM1')
cmd.do(' stored.sel2 -= COM2')
cmd.do(' ')
cmd.do(' # Initial residual, see Kabsch.')
cmd.do(' E0 = numpy.sum( numpy.sum(stored.sel1 * stored.sel1,axis=0),axis=0) + numpy.sum( numpy.sum(stored.sel2 * stored.sel2,axis=0),axis=0)')
cmd.do(' ')
cmd.do(' #')
cmd.do(' # This beautiful step provides the answer. V and Wt are the orthonormal')
cmd.do(' # bases that when multiplied by each other give us the rotation matrix, U.')
cmd.do(' # S, (Sigma, from SVD) provides us with the error! Isn't SVD great!')
cmd.do(' V, S, Wt = numpy.linalg.svd( numpy.dot( numpy.transpose(stored.sel2), stored.sel1))')
cmd.do(' ')
cmd.do(' # we already have our solution, in the results from SVD.')
cmd.do(' # we just need to check for reflections and then produce')
cmd.do(' # the rotation. V and Wt are orthonormal, so their det's')
cmd.do(' # are +/-1.')
cmd.do(' reflect = float(str(float(numpy.linalg.det(V) * numpy.linalg.det(Wt))))')
cmd.do(' ')
cmd.do(' if reflect == -1.0:')
cmd.do(' S[-1] = -S[-1]')
cmd.do(' V[:,-1] = -V[:,-1]')
cmd.do(' ')
cmd.do(' RMSD = E0 - (2.0 * sum(S))')
cmd.do(' RMSD = numpy.sqrt(abs(RMSD / L))')
cmd.do(' ')
cmd.do(' #U is simply V*Wt')
cmd.do(' U = numpy.dot(V, Wt)')
cmd.do(' ')
cmd.do(' # rotate and translate the molecule')
cmd.do(' stored.sel2 = numpy.dot((stored.mol2 - COM2), U)')
cmd.do(' stored.sel2 = stored.sel2.tolist()')
cmd.do(' # center the molecule')
cmd.do(' stored.sel1 = stored.mol1 - COM1')
cmd.do(' stored.sel1 = stored.sel1.tolist()')
cmd.do(' ')
cmd.do(' # let PyMol know about the changes to the coordinates')
cmd.do(' cmd.alter_state(1,mol1,"(x,y,z)=stored.sel1.pop(0)")')
cmd.do(' cmd.alter_state(1,mol2,"(x,y,z)=stored.sel2.pop(0)")')
cmd.do(' ')
cmd.do(' #print("Moved: %s Reference: %s RMSD = %f" % mol1, mol2, RMSD)')
cmd.do(' print("% s, % s,% 5.3f" % (mol1, mol2, RMSD))')
cmd.do(' ')
cmd.do(' # make the alignment OBVIOUS')
cmd.do(' cmd.hide("everything")')
cmd.do(' cmd.show("ribbon", sel1 + " or " + sel2)')
cmd.do(' cmd.color("gray70", mol1 )')
cmd.do(' cmd.color("magenta", mol2 )')
cmd.do(' cmd.color("red", "visible")')
cmd.do(' cmd.show("ribbon", "not visible")')
cmd.do(' cmd.center("visible")')
cmd.do(' cmd.orient()')
cmd.do(' cmd.zoom("visible")')
cmd.do(' ')
cmd.do('cmd.extend("optAlignRNA", optAlignRNA)')
cmd.do('python end')
| 41.419453 | 143 | 0.634329 |
220378f315f7e2f7d8cd6b8b856c000fc8a490f5 | 12,933 | py | Python | 2020/day11.py | asmeurer/advent-of-code | 3ba3edb0c29994487f1b3344383dc41dfea9bfcb | [
"MIT"
] | null | null | null | 2020/day11.py | asmeurer/advent-of-code | 3ba3edb0c29994487f1b3344383dc41dfea9bfcb | [
"MIT"
] | null | null | null | 2020/day11.py | asmeurer/advent-of-code | 3ba3edb0c29994487f1b3344383dc41dfea9bfcb | [
"MIT"
] | null | null | null | test_input = """
L.LL.LL.LL
LLLLLLL.LL
L.L.L..L..
LLLL.LL.LL
L.LL.LL.LL
L.LLLLL.LL
..L.L.....
LLLLLLLLLL
L.LLLLLL.L
L.LLLLL.LL
"""
test_input2 = """
.......#.
...#.....
.#.......
.........
..#L....#
....#....
.........
#........
...#.....
"""
test_input3 = """
.............
.L.L.#.#.#.#.
.............
"""
test_input4 = """
.##.##.
#.#.#.#
##...##
...L...
##...##
#.#.#.#
.##.##.
"""
input = """
LL.LL.LLLLLL.LLLLLLLLLLLLLLLLLL.LLLLL..LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLL.LL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
.LL...LL.L.L....LL..LL..L.L.L..L.....L...LL.....LLL..L..L..L.....L.L..LLLL...LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LL.L......L...LL....L...L.LL.L.....L.LL.L....L...LLL....LL.....LL.L.LLL...LL.L...LLL.L.L...
LLLLLLLLLLLL.LLLLLLLL.L.LL.L.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLL.LL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLL.LLLLLLLLLLLL.LLLL.LLLLLLL..LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.L.LL.LLLLL
.LLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
...L..L......L..L.L.......LL...L.LL.L...LL...L..LL....L....L.L..L...L...L.L.....LL.....L..L
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLL.LL
LLLLL.LLLLLLLL.LL.LLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLL.L.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLL.LLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.L.LLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLL.LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
.......LL.L.L...LL..L....LL....L.L.L....L......L..LL...LL.LLL..L....L......L.LLL.L.....LLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLLLLLL.LLLLLLLLL.LLLL.L.LLLL.LLLLLLLL.LLLLLL.L.LLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLL.
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLL..LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLL
LLLLL.LLLLLL.LL.LLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLL.LLLL.LLLLLLLLLLLLLLLLL
.L........L..L.L.LLLLL.......LL.......L..L.L..LL.L......L.......LLL..LLL.LL...L.L...L.LL.L.
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLL..LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLL..LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
..L..LL.......L.LLLL.L.....L...L.LL...LLLLL.L.....L..L...LL.LL..L..LLLLLL..........LL.....L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL..LLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL..LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LL.LLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLL..LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
L...LL....L..L..LL.........L.L...LL..LL.L....L...........LL.L.......L.L.L.......L..L..LL..L
LLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LL.LLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.L.LLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.L.LLLL.LLLLLLLLLLLL..L.LLLL.L.LL.LLLLLLLL.LLLLLLLLLLLLLLLL.
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLL.LLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
.....L.LLL...LL..LL.....L....LL.......L...LL..L..L...L...L.LL.LL.LL...LL..LLL.L..LLL..LLLL.
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.L.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL..LLL.LLLLLLLLLLLLLL.LLLL..LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLLLLL.LL.LLLLLLLLLLLLL.LL.LLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
..L..LL.........L....L.L.L.L...L....L...........LL....L..L...L.LL..L..LL.L..LL..L..L.L..L.L
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
....L............L....LL......L.LLL.LLL....LL.....L..L.LL.L........L..L......L.LLL..LL..LL.
LL.LLLLLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLL.LLLLLLLL.L.LLLLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLL..LLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLLL..LLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLL
LLLLL.L.LLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLL
.L......LLL...L.L.LL.L.....LL.L..L.L.LLLLL....LL..L...L..L.....L.L...L...L.L.LL.LL.L.......
LLLLLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLLLLLLLLL.LLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLL.LLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLL
LLLLL.LLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLL.LLLLLLLLLLL.LLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL..LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLL..LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLLL.LLLLLL.LLLL.LLLLLLLL.LLLLLL.LLLL.LLLLL
"""
import numpy as np
# Grid encoding: map each input character to an integer so the grid can live
# in a numpy array. Presumably (Advent of Code 2020 day 11 seating rules):
# 'L' = empty seat, '#' = occupied seat, '.' = floor — TODO confirm against
# the helper functions.
val = {'L': -1, '#': 1, '.': 0}
# Reverse mapping, used to render an integer grid back into characters.
rval = {v: k for k, v in val.items()}
print("Day 11")
print("Part 1")
print("Test input")
# NOTE(review): strtoarray / arraytostr / adjacent / generations / adjacent2 /
# generations2 are not defined in this excerpt — presumably defined earlier
# in the original file.
testa = strtoarray(test_input)
print(test_input)
print(testa)
print(arraytostr(testa))
print("Adjacent to 0, 0", arraytostr(adjacent(testa, 0, 0)))
print("Adjacent to 2, 2", arraytostr(adjacent(testa, 2, 2)))
test_finala = generations(testa)
# Count cells equal to 1 ('#') in the grid returned by generations().
print(np.sum(test_finala == 1))
print("Puzzle input")
# NOTE: `input` here is the module-level puzzle string defined above,
# shadowing the builtin input().
a = strtoarray(input)
finala = generations(a)
print(np.sum(finala == 1))
print("Part 2")
print("Test input")
testa2 = strtoarray(test_input2)
# The asserts pin the coordinates of the single empty seat (-1 == 'L') in
# each part-2 visibility test grid.
assert testa2[4, 3] == -1
print(adjacent2(testa2, 4, 3))
testa3 = strtoarray(test_input3)
assert testa3[1, 3] == -1
print(adjacent2(testa3, 1, 3))
testa4 = strtoarray(test_input4)
assert testa4[3, 3] == -1
print(adjacent2(testa4, 3, 3))
# Part 2 reuses the part-1 test grid (testa) under the new rules.
test_finala = generations2(testa)
print(np.sum(test_finala==1))
print("Puzzle input")
finala = generations2(a)
print(np.sum(finala == 1))
| 45.861702 | 91 | 0.718163 |
220420bd932e73713baed1135186d8fa37af4fd2 | 2,849 | py | Python | final/runner_2.py | Pluriscient/sma2c-ipd | e6e4a5240930491a996afda4744714c5c4826ac2 | [
"MIT"
] | null | null | null | final/runner_2.py | Pluriscient/sma2c-ipd | e6e4a5240930491a996afda4744714c5c4826ac2 | [
"MIT"
] | null | null | null | final/runner_2.py | Pluriscient/sma2c-ipd | e6e4a5240930491a996afda4744714c5c4826ac2 | [
"MIT"
] | null | null | null | from SMA2CAgent import SMA2CAgent
from A2CAgent import A2CAgent
from RandomAgent import RandomAgent
# from .SMA2CAgent import SMA2CAgent
import gym
import numpy as np
from IPD_fixed import IPDEnv
import axelrod
import time
import argparse
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--rounds", help='number of rounds to play per episode', type=int, default=20)
    parser.add_argument("--episodes", help='number of episodes to play', type=int, default=1000)
    parser.add_argument("--seed", help='random seed, -1 if random', type=int, default=-1)
    parser.add_argument("--output", help="output folder", default=f'output-{time.time():.0f}')
    parser.add_argument("--pure-a2c", help="Don't use an encoder", action='store_true')
    parser.add_argument("--alpha", help='LR of encoder', type=float)
    parser.add_argument("--beta", help='LR of A2C agent', type=float)
    parser.add_argument("--lstm-dims", help='LSTM dimensions', type=int)
    parser.add_argument("--encoder-fc", help='dimensions of encoder dense layers', type=int, action='append')
    parser.add_argument("--a2c-fc", help='dimensions of a2c hidden layers', type=int, action='append')
    parser.add_argument("--latent-dims", help='dimensions of code', type=int)
    parser.add_argument("opponents", help='opponents that the bot should face', nargs="*")
    parser.add_argument("--random", help="Don't use an agent, just random", action='store_true')
    args = parser.parse_args()

    # Resolve opponent names against the axelrod strategy catalogue
    # (keys are lower-cased strategy names).
    strats = {s.name.lower(): s for s in axelrod.all_strategies}
    opponents = []
    for opp in args.opponents:
        if opp not in strats:
            # Bug fix: the original printed this warning and then crashed
            # anyway with a KeyError on the lookup below. Skip unknown names
            # instead so the remaining opponents still get used.
            print(f'{opp} not found in strats')
            continue
        opponents.append(strats[opp])

    env = IPDEnv({'rounds': args.rounds, 'opponents': opponents})
    # A seed of -1 means "unseeded" (non-deterministic run).
    seed = args.seed if args.seed != -1 else None
    env.seed(seed=seed)

    # Drop options that were not set on the command line (None values) so the
    # agents fall back to their own defaults.
    config = {k: v for k, v in vars(args).items() if v is not None}
    if config['pure_a2c']:
        print("____USING PURE A2C_____")
        agent = A2CAgent(env, config)
    elif config['random']:
        print("__RANDOM AGENT___")
        agent = RandomAgent(env, config)
    else:
        print("____USING SMA2C______")
        agent = SMA2CAgent(env, config)

    agent.run(episodes=args.episodes)
    agent.save()
| 43.830769 | 108 | 0.67708 |
2204368a20f00021e6e644e58818806aeac0f4fc | 876 | py | Python | 295-find-median-from-data-stream/295-find-median-from-data-stream.py | Dawit-Getachew/A2SV_Practice | 2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61 | [
"MIT"
] | null | null | null | 295-find-median-from-data-stream/295-find-median-from-data-stream.py | Dawit-Getachew/A2SV_Practice | 2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61 | [
"MIT"
] | null | null | null | 295-find-median-from-data-stream/295-find-median-from-data-stream.py | Dawit-Getachew/A2SV_Practice | 2fe06d725e0acfe668c6dae98fe3ef6e6e26ef61 | [
"MIT"
] | null | null | null | import heapq as h
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian() | 31.285714 | 67 | 0.586758 |
2204946bef0686b31437d34ea53f7a86c1f9035c | 1,781 | py | Python | mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
] | 4 | 2021-03-05T15:39:24.000Z | 2021-09-15T06:11:45.000Z | mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
] | 631 | 2020-04-27T10:39:18.000Z | 2022-03-31T14:51:38.000Z | mottak-arkiv-service/tests/routers/mappers/test_metadatafil.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | [
"Apache-2.0"
] | 3 | 2020-02-20T15:48:03.000Z | 2021-12-16T22:50:40.000Z | import pytest
from app.domain.models.Metadatafil import Metadatafil, MetadataType
from app.exceptions import InvalidContentType
from app.routers.mappers.metadafil import _get_file_content, metadatafil_mapper, _content_type2metadata_type
def test__content_type2metadata_type__success():
    """
    GIVEN the content_type string 'text/xml'
    WHEN it is passed to _content_type2metadata_type
    THEN the result is MetadataType.XML_METS
    """
    result = _content_type2metadata_type('text/xml')
    assert result == MetadataType.XML_METS
def test__content_type2metadata_type__failure():
    """
    GIVEN a content_type string that is not a full media type ('text')
    WHEN it is passed to _content_type2metadata_type
    THEN an InvalidContentType exception is raised
    """
    invalid_content_type = 'text'
    with pytest.raises(InvalidContentType):
        _content_type2metadata_type(invalid_content_type)
def test__get_file_content(testfile, testfile_content):
    """
    GIVEN a test file whose content is a METS/XML document
    WHEN it is passed to _get_file_content
    THEN the returned string matches the known file content
    """
    assert _get_file_content(testfile) == testfile_content
def test_metadatafil_mapper(testfile, testfile_content):
    """
    GIVEN a test file whose content is a METS/XML document
    WHEN it is passed to metadatafil_mapper
    THEN the returned Metadatafil object carries the expected
    filename, type and content
    """
    actual = metadatafil_mapper(testfile)
    expected = Metadatafil(
        filnavn="df53d1d8-39bf-4fea-a741-58d472664ce2.xml",
        type_=MetadataType.XML_METS,
        innhold=testfile_content)
    assert vars(actual) == vars(expected)
| 34.25 | 108 | 0.742841 |
2204c4afb63d7b851791357727ac0902218aab44 | 2,748 | py | Python | src/niweb/apps/noclook/templatetags/rack_tags.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | null | null | null | src/niweb/apps/noclook/templatetags/rack_tags.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2019-07-24T12:41:11.000Z | 2020-03-31T10:10:04.000Z | src/niweb/apps/noclook/templatetags/rack_tags.py | emjemj/ni | a78e6d97d1e4610aad7698c4f0f459221c680b4f | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2019-02-25T14:58:20.000Z | 2019-02-25T14:58:20.000Z | from django import template
register = template.Library()
RACK_SIZE_PX = 20
MARGIN_HEIGHT = 2
| 28.926316 | 114 | 0.643377 |
2205e64387c6f4c5a706049e6175c53d8453ff11 | 442 | py | Python | Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py | LuminolT/Cryptographic | 87fffae591eee9644641a4c511972df0c2a44df7 | [
"MIT"
] | null | null | null | Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py | LuminolT/Cryptographic | 87fffae591eee9644641a4c511972df0c2a44df7 | [
"MIT"
] | null | null | null | Cryptography/Exp-1-Shamirs-Secret-Sharing/main.py | LuminolT/Cryptographic | 87fffae591eee9644641a4c511972df0c2a44df7 | [
"MIT"
] | 1 | 2022-03-07T13:56:55.000Z | 2022-03-07T13:56:55.000Z | import numpy as np
import matplotlib.pyplot as plt
from shamir import *
from binascii import hexlify
# img = plt.imread('cat.png')
# plt.imshow(img)
# plt.show()
s = 'TEST_STRING'.encode()
print("Original secret:", hexlify(s))
l = Shamir.split(3, 5, '12345'.encode())
for idx, item in l:
print("Share {}: {}".format(str(idx), hexlify(item)))
shares = l[1:4]
secret = Shamir.combine(shares)
print(f'Secret is : {secret.decode()}') | 18.416667 | 57 | 0.669683 |
2206a89728beed4abfd89a30818175cab85e95be | 825 | py | Python | P20-Stack Abstract Data Type/Stack - Reverse Stack.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
] | null | null | null | P20-Stack Abstract Data Type/Stack - Reverse Stack.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
] | null | null | null | P20-Stack Abstract Data Type/Stack - Reverse Stack.py | necrospiritus/Python-Working-Examples | 075d410673e470fc7c4ffc262e92109a3032132f | [
"MIT"
] | null | null | null | """Reverse stack is using a list where the top is at the beginning instead of at the end."""
s = Reverse_Stack()
print(s.is_empty())
s.push(4)
s.push("Dog")
print(s.peek())
s.push("Cat")
print(s.size())
print(s.is_empty())
s.pop()
print(s.peek())
print(s.size())
| 22.916667 | 92 | 0.632727 |
220737eae6a16eeacd5d110896be7e897b880d4e | 101 | py | Python | gerapy/cmd/server.py | awesome-archive/Gerapy | e9792d020397cd85b4d553b91b7829078b728b98 | [
"MIT"
] | 1 | 2018-12-07T02:05:32.000Z | 2018-12-07T02:05:32.000Z | gerapy/cmd/server.py | Tilyp/Gerapy | e9792d020397cd85b4d553b91b7829078b728b98 | [
"MIT"
] | null | null | null | gerapy/cmd/server.py | Tilyp/Gerapy | e9792d020397cd85b4d553b91b7829078b728b98 | [
"MIT"
] | null | null | null | from gerapy.server.manage import manage
import sys
| 14.428571 | 39 | 0.70297 |
220739480d36f76e621c523e3b7cf2bdd8e3c62a | 964 | py | Python | client/walt/client/term.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | [
"BSD-3-Clause"
] | 4 | 2020-01-14T09:12:56.000Z | 2022-03-14T14:35:11.000Z | client/walt/client/term.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | [
"BSD-3-Clause"
] | 73 | 2016-04-29T13:17:26.000Z | 2022-03-01T15:06:48.000Z | client/walt/client/term.py | dia38/walt-python-packages | e6fa1f166f45e73173195d57840d22bef87b88f5 | [
"BSD-3-Clause"
] | 3 | 2019-03-18T14:27:56.000Z | 2021-06-03T12:07:02.000Z | #!/usr/bin/env python
import sys, tty, termios, array, fcntl, curses
| 35.703704 | 69 | 0.629668 |
22075aae320e407eb3fd67de73b37aec7dd0a0b3 | 25,123 | py | Python | improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | [
"BSD-3-Clause"
] | null | null | null | improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | [
"BSD-3-Clause"
] | null | null | null | improver_tests/calibration/ensemble_calibration/test_CalibratedForecastDistributionParameters.py | cpelley/improver | ebf77fe2adc85ed7aec74c26671872a2e4388ded | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2017-2021 Met Office.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
Unit tests for the
`ensemble_calibration.CalibratedForecastDistributionParameters`
class.
"""
import unittest
import numpy as np
from iris.cube import CubeList
from iris.tests import IrisTest
from numpy.testing import assert_array_almost_equal
from improver.calibration.ensemble_calibration import (
CalibratedForecastDistributionParameters as Plugin,
)
from improver.calibration.ensemble_calibration import (
EstimateCoefficientsForEnsembleCalibration,
)
from improver.metadata.constants.attributes import MANDATORY_ATTRIBUTE_DEFAULTS
from improver.synthetic_data.set_up_test_cubes import set_up_variable_cube
from improver.utilities.warnings_handler import ManageWarnings
from .helper_functions import EnsembleCalibrationAssertions, SetupCubes
from .test_EstimateCoefficientsForEnsembleCalibration import SetupExpectedCoefficients
# Run this module's unit tests when it is executed directly.
if __name__ == "__main__":
    unittest.main()
| 42.294613 | 88 | 0.690841 |
2207cb5f7d7e3d98709c2a6697f808bd842caf1c | 2,897 | py | Python | rendaz/tests/test_daztools.py | veselosky/rendaz | c81298cb9b8f142c4748c28b7e93549a56ee248d | [
"Apache-2.0"
] | null | null | null | rendaz/tests/test_daztools.py | veselosky/rendaz | c81298cb9b8f142c4748c28b7e93549a56ee248d | [
"Apache-2.0"
] | null | null | null | rendaz/tests/test_daztools.py | veselosky/rendaz | c81298cb9b8f142c4748c28b7e93549a56ee248d | [
"Apache-2.0"
] | null | null | null | "Test handling/parsing of various DAZ Studio files"
from pathlib import Path
from tempfile import NamedTemporaryFile
from django.apps import apps
from rendaz.daztools import (
DSONFile,
ProductMeta,
manifest_files,
supplement_product_name,
)
TEST_DIR = Path(__file__).parent
def test_read_dson_compressed():
    """A compressed .duf file is detected as compressed and parsed to DSON."""
    source = TEST_DIR / "Sphere-compressed.duf"
    duf = DSONFile(path=str(source))
    assert duf.path.name == "Sphere-compressed.duf"
    assert duf.is_compressed
    assert "asset_info" in duf.dson
def test_read_dson_uncompressed():
    """An uncompressed .duf file is detected as such and parsed to DSON."""
    source = TEST_DIR / "Sphere-uncompressed.duf"
    duf = DSONFile(path=str(source))
    assert duf.path.name == "Sphere-uncompressed.duf"
    assert duf.is_compressed is False
    assert "asset_info" in duf.dson
def test_save_dson_compressed():
    """Round trip: read uncompressed, save compressed, read the result back."""
    original = DSONFile(path=str(TEST_DIR / "Sphere-uncompressed.duf"))
    tmp = NamedTemporaryFile(mode="wt", delete=False)
    tmp.close()
    try:
        original.save(tmp.name, compress=True)
        reloaded = DSONFile(tmp.name)
        assert reloaded.is_compressed
        assert "asset_info" in reloaded.dson
    finally:
        Path(tmp.name).unlink()
def test_save_dson_uncompressed():
    """Round trip: read compressed, save uncompressed, read the result back."""
    original = DSONFile(path=str(TEST_DIR / "Sphere-compressed.duf"))
    tmp = NamedTemporaryFile(mode="wt", delete=False)
    tmp.close()
    try:
        original.save(tmp.name, compress=False)
        reloaded = DSONFile(tmp.name)
        assert reloaded.is_compressed is False
        assert "asset_info" in reloaded.dson
    finally:
        Path(tmp.name).unlink()
| 29.561224 | 75 | 0.705557 |
2209880d39d84bdcf0ec5ef896046b892fe747ab | 20,000 | py | Python | src/core/models/graph2seq.py | talha1503/RL-based-Graph2Seq-for-NQG | 1039e0b6231ae7029ea6e4073b1e55df5ad2e928 | [
"Apache-2.0"
] | 100 | 2019-08-18T21:56:24.000Z | 2022-03-31T08:54:41.000Z | src/core/models/graph2seq.py | talha1503/RL-based-Graph2Seq-for-NQG | 1039e0b6231ae7029ea6e4073b1e55df5ad2e928 | [
"Apache-2.0"
] | 7 | 2019-12-26T03:49:20.000Z | 2021-11-26T19:11:19.000Z | src/core/models/graph2seq.py | talha1503/RL-based-Graph2Seq-for-NQG | 1039e0b6231ae7029ea6e4073b1e55df5ad2e928 | [
"Apache-2.0"
] | 17 | 2020-02-02T06:41:21.000Z | 2022-03-09T02:53:27.000Z | import random
import string
from typing import Union, List
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..layers.common import EncoderRNN, DecoderRNN, dropout
from ..layers.attention import *
from ..layers.graphs import GraphNN
from ..utils.generic_utils import to_cuda, create_mask
from ..utils.constants import VERY_SMALL_NUMBER
| 46.948357 | 194 | 0.6892 |
220aabb343a26ce1e8cadcc4df4a8b3a8adedfdb | 2,939 | py | Python | towhee/engine/pipeline.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | null | null | null | towhee/engine/pipeline.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | null | null | null | towhee/engine/pipeline.py | jeffoverflow/towhee | c576d22a4cdfc3909a3323b0d1decab87e83d26c | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from towhee.engine.graph_context import GraphContext
from towhee.dag.graph_repr import GraphRepr
from towhee.dataframe.dataframe import DFIterator
| 33.397727 | 92 | 0.638312 |
220ccdab937624a53d838d42a5f734ee87cb22a8 | 744 | py | Python | portfolio/models.py | MrInternauta/Python-Django-Portafolio-web-administrable | 0df6f76cb5bdc2f28eb691d21f3592f7f082ce80 | [
"MIT"
] | null | null | null | portfolio/models.py | MrInternauta/Python-Django-Portafolio-web-administrable | 0df6f76cb5bdc2f28eb691d21f3592f7f082ce80 | [
"MIT"
] | null | null | null | portfolio/models.py | MrInternauta/Python-Django-Portafolio-web-administrable | 0df6f76cb5bdc2f28eb691d21f3592f7f082ce80 | [
"MIT"
] | null | null | null | from django.db import models
# Create your models here.
| 43.764706 | 89 | 0.701613 |
220d1b0d3abc6c0db8d6bd13778e65f09dbb4290 | 231 | py | Python | src/notifications/tests.py | kullo/webconfig | 470839ed77fda11634d4e14a89bb5e7894aa707d | [
"BSD-3-Clause"
] | null | null | null | src/notifications/tests.py | kullo/webconfig | 470839ed77fda11634d4e14a89bb5e7894aa707d | [
"BSD-3-Clause"
] | null | null | null | src/notifications/tests.py | kullo/webconfig | 470839ed77fda11634d4e14a89bb5e7894aa707d | [
"BSD-3-Clause"
] | null | null | null | # Copyright 20152020 Kullo GmbH
#
# This source code is licensed under the 3-clause BSD license. See LICENSE.txt
# in the root directory of this source tree for details.
from django.test import TestCase
# Create your tests here.
| 28.875 | 78 | 0.774892 |
220da7c8db31ca8e3ea4491d39c1e1bb6b8b46fe | 1,018 | py | Python | examples/cam.py | jtme/button-shim | 19b80a236866fad068e6d3aeb643a1270d6ae934 | [
"MIT"
] | null | null | null | examples/cam.py | jtme/button-shim | 19b80a236866fad068e6d3aeb643a1270d6ae934 | [
"MIT"
] | null | null | null | examples/cam.py | jtme/button-shim | 19b80a236866fad068e6d3aeb643a1270d6ae934 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import signal
import buttonshim
print("""
Button SHIM: rainbow.py
Command on button press.
Press Ctrl+C to exit.
""")
import commands
signal.pause()
| 19.960784 | 74 | 0.681729 |
220e0c7e4d7e7b9e561c692a325977f16ecf70b4 | 153 | py | Python | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 12 | 2020-12-13T08:34:24.000Z | 2022-03-20T15:17:17.000Z | built-in/TensorFlow/Official/cv/image_classification/ResnetVariant_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 3 | 2021-03-31T20:15:40.000Z | 2022-02-09T23:50:46.000Z | built-in/TensorFlow/Research/cv/image_classification/Darts_for_TensorFlow/automl/vega/algorithms/nas/sm_nas/mmdet_meta_cfgs/bbox_head/__init__.py | Huawei-Ascend/modelzoo | df51ed9c1d6dbde1deef63f2a037a369f8554406 | [
"Apache-2.0"
] | 2 | 2021-07-10T12:40:46.000Z | 2021-12-17T07:55:15.000Z | from .cascade_head import CascadeFCBBoxHead
from .convfc_bbox_head import SharedFCBBoxHead
__all__ = [
'CascadeFCBBoxHead',
'SharedFCBBoxHead']
| 21.857143 | 46 | 0.79085 |
221167d3228359aec0ed9b72908eb095312e240f | 3,146 | py | Python | monzo/model/monzoaccount.py | elementechemlyn/pythonzo | ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc | [
"MIT"
] | null | null | null | monzo/model/monzoaccount.py | elementechemlyn/pythonzo | ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc | [
"MIT"
] | 1 | 2021-06-01T22:01:40.000Z | 2021-06-01T22:01:40.000Z | monzo/model/monzoaccount.py | elementechemlyn/pythonzo | ff6124119c7fe8c68c4bfa0e5d59b79ad442c1fc | [
"MIT"
] | null | null | null | import datetime
from .monzobalance import MonzoBalance
from .monzopagination import MonzoPaging
from .monzotransaction import MonzoTransaction
| 35.75 | 95 | 0.651939 |
22117a7dbdd2a79f096b01e929739e3fe71da985 | 3,718 | py | Python | learninghouse/api/errors/__init__.py | DerOetzi/learninghouse-core | ece900b2a333b8ea9710609322cfefeeaf694cf8 | [
"MIT"
] | 1 | 2021-11-02T13:52:11.000Z | 2021-11-02T13:52:11.000Z | learninghouse/api/errors/__init__.py | DerOetzi/learninghouse-core | ece900b2a333b8ea9710609322cfefeeaf694cf8 | [
"MIT"
] | null | null | null | learninghouse/api/errors/__init__.py | DerOetzi/learninghouse-core | ece900b2a333b8ea9710609322cfefeeaf694cf8 | [
"MIT"
] | 1 | 2020-08-27T20:03:36.000Z | 2020-08-27T20:03:36.000Z | from typing import Dict, Optional
from fastapi import status, Request
from fastapi.responses import JSONResponse
from fastapi.exceptions import RequestValidationError
from learninghouse.models import LearningHouseErrorMessage
MIMETYPE_JSON = 'application/json'
| 34.747664 | 133 | 0.604088 |
2212ba611cd09cc95cb9831180998a6882517ddf | 1,751 | py | Python | moda/dataprep/create_dataset.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | null | null | null | moda/dataprep/create_dataset.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | null | null | null | moda/dataprep/create_dataset.py | Patte1808/moda | 312c9594754ae0f6d17cbfafaa2c4c790c58efe5 | [
"MIT"
] | null | null | null | import pandas as pd
def get_windowed_ts(ranged_ts, window_size, with_actual=True):
    """
    Creates a data frame where each row is a window of samples from the time series.
    Each consecutive row is a shift of 1 cell from the previous row.
    For example: [[1,2,3],[2,3,4],[3,4,5]]

    :param ranged_ts: a pd.DataFrame containing one column for values and one pd.DatetimeIndex for dates
    :param window_size: The number of timestamps to be used as features
    :param with_actual: Whether to increase window size by one, and treat the last column as the ground truth
    (relevant for forecasting scenarios). Returns the same output just with a window size bigger by 1.
    :return: a pd.DataFrame with one column per timestamp in the window; rows
    whose window is incomplete (NaNs introduced by shifting) are dropped.
    """
    # Number of shifted copies to place next to the original column(s).
    num_shifts = window_size - 1 + int(with_actual)
    # Collect all shifted frames first and concatenate once: the original
    # called pd.concat inside the loop, which is quadratic in window size.
    frames = [ranged_ts]
    frames.extend(ranged_ts.shift(-(i + 1)) for i in range(num_shifts))
    windowed_ts = pd.concat(frames, axis=1)
    # Shifting leaves NaNs in the trailing rows; drop those incomplete windows.
    return windowed_ts.dropna(axis=0)
def split_history_and_current(windowed_ts):
"""
Returns the first n-1 columns as X, and the last column as y. Useful mainly for forecasting scenarios
:param windowed_ts: a pd.DataFrame with a date index and a column per timestamp. see get_windowed_ts
:return:
"""
X = windowed_ts.iloc[:, :-1].values
y = windowed_ts.iloc[:, -1].values
return (X, y)
if __name__ == "__main__":
ranged_ts = pd.DataFrame({"date": range(6), "value": range(6)})
ranged_ts["date"] = pd.to_datetime(ranged_ts["date"])
ranged_ts = ranged_ts.set_index(pd.DatetimeIndex(ranged_ts["date"]))
ranged_ts = ranged_ts.drop(columns="date")
ranged_ts.head()
windowed_df = get_windowed_ts(ranged_ts, window_size=3, with_actual=False)
| 38.065217 | 109 | 0.703027 |
22135083df33d8282602ed03efa2652030de4212 | 1,310 | py | Python | test/test_ufunc.py | tuwien-cms/xprec | 8f213aa9475342000883a56c56d54bb5208eb930 | [
"MIT"
] | 6 | 2021-10-01T16:35:27.000Z | 2022-01-05T18:21:39.000Z | test/test_ufunc.py | tuwien-cms/xprec | 8f213aa9475342000883a56c56d54bb5208eb930 | [
"MIT"
] | 8 | 2022-01-20T20:33:26.000Z | 2022-03-25T09:27:49.000Z | test/test_ufunc.py | tuwien-cms/xprec | 8f213aa9475342000883a56c56d54bb5208eb930 | [
"MIT"
] | 1 | 2022-01-21T22:49:16.000Z | 2022-01-21T22:49:16.000Z | # Copyright (C) 2021 Markus Wallerberger and others
# SPDX-License-Identifier: MIT
import numpy as np
import xprec
| 24.716981 | 77 | 0.650382 |
22135b653bd172de4f59e045357620ffd83da98a | 48 | py | Python | echolect/millstone/__init__.py | ryanvolz/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | 1 | 2022-03-24T22:48:12.000Z | 2022-03-24T22:48:12.000Z | echolect/millstone/__init__.py | scivision/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | 1 | 2015-03-25T20:41:24.000Z | 2015-03-25T20:41:24.000Z | echolect/millstone/__init__.py | scivision/echolect | ec2594925f34fdaea69b64e725fccb0c99665a55 | [
"BSD-3-Clause"
] | null | null | null | from .read_hdf5 import *
from .hdf5_api import * | 24 | 24 | 0.770833 |
2214b7a4b4680d12ebfcca09d05d0ee1ade6215e | 932 | py | Python | pyptoolz/transforms.py | embedio/pyplinez | 14b2e84d0f0bd86870d492a78f02c0b19810d3f6 | [
"MIT"
] | null | null | null | pyptoolz/transforms.py | embedio/pyplinez | 14b2e84d0f0bd86870d492a78f02c0b19810d3f6 | [
"MIT"
] | null | null | null | pyptoolz/transforms.py | embedio/pyplinez | 14b2e84d0f0bd86870d492a78f02c0b19810d3f6 | [
"MIT"
] | null | null | null | from pathlib import Path
from toolz import itertoolz, curried
import vaex
transform_path_to_posix = lambda path: path.as_posix()
transform_xlsx_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")
transform_ascii_to_vaex = lambda path: vaex.from_ascii(path, seperator="\t")
transform_ascii_to_vaex2 = lambda path: vaex.from_ascii(path)
transform_vaex_to_list = lambda df: [itertoolz.second(x) for x in df.iterrows()]
transform_vaex_to_dict = lambda df: df.to_dict()
| 20.26087 | 80 | 0.784335 |
2214dd004a4a327669decd49302d44af0c040bf5 | 1,409 | py | Python | experiments/s3-image-resize/chalicelib/s3_helpers.py | llamapope/chalice-experiments | f08fa0bade19c2659788a0678d89a4a63c2402d5 | [
"MIT"
] | null | null | null | experiments/s3-image-resize/chalicelib/s3_helpers.py | llamapope/chalice-experiments | f08fa0bade19c2659788a0678d89a4a63c2402d5 | [
"MIT"
] | 2 | 2021-06-08T20:56:46.000Z | 2022-01-13T02:15:29.000Z | experiments/s3-image-resize/chalicelib/s3_helpers.py | llamapope/chalice-experiments | f08fa0bade19c2659788a0678d89a4a63c2402d5 | [
"MIT"
] | null | null | null | import PIL
from PIL import Image
from io import BytesIO
import re | 29.978723 | 69 | 0.628105 |
221552a2a64bb10ef85638e3e31fd395fcf10fcf | 4,792 | py | Python | synapse/handlers/room_member_worker.py | lukaslihotzki/synapse | 1dfdc87b9bb07cc3c958dde7f41f2af4322477e5 | [
"Apache-2.0"
] | 9,945 | 2015-01-02T07:41:06.000Z | 2022-03-31T23:22:42.000Z | synapse/handlers/room_member_worker.py | lukaslihotzki/synapse | 1dfdc87b9bb07cc3c958dde7f41f2af4322477e5 | [
"Apache-2.0"
] | 9,320 | 2015-01-08T14:09:03.000Z | 2022-03-31T21:11:24.000Z | synapse/handlers/room_member_worker.py | lukaslihotzki/synapse | 1dfdc87b9bb07cc3c958dde7f41f2af4322477e5 | [
"Apache-2.0"
] | 2,299 | 2015-01-31T22:16:29.000Z | 2022-03-31T06:08:26.000Z | # Copyright 2018-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from typing import TYPE_CHECKING, List, Optional, Tuple
from synapse.api.errors import SynapseError
from synapse.handlers.room_member import RoomMemberHandler
from synapse.replication.http.membership import (
ReplicationRemoteJoinRestServlet as ReplRemoteJoin,
ReplicationRemoteKnockRestServlet as ReplRemoteKnock,
ReplicationRemoteRejectInviteRestServlet as ReplRejectInvite,
ReplicationRemoteRescindKnockRestServlet as ReplRescindKnock,
ReplicationUserJoinedLeftRoomRestServlet as ReplJoinedLeft,
)
from synapse.types import JsonDict, Requester, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
logger = logging.getLogger(__name__)
| 33.746479 | 77 | 0.660267 |
22159f4a1c6d6e72ce319e5cebbbcc4d51c13acd | 2,205 | py | Python | win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py | leegoonz/Maya-devkit | b81fe799b58e854e4ef16435426d60446e975871 | [
"ADSL"
] | 10 | 2018-03-30T16:09:02.000Z | 2021-12-07T07:29:19.000Z | win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py | leegoonz/Maya-devkit | b81fe799b58e854e4ef16435426d60446e975871 | [
"ADSL"
] | null | null | null | win/devkit/other/pymel/extras/completion/py/maya/app/edl/importExport.py | leegoonz/Maya-devkit | b81fe799b58e854e4ef16435426d60446e975871 | [
"ADSL"
] | 9 | 2018-06-02T09:18:49.000Z | 2021-12-20T09:24:35.000Z | import tempfile
import maya.OpenMaya as OpenMaya
import maya.OpenMayaRender as OpenMayaRender
import maya.OpenMayaMPx as OpenMayaMPx
import maya.cmds as cmds
import maya
import re
from maya.app.edl.fcp import *
def _setTimeCode(timecode):
pass
def doExport(fileName, allowPlayblast):
"""
Exports the Maya sequence using the EDL Exporter class.
"""
pass
def doMel(*args, **kwargs):
"""
Takes as input a string containing MEL code, evaluates it, and returns the result.
This function takes a string which contains MEL code and evaluates it using
the MEL interpreter. The result is converted into a Python data type and is
returned.
If an error occurs during the execution of the MEL script, a Python exception
is raised with the appropriate error message.
"""
pass
def audioClipCompare(a, b):
pass
def _getValidClipObjectName(clipName, isVideo):
pass
def doImport(fileName, useStartFrameOverride, startFrame):
"""
Imports the specified file using the EDL Importer class.
"""
pass
def _nameToNode(name):
pass
def getTimeCode():
pass
def videoClipCompare(a, b):
pass
def getShotsResolution():
"""
Returns the video resolution of the sequencer if all the shots have the same resolution
Otherwise it returns False, 0, 0
"""
pass
mayaFrameRates = {}
| 17.64 | 92 | 0.647166 |
22189dbba6fcdc9b59fa2a428105a701aaaf4a2f | 1,040 | py | Python | packages/mcni/python/mcni/instrument_simulator/__init__.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mcni/python/mcni/instrument_simulator/__init__.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mcni/python/mcni/instrument_simulator/__init__.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 2006-2010 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
## Note:
## 1. This package depends on dsm
from mcni.neutron_coordinates_transformers import default as default_neutron_coordinates_transformer
default_simulator = simulator( default_neutron_coordinates_transformer )
# version
__id__ = "$Id$"
# End of file
| 26.666667 | 100 | 0.575 |
22199caafbe2cf83aa5b2f765370eb9a8ab49f37 | 169 | py | Python | todolist/wsgi.py | HangeZoe/django-todo-list | 8a3232916e57724d52f0f93124f346d82b72e0ce | [
"MIT"
] | null | null | null | todolist/wsgi.py | HangeZoe/django-todo-list | 8a3232916e57724d52f0f93124f346d82b72e0ce | [
"MIT"
] | null | null | null | todolist/wsgi.py | HangeZoe/django-todo-list | 8a3232916e57724d52f0f93124f346d82b72e0ce | [
"MIT"
] | null | null | null | import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'todolist.settings')
application = get_wsgi_application()
| 21.125 | 68 | 0.828402 |
221aaf010e11acc9785595a6c6873f1ea746ad9a | 4,501 | py | Python | cern_search_rest_api/modules/cernsearch/cli.py | inveniosoftware-contrib/citadel-search | 736fdb3a5b32f750111bc846bc815c4671978fa1 | [
"MIT"
] | 6 | 2020-04-12T18:30:08.000Z | 2021-09-15T05:53:40.000Z | cern_search_rest_api/modules/cernsearch/cli.py | inveniosoftware-contrib/cern-search | 736fdb3a5b32f750111bc846bc815c4671978fa1 | [
"MIT"
] | 6 | 2020-03-19T13:28:38.000Z | 2020-12-08T16:54:05.000Z | cern_search_rest_api/modules/cernsearch/cli.py | inveniosoftware-contrib/cern-search | 736fdb3a5b32f750111bc846bc815c4671978fa1 | [
"MIT"
] | 2 | 2019-04-22T21:20:17.000Z | 2019-05-16T08:50:38.000Z | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of CERN Search.
# Copyright (C) 2018-2021 CERN.
#
# Citadel Search is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Click command-line utilities."""
import json
import click
from flask.cli import with_appcontext
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records.models import RecordMetadata
from invenio_search import current_search
from invenio_search.cli import es_version_check
from cern_search_rest_api.modules.cernsearch.indexer import CernSearchRecordIndexer
from cern_search_rest_api.modules.cernsearch.indexer_tasks import process_bulk_queue
def abort_if_false(ctx, param, value):
"""Abort command is value is False."""
if not value:
ctx.abort()
| 30.412162 | 108 | 0.682959 |
221ac1f2a8c5526fcda12d6ed18346f9e5d9d58a | 906 | py | Python | kasaya/core/backend/redisstore.py | AYAtechnologies/Kasaya-esb | 150fa96d4136641cd4632f3c9a09d4fc2610df07 | [
"BSD-2-Clause"
] | 1 | 2015-06-26T18:05:20.000Z | 2015-06-26T18:05:20.000Z | kasaya/core/backend/redisstore.py | AYAtechnologies/Kasaya-esb | 150fa96d4136641cd4632f3c9a09d4fc2610df07 | [
"BSD-2-Clause"
] | null | null | null | kasaya/core/backend/redisstore.py | AYAtechnologies/Kasaya-esb | 150fa96d4136641cd4632f3c9a09d4fc2610df07 | [
"BSD-2-Clause"
] | null | null | null | __author__ = 'wektor'
from generic import GenericBackend
import redis
| 25.166667 | 71 | 0.562914 |
221b92eff3eb5754a23903956aeef1d20d52980f | 11,288 | py | Python | venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 22 | 2021-07-16T08:11:22.000Z | 2022-03-31T07:15:34.000Z | venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 12 | 2020-02-21T07:24:52.000Z | 2020-04-14T09:54:32.000Z | venv/lib/python3.6/site-packages/ansible_collections/amazon/aws/plugins/module_utils/rds.py | usegalaxy-no/usegalaxy | 75dad095769fe918eb39677f2c887e681a747f3a | [
"MIT"
] | 39 | 2021-07-05T02:31:42.000Z | 2022-03-31T02:46:03.000Z | # Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from collections import namedtuple
from time import sleep
try:
from botocore.exceptions import BotoCoreError, ClientError, WaiterError
except ImportError:
pass
from ansible.module_utils._text import to_text
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
from .ec2 import AWSRetry
from .ec2 import ansible_dict_to_boto3_tag_list
from .ec2 import boto3_tag_list_to_ansible_dict
from .ec2 import compare_aws_tags
from .waiters import get_waiter
Boto3ClientMethod = namedtuple('Boto3ClientMethod', ['name', 'waiter', 'operation_description', 'cluster', 'instance'])
# Whitelist boto3 client methods for cluster and instance resources
cluster_method_names = [
'create_db_cluster', 'restore_db_cluster_from_db_snapshot', 'restore_db_cluster_from_s3',
'restore_db_cluster_to_point_in_time', 'modify_db_cluster', 'delete_db_cluster', 'add_tags_to_resource',
'remove_tags_from_resource', 'list_tags_for_resource', 'promote_read_replica_db_cluster'
]
instance_method_names = [
'create_db_instance', 'restore_db_instance_to_point_in_time', 'restore_db_instance_from_s3',
'restore_db_instance_from_db_snapshot', 'create_db_instance_read_replica', 'modify_db_instance',
'delete_db_instance', 'add_tags_to_resource', 'remove_tags_from_resource', 'list_tags_for_resource',
'promote_read_replica', 'stop_db_instance', 'start_db_instance', 'reboot_db_instance'
]
| 47.830508 | 159 | 0.710578 |
221baabdac2f34fa39aafcaa192dcd1f1b264104 | 176 | py | Python | week06/lecture/examples/src6/2/uppercase0.py | uldash/CS50x | c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1 | [
"MIT"
] | null | null | null | week06/lecture/examples/src6/2/uppercase0.py | uldash/CS50x | c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1 | [
"MIT"
] | null | null | null | week06/lecture/examples/src6/2/uppercase0.py | uldash/CS50x | c3ee0f42ad514b57a13c3ffbb96238b3ca3730e1 | [
"MIT"
] | null | null | null | # Uppercases string one character at a time
from cs50 import get_string
s = get_string("Before: ")
print("After: ", end="")
for c in s:
print(c.upper(), end="")
print()
| 17.6 | 43 | 0.653409 |
221c5dbcccafcacd09ca66b22dfdff675d20b942 | 2,050 | py | Python | tests/test_kobo.py | Donearm/kobuddy | 9c55f2f94c3c949c4d8a5ba18704be92c055873c | [
"MIT"
] | 75 | 2019-08-24T14:21:53.000Z | 2022-02-21T17:20:20.000Z | tests/test_kobo.py | Donearm/kobuddy | 9c55f2f94c3c949c4d8a5ba18704be92c055873c | [
"MIT"
] | 9 | 2019-10-15T19:30:16.000Z | 2021-08-17T15:24:00.000Z | tests/test_kobo.py | Donearm/kobuddy | 9c55f2f94c3c949c4d8a5ba18704be92c055873c | [
"MIT"
] | 4 | 2020-02-05T13:53:59.000Z | 2021-08-17T14:50:39.000Z | from datetime import datetime
from pathlib import Path
import pytz
import kobuddy
# a bit meh, but ok for now
kobuddy.set_databases(get_test_db())
from kobuddy import _iter_events_aux, get_events, get_books_with_highlights, _iter_highlights
| 28.472222 | 159 | 0.691707 |
221edc811e6d0e0ea5e013272ed5a112078a3713 | 1,062 | py | Python | tanks/views.py | BArdelean/djangostuff | b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519 | [
"bzip2-1.0.6"
] | null | null | null | tanks/views.py | BArdelean/djangostuff | b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519 | [
"bzip2-1.0.6"
] | null | null | null | tanks/views.py | BArdelean/djangostuff | b4b7b6bac5e1d8dbc73e2f5cb5a7e784a82c9519 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
from .models import Tank
from django.db import models
from django.http import HttpResponse
from django.views import View
# Create your views here.
# The view for the created model Tank
| 21.673469 | 54 | 0.65725 |
221ee83a279f586d62cd3b5b659bf72ceddc7c10 | 8,125 | py | Python | VQVAE/main.py | bipashasen/How2Sign-Blob | 6e2af881d96d477fdb93104b8e53d943765c64ff | [
"MIT"
] | null | null | null | VQVAE/main.py | bipashasen/How2Sign-Blob | 6e2af881d96d477fdb93104b8e53d943765c64ff | [
"MIT"
] | null | null | null | VQVAE/main.py | bipashasen/How2Sign-Blob | 6e2af881d96d477fdb93104b8e53d943765c64ff | [
"MIT"
] | null | null | null | import os
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import argparse
from tqdm import tqdm
import sys
import distributed as dist
import utils
from models.vqvae import VQVAE, VQVAE_Blob2Full
from models.discriminator import discriminator
visual_folder = '/home2/bipasha31/python_scripts/CurrentWork/samples/VQVAE'
os.makedirs(visual_folder, exist_ok=True)
verbose = False
save_idx_global = 0
save_at = 100
did = 0
models = {
'gan': 0,
'vae': 1
}
model_to_train = models['vae']
results = {
'n_updates': 0,
'recon_errors': [],
'loss_vals': [],
'perplexities': [],
'd_loss': []
}
device = 'cuda:0'
def main(args):
"""
Set up VQ-VAE model with components defined in ./models/ folder
"""
model = VQVAE(args.n_hiddens, args.n_residual_hiddens,
args.n_residual_layers, args.n_embeddings,
args.embedding_dim, args.beta, device)
if args.ckpt:
model.load_state_dict(torch.load(args.ckpt)['model'])
model = model.to(device)
if args.test:
loader = utils.load_data_and_data_loaders(args.dataset, args.batch_size, test=True)
test(loader, model)
return
"""
Load data and define batch data loaders
"""
items = utils.load_data_and_data_loaders(args.dataset, args.batch_size)
training_loader, validation_loader = items[2], items[3]
x_train_var = items[4]
"""
Set up optimizer and training loop
"""
optimizer = optim.Adam(model.parameters(), lr=args.learning_rate, amsgrad=True)
model.train()
if model_to_train == models['gan']:
train_vqgan(args, training_loader, validation_loader, x_train_var, model, optimizer)
else:
train(args, training_loader, validation_loader, x_train_var, model, optimizer)
if __name__ == "__main__":
# train_vqgan()
# train_blob2full()
parser = argparse.ArgumentParser()
"""
Hyperparameters
"""
timestamp = utils.readable_timestamp()
parser.add_argument("--batch_size", type=int, default=64)
parser.add_argument("--n_updates", type=int, default=50000)
parser.add_argument("--n_hiddens", type=int, default=128)
parser.add_argument("--n_residual_hiddens", type=int, default=32)
parser.add_argument("--n_residual_layers", type=int, default=2)
parser.add_argument("--embedding_dim", type=int, default=64)
parser.add_argument("--n_embeddings", type=int, default=512)
parser.add_argument("--beta", type=float, default=.25)
parser.add_argument("--learning_rate", type=float, default=3e-4)
parser.add_argument("--ckpt", type=str)
parser.add_argument("--log_interval", type=int, default=3)
parser.add_argument("--save_at", type=int, default=100)
parser.add_argument("--device_id", type=int, default=0)
parser.add_argument("--dataset", type=str, default='HandGestures')
parser.add_argument("--test", action='store_true')
# whether or not to save model
parser.add_argument("-save", action="store_true")
parser.add_argument("--filename", type=str, default=timestamp)
args = parser.parse_args()
args.save = True
if args.save and dist.is_primary():
print('Results will be saved in ./results/vqvae_' + args.filename + '.pth')
args.n_gpu = torch.cuda.device_count()
port = (
2 ** 15
+ 2 ** 14
+ hash(os.getuid() if sys.platform != "win32" else 1) % 2 ** 14
)+1
print(f'port: {port}')
print(args)
dist.launch(main, args.n_gpu, 1, 0, f"tcp://127.0.0.1:{port}", args=(args,))
| 30.204461 | 113 | 0.606769 |
2221af1a0ee8e71a36084e82816e4e484658018d | 1,245 | py | Python | api/voters/tests/test_models.py | citizenlabsgr/voter-engagement | 2d33eac1531471988543c6c3781b95ac73ec6dd9 | [
"MIT"
] | 6 | 2017-11-10T00:50:17.000Z | 2018-03-25T02:26:19.000Z | api/voters/tests/test_models.py | citizenlabsgr/voter-engagement | 2d33eac1531471988543c6c3781b95ac73ec6dd9 | [
"MIT"
] | 40 | 2017-10-25T16:16:55.000Z | 2018-08-15T05:27:36.000Z | api/voters/tests/test_models.py | citizenlabsgr/voter-engagement | 2d33eac1531471988543c6c3781b95ac73ec6dd9 | [
"MIT"
] | 3 | 2017-11-22T01:50:41.000Z | 2018-04-17T23:33:08.000Z | # pylint: disable=unused-variable,unused-argument,expression-not-assigned
from django.forms.models import model_to_dict
import arrow
import pytest
from expecter import expect
from api.elections.models import Election
from .. import models
def describe_registration_info():
def describe_voter():
def describe_status():
| 18.863636 | 73 | 0.658635 |
22259d4822670697f3a83a96fc5c76baa093e86f | 992 | py | Python | app/main/helpers/direct_award_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 4 | 2017-10-12T16:15:01.000Z | 2020-11-28T03:41:15.000Z | app/main/helpers/direct_award_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 615 | 2015-02-27T15:45:43.000Z | 2021-07-01T10:09:55.000Z | app/main/helpers/direct_award_helpers.py | uk-gov-mirror/alphagov.digitalmarketplace-buyer-frontend | ec3751b6d24842cc53febb20391ae340c0fea756 | [
"MIT"
] | 15 | 2015-06-30T14:35:20.000Z | 2021-04-10T18:06:36.000Z | from operator import itemgetter
| 31 | 113 | 0.658266 |
2227aded77b3fc2c225e7b80658dcf4702936914 | 2,261 | py | Python | daemon/api/endpoints/partial/pod.py | vishalbelsare/jina | ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43 | [
"Apache-2.0"
] | 2 | 2021-06-28T16:25:09.000Z | 2021-06-28T16:26:41.000Z | daemon/api/endpoints/partial/pod.py | vishalbelsare/jina | ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43 | [
"Apache-2.0"
] | null | null | null | daemon/api/endpoints/partial/pod.py | vishalbelsare/jina | ae72cc5ce1f7e7f4c662e72e96ea21dddc28bf43 | [
"Apache-2.0"
] | null | null | null | from typing import Optional, Dict, Any
from fastapi import APIRouter
from jina.helper import ArgNamespace
from jina.parsers import set_pod_parser
from ....excepts import PartialDaemon400Exception
from ....models import PodModel
from ....models.partial import PartialStoreItem
from ....stores import partial_store as store
router = APIRouter(prefix='/pod', tags=['pod'])
| 21.951456 | 74 | 0.657231 |
2227e03ea94ec70b6e6c0445734948d8034414f4 | 708 | py | Python | runehistory_api/app/config.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | [
"MIT"
] | null | null | null | runehistory_api/app/config.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | [
"MIT"
] | 6 | 2018-06-14T13:58:43.000Z | 2018-07-16T14:02:24.000Z | runehistory_api/app/config.py | RuneHistory/runehistory-api | 4e857c7fdbdf585d57cf4c7fe6214b565ac37a22 | [
"MIT"
] | null | null | null | import yaml
| 21.454545 | 53 | 0.572034 |
222831d9b44232f8e0bea417d43c813b4bde54d1 | 4,379 | py | Python | csv_filter/__init__.py | mooore-digital/csv_filter | 80afed0e4b366d195c5a90fb96ab2bf01620e3bf | [
"MIT"
] | 1 | 2018-08-13T05:51:21.000Z | 2018-08-13T05:51:21.000Z | csv_filter/__init__.py | mooore-digital/csv_filter | 80afed0e4b366d195c5a90fb96ab2bf01620e3bf | [
"MIT"
] | null | null | null | csv_filter/__init__.py | mooore-digital/csv_filter | 80afed0e4b366d195c5a90fb96ab2bf01620e3bf | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import argparse
import csv
import logging
import os
import re
import sys
DELIMITER = ','
if __name__ == '__main__':
sys.exit(main())
| 31.731884 | 120 | 0.577529 |
22298775a674168a052235a68913e1eaa95ece94 | 9,883 | py | Python | bluebottle/impact/tests/test_api.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 10 | 2015-05-28T18:26:40.000Z | 2021-09-06T10:07:03.000Z | bluebottle/impact/tests/test_api.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 762 | 2015-01-15T10:00:59.000Z | 2022-03-31T15:35:14.000Z | bluebottle/impact/tests/test_api.py | terrameijar/bluebottle | b4f5ba9c4f03e678fdd36091b29240307ea69ffd | [
"BSD-3-Clause"
] | 9 | 2015-02-20T13:19:30.000Z | 2022-03-08T14:09:17.000Z | # coding=utf-8
from builtins import str
import json
from django.contrib.auth.models import Group, Permission
from django.urls import reverse
from rest_framework import status
from bluebottle.impact.models import ImpactGoal
from bluebottle.impact.tests.factories import (
ImpactTypeFactory, ImpactGoalFactory
)
from bluebottle.time_based.tests.factories import DateActivityFactory
from bluebottle.members.models import MemberPlatformSettings
from bluebottle.test.factory_models.accounts import BlueBottleUserFactory
from bluebottle.test.utils import BluebottleTestCase, JSONAPITestClient
| 33.16443 | 82 | 0.610746 |
222b145f8daf822353fc31ee9861239abfadffb3 | 11,613 | py | Python | dotmotif/parsers/v2/test_v2_parser.py | aplbrain/dotmotif | db093ddad7308756e9cf7ee01199f0dca1369872 | [
"Apache-2.0"
] | 28 | 2020-06-12T20:46:15.000Z | 2022-02-05T18:33:46.000Z | dotmotif/parsers/v2/test_v2_parser.py | aplbrain/dotmotif | db093ddad7308756e9cf7ee01199f0dca1369872 | [
"Apache-2.0"
] | 26 | 2020-06-09T20:09:32.000Z | 2022-02-01T18:22:20.000Z | dotmotif/parsers/v2/test_v2_parser.py | aplbrain/dotmotif | db093ddad7308756e9cf7ee01199f0dca1369872 | [
"Apache-2.0"
] | 4 | 2021-03-08T02:47:49.000Z | 2021-09-13T19:16:29.000Z | from . import ParserV2
import dotmotif
import unittest
_THREE_CYCLE = """A -> B\nB -> C\nC -> A\n"""
_THREE_CYCLE_NEG = """A !> B\nB !> C\nC !> A\n"""
_THREE_CYCLE_INH = """A -| B\nB -| C\nC -| A\n"""
_THREE_CYCLE_NEG_INH = """A !| B\nB !| C\nC !| A\n"""
_ABC_TO_D = """\nA -> D\nB -> D\nC -> D\n"""
_THREE_CYCLE_CSV = """\nA,B\nB,C\nC,A\n"""
_THREE_CYCLE_NEG_CSV = """\nA,B\nB,C\nC,A\n"""
| 26.819861 | 87 | 0.490313 |
222b699bb098dde76e50ecba30e5ab86e3537dcc | 1,382 | py | Python | examples/classification_mnist/main.py | yassersouri/fandak | 2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a | [
"MIT"
] | 15 | 2019-07-12T14:04:46.000Z | 2020-08-04T12:30:30.000Z | examples/classification_mnist/main.py | yassersouri/fandak | 2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a | [
"MIT"
] | 2 | 2019-07-12T17:06:56.000Z | 2019-07-17T12:05:32.000Z | examples/classification_mnist/main.py | yassersouri/fandak | 2bbadb6d78fcf73dc39f5342aa34c53fa3341c5a | [
"MIT"
] | null | null | null | from typing import List
import click
import torch
from fandak.utils import common_config
from fandak.utils import set_seed
from fandak.utils.config import update_config
from proj.config import get_config_defaults
from proj.datasets import MNISTClassification
from proj.evaluators import ValidationEvaluator
from proj.models import MLPModel
from proj.trainers import SimpleTrainer
if __name__ == "__main__":
main()
| 26.075472 | 73 | 0.708394 |
222b80300db760788fdc862f944935f9de93f40f | 864 | py | Python | tests/test_problem_solving_algorithms_sorting.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | tests/test_problem_solving_algorithms_sorting.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | tests/test_problem_solving_algorithms_sorting.py | mxdzi/hackerrank | 4455f73e4479a4204b2e1167253f6a02351aa5b7 | [
"MIT"
] | null | null | null | from problem_solving.algorithms.sorting import *
| 25.411765 | 64 | 0.545139 |
222dd53901bfb2ab9baf636ea45e6459defef6a1 | 9,975 | py | Python | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | runOtakuBot.py | Eagleheardt/otakuBot | 6f8576423bb1b0701d5a60095bed7552b2711bab | [
"Unlicense"
] | null | null | null | import sqlite3
from sqlite3 import Error
import os
import time
import datetime
import re
import random
import schedule
import cryptography
from apscheduler.schedulers.background import BackgroundScheduler
from slackclient import SlackClient
from cryptography.fernet import Fernet
conn = sqlite3.connect('/home/ubuntu/otakuBot/data/anime.db')
serverCursor = conn.cursor()
keyFile = open('/home/ubuntu/otakuBot/data/otakubot_token.key', 'rb')
key = keyFile.read()
keyFile.close()
f = Fernet(key)
encryptedTokenFile = open('/home/ubuntu/otakuBot/data/otakubot_token.encrypted', 'rb')
encryptedToken = encryptedTokenFile.read()
decryptedToken = f.decrypt(encryptedToken)
SLACK_BOT_TOKEN = decryptedToken.decode()
# instantiate Slack client
slack_client = SlackClient(SLACK_BOT_TOKEN)
# starterbot's user ID in Slack: value is assigned after the bot starts up
otakuBotID = None
# constants
RTM_READ_DELAY = 0.5 # 0.5 second delay in reading events
schedule.every(15).minutes.do(logIt)
def handle_command(command, channel, aUser, tStamp):
"""
Executes bot command if the command is known
"""
#command = command.lower()
response = None
# This is where you start to implement more commands!
if command.lower().startswith("!help"):
response = """I'm Otaku Bot!
I don't do a lot yet. But watch out! I'm just getting started!
!addquote[SPACE][A quote of your choice!] - I will remember your quote!
!quote - I will reply with a random quote!
!addAniMusic[SPACE][Link to a Japanese anime song] - I will remember your music!
!addEngMusic[SPACE][Link to an English anime song] - I will remember your music!
!addIconic[SPACE][Link to an iconic anime moment] - I will remember your moment!
!animusic - I will reply with a Japanese anime song from memory!
!engmusic - I will reply with an English anime song from memory!
!iconic - I will show you an iconic anime moment!
"""
inChannelResponse(channel,response)
return
if command.lower().startswith("!addquote"):
newQuote = str(command[10:])
insertQuote(aUser,newQuote)
threadedResponse(channel,"I'll try to remember: " + newQuote ,tStamp)
stdOut("Quote Added: " + newQuote)
return
if command.lower().startswith("!quote"):
aQuote = getQuote(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!animusic"):
aQuote = getAniMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!engmusic"):
aQuote = getEngMusic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!iconic"):
aQuote = getIconic(conn)
inChannelResponse(channel,aQuote)
return
if command.lower().startswith("!onepunch"):
inChannelResponse(channel,"https://www.youtube.com/watch?v=_TUTJ0klnKk")
return
if command.lower().startswith("!addanimusic"):
newQuote = str(command[13:])
insertAniMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Anime music section: " + newQuote ,tStamp)
stdOut("Anime Music Added: " + newQuote)
return
if command.lower().startswith("!addengmusic"):
newQuote = str(command[13:])
insertEngMusic(aUser,newQuote)
threadedResponse(channel,"I'll add this to the English music section: " + newQuote ,tStamp)
stdOut("English Music Added: " + newQuote)
return
if command.lower().startswith("!addiconic"):
newQuote = str(command[11:])
insertIcon(aUser,newQuote)
threadedResponse(channel,"I'll add this to the Iconic moments section: " + newQuote ,tStamp)
stdOut("Iconic Moment Added: " + newQuote)
return
if command.lower().startswith("!delquote"):
if aUser == "UC176R92M":
num = command[10:]
deleteQuote(num)
inChannelResponse(channel,"You have removed a quote.")
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.lower().startswith("!getquotes"):
if aUser == "UC176R92M":
inChannelResponse(channel,getAllQuotes(conn))
else:
inChannelResponse(channel,"You don't have permission to do that!")
return
if command.startswith("!test"):
return
response = (("""Text:{0}
Channel:{1}
TS:{2}
User:{3}
""").format(command,channel,tStamp,aUser))
inChannelResponse(channel,response)
return
return
# Sends the response back to the channel
if __name__ == "__main__":
if slack_client.rtm_connect(with_team_state=False):
stdOut("Otaku Bot connected and running!")
# Read bot's user ID by calling Web API method `auth.test`
otakuBotID = slack_client.api_call("auth.test")["user_id"]
while True:
try:
command, channel,usr,stp = parseSlackInput(slack_client.rtm_read())
if command:
handle_command(command, channel,usr,stp)
except:
pass
schedule.run_pending()
time.sleep(RTM_READ_DELAY)
else:
stdOut("Connection failed. Exception traceback printed above.")
| 27.631579 | 95 | 0.665063 |
222fcf6d9b65f24912507aa874036c0ee4a1261b | 1,555 | py | Python | ocr.py | tunc2112/uet-img-processing | 6a191fe6927d7d0844742240cf4a39587c965d16 | [
"MIT"
] | null | null | null | ocr.py | tunc2112/uet-img-processing | 6a191fe6927d7d0844742240cf4a39587c965d16 | [
"MIT"
] | null | null | null | ocr.py | tunc2112/uet-img-processing | 6a191fe6927d7d0844742240cf4a39587c965d16 | [
"MIT"
] | null | null | null | from PIL import Image
import cv2
import pytesseract
import tesserocr
from pyocr import pyocr
from pyocr import builders
import sys
import os
| 27.280702 | 72 | 0.674598 |
2230323f70e41224c93df8ff861946c38acbb05d | 6,718 | py | Python | object_detection/det_heads/retinaNet_head/retinanet_head.py | no-name-xiaosheng/PaddleViT | 50226a3be5095b3727d3c62d2eab23ef1e9612ec | [
"Apache-2.0"
] | 993 | 2021-08-30T01:58:57.000Z | 2022-03-31T14:03:07.000Z | object_detection/det_heads/retinaNet_head/retinanet_head.py | Dongsheng-Bi/PaddleViT | c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7 | [
"Apache-2.0"
] | 120 | 2021-09-03T13:05:32.000Z | 2022-03-29T02:08:22.000Z | object_detection/det_heads/retinaNet_head/retinanet_head.py | Dongsheng-Bi/PaddleViT | c90a6c8dc3787e69cef3a37b9a260bd59eeff1f7 | [
"Apache-2.0"
] | 253 | 2021-08-30T08:50:27.000Z | 2022-03-26T09:21:08.000Z | # Copyright (c) 2021 PPViT Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn as nn
from paddle.nn.initializer import Normal, Constant
from retinanet_loss import RetinaNetLoss
from post_process import RetinaNetPostProcess
from det_utils.generator_utils import AnchorGenerator
| 40.227545 | 94 | 0.620125 |
223135074d80c82a77c3e9b47c439c7c3abe7792 | 632 | py | Python | brp/formutils.py | chop-dbhi/biorepo-portal | 7db13c40b2b9d62af43a28e4af08c2472b98fc96 | [
"BSD-2-Clause"
] | 6 | 2016-10-26T19:51:11.000Z | 2021-03-18T16:05:55.000Z | brp/formutils.py | chop-dbhi/biorepo-portal | 7db13c40b2b9d62af43a28e4af08c2472b98fc96 | [
"BSD-2-Clause"
] | 207 | 2015-09-24T17:41:37.000Z | 2021-05-18T18:14:08.000Z | brp/formutils.py | chop-dbhi/biorepo-portal | 7db13c40b2b9d62af43a28e4af08c2472b98fc96 | [
"BSD-2-Clause"
] | 8 | 2016-04-27T19:04:50.000Z | 2020-08-24T02:33:05.000Z | from django import template
from django.forms import widgets
register = template.Library()
| 28.727273 | 62 | 0.675633 |
2231aae6662593f94c1874f0078bab296c0ac96f | 2,104 | py | Python | SGE/src/configs/rng_seeds.py | dabingrosewood/MasterThesisProj | 7e40fa2395468a1bccef429362a61ed8515ecc11 | [
"MIT"
] | null | null | null | SGE/src/configs/rng_seeds.py | dabingrosewood/MasterThesisProj | 7e40fa2395468a1bccef429362a61ed8515ecc11 | [
"MIT"
] | null | null | null | SGE/src/configs/rng_seeds.py | dabingrosewood/MasterThesisProj | 7e40fa2395468a1bccef429362a61ed8515ecc11 | [
"MIT"
] | null | null | null | # CONFIG
seeds = [6598903756360202179, 2908409715321502665, 6126375328734039552, 1447957147463681860, 8611858271322161001, 1129180857020570158, 6362222119948958210, 7116573423379052515, 6183551438103583226, 4025455056998962241, 3253052445978017587, 8447055112402476503, 5958072666039141800, 704315598608973559, 1273141716491599966, 8030825590436937002, 6692768176035969914, 8405559442957414941, 5375803109627817298, 1491660193757141856, 3436611086188602011, 3271002097187013328, 4006294871837743001, 7473817498436254932, 7891796310200224764, 3130952787727334893, 697469171142516880, 133987617360269051, 1978176412643604703, 3541943493395593807, 5679145832406031548, 5942005640162452699, 5170695982942106620, 3168218038949114546, 9211443340810713278, 675545486074597116, 3672488441186673791, 6678020899892900267, 2416379871103035344, 8662874560817543122, 2122645477319220395, 2405200782555244715, 6145921643610737337, 5436563232962849112, 8616414727199277108, 3514934091557929937, 6828532625327352397, 4198622582999611227, 1404664771100695607, 2109913995355226572, 7499239331133290294, 1663854912663070382, 8773050872378084951, 847059168652279875, 2080440852605950627, 842456810578794799, 2969610112218411619, 8028963261673713765, 8849431138779094918, 6906452636298562639, 8279891918456160432, 3007521703390185509, 7384090506069372457, 2587992914778556505, 7951640286729988102, 812903075765965116, 4795333953396378316, 1140497104356211676, 8624839892588303806, 5867085452069993348, 8978621560802611959, 8687506047153117100, 1433098622112610322, 2329673189788559167, 1697681906179453583, 1151871187140419944, 7331838985682630168, 2010690807327394179, 8961362099735442061, 3782928183186245068, 8730275423842935904, 2250089307129376711, 6729072114456627667, 6426359511845339057, 1543504526754215874, 6764758859303816569, 438430728757175362, 850249168946095159, 7241624624529922339, 633139235530929889, 8443344843613690342, 5097223086273121, 3838826661110586915, 7425568686759148634, 5814866864074983273, 
5375799982976616117, 6540402714944055605, 448708351215739494, 5101380446889426970, 8035666378249198606]
| 701.333333 | 2,094 | 0.901616 |
223331808a66e2aa15f291c872b40388de56a8a3 | 2,793 | py | Python | learning/modules/visitation_softmax.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | null | null | null | learning/modules/visitation_softmax.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | null | null | null | learning/modules/visitation_softmax.py | esteng/guiding-multi-step | 3f0db0ba70b5851cc83878f4ed48cf82342a2ddf | [
"BSD-2-Clause"
] | null | null | null | import torch
import torch.nn as nn
import numpy as np
| 52.698113 | 139 | 0.713928 |
223355bd5379be3ac4c24bf1261412562ebdf029 | 96 | py | Python | baekjoon/easy-math/17362-finger.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | 2 | 2019-02-08T01:23:07.000Z | 2020-11-19T12:23:52.000Z | baekjoon/easy-math/17362-finger.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | baekjoon/easy-math/17362-finger.py | honux77/algorithm | 2ed8cef1fbee7ad96d8f2ae583666d52bd8892ee | [
"MIT"
] | null | null | null | n = int(input()) % 8
if n == 0:
print(2)
elif n <= 5:
print(n)
else:
print(10 - n)
| 12 | 20 | 0.458333 |
22350d67f0af834116a7d33c446043e6c69e8a30 | 908 | py | Python | Module/nikodou_information.py | RyoTozawa/LineBot | 14b34094f9a27650d412128334c3f09c7444ffc9 | [
"MIT"
] | 1 | 2018-10-16T07:50:59.000Z | 2018-10-16T07:50:59.000Z | Module/nikodou_information.py | RyoTozawa/LineBot | 14b34094f9a27650d412128334c3f09c7444ffc9 | [
"MIT"
] | 1 | 2018-04-09T11:03:25.000Z | 2018-04-09T11:03:25.000Z | Module/nikodou_information.py | tozastation/Line-Bot | 14b34094f9a27650d412128334c3f09c7444ffc9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# coding:utf-8
import urllib
from xml.etree import ElementTree
import xml.dom.minidom as md
| 29.290323 | 95 | 0.643172 |
2235a02d239f6f03f7a8b8fc3826bf3189e27e0c | 927 | py | Python | climbing/add_new_climbs.py | JiriKalvoda/slama.dev | 4856d246858dd98a1852365b028873b61f5a6775 | [
"MIT"
] | 7 | 2019-09-15T19:55:11.000Z | 2021-12-27T11:40:20.000Z | climbing/add_new_climbs.py | JiriKalvoda/slama.dev | 4856d246858dd98a1852365b028873b61f5a6775 | [
"MIT"
] | 38 | 2020-05-18T12:49:13.000Z | 2022-03-23T12:51:28.000Z | climbing/add_new_climbs.py | JiriKalvoda/slama.dev | 4856d246858dd98a1852365b028873b61f5a6775 | [
"MIT"
] | 17 | 2020-02-16T19:49:25.000Z | 2022-02-06T14:28:35.000Z | #!/usr/bin/env python3
import os
import shutil
from subprocess import Popen, PIPE
from datetime import date
import yaml
os.chdir(os.path.dirname(os.path.realpath(__file__)))
CLIMBING_FOLDER = "."
CLIMBING_VIDEOS_FOLDER = os.path.join(CLIMBING_FOLDER, "videos")
CLIMBING_INFO = os.path.join(CLIMBING_FOLDER, "videos.yaml")
config = {}
if os.path.exists(CLIMBING_INFO):
with open(CLIMBING_INFO, "r") as f:
config = yaml.safe_load(f.read())
files = os.listdir(CLIMBING_VIDEOS_FOLDER)
for file in files:
if file.lower().endswith(".mp4") and file not in config:
print(f"adding new file {file}.")
config[file] = {
"color": "TODO",
"date": date.today(),
"zone": "TODO",
"new": None,
"rotate": "left",
"encode": None,
"trim": "TODO",
}
with open(CLIMBING_INFO, "w") as f:
f.write(yaml.dump(config))
| 24.394737 | 64 | 0.614887 |
22369660633e2973cf659ea963259b0f27b54f98 | 952 | py | Python | posts/models.py | dnetochaves/blog | e04fda385490b671540b671631726584a533369c | [
"MIT"
] | null | null | null | posts/models.py | dnetochaves/blog | e04fda385490b671540b671631726584a533369c | [
"MIT"
] | null | null | null | posts/models.py | dnetochaves/blog | e04fda385490b671540b671631726584a533369c | [
"MIT"
] | null | null | null | from django.db import models
from categorias.models import Categoria
from django.contrib.auth.models import User
from django.utils import timezone
# Create your models here.
| 43.272727 | 103 | 0.765756 |
2237660edc2b315c6d1a8e947bbdd55091f794e0 | 2,765 | py | Python | src/ros_vision_interaction/examples/example_interaction.py | HaaaO/vision-project | 72256af07834195cfe52ac344aee5effcd0da978 | [
"MIT"
] | null | null | null | src/ros_vision_interaction/examples/example_interaction.py | HaaaO/vision-project | 72256af07834195cfe52ac344aee5effcd0da978 | [
"MIT"
] | 21 | 2020-09-09T18:55:58.000Z | 2021-07-26T19:42:46.000Z | src/ros_vision_interaction/examples/example_interaction.py | HaaaO/vision-project | 72256af07834195cfe52ac344aee5effcd0da978 | [
"MIT"
] | 6 | 2020-12-20T17:19:29.000Z | 2021-08-09T22:33:04.000Z | #!/usr/bin/env python
import datetime
import logging
import os
import random
import rospy
import schedule
from interaction_engine.cordial_interface import CordialInterface
from interaction_engine.database import Database
from interaction_engine.int_engine import InteractionEngine
from interaction_engine.message import Message
from interaction_engine.state import State
from interaction_engine.state_collection import StateCollection
from cordial_msgs.msg import AskOnGuiAction, AskOnGuiGoal, MouseEvent
from std_msgs.msg import Bool
logging.basicConfig(level=logging.INFO)
greeting = State(
name=Keys.GREETING,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Hello!",
next_states=[Keys.HOW_ARE_YOU],
transitions={"Hello!": Keys.HOW_ARE_YOU, "Hi!": Keys.HOW_ARE_YOU}
)
how_are_you = State(
name=Keys.HOW_ARE_YOU,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="How are you doing today?",
next_states=[Keys.TAKE_CARE],
transitions={
"Pretty good.": Keys.TAKE_CARE,
"Great!": Keys.TAKE_CARE,
"Not too good.": Keys.TAKE_CARE
}
)
take_care = State(
name=Keys.TAKE_CARE,
message_type=Message.Type.MULTIPLE_CHOICE_ONE_COLUMN,
content="Don't forget to drink enough water and get enough sleep!",
next_states=[Keys.WHEN_TO_TALK],
transitions={"Next": Keys.WHEN_TO_TALK}
)
when_to_talk = State(
name=Keys.WHEN_TO_TALK,
message_type=Message.Type.TIME_ENTRY,
content="When would you like to talk tomorrow?",
next_states=["exit"],
args=["15", "15:15"]
)
state_collection = StateCollection(
name="example interaction",
init_state_name=Keys.WHEN_TO_TALK,
states=[
greeting,
how_are_you,
take_care,
when_to_talk
]
)
cwd = os.getcwd()
database_file = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"example_interaction_database.json"
)
default_database_keys = [
Keys.GREETING,
Keys.HOW_ARE_YOU,
Keys.TAKE_CARE,
Keys.WHEN_TO_TALK
]
database_manager = Database(
database_file_name=database_file,
default_database_keys=default_database_keys
)
interface = CordialInterface(
action_name="cordial/say_and_ask_on_gui",
seconds_until_timeout=None
)
interaction_engine = InteractionEngine(
state_collection=state_collection,
database_manager=database_manager,
interface=interface
)
if __name__ == "__main__":
while not rospy.is_shutdown():
rospy.logdebug("Scheduled interaction running")
interaction_engine.run()
rospy.sleep(5)
| 24.469027 | 71 | 0.725859 |
2237956981da3e82e0d6350f1b78b20897718d48 | 2,441 | py | Python | explicalib/distribution/multiclass_distribution.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | 2 | 2021-11-30T18:44:11.000Z | 2021-11-30T18:44:19.000Z | explicalib/distribution/multiclass_distribution.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | null | null | null | explicalib/distribution/multiclass_distribution.py | euranova/estimating_eces | 9bfa81dd7a39ebe069c5b11b8e7a9bf9017e9350 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: nicolas.posocco
"""
from abc import ABC
import numpy as np
| 29.059524 | 116 | 0.643179 |
22386109daaa28a3082c4a5bbbaea3c931eb1b4c | 3,169 | py | Python | test.py | JulioPDX/ci_cd_dev | e9b72c1b16e9f05a5e93e22f045fda605aac509f | [
"MIT"
] | 6 | 2021-10-31T18:36:49.000Z | 2022-03-14T02:26:37.000Z | test.py | JulioPDX/ci_cd_dev | e9b72c1b16e9f05a5e93e22f045fda605aac509f | [
"MIT"
] | null | null | null | test.py | JulioPDX/ci_cd_dev | e9b72c1b16e9f05a5e93e22f045fda605aac509f | [
"MIT"
] | 2 | 2022-02-10T16:58:46.000Z | 2022-03-07T05:00:57.000Z | #!/usr/bin/env python
"""Script used to test the network with batfish"""
from pybatfish.client.commands import *
from pybatfish.question import load_questions
from pybatfish.client.asserts import (
assert_no_duplicate_router_ids,
assert_no_incompatible_bgp_sessions,
assert_no_incompatible_ospf_sessions,
assert_no_unestablished_bgp_sessions,
assert_no_undefined_references,
)
from rich.console import Console
console = Console(color_system="truecolor")
def test_duplicate_rtr_ids(snap):
"""Testing for duplicate router IDs"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for duplicate router IDs[/bold yellow] :white_exclamation_mark:"
)
assert_no_duplicate_router_ids(
snapshot=snap,
protocols={"ospf", "bgp"},
)
console.print(
":green_heart: [bold green]No duplicate router IDs found[/bold green] :green_heart:"
)
def test_bgp_compatibility(snap):
"""Testing for incompatible BGP sessions"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for incompatible BGP sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_incompatible_bgp_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All BGP sessions compatible![/bold green] :green_heart:"
)
def test_ospf_compatibility(snap):
"""Testing for incompatible OSPF sessions"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for incompatible OSPF sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_incompatible_ospf_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All OSPF sessions compatible![/bold green] :green_heart:"
)
def test_bgp_unestablished(snap):
"""Testing for BGP sessions that are not established"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for unestablished BGP sessions[/bold yellow] :white_exclamation_mark:"
)
assert_no_unestablished_bgp_sessions(
snapshot=snap,
)
console.print(
":green_heart: [bold green]All BGP sessions are established![/bold green] :green_heart:"
)
def test_undefined_references(snap):
"""Testing for any undefined references"""
console.print(
":white_exclamation_mark: [bold yellow]Testing for undefined references[/bold yellow] :white_exclamation_mark:"
)
assert_no_undefined_references(
snapshot=snap,
)
console.print(
":green_heart: [bold green]No undefined refences found![/bold green] :green_heart:"
)
def main():
"""init all the things"""
NETWORK_NAME = "PDX_NET"
SNAPSHOT_NAME = "snapshot00"
SNAPSHOT_DIR = "./snapshots"
bf_session.host = "192.168.10.193"
bf_set_network(NETWORK_NAME)
init_snap = bf_init_snapshot(SNAPSHOT_DIR, name=SNAPSHOT_NAME, overwrite=True)
load_questions()
test_duplicate_rtr_ids(init_snap)
test_bgp_compatibility(init_snap)
test_ospf_compatibility(init_snap)
test_bgp_unestablished(init_snap)
test_undefined_references(init_snap)
if __name__ == "__main__":
main()
| 30.471154 | 125 | 0.716945 |
22386711171a0a872717803582d333bc6bde0602 | 1,176 | py | Python | algs15_priority_queue/circular_queue.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | algs15_priority_queue/circular_queue.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | algs15_priority_queue/circular_queue.py | zhubaiyuan/learning-algorithms | ea9ee674878d535a9e9987c0d948c0357e0ed4da | [
"MIT"
] | null | null | null | """
A fixed-capacity queue implemented as circular queue.
Queue can become full.
* enqueue is O(1)
* dequeue is O(1)
"""
| 22.188679 | 54 | 0.519558 |
223a35394d8e357d916a263b18714241694b5330 | 4,300 | py | Python | pos_evaluation/create_train_dev_old.py | ayyoobimani/GNN-POSTAG | 47eb4bc6d64de565e87ee7cb8e9c5020d936138c | [
"MIT"
] | null | null | null | pos_evaluation/create_train_dev_old.py | ayyoobimani/GNN-POSTAG | 47eb4bc6d64de565e87ee7cb8e9c5020d936138c | [
"MIT"
] | null | null | null | pos_evaluation/create_train_dev_old.py | ayyoobimani/GNN-POSTAG | 47eb4bc6d64de565e87ee7cb8e9c5020d936138c | [
"MIT"
] | null | null | null | """
Create train and dev set from bronze data
Example call:
$ python3 create_train_dev.py --pos /mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_tam-x-bible-newworld_posfeatFalse_transformerFalse_trainWEFalse_maskLangTrue.pickle --bible tam-x-bible-newworld.txt --bronze 1 --lang tam
$ python3 create_train_dev.py --pos /mounts/work/ayyoob/results/gnn_align/yoruba/pos_tags_fin-x-bible-helfi_posfeatFalse_transformerFalse_trainWEFalse_maskLangTrue.pickle --bible fin-x-bible-helfi.txt --bronze 1 --lang fin
"""
import torch
import random
import argparse
if __name__ == "__main__":
main()
| 38.738739 | 228 | 0.621163 |
223b3e8319f85381a1c34694d9c35926bb3d9b11 | 1,076 | py | Python | lib/spack/spack/test/permissions.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2016-01-12T20:14:40.000Z | 2017-06-16T07:03:46.000Z | lib/spack/spack/test/permissions.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | lib/spack/spack/test/permissions.py | SimeonEhrig/spack | 7fe0230492ecf0e497a84d578ea163570cf460eb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import os
import pytest
import stat
from spack.hooks.permissions_setters import (
chmod_real_entries, InvalidPermissionsError
)
import llnl.util.filesystem as fs
| 29.888889 | 76 | 0.736059 |
223b618424ceff584aa410ff8121dcf69f5567f4 | 323 | py | Python | data_statistics/variable_statistics.py | go-jugo/ml_event_prediction_trainer | 0d644b737afdef078ad5b6fc2b7e2549b964b56f | [
"Apache-2.0"
] | null | null | null | data_statistics/variable_statistics.py | go-jugo/ml_event_prediction_trainer | 0d644b737afdef078ad5b6fc2b7e2549b964b56f | [
"Apache-2.0"
] | null | null | null | data_statistics/variable_statistics.py | go-jugo/ml_event_prediction_trainer | 0d644b737afdef078ad5b6fc2b7e2549b964b56f | [
"Apache-2.0"
] | null | null | null | import dask.dataframe as dd
| 40.375 | 67 | 0.718266 |
223b7dfcfebf8324a056e2a31a6551d0d0397ac2 | 393 | py | Python | code/com/caicongyang/python/study/base/pandas_sql.py | caicongyang/python-study | e5db4d1b033d183da7e9af6a8c930bcaba2962f7 | [
"Apache-2.0"
] | null | null | null | code/com/caicongyang/python/study/base/pandas_sql.py | caicongyang/python-study | e5db4d1b033d183da7e9af6a8c930bcaba2962f7 | [
"Apache-2.0"
] | null | null | null | code/com/caicongyang/python/study/base/pandas_sql.py | caicongyang/python-study | e5db4d1b033d183da7e9af6a8c930bcaba2962f7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
import pandas as pd
'''
sqlpandas
'''
from pandas import DataFrame, Series
from pandasql import sqldf, load_meat, load_births
df1 = DataFrame({'name': ['jack', 'tony', 'pony'], 'data1': range(3)})
print(df1)
sql = "select * from df1 where name = 'jack'"
pysqldf = lambda sql: sqldf(sql, globals());
print(pysqldf(sql))
| 19.65 | 70 | 0.676845 |
223ccb03fea812be5bc8d09d17bc6aa157ee7449 | 5,016 | py | Python | tests/test_publish_parq.py | jacobtobias/s3parq | 0a56fbc7d93168c68e823f05b073b8d03e67a665 | [
"MIT"
] | null | null | null | tests/test_publish_parq.py | jacobtobias/s3parq | 0a56fbc7d93168c68e823f05b073b8d03e67a665 | [
"MIT"
] | null | null | null | tests/test_publish_parq.py | jacobtobias/s3parq | 0a56fbc7d93168c68e823f05b073b8d03e67a665 | [
"MIT"
] | null | null | null | import pytest
from mock import patch
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
import boto3
from string import ascii_lowercase
import random
from dfmock import DFMock
import s3parq.publish_parq as parq
import s3fs
from moto import mock_s3
| 36.347826 | 105 | 0.622608 |
223dcc250b1cfb0dc6bb2a2c2757035efcba4a73 | 2,936 | py | Python | docs/examples/examplesCategorization.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | 12 | 2019-11-22T21:44:13.000Z | 2022-03-06T21:46:31.000Z | docs/examples/examplesCategorization.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | null | null | null | docs/examples/examplesCategorization.py | benstear/pyiomica | bc26032b610fc911cc03b54115d6abdf53a56fce | [
"MIT"
] | 5 | 2019-07-25T21:03:42.000Z | 2021-06-09T02:14:30.000Z | #import sys
#sys.path.append("../..")
import pyiomica as pio
from pyiomica import categorizationFunctions as cf
if __name__ == '__main__':
# Unzip example data
with pio.zipfile.ZipFile(pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV.zip'), "r") as zipFile:
zipFile.extractall(path=pio.ConstantPyIOmicaExamplesDirectory)
# Process sample dataset SLV_Hourly1
# Name of the fisrt data set
dataName = 'SLV_Hourly1TimeSeries'
# Define a directory name where results are be saved
saveDir = pio.os.path.join('results', dataName, '')
# Directory name where example data is (*.csv files)
dataDir = pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV')
# Read the example data into a DataFrame
df_data = pio.pd.read_csv(pio.os.path.join(dataDir, dataName + '.csv'), index_col=[0,1,2], header=0)
# Calculate time series categorization
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
# Cluster the time series categorization results
cf.clusterTimeSeriesCategorization(dataName, saveDir)
# Make plots of the clustered time series categorization
cf.visualizeTimeSeriesCategorization(dataName, saveDir)
# Process sample dataset SLV_Hourly2, in the same way as SLV_Hourly1 above
dataName = 'SLV_Hourly2TimeSeries'
saveDir = pio.os.path.join('results', dataName, '')
dataDir = pio.os.path.join(pio.ConstantPyIOmicaExamplesDirectory, 'SLV')
df_data = pio.pd.read_csv(pio.os.path.join(dataDir, dataName + '.csv'), index_col=[0,1,2], header=0)
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
cf.clusterTimeSeriesCategorization(dataName, saveDir)
cf.visualizeTimeSeriesCategorization(dataName, saveDir)
# Import data storage submodule to read results of processing sample datasets SLV_Hourly1 and SLV_Hourly2
from pyiomica import dataStorage as ds
# Use results from processing sample datasets SLV_Hourly1 and SLV_Hourly2 to calculate "Delta"
dataName = 'SLV_Hourly1TimeSeries'
df_data_processed_H1 = ds.read(dataName+'_df_data_transformed', hdf5fileName=pio.os.path.join('results',dataName,dataName+'.h5'))
dataName = 'SLV_Hourly2TimeSeries'
df_data_processed_H2 = ds.read(dataName+'_df_data_transformed', hdf5fileName=pio.os.path.join('results',dataName,dataName+'.h5'))
dataName = 'SLV_Delta'
saveDir = pio.os.path.join('results', dataName, '')
df_data = df_data_processed_H2.compareTwoTimeSeries(df_data_processed_H1, compareAllLevelsInIndex=False, mergeFunction=pio.np.median).fillna(0.)
cf.calculateTimeSeriesCategorization(df_data, dataName, saveDir, NumberOfRandomSamples = 10**5)
cf.clusterTimeSeriesCategorization(dataName, saveDir)
cf.visualizeTimeSeriesCategorization(dataName, saveDir) | 47.354839 | 149 | 0.742166 |
223df95481a53841ba1260a2ab6e7adbdff16f31 | 2,612 | py | Python | tworaven_apps/eventdata_queries/initialization/icews_unique_count.py | TwoRavens/TwoRavens | e5f820557d6646df525ceed15e17d79f4159cf0a | [
"Apache-2.0"
] | 20 | 2017-12-11T07:26:06.000Z | 2021-11-22T16:16:20.000Z | tworaven_apps/eventdata_queries/initialization/icews_unique_count.py | TwoRavens/TwoRavens | e5f820557d6646df525ceed15e17d79f4159cf0a | [
"Apache-2.0"
] | 849 | 2017-10-20T18:21:18.000Z | 2022-02-18T02:45:44.000Z | tworaven_apps/eventdata_queries/initialization/icews_unique_count.py | TwoRavens/TwoRavens | e5f820557d6646df525ceed15e17d79f4159cf0a | [
"Apache-2.0"
] | 1 | 2020-05-18T06:02:13.000Z | 2020-05-18T06:02:13.000Z | # returns number of unique records for icews with different filtering:
# -by rounded lat/lon (100,000)
# -by country, district, province, city (100,000)
# -by lat/lon, filtered by 2 or more matches (70,000)
from pymongo import MongoClient
import os
mongo_client = MongoClient(host='localhost', port=27017) # Default port
db = mongo_client.event_data
# icews_coordinates_rounded()
icews_coordinates()
# icews_names() | 20.092308 | 72 | 0.343415 |
223edb2e5970e591f491deb0d0fde065371aadb5 | 7,576 | py | Python | OcCo_Torch/models/pointnet_util.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 158 | 2020-08-19T18:13:28.000Z | 2022-03-30T13:55:32.000Z | OcCo_Torch/models/pointnet_util.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 28 | 2020-05-30T04:02:33.000Z | 2022-03-30T15:46:38.000Z | OcCo_Torch/models/pointnet_util.py | sun-pyo/OcCo | e2e12dbaa8f9b98fb8c42fc32682f49e99be302f | [
"MIT"
] | 18 | 2020-08-19T19:52:38.000Z | 2022-02-06T11:42:26.000Z | # Copyright (c) 2020. Hanchen Wang, hw501@cam.ac.uk
# Ref: https://github.com/fxia22/pointnet.pytorch/pointnet/model.py
import torch, torch.nn as nn, numpy as np, torch.nn.functional as F
from torch.autograd import Variable
# STN -> Spatial Transformer Network
| 33.522124 | 105 | 0.567318 |
2241d0bcc483df8ae598bcac2ffa98c9d73b4660 | 747 | py | Python | Week#3__Assignment#3/join.py | P7h/IntroToDataScience__Coursera_Course | 4f3d4073e552c7e6f5d1e31c0391eccec32d3786 | [
"Apache-2.0"
] | 1 | 2015-10-26T00:32:09.000Z | 2015-10-26T00:32:09.000Z | Week#3__Assignment#3/join.py | P7h/IntroToDataScience__Coursera_Course | 4f3d4073e552c7e6f5d1e31c0391eccec32d3786 | [
"Apache-2.0"
] | null | null | null | Week#3__Assignment#3/join.py | P7h/IntroToDataScience__Coursera_Course | 4f3d4073e552c7e6f5d1e31c0391eccec32d3786 | [
"Apache-2.0"
] | null | null | null | import MapReduce
import sys
"""
SQL style Joins in MapReduce
"""
mr = MapReduce.MapReduce()
# =============================
# Do not modify above this line
# Do not modify below this line
# =============================
if __name__ == '__main__':
inputdata = open(sys.argv[1])
mr.execute(inputdata, mapper, reducer)
| 20.189189 | 44 | 0.609103 |
224336ed5538a9d61b10299e07b1b099064fc032 | 823 | py | Python | tests/test_testresources.py | sjoerdk/anonapi | ade94da2b8eb2fb94f831ef77e563f750f88d0ba | [
"MIT"
] | null | null | null | tests/test_testresources.py | sjoerdk/anonapi | ade94da2b8eb2fb94f831ef77e563f750f88d0ba | [
"MIT"
] | 408 | 2018-11-24T19:41:10.000Z | 2022-03-31T23:48:54.000Z | tests/test_testresources.py | sjoerdk/anonapi | ade94da2b8eb2fb94f831ef77e563f750f88d0ba | [
"MIT"
] | 2 | 2018-11-11T12:56:03.000Z | 2021-08-09T01:53:07.000Z | from anonapi.testresources import (
MockAnonClientTool,
JobInfoFactory,
RemoteAnonServerFactory,
JobStatus,
)
| 30.481481 | 85 | 0.690158 |
2243b6cc5ce377cffdd91e6609c4b0155421a8c1 | 13,442 | py | Python | homeassistant/components/mikrotik/hub.py | jvitkauskas/home-assistant | 3718b25bd9528188530f291f0810a1c7970abcdb | [
"Apache-2.0"
] | null | null | null | homeassistant/components/mikrotik/hub.py | jvitkauskas/home-assistant | 3718b25bd9528188530f291f0810a1c7970abcdb | [
"Apache-2.0"
] | 7 | 2016-04-09T20:56:30.000Z | 2016-04-19T21:28:46.000Z | homeassistant/components/mikrotik/hub.py | jvitkauskas/home-assistant | 3718b25bd9528188530f291f0810a1c7970abcdb | [
"Apache-2.0"
] | null | null | null | """The Mikrotik router class."""
from datetime import timedelta
import logging
import socket
import ssl
import librouteros
from librouteros.login import plain as login_plain, token as login_token
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME, CONF_VERIFY_SSL
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
from .const import (
ARP,
ATTR_DEVICE_TRACKER,
ATTR_FIRMWARE,
ATTR_MODEL,
ATTR_SERIAL_NUMBER,
CAPSMAN,
CONF_ARP_PING,
CONF_DETECTION_TIME,
CONF_FORCE_DHCP,
DEFAULT_DETECTION_TIME,
DHCP,
IDENTITY,
INFO,
IS_WIRELESS,
MIKROTIK_SERVICES,
NAME,
WIRELESS,
)
from .errors import CannotConnect, LoginError
_LOGGER = logging.getLogger(__name__)
class MikrotikData:
"""Handle all communication with the Mikrotik API."""
def __init__(self, hass, config_entry, api):
"""Initialize the Mikrotik Client."""
self.hass = hass
self.config_entry = config_entry
self.api = api
self._host = self.config_entry.data[CONF_HOST]
self.all_devices = {}
self.devices = {}
self.available = True
self.support_wireless = bool(self.command(MIKROTIK_SERVICES[IS_WIRELESS]))
self.hostname = None
self.model = None
self.firmware = None
self.serial_number = None
def get_info(self, param):
"""Return device model name."""
cmd = IDENTITY if param == NAME else INFO
data = list(self.command(MIKROTIK_SERVICES[cmd]))
return data[0].get(param) if data else None
def get_hub_details(self):
"""Get Hub info."""
self.hostname = self.get_info(NAME)
self.model = self.get_info(ATTR_MODEL)
self.firmware = self.get_info(ATTR_FIRMWARE)
self.serial_number = self.get_info(ATTR_SERIAL_NUMBER)
def connect_to_hub(self):
"""Connect to hub."""
try:
self.api = get_api(self.hass, self.config_entry.data)
self.available = True
return True
except (LoginError, CannotConnect):
self.available = False
return False
def get_list_from_interface(self, interface):
"""Get devices from interface."""
result = list(self.command(MIKROTIK_SERVICES[interface]))
return self.load_mac(result) if result else {}
def restore_device(self, mac):
"""Restore a missing device after restart."""
self.devices[mac] = Device(mac, self.all_devices[mac])
def update_devices(self):
"""Get list of devices with latest status."""
arp_devices = {}
wireless_devices = {}
device_list = {}
try:
self.all_devices = self.get_list_from_interface(DHCP)
if self.support_wireless:
_LOGGER.debug("wireless is supported")
for interface in [CAPSMAN, WIRELESS]:
wireless_devices = self.get_list_from_interface(interface)
if wireless_devices:
_LOGGER.debug("Scanning wireless devices using %s", interface)
break
if self.support_wireless and not self.force_dhcp:
device_list = wireless_devices
else:
device_list = self.all_devices
_LOGGER.debug("Falling back to DHCP for scanning devices")
if self.arp_enabled:
arp_devices = self.get_list_from_interface(ARP)
# get new hub firmware version if updated
self.firmware = self.get_info(ATTR_FIRMWARE)
except (CannotConnect, socket.timeout, socket.error):
self.available = False
return
if not device_list:
return
for mac, params in device_list.items():
if mac not in self.devices:
self.devices[mac] = Device(mac, self.all_devices.get(mac, {}))
else:
self.devices[mac].update(params=self.all_devices.get(mac, {}))
if mac in wireless_devices:
# if wireless is supported then wireless_params are params
self.devices[mac].update(
wireless_params=wireless_devices[mac], active=True
)
continue
# for wired devices or when forcing dhcp check for active-address
if not params.get("active-address"):
self.devices[mac].update(active=False)
continue
# ping check the rest of active devices if arp ping is enabled
active = True
if self.arp_enabled and mac in arp_devices:
active = self.do_arp_ping(
params.get("active-address"), arp_devices[mac].get("interface")
)
self.devices[mac].update(active=active)
def do_arp_ping(self, ip_address, interface):
    """Attempt to arp ping MAC address via interface.

    Returns False when the hub gave no usable response or when every
    reply carries a "status" entry (RouterOS reports timeouts that way);
    True otherwise.
    """
    _LOGGER.debug("pinging - %s", ip_address)
    params = {
        "arp-ping": "yes",
        "interval": "100ms",
        "count": 3,
        "interface": interface,
        "address": ip_address,
    }
    cmd = "/ping"
    # command() returns None on a protocol error; the previous
    # list(self.command(...)) would raise TypeError in that case, and
    # the subsequent "is not None" check on a list was always true.
    response = self.command(cmd, params)
    if response is None:
        return False
    data = list(response)
    # Count replies flagged with "status" (timeout markers).
    status = sum(1 for result in data if "status" in result)
    if status == len(data):
        # All replies (or zero replies) timed out.
        _LOGGER.debug(
            "Mikrotik %s - %s arp_ping timed out", ip_address, interface
        )
        return False
    return True
def command(self, cmd, params=None):
    """Retrieve data from Mikrotik API.

    Returns the raw librouteros response, or None when the response is
    empty or the hub rejected the request with a protocol error.

    Raises:
        CannotConnect: when the connection dropped or timed out.
    """
    try:
        _LOGGER.info("Running command %s", cmd)
        if params:
            response = self.api(cmd=cmd, **params)
        else:
            response = self.api(cmd=cmd)
    except (
        librouteros.exceptions.ConnectionClosed,
        socket.error,
        socket.timeout,
    ) as api_error:
        _LOGGER.error("Mikrotik %s connection error %s", self._host, api_error)
        # Chain the original error so tracebacks keep the root cause.
        raise CannotConnect from api_error
    except librouteros.exceptions.ProtocolError as api_error:
        _LOGGER.warning(
            "Mikrotik %s failed to retrieve data. cmd=[%s] Error: %s",
            self._host,
            cmd,
            api_error,
        )
        return None
    return response if response else None
def update(self):
    """Refresh device_tracker data from the Mikrotik API, reconnecting
    to the hub first when the connection was lost.
    """
    needs_reconnect = not self.available or not self.api
    if needs_reconnect and not self.connect_to_hub():
        return
    _LOGGER.debug("updating network devices for host: %s", self._host)
    self.update_devices()
class MikrotikHub:
    """Mikrotik Hub Object."""

    def __init__(self, hass, config_entry):
        """Initialize the Mikrotik Client."""
        self.hass = hass
        self.config_entry = config_entry
        self._mk_data = None
        self.progress = None

    async def async_add_options(self):
        """Populate default options for Mikrotik.

        Migrates legacy values out of ``config_entry.data`` (via pop)
        into the entry's options on first run; no-op when options exist.
        """
        if self.config_entry.options:
            return
        data = self.config_entry.data
        options = {
            CONF_ARP_PING: data.pop(CONF_ARP_PING, False),
            CONF_FORCE_DHCP: data.pop(CONF_FORCE_DHCP, False),
            CONF_DETECTION_TIME: data.pop(
                CONF_DETECTION_TIME, DEFAULT_DETECTION_TIME
            ),
        }
        self.hass.config_entries.async_update_entry(
            self.config_entry, options=options
        )
def get_api(hass, entry):
    """Connect to a Mikrotik hub and return a librouteros API object.

    Args:
        hass: Home Assistant instance (kept for call-site compatibility).
        entry: mapping with host, port, credentials and SSL options.

    Raises:
        LoginError: when the hub rejects the credentials.
        CannotConnect: for any other connection failure.
    """
    _LOGGER.debug("Connecting to Mikrotik hub [%s]", entry[CONF_HOST])

    _login_method = (login_plain, login_token)
    kwargs = {"login_methods": _login_method, "port": entry["port"]}

    if entry[CONF_VERIFY_SSL]:
        # NOTE(review): hostname checking and certificate verification are
        # disabled here, so TLS only encrypts the channel — it does not
        # authenticate the hub.
        ssl_context = ssl.create_default_context()
        ssl_context.check_hostname = False
        ssl_context.verify_mode = ssl.CERT_NONE
        kwargs["ssl_wrapper"] = ssl_context.wrap_socket

    try:
        api = librouteros.connect(
            entry[CONF_HOST], entry[CONF_USERNAME], entry[CONF_PASSWORD], **kwargs,
        )
        _LOGGER.debug("Connected to %s successfully", entry[CONF_HOST])
        return api
    except (
        librouteros.exceptions.LibRouterosError,
        socket.error,
        socket.timeout,
    ) as api_error:
        _LOGGER.error("Mikrotik %s error: %s", entry[CONF_HOST], api_error)
        # Chain the underlying error so the root cause survives in logs.
        if "invalid user name or password" in str(api_error):
            raise LoginError from api_error
        raise CannotConnect from api_error
| 32.468599 | 88 | 0.602515 |
22441b73e069c3528a08a1ed36f4c0850e6085f0 | 4,137 | py | Python | gemlog_from_rss/spip/page.py | Hookz/Gemlog-from-RSS | b57a311db3008e8b0df2442236c4729a06d9b74d | [
"MIT"
] | 1 | 2021-02-19T16:06:07.000Z | 2021-02-19T16:06:07.000Z | gemlog_from_rss/spip/page.py | Hookz/Gemlog-from-RSS | b57a311db3008e8b0df2442236c4729a06d9b74d | [
"MIT"
] | null | null | null | gemlog_from_rss/spip/page.py | Hookz/Gemlog-from-RSS | b57a311db3008e8b0df2442236c4729a06d9b74d | [
"MIT"
] | null | null | null | from bs4 import BeautifulSoup
import requests
import re
from lxml.html import fromstring
from lxml.etree import ParserError
from gemlog_from_rss.spip import SinglePost
| 32.069767 | 109 | 0.547982 |
2245fe03d22e27328780ecdf4fb5f3ecb80912ed | 1,421 | py | Python | simulation/aivika/modeler/arrival_timer.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | simulation/aivika/modeler/arrival_timer.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | simulation/aivika/modeler/arrival_timer.py | dsorokin/aivika-modeler | 5c112015f9af6ba1974a3b208842da01e705f9ac | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2017 David Sorokin <david.sorokin@gmail.com>
#
# Licensed under BSD3. See the LICENSE.txt file in the root of this distribution.
from simulation.aivika.modeler.model import *
from simulation.aivika.modeler.port import *
def create_arrival_timer(model, name, descr = None):
    """Create a timer port that measures the processing time of transacts."""
    timer_port = ArrivalTimerPort(model, name = name, descr = descr)
    timer_port.write('newArrivalTimer')
    return timer_port
def arrival_timer_stream(arrival_timer_port, stream_port):
    """Wrap ``stream_port`` so the timer measures each transact's processing time."""
    # Validate the ports before generating any code.
    expect_arrival_timer(arrival_timer_port)
    expect_stream(stream_port)
    expect_same_model([arrival_timer_port, stream_port])
    model = arrival_timer_port.get_model()
    item_data_type = stream_port.get_item_data_type()
    code = ('return $ runProcessor (arrivalTimerProcessor '
            + arrival_timer_port.read() + ') ' + stream_port.read())
    result = StreamPort(model, item_data_type)
    result.write(code)
    result.bind_to_input()
    stream_port.bind_to_output()
    return result
def reset_arrival_timer(arrival_timer_port, reset_time):
    """Schedule a reset of the arrival timer statistics at ``reset_time``."""
    expect_arrival_timer(arrival_timer_port)
    model = arrival_timer_port.get_model()
    action = ('runEventInStartTime $ enqueueEvent ' + str(reset_time)
              + ' $ resetArrivalTimer ' + arrival_timer_port.read())
    model.add_action(action)
| 36.435897 | 105 | 0.712878 |
2246b2f969806313a587b3933434d8a9f94f2b5f | 106 | py | Python | mundo 1/aula 7/exer8.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 1/aula 7/exer8.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | null | null | null | mundo 1/aula 7/exer8.py | jonatan098/cursopython | 6e4cbaef6229e230fdbc66d80ec1b5a089887b0d | [
"MIT"
] | 1 | 2020-02-22T17:21:05.000Z | 2020-02-22T17:21:05.000Z | m = float(input('digite o metro '))
print(f'{m} metros e igual {m*100} centimetros e {m*1000} milimetros') | 53 | 70 | 0.688679 |
2246b7989fa46d68e0d0a29597a7a6170e8898d9 | 859 | py | Python | ros/src/airsim_ros_pkgs/scripts/readscan.py | juwangvsu/AirSim | fcf8c7d89821a9067d53f0d083c7bc6efac1776c | [
"MIT"
] | null | null | null | ros/src/airsim_ros_pkgs/scripts/readscan.py | juwangvsu/AirSim | fcf8c7d89821a9067d53f0d083c7bc6efac1776c | [
"MIT"
] | null | null | null | ros/src/airsim_ros_pkgs/scripts/readscan.py | juwangvsu/AirSim | fcf8c7d89821a9067d53f0d083c7bc6efac1776c | [
"MIT"
] | 1 | 2021-03-23T23:11:02.000Z | 2021-03-23T23:11:02.000Z | #!/usr/bin/env python
import rospy
import rosbag
import os
import sys
import textwrap
import yaml
# Module-level slot for the lidar message (filled elsewhere).
lidarmsg=None
################# read the lidar msg from yaml file and return ##############
if __name__ == '__main__':
    # NOTE(review): readlidardummy() is not defined in this excerpt — the
    # function body appears to have been stripped; confirm upstream source.
    readlidardummy()
| 23.861111 | 100 | 0.694994 |
2247d6e5fabfa0bfb68bd37d1e8736537d83e496 | 4,375 | py | Python | flytekit/models/qubole.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | null | null | null | flytekit/models/qubole.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | 2 | 2021-06-26T04:32:43.000Z | 2021-07-14T04:47:52.000Z | flytekit/models/qubole.py | slai/flytekit | 9d73d096b748d263a638e6865d15db4880845305 | [
"Apache-2.0"
] | null | null | null | from flyteidl.plugins import qubole_pb2 as _qubole
from flytekit.models import common as _common
| 26.355422 | 109 | 0.597257 |
224b518501a4fc21242f8b7e19fdfbb05fcec01d | 1,583 | py | Python | app/app.py | SogoKato/mecab-parser | 402f22b4da283dfc74935fc66f092452158beb56 | [
"MIT"
] | null | null | null | app/app.py | SogoKato/mecab-parser | 402f22b4da283dfc74935fc66f092452158beb56 | [
"MIT"
] | null | null | null | app/app.py | SogoKato/mecab-parser | 402f22b4da283dfc74935fc66f092452158beb56 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from dataclasses import asdict
from logging import DEBUG
import os
from flask import Flask, jsonify, request
from werkzeug.exceptions import HTTPException
from mecab_parser import MecabParserFactory
# Flask application; JSON_AS_ASCII=False lets jsonify emit raw UTF-8
# (e.g. Japanese text) instead of \u escape sequences.
app = Flask(__name__)
app.config["JSON_AS_ASCII"] = False
# Run the development server when executed directly; honour $PORT.
if __name__ == "__main__":
    port = int(os.getenv("PORT", 5000))
    app.logger.level = DEBUG
    app.run(host="0.0.0.0", port=port)
| 27.77193 | 94 | 0.599495 |
224e634019de5a0413838cff34d92fa96b8463fb | 1,402 | py | Python | src/data_migrator/emitters/__init__.py | schubergphilis/data-migrator | b5ced9abd6bc2c60e9c115951fee38c2fd08923f | [
"MIT"
] | 18 | 2017-04-27T08:57:40.000Z | 2021-04-01T22:39:40.000Z | src/data_migrator/emitters/__init__.py | schubergphilis/data-migrator | b5ced9abd6bc2c60e9c115951fee38c2fd08923f | [
"MIT"
] | 1,077 | 2017-04-13T20:56:44.000Z | 2022-03-31T01:23:50.000Z | src/data_migrator/emitters/__init__.py | schubergphilis/data-migrator | b5ced9abd6bc2c60e9c115951fee38c2fd08923f | [
"MIT"
] | 6 | 2017-04-17T14:14:45.000Z | 2020-05-05T10:05:23.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Emitters are used to export models to output format.
This module contains all classes for emitters: base and actuals. Currently
the system has two emitters: :class:`~.CSVEmitter` and :class:`~.MySQLEmitter`
implemented, of which the last is the default emitter. An emitter provides the
export format for the scanned and cleaned datasets. It also provides preambles
and postambles in the output files, for example to clean the target table
before loading it.
The following classes are defined in this module:
* :class:`~.BaseEmitter`
* :class:`~.MySQLEmitter`
* :class:`~.CSVEmitter`
* :class:`~.JSONEmitter`
* :class:`~.UpdateEmitter`
The basic structure for emitting is a combination between
:class:`~.BaseManager` and :class:`~.BaseEmitter`:
.. code-block:: python
e = Emitter(manager=Model.objects)
print e.preamble(header=[..my header lines to add..])
for l in Model.objects.all():
print e.emit(l) # emit is returning a list of strings!
.. note::
At this moment *data-migrator* does not an actively take part in schema
migrations of any sort. It is purely about cleaning, anonymizing and
transforming data (yet!).
"""
from .update import UpdateEmitter # noqa
from .mysql import MySQLEmitter # noqa
from .csv import CSVEmitter # noqa
from .json_emit import JSONEmitter # noqa
from .singer import SingerEmitter # noqa
| 32.604651 | 78 | 0.738231 |
22529ca1da0ceee85ccd01a18946c0340e79ffbb | 330 | py | Python | cycleshare/migrations/0013_remove_cycle_toprofile.py | vasundhara7/College-EWallet | 0a4c32bc08218650635a04fb9a9e28446fd4f3e1 | [
"Apache-2.0"
] | 2 | 2019-07-28T00:34:09.000Z | 2020-06-18T11:58:03.000Z | cycleshare/migrations/0013_remove_cycle_toprofile.py | vasundhara7/College-EWallet | 0a4c32bc08218650635a04fb9a9e28446fd4f3e1 | [
"Apache-2.0"
] | null | null | null | cycleshare/migrations/0013_remove_cycle_toprofile.py | vasundhara7/College-EWallet | 0a4c32bc08218650635a04fb9a9e28446fd4f3e1 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.0.9 on 2018-12-12 08:18
from django.db import migrations
| 18.333333 | 47 | 0.593939 |
2252c6b7f19070373e0889abae519f050d93b6aa | 5,495 | py | Python | projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py | helloworld1983/NPU_on_FPGA | 1d0626dbed463cfacd47805a3214a43662f5b28c | [
"BSD-2-Clause"
] | 63 | 2018-08-05T14:30:47.000Z | 2022-03-31T09:41:55.000Z | projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py | cxdzyq1110/NPU_on_FPGA | 1d0626dbed463cfacd47805a3214a43662f5b28c | [
"BSD-2-Clause"
] | null | null | null | projects/aFPGA/10_python/npu/generate_npu_sram_init_file.py | cxdzyq1110/NPU_on_FPGA | 1d0626dbed463cfacd47805a3214a43662f5b28c | [
"BSD-2-Clause"
] | 22 | 2018-11-06T13:01:28.000Z | 2022-03-09T08:52:27.000Z | import math
import numpy as np
from scipy import signal
#%%
# Test dimensions: M x N element-wise matrices, matrix-mult shapes
# (MAT_M x MAT_N) * (MAT_N x MAT_P), pooling window Pm x Pn,
# convolution kernel Km x Kn; MODE selects mean (0) vs max (1) pooling.
M = 16; N = 100;
AddImm = 1000;
MAT_M = 3; MAT_N = 5; MAT_P = 7;
Pm = 5; Pn = 5;
Km = 5; Kn = 5;
MODE = 1;
# Dump all parameters, one decimal per line, for the verification bench.
fpp = open("./npu_verification_para.list", "w")
fpp.write("%d\n"%(M))
fpp.write("%d\n"%(N))
fpp.write("%d\n"%(AddImm))
fpp.write("%d\n"%(MAT_M))
fpp.write("%d\n"%(MAT_N))
fpp.write("%d\n"%(MAT_P))
fpp.write("%d\n"%(Km))
fpp.write("%d\n"%(Kn))
fpp.write("%d\n"%(Pm))
fpp.write("%d\n"%(Pn))
fpp.write("%d\n"%(MODE))
fpp.close()
#%%
# SRAM init file: "@<addr>" lines set the word address, then each 32-bit
# fixed-point value (Q16.16) is written as two 16-bit hex words, low first.
fpd = open("./source_sram_dq.list", "w")
#%%
# Operand matrix 1 at word address 0x010000 (file addresses are in
# 16-bit units, hence the factor of 2).
Dollar1 = np.random.randint(-50,-1, size=(M,N))*2**16
fpd.write("@%X\n"%(2*0x010000))
for i in range(0, M):
    for j in range(0, N):
        tmp_v = int(Dollar1[i, j])
        if tmp_v<0:
            # Convert to 32-bit two's-complement representation.
            tmp_v = tmp_v + 0x100000000
        fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
        fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
#%%
# Operand matrix 2 at word address 0x020000, same encoding.
Dollar22 = np.random.randint(-15,-10, size=(M,N))*2**16
fpd.write("@%X\n"%(2*0x020000))
for i in range(0, M):
    for j in range(0, N):
        tmp_v = int(Dollar22[i, j])
        if tmp_v<0:
            tmp_v = tmp_v + 0x100000000
        fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
        fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
#%%
# Expected results (decimal, Q16.16) for element-wise ADD, ADD-immediate,
# SUB and DOT (element-wise multiply) over Dollar1/Dollar22.
fp_add = open("./fp_add_test.txt", "w")
fp_addi = open("./fp_addi_test.txt", "w")
fp_sub = open("./fp_sub_test.txt", "w")
fp_dot = open("./fp_dot_test.txt", "w")
#
for i in range(0, len(Dollar1)):
    for j in range(0, len(Dollar1[0])):
        add_value = int((Dollar1[i, j]+Dollar22[i, j]))
        addi_value = int((Dollar1[i, j]+AddImm))
        sub_value = int((Dollar1[i, j]-Dollar22[i, j]))
        # One operand scaled back to real value so the product stays Q16.16.
        dot_value = int((Dollar1[i, j]/2**16*Dollar22[i, j]))
        fp_add.write("%d\n"%(add_value))
        fp_sub.write("%d\n"%(sub_value))
        fp_dot.write("%d\n"%(dot_value))
        fp_addi.write("%d\n"%(addi_value))
fp_add.close()
fp_addi.close()
fp_sub.close()
fp_dot.close()
#%%
# Expected transpose: emit Dollar1 column-major (j outer, i inner).
fp_tran = open("./fp_tran_test.txt", "w")
#
for j in range(0, len(Dollar1[0])):
    for i in range(0, len(Dollar1)):
        tran_value = int((Dollar1[i, j]))
        fp_tran.write("%d\n"%(tran_value))
fp_tran.close()
#%%
# Convolution kernel at word address 0x030000, same two's-complement
# Q16.16 encoding as the operand matrices.
kernel = np.random.randint(-15,10, size=(Km,Kn))*2**16
fpd.write("@%X\n"%(2*0x030000))
for i in range(0, len(kernel)):
    for j in range(0, len(kernel[0])):
        tmp_v = int(kernel[i, j])
        if tmp_v<0:
            tmp_v = tmp_v + 0x100000000
        fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
        fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
# Reference 2-D 'valid' convolution computed on real values, then
# re-scaled to Q16.16.
d1 = Dollar1
d2 = kernel
d1x = d1/2**16;
d2x = d2/2**16;
# np.int was removed in NumPy 1.24 (it was a plain alias of the builtin
# int, so astype(int) is bit-identical).
dcx = (signal.convolve2d(d1x, d2x, 'valid') * 2**16).astype(int)
#
fp_conv = open("./fp_conv_test.txt", "w")
for i in range(0, len(dcx)):
    for j in range(0, len(dcx[0])):
        conv_value = int(dcx[i, j])
        fp_conv.write("%d\n"%(conv_value))
fp_conv.close()
#%% pooling
# Expected pooling over non-overlapping Pm x Pn windows of d1x:
# MODE 0 = mean pooling, MODE 1 = max pooling; result scaled to Q16.16.
fp_pool = open("./fp_pool_test.txt", "w")
dpx = np.zeros((M//Pm, N//Pn))
for i in range(0, M//Pm):
    for j in range(0, N//Pn):
        if MODE==0:
            dpx[i, j] = np.mean(d1x[Pm*i:Pm*i+Pm, Pn*j:Pn*j+Pn])
        elif MODE==1:
            dpx[i, j] = np.max(d1x[Pm*i:Pm*i+Pm, Pn*j:Pn*j+Pn])
        pool_value = int(2**16*dpx[i, j])
        fp_pool.write("%d\n"%(pool_value))
fp_pool.close()
#%% MULT
# Matrix multiply test: small integer matrices so the product fits Q16.16
# without overflow; operands stored at 0x040000 / 0x050000.
mat1 = np.random.randint(-1,2, size=(MAT_M,MAT_N))
mat2 = np.random.randint(-2,-1, size=(MAT_N,MAT_P))
# Q16.16-scaled copies (kept for reference; the SRAM writes below scale
# inline).
mat1_216 = 2**16*mat1
mat2_216 = 2**16*mat2
mat3 = np.dot(mat1, mat2)
fpd.write("@%X\n"%(2*0x040000))
#
for i in range(0, len(mat1)):
    for j in range(0, len(mat1[0])):
        mult_value = int(2**16*mat1[i, j])
        fpd.write("%04X\n"%((mult_value >> 0)&0xFFFF))
        fpd.write("%04X\n"%((mult_value >> 16)&0xFFFF))
fpd.write("@%X\n"%(2*0x050000))
for i in range(0, len(mat2)):
    for j in range(0, len(mat2[0])):
        mult_value = int(2**16*mat2[i, j])
        fpd.write("%04X\n"%((mult_value >> 0)&0xFFFF))
        fpd.write("%04X\n"%((mult_value >> 16)&0xFFFF))
#
# Expected product, scaled to Q16.16, written in decimal.
fp_mult = open("./fp_mult_test.txt", "w")
for i in range(0, len(mat3)):
    for j in range(0, len(mat3[0])):
        mult_value = int(2**16*mat3[i, j])
        fp_mult.write("%d\n"%(mult_value))
fp_mult.close()
#%%
######################
# tanh test: normally distributed operand stored at 0x060000 and the
# expected tanh(x) values (computed on the real value, re-scaled Q16.16).
fp_tanh = open("./fp_tanh_test.txt", "w")
Dollar2 = np.random.randn(M,N)*2**16
fpd.write("@%X\n"%(2*0x060000))
for i in range(0, M):
    for j in range(0, N):
        tmp_v = int(Dollar2[i, j])
        if tmp_v<0:
            tmp_v = tmp_v + 0x100000000
        fpd.write("%04X\n"%((tmp_v >> 0)&0xFFFF))
        fpd.write("%04X\n"%((tmp_v >> 16)&0xFFFF))
        tanh_value = int(2**16*math.tanh(Dollar2[i, j]/(2**16)))
        fp_tanh.write("%d\n"%(tanh_value))
fp_tanh.close()
#%%
# Scalar-add test: add the first element of Dollar2 to every element of
# Dollar1 and record the expected Q16.16 sums.
fp_adds = open("./fp_adds_test.txt", "w")
Dollar2_ini = Dollar2[0, 0]
#
for i in range(0, len(Dollar1)):
    for j in range(0, len(Dollar1[0])):
        adds_value = int((Dollar1[i, j] + Dollar2_ini))
        fp_adds.write("%d\n"%(adds_value))
fp_adds.close()
#%% RGB565
# RGB565-to-grayscale test: random 5/6/5-bit channels packed into 16-bit
# pixels at 0x070000; expected ITU-R BT.601 luma (Y = 16..235) per pixel.
fp_gray = open("./fp_gray_test.txt", "w")
fpd.write("@%X\n"%(2*0x070000))
red = np.random.randint(0,2**5, size=(M,N))
green = np.random.randint(0,2**6, size=(M,N))
blue = np.random.randint(0,2**5, size=(M,N))
rgb565 = red*2**11 + green*2**5 + blue
#
for i in range(0, len(rgb565)):
    for j in range(0, len(rgb565[0])):
        # Expand each channel back to 8 bits.
        r = ((rgb565[i][j]>>11) & 0x1F) *8
        g = ((rgb565[i][j]>>5) & 0x3F) *4
        b = ((rgb565[i][j]>>0) & 0x1F) *8
        gray_value = int((r*66 + g*129 + b*25)/256) + 16
        # Clamp to the studio-swing range [16, 235].
        if gray_value<16:
            gray_value = 16
        elif gray_value>235:
            gray_value = 235
        #
        fpd.write("%04X\n"%((rgb565[i][j] >> 0)&0xFFFF))
        fpd.write("%04X\n"%((rgb565[i][j] >> 16)&0xFFFF))
        fp_gray.write("%d\n"%(gray_value))
fp_gray.close()
#%%
fpd.close()
225438e5c2b8551e69ccb321df71b6704ae2b4d5 | 17 | py | Python | 2.py | modianor/git_project | 21d664bfa31d6f3e584ffc594514ea4342b6bc3f | [
"MIT"
] | null | null | null | 2.py | modianor/git_project | 21d664bfa31d6f3e584ffc594514ea4342b6bc3f | [
"MIT"
] | null | null | null | 2.py | modianor/git_project | 21d664bfa31d6f3e584ffc594514ea4342b6bc3f | [
"MIT"
] | null | null | null | A = 1
B = 2
C = 4 | 5.666667 | 5 | 0.352941 |
2255fb2ff207d881f927e1b321a4dc62c8ca610a | 17 | py | Python | src/ixu/commands/server/__init__.py | luanguimaraesla/ixu | f213bdf27fc7336a76110cf3f89e30ae1d5a64fb | [
"Apache-2.0"
] | 2 | 2021-05-14T17:14:09.000Z | 2021-06-13T21:35:04.000Z | src/ixu/commands/server/__init__.py | luanguimaraesla/ixu | f213bdf27fc7336a76110cf3f89e30ae1d5a64fb | [
"Apache-2.0"
] | null | null | null | src/ixu/commands/server/__init__.py | luanguimaraesla/ixu | f213bdf27fc7336a76110cf3f89e30ae1d5a64fb | [
"Apache-2.0"
] | null | null | null | from . import up
| 8.5 | 16 | 0.705882 |
225632b6786bc6319f8c49ffcd364075da52275b | 247 | py | Python | pythoncev/exercicios/ex096.py | gustavobelloni/Python | 156a99d10f460f0fcaea18a87d35d4b0e3dba493 | [
"MIT"
] | null | null | null | pythoncev/exercicios/ex096.py | gustavobelloni/Python | 156a99d10f460f0fcaea18a87d35d4b0e3dba493 | [
"MIT"
] | null | null | null | pythoncev/exercicios/ex096.py | gustavobelloni/Python | 156a99d10f460f0fcaea18a87d35d4b0e3dba493 | [
"MIT"
] | null | null | null |
print('Controle de Terrenos')
print('--------------------')
l = float(input('Largura (m): '))
c = float(input('Comprimento (m): '))
rea(l, c)
| 22.454545 | 59 | 0.554656 |
22567858f90706f357ad018d732e41ca1cb74961 | 2,678 | py | Python | readthedocs/proxito/views/decorators.py | tkoyama010/readthedocs.org | aac8fb39586db902d9fbb51b639dd281c819dae2 | [
"MIT"
] | 2 | 2021-05-16T06:57:15.000Z | 2021-05-16T06:57:18.000Z | readthedocs/proxito/views/decorators.py | tkoyama010/readthedocs.org | aac8fb39586db902d9fbb51b639dd281c819dae2 | [
"MIT"
] | 12 | 2021-03-21T13:16:50.000Z | 2022-03-12T00:55:05.000Z | readthedocs/proxito/views/decorators.py | mondeja/readthedocs.org | fb01c6d9d78272e3f4fd146697e8760c04e4fbb6 | [
"MIT"
] | 1 | 2021-07-09T14:02:39.000Z | 2021-07-09T14:02:39.000Z | import logging
from functools import wraps
from django.http import Http404
from readthedocs.projects.models import Project, ProjectRelationship
log = logging.getLogger(__name__) # noqa
def map_subproject_slug(view_func):
    """
    A decorator that maps a ``subproject_slug`` URL param into a Project.
    :raises: Http404 if the Project doesn't exist
    .. warning:: Does not take into account any kind of privacy settings.
    """
    # NOTE(review): ``inner_view`` is not defined in this excerpt — the
    # wrapper implementation appears to have been stripped; confirm the
    # upstream source before relying on this function.
    return inner_view
def map_project_slug(view_func):
    """
    A decorator that maps a ``project_slug`` URL param into a Project.
    :raises: Http404 if the Project doesn't exist
    .. warning:: Does not take into account any kind of privacy settings.
    """
    # NOTE(review): ``inner_view`` is not defined in this excerpt — the
    # wrapper implementation appears to have been stripped; confirm the
    # upstream source before relying on this function.
    return inner_view
| 33.475 | 100 | 0.581404 |
2256a0c4684d099dc813eeba74a7fcc9133e772a | 4,217 | py | Python | tortue_geniale/tg_channel_events.py | vavalm/discord_bot_tortue_geniale | 2fa2865166dd109b1138b77ed7f21d8e59efd8ab | [
"MIT"
] | null | null | null | tortue_geniale/tg_channel_events.py | vavalm/discord_bot_tortue_geniale | 2fa2865166dd109b1138b77ed7f21d8e59efd8ab | [
"MIT"
] | null | null | null | tortue_geniale/tg_channel_events.py | vavalm/discord_bot_tortue_geniale | 2fa2865166dd109b1138b77ed7f21d8e59efd8ab | [
"MIT"
] | null | null | null | import discord
import asyncio
import re
import logging
from data.groups_name import free_random_name
logging.basicConfig(level=logging.INFO)
client = discord.Client()
| 35.436975 | 116 | 0.596396 |
22574ae53ea97f421f17d51078183bbb695cb068 | 566 | py | Python | middleware/login_required.py | ahmetelgun/flask-boilerplate | 56bc0235b5f00a7aaca6a9536a1744442863b8b6 | [
"Apache-2.0"
] | 2 | 2021-12-01T20:48:44.000Z | 2022-02-04T16:33:33.000Z | middleware/login_required.py | ahmetelgun/flask_authentication_boilerplate | 56bc0235b5f00a7aaca6a9536a1744442863b8b6 | [
"Apache-2.0"
] | null | null | null | middleware/login_required.py | ahmetelgun/flask_authentication_boilerplate | 56bc0235b5f00a7aaca6a9536a1744442863b8b6 | [
"Apache-2.0"
] | null | null | null | from flask import request, make_response, jsonify, g
from datetime import datetime
from functools import wraps
import jwt
from models import DBContext, User
from settings import SECRET_KEY
from service import is_token_valid
| 22.64 | 52 | 0.681979 |
22574e100d92910a2acaa1fb84cd7d78b47e8242 | 2,189 | py | Python | libs/sqlservice/utils.py | smartadvising/smartadvising-api | 74cfcc336c87523fcb011a96bc4506ecdef93afe | [
"MIT"
] | null | null | null | libs/sqlservice/utils.py | smartadvising/smartadvising-api | 74cfcc336c87523fcb011a96bc4506ecdef93afe | [
"MIT"
] | null | null | null | libs/sqlservice/utils.py | smartadvising/smartadvising-api | 74cfcc336c87523fcb011a96bc4506ecdef93afe | [
"MIT"
] | null | null | null | """
Utilities
---------
The utilities module.
"""
from collections.abc import Mapping, Sequence
from functools import wraps
import types
def classonce(meth):
    """Decorator that executes a class method once, stores the results at the
    class level, and subsequently returns those results for every future method
    call.
    """
    # NOTE(review): ``decorated`` is not defined in this excerpt — the
    # wrapper implementation appears to have been stripped; confirm the
    # upstream source before relying on this decorator.
    return decorated
def is_sequence(value):
    """Test whether ``value`` can be treated like a ``list`` for iteration:
    a generator or any ``Sequence`` other than ``str``.
    """
    if is_generator(value):
        return True
    return isinstance(value, Sequence) and not isinstance(value, str)
def is_generator(value):
    """Return whether ``value`` is a generator or generator-like: either a
    real generator object, or an iterator (has ``__iter__`` and
    ``__next__``) that is not indexable (no ``__getitem__``).
    """
    if isinstance(value, types.GeneratorType):
        return True
    acts_like_iterator = hasattr(value, '__iter__') and hasattr(value, '__next__')
    return acts_like_iterator and not hasattr(value, '__getitem__')
| 27.708861 | 79 | 0.640018 |
2257918164befc9b4532377573ee36b973df3e73 | 1,817 | py | Python | tests/test_normals.py | foobarbecue/trimesh | db2c649ebc577874702644248964b3295bd6ed5b | [
"MIT"
] | null | null | null | tests/test_normals.py | foobarbecue/trimesh | db2c649ebc577874702644248964b3295bd6ed5b | [
"MIT"
] | null | null | null | tests/test_normals.py | foobarbecue/trimesh | db2c649ebc577874702644248964b3295bd6ed5b | [
"MIT"
] | 1 | 2019-05-31T03:37:21.000Z | 2019-05-31T03:37:21.000Z | try:
from . import generic as g
except BaseException:
import generic as g
# When run as a script: enable trimesh logging and hand off to unittest.
if __name__ == '__main__':
    g.trimesh.util.attach_to_log()
    g.unittest.main()
| 41.295455 | 79 | 0.587782 |
2257acdf6a3fd8c165f7da9ebcd60fa9bce30be5 | 14,692 | py | Python | flagmaker/settings.py | google/sa360-bigquery-bootstrapper | ca25e9d9f4d00f392970e5e942942f9acb21952b | [
"Apache-2.0"
] | 4 | 2020-03-14T03:26:46.000Z | 2020-12-13T14:43:22.000Z | flagmaker/settings.py | google/sa360-bigquery-bootstrapper | ca25e9d9f4d00f392970e5e942942f9acb21952b | [
"Apache-2.0"
] | 1 | 2020-11-17T16:26:50.000Z | 2020-11-17T16:26:50.000Z | flagmaker/settings.py | google/sa360-bigquery-bootstrapper | ca25e9d9f4d00f392970e5e942942f9acb21952b | [
"Apache-2.0"
] | 1 | 2020-10-13T17:02:23.000Z | 2020-10-13T17:02:23.000Z | # /***********************************************************************
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Note that these code samples being shared are not official Google
# products and are not formally supported.
# ************************************************************************/
import os
from typing import Union
import yaml
from yaml.parser import ParserError
from collections.abc import Iterable
from enum import EnumMeta
from typing import ClassVar
from typing import Dict
from typing import Generic
from typing import List
from typing import TypeVar
from absl import flags
from prompt_toolkit import ANSI
from prompt_toolkit import prompt
from prompt_toolkit.shortcuts import CompleteStyle
from termcolor import colored
from termcolor import cprint
from flagmaker.building_blocks import list_to_string_list
from flagmaker.exceptions import FlagMakerPromptInterruption
from flagmaker.validators import ChoiceValidator
from .building_blocks import SettingOptionInterface
from .building_blocks import SettingsInterface
from .building_blocks import Value
from .exceptions import FlagMakerConfigurationError
from .exceptions import FlagMakerInputError
from .hints import StringKeyDict
from .sanity import Validator
FLAGS = flags.FLAGS
T = TypeVar('T', bound=SettingsInterface)
def get_option_prompt(self, k, default, prompt_val):
    """Build the multi-line prompt shown for an enum-backed setting.

    Mirrors get_basic_prompt(): the trailing ``prompt_val`` is appended
    after the default marker. (Previously ``prompt_val`` was passed to
    ``format`` but never referenced by the template, so it was silently
    dropped.)

    Raises:
        FlagMakerConfigurationError: when no options enum is configured.
    """
    if not isinstance(self._options, EnumMeta):
        raise FlagMakerConfigurationError('Need to add options for ' + k)
    options = list_to_string_list(self.options)
    return (
        '{0}\n'
        '{1}\n'
        '{2}\n'
        'Choices{3}{4}: '
    ).format(
        k,
        colored('Options:', attrs=['underline']),
        options,
        default, prompt_val
    )
def get_basic_prompt(self, k, default, prompt_val):
    """Return the single-line prompt: setting name, default marker, tail."""
    return f'{k}{default}{prompt_val}'
def _set_value(self, value):
    """Coerce ``value`` to the flag's declared type, validate it, store it,
    and fire the optional ``after`` hook at most once per (setting, hook)
    pair (tracked in ``self.called``).

    Raises:
        FlagMakerInputError: for an enum flag value outside its options.
    """
    if value is None:
        self._value.set_val(None)
        return
    if self.method == flags.DEFINE_boolean:
        # Accept common string spellings of booleans as well as real bools.
        if value in ['1', 'true', 'True', True]:
            value = True
        elif value in ['0', 'false', 'False', False]:
            value = False
    elif self.method == flags.DEFINE_integer:
        value = int(value)
    elif self.method == flags.DEFINE_enum:
        options = self.options
        is_iterable = isinstance(options, Iterable)
        if not (is_iterable and value in options):
            raise FlagMakerInputError(
                'Need to choose one of [{}]'.format(', '.join(options))
            )
    self._value.set_val(value)
    # perform actions
    if self.after is None:
        self._error = False
        return
    # Run the hook only if this (setting, hook) pair has not fired yet.
    in_called = (self, self.after) not in self.called
    if in_called:
        self.called[(self, self.after)] = True
        self.after(self)
class SettingBlock:
    """A named group of settings with an optional gating condition."""

    def __init__(self, block: str,
                 settings: Dict[str, SettingOption],
                 conditional: callable = None):
        """Store the block name, its settings map, and the gate callable."""
        self.name, self.settings, self.conditional = (
            block, settings, conditional)
AbstractSettingsClass = ClassVar[T]
| 33.239819 | 80 | 0.544854 |
225800b91896c420ce99d82a83f56df5a8a804aa | 19,390 | py | Python | cli/sub.py | mylovage/GolemQT | 7560d4e67d564022fbbdc00c069a51c673bfe267 | [
"MIT"
] | null | null | null | cli/sub.py | mylovage/GolemQT | 7560d4e67d564022fbbdc00c069a51c673bfe267 | [
"MIT"
] | null | null | null | cli/sub.py | mylovage/GolemQT | 7560d4e67d564022fbbdc00c069a51c673bfe267 | [
"MIT"
] | null | null | null | # coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2018-2020 azai/Rgveda/GolemQuant base on QUANTAXIS/yutiansut
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import json
import sys
import websocket
from datetime import datetime as dt, timezone, timedelta, date
import datetime
import time as timer
import numba as nb
import traceback
try:
import easyquotation
easyquotation_not_install = False
except:
easyquotation_not_install = True
try:
import QUANTAXIS as QA
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION
from QUANTAXIS.QAUtil.QASql import QA_util_sql_mongo_sort_ASCENDING
from QUANTAXIS.QAUtil.QADate_trade import (
QA_util_if_tradetime,
QA_util_get_pre_trade_date,
QA_util_get_real_date,
trade_date_sse
)
from QUANTAXIS.QAData.QADataStruct import (
QA_DataStruct_Index_min,
QA_DataStruct_Index_day,
QA_DataStruct_Stock_day,
QA_DataStruct_Stock_min
)
from QUANTAXIS.QAIndicator.talib_numpy import *
from QUANTAXIS.QAUtil.QADate_Adv import (
QA_util_timestamp_to_str,
QA_util_datetime_to_Unix_timestamp,
QA_util_print_timestamp
)
from QUANTAXIS.QAUtil import (
DATABASE,
QASETTING,
QA_util_log_info,
QA_util_log_debug,
QA_util_log_expection,
QA_util_to_json_from_pandas
)
except:
print('PLEASE run "pip install QUANTAXIS" before call GolemQ.cli.sub modules')
pass
try:
from GolemQ.utils.parameter import (
AKA,
INDICATOR_FIELD as FLD,
TREND_STATUS as ST,
)
except:
from GolemQ.utils.symbol import (
normalize_code
)
def formater_l1_tick(code: str, l1_tick: dict) -> dict:
    """Normalize one raw L1 quote dict in place and return it.

    Renames vendor fields (now/volume/date+time) to the canonical
    price/vol/servertime/datetime keys and drops the originals.
    """
    # 00-prefixed 6-digit codes are ambiguous; pass the price so
    # normalize_code can disambiguate the market.
    if len(code) == 6 and code.startswith('00'):
        l1_tick['code'] = normalize_code(code, l1_tick['now'])
    else:
        l1_tick['code'] = normalize_code(code)
    quote_date = l1_tick.pop('date')
    quote_time = l1_tick.pop('time')
    l1_tick['servertime'] = quote_time
    l1_tick['datetime'] = '{} {}'.format(quote_date, quote_time)
    l1_tick['price'] = l1_tick.pop('now')
    l1_tick['vol'] = l1_tick.pop('volume')
    l1_tick.pop('name')
    # print(l1_tick)
    return l1_tick
def formater_l1_ticks(l1_ticks: dict, codelist: list = None, stacks=None, symbol_list=None) -> dict:
    """Normalize a snapshot of L1 quotes, de-duplicated by symbol.

    When ``stacks``/``symbol_list`` are supplied, new ticks are appended
    to them; otherwise fresh accumulators are created. Returns the
    (ticks, symbols) pair.
    """
    if stacks is None:
        l1_ticks_data, symbol_list = [], []
    else:
        l1_ticks_data = stacks
    for code, raw_values in l1_ticks.items():
        # Honour the optional code filter.
        if codelist is not None and code not in codelist:
            continue
        tick = formater_l1_tick(code, raw_values)
        if tick['code'] not in symbol_list:
            l1_ticks_data.append(tick)
            symbol_list.append(tick['code'])
    return l1_ticks_data, symbol_list
def sub_l1_from_sina():
"""
L13mongodbSSD
Intel DC P3600 800GB SSD 3600tick < 0.6s
"""
client = QASETTING.client['QAREALTIME']
if (easyquotation_not_install == True):
print(u'PLEASE run "pip install easyquotation" before call GolemQ.cli.sub modules')
return
quotation = easyquotation.use('sina') # ['sina'] ['tencent', 'qq']
sleep_time = 2.0
sleep = int(sleep_time)
_time1 = dt.now()
database = collections_of_today()
get_once = True
# /
end_time = dt.strptime(str(dt.now().date()) + ' 16:30',
'%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15',
'%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
while (dt.now() < end_time):
# /
end_time = dt.strptime(str(dt.now().date()) + ' 16:30',
'%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15',
'%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
_time = dt.now()
if QA_util_if_tradetime(_time) and \
(dt.now() < day_changed_time):
#
print(u'~ {} '.format(datetime.date.today()))
database = collections_of_today()
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
continue
symbol_list = []
l1_ticks_data = []
if QA_util_if_tradetime(_time) or \
(get_once): #
l1_ticks = quotation.market_snapshot(prefix=True)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks)
if (dt.now() < start_time) or \
((len(l1_ticks_data) > 0) and \
(dt.strptime(l1_ticks_data[-1]['datetime'],
'%Y-%m-%d %H:%M:%S') < dt.strptime(str(dt.now().date()) + ' 00:00',
'%Y-%m-%d %H:%M'))):
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
continue
#
l1_ticks = quotation.market_snapshot(prefix=False)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks,
stacks=l1_ticks_data,
symbol_list=symbol_list)
# tick
query_id = {
"code": {
'$in': list(set([l1_tick['code'] for l1_tick in l1_ticks_data]))
},
"datetime": sorted(list(set([l1_tick['datetime'] for l1_tick in l1_ticks_data])))[-1]
}
# print(sorted(list(set([l1_tick['datetime'] for l1_tick in
# l1_ticks_data])))[-1])
refcount = database.count_documents(query_id)
if refcount > 0:
if (len(l1_ticks_data) > 1):
#
# print('Delete', refcount, list(set([l1_tick['datetime']
# for l1_tick in l1_ticks_data])))
database.delete_many(query_id)
database.insert_many(l1_ticks_data)
else:
#
database.replace_one(query_id, l1_ticks_data[0])
else:
# tick
# print('insert_many', refcount)
database.insert_many(l1_ticks_data)
if (get_once != True):
print(u'Trading time now A {}\nProcessing ticks data cost:{:.3f}s'.format(dt.now(),
(
dt.now() - _time).total_seconds()))
if ((dt.now() - _time).total_seconds() < sleep):
timer.sleep(sleep - (dt.now() - _time).total_seconds())
print('Program Last Time {:.3f}s'.format((dt.now() - _time1).total_seconds()))
get_once = False
else:
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
# 5 QUANTAXIS/save X
save_time = dt.strptime(str(dt.now().date()) + ' 17:00', '%Y-%m-%d %H:%M')
if (dt.now() > end_time) and \
(dt.now() < save_time):
# 16:0017:00
# block
#
pass
# While513
print(u'While513 tick')
timer.sleep(40000)
def sub_codelist_l1_from_sina(codelist: list = None):
"""
L13mongodbSSD
Intel DC P3600 800GB SSD 3600tick < 0.6s
"""
quotation = easyquotation.use('sina') # ['sina'] ['tencent', 'qq']
sleep_time = 2.0
sleep = int(sleep_time)
_time1 = dt.now()
database = collections_of_today()
get_once = True
# /
end_time = dt.strptime(str(dt.now().date()) + ' 16:30', '%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
while (dt.now() < end_time):
# /
end_time = dt.strptime(str(dt.now().date()) + ' 16:30', '%Y-%m-%d %H:%M')
start_time = dt.strptime(str(dt.now().date()) + ' 09:15', '%Y-%m-%d %H:%M')
day_changed_time = dt.strptime(str(dt.now().date()) + ' 01:00',
'%Y-%m-%d %H:%M')
_time = dt.now()
if QA_util_if_tradetime(_time) and \
(dt.now() < day_changed_time):
#
print(u'~ {} '.format(datetime.date.today()))
database = collections_of_today()
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
continue
if QA_util_if_tradetime(_time) or \
(get_once): #
l1_ticks = quotation.market_snapshot(prefix=True)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks, codelist=codelist)
if (dt.now() < start_time) or \
((len(l1_ticks_data) > 0) and \
(dt.strptime(l1_ticks_data[-1]['datetime'],
'%Y-%m-%d %H:%M:%S') < dt.strptime(str(dt.now().date()) + ' 00:00',
'%Y-%m-%d %H:%M'))):
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
continue
#
l1_ticks = quotation.market_snapshot(prefix=False)
l1_ticks_data, symbol_list = formater_l1_ticks(l1_ticks,
codelist=codelist,
stacks=l1_ticks_data,
symbol_list=symbol_list)
# tick
query_id = {
"code": {
'$in': list(set([l1_tick['code'] for l1_tick in l1_ticks_data]))
},
"datetime": sorted(list(set([l1_tick['datetime'] for l1_tick in l1_ticks_data])))[-1]
}
# print(symbol_list, len(symbol_list))
refcount = database.count_documents(query_id)
if refcount > 0:
if (len(l1_ticks_data) > 1):
#
database.delete_many(query_id)
database.insert_many(l1_ticks_data)
else:
#
database.replace_one(query_id, l1_ticks_data[0])
else:
# tick
database.insert_many(l1_ticks_data)
if (get_once != True):
print(u'Trading time now A {}\nProcessing ticks data cost:{:.3f}s'.format(dt.now(),
(
dt.now() - _time).total_seconds()))
if ((dt.now() - _time).total_seconds() < sleep):
timer.sleep(sleep - (dt.now() - _time).total_seconds())
print('Program Last Time {:.3f}s'.format((dt.now() - _time1).total_seconds()))
get_once = False
else:
print(u'Not Trading time A {}'.format(_time))
timer.sleep(sleep)
# 5 QUANTAXIS/save X
save_time = dt.strptime(str(dt.now().date()) + ' 17:00', '%Y-%m-%d %H:%M')
if (dt.now() > end_time) and \
(dt.now() < save_time):
# 16:0017:00
# block
#
# save_X_func()
pass
# While513
print(u'While513 tick')
timer.sleep(40000)
def sub_1min_from_tencent_lru():
"""
K
"""
blockname = ['MSCI', 'MSCI', 'MSCI', '',
'180', '380', '300', '380',
'300', '50', '', '',
'100', '150', '300', '100',
'500', '', '', '',
'', '1000', '', '',
'', '', '', '',
'', '', '', '100',
'', '', 'A50', '',
'', '', '', '', '',
'', '', '', '',
'', '', '100', '',
'', '', '', '4G5G',
'5G', '', '', '',
'', '', '']
all_stock_blocks = QA.QA_fetch_stock_block_adv()
for blockname in blocks:
if (blockname in all_stock_blocks.block_name):
codelist_300 = all_stock_blocks.get_block(blockname).code
print(u'QA{}'.format(blockname))
print(codelist_300)
else:
print(u'QA{}'.format(blockname))
quotation = easyquotation.use("timekline")
data = quotation.real([codelist], prefix=False)
while (True):
l1_tick = quotation.market_snapshot(prefix=False)
print(l1_tick)
return True
if __name__ == '__main__':
    # Standalone entry point: start the full-market sina L1 tick subscriber
    # and report start/stop over DingTalk.  Designed to be relaunched by an
    # external shell loop (see the script snippets in the string below),
    # since the subscription process stops daily / on network errors.
    # (Original non-ASCII comments were stripped from this copy.)
    """
    sub.py D:\\QUANTAXIS\QUANTAXIS\cli
    __init__.py2__init__.py
    sub.py
    PowerShellsub_l1.ps1
    D:
    CD D:\\QUANTAXIS\
    $n = 1
    while($n -lt 6)
    {
        python -m QUANTAXIS.cli.sub
        Start-Sleep -Seconds 3
    }
    Cmd/Batchsub_l1.cmd
    D:
    CD D:\\QUANTAXIS\
    :start
    python -m QUANTAXIS.cli.sub
    @ping 127.0.0.1 -n 3 >nul
    goto start
    pause
    Linux Bashlinux
    """
    import sys
    # Hard-coded path to the DingTalk helper module on the deployment host.
    sys.path.append('/root/ipython/')
    import CommonUtils as cu
    try:
        cu.sendDingMsg("Start realtime sub from sina_l1 progress start now.")
        sub_l1_from_sina()
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt and
        # SystemExit — presumably intentional so the stop alert always
        # fires; confirm before narrowing to `except Exception:`.
        traceback.print_exc()
        cu.sendDingMsg("Realtime sub from sina_l1 progress has stopped. please check it soon.")
    pass
| 37.145594 | 143 | 0.545848 |
225834b3b08a2a19311dda4b3b5c026b4df674f0 | 3,125 | py | Python | setup.py | voxel51/eta | 17fb1148e853704872ed50e0e30c7800272b8398 | [
"Apache-2.0"
] | 25 | 2018-07-21T02:37:34.000Z | 2022-03-30T12:57:54.000Z | setup.py | voxel51/eta | 17fb1148e853704872ed50e0e30c7800272b8398 | [
"Apache-2.0"
] | 183 | 2018-06-13T18:57:00.000Z | 2022-02-24T14:35:49.000Z | setup.py | voxel51/eta | 17fb1148e853704872ed50e0e30c7800272b8398 | [
"Apache-2.0"
] | 13 | 2018-09-10T18:46:58.000Z | 2022-02-07T02:25:31.000Z | #!/usr/bin/env python
"""
Installs ETA.
Copyright 2017-2021, Voxel51, Inc.
voxel51.com
"""
import os
from setuptools import setup, find_packages
from wheel.bdist_wheel import bdist_wheel
VERSION = "0.6.1"
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="voxel51-eta",
version=get_version(),
description="Extensible Toolkit for Analytics",
author="Voxel51, Inc.",
author_email="info@voxel51.com",
url="https://github.com/voxel51/eta",
license="Apache",
long_description=long_description,
long_description_content_type="text/markdown",
packages=find_packages(),
include_package_data=True,
install_requires=[
"argcomplete",
"dill",
"future",
"glob2",
"importlib-metadata; python_version<'3.8'",
"ndjson",
"numpy",
"opencv-python-headless<5,>=4.1",
"packaging",
"patool",
"Pillow>=6.2",
"python-dateutil",
"pytz",
"requests",
"retrying",
"six",
"scikit-image",
"sortedcontainers",
"tabulate",
"tzlocal",
],
extras_require={
"pipeline": ["blockdiag", "Sphinx", "sphinxcontrib-napoleon"],
"storage": [
"boto3>=1.15",
"google-api-python-client",
"google-cloud-storage>=1.36",
"httplib2<=0.15",
"pysftp",
],
},
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Scientific/Engineering :: Image Processing",
"Topic :: Scientific/Engineering :: Image Recognition",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Visualization",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
entry_points={"console_scripts": ["eta=eta.core.cli:main"]},
python_requires=">=2.7",
cmdclass={"bdist_wheel": BdistWheelCustom},
)
| 28.935185 | 70 | 0.58944 |