blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 288 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 684 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 147 values | src_encoding stringclasses 25 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 128 12.7k | extension stringclasses 142 values | content stringlengths 128 8.19k | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
b97d80c5929e9b8e0c5d063c370f852396e5f312 | 03d7375757324e7ff1d3c9ab187230581d54416d | /backend/rent_a_road_test_3250/wsgi.py | 579cf1810f7da700f3d420b0e011e94c1c08dd8b | [] | no_license | crowdbotics-apps/rent-a-road-test-3250 | 6d657e2d68dc1c5d94fa4504b239ff2b5caea1fc | 847636c17ed56722f64dc3b18765c81364aaf80a | refs/heads/master | 2022-12-08T10:54:58.557243 | 2019-05-13T01:35:42 | 2019-05-13T01:35:42 | 186,325,879 | 0 | 0 | null | 2022-12-06T15:59:27 | 2019-05-13T01:35:39 | JavaScript | UTF-8 | Python | false | false | 420 | py | """
WSGI config for rent_a_road_test_3250 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at this project's settings module before building the app.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rent_a_road_test_3250.settings")
# Module-level WSGI callable looked up by servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
76272de482e0e63b6b221d492c32ad0b05144b48 | 3a9f2b3d79cf214704829427ee280f4b49dca70a | /saigon/rat/RuckusAutoTest/tests/zd/CB_ZD_Mesh_Recovery_SSID_Testing.py | fda260e9213a84cb8606005a0f95c60cdf109186 | [] | no_license | jichunwei/MyGitHub-1 | ae0c1461fe0a337ef459da7c0d24d4cf8d4a4791 | f826fc89a030c6c4e08052d2d43af0b1b4b410e3 | refs/heads/master | 2021-01-21T10:19:22.900905 | 2016-08-20T03:34:52 | 2016-08-20T03:34:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,930 | py | # Copyright (C) 2011 Ruckus Wireless, Inc. All rights reserved.
# Please make sure the following module docstring is accurate since it will be used in report generation.
"""
Description:
@author: An Nguyen
@contact: an.nguyen@ruckuswireless.com
@since: Feb 2012
Prerequisite (Assumptions about the state of the test bed/DUT):
1. Build under test is loaded on the AP and Zone Director
Required components: 'Station', 'RuckusAP', 'ZoneDirector'
Test parameters:
Test procedure:
1. Config:
-
2. Test:
- Verify if the mesh tree are match with expected
3. Cleanup:
-
Result type: PASS/FAIL
Results: PASS: If the mesh tree is not changed
FAIL: If the mesh tree is changed
Messages: If FAIL the test script returns a message related to the criterion that is not satisfied
"""
import time
import logging
from pprint import pformat
from RuckusAutoTest.models import Test
from RuckusAutoTest.components import Helpers as lib
from RuckusAutoTest.tests.zd import libZD_TestMethods as tmethod
from RuckusAutoTest.tests.zd import libZD_TestConfig as tconfig
from RuckusAutoTest.common import lib_Debug as bugme
class CB_ZD_Mesh_Recovery_SSID_Testing(Test):
    # Testbed components this combo test needs in order to run.
    required_components = ['Station', 'RuckusAP', 'ZoneDirector']
    # No extra test parameters beyond the common conf dict.
    parameter_description = {
    }
    def config(self, conf):
        """Framework hook: store settings and resolve components before test()."""
        self._init_test_parameter(conf)
def test(self):
self._get_recovery_ssid_info()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._change_mesh_ssid_and_reboot_ap()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._verify_station_could_scan_the_recovery_ssid()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
self._verify_station_could_connect_to_recovery_ssid_wlan()
if self.errmsg: return self.returnResult('FAIL', self.errmsg)
return self.returnResult('PASS', self.passmsg)
    def cleanup(self):
        # No teardown implemented for this test.
        pass
    def _init_test_parameter(self, conf):
        """Merge caller config over defaults and look up testbed components."""
        # Default scan/associate timeout in seconds; callers may override.
        self.conf = {'timeout': 600}
        self.conf.update(conf)
        self.errmsg = ''
        self.passmsg = ''
        self.zd = self.testbed.components['ZoneDirector']
        # AP / station instances are passed between steps via the carrierbag.
        self.active_ap = self.carrierbag[self.conf['ap_tag']]['ap_ins']
        self.target_station = self.carrierbag[self.conf['sta_tag']]['sta_ins']
    def _get_recovery_ssid_info(self):
        """Read the AP's recovery-ssid settings and cache the WLAN config.

        On any problem sets self.errmsg and returns; on success fills
        self.recovery_wlan_cfg with ssid / key / encryption parameters
        suitable for a station association attempt.
        """
        self.recovery_ssid_info = lib.apcli.radiogrp.get_recovery_ssid(self.active_ap)
        if not self.recovery_ssid_info['timeout'] and not self.recovery_ssid_info['service_wlan']:
            self.errmsg = 'The recovery-ssid function is not supported. Please check the system.'
        elif self.recovery_ssid_info['service_wlan'].lower() != 'enabled':
            self.errmsg = 'The service wlan is not enabled as expected.'
        if self.errmsg: return
        self.recovery_wlan_cfg = {}
        wlan_list_info = lib.apcli.radiogrp.get_wlanlist(self.active_ap)
        for wlan_info in wlan_list_info:
            if wlan_info['name'] == 'recovery-ssid':
                self.recovery_wlan_cfg['ssid'] = wlan_info['ssid']
                passphrase = lib.apcli.radiogrp.get_passphrase(self.active_ap, wlan_info['wlanid'])
                if passphrase['passphrase'] == 'DID NOT EXIST':
                    self.errmsg = 'Can not get the recovery-ssid wlan passphrase'
                    return
                # First element holds the passphrase string.
                self.recovery_wlan_cfg['key_string'] = passphrase['passphrase'][0]
                # Recovery WLAN is always WPA2/AES with PSK auth.
                self.recovery_wlan_cfg['encryption'] = 'AES'
                self.recovery_wlan_cfg['wpa_ver'] = 'WPA2'
                self.recovery_wlan_cfg['auth'] = 'PSK'
                return
        self.errmsg = 'Can not find out the recovery-ssid wlan settings'
    def _change_mesh_ssid_and_reboot_ap(self):
        """Break the mesh uplink and reboot the AP.

        Changing the meshu/meshd SSIDs prevents the mesh AP from rejoining
        the system after reboot, which should trigger its recovery-ssid
        WLAN; then wait out the advertised recovery timeout.
        """
        test_ssid = 'test-recovery-ssid'
        logging.info('Change the mesh ssid to %s and reboot the active ap' % test_ssid)
        self.active_ap.cmd('set ssid meshu %s' % test_ssid)
        self.active_ap.cmd('set ssid meshd %s' % test_ssid)
        # login=False: the AP will not be reachable after the reboot.
        self.active_ap.reboot(login=False)
        msg = 'Waiting %s seconds for the recovery-ssid wlan [%s] is up'
        msg = msg % (self.recovery_ssid_info['timeout'], self.recovery_wlan_cfg['ssid'])
        logging.info(msg)
        time.sleep(int(self.recovery_ssid_info['timeout']))
    def _verify_station_could_scan_the_recovery_ssid(self):
        """Check the station can see the recovery WLAN in the air within the timeout."""
        msg = tmethod.verify_wlan_in_the_air(self.target_station,
                                             self.recovery_wlan_cfg['ssid'],
                                             self.conf['timeout'])
        # The helper returns a human-readable message; match on its failure text.
        if "The station didn't see the WLAN" in msg:
            self.errmsg = '[SCANNED IN %s SECS] %s' % (self.conf['timeout'], msg)
        else:
            self.passmsg = 'The recovery-ssid wlan[%s] is up as expected.' % self.recovery_wlan_cfg['ssid']
def _verify_station_could_connect_to_recovery_ssid_wlan(self):
"""
"""
tconfig.remove_all_wlan_from_station(self.target_station,
check_status_timeout = self.conf['timeout'])
self.errmsg = tmethod.assoc_station_with_ssid(self.target_station,
self.recovery_wlan_cfg,
self.conf['timeout'])
if not self.errmsg:
passmsg = '; target station could connect to the recovery-ssid wlan %s'
passmsg = passmsg % str(self.recovery_wlan_cfg)
self.passmsg += passmsg | [
"tan@xx.com"
] | tan@xx.com |
1f82a2aa9fdec35d68ece70fff55bb94073d3df2 | 62e5b9ccdc8ee3671156919a2b44fba17e429089 | /bin/env.py | 4f6cb5de4ccf59fb09d32cb7b7ad6f32862e84bf | [
"CC0-1.0"
] | permissive | ccgeom/ccg-notes | 609cf4a761ec3d0f24a14cea408e9d5604fafc74 | 6fade9e0ebbbe747d0f07457aa8047470d15ca1b | refs/heads/master | 2022-08-26T10:43:15.112786 | 2022-07-08T04:39:46 | 2022-07-08T04:39:46 | 62,777,276 | 12 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,046 | py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
import sys
import os
import virtualenv as venv
"""
Colorful output
"""
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"
def head(msg):
print(HEADER + msg + ENDC)
def info(msg):
print(msg)
def infog(msg):
print(OKGREEN + msg + ENDC)
def infob(msg):
print(OKBLUE + msg + ENDC)
def warn(msg):
print(WARNING + msg + ENDC)
def err(msg):
print(FAIL + msg + ENDC)
"""
Welcome message
"""
head("Welcome!")
"""
Check python version
"""
info("checking python version...")
req_version = (3, 7)
cur_version = sys.version_info
if cur_version < req_version:
err("Your Python interpreter is too old. Please consider upgrading to 3.6 or above.")
sys.exit(-1)
"""
Check virtual enviroment
"""
if not os.path.exists(".py"):
if cur_version >= (3, 7, 7):
sys.argv = ['.py']
venv.cli_run(sys.argv)
#else:
#sys.argv = ['virtualenv', '.py']
#venv.main()
| [
"mingli.yuan@gmail.com"
] | mingli.yuan@gmail.com |
a7d718c61e135fceba95959631eed10c720cf4dd | 53ccc4f5198d10102c8032e83f9af25244b179cf | /SoftUni Lessons/Python Development/Python Fundamentals June 2019/Problems and Files/03. PYTHON INTRO, FUNCTIONS, DEBUGGING/Functions-and-Debugging/04. Draw a Filled Square.py | e4dde5e933fe0544aed1d0ecfb3f1f25e1cdd415 | [] | no_license | SimeonTsvetanov/Coding-Lessons | aad32e0b4cc6f5f43206cd4a937fec5ebea64f2d | 8f70e54b5f95911d0bdbfda7d03940cb824dcd68 | refs/heads/master | 2023-06-09T21:29:17.790775 | 2023-05-24T22:58:48 | 2023-05-24T22:58:48 | 221,786,441 | 13 | 6 | null | null | null | null | UTF-8 | Python | false | false | 1,563 | py | """
Functions and Debugging
Проверка: https://judge.softuni.bg/Contests/Compete/Index/922#3
04. Draw a Filled Square
Условие:
Draw at the console a filled square of size n like in the example:
Examples
Input:
4
Output:
--------
-\/\/\/-
-\/\/\/-
--------
Hints
1. Read the input
2. Create a function which will print the top and the bottom rows (they are the same).
Don’t forget to give it a descriptive name and to give it as a parameter some length
3. Create the function which will print the middle rows.
4. Use the functions that you've just created to draw a square.
"""
def top_and_bottom(size):
    """Print the solid top/bottom border row: 2 * size dashes."""
    print("-" * (size * 2))


def filling(size):
    """Print one interior row: a dash, (size - 1) "\\/" pairs, a dash."""
    # (size * 2 - 2) // 2 == size - 1 pairs fill the row between the edges.
    print("-" + "\\/" * (size - 1) + "-")


def square(size):
    """Draw the whole filled square of the given size on stdout."""
    top_and_bottom(size)
    for _ in range(size - 2):
        filling(size)
    top_and_bottom(size)


if __name__ == '__main__':
    # Read the size from stdin only when run as a script, so the drawing
    # helpers stay importable and testable (they no longer read a global).
    square(int(input()))
# """
# Input: 4
#
# Output:
# --------
# -\/\/\/-
# -\/\/\/-
# --------
# """
#
#
# def header_and_footer(number):
# print(number * 2 * "-")
#
#
# def filling(number):
# internal = int((number * 2 - 2) / 2) * "\\/"
# for row in range(number - 2):
# print(f"-{internal}-")
#
#
# def print_square(number):
# header_and_footer(number)
# filling(number)
# header_and_footer(number)
#
#
# if __name__ == '__main__':
# print_square(number=int(input()))
#
#
| [
"noreply@github.com"
] | SimeonTsvetanov.noreply@github.com |
9a9cca7de31b0609496ea4dcd0f6039c86ee463c | ac0b9c85542e6d1ef59c5e9df4618ddf22223ae0 | /kratos/applications/ManagerApplication/test_examples/henry.py | d9cc93e644ba5e85b9ef36843485c17350e78ac9 | [] | no_license | UPC-EnricBonet/trunk | 30cb6fbd717c1e78d95ec66bc0f6df1a041b2b72 | 1cecfe201c8c9a1b87b2d87faf8e505b7b1f772d | refs/heads/master | 2021-06-04T05:10:06.060945 | 2016-07-15T15:29:00 | 2016-07-15T15:29:00 | 33,677,051 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,736 | py | #setting the domain size for the problem to be solved
domain_size = 2 # 2D problem

#including kratos path
import sys
from KratosMultiphysics import * #we import the KRATOS
from KratosMultiphysics.ManagerApplication import * #and now our application
from KratosMultiphysics.HenryApplication import * #and now our application

#Create a new empty model part called "ExampleModelPart"
model_part = ModelPart("ExampleModelPart");
print ("Model part defined: ExampleModelPart")

#we import the python file that includes the commands that we need
import fractional_iterative_solver

#import variables that we will need from solver to our recent created model_part
fractional_iterative_solver.AddVariables(model_part)
# (note that our model part does not have nodes or elements yet)

# introducing input & output (also postProcess) file name
output_file_name = "result_henry"
input_file_name = "henry"

# Mesh built by GID for the postProcess (ASCII output, one file per step).
gid_mode = GiDPostMode.GiD_PostAscii
multifile = MultiFileFlag.MultipleFiles
deformed_mesh_flag = WriteDeformedMeshFlag.WriteUndeformed
write_conditions = WriteConditionsFlag.WriteElementsOnly
gid_io = GidIO(output_file_name, gid_mode, multifile, deformed_mesh_flag, write_conditions)
model_part_io_fluid = ModelPartIO(input_file_name)
model_part_io_fluid.ReadModelPart(model_part)
# Problem flags: stationary flow, no buoyancy, transient transport.
model_part.ProcessInfo.SetValue(IS_FLOW_STATIONARY, 1);
model_part.ProcessInfo.SetValue(IS_BUOYANCY, 0);
model_part.ProcessInfo.SetValue(IS_TRANSPORT_STATIONARY, 0);
model_part.ProcessInfo.SetValue(GRAVITY_X, 0.0);
model_part.ProcessInfo.SetValue(GRAVITY_Y, -9.81);
#Aarchivo = open("MatrixSystem.txt", "r")
#Acontenido = archivo.read()
#Aprint (contenido)
#the buffer size should be set up after the mesh is read for the first time (Transient problem =2,3. Steady problem =1)
model_part.SetBufferSize(2)
# we add the DoFs
fractional_iterative_solver.AddDofs(model_part)
print ("Time steps values on each time for unkown=2 (Buffer size)")
#creating flow solver (custom)
solver = fractional_iterative_solver.FractionalIterativeSolver(model_part,domain_size)

#we import the C++ file that includes the commands that we need
#import fractional_iterative_strategy

## This part is contained is for the FractionalSolver not included at fractional_iterative_solver.py
solver.time_order = 1
solver.echo_level = 3
print ("Iterative solver create succesfully")
solver.Initialize()

# Time-stepping controls: Nsteps iterations of size Dt; write every out_step.
Nsteps=40
Dt=2
out=0
out_step=1
print ("Solver inicializate!")
#solver.calculateDensityNodes()

# Comparison file with the reference system values used by the assertions below.
filePost="SystemTest.txt"
#print(model_part.Elements)
#darcyXmatrix=Matrix(Nsteps,300)
solver.ReadFile(filePost,Nsteps)
print("Unitary")
solver.UnitaryTest()
#darcyXmatrix=solver.ReadFile(filePost,Nsteps)
#print ("darcyXmatrix[1,200]")
#print (darcyXmatrix[1,200])

# Write the (static) mesh once for the GiD post-process.
mesh_name = 0.0
gid_io.InitializeMesh( mesh_name );
gid_io.WriteMesh((model_part).GetMesh());
gid_io.FinalizeMesh()
gid_io.InitializeResults(mesh_name,(model_part).GetMesh())

#clean file output for matrix
open('MatrixSystem.txt', 'w').close()
print ("Mesh read for the postProcess")
#if step==0:
    #for node in model_part.Nodes:
        #node.SetSolutionStepValue(PRESSURE,0,0.0)
        #node.SetSolutionStepValue(CONCENTRATION,0,0.0)
        #if(node.Y < 0.5):
            #node.SetSolutionStepValue(PRESSURE,0,9800.0)
            #node.SetSolutionStepValue(CONCENTRATION,0,0.0)
        #else:
            #node.SetSolutionStepValue(PRESSURE,0,0.0)
            #node.SetSolutionStepValue(CONCENTRATION,0,0.0)

# Initial (t=0) nodal fields for the post-process.
gid_io.WriteNodalResults(PRESSURE,model_part.Nodes,0, 0)
gid_io.WriteNodalResults(CONCENTRATION,model_part.Nodes,0,0)
gid_io.WriteNodalResults(DENSITY,model_part.Nodes,0,1000)
gid_io.WriteNodalResults(DARCY_FLOW_BALANCE,model_part.Nodes,0,0)
gid_io.WriteNodalResults(SINKSOURCE_BALANCE,model_part.Nodes,0,0)

# The time module is aliased as `timer` because `time` below is reused as
# the simulation time variable.
import time as timer
t1 = timer.time()

for step in range(1,Nsteps):
    out=out+1
    time = Dt*step
    print ("new step, time:",time)
    model_part.CloneTimeStep(time)
    solver.Solve()
    print ("Assert")
    solver.AssertVariables(step,Nsteps)
    # Dump nodal results every out_step steps.
    if out==out_step:
        out=0
        # print ("printing step:",step)
        gid_io.WriteNodalResults(PRESSURE,model_part.Nodes,time,0)
        gid_io.WriteNodalResults(CONCENTRATION,model_part.Nodes,time,0)
        gid_io.WriteNodalResults(DENSITY,model_part.Nodes,time,0)
        gid_io.WriteNodalResults(DARCY_FLOW_BALANCE,model_part.Nodes,time,0)
        gid_io.WriteNodalResults(SINKSOURCE_BALANCE,model_part.Nodes,time,0)
        gid_io.Flush()

t2=timer.time()
total_time=t2-t1
print ("total_time", total_time)
gid_io.FinalizeResults()
| [
"enriquebonetgil@hotmail.com"
] | enriquebonetgil@hotmail.com |
6f444bfe806a2c58d244d733db0a18853b36e3a1 | 97cb35277f4cbeb44b83ce75a4bcd8c008863d2d | /articles/migrations/0001_initial.py | e6c9b63f6136de188e39b1c141e3a7f462120d3d | [] | no_license | TareqMonwer/CS-Basic-Django-Project | 586dd17370929428dcc1ad8a3a9b83b487a40382 | a0769caba38b6063b4761d3f8cdb593f7053f12e | refs/heads/main | 2023-07-15T19:19:49.337261 | 2021-08-28T17:21:54 | 2021-08-28T17:21:54 | 400,535,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 782 | py | # Generated by Django 3.2.6 on 2021-08-28 16:38
from django.db import migrations, models
import django.db.models.functions.text
class Migration(migrations.Migration):
    # Auto-generated initial migration for the articles app: creates the
    # Article model (admin-facing name "Post", ordered case-insensitively
    # by title).

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=255)),
                ('content', models.TextField()),
            ],
            options={
                'verbose_name': 'Post',
                'verbose_name_plural': 'Posts',
                'ordering': [django.db.models.functions.text.Lower('title')],
            },
        ),
    ]
| [
"tareqmonwer137@gmail.com"
] | tareqmonwer137@gmail.com |
e2f87b38b11fcc268984013e2471c1e0b3709e7a | 8951fd5293dfb77c64ceddd19459e99a0c1cf677 | /kvmagent/kvmagent/test/test_nfs_primary_storage_create_empty_volume.py | c77db100f78667df482bbef222b8634d3420813f | [
"Apache-2.0"
] | permissive | SoftwareKing/zstack-utility | 7cdc229f05ac511214135fcaa88b5acf5aa08126 | 4765928650cde4f4472a960de9e93a849a5555e3 | refs/heads/master | 2021-01-18T18:20:55.913454 | 2015-09-01T13:39:26 | 2015-09-01T13:39:26 | 41,954,728 | 1 | 0 | null | 2015-09-05T08:46:12 | 2015-09-05T08:46:12 | null | UTF-8 | Python | false | false | 2,127 | py | '''
@author: Frank
'''
import unittest
import time
import os.path
from kvmagent import kvmagent
from kvmagent.plugins import nfs_primarystorage_plugin
from zstacklib.utils import http
from zstacklib.utils import jsonobject
from zstacklib.utils import log
from zstacklib.utils import uuidhelper
from zstacklib.utils import linux
logger = log.get_logger(__name__)
class Test(unittest.TestCase):
    """Integration test: mount an NFS primary storage and create an empty volume.

    Requires a local NFS export (NFS_URL) and the kvmagent REST service;
    this is Python 2 code (print statement) doing real network and disk I/O.
    """
    NFS_URL = 'localhost:/home/primary'
    CALLBACK_URL = 'http://localhost:7070/testcallback'

    def callback(self, req):
        # Async completion callback registered on the test HTTP server.
        rsp = jsonobject.loads(req[http.REQUEST_BODY])
        print jsonobject.dumps(rsp, True)

    def setUp(self):
        # Start the agent's REST service and register the callback endpoint.
        self.service = kvmagent.new_rest_service()
        kvmagent.get_http_server().register_sync_uri('/testcallback', self.callback)
        self.service.start(True)
        time.sleep(1)

    def mount(self):
        """Mount the NFS export under a unique /mnt path and verify it."""
        cmd = nfs_primarystorage_plugin.MountCmd()
        cmd.url = self.NFS_URL
        cmd.mountPath = os.path.join('/mnt', uuidhelper.uuid())
        callurl = kvmagent._build_url_for_test([nfs_primarystorage_plugin.MOUNT_PATH])
        ret = http.json_dump_post(callurl, cmd)
        rsp = jsonobject.loads(ret)
        self.assertTrue(rsp.success, rsp.error)
        self.assertTrue(linux.is_mounted(cmd.url, cmd.mountPath))

    def testName(self):
        """Create an empty qcow2 volume on the mounted primary storage."""
        self.mount()
        cmd = nfs_primarystorage_plugin.CreateEmptyVolumeCmd()
        cmd.accountUuid = uuidhelper.uuid()
        cmd.hypervisorType = 'KVM'
        cmd.installUrl = '/tmp/emptyvolume.qcow2'
        cmd.name = 'testEmptyVolume'
        cmd.size = '1410400256'
        cmd.uuid = uuidhelper.uuid()
        url = kvmagent._build_url_for_test([nfs_primarystorage_plugin.CREATE_EMPTY_VOLUME_PATH])
        # Async call: the result arrives on CALLBACK_URL; sleep to let it land.
        rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL})
        time.sleep(5)
        self.service.stop()
        linux.umount_by_url(self.NFS_URL)
linux.umount_by_url(self.NFS_URL)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main() | [
"xing5820@gmail.com"
] | xing5820@gmail.com |
9f6bb109db8b941489c1b95f3573d745e1820854 | 64b135891387dac3a4bb29f3001a524830d0e4e4 | /identities/urls.py | abfe0abcb60a404366d0a0c606133c58c7fdf59d | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | dynamicguy/treeio | 9ad52802722b64a212e22710c04dbb0bb50d831e | 4f674898cff2331711639a9b5f6812c874a2cb25 | refs/heads/master | 2021-08-28T11:25:41.504635 | 2014-01-31T17:16:22 | 2014-01-31T17:16:22 | 11,323,559 | 0 | 0 | NOASSERTION | 2021-08-16T20:18:53 | 2013-07-10T20:31:31 | Python | UTF-8 | Python | false | false | 4,876 | py | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Identities module URLs
"""
from django.conf.urls.defaults import patterns, url
# URL routing for the identities module (Django 1.x patterns() style).
# Most routes accept an optional ".<format>" suffix captured as
# response_format (e.g. ".json") to select the response serialisation.
urlpatterns = patterns('treeio.identities.views',
    url(r'^(\.(?P<response_format>\w+))?$', 'index', name='identities'),
    url(r'^index(\.(?P<response_format>\w+))?$', 'index', name='identities_index'),
    url(r'^users(\.(?P<response_format>\w+))?/?$', 'index_users', name='identities_index_users'),
    url(r'^groups(\.(?P<response_format>\w+))?/?$', 'index_groups', name='identities_index_groups'),
    url(r'^types/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'type_view', name='identities_index_by_type'),

    # Types
    url(r'^type/view/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'type_view', name='identities_type_view'),
    url(r'^type/edit/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'type_edit', name='identities_type_edit'),
    url(r'^type/delete/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'type_delete', name='identities_type_delete'),
    url(r'^type/add(\.(?P<response_format>\w+))?/?$', 'type_add', name='identities_type_add'),

    # Fields
    url(r'^field/view/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'field_view', name='identities_field_view'),
    url(r'^field/edit/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'field_edit', name='identities_field_edit'),
    url(r'^field/delete/(?P<field_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'field_delete', name='identities_field_delete'),
    url(r'^field/add(\.(?P<response_format>\w+))?/?$', 'field_add', name='identities_field_add'),

    # Contacts
    url(r'^contact/add(\.(?P<response_format>\w+))?/?$', 'contact_add', name='identities_contact_add'),
    url(r'^contact/add/(?P<type_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'contact_add_typed', name='identities_contact_add_typed'),
    url(r'^me(\.(?P<response_format>\w+))?/?$', 'contact_me', name='identities_contact_me'),
    url(r'^me/objects/(?P<attribute>[a-z_.]+)/list(\.(?P<response_format>\w+))?/?$',
        'contact_me', name='identities_contact_me_objects'),
    url(r'^contact/view/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'contact_view', name='identities_contact_view'),
    url(r'^contact/view/(?P<contact_id>\d+)/objects/(?P<attribute>[a-z_.]+)/list(\.(?P<response_format>\w+))?/?$',
        'contact_view', name='identities_contact_view_objects'),
    url(r'^contact/view/(?P<contact_id>\d+)/picture/?$',
        'contact_view_picture', name='identities_contact_view_picture'),
    url(r'^contact/edit/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'contact_edit', name='identities_contact_edit'),
    url(r'^contact/delete/(?P<contact_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'contact_delete', name='identities_contact_delete'),
    url(r'^user/view/(?P<user_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'user_view', name='identities_user_view'),
    url(r'^group/view/(?P<group_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'group_view', name='identities_group_view'),
    url(r'^settings/view(\.(?P<response_format>\w+))?/?$',
        'settings_view', name='identities_settings_view'),

    # Locations
    url(r'^location/index(\.(?P<response_format>\w+))?/?$',
        'location_index', name='identities_location_index'),
    url(r'^location/view/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'location_view', name='identities_location_view'),
    url(r'^location/edit/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'location_edit', name='identities_location_edit'),
    url(r'^location/delete/(?P<location_id>\d+)(\.(?P<response_format>\w+))?/?$',
        'location_delete', name='identities_location_delete'),
    url(r'^location/add(\.(?P<response_format>\w+))?/?$', 'location_add', name='identities_location_add'),

    # AJAX callbacks
    url(r'^ajax/users(\.(?P<response_format>\w+))?/?$',
        'ajax_user_lookup', name='identities_ajax_user_lookup'),
    url(r'^ajax/access(\.(?P<response_format>\w+))?/?$',
        'ajax_access_lookup', name='identities_ajax_access_lookup'),
    url(r'^ajax/contacts(\.(?P<response_format>\w+))?/?$',
        'ajax_contact_lookup', name='identities_ajax_contact_lookup'),
    url(r'^ajax/locations(\.(?P<response_format>\w+))?/?$',
        'ajax_location_lookup', name='identities_ajax_location_lookup'),
)
| [
"letoosh@gmail.com"
] | letoosh@gmail.com |
2a4b56c826c10ff5f08b34c6eaad0370b5779382 | ca2ebf664e1d4b7338d014ca92a95bebe31063ff | /greeter_client.py | 3edd2bd9bf6ef469dc18f0f5cd08cb4c4f03ab5b | [] | no_license | ttpro1995/playground-grpc | cfc5d9fbad085da39286cad25181884d3d052468 | 3b41ebbaadc27deb0c06b806a3dcc928f3eaaf34 | refs/heads/master | 2023-03-26T10:26:10.595902 | 2021-03-27T11:29:57 | 2021-03-27T11:29:57 | 352,055,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 464 | py | import grpc
import hello_pb2_grpc
import hello_pb2
channel = grpc.insecure_channel('localhost:50051')
stub = hello_pb2_grpc.GreeterStub(channel)
response = stub.SayHello(hello_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(hello_pb2.HelloRequest(name='you'))
print("Greeter client received: " + response.message)
response = stub.SayHelloAgain(hello_pb2.HelloRequest(name='meow'))
print(response)
| [
"tthien@apcs.vn"
] | tthien@apcs.vn |
f4084b1a4d000098c78ec59feb3865cb7fad3d77 | d23e26d37b42fbce4fe51add8f2d3b29bc38f865 | /projecteuler/p035.py | fd3d2afae9dbc1a44e36e60c0025772d79f975b7 | [
"MIT"
] | permissive | rene-d/math | 6728908a3c6c6c6dc5cf77c1c8a52412c90459b9 | 34d33bdfbf2756f442c0deb085b940262d8a1f44 | refs/heads/master | 2022-11-05T04:20:41.204352 | 2022-10-23T08:01:04 | 2022-10-25T06:06:19 | 117,944,288 | 4 | 0 | null | 2018-02-02T21:45:40 | 2018-01-18T06:50:42 | Python | UTF-8 | Python | false | false | 814 | py | """
Circular primes
The number, 197, is called a circular prime because all rotations of the digits:
197, 971, and 719, are themselves prime.
There are thirteen such primes below 100: 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, and 97.
How many circular primes are there below one million?
https://projecteuler.net/problem=35
"""
from eulerlib import Crible
def rotation(n):
    """Return every cyclic rotation of the digits of ``n``.

    Each step moves the last digit to the front; the final element is
    ``n`` itself, e.g. rotation(197) == [719, 971, 197].

    The previous digit-arithmetic version computed ``10 ** (chiffres - 1)``
    with ``chiffres == 0`` for n == 0 (a float 0.1) and returned []; the
    string-based form handles every non-negative integer uniformly.
    """
    s = str(n)
    # Moving the last i digits to the front == i right-rotations.
    return [int(s[-i:] + s[:-i]) for i in range(1, len(s) + 1)]
# Count circular primes below one million: a prime is "circular" when every
# rotation of its digits is itself prime.
crible = Crible(1000000)

resultat = 0
for i in crible.liste():
    # Generator form lets all() short-circuit on the first composite rotation
    # (the original built a full list first).
    if all(crible.est_premier(j) for j in rotation(i)):
        resultat += 1

print(resultat)
| [
"rene.devichi@gmail.com"
] | rene.devichi@gmail.com |
ed18e68ccd011c6613dbedac5a2f84f27a16f8ca | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/common/dossiers2/__init__.py | 88ded47466e28f07d7c6af032904a1d6282b0a24 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 610 | py | # 2017.08.29 21:52:49 Střední Evropa (letní čas)
# Embedded file name: scripts/common/dossiers2/__init__.py
from dossiers2.common.utils import getDossierVersion
from dossiers2.custom import updaters
from dossiers2.custom.builders import *
def init():
    """Initialise the dossiers2 package: custom records first, then UI bindings."""
    # Imports are local to avoid importing the sub-packages at module load time.
    from dossiers2.custom import init as custom_init
    custom_init()
    from dossiers2.ui import init as ui_init
    ui_init()
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\common\dossiers2\__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:52:49 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
7b0ab2d7afed8a101332881219940266b8fe20d0 | 20acd4e916ce4bccbfaba12158e348e49923c46b | /setup.py | ff24496d325c047d807ce593e0428cf9216e5367 | [] | no_license | noseapp/noseapp_alchemy | 3df4999a9fcc476f42624609ed049a8528dbdfff | f6606990befd147852fd939c16a6f85de143d52f | refs/heads/master | 2023-04-30T21:46:28.407386 | 2015-05-29T17:47:36 | 2015-05-29T17:47:36 | 30,965,543 | 1 | 1 | null | 2023-04-16T02:42:30 | 2015-02-18T12:56:29 | Python | UTF-8 | Python | false | false | 655 | py | # -*- coding: utf-8 -*-
from setuptools import setup
from setuptools import find_packages
__version__ = '1.0.0'
if __name__ == '__main__':
setup(
name='noseapp_alchemy',
url='https://github.com/trifonovmixail/noseapp_alchemy',
version=__version__,
packages=find_packages(),
author='Mikhail Trifonov',
author_email='mikhail.trifonov@corp.mail.ru',
description='SqlAlchemy extension for noseapp lib',
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'noseapp>=1.0.9',
'sqlalchemy==0.9.8',
],
)
| [
"mikhail.trifonov@corp.mail.ru"
] | mikhail.trifonov@corp.mail.ru |
f65b0b1dc6d7043f86f1a147c66acad09a14d0b1 | f7574ee7a679261e758ba461cb5a5a364fdb0ed1 | /PopulatingNextRightPointersinEachNodeII.py | 1ff5842f547af6b76b2f6a0ce91307d35175faa3 | [] | no_license | janewjy/Leetcode | 807050548c0f45704f2f0f821a7fef40ffbda0ed | b4dccd3d1c59aa1e92f10ed5c4f7a3e1d08897d8 | refs/heads/master | 2021-01-10T19:20:22.858158 | 2016-02-26T16:03:19 | 2016-02-26T16:03:19 | 40,615,255 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,657 | py | # Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
    def connect(self, root):
        """Link each node's ``next`` pointer to its right neighbour on the
        same level (the rightmost node of each level keeps ``next = None``).

        Iterative level-order BFS; O(n) time, O(width) extra space.
        Fix: the original used ``xrange``, which is Python-2-only and raises
        NameError under Python 3; ``enumerate`` works on both.

        :type root: TreeLinkNode
        :rtype: None (the tree is modified in place)
        """
        if not root:
            return
        level = [root]
        while level:
            next_level = []
            for i, node in enumerate(level):
                # Collect children for the next level, left to right.
                if node.left:
                    next_level.append(node.left)
                if node.right:
                    next_level.append(node.right)
                # Wire this node to its right neighbour.
                if i < len(level) - 1:
                    node.next = level[i + 1]
            level = next_level
# Definition for binary tree with next pointer.
# class TreeLinkNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution(object):
    def connect(self, root):
        """Populate each node's ``next`` pointer with its right neighbour.

        Works for an arbitrary (not necessarily perfect) binary tree.
        Level-order traversal; the tree is modified in place.
        Fix: replaced Python-2-only ``xrange`` with ``range`` so the code
        runs under both Python 2 and 3.

        :type root: TreeLinkNode
        :rtype: None
        """
        if not root:
            return
        front = [root]
        while front:
            level = []
            last = len(front) - 1
            for i in range(len(front)):
                node = front[i]
                if node.left:
                    level.append(node.left)
                if node.right:
                    level.append(node.right)
                if i != last:
                    node.next = front[i + 1]
            front = level
"janewjy87@gmail.com"
] | janewjy87@gmail.com |
793df36891361844131a4d33e050c547c0b01bfc | 59d7db2d959e071991ece694728958b08a6f7c58 | /envs/create_game/levels/create_game_marker.py | 772a0d58b12cf5021463822a56ef3b8f0d5823b7 | [] | no_license | Sivinious/cse257 | 04cdd6f14a7ac0db66626e93305e4015256f1433 | 6b6f21c289094487da89b261af0dacba8135cd25 | refs/heads/main | 2023-05-15T08:12:54.908326 | 2021-05-29T10:12:39 | 2021-05-29T10:12:39 | 371,932,535 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,203 | py | import numpy as np
from .create_level_file import CreateLevelFile
def ball_begin_handler(arbiter, space, data):
    """Collision-begin callback: flag the marker ball when it touches the target.

    Whichever of the two colliding shapes is tagged ``is_marker`` gets
    ``hit_target = True`` if the other is tagged ``is_target``.  Always
    returns True so the physics engine processes the collision normally.
    """
    shape_a = arbiter.shapes[0]
    shape_b = arbiter.shapes[1]
    if hasattr(shape_b, 'is_marker') and hasattr(shape_a, 'is_target'):
        shape_b.hit_target = True
    if hasattr(shape_a, 'is_marker') and hasattr(shape_b, 'is_target'):
        shape_a.hit_target = True
    return True
class CreateGameMarker(CreateLevelFile):
    """
    Level variant with a second "marker" ball that must first collide with
    the target ball.  The marker/target contact is detected through a
    collision callback (ball_begin_handler); reward shaping then pulls the
    marker toward the target and, once the target has moved, the target
    toward the goal.  Inherit from this class to provide additional
    behaviour.
    """
    def __init__(self, available_tools=None, gravity=(0.0, -2.0),
            tool_variety=True, tool_gen=None):
        # NOTE(review): the constructor arguments are accepted but not
        # forwarded to the base class here — confirm whether that is intended.
        super().__init__()
        self.hit_target_handler = None
        self.marker_must_hit = False
        self.target_reward = 0.0

    def set_settings(self, settings):
        # Reward granted when the target ball is judged "hit" by the marker.
        super().set_settings(settings)
        self.target_reward = settings.target_reward

    def reset(self):
        """Reset the level and (re)wire the marker/target collision handler."""
        obs = super().reset()
        # By convention the first env tool is the marker ball.
        self.marker_obj = self.env_tools[0]
        self.marker_obj.shape.is_marker = True
        self.target_obj.shape.is_target = True
        # Register the pymunk collision callback only once per instance.
        if self.hit_target_handler is None:
            self.hit_target_handler = self.space.add_collision_handler(self.marker_obj.shape.collision_type,
                    self.target_obj.shape.collision_type)
            self.hit_target_handler.begin = ball_begin_handler
        # Dense-reward baseline: marker-to-target distance.
        self.prev_dist = self.calc_distance(self.target_obj.body.position, self.marker_obj.body.position)
        self.target_hit = 0.0
        self.marker_collided = False
        return obs

    def step(self, action):
        """Advance one step, adding marker-specific reward shaping on top of
        the base environment's reward.
        """
        obs, reward, done, info = super().step(action)
        # Remember the base reward so only the shaping delta is added to
        # episode_reward at the end (the base class already counted the rest).
        general_reward = reward
        # Dense reward based off of distance from target ball to the goal
        cur_target_pos = self.target_obj.body.position
        move_dist = self.calc_distance(self.target_obj_start_pos, cur_target_pos)
        if self.target_hit == 0 and move_dist > self.settings.move_thresh and \
                (not self.marker_must_hit or hasattr(self.marker_obj.shape, 'hit_target')):
            # Target considered hit: grant the bonus per the configured scheme.
            if self.settings.marker_reward == 'reg':
                self.target_hit += 1.
                reward += self.target_reward
            elif self.settings.marker_reward == 'dir':
                # 'dir' only rewards movement toward the goal side.
                goal_on_left = self.target_obj_start_pos[0] < self.goal_pos[0]
                moved_target_left = self.target_obj_start_pos[0] < cur_target_pos[0]
                if goal_on_left == moved_target_left:
                    self.target_hit += 1.0
                    reward += self.target_reward
            else:
                raise ValueError('Unknown marker reward type')
            # Switch the dense-reward baseline to target-to-goal distance.
            self.prev_dist = self.calc_distance(cur_target_pos, self.goal_pos)
        else:
            # Before the hit: dense reward for closing the marker-target gap.
            distance = self.calc_distance(cur_target_pos,
                    self.marker_obj.body.position)
            reward += self.dense_reward_scale * (self.prev_dist - distance)
            self.episode_dense_reward += self.dense_reward_scale * (self.prev_dist - distance)
            self.prev_dist = distance
        # Terminate if the marker ball is out of bounds AND target is not hit yet
        if (not self.within_bounds(self.marker_obj.body.position)) and self.target_hit == 0:
            done = True
            reward += self.settings.marker_gone_reward
        self.episode_reward += (reward - general_reward)
        if done:
            # Episode summary statistics for logging.
            info['ep_len'] = self.episode_len
            info['ep_target_hit'] = self.target_hit
            info['ep_goal_hit'] = self.goal_hit
            info['ep_reward'] = self.episode_reward
            info['ep_subgoal_reward'] = self.total_subgoal_add_reward
            info['ep_no_op'] = self.no_op_count
            info['ep_invalid_action'] = self.invalid_action_count
            info['ep_blocked_action'] = self.blocked_action_count
            info['ep_overlap_action'] = self.overlap_action_count
            info['ep_dense_reward'] = self.episode_dense_reward
            info['ep_placed_tools'] = len(self.placed_tools)
        return obs, reward, done, info
| [
"Sivinious@users.noreply.github.com"
] | Sivinious@users.noreply.github.com |
6ce7c3b81e8347c816509e1759efdab460e04679 | 7733ae47afbf86989e1d3bfd06b9c4ca3edba0e1 | /data_structures/stacks/stack_using_linked_list.py | 0779cac32020c3ac7817c2ae4335cb52817268a7 | [] | no_license | EricMontague/Datastructures-and-Algorithms | 853ac290557e9ecf60c187401a7d576a99529ba7 | 2ce6d8b893f0b8affc8c880165fb3f7ecfdeb19b | refs/heads/master | 2021-07-25T14:59:59.059604 | 2021-01-26T18:24:44 | 2021-01-26T18:24:44 | 242,427,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,302 | py | """This module contains my implementation of a stack
based on a singly linked list.
"""
class StackItem:
"""Class to represent an item in a stack."""
def __init__(self, data):
self.data = data
self.next = None
class Stack:
"""Class to represent a stack based on a singly linked list."""
def __init__(self):
self._head = None
self._size = 0
def push(self, data):
"""Insert the given data on top of the stack."""
item = StackItem(data)
item.next = self._head
self._head = item
self._size += 1
def pop(self):
"""Remove and return the value of the item on top of the
stack.
"""
if self.is_empty():
raise IndexError("Stack is empty.")
item = self._head.data
self._head = self._head.next
self._size -= 1
return item
def is_empty(self):
"""Return True if the stack is empty, else return False."""
return self._size == 0
def peek(self):
"""Return the value of the item on the top of the stack."""
if self.is_empty():
return None
return self._head.data
@property
def size(self):
"""Return the number of items in the stack."""
return self._size
| [
"eric.g.montague@gmail.com"
] | eric.g.montague@gmail.com |
9707670f3dec472dded3c7da0ce0d31e2033090f | d668209e9951d249020765c011a836f193004c01 | /tools/pnnx/tests/test_F_unfold.py | 51f19a4f48a4b788476ea755f31fd662ef8f4214 | [
"BSD-3-Clause",
"Zlib",
"BSD-2-Clause"
] | permissive | Tencent/ncnn | d8371746c00439304c279041647362a723330a79 | 14b000d2b739bd0f169a9ccfeb042da06fa0a84a | refs/heads/master | 2023-08-31T14:04:36.635201 | 2023-08-31T04:19:23 | 2023-08-31T04:19:23 | 95,879,426 | 18,818 | 4,491 | NOASSERTION | 2023-09-14T15:44:56 | 2017-06-30T10:55:37 | C++ | UTF-8 | Python | false | false | 1,747 | py | # Tencent is pleased to support the open source community by making ncnn available.
#
# Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
#
# Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import torch
import torch.nn as nn
import torch.nn.functional as F
from packaging import version
class Model(nn.Module):
    """Applies F.unfold with three different parameter sets to one input."""

    def __init__(self):
        super(Model, self).__init__()

    def forward(self, x):
        # Each entry: (kernel_size, stride, padding, dilation).
        configs = (
            (3, 1, 0, 1),
            ((2, 4), (2, 1), 2, 1),
            ((1, 3), 1, (2, 4), (1, 2)),
        )
        outs = tuple(
            F.unfold(x, kernel_size=k, stride=s, padding=p, dilation=d)
            for k, s, p, d in configs
        )
        return outs
def test():
    """Round-trip the model through TorchScript and pnnx, compare outputs."""
    net = Model()
    net.eval()
    # Fixed seed so the comparison input is reproducible.
    torch.manual_seed(0)
    x = torch.rand(1, 12, 64, 64)
    a0, a1, a2 = net(x)
    # export torchscript
    mod = torch.jit.trace(net, x)
    mod.save("test_F_unfold.pt")
    # torchscript to pnnx
    import os
    os.system("../src/pnnx test_F_unfold.pt inputshape=[1,12,64,64]")
    # pnnx inference
    import test_F_unfold_pnnx
    b0, b1, b2 = test_F_unfold_pnnx.test_inference()
    # Pass only if pnnx reproduces all three outputs exactly.
    return torch.equal(a0, b0) and torch.equal(a1, b1) and torch.equal(a2, b2)
if __name__ == "__main__":
    # Shell-style exit code: 0 on success, 1 on mismatch.
    if test():
        exit(0)
    else:
        exit(1)
| [
"noreply@github.com"
] | Tencent.noreply@github.com |
a5513dfc2836838bb3c4606c9f502067189dd421 | fd3da963aa5ad8ff0d6cf0cc4c9d9ff05f9135ca | /apps/goodss/urls.py | dd136dd536f0526fb077f7ce93ae0e9e66f4032a | [] | no_license | huanshenyi/rental-system-backed | 083dbfe18d28e7f0111282a93c84c415098d07f5 | 3c9487dcb9e650036a2a533a10a3c66f762b6fdb | refs/heads/master | 2022-12-14T18:05:29.876973 | 2020-08-15T13:32:55 | 2020-08-15T13:32:55 | 281,384,752 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | __author__ = "ハリネズミ"
from rest_framework.routers import DefaultRouter
from . import views
# DRF router; trailing_slash=False generates routes without a final "/".
router = DefaultRouter(trailing_slash=False)
router.register("goods", views.GoodsViewSet, basename="goods")
router.register("category", views.CategoryViewSet, basename="category")
router.register("tag", views.TagViewSet, basename="tag")
# URL namespace for reversing, e.g. "goodss:goods-list".
app_name = "goodss"
# No hand-written routes; everything comes from the router.
urlpatterns = [
] + router.urls
| [
"txy1226052@gmail.com"
] | txy1226052@gmail.com |
b28c117e4cb45e821c162342a952aaee187604aa | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_BoxCox/trend_ConstantTrend/cycle_0/ar_12/test_artificial_128_BoxCox_ConstantTrend_0_12_100.py | 02cb32eb108b5b7dafaa5e8d7ad78612f5428a54 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 272 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
# Generate and benchmark one 128-point daily artificial series: constant
# trend, no cycle, BoxCox transform, zero noise, 100 exogenous variables,
# autoregressive order 12.
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 0, transform = "BoxCox", sigma = 0.0, exog_count = 100, ar_order = 12);
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
1770713e3f35011aa2ee76ea0965250841cc4f2c | 08a851f0d7218beb6c32b5438595c44bb2498af9 | /library/migrations/0004_auto_20150908_1625.py | 3ed5fa8c629bbc36ffed2de3dd854b3f049ef822 | [] | no_license | KobiBeef/base_src | 47ff5a1ecbab0953f74b41533cafbd26eb428e16 | 975294df5edee8d1f441470a7e1cf8ce59778a0b | refs/heads/master | 2020-06-05T08:30:21.358458 | 2015-12-08T08:09:06 | 2015-12-08T08:09:06 | 41,631,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 362 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('library', '0003_testcomment'),
]
operations = [
migrations.AlterModelOptions(
name='contact',
options={'ordering': ['-pk']},
),
]
| [
"ezekielbacungan@gmail.com"
] | ezekielbacungan@gmail.com |
a91ef44aa843bd22308c6d92577a6f5676cd70fc | d7fb8743b6faa4d948b2b08ca0dbdd3b0f11379b | /测试代码/keras/已经/1keras_lstm1/stock_lstm4.py | 9b08b23f74975fac2ad2d7555aae9d4185fb679b | [] | no_license | bancheng/Stock-market | 219e9882858e6d10edad1d13fba67dadbedc27ba | 142ea0eaed0fdccd8e79a51c34d66d1be1c336ed | refs/heads/master | 2021-01-20T15:13:14.667022 | 2017-09-10T06:31:10 | 2017-09-10T06:31:10 | 90,737,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,884 | py | from __future__ import absolute_import
from __future__ import print_function
import numpy as np
import cPickle as pkl
from keras.preprocessing import sequence, text
from keras.optimizers import SGD, RMSprop, Adagrad
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
f=open("data.pkl",'rb')
datax = pkl.load(f)
datay = pkl.load(f)
_rand = np.random.randint(len(datay),size=len(datay))
X_test = datax[_rand[0]]
y_test = datay[_rand[0]]
X_train = datax[_rand[2000]]
y_train = datay[_rand[2000]]
i=1
while i<2000:
X_test = np.vstack((X_test,datax[_rand[i]]))
y_test = np.vstack((y_test,datay[_rand[i]]))
i=i+1
X_test = X_test.reshape(X_test.shape[0]/50,50,6)
i=2001
while (i>1999) & (i<len(datay)):
X_train = np.vstack((X_train, datax[_rand[i]]))
y_train = np.vstack((y_train, datay[_rand[i]]))
i=i+1
X_train = X_train.reshape(X_train.shape[0]/50,50,6)
# print('X_train shape:', X_train.shape)
# print('X_test shape:', X_test.shape)
model = Sequential()
model.add(LSTM(1, input_shape=(50, 6)))
# print('Build model...')
# model = Sequential()
# model.add(Embedding(max_features, 256))
# model.add(LSTM(256, 128)) # try using a GRU instead, for fun
model.add(Dropout(0.2))
model.add(Dense(1))
model.add(Activation('sigmoid'))
#
# # try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy', optimizer='adam')
#
print("Train...")
model.fit(X_train, y_train, batch_size=1, nb_epoch=50, validation_split=0.2, show_accuracy=True)
score = model.evaluate(X_test, y_test, batch_size=1)
print('Test score:', score)
#
# classes = model.predict_classes(X_test, batch_size=batch_size)
# acc = np_utils.accuracy(classes, y_test)
#
# print('Test accuracy:', acc)
#
# store_weights = {}
# for layer in model.layers :
# store_weights[layer] = layer.get_weights()
#
# # create a new model of same structure minus last layers, to explore intermediate outputs
# print('Build truncated model')
# chopped_model = Sequential()
# chopped_model.add(Embedding(max_features, 256, weights=model.layers[0].get_weights()))
# chopped_model.add(LSTM(256, 128, weights=model.layers[1].get_weights()))
# chopped_model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")
#
# # pickle intermediate outputs, model weights
# train_activations = chopped_model.predict(X_train, batch_size=batch_size)
# test_activations = chopped_model.predict(X_test, batch_size=batch_size)
# outputs = dict(final=classes, acc=acc, weights=store_weights, y_train=y_train, y_test=y_test,
# train_activations=train_activations, test_activations=test_activations)
#
# pkl.dump(outputs, open('results/predicted_activations.pkl', 'wb'),
# protocol=pkl.HIGHEST_PROTOCOL)
| [
"tangdongge@buaa.edu.cn"
] | tangdongge@buaa.edu.cn |
e8b83c5dc9d4999541cb30f76d63cb23ff3fff7d | 814992618962991b1b6dd6f1cdf2853687cbfcd0 | /examples/demo_013_HEOM.py | 8fee2c5b5ca6c9822e512aeed24a2b19cfcb9ad4 | [
"MIT"
] | permissive | MichalPt/quantarhei | a5db7916405236dc78778e4ef378141a19a28ff2 | 536d4f39bb7f7d6893664520351d93eac2bc90f1 | refs/heads/master | 2022-12-15T09:36:53.108896 | 2022-07-28T09:44:12 | 2022-07-28T09:44:12 | 226,359,238 | 1 | 0 | MIT | 2019-12-06T15:37:24 | 2019-12-06T15:37:23 | null | UTF-8 | Python | false | false | 7,159 | py | # -*- coding: utf-8 -*-
_show_plots_ = True
import time
import numpy
import quantarhei as qr
from quantarhei.qm.liouvillespace.integrodiff.integrodiff \
import IntegrodiffPropagator
print("")
print("***********************************************************")
print("* *")
print("* Quantarhei's HEOM implementation demo *")
print("* *")
print("***********************************************************")
###############################################################################
#
# Model system definition
#
###############################################################################
# Three molecules
with qr.energy_units("1/cm"):
m1 = qr.Molecule([0.0, 10100.0])
m2 = qr.Molecule([0.0, 10300.0])
m3 = qr.Molecule([0.0, 10000.0])
# Aggregate is built from the molecules
agg = qr.Aggregate([m1, m2, m3])
# Couplings between them are set
with qr.energy_units("1/cm"):
agg.set_resonance_coupling(0,1,80.0)
agg.set_resonance_coupling(0,2,100.0)
# Interaction with the bath is set through bath correlation functions
timea = qr.TimeAxis(0.0, 500, 1.0)
cpar1 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
cortime=50, T=300)
cpar2 = dict(ftype="OverdampedBrownian-HighTemperature", reorg=50,
cortime=50, T=300)
with qr.energy_units("1/cm"):
cfce1 = qr.CorrelationFunction(timea, cpar1)
cfce2 = qr.CorrelationFunction(timea, cpar2)
m1.set_transition_environment((0, 1), cfce1)
m2.set_transition_environment((0, 1), cfce1)
m3.set_transition_environment((0, 1), cfce2)
# Aggregate is built
agg.build()
###############################################################################
#
# Definition of the hierarchy
#
###############################################################################
# Hamiltonian and the system-bath interaction operator is needed to
# define the Kubo-Tanimura hierarchy
ham = agg.get_Hamiltonian()
sbi = agg.get_SystemBathInteraction()
# We define the hierarchy
#Hy3 = qr.KTHierarchy(ham, sbi, 3)
#Hy4 = qr.KTHierarchy(ham, sbi, 4)
#Hy5 = qr.KTHierarchy(ham, sbi, 5)
Hy6 = qr.KTHierarchy(ham, sbi, 3)
print("Size of hierarchy of depth",Hy6.depth,"is",Hy6.hsize)
Hy7 = qr.KTHierarchy(ham, sbi, 4)
print("Size of hierarchy of depth",Hy7.depth,"is",Hy7.hsize)
# testing generation of hierarchy indices
#print(Hy.generate_indices(4, level=4))
#
#raise Exception()
###############################################################################
#
# Propagation of the HEOM
#
###############################################################################
# Initial density matrix
rhoi = qr.ReducedDensityMatrix(dim=ham.dim)
with qr.eigenbasis_of(ham):
rhoi.data[2,2] = 0.8
rhoi.data[1,1] = 0.1
rhoi.data[3,3] = 0.1
#print(rhoi)
# Definition of the HEOM propagator
#kprop3 = qr.KTHierarchyPropagator(timea, Hy3)
#kprop4 = qr.KTHierarchyPropagator(timea, Hy4)
#kprop5 = qr.KTHierarchyPropagator(timea, Hy5)
kprop6 = qr.KTHierarchyPropagator(timea, Hy6)
kprop7 = qr.KTHierarchyPropagator(timea, Hy7)
# Propagation of the hierarchy and saving the density operator
t1 = time.time()
#rhot3 = kprop3.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot4 = kprop4.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
#rhot5 = kprop5.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
rhot6 = kprop6.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
t1 = time.time()
rhot7 = kprop7.propagate(rhoi, report_hierarchy=False, free_hierarchy=False)
t2 = time.time()
print("Propagated in", t2-t1,"s")
###############################################################################
#
# Graphical output of the results
#
###############################################################################
if _show_plots_:
import matplotlib.pyplot as plt
N = timea.length
with qr.eigenbasis_of(ham):
# plt.plot(timea.data[0:N], rhot3.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot3.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot3.data[0:N,3,3],"-k")
# plt.plot(timea.data[0:N], rhot4.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot4.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot4.data[0:N,3,3],"-k")
# plt.plot(timea.data[0:N], rhot5.data[0:N,1,1],"-b")
# plt.plot(timea.data[0:N], rhot5.data[0:N,2,2],"-r")
# plt.plot(timea.data[0:N], rhot5.data[0:N,3,3],"-k")
plt.plot(timea.data[0:N], rhot6.data[0:N,0,0])
plt.plot(timea.data[0:N], rhot6.data[0:N,1,3],"-b")
plt.plot(timea.data[0:N], rhot6.data[0:N,2,3],"-r")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"-k")
plt.plot(timea.data[0:N], rhot7.data[0:N,1,3],"--b")
plt.plot(timea.data[0:N], rhot7.data[0:N,2,3],"--r")
plt.plot(timea.data[0:N], rhot7.data[0:N,1,2],"--k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,1], "-k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,2], "-k")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,3], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,4], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,5], "-b")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,6], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,7], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,8], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,9], "-r")
#plt.plot(timea.data[0:N], Hy.hpop[0:N,10], "-g")
plt.show()
print("Kernel generation")
ker = Hy6.get_kernel(timea)
ip8 = IntegrodiffPropagator(timea, ham, kernel=ker,
fft=True, timefac=3, decay_fraction=2.0)
#fft=False) #, cutoff_time=100)
rhot8 = ip8.propagate(rhoi)
trc = numpy.zeros(timea.length, dtype=qr.REAL)
for ti in range(timea.length):
trc[ti] = numpy.real(numpy.trace(rhot8.data[ti,:,:]))
if _show_plots_:
N = timea.length
with qr.eigenbasis_of(ham):
#plt.plot(timea.data[0:N], rhot8.data[0:N,0,0])
#plt.plot(timea.data[0:N], trc[0:N],"-m")
plt.plot(timea.data[0:N], ker[0:N,1,1,1,1],"-m")
plt.plot(timea.data[0:N], ker[0:N,1,2,1,2],"-m")
plt.plot(timea.data[0:N], ker[0:N,2,2,2,2],"-m")
plt.show()
plt.plot(timea.data[0:N], rhot8.data[0:N,1,1],"-b")
plt.plot(timea.data[0:N], rhot8.data[0:N,2,2],"-r")
plt.plot(timea.data[0:N], rhot8.data[0:N,1,2],"-k")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,1],"--b")
plt.plot(timea.data[0:N], rhot6.data[0:N,2,2],"--r")
plt.plot(timea.data[0:N], rhot6.data[0:N,1,2],"--k")
plt.show()
print("")
print("***********************************************************")
print("* *")
print("* Demo finished successfully *")
print("* *")
print("***********************************************************")
| [
"tmancal74@gmail.com"
] | tmancal74@gmail.com |
93b9fc099bbdf4f52185cf649eff703a84c41fea | 8adec48dfaee1cdfd6c7f4d2fb3038aa1c17bda6 | /WProf/build/third_party/twisted_8_1/twisted/flow/.svn/text-base/pipe.py.svn-base | 8b38e07fb29e21544889a65343483bfd058430b2 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | kusoof/wprof | ef507cfa92b3fd0f664d0eefef7fc7d6cd69481e | 8511e9d4339d3d6fad5e14ad7fff73dfbd96beb8 | refs/heads/master | 2021-01-11T00:52:51.152225 | 2016-12-10T23:51:14 | 2016-12-10T23:51:14 | 70,486,057 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,446 | # Copyright (c) 2001-2004 Twisted Matrix Laboratories.
# See LICENSE for details.
#
# Author: Clark Evans (cce@clarkevans.com)
"""
flow.pipe
This contains various filter stages which have exactly one input stage. These
stages take a single input and modify its results, ie a rewrite stage.
"""
from base import *
from wrap import wrap
from twisted.python.failure import Failure
class Pipe(Stage):
    """Abstract stage which consumes exactly one upstream input stage.

    Subclasses override process() to transform each batch of upstream
    results into this stage's results/stop/failure state.
    """
    def __init__(self, source, *trap):
        Stage.__init__(self, *trap)
        # Upstream stage, normalized through wrap().
        self._source = wrap(source)
    def _yield(self):
        # Pull from the upstream stage until this stage has something to
        # report (results, stop or failure).
        while not self.results \
              and not self.stop \
              and not self.failure:
            source = self._source
            instruction = source._yield()
            if instruction:
                # Upstream is blocked on an instruction (e.g. a Deferred);
                # propagate it to our caller.
                return instruction
            if source.failure:
                self.failure = source.failure
                return
            results = source.results
            stop = source.stop
            if stop:
                self.stop = True
            source.results = []
            self.process(results, stop)
    def process(self, results, stop):
        """Process one batch of upstream results.

        Fix: the abstract signature previously omitted ``stop`` although
        _yield() always calls process(results, stop) and every subclass
        implements the two-argument form.

        Takes a (possibly empty) list of results and a flag telling whether
        the upstream stage has finished, and sets the member variables
        results, stop, or failure appropriately.
        """
        raise NotImplementedError
class Filter(Pipe):
    """Flow equivalent of the builtin filter(): Filter(function, source, ...).

    Yields those elements of the source stage for which *function* returns
    a true value.  If *function* is None the identity is assumed, i.e. all
    items that are false (zero or empty) are discarded.

    Example::

        def odd(val):
            if val % 2:
                return True

        def source():
            yield 1
            yield 2
            yield 3
            yield 4

        printFlow(flow.Filter(odd, source))
    """
    def __init__(self, func, source, *trap):
        Pipe.__init__(self, source, *trap)
        # Predicate applied to every upstream item (None = truthiness test).
        self._func = func
    def process(self, results, stop):
        # filter() keeps the items for which the predicate is true.
        self.results.extend(filter(self._func,results))
class LineBreak(Pipe):
    """Pipe stage which re-chunks its input blocks into delimited lines.

    Keyword options:
        delimiter: line separator to split on (default '\\r\\n')
        maxlength: stored in self._maxlen; currently not enforced
        trailer:   if true, a trailing fragment without a final delimiter
                   is emitted as a last line instead of raising RuntimeError
    """
    def __init__(self, source, *trap, **kwargs):
        Pipe.__init__(self, source, *trap)
        self._delimiter = kwargs.get('delimiter','\r\n')
        self._maxlen = int(kwargs.get('maxlength', 16384))+1
        self._trailer = int(kwargs.get('trailer',False))
        self._buffer = []     # fragments of the current, incomplete line
        self._currlen = 0     # length of the buffered fragment
    def process(self, results, stop):
        for block in results:
            lines = str(block).split(self._delimiter)
            # The element after the last delimiter is always an incomplete
            # fragment ('' when the block ends with a delimiter); it must
            # be buffered, never emitted yet.
            tail = lines.pop()
            # Bug fix: previously a block containing no delimiter was both
            # emitted as a complete line and buffered again, duplicating
            # data.  Only flush when this block actually completed a line.
            if lines:
                if self._buffer:
                    # First split piece completes the buffered line.
                    self._buffer.append(lines.pop(0))
                    self.results.append("".join(self._buffer))
                    self._buffer = []
                self.results.extend(lines)
                self._currlen = 0
            if tail:
                self._currlen += len(tail)
                self._buffer.append(tail)
        if stop and self._buffer:
            tail = "".join(self._buffer)
            if self._trailer:
                self.results.append(tail)
            else:
                # py3-compatible raise (was the py2 'raise E, msg' form).
                raise RuntimeError("trailing data remains: '%s'" % tail[:10])
| [
"kusoof@kookaburra.(none)"
] | kusoof@kookaburra.(none) | |
07b9fc3d3d2f5b66826d0b99c52e23bcaeee837f | b3bf0dfda920950cbc4215a2f591606473398706 | /contact_manager/users/apps.py | b9b9054731b1bdc5afc95af824ba0d884b4ac2f8 | [] | no_license | adeelehsan/contact_manager | 34fa9d4fc9a6e03651b7e81cd144d9380629be33 | d5455a524726ca8577a628d6b2abb6885291e600 | refs/heads/master | 2020-03-25T18:07:28.213197 | 2018-08-08T13:08:34 | 2018-08-08T13:08:34 | 144,013,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from django.apps import AppConfig
class UsersAppConfig(AppConfig):
    # Dotted path of the app; corresponds to contact_manager/users/ on disk.
    name = "contact_manager.users"
    verbose_name = "Users"
    def ready(self):
        # Register signal handlers once the app registry is ready.
        # NOTE(review): the app is named "contact_manager.users" but this
        # imports "users.signals" -- presumably only resolves if the project
        # root is on sys.path; confirm, or use contact_manager.users.signals.
        try:
            import users.signals  # noqa F401
        except ImportError:
            pass
| [
"adeel.ehsan@arbisoft.com"
] | adeel.ehsan@arbisoft.com |
0702dfcb63672e54fa4461c0fef1e5dec473a471 | d4442db5a7ab9db2b04fef640a9864f3fba54758 | /src/python/WMCore/RequestManager/RequestMaker/Processing/StoreResultsRequest.py | 6262e7d3ca23c220082d5f4d37caec6a43e306c8 | [] | no_license | stuartw/WMCore | fa25ff19ab5058a635d35d3c58a0ac56a3e079a1 | 38c39c43f7237fd316930839674ac9be3c0ee8cc | refs/heads/master | 2021-01-18T07:18:18.324604 | 2012-10-18T22:30:34 | 2012-10-18T22:30:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,051 | py | #!/usr/bin/env python
"""
_StoreResultsRequest_
"""
from WMCore.RequestManager.RequestMaker.RequestMakerInterface import RequestMakerInterface
from WMCore.RequestManager.DataStructs.RequestSchema import RequestSchema
from WMCore.RequestManager.RequestMaker.Registry import registerRequestType, retrieveRequestMaker
class StoreResultsRequest(RequestMakerInterface):
    """
    _StoreResultsRequest_

    RequestMaker for StoreResults requests and workflows.
    """
    def __init__(self):
        RequestMakerInterface.__init__(self)
class StoreResultsSchema(RequestSchema):
    """
    _StoreResultsSchema_

    Schema holding the data required for a StoreResults request.
    """
    def __init__(self):
        RequestSchema.__init__(self)
        # not used yet
        self.validateFields = [
            'InputDatasets',
            'CMSSWVersion',
            'ScramArch',
            'Group',
            'DbsUrl'
        ]
# Make the "StoreResults" request type available to the request factory.
registerRequestType("StoreResults", StoreResultsRequest, StoreResultsSchema)
| [
"metson@4525493e-7705-40b1-a816-d608a930855b"
] | metson@4525493e-7705-40b1-a816-d608a930855b |
49914a6ca92efeecfc33636379136600b1830cee | cb30d1a3a4fa6c8f7a6f89a671fbdb4a808e19e3 | /c6/prime-iter.py | b70bd78b2b00c2e5f38c042c7396b319d0036b5b | [] | no_license | systemchip/python-for-everyone | 0b45172ca5b41c3b5fc1a835fbccf4a479c282ea | 9fb7f751a97fb6a110079e1e3e1dd9601fb24374 | refs/heads/master | 2021-09-02T09:18:22.013704 | 2017-07-17T07:46:19 | 2017-07-17T07:46:19 | 115,913,547 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # 소수를 열거하는 이터레이터 클래스
class PrimeIter:
    """Iterator over the prime numbers strictly below a given maximum."""
    def __init__(self, max):
        """Store the exclusive upper bound."""
        self.max = max
    def __iter__(self):
        """Reset the cursor so every iteration restarts from the smallest prime."""
        self.n = 1
        return self
    def __next__(self):
        """Advance to the next prime and return it.

        Raises:
            StopIteration: once the next prime would be >= self.max.
        """
        self.n += 1
        # Trial division only up to sqrt(n): any composite n must have a
        # divisor no larger than its square root (faster than testing all
        # candidates below n, with identical results).
        while True:
            for i in range(2, int(self.n ** 0.5) + 1):
                if self.n % i == 0:
                    break           # composite -- try the next candidate
            else:
                break               # no divisor found -> n is prime
            self.n += 1
        # Stop once the bound is reached (primes are strictly below max).
        if self.n >= self.max:
            raise StopIteration
        return self.n
# Enumerate the primes below 100, comma-separated on one line.
it = PrimeIter(100)
for no in it:
    print(no, end=",")
| [
"dylee@wikibook.co.kr"
] | dylee@wikibook.co.kr |
30f827f6ddccc3e1c29f529fc940673d80998089 | 55c250525bd7198ac905b1f2f86d16a44f73e03a | /Python/Lazymux/sqlmap/lib/core/optiondict.py | 28b61b85b21e4eae996bc3bcfdf792f120d171bf | [] | no_license | NateWeiler/Resources | 213d18ba86f7cc9d845741b8571b9e2c2c6be916 | bd4a8a82a3e83a381c97d19e5df42cbababfc66c | refs/heads/master | 2023-09-03T17:50:31.937137 | 2023-08-28T23:50:57 | 2023-08-28T23:50:57 | 267,368,545 | 2 | 1 | null | 2022-09-08T15:20:18 | 2020-05-27T16:18:17 | null | UTF-8 | Python | false | false | 129 | py | version https://git-lfs.github.com/spec/v1
oid sha256:d81f4b1eec17cebb5bc92268f0648acad5c4739e63d3fe400f570e68878dc011
size 7119
| [
"nateweiler84@gmail.com"
] | nateweiler84@gmail.com |
229f24e48680f186185ca451cd5de90fd1dd6eda | 0df73a877fd521b5e0ab95bb261751c87f1f4b39 | /Scripts/getBranchGroupFnPlanning.py | 39cf13426b15ffa606a9f6bd92b2b1dea52ecc56 | [] | no_license | bikiranguha/Bus-Map | 6bcb907c257e2dc4fcc47dd27772159b51fa2b08 | 8ef96e9027e3abb953834bd76981bcc689ef5250 | refs/heads/master | 2020-03-08T06:56:55.703128 | 2018-07-15T17:51:12 | 2018-07-15T17:51:12 | 127,983,097 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,868 | py | """
Function to generate a dictionary which will contain a list of ties of the bus.
Bus will not be present in keys if no ties connected to it
"""
import math
from Queue import Queue
#import sys
#sys.path.insert(0,'C:/Users/Bikiran/Google Drive/Bus Mapping Project Original/Donut Hole Approach/Donut Hole v2')
#from getBusDataFn import getBusData
highImpedanceTieList = []
def makeBranchGroups(planningRaw):
    """Group buses connected by closed, near-zero-impedance ties.

    Scans the branch-data section of a PSS/E-style raw file; any in-service
    branch with |Z| <= 2e-4 is treated as a tie.  Returns a dict mapping
    each bus number (str) to the full set of buses in its tie group
    (every member of a group maps to the same shared set object).
    """
    BranchGroupDict = {}
    #BranchGroupList = []
    with open(planningRaw,'r') as f:
        filecontent = f.read()
        fileLines = filecontent.split('\n')
    branchStartIndex = fileLines.index('0 / END OF GENERATOR DATA, BEGIN BRANCH DATA')+1
    branchEndIndex = fileLines.index('0 / END OF BRANCH DATA, BEGIN TRANSFORMER DATA')
    #BusDataDict = getBusData(Raw)
    for i in range(branchStartIndex,branchEndIndex): # search through branch data
        words = fileLines[i].split(',')
        #BranchCode = words[2].strip()
        # Branch impedance magnitude from R and X fields.
        R = float(words[3].strip())
        X = float(words[4].strip())
        Z = math.sqrt(R**2 + X**2)
        status = words[-5].strip()
        # Tie = closed branch with (almost) zero impedance.
        if Z <= 2e-4 and status == '1':
            Bus1 = words[0].strip()
            Bus2 = words[1].strip()
            """
            #check whether all lines with ckt id == '99' are just ties
            Bus1Area = BusDataDict[Bus1].area
            Bus2Area = BusDataDict[Bus2].area
            if Z > 4e-6 and Bus1Area == '222' and Bus2Area == '222':
                highImpedanceTieList.append(fileLines[i])
            """
            # Record the tie in both directions (undirected adjacency).
            if Bus1 not in BranchGroupDict.keys():
                BranchGroupDict[Bus1] = set()
            BranchGroupDict[Bus1].add(Bus2)
            if Bus2 not in BranchGroupDict.keys():
                BranchGroupDict[Bus2] = set()
            BranchGroupDict[Bus2].add(Bus1)
    # get complete bus groups
    CompleteBranchGroupDict = {} # each bus has the full bus group as a set
    for Bus in BranchGroupDict.keys(): # scan each key and generates a full bus group set
        if Bus in CompleteBranchGroupDict.keys(): # Bus already has the group, so skip
            continue
        # Breadth-first traversal of the tie adjacency to collect the
        # connected component containing Bus.
        frontier = Queue(maxsize=0)
        frontier.put(Bus)
        BusGroup = set()
        # do something similar to BFS
        while not frontier.empty():
            currentBus = frontier.get()
            frontier.task_done()
            BusGroup.add(currentBus)
            ties = BranchGroupDict[currentBus]
            for tie in ties:
                if tie not in BusGroup:
                    frontier.put(tie)
                    BusGroup.add(tie)
        ####
        # Every member of the component shares the same group set.
        for t in list(BusGroup):
            CompleteBranchGroupDict[t] = BusGroup
    return CompleteBranchGroupDict
if __name__ == "__main__":
planningRaw = 'hls18v1dyn_1219.raw'
BranchGroupDict = makeBranchGroups(planningRaw)
"""
while True:
searchTerm = raw_input('Enter bus number whose list of ties you are looking for: ')
if searchTerm in BranchGroupDict.keys():
for Bus in list(BranchGroupDict[searchTerm.strip()]):
print Bus
else:
print 'Bus has no ties'
"""
"""
with open('tmp.txt','w') as f:
for line in highImpedanceTieList:
f.write(line)
f.write('\n')
"""
| [
"Bikiran Guha"
] | Bikiran Guha |
cb696f10d6758f10de4b5722c710e854e06b2176 | 6ef3fc3ffa5f33e6403cb7cb0c30a35623a52d0d | /samples/generated_samples/vision_v1p3beta1_generated_product_search_delete_product_set_sync.py | 5fdcd34e44b44a9d64020ff62b4d50001492f599 | [
"Apache-2.0"
] | permissive | vam-google/python-vision | 61405506e3992ab89e6a454e4dda9b05fe2571f2 | 09e969fa30514d8a6bb95b576c1a2ae2c1e11d54 | refs/heads/master | 2022-08-15T08:40:35.999002 | 2022-07-18T16:04:35 | 2022-07-18T16:04:35 | 254,789,106 | 0 | 0 | Apache-2.0 | 2020-04-11T03:59:02 | 2020-04-11T03:59:01 | null | UTF-8 | Python | false | false | 1,432 | py | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for DeleteProductSet
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-vision
# [START vision_v1p3beta1_generated_ProductSearch_DeleteProductSet_sync]
from google.cloud import vision_v1p3beta1
def sample_delete_product_set():
    """Delete a product set; the RPC returns no payload on success."""
    # Create a client
    client = vision_v1p3beta1.ProductSearchClient()
    # Initialize request argument(s)
    # "name_value" is a placeholder for the full resource name, e.g.
    # projects/<project>/locations/<location>/productSets/<product_set>.
    request = vision_v1p3beta1.DeleteProductSetRequest(
        name="name_value",
    )
    # Make the request
    client.delete_product_set(request=request)
| [
"noreply@github.com"
] | vam-google.noreply@github.com |
1c88935573d4ec33c04ab459f2367009017d9a8e | 872cd13f25621825db0c598268ecd21b49cc2c79 | /Lesson_15/client/jim/constants.py | 9177b988ee7dc22ebac20bffc54f392cdef79639 | [] | no_license | ss2576/client_server_applications_Python | c4e9ebe195d23c8ca73211894aa50a74014013d5 | 9b599e37e5dae5af3dca06e197916944f12129d5 | refs/heads/master | 2022-12-15T10:40:22.935880 | 2020-08-12T11:02:21 | 2020-08-12T11:02:21 | 271,764,749 | 0 | 0 | null | 2020-06-12T10:05:00 | 2020-06-12T09:52:03 | Python | UTF-8 | Python | false | false | 513 | py | """ Module of constants used in jim protocol """
TYPE = 'type'
REQUEST = 'request'
RESPONSE = 'response'
ACTION = 'action'
TIME = 'time'
BODY = 'body'
CODE = 'code'
MESSAGE = 'message'
USERNAME = 'username'
PASSWORD = 'password'
SENDER = 'sender'
TO = 'to'
TEXT = 'text'
class RequestAction:
    """Namespace holding the string identifiers of jim request actions."""
    PRESENCE = 'presence'
    AUTH = 'auth'
    MESSAGE = 'msg'
    QUIT = 'quit'
    COMMAND = 'command'
    START_CHAT = 'start_chat'
    ACCEPT_CHAT = 'accept_chat'
"ss2576@mail.ru"
] | ss2576@mail.ru |
3ea54c81060a1f933135e5577dde53919207f182 | 286b6dc56323f982092ffafbfac8a32dbbaeb7ef | /training_assignments/SandipBarde/SandipBarde_day_5_assignment/exception_02.py | 99842d6af8c79e12aa1ef5f2077b2c9dc80d9a8e | [] | no_license | learndevops19/pythonTraining-CalsoftInc | ccee0d90aadc00bfdb17f9578620f6bf92f80a4c | c5f61516b835339b394876edd1c6f62e7cc6f0c3 | refs/heads/master | 2021-02-05T04:27:17.590913 | 2019-11-20T17:27:06 | 2019-11-20T17:27:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py |
class NegativePriceException(Exception):
    """Raised when a price value is negative.

    Attributes:
        message: Human-readable description of the error.
    """

    def __init__(self, msg):
        # Forward the message to Exception so str(e) and e.args behave
        # like standard exceptions (the original left them empty).
        super().__init__(msg)
        self.message = msg
if __name__ == "__main__":
    # Demo: read a price and reject negative values via the custom exception.
    price = float(input("Enter the price\n"))
    try:
        if(price < 0):
            # Raised and caught immediately below; the handler prints the message.
            raise NegativePriceException("Inside the Exception:- Price is less than zero.")
        else:
            print("Execution completed successfully.")
    except NegativePriceException as e:
        print(e.message)
"rajpratik71@gmail.com"
] | rajpratik71@gmail.com |
2844c12c08b01fdbe4ddf201e376874c8b13a2d0 | 01f2986123256d03d731303daa68b742ea4fe23d | /Второй максимум.py | bbc8490544693dbda3ae5d4a80a48a3842bd4ba5 | [] | no_license | mayhem215/Python | 65e05176de50b1f589ca991ac5d9f03b4ca00fa2 | f974d89d52a5aa8553151ea15a8b62e7c7c07cf5 | refs/heads/master | 2020-04-08T11:03:20.372914 | 2018-11-27T07:07:09 | 2018-11-27T07:07:09 | 159,291,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 200 | py | m1 = int(input())
m2 = int(input())
if m1 < m2:
m1, m2 = m2, m1
n = int(input())
while n != 0:
if n > m1:
m2, m1 = m1, m2
elif n > m2:
m2 = n
n = int(input())
print(m2) | [
"mayhem15@mail.ru"
] | mayhem15@mail.ru |
4a71558e39cfe45057c00d2d00e55cb99ba434b8 | c3a3ae45f6fb22bdb3c622498c7ff1c2c2732f6a | /day20/homework/s12bbs/bbs/views.py | 7c19039a94703905939c5b42343c2ea255fe2444 | [] | no_license | huyuedong/S12 | df6b56cf05bb9f9c4a6e54b6a5228f1715e20245 | 61aa6d91f4e70f87c9b4c4b1e2042d5eeb2e2c3d | refs/heads/master | 2020-12-14T08:50:57.514965 | 2016-07-30T01:45:03 | 2016-07-30T01:45:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | from django.shortcuts import render, redirect, HttpResponse
from bbs import models
from django.db.models import F, Q, Count, Sum, aggregates
from bbs import forms
from bbs.bll import uploadfile_handler, comments_handler
from datetime import datetime, timezone
# Create your views here.
# Fetch every board section flagged for the top menu, ordered by position_index.
category_list = models.Category.objects.filter(set_as_top_menu=True).order_by("position_index")
# Home page
def index(request):
    """Render the home page: the first section plus all published articles."""
    category_obj = models.Category.objects.get(position_index=1)  # the first section
    article_list = models.Article.objects.filter(status='published')  # all published articles
    return render(request, 'bbs/index.html', {
        'category_list': category_list,
        'article_list': article_list,
        'category_obj': category_obj,
    })
# Section page
def category(request, category_id):
    """Render a section page; position_index 1 doubles as the home page."""
    category_obj = models.Category.objects.get(id=category_id)
    if category_obj.position_index == 1:  # home page: show every published article
        article_list = models.Article.objects.filter(status='published')
    else:
        article_list = models.Article.objects.filter(category_id=category_obj.id, status='published')
    return render(request, 'bbs/index.html', {
        'category_list': category_list,  # top menu
        'category_obj': category_obj,  # section object
        'article_list': article_list,  # article list
    })
# Article page
def article_detail(request, article_id):
    """Render a single article looked up by primary key."""
    article_obj = models.Article.objects.get(id=article_id)
    return render(request, "bbs/article_detail.html", {
        "category_list": category_list,
        "article_obj": article_obj,
    })
# Comment submission
def post_comment(request):
    """Persist a comment (or sub-comment) posted by the logged-in user.

    NOTE(review): non-POST requests fall through and implicitly return None —
    confirm this endpoint is only ever hit via POST.
    """
    if request.method == "POST":
        new_comment_obj = models.Comment(
            comment_type=request.POST.get("comment_type"),
            parent_comment_id=request.POST.get("parent_comment_id", None),
            article_id=request.POST.get("article_id"),
            user_id=request.user.userprofile.id,
            comment=request.POST.get("comment"),
        )
        new_comment_obj.save()
        return HttpResponse("OK")
# Fetch comments
def get_comments(request, article_id):
    """Return an article's comment tree rendered as an HTML fragment."""
    article_obj = models.Article.objects.get(id=article_id)
    # comment_set = article_obj.comment_set.select_related().filter(comment_type=1) # comments only
    comment_set = article_obj.comment_set.select_related()
    comment_tree = comments_handler.build_comment_tree(comment_set)
    html_str = comments_handler.render_comment_tree(comment_tree)
    return HttpResponse(html_str)
def new_article(request):
    """Create an article from a POST, or render the empty authoring form."""
    if request.method == "POST":
        article_form = forms.ArticleForm(request.POST, request.FILES)  # validate fields and files
        if article_form.is_valid():  # form-based validation
            form_data = article_form.cleaned_data
            form_data["author_id"] = request.user.userprofile.id  # article author
            form_data["pub_date"] = datetime.now(timezone.utc)
            new_article_img_path = uploadfile_handler.uploadfile_handle(request)
            form_data["head_img"] = new_article_img_path
            new_article_obj = models.Article(**form_data)  # unsaved instance built from form data
            new_article_obj.save()
            return render(request, "bbs/new_article.html", {"new_article_obj": new_article_obj})
        else:
            # NOTE(review): errors only go to stdout; the user gets the blank
            # form back with no feedback — consider passing errors to the template.
            print(article_form.errors)
    all_category_list = models.Category.objects.all()
    return render(request, "bbs/new_article.html", {"category_list": all_category_list})
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
37ea7ab064e4996dca287d814428daa3078abc0a | aa4b80cf7e7ac0028d0c7f67ade982d9b740a38b | /python/touple/touple_t.py | 9137b51f9f319510cd7c35b7e666637c5b9bd7b2 | [] | no_license | ratularora/python_code | 9ac82492b8dc2e0bc2d96ba6df6fdc9f8752d322 | ddce847ba338a41b0b2fea8a36d49a61aa0a5b13 | refs/heads/master | 2021-01-19T04:34:22.038909 | 2017-09-27T08:14:45 | 2017-09-27T08:14:45 | 84,435,244 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py |
# Demonstrate converting a list to a tuple.
aList = [123, 'xyz', 'zara', 'abc']
aTuple = tuple(aList)
print("Tuple elements : ", aTuple)
# Tuples are immutable: tuple.append() does not exist and the original
# call raised AttributeError.  To "add" an element, build a new tuple
# by concatenation instead.
aTuple = aTuple + ('effd',)
print(aTuple)
| [
"arora.ratul@gmail.com"
] | arora.ratul@gmail.com |
0498e3544e187420ab7cacac12ad7fd5a5fb2a9c | f908adce7e25824f7daaffddfaacb2a18b3e721b | /feder/letters/logs/migrations/0002_auto_20170820_1447.py | 4a5853834afff5e659b5fc69e8d1aaf1bbcfd885 | [
"MIT"
] | permissive | miklobit/feder | 7c0cfdbcb0796f8eb66fd67fa4dabddb99370a7c | 14a59e181a18af5b625ccdcbd892c3b886a8d97e | refs/heads/master | 2023-01-13T23:03:51.266990 | 2020-11-12T14:31:52 | 2020-11-12T15:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,032 | py | # Generated by Django 1.11.4 on 2017-08-20 14:47
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the `status` and `to` columns to EmailLog."""
    dependencies = [("logs", "0001_initial")]
    operations = [
        migrations.AddField(
            model_name="emaillog",
            name="status",
            field=models.CharField(
                # NOTE(review): every status except "deferred"/"unknown" is
                # labelled "Open" — looks like copy-paste; confirm against the
                # current model's choices before relying on these labels.
                choices=[
                    (b"open", "Open"),
                    (b"ok", "Open"),
                    (b"spambounce", "Open"),
                    (b"softbounce", "Open"),
                    (b"hardbounce", "Open"),
                    (b"dropped", "Open"),
                    (b"deferred", "Deferred"),
                    (b"unknown", "Unknown"),
                ],
                default=b"unknown",
                max_length=20,
            ),
        ),
        migrations.AddField(
            model_name="emaillog",
            name="to",
            # preserve_default=False: "" is only used to fill existing rows.
            field=models.CharField(default="", max_length=255, verbose_name="To"),
            preserve_default=False,
        ),
    ]
"naczelnik@jawnosc.tk"
] | naczelnik@jawnosc.tk |
16cea51f52ae930ff0c9d3a383497d26d391b856 | 80a3d98eae1d755d6914b5cbde63fd10f5cc2046 | /autox/autox_video/mmaction2/mmaction/datasets/rawframe_dataset.py | 9359e117b7f52bc234b0e389de0b731e96c9e8db | [
"Apache-2.0"
] | permissive | 4paradigm/AutoX | efda57b51b586209e1d58e1dab7d0797083aadc5 | 7eab9f4744329a225ff01bb5ec360c4662e1e52e | refs/heads/master | 2023-05-24T00:53:37.109036 | 2023-02-14T14:21:50 | 2023-02-14T14:21:50 | 388,068,949 | 752 | 162 | Apache-2.0 | 2022-07-12T08:28:09 | 2021-07-21T09:45:41 | Jupyter Notebook | UTF-8 | Python | false | false | 7,927 | py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import torch
from mmaction.datasets.pipelines import Resize
from .base import BaseDataset
from .builder import DATASETS
@DATASETS.register_module()
class RawframeDataset(BaseDataset):
    """Rawframe dataset for action recognition.
    The dataset loads raw frames and apply specified transforms to return a
    dict containing the frame tensors and other information.
    The ann_file is a text file with multiple lines, and each line indicates
    the directory to frames of a video, total frames of the video and
    the label of a video, which are split with a whitespace.
    Example of a annotation file:
    .. code-block:: txt
        some/directory-1 163 1
        some/directory-2 122 1
        some/directory-3 258 2
        some/directory-4 234 2
        some/directory-5 295 3
        some/directory-6 121 3
    Example of a multi-class annotation file:
    .. code-block:: txt
        some/directory-1 163 1 3 5
        some/directory-2 122 1 2
        some/directory-3 258 2
        some/directory-4 234 2 4 6 8
        some/directory-5 295 3
        some/directory-6 121 3
    Example of a with_offset annotation file (clips from long videos), each
    line indicates the directory to frames of a video, the index of the start
    frame, total frames of the video clip and the label of a video clip, which
    are split with a whitespace.
    .. code-block:: txt
        some/directory-1 12 163 3
        some/directory-2 213 122 4
        some/directory-3 100 258 5
        some/directory-4 98 234 2
        some/directory-5 0 295 3
        some/directory-6 50 121 3
    Args:
        ann_file (str): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transforms.
        data_prefix (str | None): Path to a directory where videos are held.
            Default: None.
        test_mode (bool): Store True when building test or validation dataset.
            Default: False.
        filename_tmpl (str): Template for each filename.
            Default: 'img_{:05}.jpg'.
        with_offset (bool): Determines whether the offset information is in
            ann_file. Default: False.
        multi_class (bool): Determines whether it is a multi-class
            recognition dataset. Default: False.
        num_classes (int | None): Number of classes in the dataset.
            Default: None.
        modality (str): Modality of data. Support 'RGB', 'Flow'.
            Default: 'RGB'.
        sample_by_class (bool): Sampling by class, should be set `True` when
            performing inter-class data balancing. Only compatible with
            `multi_class == False`. Only applies for training. Default: False.
        power (float): We support sampling data with the probability
            proportional to the power of its label frequency (freq ^ power)
            when sampling data. `power == 1` indicates uniformly sampling all
            data; `power == 0` indicates uniformly sampling all classes.
            Default: 0.
        dynamic_length (bool): If the dataset length is dynamic (used by
            ClassSpecificDistributedSampler). Default: False.
    """
    def __init__(self,
                 ann_file,
                 pipeline,
                 data_prefix=None,
                 test_mode=False,
                 filename_tmpl='img_{:05}.jpg',
                 with_offset=False,
                 multi_class=False,
                 num_classes=None,
                 start_index=1,
                 modality='RGB',
                 sample_by_class=False,
                 power=0.,
                 dynamic_length=False,
                 **kwargs):
        self.filename_tmpl = filename_tmpl
        self.with_offset = with_offset
        super().__init__(
            ann_file,
            pipeline,
            data_prefix,
            test_mode,
            multi_class,
            num_classes,
            start_index,
            modality,
            sample_by_class=sample_by_class,
            power=power,
            dynamic_length=dynamic_length)
        # Consumed by prepare_train_frames when idx arrives as a
        # (index, short_cycle_idx) tuple; see that method for details.
        self.short_cycle_factors = kwargs.get('short_cycle_factors',
                                              [0.5, 0.7071])
        self.default_s = kwargs.get('default_s', (224, 224))
    def load_annotations(self):
        """Load annotation file to get video information."""
        if self.ann_file.endswith('.json'):
            return self.load_json_annotations()
        video_infos = []
        with open(self.ann_file, 'r') as fin:
            for line in fin:
                line_split = line.strip().split()
                video_info = {}
                idx = 0
                # idx walks the whitespace-separated fields of the line.
                # idx for frame_dir
                frame_dir = line_split[idx]
                if self.data_prefix is not None:
                    frame_dir = osp.join(self.data_prefix, frame_dir)
                video_info['frame_dir'] = frame_dir
                idx += 1
                if self.with_offset:
                    # idx for offset and total_frames
                    video_info['offset'] = int(line_split[idx])
                    video_info['total_frames'] = int(line_split[idx + 1])
                    idx += 2
                else:
                    # idx for total_frames
                    video_info['total_frames'] = int(line_split[idx])
                    idx += 1
                # idx for label[s]; the remainder of the line is all labels.
                label = [int(x) for x in line_split[idx:]]
                assert label, f'missing label in line: {line}'
                if self.multi_class:
                    assert self.num_classes is not None
                    video_info['label'] = label
                else:
                    assert len(label) == 1
                    video_info['label'] = label[0]
                video_infos.append(video_info)
        return video_infos
    def prepare_train_frames(self, idx):
        """Prepare the frames for training given the index."""
        def pipeline_for_a_sample(idx):
            # Deep-copy so pipeline transforms cannot mutate the cached info.
            results = copy.deepcopy(self.video_infos[idx])
            results['filename_tmpl'] = self.filename_tmpl
            results['modality'] = self.modality
            results['start_index'] = self.start_index
            # prepare tensor in getitem
            if self.multi_class:
                onehot = torch.zeros(self.num_classes)
                onehot[results['label']] = 1.
                results['label'] = onehot
            return self.pipeline(results)
        if isinstance(idx, tuple):
            # (index, short_cycle_idx): temporarily override the scale of the
            # last Resize transform for this sample, run the pipeline, then
            # restore the original scale.
            index, short_cycle_idx = idx
            last_resize = None
            for trans in self.pipeline.transforms:
                if isinstance(trans, Resize):
                    last_resize = trans
            origin_scale = self.default_s
            long_cycle_scale = last_resize.scale
            if short_cycle_idx in [0, 1]:
                # 0 and 1 is hard-coded as PySlowFast
                scale_ratio = self.short_cycle_factors[short_cycle_idx]
                target_scale = tuple(
                    [int(round(scale_ratio * s)) for s in origin_scale])
                last_resize.scale = target_scale
            res = pipeline_for_a_sample(index)
            last_resize.scale = long_cycle_scale
            return res
        else:
            return pipeline_for_a_sample(idx)
    def prepare_test_frames(self, idx):
        """Prepare the frames for testing given the index."""
        results = copy.deepcopy(self.video_infos[idx])
        results['filename_tmpl'] = self.filename_tmpl
        results['modality'] = self.modality
        results['start_index'] = self.start_index
        # prepare tensor in getitem
        if self.multi_class:
            onehot = torch.zeros(self.num_classes)
            onehot[results['label']] = 1.
            results['label'] = onehot
        return self.pipeline(results)
| [
"caixiaochen@4ParadigmdeMacBook-Pro.local"
] | caixiaochen@4ParadigmdeMacBook-Pro.local |
1bc8f879513747c4fcd355558feb0b1ee673f864 | c11cd1d6a99eafa740c3aa6d9a9e90d622af9630 | /examples/ConvolutionalPoseMachines/load-cpm.py | b8999c203ea53bde85e9ba5e4158f11b1413ecf9 | [
"Apache-2.0"
] | permissive | bzhong2/tensorpack | 0c06e45ed2357cedd0d459511a2c85a07b522d2c | 0202038159fda7aa4baa2e249903b929949e0976 | refs/heads/master | 2021-07-02T19:05:10.948197 | 2017-09-24T09:47:13 | 2017-09-24T09:47:13 | 105,573,277 | 1 | 0 | null | 2017-10-02T19:02:49 | 2017-10-02T19:02:48 | null | UTF-8 | Python | false | false | 4,599 | py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: load-cpm.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import cv2
import tensorflow as tf
import numpy as np
import argparse
from tensorpack import *
from tensorpack.utils import viz
from tensorpack.utils.argtools import memoized
"""
15 channels:
0-1 head, neck
2-4 right shoulder, right elbow, right wrist
5-7 left shoulder, left elbow, left wrist
8-10 right hip, right knee, right ankle
11-13 left hip, left knee, left ankle
14: background
"""
def colorize(img, heatmap):
    """ img: bgr, [0,255]
        heatmap: [0,1]
    """
    # Reverse the channel axis (RGB -> BGR) so it matches the BGR input image.
    heatmap = viz.intensity_to_rgb(heatmap, cmap='jet')[:, :, ::-1]
    # 50/50 blend of the image and the colorized heatmap.
    return img * 0.5 + heatmap * 0.5
@memoized
def get_gaussian_map():
    """Return a (1, 368, 368, 1) float32 Gaussian centered at (184, 184).

    Vectorized replacement for the original per-pixel double loop:
    g[y, x] = exp(-((x - 184)^2 + (y - 184)^2) / (2 * sigma^2)).
    Computed in float64 and cast to float32 at the end, matching the
    original's exp-in-float64 / store-in-float32 behaviour exactly.
    """
    sigma = 21
    d = np.arange(368, dtype='float64') - 368 / 2
    # Outer sum of squared axis distances via broadcasting: dist_sq[y, x].
    dist_sq = d[np.newaxis, :] ** 2 + d[:, np.newaxis] ** 2
    gaussian_map = np.exp(-dist_sq / 2.0 / (sigma ** 2)).astype('float32')
    return gaussian_map.reshape((1, 368, 368, 1))
class Model(ModelDesc):
    """CPM (Convolutional Pose Machines) inference graph.

    A shared VGG-style trunk feeds six sequential refinement stages; each
    stage consumes the previous stage's belief maps concatenated with the
    shared features and a fixed center Gaussian map.
    """

    def _get_inputs(self):
        # 368x368 BGR image and 15-channel label maps (14 joints + background).
        return [InputDesc(tf.float32, (None, 368, 368, 3), 'input'),
                InputDesc(tf.float32, (None, 368, 368, 15), 'label'),
                ]

    def _build_graph(self, inputs):
        image, label = inputs
        # Normalize pixels from [0, 255] to roughly [-0.5, 0.5].
        image = image / 256.0 - 0.5

        gmap = tf.constant(get_gaussian_map())
        # Pad 368 -> 369 so the stride-8 pooling below yields exactly 46x46,
        # matching the trunk's output resolution after three 2x poolings.
        gmap = tf.pad(gmap, [[0, 0], [0, 1], [0, 1], [0, 0]])
        pool_center = AvgPooling('mappool', gmap, 9, stride=8, padding='VALID')
        with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu,
                      W_init=tf.random_normal_initializer(stddev=0.01)):
            shared = (LinearWrap(image)
                      .Conv2D('conv1_1', 64)
                      .Conv2D('conv1_2', 64)
                      .MaxPooling('pool1', 2)
                      # 184
                      .Conv2D('conv2_1', 128)
                      .Conv2D('conv2_2', 128)
                      .MaxPooling('pool2', 2)
                      # 92
                      .Conv2D('conv3_1', 256)
                      .Conv2D('conv3_2', 256)
                      .Conv2D('conv3_3', 256)
                      .Conv2D('conv3_4', 256)
                      .MaxPooling('pool3', 2)
                      # 46
                      .Conv2D('conv4_1', 512)
                      .Conv2D('conv4_2', 512)
                      .Conv2D('conv4_3_CPM', 256)
                      .Conv2D('conv4_4_CPM', 256)
                      .Conv2D('conv4_5_CPM', 256)
                      .Conv2D('conv4_6_CPM', 256)
                      .Conv2D('conv4_7_CPM', 128)())

        def add_stage(stage, l):
            # One refinement stage: previous belief maps + shared features
            # + pooled center map -> new 15-channel belief maps.
            l = tf.concat([l, shared, pool_center], 3,
                          name='concat_stage{}'.format(stage))
            for i in range(1, 6):
                l = Conv2D('Mconv{}_stage{}'.format(i, stage), l, 128)
            l = Conv2D('Mconv6_stage{}'.format(stage), l, 128, kernel_shape=1)
            l = Conv2D('Mconv7_stage{}'.format(stage),
                       l, 15, kernel_shape=1, nl=tf.identity)
            return l

        with argscope(Conv2D, kernel_shape=7, nl=tf.nn.relu):
            out1 = (LinearWrap(shared)
                    .Conv2D('conv5_1_CPM', 512, kernel_shape=1)
                    .Conv2D('conv5_2_CPM', 15, kernel_shape=1, nl=tf.identity)())
            out2 = add_stage(2, out1)
            out3 = add_stage(3, out2)
            out4 = add_stage(4, out3)
            out5 = add_stage(5, out4)
            # BUGFIX: stage 6 must refine stage 5's output.  It previously
            # consumed out4, leaving out5 dead and skipping one refinement
            # step of the sequential CPM architecture.
            out6 = add_stage(6, out5)
        # Named tensor fetched by name ('resized_map') in run_test() below.
        resized_map = tf.image.resize_bilinear(out6,
                                               [368, 368], name='resized_map')
def run_test(model_path, img_file):
    """Run CPM on one image and write the heatmap overlay to output.jpg."""
    param_dict = np.load(model_path, encoding='latin1').item()
    predict_func = OfflinePredictor(PredictConfig(
        model=Model(),
        session_init=DictRestore(param_dict),
        input_names=['input'],
        output_names=['resized_map']
    ))
    im = cv2.imread(img_file, cv2.IMREAD_COLOR).astype('float32')
    im = cv2.resize(im, (368, 368))
    out = predict_func([[im]])[0][0]
    # Sum the 14 joint channels (channel 14 is background) into one intensity map.
    hm = out[:, :, :14].sum(axis=2)
    viz = colorize(im, hm)
    cv2.imwrite("output.jpg", viz)
if __name__ == '__main__':
    # CLI entry: python load-cpm.py --load model.npy --input image.jpg
    parser = argparse.ArgumentParser()
    parser.add_argument('--load', required=True, help='.npy model file')
    parser.add_argument('--input', required=True, help='input image')
    args = parser.parse_args()
    run_test(args.load, args.input)
| [
"ppwwyyxxc@gmail.com"
] | ppwwyyxxc@gmail.com |
2b199881e94f19fe4b040e1da9ae0108a52c857f | 292d23019c18d0b724aed88f04a0f20b5b616bb9 | /Python1/Crawler/douban_movie1.py | b845a75327d8ec2485e72a9f2f10dabbb124ec4b | [] | no_license | RedAnanas/macbendi | 6f1f6fd41ed1fe8b71408dffa0b964464bd00aa8 | 8d5aa39d9389937f0e0c3f7a7d6532537f33cda8 | refs/heads/master | 2023-06-11T18:57:31.061009 | 2021-06-29T15:04:56 | 2021-06-29T15:04:56 | 380,759,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,912 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2019/2/7 17:41
# @Software: PyCharm
import requests
from bs4 import BeautifulSoup
# Scrape the Douban Movie Top250 listing and append one record per movie to a
# text file.  NOTE(review): range(0, 25, 25) yields only 0, so only the first
# page is fetched — confirm whether all 10 pages were intended.
base_url = 'https://movie.douban.com/top250?start=%d&filter='
for page in range(0, 25, 25):
    allurl = base_url %int(page)
    resp=requests.get(allurl)
    soup = BeautifulSoup(resp.text, 'lxml')
    all_names = soup.find_all('span', class_='title')
    names = [a.get_text() for a in all_names]
    all_names1 = soup.find_all('span', class_='other')
    names1 = [a1.get_text() for a1 in all_names1]
    all_grade = soup.find_all('span', class_='rating_num')
    grade = [a.get_text() for a in all_grade]
    all_director = soup.find_all('p', class_='')
    director = [a.get_text() for a in all_director]
    all_intro = soup.find_all('span', class_='inq')
    intro = [a.get_text() for a in all_intro]
    # NOTE(review): the loop targets shadow the lists built above, and zip
    # truncates to the shortest sequence — 'title' spans may appear twice per
    # movie while 'inq' can be missing, so the pairing of fields is suspect.
    for names, names1,grade, director, intro in zip(all_names, all_names1, all_grade,all_director, all_intro):
        name = '影名:' + str(names.text) + '\n'
        author = '别名:' + str(names1.text) + '\n'
        grade = '评分:' + str(grade.text) + '\n'
        # str.replace(u'\xa0', u' ')
        score = '导演:' + str(director.text).replace(' ','') + '\n'
        # score = '导演:' + str(director.text) + '\n'
        # NOTE(review): 'sum' shadows the builtin of the same name.
        sum = '简介:' + str(intro.text) + '\n'
        data = name + author + grade + score + sum
        # print(data)
        # output file name
        filename = '豆瓣电影Top250.txt'
        # append the record to the file
        with open(filename, 'a', encoding='utf-8') as f:
            # write the record followed by a separator line
            f.writelines(data + '=======================' + '\n')
            print('保存成功')
# print(names)
# print(names1)
# print(director)
# print(intro)
# all_author = soup.find_all('p', class_='pl')
# author = [b.text for b in all_author]
# # print(author)
#
# all_grade = soup.find_all('span',class_='rating_nums')
# grade = [c.text for c in all_grade]
# # print(grade)
#
# all_intro = soup.find_all('span',class_='inq')
# intro = [d.text for d in all_intro]
# # print(intro)
#
# for name, author, score, sum in zip(names, all_author, all_grade, all_intro):
# name = '书名:' + str(name) + '\n'
# author = '作者:' + str(author.text) + '\n'
# score = '评分:' + str(score.text) + '\n'
# sum = '简介:' + str(sum.text) + '\n'
# data = name + author + score + sum
# # print(data)
#
# # 文件名
# filename = '豆瓣图书Top250.txt'
# # 保存文件操作
# with open(filename, 'a', encoding='utf-8') as f:
# # 保存数据
# f.writelines(data + '=======================' + '\n')
# print('保存成功')
| [
"1315157388@qq.com"
] | 1315157388@qq.com |
13700a8e3b257b54c718ee11ebc82eb267a92b87 | af8f0d50bb11279c9ff0b81fae97f754df98c350 | /src/tests/account/registration.py | 8717aa6a49451c1b33cfc8c6aa4bdab639888e5e | [
"Apache-2.0"
] | permissive | DmytroKaminskiy/ltt | 592ed061efe3cae169a4e01f21d2e112e58714a1 | d08df4d102e678651cd42928e2343733c3308d71 | refs/heads/master | 2022-12-18T09:56:36.077545 | 2020-09-20T15:57:35 | 2020-09-20T15:57:35 | 292,520,616 | 0 | 0 | Apache-2.0 | 2020-09-20T15:49:58 | 2020-09-03T09:09:26 | HTML | UTF-8 | Python | false | false | 4,191 | py | from urllib.parse import urlparse
from account.models import User
from django.conf import settings
from django.core import mail
from django.urls import reverse, reverse_lazy
from tests.const import URLS_PATTERN
# Registration endpoint, resolved lazily at first use (the URLconf may not be
# loaded when this module is imported).
URL = reverse_lazy('account:django_registration_register')
def test_registration_get(client):
    """GET on the registration page renders the form."""
    response = client.get(URL)
    assert response.status_code == 200
    assert 'form' in response.context_data
def test_registration_create_empty_data(client):
    """POST with no data: required-field errors, no user created, no email sent."""
    user_count = User.objects.count()
    response = client.post(URL, data={})
    assert response.status_code == 200
    assert response.context_data['form'].errors == {
        'email': ['This field is required.'],
        'password1': ['This field is required.'],
        'password2': ['This field is required.'],
    }
    assert len(mail.outbox) == 0
    assert User.objects.count() == user_count
def test_registration_create_different_password(client, fake):
    """Mismatched passwords are rejected; nothing is created or sent."""
    user_count = User.objects.count()
    data = {
        'email': fake.email(),
        'password1': fake.password(),
        'password2': fake.password(),
    }
    response = client.post(URL, data=data)
    assert response.status_code == 200
    assert response.context_data['form'].errors == {
        'password2': ["The two password fields didn't match."]
    }
    assert len(mail.outbox) == 0
    assert User.objects.count() == user_count
def test_registration_create_same_password(client, fake):
    """Full happy path: register, activate via the emailed link, then log in.

    Also covers: duplicate registration, wrong password, wrong email.
    """
    user_count = User.objects.count()
    data = {
        'email': fake.email(),
        'password1': fake.password(),
    }
    data['password2'] = data['password1']
    response = client.post(URL, data=data)
    assert response.status_code == 302
    assert response['Location'] == reverse('django_registration_complete')
    assert User.objects.count() == user_count + 1
    user = User.objects.last()
    assert user.email == data['email']
    # The account stays inactive until the activation link is followed.
    assert user.is_active is False
    assert len(mail.outbox) == 1
    email = mail.outbox[0]
    assert email.to == [data['email']]
    assert email.cc == []
    assert email.bcc == []
    assert email.reply_to == []
    assert email.from_email == settings.DEFAULT_FROM_EMAIL
    assert email.subject == 'Activate your Account'
    assert 'Thanks for signing up!' in email.body
    # Extract the last URL from the email body: the activation link.
    url = urlparse(URLS_PATTERN.findall(email.body)[-1])
    response = client.get(url.path)
    assert response.status_code == 302
    assert response['Location'] == reverse('django_registration_activation_complete')
    user.refresh_from_db()
    assert user.is_active is True
    # post same data again
    response = client.post(URL, data=data)
    assert response.status_code == 200
    assert response.context_data['form'].errors == {
        'email': ['This email address is already in use. Please supply a different email address.'],
    }
    assert User.objects.count() == user_count + 1
    assert len(mail.outbox) == 1
    assert response.wsgi_request.user.is_authenticated is False
    # test login wrong password
    response = client.post(
        reverse('login'),
        data={'username': data['email'], 'password': 'wrong-password'},
    )
    assert response.status_code == 200
    assert response.context_data['form'].errors == {
        '__all__': ['Please enter a correct email address and password. Note that both fields may be case-sensitive.']
    }
    assert response.wsgi_request.user.is_authenticated is False
    # test login wrong email
    response = client.post(
        reverse('login'),
        data={'username': fake.email(), 'password': data['password1']},
    )
    assert response.status_code == 200
    assert response.context_data['form'].errors == {
        '__all__': ['Please enter a correct email address and password. Note that both fields may be case-sensitive.']
    }
    assert response.wsgi_request.user.is_authenticated is False
    # test login correct
    assert response.wsgi_request.user.is_authenticated is False
    response = client.post(
        reverse('login'),
        data={'username': data['email'], 'password': data['password1']},
    )
    assert response.status_code == 302
    assert response.wsgi_request.user.is_authenticated is True
| [
"dmytro.kaminskyi92@gmail.com"
] | dmytro.kaminskyi92@gmail.com |
1c6a1215b0db21e8519fe8f44c4fd556a89e12d7 | 78c3808342711fe04e662cfea3d394e34841f2fb | /docs/rg/rgkod11.py | 05478f163c5d469994dbf4f3844fdac574ad4c29 | [] | no_license | astefaniuk/linetc | cd0f8aa1bb2858e971caddaf6e6396363ca50a47 | b23b3b4747dded19f7030862bf486a9e0f65b4e0 | refs/heads/master | 2021-01-22T13:08:15.266332 | 2015-06-12T21:37:59 | 2015-06-12T21:37:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | # idź bezpiecznie na najbliższego wroga
# wersja oparta na zbiorach
if wrogowie:
najblizszy_wrog = mindist(wrogowie,self.location)
else:
najblizszy_wrog = rg.CENTER_POINT
| [
"xinulsw@gmail.com"
] | xinulsw@gmail.com |
3510224b9ff10ba629557b67a1f2a7494d96ed42 | 4e353bf7035eec30e5ad861e119b03c5cafc762d | /QtCore/QElapsedTimer.py | 907ac3c36dc910112b15b9bf46aa8486cbb0d152 | [] | no_license | daym/PyQt4-Stubs | fb79f54d5c9a7fdb42e5f2506d11aa1181f3b7d5 | 57d880c0d453641e31e1e846be4087865fe793a9 | refs/heads/master | 2022-02-11T16:47:31.128023 | 2017-10-06T15:32:21 | 2017-10-06T15:32:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,312 | py | # encoding: utf-8
# module PyQt4.QtCore
# from C:\Python27\lib\site-packages\PyQt4\QtCore.pyd
# by generator 1.145
# no doc
# imports
import sip as __sip
class QElapsedTimer(): # skipped bases: <type 'sip.simplewrapper'>
    """
    QElapsedTimer()
    QElapsedTimer(QElapsedTimer)
    """
    # Auto-generated IDE stub: every body is a placeholder; the real
    # implementations live in the compiled PyQt4 binding (QtCore.pyd).
    def clockType(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.clockType() -> QElapsedTimer.ClockType """
        pass
    def elapsed(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.elapsed() -> int """
        return 0
    def hasExpired(self, p_int): # real signature unknown; restored from __doc__
        """ QElapsedTimer.hasExpired(int) -> bool """
        return False
    def invalidate(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.invalidate() """
        pass
    def isMonotonic(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.isMonotonic() -> bool """
        return False
    def isValid(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.isValid() -> bool """
        return False
    def msecsSinceReference(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.msecsSinceReference() -> int """
        return 0
    def msecsTo(self, QElapsedTimer): # real signature unknown; restored from __doc__
        """ QElapsedTimer.msecsTo(QElapsedTimer) -> int """
        return 0
    def nsecsElapsed(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.nsecsElapsed() -> int """
        return 0
    def restart(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.restart() -> int """
        return 0
    def secsTo(self, QElapsedTimer): # real signature unknown; restored from __doc__
        """ QElapsedTimer.secsTo(QElapsedTimer) -> int """
        return 0
    def start(self): # real signature unknown; restored from __doc__
        """ QElapsedTimer.start() """
        pass
    def __eq__(self, y): # real signature unknown; restored from __doc__
        """ x.__eq__(y) <==> x==y """
        pass
    def __ge__(self, y): # real signature unknown; restored from __doc__
        """ x.__ge__(y) <==> x>=y """
        pass
    def __gt__(self, y): # real signature unknown; restored from __doc__
        """ x.__gt__(y) <==> x>y """
        pass
    def __init__(self, QElapsedTimer=None): # real signature unknown; restored from __doc__ with multiple overloads
        pass
    def __le__(self, y): # real signature unknown; restored from __doc__
        """ x.__le__(y) <==> x<=y """
        pass
    def __lt__(self, y): # real signature unknown; restored from __doc__
        """ x.__lt__(y) <==> x<y """
        pass
    def __ne__(self, y): # real signature unknown; restored from __doc__
        """ x.__ne__(y) <==> x!=y """
        pass
    __weakref__ = property(lambda self: object(), lambda self, v: None, lambda self: None)  # default
    """list of weak references to the object (if defined)"""
    # Integer constants — presumably the members of the ClockType enum
    # returned by clockType(); confirm against the Qt documentation.
    MachAbsoluteTime = 3
    MonotonicClock = 1
    PerformanceCounter = 4
    SystemTime = 0
    TickCounter = 2
| [
"thekewlstore@gmail.com"
] | thekewlstore@gmail.com |
fa7f36f70571120cbb262878199b1a168357ff47 | d9ecb105ed56979691f7776238301a3d0564665e | /ParameterUI/__init__.py | ac2bbe925bef7623a4a85333d4e5c6e16cb5d4d1 | [] | no_license | muyr/hapi_test | 4dcc5eb8e5aea4a18556002aec3d68301cb09024 | 910ca037d9afc8fd112ff6dc4fc8686f7f188eb0 | refs/heads/main | 2023-03-28T19:37:25.518719 | 2021-03-30T12:03:20 | 2021-03-30T12:03:20 | 309,288,978 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 787 | py | from MParameterLabel import MParameterLabel
from MParameterString import MParameterString
from MParameterStringChoice import MParameterStringChoice
from MParameterInteger import MParameterInteger
from MParameterIntegerChoice import MParameterIntegerChoice
from MParameterFloat import MParameterFloat
from MParameterFolder import MParameterFolder
from MParameterFolderList import MParameterFolderList
from MParameterToggle import MParameterToggle
from MParameterMulti import MParameterMulti
from MParameterMultiInstance import MParameterMultiInstance
from MParameterButton import MParameterButton
from MParameterColor import MParameterColor
from MParameterSeparator import MParameterSeparator
from MParameterPathFile import MParameterPathFile
from MParameterNode import MParameterNode
| [
"muyanru345@163.com"
] | muyanru345@163.com |
ad94f6c7e328945e686c5c49d7071033fa26365a | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03549/s009493844.py | b43be6ebe855df366e38343d97e9fb47993b6a8d | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 608 | py | import sys
from collections import defaultdict, deque, Counter
import math
# import copy
from bisect import bisect_left, bisect_right
# import heapq
# sys.setrecursionlimit(1000000)
# input aliases
input = sys.stdin.readline  # faster bulk reads; lines keep their trailing '\n'
getS = lambda: input().strip()  # one stripped line
getN = lambda: int(input())  # one integer
getList = lambda: list(map(int, input().split()))  # list of integers
getZList = lambda: [int(x) - 1 for x in input().split()]  # integers shifted to 0-based
INF = 10 ** 20
MOD = 10**9 + 7  # common contest prime modulus
divide = lambda x: pow(x, MOD-2, MOD)  # modular inverse via Fermat's little theorem
def main():
    """Read N and M, then print (1900*M + 100*(N-M)) * 2**M."""
    total_cases, tle_cases = getList()
    # Time for a single full run over all cases: 1900 ms per TLE case,
    # 100 ms for each of the remaining ones.
    single_run = 1900 * tle_cases + 100 * (total_cases - tle_cases)
    # Scale the single-run time by 2 ** (number of TLE cases).
    print(single_run * (2 ** tle_cases))
if __name__ == "__main__":
    main()
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
c6c4e0a698e2185e960aa28af3da3501e4305561 | a2cbd654d7126c21442111fb315454561790b579 | /backend/dating/api/v1/urls.py | f32a7d281cd91a40f3f177ab0d3f660a362883a6 | [] | no_license | crowdbotics-apps/breakify-23632 | d43c9cfb23bf185c10499301f6e14ec441181907 | 14bc9010e101062f22a98837b5ac7e10de0511bf | refs/heads/master | 2023-02-09T11:07:18.680204 | 2020-12-31T16:20:19 | 2020-12-31T16:20:19 | 325,820,032 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 620 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .viewsets import (
    SettingViewSet,
    ProfileViewSet,
    InboxViewSet,
    DislikeViewSet,
    MatchViewSet,
    UserPhotoViewSet,
    LikeViewSet,
)

# One CRUD endpoint per model viewset; DefaultRouter also exposes a
# browsable API root listing all registered routes.
router = DefaultRouter()
router.register("inbox", InboxViewSet)
router.register("profile", ProfileViewSet)
router.register("setting", SettingViewSet)
router.register("dislike", DislikeViewSet)
router.register("like", LikeViewSet)
router.register("match", MatchViewSet)
router.register("userphoto", UserPhotoViewSet)

# Mount every router-generated URL at this module's root.
urlpatterns = [
    path("", include(router.urls)),
]
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
7bcc3b56f063f47763a83bf930c3fc789819f161 | 0ffd4524067a737faf34bb60c4041a23258ac5cd | /assignment1/q1_softmax.py | 9fc47b3c18a9c55b546f6e0848605fbe5bffcbe2 | [] | no_license | gjwei/cs224n | 6dc410ab2efc8dfc665711daac5dd1e396ae7c8f | 1ebdd31d5f3943547dc1654c756387ae5d7ef9f3 | refs/heads/master | 2021-08-23T02:38:47.120738 | 2017-12-02T16:11:32 | 2017-12-02T16:11:32 | 112,850,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,168 | py | import numpy as np
def softmax(x):
    """Compute the softmax function for each row of the input x.

    The row maximum is subtracted before exponentiation; softmax is
    invariant to this shift and it prevents overflow in np.exp for
    large inputs (problem 1(a) of the written assignment).

    Args:
        x: numpy array, either 1-D (treated as a single row) or 2-D
            (softmax applied independently to each row).

    Returns:
        A new array of the same shape whose (row) entries sum to 1.
        Unlike the original implementation, the caller's array is NOT
        mutated in place (`x -= max` used to write into the argument).
    """
    ### YOUR CODE HERE
    if x.ndim > 1:
        # keepdims=True keeps the reductions broadcastable row-wise.
        shifted = x - np.max(x, axis=1, keepdims=True)
        exps = np.exp(shifted)
        x = exps / np.sum(exps, axis=1, keepdims=True)
    else:
        shifted = x - np.max(x)
        exps = np.exp(shifted)
        x = exps / np.sum(exps)
    ### END YOUR CODE
    return x
def test_softmax_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    # Parenthesized single-argument print works under both Python 2
    # and Python 3; the original bare `print` statements were
    # Python-2-only syntax.
    print("Running basic tests...")
    test1 = softmax(np.array([1, 2]))
    print(test1)
    assert np.amax(np.fabs(test1 - np.array(
        [0.26894142, 0.73105858]))) <= 1e-6
    test2 = softmax(np.array([[1001, 1002], [3, 4]]))
    print(test2)
    assert np.amax(np.fabs(test2 - np.array(
        [[0.26894142, 0.73105858], [0.26894142, 0.73105858]]))) <= 1e-6
    test3 = softmax(np.array([[-1001, -1002]]))
    print(test3)
    assert np.amax(np.fabs(test3 - np.array(
        [0.73105858, 0.26894142]))) <= 1e-6
    print("You should verify these results!\n")
def test_softmax():
    """
    Use this space to test your softmax implementation by running:
    python q1_softmax.py
    This function will not be called by the autograder, nor will
    your tests be graded.
    """
    # Python-2-only bare print statements replaced by the 2/3-compatible
    # single-argument call form.
    print("Running your tests...")
    ### YOUR CODE HERE
    a = np.random.random(size=(100, 49)) * 10
    result = softmax(a)
    # The original computed `result` without checking it; assert the
    # defining property: every row is a probability distribution.
    assert result.shape == a.shape
    assert np.allclose(result.sum(axis=1), 1.0)
    ### END YOUR CODE


if __name__ == "__main__":
    test_softmax_basic()
    test_softmax()
| [
"1449894353@qq.com"
] | 1449894353@qq.com |
efeea6d5486e83703709910a52331973707ea48f | e47875e83c19f8e7ec56fb1cf2ae7e67e650f15b | /kRPC/OrbitalLaunch/old/LaunchIntoOrbit_2.py | 4237e0b917cd2ff8ff4d5bc79f569b6e41f796bc | [] | no_license | crashtack/KSP | a69b031ca942adb9fd798de034605b2b2c229b8d | 2549319c116a4687639a0ebb59adafd8b6ce1ad9 | refs/heads/master | 2021-01-19T04:26:20.143710 | 2017-12-07T05:17:50 | 2017-12-07T05:17:50 | 63,728,682 | 0 | 0 | null | 2017-12-07T05:17:51 | 2016-07-19T21:17:52 | null | UTF-8 | Python | false | false | 2,517 | py | import krpc, time, math
turn_start_altitude = 250
turn_end_altitude = 90000
target_altitude = 200000
conn = krpc.connect(name='Launch Science Station to Orbit')
vessel = conn.space_center.active_vessel
# Set up streams for telemetry
ut = conn.add_stream(getattr, conn.space_center, 'ut')
altitude = conn.add_stream(getattr, vessel.flight(), 'mean_altitude')
apoapsis = conn.add_stream(getattr, vessel.orbit, 'apoapsis_altitude')
periapsis = conn.add_stream(getattr, vessel.orbit, 'periapsis_altitude')
eccentricity = conn.add_stream(getattr, vessel.orbit, 'eccentricity')
stage_2_resources = vessel.resources_in_decouple_stage(stage=2, cumulative=False)
stage_3_resources = vessel.resources_in_decouple_stage(stage=3, cumulative=False)
srb_fuel = conn.add_stream(stage_3_resources.amount, 'SolidFuel')
launcher_fuel = conn.add_stream(stage_2_resources.amount, 'LiquidFuel')
# Pre-launch setup
vessel.control.sas = False
vessel.control.rcs = False
vessel.control.throttle = 1
# Countdown...
print('3...'); time.sleep(1)
print('2...'); time.sleep(1)
print('1...'); time.sleep(1)
print('Launch!')
# Activate the first stage
vessel.control.activate_next_stage()
vessel.auto_pilot.engage()
vessel.auto_pilot.target_pitch_and_heading(90, 90)
# Main ascent loop
srbs_separated = False
turn_angle = 0
while True:
time.sleep(.05)
print("altitude: %.2f" % altitude())
# Gravity turn
if altitude() > turn_start_altitude and altitude() < turn_end_altitude:
frac = (altitude() - turn_start_altitude) / (turn_end_altitude - turn_start_altitude)
new_turn_angle = frac * 90
if abs(new_turn_angle - turn_angle) > 0.5:
turn_angle = new_turn_angle
vessel.auto_pilot.target_pitch_and_heading(90-turn_angle, 90)
# Separate SRBs when finished
if not srbs_separated:
#print("srb fuel: %f" % srb_fuel())
if srb_fuel() < .1:
time.sleep(.5)
vessel.control.activate_next_stage()
srbs_separated = True
print('SRBs separated')
# Decrease throttle when approaching target apoapsis
if apoapsis() > target_altitude*0.9:
print('Approaching target apoapsis')
break
# Disable engines when target apoapsis is reached
vessel.control.throttle = 0.25
while apoapsis() < target_altitude:
pass
print('Target apoapsis reached')
vessel.control.throttle = 0
# Wait until out of atmosphere
print('Coasting out of atmosphere')
while altitude() < 70500:
pass
print('Launch complete')
| [
"crashtack@gmail.com"
] | crashtack@gmail.com |
d67d3eaeff1fdd029f4ca5a75a83df6c79287ba1 | 7bc54bae28eec4b735c05ac7bc40b1a8711bb381 | /src/models/keras_model/AverageEmbedding.py | 3a6238d5bcbbe898432e3adcac1b93bdc85d2781 | [] | no_license | clover3/Chair | 755efd4abbd5f3f2fb59e9b1bc6e7bc070b8d05e | a2102ebf826a58efbc479181f1ebb5de21d1e49f | refs/heads/master | 2023-07-20T17:29:42.414170 | 2023-07-18T21:12:46 | 2023-07-18T21:12:46 | 157,024,916 | 0 | 0 | null | 2023-02-16T05:20:37 | 2018-11-10T21:55:29 | Python | UTF-8 | Python | false | false | 828 | py | import keras
import tensorflow as tf
class WeightedSum(keras.layers.Layer):
def __init__(self):
super(WeightedSum, self).__init__()
def call(self, args):
x = args[0]
m = args[1]
s = tf.reduce_sum(x, axis=1)
d = tf.reduce_sum(tf.cast(tf.equal(m, 0), tf.float32), axis=-1)
s = s / tf.expand_dims(d, 1)
return s
def make_embedding_layer(params, name: str = 'embedding',) -> keras.layers.Layer:
return keras.layers.Embeddings(
params['embedding_input_dim'],
params['embedding_output_dim'],
trainable=params['embedding_trainable'],
name=name,
)
def build_model(word_index, embedding_matrix, embedding_dim, max_seq_length):
embedding = make_embedding_layer()
model = keras.Model(inputs=[query, doc], outputs=[out])
| [
"lesterny@gmail.com"
] | lesterny@gmail.com |
3d720c47a2290b71c9e834a808bf58802fdc1e16 | ecc1638f75a6ccd814923cb980e69d770c2525b7 | /Workspace for Python/studying file/class/Greeter.py | cac9ab6ba069fec9304a0df0bdd989b97385857d | [
"MIT"
] | permissive | ArchibaldChain/python-workspace | 5570e1df01f29f9916129e12d7fb1fb0608255d7 | 71890f296c376155e374b2096ac3d8f1d286b7d2 | refs/heads/master | 2022-12-01T03:00:37.224908 | 2020-08-04T10:04:47 | 2020-08-04T10:04:47 | 174,573,744 | 0 | 1 | MIT | 2022-11-22T04:02:07 | 2019-03-08T16:45:09 | Jupyter Notebook | UTF-8 | Python | false | false | 342 | py | class Greeter(object):
    # constructor
    def __init__(self, name):
        """Store the name this greeter will address."""
        self.name = name
# instance method
def greet(self, loud=False):
if loud:
print('HELLO, %s' % self.name.upper())
else:
print("hello, %s" % self.name)
# Demo: greet quietly, then loudly.
g = Greeter('Freed')
g.greet()
g.greet(True)
| [
"1156618773@qq.com"
] | 1156618773@qq.com |
8c9d773cf834776b2cef2c5b7df3300f7601ecc1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/bgp/bdevi.py | ced83174740c95c1b16930b1c648c607d05579b5 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,393 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class BDEvi(Mo):
    """Bridge Domain Ethernet VPN Instance managed object (bgpBDEvi).

    Auto-generated cobra SDK metadata class; per the file header, do
    not edit by hand. The class-level statements below build the
    ClassMeta/PropMeta descriptors the cobra runtime uses for this MO.
    """
    # Identity and access flags of the managed object.
    meta = ClassMeta("cobra.model.bgp.BDEvi")
    meta.moClassName = "bgpBDEvi"
    meta.rnFormat = "bdevi-[%(encap)s]"
    meta.category = MoCategory.REGULAR
    meta.label = "Bridge Domain Ethernet VPN Instance"
    meta.writeAccessMask = 0x8008020040001
    meta.readAccessMask = 0x8008020040001
    meta.isDomainable = False
    meta.isReadOnly = True
    meta.isConfigurable = False
    meta.isDeletable = False
    meta.isContextRoot = False
    # Allowed child MO classes and their relative-name prefixes.
    meta.childClasses.add("cobra.model.bgp.EVpnImetRoute")
    meta.childClasses.add("cobra.model.bgp.EVpnMacIpRoute")
    meta.childClasses.add("cobra.model.bgp.EVpnPfxRoute")
    meta.childClasses.add("cobra.model.bgp.CktEpEvi")
    meta.childClasses.add("cobra.model.bgp.RttP")
    meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnMacIpRoute", "evpnmaciprt-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnImetRoute", "evpnimetrt-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.bgp.EVpnPfxRoute", "evpnpfxrt-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.bgp.CktEpEvi", "cktepevi-"))
    meta.childNamesAndRnPrefix.append(("cobra.model.bgp.RttP", "rttp-"))
    # Placement in the MO tree.
    meta.parentClasses.add("cobra.model.bgp.Dom")
    meta.superClasses.add("cobra.model.nw.Conn")
    meta.superClasses.add("cobra.model.bgp.EviBase")
    meta.superClasses.add("cobra.model.nw.Item")
    meta.superClasses.add("cobra.model.nw.CpDom")
    meta.superClasses.add("cobra.model.nw.GEp")
    meta.superClasses.add("cobra.model.bgp.EncapEviBase")
    meta.rnPrefixes = [
        ('bdevi-', True),
    ]
    # Property descriptors; each `prop` is registered into meta.props.
    prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("deleteAll", "deleteall", 16384)
    prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
    prop._addConstant("ignore", "ignore", 4096)
    meta.props.add("childAction", prop)
    prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
    prop.label = "None"
    prop.isDn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("dn", prop)
    # "encap" is the naming property (appears in the RN format above).
    prop = PropMeta("str", "encap", "encap", 20695, PropCategory.REGULAR)
    prop.label = "Encapsulation"
    prop.isConfig = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    prop.isNaming = True
    meta.props.add("encap", prop)
    prop = PropMeta("str", "lcOwn", "lcOwn", 9, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "local"
    prop._addConstant("implicit", "implicit", 4)
    prop._addConstant("local", "local", 0)
    prop._addConstant("policy", "policy", 1)
    prop._addConstant("replica", "replica", 2)
    prop._addConstant("resolveOnBehalf", "resolvedonbehalf", 3)
    meta.props.add("lcOwn", prop)
    prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop.defaultValue = 0
    prop.defaultValueStr = "never"
    prop._addConstant("never", "never", 0)
    meta.props.add("modTs", prop)
    prop = PropMeta("str", "name", "name", 16434, PropCategory.REGULAR)
    prop.label = "Name"
    prop.isConfig = True
    prop.isAdmin = True
    prop.range = [(1, 128)]
    meta.props.add("name", prop)
    prop = PropMeta("str", "rd", "rd", 20636, PropCategory.REGULAR)
    prop.label = "Route Distinguisher"
    prop.isImplicit = True
    prop.isAdmin = True
    meta.props.add("rd", prop)
    prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
    prop.label = "None"
    prop.isRn = True
    prop.isImplicit = True
    prop.isAdmin = True
    prop.isCreateOnly = True
    meta.props.add("rn", prop)
    prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
    prop.label = "None"
    prop.isImplicit = True
    prop.isAdmin = True
    prop._addConstant("created", "created", 2)
    prop._addConstant("deleted", "deleted", 8)
    prop._addConstant("modified", "modified", 4)
    meta.props.add("status", prop)
    meta.namingProps.append(getattr(meta.props, "encap"))
    getattr(meta.props, "encap").needDelimiter = True

    def __init__(self, parentMoOrDn, encap, markDirty=True, **creationProps):
        """Create a BDEvi under *parentMoOrDn*, named by *encap*."""
        namingVals = [encap]
        Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"bkhoward@live.com"
] | bkhoward@live.com |
08a482edc4791f5a7c670fbd4fb08bbe58dbb95f | 6ee8765a4d98472d32b1aa22f9a885f4ab54adae | /select_with_filter.py | 8c55222aa57348a1abbd9c23bbe17e2ba7b5dcec | [] | no_license | rohitaswchoudhary/mysql_python | 31e9b55f30fdd87a7c7eb7d2b24e75f8d8cf58ce | 9160b7e374472ccfafadc39d6692bc7a798d99c0 | refs/heads/main | 2023-06-03T23:25:59.713109 | 2021-02-26T15:01:12 | 2021-02-26T15:01:12 | 377,754,217 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,207 | py | import mysql.connector
# Demo script: SELECT with WHERE filters using mysql-connector.
# NOTE(review): database credentials (including the root password) are
# hard-coded in source; move them to environment variables or a config
# file before publishing or deploying.
mydb = mysql.connector.connect(
    host='localhost',
    user='root',
    password='Rohitasw@2002',
    database = "mydatabase"
)
mycursor = mydb.cursor()
# Select With a Filter
# When selecting records from a table, you can filter the selection by using the "WHERE" statement:
sql = "SELECT * FROM customers WHERE address ='Park Lane 38'"
mycursor.execute(sql)
myresult = mycursor.fetchall()
for x in myresult:
    print(x)
# Wildcard Characters
# You can also select the records that starts, includes, or ends with a given letter or phrase.
# Use the % to represent wildcard characters:
sql = "SELECT * FROM customers WHERE address LIKE '%way%'"
mycursor.execute(sql)
myresult = mycursor.fetchall()
for x in myresult:
    print(x)
# Prevent SQL Injection
# When query values are provided by the user, you should escape the values.
# This is to prevent SQL injections, which is a common web hacking technique to destroy or misuse your database.
# The mysql.connector module has methods to escape query values:
# (Parameterized form below: the driver escapes `adr` safely.)
sql = "SELECT * FROM customers WHERE address = %s"
adr = ("Yellow Garden 2", )
mycursor.execute(sql, adr)
myresult = mycursor.fetchall()
for x in myresult:
    print(x)
| [
"you@example.com"
] | you@example.com |
a343f6cb593f4ac770460ec9fed2e071f2bc7a98 | e0f133b49f9f0f416f14da70a2cadb7011c0cb7b | /new_spider/downloader_sx/sx_render_local_downloader_phantomJS.py | 8cdd9dc021fba1d1b144c03a5b6055d148945024 | [] | no_license | cash2one/python_frame | ac52d052fd3698303f1f4fa022f3b35a56e07533 | 2dbda155780a19cf42d5376104879d0667fbbf75 | refs/heads/master | 2021-06-18T13:28:40.356527 | 2017-06-28T02:51:35 | 2017-06-28T02:51:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,789 | py | # -*- coding: utf8 -*-
import sys
import traceback
import urllib2
from downloader.downloader import Downloader
from downloader_sx.list_pulldown_picture.sx_picture_phantomjs import FindPicture
# Python-2-only hack: reload(sys) restores setdefaultencoding (hidden by
# site.py) so implicit str/unicode coercions default to UTF-8.
reload(sys)
sys.setdefaultencoding('utf8')
# from downloader.picture_phantomjs
class HtmlLocalDownloader(Downloader):
    """
    HTML downloader (Python 2): renders pages to screenshots via a
    phantomJS-backed FindPicture helper.
    """
    def __init__(self, set_mode='db', get_mode='db'):
        super(HtmlLocalDownloader, self).__init__(set_mode, get_mode)
    def set(self, request):
        """Mark every URL in the request's 'http' parameter as accepted (1).

        Returns a {url: 1} dict, or 0 on any error.
        """
        try:
            results = dict()
            # param = request.downloader_get_param('http')
            param = request.downloader_set_param('http')
            for url in param['urls']:
                # print url
                results[url["url"]] = 1
            return results
        except Exception:
            print(traceback.format_exc())
            return 0
    @staticmethod
    def encoding(data):
        """Decode *data* trying common Chinese/web charsets in order;
        return the first successful unicode result, or None."""
        types = ['utf-8', 'gb2312', 'gbk', 'gb18030', 'iso-8859-1']
        for t in types:
            try:
                return data.decode(t)
            except Exception, e:
                pass
        return None
    def get(self, request):
        """Screenshot every URL in the request (up to 2 attempts each).

        Returns a dict keyed by each url's md5, with status '2' and the
        screenshot result on success, status '3' otherwise; 0 on a
        top-level failure.
        """
        param = request.downloader_set_param('http')
        if param is None:
            return 0
        urls = param['urls']
        if len(urls) > 0:
            try:
                results = dict()
                for url in urls:
                    # `task` mirrors the WebRender interface kept in the
                    # commented-out code below; currently unused.
                    task = {"url": url["url"], "type": 4, "store_type": 1, "status": "3", "result": ""}
                    result = {"url": url["url"], "status": "3", "result": "", "header": ""}
                    for i in range(0, 2):
                        try:
                            import datetime
                            starttime = datetime.datetime.now()
                            print "开始截图"
                            # render = WebRender(task)
                            # sx_result = render.result
                            # sx_result = ""
                            sx = FindPicture()
                            sx_result = sx.picture_screenshot_html(url["url"])
                            endtime = datetime.datetime.now()
                            # Elapsed seconds for the screenshot attempt.
                            print (endtime - starttime).seconds
                            if sx_result:
                                result['status'] = 2
                                result['result'] = sx_result
                                break
                        except Exception as e:
                            print e
                            print('抓取失败:第%d次' % (i + 1))
                    results[url['md5']] = result
                return results
            except Exception, e:
                print sx_result
                print e
                print 'get:'+(traceback.format_exc())
                return 0
    def get_result(self, opener, request, result):
        """Fetch *request* via *opener* into the *result* dict (2 tries).

        A tuple response comes from UnRedirectHandler's http_error_302
        override (redirect_url, code); in that case the redirect is
        followed recursively with the original User-agent.
        """
        for i in range(0, 2):
            try:
                response = opener.open(request, timeout=10)
                # A tuple here means a 302 was intercepted (see
                # UnRedirectHandler below).
                if isinstance(response, tuple):
                    result["redirect_url"] = response[0]
                    result["code"] = response[1]
                    headers = {}
                    if "User-agent" in request.headers.keys():
                        headers = {"User-agent": request.headers.get("User-agent")}
                    request = urllib2.Request(result["redirect_url"], headers=headers)
                    self.get_result(opener, request, result)
                else:
                    header = response.info()
                    body = response.read()
                    # Transparently gunzip compressed responses.
                    if ('Content-Encoding' in header and header['Content-Encoding']) or \
                            ('content-encoding' in header and header['content-encoding']):
                        import gzip
                        import StringIO
                        d = StringIO.StringIO(body)
                        gz = gzip.GzipFile(fileobj=d)
                        body = gz.read()
                        gz.close()
                    body = self.encoding(body)
                    if body is not None:
                        # result["result"] = body
                        # base64.b64encode()
                        result["result"] = body
                        result["status"] = "2"
                        if str(result["type"]) == "2":
                            result["header"] = str(header)
                        break
            except urllib2.HTTPError, e:
                print e.code
                result["code"] = e.code
                break
            except Exception, e:
                # Network drops / 404-style pages can end up here too;
                # deliberately swallowed so the retry loop continues.
                # print e
                pass
class UnRedirectHandler(urllib2.HTTPRedirectHandler):
    """Redirect handler that does NOT follow 302s automatically.

    Instead of returning a response object, http_error_302 returns a
    (location, code) tuple, which HtmlLocalDownloader.get_result detects
    via isinstance(response, tuple) and follows manually.
    """
    def __init__(self):
        pass
    def http_error_302(self, req, fp, code, msg, headers):
        # if 'location' in headers:
        #     newurl = headers.getheaders('location')[0]
        #     print 'header location:'+newurl
        #     return newurl
        print headers
        if 'location' in headers:
            newurl = headers.getheaders('location')[0]
            return newurl, code
        pass
if __name__ == "__main__":
sx = FindPicture()
sx_result = sx.picture_screenshot_html("https://www.baidu.com/s?wd=%E6%B7%AE%E5%AE%89%E4%BA%BA%E6%89%8D%E7%BD%91%E6%9C%80%E6%96%B0%E6%8B%9B%E8%81%98%E4%BF%A1%E6%81%AF&rsv_spt=1&rsv_iqid=0x9d684d0e0000cecd&issp=1&f=8&rsv_bp=1&rsv_idx=2&ie=utf-8&rqlang=cn&tn=78040160_5_pg&rsv_enter=0&oq=%E6%8B%9B%E8%81%98&rsv_t=7e39msJWAhkatRpmx%2F691Ir2BU1904ljWxb%2B3gy7cl5pNJsIfLHDNBbY7prEA2Kv9ez9OQ&rsv_pq=dd1bb49d0003954a&inputT=135689006&rsv_n=2&rsv_sug3=1298&bs=%E6%8B%9B%E8%81%98")
print sx_result | [
"1925453680@qq.com"
] | 1925453680@qq.com |
a547eea325ba1119f9fbf2b2bb4e7fcf323eb6cb | 9e30a239886210dc57e6c7cb9a71ad95a840712e | /views/posts_with_more_postive_recations/tests/__init__.py | 5bdbd747b63d59acf9e27cc1f37f3940355ac2b3 | [] | no_license | sridhar562345/fb_post_v2 | 0a26d661a3f335d9a9cf129c24265d7674b3fb22 | dfd150ab5521f05291f66944d7a8686a00477547 | refs/heads/master | 2022-11-08T00:32:35.752419 | 2020-06-23T15:32:02 | 2020-06-23T15:32:02 | 274,440,245 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 339 | py | # pylint: disable=wrong-import-position
APP_NAME = "fb_post_v2"
OPERATION_NAME = "posts_with_more_postive_recations"
REQUEST_METHOD = "get"
URL_SUFFIX = "more/postive/reactions/posts/v1/"
from .test_case_01 import TestCase01PostsWithMorePostiveRecationsAPITestCase
__all__ = [
"TestCase01PostsWithMorePostiveRecationsAPITestCase"
]
| [
"="
] | = |
5a4a390b1acf15d380fc6a36f240cf181b7614db | f6078890ba792d5734d289d7a0b1d429d945a03a | /hw2/submission/babbejames/babbejames_11046_1275480_119_hw_2_jtb/119_hw2.2.py | 68334c12c326ab34413cf2530584fbe2321c2faa | [] | no_license | huazhige/EART119_Lab | 1c3d0b986a0f59727ee4ce11ded1bc7a87f5b7c0 | 47931d6f6a2c7bc053cd15cef662eb2f2027712c | refs/heads/master | 2020-05-04T23:40:53.709217 | 2019-06-11T18:30:45 | 2019-06-11T18:30:45 | 179,552,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,943 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: jtbabbe
"""
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.basemap import Basemap
#------------my modules-----------------------
import seis_utils
#--------------------------0---------------------------------------------
# params, dirs, files, equations
#------------------------------------------------------------------------
# Input catalogs: earthquake and injection-well data for Oklahoma.
file_eq = 'seism_OK.txt'
file_well = 'injWell_OK.txt'
dPar = { 'nClicks' : 10,
    'tmin' : 2010,
    'areaOK' : 181*1e3,#in km
    # -----basemap params----------------------
    'xmin' : -101, 'xmax' : -94,
    'ymin' : 33.5, 'ymax' : 37.1,
    'projection' : 'aea',# or 'cea' 'aea' for equal area projections
    }
# Decimal year eq
# NOTE(review): `mSeis` is referenced here but only assigned later in
# the "load data" section below, so running this file top-to-bottom
# raises NameError — these six lines belong after np.loadtxt(file_eq).
YR = mSeis[1]
MO = mSeis[2]
DY = mSeis[3]
HR = mSeis[4]
MN = mSeis[5]
SC = mSeis[6]
def DecYear(YR, MO, DY, HR, MN, SC):
    """Convert calendar date/time (scalars or numpy arrays) to an
    approximate decimal year.

    Months are approximated as 1/12 of a year; days, hours, minutes and
    seconds as fractions of an average 365.25-day year.

    Bug fixes vs. the original:
    - the seconds term sat on its own line, making ``+ SC/(...)`` a
      separate no-op expression statement, so seconds were ignored;
    - hours were divided by 365.25 (treated as days); the correct
      divisor is 365.25*24.
    """
    return (YR + (MO - 1)/12 + (DY - 1)/365.25
            + HR/(365.25*24)
            + MN/(365.25*24*60)
            + SC/(365.25*24*3600))
#--------------------------1---------------------------------------------
# load data
#------------------------------------------------------------------------
# load seismicity and well data using loadtxt
mSeis = np.loadtxt( file_eq).T
#time to decimal years
aTime = DecYear( YR, MO, DY, HR, MN, SC)
mSeis = np.array( [aTime, mSeis[7], mSeis[8], mSeis[-1]])
#select most recent seismic events
sort_id = aTime.argsort()
# NOTE(review): `aTime(sort_id)` CALLS the array and raises TypeError;
# indexing was presumably intended: sel = aTime[sort_id].
sel = aTime(sort_id)
mSeis = mSeis.T[sel].T
mWells = np.loadtxt( file_well).T
#--------------------------2---------------------------------------------
# map view, select boundaries of seismicity
#------------------------------------------------------------------------
# NOTE(review): `pat_bin` is assigned but `at_bin` is used below, and
# dPar has no 'dt_map' key — both raise errors as written.
pat_bin = np.arange( dPar['tmin'], 2018, dPar['dt_map'])
for i in range( at_bin.shape[0]-1):
    # NOTE(review): the loop body appears to end here; the mapping code
    # below was probably meant to run once per time bin — confirm
    # against the assignment template.
    t1, t2 = at_bin[i], at_bin[i+1]
#select earthquakes after 2010
# NOTE(review): comparing against the string 'tmin' — dPar['tmin'] was
# presumably intended (same on the wells line below).
sel_eq = mSeis[0] >= 'tmin'
#select wells with start dates after tmin
sel_well = mWells[1] >= 'tmin'
# create basemap object
plt.figure(2)
plt.cla()
ax2 = plt.subplot(111)
lon_0, lat_0 = .5*( dPar['xmin']+dPar['xmax']), .5*( dPar['ymin']+dPar['ymax'])
# project into equal area system
m = Basemap(llcrnrlon = dPar['xmin'], urcrnrlon=dPar['xmax'],
            llcrnrlat = dPar['ymin'], urcrnrlat=dPar['ymax'],
            projection=dPar['projection'], lon_0 = lon_0, lat_0 = lat_0)
#draw state boundaries
m.drawstates(color = 'aqua')
#convert spherical to 2D coordinate system using basemap
# NOTE(review): `mWell` below is undefined — the loaded array is named
# `mWells`.
xpt_Seis, ypt_Seis = m(mSeis[-4][sel_eq], mSeis[-3][sel_eq])
xpt_Well, ypt_Well = m(mWell[3][sel_well],mWell[4][sel_well])
#plot seismicity and well locations
plt.plot(xpt_Seis, ypt_Seis, 'ro', ms = 6, mew = 1.5, mfc = 'none', label = 'seismicity')
plt.plot(xpt_Well, ypt_Well, 'bo', ms = 6, mew = 1.5, mfc = 'none', label = 'wells')
# x and y labels
m.drawparallels( np.arange( 33, 38, 1), fmt='%i',labels=[1,0,0,0])
m.drawmeridians( np.arange(-100, -92, 2), fmt='%i',labels=[0,0,0,1])
# Interactive step: user clicks the polygon outlining the seismicity.
print("Please click %i times"%( dPar['nClicks']))
tCoord = plt.ginput( dPar['nClicks'])
print("clicked", tCoord)
plt.show()
aLon = np.array( tCoord).T[0]
aLat = np.array( tCoord).T[1]
#--------------------------3---------------------------------------------
# compute affected area
#------------------------------------------------------------------------
#TODO: compute area using eis_utils.area_poly
def area_poly(aX, aY):
    """Return the area of a simple polygon via the shoelace formula.

    :param aX: numpy array of vertex x-coordinates, in traversal order
    :param aY: numpy array of vertex y-coordinates, in traversal order
    :return:   enclosed area (non-negative regardless of orientation)
    """
    # Cross-sums over consecutive vertex pairs, wrapping last -> first.
    sumVert1 = np.dot( aX[0:-1], aY[1::])+aX[-1]*aY[0]
    sumVert2 = np.dot(aY[0:-1], aX[1::])+aY[-1]*aX[0]
    # (Removed an unused intermediate that shadowed the builtin `sum`.)
    return 0.5*abs( sumVert1 - sumVert2)
# NOTE(review): aLon/aLat come from plt.ginput on the projected map, so
# A_seis is in projection units squared, while dPar['areaOK'] is given
# as 181*1e3 "in km" — verify units before trusting the ratio.
A_seis = area_poly(aLon, aLat)
print('total area affected by seismicity: ', A_seis)
print( 'fraction of area of OK', A_seis/(dPar['areaOK'])) # about 1/3
| [
"hge2@ucsc.edu"
] | hge2@ucsc.edu |
6c2600913c51f10bf6c0e7f362a0a11f1888479f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02422/s883793535.py | 8a63bbefd3dc6a9170a3efaa6447ac90fe2600e7 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | #! python3
# transformation.py
# Apply q print/reverse/replace commands read from stdin to a string.
sent = input()
def print_(arr):
    # arr = [a, b]: print the substring sent[a..b] (inclusive bounds).
    global sent
    print(sent[int(arr[0]):int(arr[1])+1])
def reverse_(arr):
    # arr = [a, b]: reverse sent[a..b] in place.
    global sent
    sent = sent[:int(arr[0])] + sent[int(arr[0]):int(arr[1])+1][::-1] + sent[int(arr[1])+1:]
def replace_(arr):
    # arr = [a, b, p]: replace sent[a..b] with the string p.
    global sent
    sent = sent[:int(arr[0])] + arr[2] + sent[int(arr[1])+1:]
# Dispatch table: command word -> handler.
ops = {'print': print_, 'reverse': reverse_, 'replace': replace_}
q = int(input())
for i in range(q):
    arr = input().split()
    ops[arr[0]](arr[1:])
| [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
7c00b913a6745d7e2678de5fe3526def5f55a2ef | cfd5892a220ec7702d5c416aa1821d2429480ede | /neodroidagent/common/architectures/distributional/__init__.py | ce4e44cdf60a3308c53090f5e034df2228bd23f1 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | pything/agent | 611eac80348127b951a72ca76d2ab1f5a7732775 | 71c3bc072a8f3b7c3c1d873757cf5a1dafc3302d | refs/heads/master | 2022-04-28T08:13:27.705296 | 2020-11-17T16:58:46 | 2020-11-17T16:58:46 | 150,600,674 | 0 | 0 | Apache-2.0 | 2019-01-10T11:01:17 | 2018-09-27T14:31:45 | Python | UTF-8 | Python | false | false | 151 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = "Christian Heider Nielsen"
__doc__ = ""
from .categorical import *
from .normal import *
| [
"mrnaah@gmail.com"
] | mrnaah@gmail.com |
975d95103dc51fff26bbb0543f1d8172c841ff9b | ced81611f03554989bd338ac32b08fd393ac424a | /src/mlog_kafka.py | c274ec8cdc5a6f462590f13eed0f9a27fa910e32 | [] | no_license | jack139/mlog | c20a8777c326014e7e319e4f80e9408a57ed9190 | fcedd1d22bd4043e614d19b49735d83d0ca538cc | refs/heads/master | 2023-03-17T09:46:49.884896 | 2020-05-12T10:21:15 | 2020-05-12T10:21:15 | 341,840,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,709 | py | # -*- coding: utf-8 -*-
# 从kafka读入数据:
#
#
import sys, os
from kafka import KafkaConsumer
import predict
# Directory where anomaly log snapshots are written; overridable via the
# first command-line argument (see __main__ below).
out_dir = 'logs/rt'
# Build a time-bucket label from a log line's date/time.
# Expected line shape: "2020/04/17 07:09:27 [error] 26119#0: ..."
def get_nginx_error_label(log, minute=60):  # bucket width in minutes, default 60
    """Return a '<date>_<hour>_<bucket>' label for *log*, or None.

    The first two whitespace-separated fields are taken as date and
    clock time; the minute value is integer-divided by *minute* to pick
    a bucket within the hour (default 60 -> one bucket per hour).
    """
    fields = log.split()
    if len(fields) < 2:
        # Blank or malformed line: nothing to label.
        return None
    clock = fields[1].split(':')
    bucket = int(clock[1]) // minute
    return '%s_%s_%d' % (fields[0], clock[0], bucket)
# Run anomaly prediction on one window of log lines.
def predict_it(log_lines, label):
    # Parse raw lines into a structured log; templates the parser has
    # never seen before are returned separately.
    struct_log, unknown_templates = predict.parse_log(log_lines)
    y_test = predict.predict_IM(struct_log)
    print(y_test)
    if y_test[0]==1:  # anomaly detected: persist this log window
        filepath = os.path.join(out_dir, 'anomaly_'+label+'.log')
        with open(filepath, 'w') as f:
            f.write(''.join(log_lines))
            if len(unknown_templates)>0:
                f.write('\nUNKNOWN templates:\n')
                f.write(''.join(unknown_templates))
        print('-------------------->>>> ANOMALY detected:', filepath)
        sys.stdout.flush()
if __name__ == '__main__':
    if len(sys.argv)>1:
        out_dir = sys.argv[1]  # first CLI argument: anomaly output directory
    current_label = None
    log_lines = []
    last_dt = ''
    consumer = KafkaConsumer('mlog', bootstrap_servers=['localhost:9092'])
    for message in consumer:
        line = message.value.decode('utf-8')
        if len(line)==0:
            continue
        if (len(line.split('\n\r'))>1):  # sanity check: one message should be one line
            print('WARNING: more than one line!')
            print(line.split('\n\r'))
        # Pre-process Java logs from the wechat-manager service.
        if line[0] == '\t':  # continuation line of a Java stack trace — skip
            continue
        if line[0].isdigit():  # normally formatted log line: remember its timestamp
            l2 = line.split()
            last_dt = l2[0]+' '+l2[1]
        else:  # first line of a Java exception: prepend the last timestamp
            if last_dt=='':  # no timestamp seen yet — skip
                continue
            else:
                line = last_dt + ' [-] ERROR ' + line
        label = get_nginx_error_label(line, predict.period)
        if label is None:
            continue
        if label != current_label:
            # A full time window has been collected: run prediction on it.
            if len(log_lines)>0:
                predict_it(log_lines, current_label)
            current_label = label
            log_lines = []
            print(current_label)
        log_lines.append(line)
    # Flush the final, partially filled window.
    if len(log_lines)>0:
        predict_it(log_lines, current_label)
| [
"jack139@gmail.com"
] | jack139@gmail.com |
b7452f3daa64a23ec2b6a144ea7b499a2a56416e | ab7c6042f69a921095ac036bd6919a81255847b7 | /pipeline/article_processing/controller.py | c568918eb7861d4d2c38454cd50b4698cb7dc070 | [] | no_license | EST-Team-Adam/TheReadingMachine | 49eb768d08ec0e266d076f113933a04b4bf66674 | 25c217602b3909410d9a2dc6189e3de584146a1b | refs/heads/master | 2022-12-12T20:09:46.945526 | 2019-10-21T20:27:25 | 2019-10-21T20:27:25 | 70,123,521 | 1 | 2 | null | 2022-11-21T21:26:36 | 2016-10-06T04:46:20 | HTML | UTF-8 | Python | false | false | 7,906 | py | from __future__ import division
import itertools
import pandas as pd
import string
from nltk.corpus import stopwords
from nltk.tokenize import RegexpTokenizer
from nltk.stem import SnowballStemmer
from nltk import pos_tag
from datetime import datetime
# Manual invalid title and link
# Titles of scraper-captured maintenance notices (not real articles).
maintenance_title = ['Reduced service at Agrimoney.com',
                     'Apology to Agrimoney.com subscribers']
# Euractiv listing/filter pages the scraper picks up; they link article
# indexes rather than individual news articles.
irrelevant_link = ['https://www.euractiv.com/topics/news/?type_filter=video',
                   'http://www.euractiv.com/topics/news/?type_filter=video',
                   'http://www.euractiv.com/topics/news/?type_filter=news',
                   'http://www.euractiv.com/topics/news/?type_filter=all',
                   'https://www.euractiv.com/topics/news/?type_filter=all',
                   'https://www.euractiv.com/topics/news/',
                   'https://www.euractiv.com/topics/news/?type_filter=news',
                   'http://www.euractiv.com/topics/news/',
                   'https://www.euractiv.com/news/',
                   'http://www.euractiv.com/news/']
def scraper_post_processing(raw_articles, model_start_date, id_col='id',
                            article_col='article', title_col='title',
                            link_col='link', date_col='date'):
    '''Perform post processing of articles scraped by the scraper.

    A few known scraper issues are impossible or difficult to eliminate
    in the scrapy implementation itself, so they are resolved here:

    1. any pre-existing id column is dropped,
    2. articles with duplicated content are removed,
    3. maintenance/service notices and non-article links are removed,
    4. only articles dated strictly after ``model_start_date`` are kept.

    :param raw_articles: DataFrame of scraped articles.
    :param model_start_date: earliest article date (exclusive) to keep.
    :return: the filtered DataFrame.
    '''
    # If an ID has already been created, then we drop it.
    if id_col in raw_articles.columns:
        # Fix: `drop(id_col, 1)` used the positional `axis` argument,
        # which was deprecated and then removed in pandas 2.0; the
        # explicit keyword form is equivalent and future-proof.
        raw_articles = raw_articles.drop(columns=id_col)

    # Drop duplicates based on article content.
    processed_articles = raw_articles.drop_duplicates(subset=article_col)

    # Remove entries that are associated with maintenance or service.
    processed_articles = processed_articles[
        ~processed_articles[title_col].isin(maintenance_title)]

    # Remove links that are not associated with news articles.
    processed_articles = processed_articles[
        ~processed_articles[link_col].isin(irrelevant_link)]

    # Keep only articles dated strictly after model_start_date.
    processed_articles = processed_articles[processed_articles[date_col]
                                            > model_start_date]
    return processed_articles
def text_processor(text, remove_captalisation=True, remove_noun=False,
remove_numerical=True, remove_punctuation=True,
stem=False, tokenizer=None):
'''The function process the texts with the intention for topic
modelling.
The following steps are performed:
1. Tokenise
2. Prune words
3. Removal of stopwords
Details:
The regular expression tokeniser is used as we are interested just
on the key words, punctuation is irrelevant. Numerical and
captalisation removal can be specified as a parameter. Stop words
and certain manually coded phrases are also removed.
NOTE(Michael): The remove_noun is currently inactive. Further
investigation is required for the implementation.
'''
# Tokenize
if tokenizer is None:
tokenizer = RegexpTokenizer(r'\w+')
tokenized_text = tokenizer.tokenize(text)
else:
tokenized_text = tokenizer(text)
if remove_punctuation:
punct = string.punctuation
tokenized_text = [t for t in tokenized_text if t not in punct]
# This step is extremely computational expensive. The benchmark
# shows it would increase the total time by 12 times.
if remove_noun:
noun_set = set(['NNP', 'NNPS'])
tokenized_text = [w for w, t in pos_tag(tokenized_text)
if t not in noun_set]
# Stemming
if stem:
stemmer = SnowballStemmer('english')
tokenized_text = [stemmer.stem(word) for word in tokenized_text]
# This option is available as certain capital word has intrinsic
# meaning. e.g. Apple vs apple.
if remove_captalisation:
tokenized_text = [word.lower() for word in tokenized_text]
if remove_numerical:
tokenized_text = [word for word in tokenized_text
if not word.isdigit()]
# Remove stopwords and manual exlusion set
meaningless_words = ['euractiv', 'com',
'bloomberg', 'reuters', 'jpg', 'png']
exclusion_words = stopwords.words('english') + meaningless_words
nonstopword_text = [word
for word in tokenized_text
if word.lower() not in exclusion_words]
return nonstopword_text
def article_summariser(article_list):
'''Function to summarise the processing of the article text.
The purpose of this summary is to identify any significant changes
to the text extraction and processing.
'''
article_count = len(article_list)
vocab_size = len(set(itertools.chain.from_iterable(article_list)))
article_length = [len(t) for t in article_list]
article_vocab_size = [len(set(t)) for t in article_list]
lexical_diversity = [vs / l if l != 0 else 0
for l, vs in zip(article_length, article_vocab_size)]
average_lexical_diversity = sum(lexical_diversity) / len(lexical_diversity)
average_article_length = sum(article_length) / len(article_length)
# TODO (Michael): Should also save the data sources.
summary = {'createTime': datetime.utcnow(),
'article_count': article_count,
'vocab_size': vocab_size,
'average_lexical_diversity': average_lexical_diversity,
'average_article_length': average_article_length}
return pd.DataFrame(summary, index=[0])
def text_preprocessing(article_df, article_col, min_length,
remove_captalisation=True, remove_noun=False,
remove_numerical=True, remove_punctuation=True,
stem=False, date_col='date'):
'''Process the text extracted from the scrapper.
In addition, articles with tokens less than the min_length
specified will be dropped. This is because certain articles were
extracted incorrectly or contains insufficient information, thus
they are removed to avoid contamination of the output.
'''
# Tokenise and process the text
tokenised_text = [text_processor(a,
remove_captalisation=remove_captalisation,
remove_noun=remove_noun,
remove_numerical=remove_numerical,
remove_punctuation=remove_punctuation,
stem=stem)
for a in article_df[article_col]]
# Find the index of entries where the article length is less than
# the specified length. The entries are then removed from the
# article and the original data frame.
min_length_ind = [i for i, t in enumerate(tokenised_text)
if len(t) > min_length]
min_length_tokens = [tokenised_text[i] for i in min_length_ind]
exclude_min_length_df = article_df.iloc[min_length_ind, ].copy()
# Create the summary
summary = article_summariser(min_length_tokens)
# Concatenate the text together. This step is to enable the result
# to be saved in to a standard database.
exclude_min_length_df[article_col] = [' '.join(tt)
for tt in min_length_tokens]
# Recreate the index
exclude_min_length_df.sort_values([date_col], ascending=[1], inplace=True)
exclude_min_length_df['id'] = range(1, exclude_min_length_df.shape[0] + 1)
return exclude_min_length_df, summary
| [
"mkao006@gmail.com"
] | mkao006@gmail.com |
89f25f38fdef1463139ed5b10736c753d7dc5d5a | 59636b143a2ab189145b17a7ea9a38de5af1f7a5 | /All/selenium_all/guanjiaPlus/zonghe_test_object.py | d8dfaeeffee6ccfdb47fb72792cb45fdb42eb84d | [] | no_license | woshichenya/hezi | 880a70c34cc61b6b6bcf1ccb65fa54989595fb71 | 4211ff8ef78f5d15d8fc8065247f916dfe9d305d | refs/heads/master | 2020-04-28T21:46:02.664025 | 2019-05-14T08:47:33 | 2019-05-14T08:47:33 | 175,593,966 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,673 | py | import guanjiaPlus.xiaochengxuguanliyemian
import traceback
import time
from selenium.webdriver.support.ui import Select
'''调用进入小程序管理页面的脚本'''
go=guanjiaPlus.xiaochengxuguanliyemian.go
Go=guanjiaPlus.xiaochengxuguanliyemian.Go
GO=guanjiaPlus.xiaochengxuguanliyemian.GO
bug_num=0
def dianpu():
go.Ctext("店铺","店铺超链接","进入店铺超链接","Bug--无法进入店铺超链接")
#幻灯片操作
go.Ctext("幻灯片","幻灯片超链接","进入幻灯片超链接","Bug--无法进入幻灯片超链接")
go.Ctext("添加幻灯片", "添加幻灯片超链接", "进入添加幻灯片超链接", "Bug--无法进入添加幻灯片超链接")
go.Ctext("返回列表","返回列表按钮","点击返回列表按钮","Bug--无法点击返回列表按钮")
# 导航图标
go.Ctext("导航图标", "导航图标超链接", "进入导航图标超链接", "Bug--无法进入导航图标超链接")
go.Ctext("添加首页导航", "添加首页导航超链接", "点击添加首页导航超链接", "Bug--无法点击添加首页导航超链接")
go.CTag_name_zidingyi("input","value","返回列表","返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
# 广告
go.Ctext("广告", "广告超链接", "进入广告超链接", "Bug--无法进入广告超链接")
go.Ctext("添加广告", "添加广告超链接", "点击添加广告超链接", "Bug--无法点击添加广告超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
#魔方推荐
go.Ctext("魔方推荐", "魔方推荐超链接", "点击魔方推荐超链接", "Bug--无法点击魔方推荐超链接")
go.Cxpath("/html/body/div[6]/div[2]/form/table/tfoot/tr/td/button","添加魔方按钮", "点击添加魔方按钮", "Bug--无法点击添加魔方按钮")
#商品推荐
go.Ctext("商品推荐","商品推荐超链接","点击商品超链接按钮","Bug--无法点击商品按钮")
#排版设置
go.Ctext("排版设置", "排版设置超链接", "点击排版设置按钮", "Bug--无法点击排版设置按钮")
#商城
go.C_class_text("商城超链接","div",'menu-header ',"商城", "点击商城按钮", "Bug--无法点击商城按钮")
go.Ctext("公告管理", "公告管理超链接", "点击公告管理按钮", "Bug--无法点击公告管理按钮")
go.Ctext("添加公告", "添加公告超链接", "点击添加公告超链接", "Bug--无法点击添加公告超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
go.Ctext("评价管理", "评价管理超链接", "点击评价管理按钮", "Bug--无法点击评价管理按钮")
go.Ctext("添加虚拟评论", "添加虚拟评论超链接", "点击添加虚拟评论超链接", "Bug--无法点击添加虚拟评论超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
go.Ctext("退货地址", "退货地址超链接", "点击退货地址按钮", "Bug--无法点击退货地址按钮")
go.Ctext("添加退货地址", "添加退货地址超链接", "点击添加退货地址超链接", "Bug--无法点击添加退货地址超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
#配送方式
#go.Ctext("配送方式", "配送方式超链接", "点击配送方式按钮", "Bug--无法点击配送方式按钮")
go.C_class_text("配送方式超链接", "div", 'menu-header ', "配送方式", "点击配送方式按钮", "Bug--无法点击配送方式按钮")
go.Ctext("普通快递", "普通快递超链接", "点击普通快递按钮", "Bug--无法点击普通快递按钮")
go.Ctext("添加配送方式", "添加配送方式超链接", "点击添加配送方式超链接", "Bug--无法点击添加配送方式超链接")
go.CTag_name_zidingyi("input", "value", "返回列表", "返回列表按钮", "点击返回列表按钮", "Bug--无法点击返回列表按钮")
#店铺装修
go.Ctext("店铺装修", "店铺装修超链接", "点击店铺装修按钮", "Bug--无法点击店铺装修按钮")
try:
dianpu()
print("***************************************************测试通过")
except:
print("***************************************************会员测试过程之中有Bug")
bug_num += 1 | [
"bj_xiaoya@163.com"
] | bj_xiaoya@163.com |
1e12cfdc813dbe8b0998b82ee3627f8c4a7712ae | 0adb68bbf576340c8ba1d9d3c07320ab3bfdb95e | /regexlib/2021-5-15/python_re2_test_file/regexlib_3439.py | 343711792907104d76d4f43c1009d2d954ef90b2 | [
"MIT"
] | permissive | agentjacker/ReDoS-Benchmarks | c7d6633a3b77d9e29e0ee2db98d5dfb60cde91c6 | f5b5094d835649e957bf3fec6b8bd4f6efdb35fc | refs/heads/main | 2023-05-10T13:57:48.491045 | 2021-05-21T11:19:39 | 2021-05-21T11:19:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 533 | py | # 3439
# [\w*|\W*]*<[[\w*|\W*]*|/[\w*|\W*]]>[\w*|\W*]*
# POLYNOMIAL
# nums:5
# POLYNOMIAL AttackString:""+"<"*5000+"@1 _SLQ_2"
import re2 as re
from time import perf_counter
regex = """[\w*|\W*]*<[[\w*|\W*]*|/[\w*|\W*]]>[\w*|\W*]*"""
REGEX = re.compile(regex)
for i in range(0, 150000):
ATTACK = "" + "<" * i * 10000 + "@1 _SLQ_2"
LEN = len(ATTACK)
BEGIN = perf_counter()
m = REGEX.search(ATTACK)
# m = REGEX.match(ATTACK)
DURATION = perf_counter() - BEGIN
print(f"{i *10000}: took {DURATION} seconds!") | [
"liyt@ios.ac.cn"
] | liyt@ios.ac.cn |
718a8d60b16760ec352731a29ee43b53d90b448c | 1dd72195bc08460df7e5bb82d3b7bac7a6673f49 | /api/app/wildfire_one/query_builders.py | 8a69476dd8130f4f63a78b313c9e2d522773494d | [
"Apache-2.0",
"MIT"
] | permissive | bcgov/wps | c4347c39cadfad6711502d47776abc8d03895593 | 0ba707b0eddc280240964efa481988df92046e6a | refs/heads/main | 2023-08-19T00:56:39.286460 | 2023-08-16T18:03:06 | 2023-08-16T18:03:06 | 235,861,506 | 35 | 9 | Apache-2.0 | 2023-09-11T21:35:07 | 2020-01-23T18:42:10 | Python | UTF-8 | Python | false | false | 5,089 | py | """ Query builder classes for making requests to WFWX API """
from typing import List, Tuple
from abc import abstractmethod, ABC
from app import config
class BuildQuery(ABC):
""" Base class for building query urls and params """
def __init__(self):
""" Initialize object """
self.max_page_size = config.get('WFWX_MAX_PAGE_SIZE', 1000)
self.base_url = config.get('WFWX_BASE_URL')
@abstractmethod
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params """
class BuildQueryStations(BuildQuery):
""" Class for building a url and RSQL params to request all active stations. """
def __init__(self):
""" Prepare filtering on active, test and project stations. """
super().__init__()
self.param_query = None
# In conversation with Dana Hicks, on Apr 20, 2021 - Dana said to show active, test and project.
for status in ('ACTIVE', 'TEST', 'PROJECT'):
if self.param_query:
self.param_query += f',stationStatus.id=="{status}"'
else:
self.param_query = f'stationStatus.id=="{status}"'
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params with rsql query for all weather stations marked active. """
params = {'size': self.max_page_size, 'sort': 'displayLabel',
'page': page, 'query': self.param_query}
url = f'{self.base_url}/v1/stations'
return url, params
class BuildQueryByStationCode(BuildQuery):
""" Class for building a url and params to request a list of stations by code """
def __init__(self, station_codes: List[int]):
""" Initialize object """
super().__init__()
self.querystring = ''
for code in station_codes:
if len(self.querystring) > 0:
self.querystring += ' or '
self.querystring += f'stationCode=={code}'
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params for a list of stations """
params = {'size': self.max_page_size,
'sort': 'displayLabel', 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/stations/rsql'
return url, params
class BuildQueryAllHourliesByRange(BuildQuery):
""" Builds query for requesting all hourlies in a time range"""
def __init__(self, start_timestamp: int, end_timestamp: int):
""" Initialize object """
super().__init__()
self.querystring: str = "weatherTimestamp >=" + \
str(start_timestamp) + ";" + "weatherTimestamp <" + str(end_timestamp)
def query(self, page) -> Tuple[str, dict]:
""" Return query url for hourlies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size, 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/hourlies/rsql'
return url, params
class BuildQueryAllForecastsByAfterStart(BuildQuery):
""" Builds query for requesting all dailies in a time range"""
def __init__(self, start_timestamp: int):
""" Initialize object """
super().__init__()
self.querystring = f"weatherTimestamp >={start_timestamp};recordType.id == 'FORECAST'"
def query(self, page) -> Tuple[str, dict]:
""" Return query url for dailies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size, 'page': page, 'query': self.querystring}
url = f'{self.base_url}/v1/dailies/rsql'
return url, params
class BuildQueryDailiesByStationCode(BuildQuery):
""" Builds query for requesting dailies in a time range for the station codes"""
def __init__(self, start_timestamp: int, end_timestamp: int, station_ids: List[str]):
""" Initialize object """
super().__init__()
self.start_timestamp = start_timestamp
self.end_timestamp = end_timestamp
self.station_ids = station_ids
def query(self, page) -> Tuple[str, dict]:
""" Return query url for dailies between start_timestamp, end_timestamp"""
params = {'size': self.max_page_size,
'page': page,
'startingTimestamp': self.start_timestamp,
'endingTimestamp': self.end_timestamp,
'stationIds': self.station_ids}
url = (f'{self.base_url}/v1/dailies/search/findDailiesByStationIdIsInAndWeather' +
'TimestampBetweenOrderByStationIdAscWeatherTimestampAsc')
return url, params
class BuildQueryStationGroups(BuildQuery):
""" Builds a query for requesting all station groups """
def __init__(self):
""" Initilize object. """
super().__init__()
self.param_query = None
def query(self, page) -> Tuple[str, dict]:
""" Return query url and params with query for all weather stations groups. """
params = {'size': self.max_page_size, 'page': page, 'sort': 'groupOwnerUserId,asc'}
url = f'{self.base_url}/v1/stationGroups'
return url, params
| [
"noreply@github.com"
] | bcgov.noreply@github.com |
cda579e2676fc52ddc8593410c78e43762839efd | 53d0b80b64d201def809ef11acbeca38da2c1574 | /hw_proyecciones/migrations/0003_drilldown.py | 80145377e2f3e6f8f620aebac2f51441c52f7df9 | [] | no_license | NOKIA-NI/niproyecciones | 068558b27afd26bc2eb6ab9c32f98a37742817ce | 90c5829250643443f90ae4cbb9b234464a2fcaef | refs/heads/master | 2022-12-11T18:22:32.928214 | 2018-10-25T14:52:52 | 2018-10-25T14:52:52 | 127,053,712 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,609 | py | # Generated by Django 2.0.9 on 2018-10-25 14:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hw_proyecciones', '0002_hwcontrolrfe_hwsitelist'),
]
operations = [
migrations.CreateModel(
name='DrillDown',
fields=[
('id_drill_down_d1', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('Site_Name', models.CharField(blank=True, max_length=255, null=True)),
('Implemented', models.CharField(blank=True, max_length=255, null=True)),
('TSS', models.CharField(blank=True, max_length=255, null=True)),
('RFIC', models.IntegerField(blank=True, null=True)),
('FC_RFIC', models.IntegerField(blank=True, null=True)),
('CPO_Status1', models.CharField(blank=True, max_length=255, null=True)),
('CPO_Status2', models.CharField(blank=True, max_length=255, null=True)),
('HW_Status', models.CharField(blank=True, max_length=255, null=True)),
('FC_HW', models.IntegerField(blank=True, null=True)),
('Status_Despachos', models.CharField(blank=True, max_length=255, null=True)),
('FC_Antenas', models.IntegerField(blank=True, null=True)),
],
options={
'verbose_name': 'DrillDown',
'verbose_name_plural': 'DrillDown',
'db_table': 'DrillDown_D1',
'managed': False,
},
),
]
| [
"jucebridu@gmail.com"
] | jucebridu@gmail.com |
da137d50e78fd365f0c5dc7bb54e588af907cded | c48570083578f2ad5b10d397889c452a69091582 | /sge-python/practica05/ej8.py | ecaad2fbf012935615a6b80628281ab05a3fbc1b | [] | no_license | RamonMR95/sge-python | c9471fcf3e33eeda540982a8f947971c94e8254c | 895e3a7012f62518c4c1f61717f18f9d493f2ad0 | refs/heads/master | 2022-04-09T07:13:26.633241 | 2020-03-01T11:18:44 | 2020-03-01T11:18:44 | 234,789,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,845 | py | # !/usr/bin/env python3
# 8.- Supón que mantenemos dos listas con igual número de elementos. Una de ellas,
# llamada alumnos, contiene una serie de nombres y la otra, llamada notas, una serie de números flotantes 0.0 y 10.0.
# En notas guardamos la calificación obtenida por los alumnos cuyos nombres están en alumnos. La nota notas[i]
# corresponde al estudiante alumnos[i]. Una posible configuración de las listas sería esta:
# alumnos = ['Ana_Pi','Pau_Lopez', 'Luis_Sol', 'Mar_Vega', 'Paz_Mir']
# notas = [10, 5.5, 2.0, 8.5, 7.0]
# De acuerdo con ella, el alumno Pau López, por ejemplo, fue calificado con un 5.5. Nos piden diseñar un procedimiento
# que recibe como datos las dos listas y una cadena con el nombre de un estudiante. Si el estudiante pertenece a la
# clase, el procedimiento imprimirá su nombre y nota en pantalla. Si no es un alumno incluido en la lista, se imprimirá
# un mensaje que lo advierta.
# Realizar las siguientes funciones:
# 1) Diseñar una función que reciba las dos listas y que devuelva el nombre de todos los estudiantes
# que aprobaron el examen
# 2) Diseñar una función que reciba la lista de notas y devuelva el número de aprobados
# 3) Diseñar una función que reciba las dos listas y devuelva el nombre de todos los estudiantes que
# obtuvieron la máxima nota.
# 4) Diseñar una función que reciba las dos listas y devuelva el nombre de todos los estudiantes cuya
# calificación es igual o superior a la calificación media.
# 5) Diseñar una función que reciba las dos listas y un nombre (una cadena); si el nombre está en la
# lista de estudiantes, devolverá su nota, si no, devolverá None.
# Haciendo uso de las funciones anteriores y diseñando nuevas funciones si es necesario. Construir un
# programa que presente el siguiente menú y permita ejecutar las acciones correspondientes a cada opción:
# 1) Añadir estudiante y calificación
# 2) Mostrar lista de estudiantes con sus calificaciones
# 3) Mostrar estudiantes aprobados
# 4) Número de aprobados
# 5) Estudiantes con máxima nota
# 6) Estudiantes con nota mayor o igual a la media
# 7) Nota estudiante
# 8) Finalizar ejecución del programa
__author__ = "Ramón Moñino Rubio"
__email__ = "ramonmr16@gmail.com"
__version__ = "1.0.0"
alumnos = ['Ana_Pi', 'Pau_Lopez', 'Luis_Sol', 'Mar_Vega', 'Paz_Mir']
notas = [10, 5.5, 2.0, 8.5, 7.0]
def mostrar_aprobados(alums, nots):
aprobados = []
for i in range(len(notas)):
if notas[i] >= 5:
aprobados.append(alumnos[i])
return aprobados
def mostrar_numero_aprobados(nots):
n_aprobados = 0
for nota in nots:
if nota >= 5:
n_aprobados += 1
return n_aprobados
def mostrar_alumnos_max_nota(alums, nots):
alumnos_max_nota = []
max_nota = max(nots)
for i in range(len(nots)):
if nots[i] == max_nota:
alumnos_max_nota.append(alumnos[i])
return alumnos_max_nota
def mostrar_alumnos_nota_sup_media(alums, nots):
alumnos_media = []
media = sum(nots) / len(nots)
for i in range(len(nots)):
if nots[i] >= media:
alumnos_media.append(alumnos[i])
return alumnos_media
def is_in_alumnos(alums, nots, nombre):
for i in range(len(alums)):
if alumnos[i] == nombre:
return nots[i]
return None
def mostrar_alumnos_calificaciones(alums, nots):
for i in range(len(alums)):
print(f"Alumno: {alums[i]}: {nots[i]}")
menu = f"1) Añadir estudiante y calificación\n" \
f"2) Mostrar lista de estudiantes con sus calificaciones\n" \
f"3) Mostrar estudiantes aprobados\n" \
f"4) Número de aprobados\n" \
f"5) Estudiantes con máxima nota\n" \
f"6) Estudiantes con nota mayor o igual a la media\n" \
f"7) Nota estudiante\n" \
f"8) Finalizar ejecución del programa\n"
opcion = int(input(menu))
while opcion != 8:
if opcion == 1:
print("Añadir estudiante: ")
nombre = input("Introduce nombre del estudiante: ")
calificacion = float(input("Introduce la nota del estudiante: "))
alumnos.append(nombre)
notas.append(calificacion)
elif opcion == 2:
mostrar_alumnos_calificaciones(alumnos, notas)
elif opcion == 3:
print(mostrar_aprobados(alumnos, notas))
elif opcion == 4:
print(mostrar_numero_aprobados(notas))
elif opcion == 5:
print(mostrar_alumnos_max_nota(alumnos, notas))
elif opcion == 6:
print(mostrar_alumnos_nota_sup_media(alumnos, notas))
elif opcion == 7:
nombre = input("Introduce el nombre del estudiante: ")
nota = is_in_alumnos(alumnos, notas, nombre)
if nota:
print(f" Alumno: {nombre}, Nota {nota}")
else:
print("El estudiante no existe en la lista")
opcion = int(input(menu))
| [
"ramonmr16@gmail.com"
] | ramonmr16@gmail.com |
47c7839a7f260182143c3fbbc27d47a4c6133d74 | d02abf740dd326f12b48357692af41282616a271 | /dataset-analysis/pyplot/simple_bar.py | 2b87ed287b5210a8c63a83d86b0d214fb17486bb | [] | no_license | jacowk/python-projects | a762c5542b7dab6cd7915b367b472a1a20d63a0a | 2e5f39868d0cdf292b4b1a792e946d169de61780 | refs/heads/master | 2021-01-05T05:18:35.550518 | 2020-09-27T08:42:11 | 2020-09-27T08:42:11 | 240,889,795 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 18:15:55 2020
@author: jaco
"""
from matplotlib import pyplot as plt
DayOfWeekOfCall = [1,2,3]
DispatchesOnThisWeekday = [77, 32, 42]
LABELS = ["Monday", "Tuesday", "Wednesday"]
plt.bar(DayOfWeekOfCall, DispatchesOnThisWeekday, align='center')
plt.xticks(DayOfWeekOfCall, LABELS)
plt.show() | [
"jaco.wk@gmail.com"
] | jaco.wk@gmail.com |
a3ae4cd9c8a7f48e4de904e34f3da61fec637a6d | f7cc8d3f04d34b7d7e64e1b54ba458e4b39bce49 | /PythonLibraries/kiwisolver/1.0.1/package.py | 187167ad925ae027309d3867621154cc3d81d323 | [
"MIT"
] | permissive | cashmerepipeline/CashmereRez | 80a53af61ddb8506bb111cd16450538c3b405689 | 13a73931d715ffac27c337abcd6df97b5c47534b | refs/heads/master | 2020-05-09T12:59:28.106229 | 2019-04-17T16:39:46 | 2019-04-17T16:39:46 | 181,132,180 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 419 | py | # -*- coding: utf-8 -*-
name = u'kiwisolver'
version = '1.0.1'
description = \
"""
kiwisolver library
"""
requires = []
variants = []
def commands():
import os
kiwisolver_libs_path = os.path.join(getenv("PYTHON_LIBS_PATH"), "kiwisolver", "%s" % version)
# env.PATH.append(os.path.join(kiwisolver_libs_path, 'lib'))
env.PYTHONPATH.append(os.path.join(kiwisolver_libs_path, 'lib'))
| [
"yes7rose@sina.com"
] | yes7rose@sina.com |
3f082db88de075c9ca3e973e5512ab9093ea3e0c | c317f6a390de255540c2fb6a2e637c20bec03762 | /final/rev-kyrkkodning/obfuscate.py | 5bb035cc6b5f9c3b84c76c559e079fe4659e3869 | [] | no_license | Kodsport/sakerhetssm-2021-solutions | a7329ef22862bcfc4c970d43ac210bbe951cf3a8 | 85bc2aa619d55139acf7c91483259088329c15e2 | refs/heads/master | 2023-05-12T00:54:24.546337 | 2021-06-07T14:12:32 | 2021-06-07T14:12:32 | 353,975,490 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 290 | py | from pathlib import Path
import re
source = Path("challeasy.py").read_text()
names = [row.split(" = ")[0] for row in source.split("\n") if " = " in row]
for i,name in enumerate(names):
source = re.sub("\\b"+name+"\\b",chr(ord("A")+i),source)
Path("challhard.py").write_text(source)
| [
"drwal.mateusz@gmail.com"
] | drwal.mateusz@gmail.com |
1fbf502a09cc2ddd28c1e76023f44e6a52f422d4 | 65dce36be9eb2078def7434455bdb41e4fc37394 | /Two Sum - Closest to target.py | 83946cbedd32144499f639c865b05e651eadea0f | [] | no_license | EvianTan/Lintcode-Leetcode | 9cf2d2f6a85c0a494382b9c347bcdb4ee0b5d21a | d12dd31e98c2bf24acc20c5634adfa950e68bd97 | refs/heads/master | 2021-01-22T08:13:55.758825 | 2017-10-20T21:46:23 | 2017-10-20T21:46:23 | 92,607,185 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,070 | py | '''
Given an array nums of n integers, find two integers in nums such that the sum is closest to a given number, target.
Return the difference between the sum of the two integers and the target.
Have you met this question in a real interview? Yes
Example
Given array nums = [-1, 2, 1, -4], and target = 4.
The minimum difference is 1. (4 - (2 + 1) = 1).
Challenge
Do it in O(nlogn) time complexity.
'''
class Solution:
"""
@param: nums: an integer array
@param: target: An integer
@return: the difference between the sum and the target
"""
import sys
def twoSumClosest(self, nums, target):
# write your code here
nums.sort()
i = 0
j = len(nums)-1
diff = sys.maxint
while i < j:
if nums[i]+nums[j] < target:
diff = min(diff, target-nums[i]-nums[j])
i += 1
elif nums[i]+nums[j] > target:
diff = min(diff, nums[i] + nums[j] - target)
j -= 1
else:
return 0
return diff | [
"yiyun.tan@uconn.edu"
] | yiyun.tan@uconn.edu |
16176f545abb87396a77a21ab271f39fd5f52f3b | 0347ed077da6f98d2740809d8582928485afc4e6 | /wechatutility/wechatReceiveMsg.py | 2640346a39717961923d2e95c0907c0cbfc19776 | [] | no_license | AhMay/wechatpublic_practice | 140c45f2ca4c7423bea15dc5b57d26d032a8a03b | b38683c5e8e6a1db078c164342fead10af78818a | refs/heads/master | 2020-11-27T23:57:20.358331 | 2019-12-23T01:15:27 | 2019-12-23T01:15:27 | 229,653,225 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,240 | py | '''微信公众号接收到的用户消息类型
https://developers.weixin.qq.com/doc/offiaccount/Message_Management/Receiving_standard_messages.html
'''
import xml.etree.ElementTree as ET
def parse_xml(web_data):
if len(web_data) == 0:
return None
xmlData = ET.fromstring(web_data)
msg_type = xmlData.find('MsgType').text
if msg_type == 'text':
return ReceiveTextMsg(xmlData)
elif msg_type == 'image':
return ReceiveImageMsg(xmlData)
elif msg_type == 'voice':
return ReceiveVoiceMsg(xmlData)
elif msg_type in ('video','shortvideo'):
return ReceiveVideoMsg(xmlData)
elif msg_type == 'location':
return ReceiveLocationMsg(xmlData)
elif msg_type == 'link':
return ReceiveLinkMsg(xmlData)
elif msg_type == 'event':
recEventObj = ReceiveEventMsg(xmlData)
if recEventObj.Event == 'LOCATION':
return ReveiveLocationEventMsg(xmlData)
return recEventObj
else:
print('不能识别的消息类型:'+ msg_type)
return None
class ReceiveMsg(object):
'''基类'''
def __init__(self,xmlData):
self.ToUserName = xmlData.find('ToUserName').text
self.FromUserName = xmlData.find('FromUserName').text
self.CreateTime = xmlData.find('CreateTime').text
self.MsgType = xmlData.find('MsgType').text
self.MsgId =''
if xmlData.find('MsgId') is not None:
self.MsgId = xmlData.find('MsgId').text
class ReceiveTextMsg(ReceiveMsg):
'''文本消息'''
def __init__(self,xmlData):
super(ReceiveTextMsg,self).__init__(xmlData)
self.Content = xmlData.find('Content').text
class ReceiveImageMsg(ReceiveMsg):
'''图片消息'''
def __init__(self,xmlData):
super(ReceiveImageMsg,self).__init__(xmlData)
self.PicUrl = xmlData.find('PicUrl').text
self.MediaId = xmlData.find('MediaId').text
class ReceiveVoiceMsg(ReceiveMsg):
'''语音消息'''
def __init__(self,xmlData):
super(ReceiveVoiceMsg,self).__init__(xmlData)
self.Format = xmlData.find('Format').text
self.MediaId = xmlData.find('MediaId').text
self.Recognition = ''
if xmlData.find('Recognition') is not None:
self.Recognition ='' if xmlData.find('Recognition').text is None else xmlData.find('Recognition').text
class ReceiveVideoMsg(ReceiveMsg):
'''视频消息和小视频消息'''
def __init__(self,xmlData):
super(ReceiveVideoMsg,self).__init__(xmlData)
self.ThumbMediaId = xmlData.find('ThumbMediaId').text
self.MediaId = xmlData.find('MediaId').text
class ReceiveLocationMsg(ReceiveMsg):
'''地理位置消息'''
def __init__(self,xmlData):
super(ReceiveLocationMsg,self).__init__(xmlData)
self.Location_X = xmlData.find('Location_X').text
self.Location_Y = xmlData.find('Location_Y').text
self.Scale = xmlData.find('Scale').text
self.Label = xmlData.find('Label').text
class ReceiveLinkMsg(ReceiveMsg):
'''链接消息'''
def __init__(self,xmlData):
super(ReceiveLinkMsg,self).__init__(xmlData)
self.Title = xmlData.find('Title').text
self.Description = xmlData.find('Description').text
self.Url = xmlData.find('Url').text
class ReceiveEventMsg(ReceiveMsg):
'''普通事件'''
def __init__(self, xmlData):
super(ReceiveEventMsg,self).__init__(xmlData)
self.Event = xmlData.find('Event').text
self.EventKey = (False,'')
if xmlData.find('EventKey') is not None:
eventkey ='' if xmlData.find('EventKey').text is None else xmlData.find('EventKey').text
self.EventKey =(True, eventkey)
self.Ticket = (False,'')
if xmlData.find('Ticket') is not None:
ticket = '' if xmlData.find('Ticket').text is None else xmlData.find('Ticket').text
self.Ticket =(True, ticket)
class ReveiveLocationEventMsg(ReceiveEventMsg):
'''上报地理位置事件'''
def __init__(self,xmlData):
super(ReveiveLocationEventMsg,self).__init__(xmlData)
self.Latitude = xmlData.find('Latitude').text
self.Longitude = xmlData.find('Longitude').text
self.Precision = xmlData.find('Precision').text
class ReceiveViewEventMsg(ReceiveEventMsg):
'''view 和小程序'''
def __init__(self, xmlData):
super(ReceiveViewEventMsg, self).__init__(xmlData)
self.MenuId = xmlData.find('MenuId').text
class ReceiveScanCodeEventMsg(ReceiveEventMsg):
'''scancode_push scancode_waitmsg'''
def __init__(self, xmlData):
super(ReceiveScanCodeEventMsg, self).__init__(xmlData)
self.ScanCodeInfo = xmlData.find('ScanCodeInfo').text
self.ScanResult = xmlData.find('ScanResult').text
class ReceivePicEventMsg(ReceiveEventMsg):
'''pic_sysphoto scancode_waitmsg pic_weixin'''
def __init__(self, xmlData):
super(ReceivePicEventMsg, self).__init__(xmlData)
self.ScanCodeInfo = xmlData.find('ScanCodeInfo').text
self.ScanResult = xmlData.find('ScanResult').text
picItems = xmlData.getiterator('PicMd5Sum')
self.SendPicsInfo = [x.text for x in picItems] | [
"meizi111082@hotmail.com"
] | meizi111082@hotmail.com |
74701596a433057380ef80ccb963ff63b98ec52e | f9b1157ac3486709f2655810b196086dc181adc2 | /backend/test_upgrade_plan_de_1173/wsgi.py | 0def1be27bb2fd809c50a6995188f5110839d443 | [] | no_license | crowdbotics-apps/test-upgrade-plan-de-1173 | 21364bb141fefbab45e1ee53283f9f298482f1f9 | fc5e4584fc8ddb571b2f76b52f4c7c9c3ee3bd25 | refs/heads/master | 2023-02-14T20:46:31.539334 | 2019-12-11T18:29:30 | 2019-12-11T18:29:30 | 227,433,239 | 0 | 0 | null | 2023-01-24T00:56:00 | 2019-12-11T18:28:03 | JavaScript | UTF-8 | Python | false | false | 427 | py | """
WSGI config for test_upgrade_plan_de_1173 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_upgrade_plan_de_1173.settings')
application = get_wsgi_application()
| [
"team@crowdbotics.com"
] | team@crowdbotics.com |
023e8cfd0b3aeb66d815ceac84e680bcb62c4bca | 9137e1ccf070b3f9d92d8635662c569639910ae5 | /apps/modules/setting/apis/session_set.py | 07590e1711af07d77f4c7d0d5f050da905e11017 | [
"BSD-2-Clause"
] | permissive | zhangning123798/osroom | 08e87a4c32e9d92807a66109e7074723279179cc | 21859b77b8980ccb8a5392f02c76bd552b2bf653 | refs/heads/master | 2020-05-26T10:00:38.658768 | 2019-05-23T08:48:03 | 2019-05-23T08:48:03 | 188,195,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | # -*-coding:utf-8-*-
from flask import request
from apps.core.blueprint import api
from apps.core.flask.permission import permission_required
from apps.core.flask.response import response_format
from apps.modules.setting.process.session_set import language_set
__author__ = "Allen Woo"
@api.route('/session/language-set', methods=['PUT'])
@permission_required(use_default=False)
def api_language_set():
"""
PUT :
修改当前语言
language:<str>, 如en_US, zh_CN
:return:
"""
data = language_set()
return response_format(data)
| [
"624393877@qq.com"
] | 624393877@qq.com |
49eaa8cbc43819fdcce800289fb77bb25c70faaa | bf11991193cd09d5d95e2706ed4168c36221f582 | /HW1/q2.py | e8aa57704cc17595e677bc46cfc4b7a1d61f2ed9 | [] | no_license | NightKirie/COMPUTER-VISION-AND-DEEP-LEARNING_2020 | f3569bf06cb824aace5fa3c3b01c26909b7b1a68 | 8b770c12bdc1e149c12c424d0637ab8f35df0370 | refs/heads/main | 2023-02-05T07:45:03.399847 | 2020-12-18T12:51:10 | 2020-12-18T12:51:10 | 304,540,542 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,464 | py | import cv2
import numpy as np
WIDTH = 11
HEIGHT = 8
def draw(img, corners, imgpts):
    """Draw the six edges of the tetrahedron whose projected vertices are
    imgpts[0..3] onto img in red (BGR), returning the annotated image.

    `corners` is accepted for API symmetry with the caller but not used.
    """
    red = (0, 0, 255)
    for a, b in ((0, 1), (0, 2), (0, 3), (1, 2), (1, 3), (2, 3)):
        img = cv2.line(img, tuple(imgpts[a].ravel()), tuple(imgpts[b].ravel()), red, 5)
    return img
def augmentedReality():
    """Overlay a red wireframe tetrahedron on five chessboard images.

    Calibrates the camera from the Q1_image chessboard set, then for each
    Q2_image estimates the board pose with solvePnP, projects a tetrahedron
    defined in board coordinates, draws it, and shows each result for 0.5 s.
    """
    # Termination criteria for the sub-pixel corner refinement.
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
    obj_point_list = []
    img_point_list = []
    img_list = []
    # 3D chessboard corner coordinates in board space (z = 0 plane).
    objp = np.zeros((WIDTH*HEIGHT,3), np.float32)
    objp[:,:2] = np.mgrid[0:WIDTH,0:HEIGHT].T.reshape(-1,2)
    # Tetrahedron vertices given in board coordinates.
    axis = np.float32([[3, 3, -3], [1, 1, 0], [3, 5, 0], [5, 1, 0]]).reshape(-1,3)
    # Pass 1: gather corner correspondences for camera calibration.
    for i in range(1, 6):
        img = cv2.imread(f"Q1_image/{i}.bmp")
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray_img, (WIDTH, HEIGHT), None)
        if ret == True:
            obj_point_list.append(objp)
            img_point_list.append(corners)
    ret, mtx_p, dist, rvecs, tvecs = cv2.calibrateCamera(obj_point_list, img_point_list, gray_img.shape[::-1], None, None)
    # Pass 2: per image, refine corners, estimate pose, draw the projection.
    for i in range(1, 6):
        img = cv2.imread(f"Q2_image/{i}.bmp")
        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray_img, (WIDTH, HEIGHT), None)
        if ret == True:
            corners2 = cv2.cornerSubPix(gray_img,corners,(11,11),(-1,-1),criteria)
            # NOTE(review): this per-image calibrateCamera overwrites `dist`
            # from the pass-1 calibration while `mtx_p` from pass 1 is still
            # used below -- confirm mixing the two calibrations is intended.
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera([objp], [corners], gray_img.shape[::-1], None, None)
            # Find the rotation and translation vectors.
            ret, rvecs, tvecs = cv2.solvePnP(objp, corners2, mtx_p, dist)
            # project 3D points to image plane
            imgpts, jac = cv2.projectPoints(axis, rvecs, tvecs, mtx_p, dist)
            img = draw(img,corners2,imgpts)
            img = cv2.resize(img, (720, 720))
            img_list.append(img)
    # Show each augmented image for 500 ms; close windows after the last one.
    for i in range(0, 5):
        cv2.imshow('img',img_list[i])
        cv2.waitKey(500)
        if i == 4:
            cv2.destroyAllWindows()
"qwer55113322@gmail.com"
] | qwer55113322@gmail.com |
da9b60fd9c51d7ee28d89f0b14483faf7101b364 | 423670088b9795d645cacc760dc9d9b0df0a2b34 | /vlttng/conf_template.py | fa86571506a859086343dda2d80b64c24c00f653 | [
"MIT"
] | permissive | eepp/vlttng | 4711ecadec7249795190566809a51f589b051bcd | 27c775db01ba13f27cb77bd5e6eca022c0db42bc | refs/heads/master | 2022-10-06T02:31:23.733577 | 2022-09-14T18:59:32 | 2022-09-14T18:59:32 | 59,193,437 | 9 | 4 | MIT | 2021-05-13T16:33:16 | 2016-05-19T09:38:00 | Python | UTF-8 | Python | false | false | 2,563 | py | # The MIT License (MIT)
#
# Copyright (c) 2016 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Bash-script template written into each virtual environment; the caller
# substitutes the {name}, {src_path}, {exports} and {conf_lines} placeholders
# with str.format().
conf_template = '''#!/usr/bin/env bash
# The MIT License (MIT)
#
# Copyright (c) 2016 Philippe Proulx <eepp.ca>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Run this file from your shell to configure the {name} project
# within this virtual environment.
# Make sure we're in the right current working directory
cd {src_path}
# Set the original build-time environment
{exports}
# Configure {name}
{conf_lines}
'''
| [
"eeppeliteloop@gmail.com"
] | eeppeliteloop@gmail.com |
7df1adc8cd446b6046a1e4172ae5851a82b39653 | ec77edd3a7db89f8b12202fe6ecc21ce2897bce0 | /examples/digits_basic.py | 06b871b0b71a2b192d0751a0f558f4853ad66f3e | [] | no_license | lantunes/wehd | db2a3ea9edd4b9b012f1bb8e56d6f7d331f386e6 | c13f9fb3b417d800fab09ab6f724c350c2f4d8cc | refs/heads/main | 2023-07-12T06:07:00.410568 | 2021-08-16T14:34:05 | 2021-08-16T14:34:05 | 395,657,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,764 | py | import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from scipy.stats import mode
from sklearn import datasets
from sklearn.manifold import TSNE
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.metrics.cluster import rand_score
from sklearn.preprocessing import MinMaxScaler
from sklearn_extra.cluster import KMedoids
import warnings
warnings.filterwarnings("ignore")
if __name__ == '__main__':
    # Cluster the 8x8 digit images with K-medoids and visualise with t-SNE.
    digits = datasets.load_digits()
    X = digits.data
    y = digits.target
    # Scale all 64 pixel features into [0, 1].
    scaler = MinMaxScaler().fit(X)
    X = scaler.transform(X)
    # One cluster per digit class.
    kmedoids = KMedoids(n_clusters=10, metric="euclidean").fit(X)
    labels = kmedoids.labels_
    print("Rand Index of K-medoids classifier: %s" % rand_score(y, labels))
    # re-map the cluster labels so that they match class labels
    labels_remapped = np.zeros_like(labels)
    for i in range(10):
        mask = (labels == i)
        # Majority true label within the cluster becomes its class label.
        labels_remapped[mask] = mode(y[mask])[0]
    print("accuracy score: %s" % accuracy_score(y, labels_remapped))
    print("confusion matrix: \n%s" % confusion_matrix(y, labels_remapped))
    # Project to 2D for plotting; colours encode the cluster assignment.
    tsne = TSNE(n_components=2, verbose=1, perplexity=50, n_iter=500, learning_rate=10, metric="euclidean")
    result = tsne.fit_transform(X)
    norm = matplotlib.colors.Normalize(vmin=0, vmax=10, clip=True)
    mapper = cm.ScalarMappable(norm=norm, cmap=cm.Accent)
    colors = [mapper.to_rgba(label) for label in labels]
    fig = plt.figure()
    plt.scatter(result[:, 0], result[:, 1], c=colors, marker="o", edgecolors="black", picker=True)

    def onpick(event):
        # Clicking a point prints the true digit label(s) under the cursor.
        ind = event.ind
        print()
        for i in ind:
            print(y[i])
    fig.canvas.mpl_connect("pick_event", onpick)
    plt.show()
| [
"lantunes@gmail.com"
] | lantunes@gmail.com |
f97b906b4a697556ced6a92198111804e4fcb722 | 1e53216c58f3c7843031721305590b83dbaed3f2 | /week_five/log_and_reg/log_and_reg/settings.py | c6158a025afa487c688f6c615e4526c97c9271a4 | [] | no_license | MTaylorfullStack/python_july_20 | 991852ba12d6f06d6b93b8efc60b66ee311b5cb3 | bdfb0d9a74300f2d6743ac2d108571692ca43ad9 | refs/heads/master | 2022-12-12T18:03:00.886048 | 2020-08-27T23:53:31 | 2020-08-27T23:53:31 | 277,956,745 | 2 | 2 | null | 2023-06-30T20:06:11 | 2020-07-08T01:09:34 | Python | UTF-8 | Python | false | false | 3,118 | py | """
Django settings for log_and_reg project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control -- rotate it and load
# it from an environment variable before any real deployment.
SECRET_KEY = 'o7qi48o7r49tj(i7mj245h==i2=@cv_n^0wyck!&f9*1nu&*z7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Hostnames Django will serve; must be populated when DEBUG is False.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'logregapp',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'log_and_reg.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'log_and_reg.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"mtaylor@codingdojo.com"
] | mtaylor@codingdojo.com |
bb938cd5a049722984a0234f69971204ca9c7af4 | 4310871d6d0ad59332360a0d6e980e0a41d25fdd | /comment/serializers.py | a79978c15226c0e45d99ee2139bc53813bc6fc26 | [] | no_license | jacegem/drf-test | 21bf7eaa47aeb045c31d2ba8239e587804c27a86 | 00105b6ecd38b68c58f2e5b498e0fb7ad2de099d | refs/heads/master | 2022-12-12T13:18:02.168914 | 2020-08-04T09:08:10 | 2020-08-04T09:08:10 | 146,073,595 | 0 | 0 | null | 2022-12-08T09:34:16 | 2018-08-25T07:01:35 | Python | UTF-8 | Python | false | false | 201 | py | from rest_framework import serializers
from .models import Comment
class CommentSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the Comment model."""
    class Meta:
        model = Comment
        fields = '__all__'
| [
"jacegem@gmail.com"
] | jacegem@gmail.com |
36801ff15a033f2b405073e33139f9994ac55639 | 98879590858368d5c32c389db31b761e479a0ab8 | /python-features/iterators.py | fc8bf99d47289b0d870921c1830127ecb4664be9 | [] | no_license | zhiruchen/get_hands_dirty | 0bbf3719113dcf474baae571ecd55e5c234072a3 | af98a11bbeb8183428fe41cb7c9fa9a2354983e9 | refs/heads/master | 2020-04-17T12:00:44.275247 | 2017-06-24T16:28:43 | 2017-06-24T16:28:43 | 66,988,195 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,966 | py | # -*- encoding: utf-8 -*-
"""
迭代器
http://anandology.com/python-practice-book/iterators.html
http://nvie.com/posts/iterators-vs-generators/
迭代器一定是可迭代的,
"""
from collections import Iterable, Iterator
class yrange(object):
    """An iterator over 0..n-1.

    The instance is both iterable (``__iter__`` returns self) and its own
    iterator, so it can be consumed only once.
    """

    def __init__(self, n):
        # n: exclusive upper bound; i: next value to yield.
        self.n = n
        self.i = 0

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next value, raising StopIteration when exhausted."""
        if self.i < self.n:
            i = self.i
            self.i += 1
            return i
        else:
            raise StopIteration()

    # Fix: the original defined only a Python 2 `next` method, so the class
    # was not a valid iterator under Python 3. Keep `next` as an alias for
    # backward compatibility with Python 2 callers.
    next = __next__
class zrange(object):
    """An iterable over 0..n-1 that is NOT its own iterator.

    Every __iter__ call hands back a fresh zrange_iter, so the same zrange
    instance can be fully iterated more than once (unlike yrange).
    """
    def __init__(self, n):
        # Exclusive upper bound of the range.
        self.n = n

    def __iter__(self):
        return zrange_iter(self.n)
class zrange_iter(object):
    """The iterator type handed out by zrange.__iter__; yields 0..n-1."""

    def __init__(self, n):
        # n: exclusive upper bound; i: next value to yield.
        self.n = n
        self.i = 0

    def __iter__(self):
        # Iterators are iterable and return themselves.
        return self

    def __next__(self):
        """Return the next value, raising StopIteration when exhausted."""
        if self.i < self.n:
            i = self.i
            self.i += 1
            return i
        else:
            raise StopIteration()

    # Fix: the original defined only a Python 2 `next` method, so the class
    # was not a valid iterator under Python 3. Keep `next` as an alias for
    # backward compatibility with Python 2 callers.
    next = __next__
class reverse_iter(object):
    """Iterator yielding the elements of lst in reverse order."""

    def __init__(self, lst):
        self.lst = lst
        # Walk negative indices from -1 (last element) down to -len(lst).
        self.start = -1
        self.end = 0 - len(lst)

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next element from the back, or raise StopIteration."""
        if self.start >= self.end:
            index = self.start
            self.start -= 1
            return self.lst[index]
        raise StopIteration()

    # Fix: the original defined only a Python 2 `next` method, so the class
    # was not a valid iterator under Python 3. Keep `next` as an alias for
    # backward compatibility with Python 2 callers.
    next = __next__
def test_yrange():
    """yrange objects are single-use iterators: iterable AND iterator."""
    assert list(yrange(4)) == [0, 1, 2, 3]
    assert sum(yrange(4)) == 6
    assert isinstance(yrange(3), Iterable) is True
    assert isinstance(yrange(3), Iterator) is True
def test_zrange():
    """zrange objects are re-iterable: two list() calls both see the data,
    and the object itself is not an iterator."""
    z = zrange(4)
    z_list1 = list(z)
    z_list2 = list(z)
    assert z_list1 == [0, 1, 2, 3]
    assert z_list2 == [0, 1, 2, 3]
    assert isinstance(z, Iterable) is True
    assert isinstance(z, Iterator) is False
if __name__ == '__main__':
test_yrange()
test_zrange()
| [
"zhiruchen1992@foxmail.com"
] | zhiruchen1992@foxmail.com |
ba4660a4acd3ec99cc6333f14b87878bb163b698 | d54ef1dee58c239d3a5bb74cbf8d8a717dcdcb33 | /paytest/test_paytest.py | 6cf586627df8c94c2155c66542e5871c2a50535e | [
"BSD-3-Clause"
] | permissive | bhoovd/lightningd-plugins | 88cef7c51b5291ef14b32761a1b4ceeb26ee5784 | a7a0007dcee1fcf3cc401ca83663a66b678e07d3 | refs/heads/master | 2023-03-14T20:50:46.115145 | 2021-02-16T10:15:54 | 2021-02-26T10:28:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,873 | py | from pyln.testing.fixtures import * # noqa: F401,F403
from pyln.testing.utils import DEVELOPER
from pyln.client import RpcError
import os
import unittest
import pytest
from pprint import pprint
pluginopt = {'plugin': os.path.join(os.path.dirname(__file__), "paytest.py")}
EXPERIMENTAL_FEATURES = int(os.environ.get("EXPERIMENTAL_FEATURES", "0"))
def test_start(node_factory):
    """Smoke test: the paytest plugin loads cleanly on node startup."""
    node_factory.get_node(options=pluginopt)
def test_invoice(node_factory):
    """A test invoice for an arbitrary destination pubkey must decode."""
    l1 = node_factory.get_node(options=pluginopt)
    # '03'*33 is a syntactically valid compressed-pubkey-shaped hex string.
    inv = l1.rpc.testinvoice('03'*33)
    details = l1.rpc.decodepay(inv['invoice'])
    pprint(details)
def test_simple_pay(node_factory):
    """ l1 generates and pays an invoice on behalf of l2.

    The pay is expected to fail: without the paytest plugin reinterpreting
    the payment on the receiving side, l2 does not recognise the synthetic
    payment details.
    """
    l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
    inv = l1.rpc.testinvoice(destination=l2.info['id'], amount=1)['invoice']
    details = l1.rpc.decodepay(inv)
    pprint(details)
    # Paying the invoice without the reinterpretation from paytest
    # will cause an unknown payment details directly.
    with pytest.raises(RpcError, match=r'WIRE_INCORRECT_OR_UNKNOWN_PAYMENT_DETAILS'):
        l1.rpc.pay(inv)
def test_mpp_pay(node_factory):
    """ l1 send a payment that is going to be split.

    A 10^8 msat paytest should be delivered in multiple parts, and every
    part should fail with the same expected failcode on the receiver.
    """
    l1, l2 = node_factory.line_graph(2, opts=pluginopt, wait_for_announce=True)
    res = l1.rpc.paytest(l2.info['id'], 10**8)
    from pprint import pprint
    #pprint(res)
    # The receiving plugin logs once it has seen the full amount.
    l2.daemon.wait_for_log(r'Received 100000000/100000000 with [0-9]+ parts')
    parts = res['status']['attempts']
    assert len(parts) > 2 # Initial split + >1 part
    failures = [p['failure']['data'] for p in parts if 'failure' in p and 'data' in p['failure']]
    pprint(failures)
    outcomes = [f['failcode'] for f in failures]
    # 16399 is the failcode expected for every part -- presumably the
    # paytest receiver's deliberate failure; TODO confirm its symbolic name.
    is16399 = [p == 16399 for p in outcomes]
    assert all(is16399)
    assert len(is16399) >= 1
| [
"decker.christian@gmail.com"
] | decker.christian@gmail.com |
2a7781ca167806d15563fda92dec8c31c733744d | df44affab179c2546fb3e0d1dc29eebcfdf51c1c | /toughradius/txradius/authorize.py | fedf4305359d7f9665ba6b081ea9725c186fcd57 | [] | no_license | sailorhdx/taurusradius | 121c508e7faffaddcd5326d2b6d3710eaf0ed08e | 92d30820611a0c9102ae41713ea3c35437a3c6ee | refs/heads/master | 2021-01-22T02:28:31.543338 | 2017-06-17T02:15:33 | 2017-06-17T02:15:33 | 92,362,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,662 | py | #!/usr/bin/env python
# coding=utf-8
import os
import six
from twisted.python import log
from twisted.internet import protocol
from twisted.internet import reactor, defer
from toughradius.txradius.radius import packet
from toughradius.txradius.ext import ikuai
from toughradius.txradius import message
from toughradius.txradius.radius import dictionary
from toughradius import txradius
RADIUS_DICT = dictionary.Dictionary(os.path.join(os.path.dirname(txradius.__file__), 'dictionary/dictionary'))
def get_dm_packet(vendor_id, nas_secret, nas_addr, coa_port=3799, **kwargs):
    """Build a RADIUS Disconnect-Message for the given NAS.

    Extra keyword arguments become attributes of the Disconnect-Request;
    'User-Name' must be among them. Returns a (raw_bytes, nas_addr, coa_port)
    triple ready to be sent over UDP. iKuai devices (matched by vendor id)
    get their proprietary packet format via ikuai.create_dm_pkg instead of
    the standard Disconnect-Request encoding.
    """
    secret = six.b(str(nas_secret))
    request = message.CoAMessage(code=packet.DisconnectRequest, dict=RADIUS_DICT, secret=secret, **kwargs)
    username = request['User-Name'][0]
    if int(vendor_id) == ikuai.VENDOR_ID:
        payload = ikuai.create_dm_pkg(secret, username)
    else:
        payload = request.RequestPacket()
    return (payload, nas_addr, coa_port)
class CoAClient(protocol.DatagramProtocol):
    """One-shot Twisted UDP client that sends a RADIUS Disconnect-Message
    to a NAS and resolves a Deferred with the response code.

    NOTE(review): ``datagramReceived`` uses Python 2 tuple-parameter
    unpacking, so this module is Python 2 only as written.
    """

    def __init__(self, vendor_id, dictionary, nas_secret, nas_addr, coa_port = 3799, debug = False):
        self.dictionary = dictionary
        self.secret = six.b(str(nas_secret))
        self.addr = nas_addr
        self.port = int(coa_port)
        self.vendor_id = int(vendor_id)
        self.debug = debug
        # Bind an ephemeral local UDP port and start listening for the reply.
        self.uport = reactor.listenUDP(0, self)

    def close(self):
        # Stop listening; `transport` is set by Twisted once listenUDP binds.
        if self.transport is not None:
            self.transport.stopListening()
            self.transport = None
        return

    def onError(self, err):
        """Errback: log, schedule cleanup, and propagate the failure."""
        log.err('Packet process error: %s' % str(err))
        reactor.callLater(0.01, self.close)
        return err

    def onResult(self, resp):
        """Callback: schedule cleanup and pass the result through."""
        reactor.callLater(0.01, self.close)
        return resp

    def onTimeout(self):
        # Fail the pending deferred if no response arrived before the
        # deadline. (The attribute name "deferrd" is an existing typo kept
        # as-is because other methods reference it.)
        if not self.deferrd.called:
            defer.timeout(self.deferrd)

    def sendDisconnect(self, **kwargs):
        """Send one disconnect packet and return a Deferred.

        Keyword arguments become RADIUS attributes ('User-Name' required);
        'timeout' (seconds, default 5) is popped out and used as the reply
        deadline. The Deferred fires with the RADIUS response code, or errs
        on timeout / unparsable response.
        """
        timeout_sec = kwargs.pop('timeout', 5)
        coa_req = message.CoAMessage(code=packet.DisconnectRequest, dict=self.dictionary, secret=self.secret, **kwargs)
        username = coa_req['User-Name'][0]
        if self.vendor_id == ikuai.VENDOR_ID:
            # iKuai devices expect their proprietary disconnect packet.
            pkg = ikuai.create_dm_pkg(self.secret, username)
            if self.debug:
                log.msg('send ikuai radius Coa Request to (%s:%s) [username:%s]: %s' % (self.addr,
                 self.port,
                 username,
                 repr(pkg)))
            self.transport.write(pkg, (self.addr, self.port))
        else:
            if self.debug:
                log.msg('send radius Coa Request to (%s:%s) [username:%s] : %s' % (self.addr,
                 self.port,
                 username,
                 coa_req))
            self.transport.write(coa_req.RequestPacket(), (self.addr, self.port))
        self.deferrd = defer.Deferred()
        self.deferrd.addCallbacks(self.onResult, self.onError)
        reactor.callLater(timeout_sec, self.onTimeout)
        return self.deferrd

    def datagramReceived(self, datagram, (host, port)):
        """Parse the NAS reply and resolve the pending deferred with its
        response code (or fail it on a parse error)."""
        try:
            response = packet.Packet(packet=datagram)
            if self.debug:
                log.msg('Received Radius Response from (%s:%s): %s' % (host, port, repr(response)))
            self.deferrd.callback(response.code)
        except Exception as err:
            log.err('Invalid Response packet from %s: %s' % ((host, port), str(err)))
            self.deferrd.errback(err)
def disconnect(vendor_id, dictionary, nas_secret, nas_addr, coa_port=3799, debug=False, **kwargs):
    """Convenience wrapper: build a CoAClient and send one disconnect.

    Returns the client's Deferred (fires with the RADIUS response code or
    errs on timeout/parse failure).
    """
    client = CoAClient(vendor_id, dictionary, nas_secret, nas_addr, coa_port, debug)
    return client.sendDisconnect(**kwargs)
"sailorhdx@hotmail.com"
] | sailorhdx@hotmail.com |
25bb83c6f782b5a36d42a6c483e404476b3c2f1e | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /yfTyMb3SSumPQeuhm_5.py | 80ed33b96fc160972e0d5109dcaa669d35356e56 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | """
Write a function that efficiently calculates Fibonacci terms.
### Examples
fibonacci(1) ➞ 1
fibonacci(2) ➞ 1
fibonacci(4) ➞ 3
fibonacci(64) ➞ 10610209857723
### Notes
The input will always be a power of two.
"""
def fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=F(2)=1) in O(log n).

    Uses the standard fast-doubling identities on pairs (F(k), F(k+1)):
        F(2k)   = F(k) * (2*F(k+1) - F(k))
        F(2k+1) = F(k)**2 + F(k+1)**2
    Works for any non-negative n, including the power-of-two inputs the
    module docstring promises.
    """
    def _pair(k):
        # Return the pair (F(k), F(k+1)).
        if k == 0:
            return 0, 1
        a, b = _pair(k >> 1)
        c = a * (2 * b - a)      # F(2m)   where m = k >> 1
        d = a * a + b * b        # F(2m+1)
        if k & 1:
            return d, c + d
        return c, d

    return _pair(n)[0]
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b954d06a1d1d14afe11039a99c3494d4ccb1ac89 | 77327e4c6de68fab4061c5acdb569e95cb8a9fae | /TestEnv/PrisonerDilemma.py | 92ce9bbf4e611e7790df41d38d79049698cca47e | [] | no_license | jiankangren/RARL-1 | 577148384bd065708c868c854ff53bc8d16fac99 | a7b28d9b56cfc7e5f832bb839d2c8f5db4d5b5ab | refs/heads/master | 2020-04-08T06:33:53.706725 | 2018-08-27T22:45:48 | 2018-08-27T22:45:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | from rllab.envs.base import Env
from rllab.spaces import Box, Discrete
from rllab.envs.base import Step
class PrisonerDilemma(Env):
    """Two-player one-shot Prisoner's Dilemma as an rllab environment.

    Every episode is a single joint step; the lone observation is always 0.
    From the payoff structure, action 0 looks like "cooperate" and action 1
    like "defect" -- TODO confirm against the calling experiment code.
    """

    def __init__(self):
        # Each player chooses between two discrete actions.
        self.num_action = 2

    def render(self):
        # Nothing to visualise for a one-shot matrix game.
        pass

    @property
    def observation_space(self):
        # A single dummy state.
        return Discrete(1)

    @property
    def action_space(self):
        return Discrete(self.num_action)

    def close(self):
        pass

    def reset(self):
        # The only observation value is 0.
        return 0

    def step(self, Action):
        """Take one joint step.

        Action is a dict: 'action' holds the (a1, a2) pair of both players'
        choices, 'policy_num' selects whose payoff is returned as reward.
        """
        obs = 0
        done = True
        action = Action['action']
        a1 = action[0]
        a2 = action[1]
        policy_num = Action['policy_num']
        # Sentinel; every branch below overwrites it.
        r = -100
        if a1 == 0:
            if a2 == 0:
                # Both cooperate.
                r = -1
            else:
                # a1 cooperates, a2 defects: the cooperator pays -3.
                if policy_num == 1:
                    r = -3
                else:
                    r = 0
            else:
                pass
        else:
            if a2 == 0:
                # a1 defects, a2 cooperates: mirrored payoffs.
                if policy_num == 1:
                    r = 0
                else:
                    r = -3
            else:
                # Both defect.
                r = -2
        return Step(observation=obs, reward=r, done=done)
"maxiaoba@umich.edu"
] | maxiaoba@umich.edu |
fd0348e317dfb0cc806d13e3c1787e7508e61c8a | f8d043a7941cb311d9ea8e991b7c5be3c461675f | /mac/shop/urls.py | a1db41ac4bb8b25b8045d600df7ea205fe297640 | [] | no_license | shreyakapadia10/Django-Project | acf5c56f95ea552b589dfc6f8819b92339b3a377 | 4e9f5c2697ef1ed07209f50b1d8ed8a1bd953b87 | refs/heads/master | 2023-05-02T18:47:08.892747 | 2021-05-25T11:11:54 | 2021-05-25T11:11:54 | 370,583,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 524 | py | from django.urls import path
from . import views
# Route table for the shop app; the `name` values are reversed from templates
# and views (e.g. {% url 'ShopHome' %}).
urlpatterns = [
    path('', views.index, name='ShopHome'),
    path('about/', views.about, name='AboutUs'),
    path('contact/', views.contact, name='ContactUs'),
    path('tracker/', views.tracker, name='TrackingStatus'),
    path('search/', views.search, name='Search'),
    # <int:vid> is the id of the product to display.
    path('product/<int:vid>', views.viewproduct, name='ViewProduct'),
    path('checkout/', views.checkout, name='Checkout'),
    path('handlerequest/', views.handlerequest, name='HandleRequest'),
]
| [
"shreyakapadia8@gmail.com"
] | shreyakapadia8@gmail.com |
fe3be7f503140e17f885a9751de4ac6ce8bd2a4e | 234f0a885f6f6bffdfe21dcb4882ed9bc611029d | /fullcyclepy/helpers/taskschedule.py | ed6c9f97ea07397779e5441fbc22dddcd94fee81 | [
"MIT"
] | permissive | gitter-badger/fullcycle | 4a0fe84d92f93a333094de76706c6aeb7c4b9402 | 1f21fb5bfacdaa7005f506bd6327689368b421d4 | refs/heads/master | 2020-03-28T07:04:31.463488 | 2018-09-06T23:15:00 | 2018-09-06T23:15:00 | 147,879,359 | 0 | 0 | null | 2018-09-07T22:08:08 | 2018-09-07T22:08:08 | null | UTF-8 | Python | false | false | 1,188 | py |
import datetime
class TaskSchedule(object):
    """Decides when a recurring task is due and records when it last ran."""

    # Class-level defaults; instances overwrite these as they are used.
    lastrun = None       # datetime of the most recent run, or None
    start = None         # earliest datetime the first run may happen, or None
    pending_run = False  # one-shot trigger forcing the next check to fire
    # interval is in seconds; 0 (the default) means the schedule is disabled
    interval = 0
    q = None

    def __init__(self, run_on_init=False):
        self.pending_run = run_on_init

    def is_disabled(self):
        """A non-positive interval disables the schedule entirely."""
        return self.interval <= 0

    def is_time_to_run(self):
        """Return True (and stamp lastrun) when the task is due now."""
        if self.is_disabled():
            return False
        now = datetime.datetime.now()
        if self.pending_run:
            # One-shot trigger requested at construction; consume it.
            self.pending_run = False
            return self.kick_it_off(True)
        if self.lastrun is None:
            # Never run before: go immediately, or wait for the start time.
            due = self.start is None or now > self.start
            return self.kick_it_off(due)
        elapsed = (now - self.lastrun).total_seconds()
        if elapsed > self.interval:
            return self.kick_it_off(True)
        return False

    def kick_it_off(self, dorun=False):
        """Record the run timestamp when dorun is True; echo dorun back."""
        if dorun:
            self.lastrun = datetime.datetime.now()
        return dorun
| [
"dfoderick@gmail.com"
] | dfoderick@gmail.com |
91a59fef16036229feabbf59c9aa7c02b86b5d38 | 3f7fd5abd0fe3a516d620a6948f9079bc34c1f5e | /glooey/__init__.py | 6790c321f675d52a9caeabef925ecb67ace45d5b | [
"MIT"
] | permissive | wkentaro/glooey | f8cb4723e266a29941da41d5ab81f8d2b809d2f2 | 4eacfdc7c14b5903f1bc3d5d4fa2b355f5fc5ee1 | refs/heads/master | 2020-05-17T19:42:38.554766 | 2019-04-28T05:41:43 | 2019-04-28T05:41:43 | 183,922,793 | 1 | 0 | MIT | 2019-04-28T15:02:41 | 2019-04-28T15:02:40 | null | UTF-8 | Python | false | false | 319 | py | #!/usr/bin/env python3
__version__ = '0.1.2'
from .widget import *
from .root import *
from .containers import *
from .text import *
from .images import *
from .buttons import *
from .dialogs import *
from .scrolling import *
from .misc import *
from . import drawing
from .drawing import Color
from . import themes
| [
"kale@thekunderts.net"
] | kale@thekunderts.net |
0f1b8f58a2fabca481414c2c84477d370d059f5d | be4d32d35fd4af3cf4ecf3736c8e879d50b8ae37 | /Python/Django/djangoform/djangoform/wsgi.py | 145141d25d9c0de325f197f0995939cb185d6510 | [] | no_license | yangluo0901/CodingDojo_Assignment | f09bbec26f87b5b276fd6ef3c77f27d13518937e | 44ccb5158b12c1656793bac76f1a7a707b147982 | refs/heads/master | 2021-10-11T21:12:14.889189 | 2019-01-29T19:52:56 | 2019-01-29T19:52:56 | 105,716,841 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 398 | py | """
WSGI config for djangoform project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "djangoform.settings")
application = get_wsgi_application()
| [
"yangluo0901@gmail.com"
] | yangluo0901@gmail.com |
8ba2e00236e70179ab87d41710598309635d289b | c4dacd0f5e397422018460c268ec8375aebe6419 | /pyRMSD/benchmark/alias/test/TestCondensedMatrix.py | 1fe99c0cac1171d98093825018e25272436a89ee | [
"MIT"
] | permissive | asford/pyRMSD | 032c9e03094392a957dfc5650b28b6e70bcdf17a | 8f1149fb631bfffebeb595c5b164d6945f7444fa | refs/heads/master | 2021-01-15T20:04:10.524616 | 2013-08-01T12:56:04 | 2013-08-01T12:56:04 | 12,771,855 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,491 | py | '''
Created on 30/01/2012
@author: victor
'''
import unittest
import scipy.spatial.distance as distance
import cStringIO
import random
from pyproclust.matrix.condensedMatrix import CondensedDistanceMatrix, load_condensed_matrix, calc_number_of_rows,complete_to_condensed,zero_condensed
from pyproclust.matrix.completeMatrix import CompleteDistanceMatrix
import numpy as np
class Test(unittest.TestCase):
    """Unit tests for CondensedDistanceMatrix and its module-level helpers."""

    def test_equal(self):
        """Matrices compare equal only with identical data and size."""
        cm1 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
        cm2 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
        cm3 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4])
        cm4 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3])
        self.assertEqual(cm1 == cm2, True)
        self.assertEqual(cm1 == cm3, False)
        self.assertEqual(cm1 == cm4, False)
        self.assertEqual(cm2 == cm3, False)
        self.assertEqual(cm2 == cm4, False)
        self.assertEqual(cm3 == cm4, False)

    def test_compare_condensed_matrixes(self):
        """compare_with returns a pair; presumably (mean difference, spread)
        with optional scaling arguments -- TODO confirm in condensedMatrix."""
        cm1 = CondensedDistanceMatrix([1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9])
        cm2 = CondensedDistanceMatrix([6,7,8,9,0,1,2,3,4,5,6,7,8,9,0,1,2,3,4])
        cm3 = CondensedDistanceMatrix([1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1])
        cm4 = CondensedDistanceMatrix([0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5,0.5])
        result_1 = cm1.compare_with(cm2)
        result_2 = cm1.compare_with(cm3)
        result_3 = cm3.compare_with(cm4,1.,2.)
        result_4 = cm3.compare_with(cm4,1.,1.)
        self.assertEqual(result_1, (5.0, 0.0))
        self.assertEqual(result_2, (3.8421052631578947, 2.6008734948643863))
        self.assertEqual(result_3, (0., 0.))
        self.assertEqual(result_4, (0.5, 0.))

    def test_get_number_of_rows(self):
        """Round-trip rows -> condensed element count -> rows, 100 times."""
        random.seed()
        for i in range(100): #@UnusedVariable
            rows = random.randint(1,1000)
            # A condensed matrix with r rows holds r*(r-1)/2 elements.
            number_of_elements = (rows *(rows-1)) / 2
            calculated_rows = calc_number_of_rows(number_of_elements)
            self.assertEqual(rows,calculated_rows)

    def test_normalize_condensed_matrix(self):
        """normalize(min, max) rescales all entries into [0, 1]."""
        condensed = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
        expected = CondensedDistanceMatrix([0.0, 0.47, 1.0, 0.83, 0.47, 0.91, 0.76, 0.35, 0.16, 0.13])
        minmax = condensed.get_minimum_and_maximum()
        condensed.normalize(minmax[0], minmax[1])
        # Expected values are rounded, so compare to 2 decimal places only.
        for i in range(len(condensed.get_data())):
            self.assertAlmostEqual(condensed.get_data()[i],expected.get_data()[i],2)

    def test_data_sharing(self):
        """The constructor copies its input (later mutation of the source
        list/array must not leak in), while get_data() returns the live
        internal buffer."""
        mylist = [ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.]
        myarray = np.array([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
        mylistaarray = np.array(mylist)
        condensed1 = CondensedDistanceMatrix(mylist)
        condensed2 = CondensedDistanceMatrix(myarray)
        condensed3 = CondensedDistanceMatrix(mylistaarray)
        mylist[5] = 0.
        self.assertEqual(False, mylist[5] == condensed1.get_data()[5])
        myarray[5] = 0.
        self.assertEqual(False, myarray[5] == condensed2.get_data()[5])
        mylistaarray[5] = 0.
        self.assertEqual(False, mylistaarray[5] == condensed3.get_data()[5])
        # get_data() hands back a shared reference: writes are visible.
        mycontents = condensed3.get_data()
        mycontents[5] = 0.
        self.assertEqual(True, mycontents[5] == condensed3.get_data()[5] and\
                         condensed3.get_data()[5] == 0.)

    def test_gen_condensed_matrix(self):
        """complete_to_condensed matches scipy's pdist on the same points."""
        obs = [(1,1),(2,1),(4,5),(7,7),(5,7)]
        ## distance matrix
        distance_matrix = CompleteDistanceMatrix(distance.cdist(obs,obs))
        ## lower distance matrix (wo diagonal)
        expected_distance_condensed = CondensedDistanceMatrix(distance.pdist(obs))
        distance_condensed = complete_to_condensed(distance_matrix)
        self.assertEqual(True,distance_condensed == expected_distance_condensed)

    def test_validate_dimensions(self):
        """10 elements is a valid condensed size (5x5 matrix); 8 is not."""
        condensed_matrix_1 = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
        self.assertEqual(True,condensed_matrix_1._CondensedDistanceMatrix__validate_dimensions())
        condensed_matrix_2 = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6])
        self.assertEqual(False,condensed_matrix_2._CondensedDistanceMatrix__validate_dimensions())

    def test_minmax_condensed(self):
        """get_minimum_and_maximum returns the (min, max) pair of the data."""
        condensed_matrix = CondensedDistanceMatrix([ 1.,
                                                     4.5, 8.5,
                                                     7.2, 4.5, 7.8,
                                                     6.7, 3.6,2.2, 2.0])
        expected = (1,8.5)
        self.assertEqual(condensed_matrix.get_minimum_and_maximum(),expected)

    def test_save_condensed_matrix(self):
        """save() writes the upper-triangle rows as whitespace-separated text."""
        # with final spaces!
        expected_matrix_string = """1.0 4.5 7.2 6.7
8.5 4.5 3.6
7.8 2.2
2.0
"""
        condensed_matrix = CondensedDistanceMatrix([1.0, 4.5, 7.2, 6.7,
                                                    8.5, 4.5, 3.6,
                                                    7.8, 2.2,
                                                    2.0])
        output = cStringIO.StringIO()
        condensed_matrix.save(output)
        self.assertEqual(expected_matrix_string,output.getvalue())

    def test_load_condensed_matrix(self):
        """load_condensed_matrix parses a lower-triangle text layout."""
        matrix_string = """1.0
4.5 8.5
7.2 4.5 7.8
6.7 3.6 2.2 2.0
"""
        expected_matrix = CondensedDistanceMatrix([ 1., 4.5, 8.5, 7.2, 4.5, 7.8, 6.7, 3.6,2.2, 2.])
        input = cStringIO.StringIO(matrix_string)
        loaded_matrix = load_condensed_matrix(input)
        for i in range(len(expected_matrix.get_data())):
            self.assertAlmostEqual(expected_matrix.get_data()[i],\
                loaded_matrix.get_data()[i],3)

    def test_item_access(self):
        """2D (i, j) indexing on a condensed matrix behaves exactly like a
        complete symmetric matrix with a zero diagonal."""
        condensed_matrix_1 = CondensedDistanceMatrix([1.0, 4.5,7.2,
                                                      8.5, 4.5,
                                                      7.8])
        condensed_matrix_2 = CondensedDistanceMatrix([.0]*6)
        complete_matrix = [[0.0, 1.0, 4.5, 7.2],
                           [1.0, 0.0, 8.5, 4.5],
                           [4.5, 8.5, 0.0, 7.8],
                           [7.2, 4.5, 7.8, 0.0]]
        row_len = condensed_matrix_1.row_length
        for i in range(row_len):
            for j in range(row_len):
                condensed_matrix_2[i,j] = complete_matrix[i][j]
        ## The access for a complete and a condensed matrix is exactly the same
        for i in range(row_len):
            for j in range(row_len):
                self.assertEquals(condensed_matrix_1[i,j],complete_matrix[i][j])
        ## And we can build a condensed matrix as a complete matrix
        self.assertItemsEqual(condensed_matrix_1.get_data(), condensed_matrix_2.get_data())

    def test_zero_condensed(self):
        """zero_condensed(n) builds an n-row condensed matrix of all zeros."""
        row_len = 5
        zeroed_condensed = zero_condensed(row_len)
        self.assertEqual(row_len,zeroed_condensed.row_length)
        for i in range(row_len):
            for j in range(row_len):
                self.assertEquals(zeroed_condensed[i,j],0.)
# Run the whole test suite when this module is executed directly.
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
"victor.gil.sepulveda@gmail.com"
] | victor.gil.sepulveda@gmail.com |
e68071c5d957adac8446ee734833fc462a73c82f | ac6922fbaa51c3068883c3d60b407350f13213f9 | /src/einsteinpy/tests/test_metric/test_schwarzschild.py | 42b0c43107a4dc2b105cb234aeecafd5dac58e47 | [
"MIT",
"LicenseRef-scancode-proprietary-license"
] | permissive | divya144/einsteinpy | 719c961e3c4698c1d7df77a78a3f9b51f234d077 | e5f7c15fb80f8fef8d1e8ca41188d9ac7ee668ec | refs/heads/master | 2020-05-03T09:26:27.003181 | 2019-04-09T13:33:35 | 2019-04-10T10:32:36 | 178,553,798 | 2 | 1 | MIT | 2019-03-30T12:08:14 | 2019-03-30T12:08:14 | null | UTF-8 | Python | false | false | 7,272 | py | import warnings
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_allclose
from einsteinpy import constant
from einsteinpy.metric import Schwarzschild
from einsteinpy.utils import schwarzschild_radius
_c = constant.c.value
@pytest.mark.parametrize(
"pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs",
[
(
[306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0 * u.m / u.s, 0 * u.rad / u.s, 951.0 * u.rad / u.s],
0 * u.s,
4e24 * u.kg,
0.0,
0.002,
{"stepsize": 0.5e-6},
),
(
[1 * u.km, 0.15 * u.rad, np.pi / 2 * u.rad],
[
0.1 * _c * u.m / u.s,
0.5e-5 * _c * u.rad / u.s,
0.5e-4 * _c * u.rad / u.s,
],
0 * u.s,
5.972e24 * u.kg,
0.0,
0.0001,
{"stepsize": 0.5e-6},
),
(
[50 * u.km, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0.1 * _c * u.m / u.s, 2e-7 * _c * u.rad / u.s, 1e-5 * u.rad / u.s],
0 * u.s,
5.972e24 * u.g,
0.0,
0.001,
{"stepsize": 5e-6},
),
],
)
def test_calculate_trajectory(
pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs
):
cl = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
ans = cl.calculate_trajectory(
start_lambda=start_lambda,
end_lambda=end_lambda,
OdeMethodKwargs=OdeMethodKwargs,
)
_c, _scr = constant.c.value, schwarzschild_radius(M).value
ans = ans[1]
testarray = (
(1 - (_scr / ans[:, 1])) * np.square(ans[:, 4])
- (np.square(ans[:, 5])) / ((1 - (_scr / ans[:, 1])) * (_c ** 2))
- np.square(ans[:, 1] / _c)
* (np.square(ans[:, 6]) + np.square(np.sin(ans[:, 2])) * np.square(ans[:, 7]))
)
comparearray = np.ones(shape=ans[:, 4].shape, dtype=float)
assert_allclose(testarray, comparearray, 1e-4)
def test_calculate_trajectory2():
    """Integrate one year of Earth's orbit; speed at aphelion must be ~29.29 km/s."""
    # based on the revolution of earth around sun
    # data from https://en.wikipedia.org/wiki/Earth%27s_orbit
    M = 1.989e30 * u.kg
    distance_at_perihelion = 147.10e6 * u.km
    speed_at_perihelion = 30.29 * u.km / u.s
    angular_vel = (speed_at_perihelion / distance_at_perihelion) * u.rad
    pos_vec = [distance_at_perihelion, np.pi / 2 * u.rad, 0 * u.rad]
    vel_vec = [0 * u.km / u.s, 0 * u.rad / u.s, angular_vel]
    end_lambda = ((1 * u.year).to(u.s)).value
    cl = Schwarzschild.from_spherical(pos_vec, vel_vec, 0 * u.s, M)
    ans = cl.calculate_trajectory(
        start_lambda=0.0,
        end_lambda=end_lambda,
        OdeMethodKwargs={"stepsize": end_lambda / 2e3},
    )[1]
    # velocity should be 29.29 km/s at aphelion (where r is max)
    i = np.argmax(ans[:, 1])  # index where radial distance is max
    v_apehelion = (((ans[i][1] * ans[i][7]) * (u.m / u.s)).to(u.km / u.s)).value
    assert_allclose(v_apehelion, 29.29, rtol=0.01)
def test_calculate_trajectory3():
    """Same orbit check as test_calculate_trajectory2, via cartesian coordinates."""
    # same test as with test_calculate_trajectory2(),
    # but initialized with cartesian coordinates
    # and function returning cartesian coordinates
    M = 1.989e30 * u.kg
    distance_at_perihelion = 147.10e6 * u.km
    speed_at_perihelion = 30.29 * u.km / u.s
    # Perihelion position/velocity rotated 45 degrees into the x-y plane.
    pos_vec = [
        distance_at_perihelion / np.sqrt(2),
        distance_at_perihelion / np.sqrt(2),
        0 * u.km,
    ]
    vel_vec = [
        -1 * speed_at_perihelion / np.sqrt(2),
        speed_at_perihelion / np.sqrt(2),
        0 * u.km / u.h,
    ]
    end_lambda = ((1 * u.year).to(u.s)).value
    cl = Schwarzschild.from_cartesian(pos_vec, vel_vec, 0 * u.min, M)
    ans = cl.calculate_trajectory(
        start_lambda=0.0,
        end_lambda=end_lambda,
        return_cartesian=True,
        OdeMethodKwargs={"stepsize": end_lambda / 2e3},
    )[1]
    # velocity should be 29.29 km/s at aphelion (where r is max)
    R = np.sqrt(ans[:, 1] ** 2 + ans[:, 2] ** 2 + ans[:, 3] ** 2)
    i = np.argmax(R)  # index where radial distance is max
    v_apehelion = (
        (np.sqrt(ans[i, 5] ** 2 + ans[i, 6] ** 2 + ans[i, 7] ** 2) * (u.m / u.s)).to(
            u.km / u.s
        )
    ).value
    assert_allclose(v_apehelion, 29.29, rtol=0.01)
@pytest.mark.parametrize(
"pos_vec, vel_vec, time, M, start_lambda, end_lambda, OdeMethodKwargs, return_cartesian",
[
(
[306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad],
[0 * u.m / u.s, 0.1 * u.rad / u.s, 951.0 * u.rad / u.s],
0 * u.s,
4e24 * u.kg,
0.0,
0.0003,
{"stepsize": 0.3e-6},
True,
),
(
[1 * u.km, 0.15 * u.rad, np.pi / 2 * u.rad],
[_c * u.m / u.s, 0.5e-5 * _c * u.rad / u.s, 1e-4 * _c * u.rad / u.s],
0 * u.s,
5.972e24 * u.kg,
0.0,
0.0004,
{"stepsize": 0.5e-6},
False,
),
],
)
def test_calculate_trajectory_iterator(
pos_vec,
vel_vec,
time,
M,
start_lambda,
end_lambda,
OdeMethodKwargs,
return_cartesian,
):
cl1 = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
arr1 = cl1.calculate_trajectory(
start_lambda=start_lambda,
end_lambda=end_lambda,
OdeMethodKwargs=OdeMethodKwargs,
return_cartesian=return_cartesian,
)[1]
cl2 = Schwarzschild.from_spherical(pos_vec, vel_vec, time, M)
it = cl2.calculate_trajectory_iterator(
start_lambda=start_lambda,
OdeMethodKwargs=OdeMethodKwargs,
return_cartesian=return_cartesian,
)
arr2_list = list()
for _, val in zip(range(100), it):
arr2_list.append(val[1])
arr2 = np.array(arr2_list)
assert_allclose(arr1[:100, :], arr2, rtol=1e-10)
def test_calculate_trajectory_iterator_RuntimeWarning():
    """With stop_on_singularity=True the iterator must emit a warning."""
    position = [306 * u.m, np.pi / 2 * u.rad, np.pi / 2 * u.rad]
    velocity = [0 * u.m / u.s, 0.01 * u.rad / u.s, 10 * u.rad / u.s]
    mass = 1e25 * u.kg
    body = Schwarzschild.from_spherical(position, velocity, 0 * u.s, mass)
    with warnings.catch_warnings(record=True) as caught:
        trajectory = body.calculate_trajectory_iterator(
            start_lambda=0.0,
            OdeMethodKwargs={"stepsize": 0.4e-6},
            stop_on_singularity=True,
        )
        # Consume at most 1000 steps of the trajectory.
        consumed = 0
        for _step in trajectory:
            consumed += 1
            if consumed == 1000:
                break
        assert len(caught) >= 1
def test_calculate_trajectory_iterator_RuntimeWarning2():
    """Even with stop_on_singularity=False the iterator must emit a warning."""
    position = [306 * u.m, np.pi / 2 * u.rad, np.pi / 3 * u.rad]
    velocity = [0 * u.m / u.s, 0.01 * u.rad / u.s, 10 * u.rad / u.s]
    mass = 1e25 * u.kg
    body = Schwarzschild.from_spherical(position, velocity, 0 * u.s, mass)
    with warnings.catch_warnings(record=True) as caught:
        trajectory = body.calculate_trajectory_iterator(
            start_lambda=0.0,
            OdeMethodKwargs={"stepsize": 0.4e-6},
            stop_on_singularity=False,
        )
        # Consume at most 1000 steps of the trajectory.
        consumed = 0
        for _step in trajectory:
            consumed += 1
            if consumed == 1000:
                break
        assert len(caught) >= 1
| [
"s.ritwik98@gmail.com"
] | s.ritwik98@gmail.com |
a29f11c438bd1ae1ff2edaed687eb601ffaa6d46 | 8fc2ab3d29a30e603e19b30bb9517928de529167 | /CoinChange_5.py | 9879dd2cfd48bef094087426f31b8a20a3a81491 | [] | no_license | rushilchugh/Practise | 35a9861bec6786580dc0a440eb25d78e43cb7bc9 | 98fd593b95dad641bef1d519c6c6ed1daaae630f | refs/heads/master | 2020-03-13T21:14:14.013604 | 2018-04-27T12:23:50 | 2018-04-27T12:23:50 | 131,291,684 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | import math
def coinChange(cList, S):
minVals = [math.inf for _ in range(S + 1)]
minVals[0] = 0
for i in range(1, S + 1):
for j in range(len(cList)):
currCoin = cList[j]
if cList[j] <= i and minVals[i - cList[j]] + 1 < minVals[i]:
minVals[i] = minVals[i - cList[j]] + 1
return minVals
print(coinChange([1, 5, 7], 8)) | [
"noreply@github.com"
] | rushilchugh.noreply@github.com |
aa697365a29cb901be7dae97bff8fa8573350419 | a2e638cd0c124254e67963bda62c21351881ee75 | /Extensions/Settlement/FPythonCode/FSettlementCommitterFunctions.py | bbb921bda4c49e2e4f2cd0efe447daea3a2bc283 | [] | no_license | webclinic017/fa-absa-py3 | 1ffa98f2bd72d541166fdaac421d3c84147a4e01 | 5e7cc7de3495145501ca53deb9efee2233ab7e1c | refs/heads/main | 2023-04-19T10:41:21.273030 | 2021-05-10T08:50:05 | 2021-05-10T08:50:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | """ Compiled: 2020-09-18 10:38:49 """
#__src_file__ = "extensions/settlement/etc/FSettlementCommitterFunctions.py"
import acm
#-------------------------------------------------------------------------
def CommitCommitters(committerList, logger):
commitSuccessful = True
acm.BeginTransaction()
try:
for committer in committerList:
settlement = committer.GetSettlement()
RunSTPAndUpdateStateChart(settlement)
committer.Commit()
acm.CommitTransaction()
except Exception as e:
acm.AbortTransaction()
commitSuccessful = False
logger.LP_Log("Exception occurred while committing settlements: {}".format(str(e)))
logger.LP_Flush()
return commitSuccessful
#-------------------------------------------------------------------------
def RunSTPAndUpdateStateChart(settlement):
    """Run straight-through processing on *settlement* and refresh its state chart."""
    if settlement.IsValidForSTP():
        settlement.STP()
    # Re-map the settlement's process state chart after the first STP pass.
    stateChart = acm.Operations.GetMappedSettlementProcessStateChart(settlement)
    settlement.StateChart(stateChart)
    # NOTE(review): STP is deliberately attempted a second time, presumably
    # because the new state chart can make the settlement STP-eligible -- confirm.
    if settlement.IsValidForSTP():
        settlement.STP()
"81222178+nenchoabsa@users.noreply.github.com"
] | 81222178+nenchoabsa@users.noreply.github.com |
1db686d84707ba2314d219fbd59ee026669dd20e | 14de6d507e471d582a7e7f5cba898f72f6ba186d | /python/Linked-List-Cycle/hashmap.py | 2ac2a62d573e60b6ccf32acf0e8d80d75b77303c | [
"MIT"
] | permissive | yutong-xie/Leetcode-Solution | a7d9c1f73a0fecd9de1d04dbd4c06393959dd95a | 6578f288a757bf76213030b73ec3319a7baa2661 | refs/heads/master | 2023-03-30T01:59:58.554650 | 2021-03-27T21:09:46 | 2021-03-27T21:09:46 | 290,101,434 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Copyright 2020, Yutong Xie, UIUC.
Using hashmap to test whether a cycle exists in the linked list
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def hasCycle(self, head):
        """Return True when the singly linked list starting at *head* has a cycle.

        :type head: ListNode
        :rtype: bool

        Walks the list once, remembering every visited node; seeing a node
        twice proves a cycle.  O(n) time, O(n) extra space.  (The original
        used a dict with dummy values purely for membership; a set expresses
        the intent directly.)
        """
        seen = set()  # nodes visited so far (identity-based membership)
        while head:
            if head in seen:
                return True
            seen.add(head)
            head = head.next
        return False
| [
"36655655+yutong-xie@users.noreply.github.com"
] | 36655655+yutong-xie@users.noreply.github.com |
0f41900cd8ccc0a02af26e8e6ac41f8c02048d26 | 7caa438706a423dd9779a81f8345fcf1ec11e921 | /NXT-Python/pyglet-1.2.4/examples/opengl_3.py | 0298cc248eb88ac3b54cfbdfafdf472714967189 | [
"BSD-3-Clause"
] | permissive | tamarinvs19/python-learning | 5dd2582f5dc504e19a53e9176677adc5170778b0 | 1e514ad7ca8f3d2e2f785b11b0be4d57696dc1e9 | refs/heads/master | 2021-07-15T13:23:24.238594 | 2021-07-08T07:07:21 | 2021-07-08T07:07:21 | 120,604,826 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,616 | py | #!/usr/bin/python
# $Id:$
'''In order to use the new features of OpenGL 3, you must explicitly create
an OpenGL 3 context. You can do this by supplying the `major_version` and
`minor_version` attributes for a GL Config.
This example creates an OpenGL 3 context, prints the version string to stdout,
and exits.
At time of writing, only the beta nvidia driver on Windows and Linux support
OpenGL 3, and requires an 8-series or higher.
On Windows, OpenGL 3 API must be explicitly enabled using the nvemulate tool
[1]. Additionally, at time of writing the latest driver did not yet support
forward compatible or debug contexts.
On Linux, the only driver that currently exposes the required GLX extensions
is 177.61.02 -- later drivers (177.67, 177.68, 177.7*, 177.8*, 180.06) seem to
be missing the extensions.
[1] http://developer.nvidia.com/object/nvemulate.html
'''
import pyglet
# Specify the OpenGL version explicitly to request 3.0 features, including
# GLSL 1.3.
#
# Some other attributes relevant to OpenGL 3:
# forward_compatible = True To request a context without deprecated
# functionality
# debug = True To request a debug context
config = pyglet.gl.Config(major_version=3, minor_version=0)
# Create a context matching the above configuration. Will fail if
# OpenGL 3 is not supported by the driver.
window = pyglet.window.Window(config=config, visible=False)
# Print the version of the context created.
print 'OpenGL version:', window.context.get_info().get_version()
window.close()
| [
"slavabarsuk@ya.ru"
] | slavabarsuk@ya.ru |
cbffe481b8c2609d89a052878599e76a03d759bc | 95697a9f8fed6d45cb8ae9ae2525873c99cc7cfb | /Project File/02. Second Project - notice board/server/View/V1/API/service/ShowUser.py | 33e58e2227326af8328954721c03870669d60b25 | [] | no_license | parkjinhong03/Python-Flask | d30c7447c70eb0cbda0454bfd2f2168347209adb | 5732f17c594c1fc213940b214c0beafd4448bc14 | refs/heads/master | 2022-02-24T21:04:35.868873 | 2019-09-04T02:13:49 | 2019-09-04T02:13:49 | 198,320,040 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | from View.V1.Function import LoginCheck
import os
# User-roster route: enumerate the user directories under ./Data/List.
def showuser():
    # Keep the strict `== False` comparison: logincheck() may return values
    # other than booleans, and only an explicit False means "not logged in".
    if LoginCheck.logincheck() == False:
        return 'Please Login First!'
    lines = []
    position = 0
    for _root, folders, _files in os.walk('./Data/List'):
        for folder in folders:
            position += 1
            lines.append(str(position) + '. ' + folder + '\n')
    return 'You can choose this user\'s list:\n' + ''.join(lines)
"jinhong0719@naver.com"
] | jinhong0719@naver.com |
978d109a9d79229a04673c409ce440e1dc8754e1 | 6923f79f1eaaba0ab28b25337ba6cb56be97d32d | /Introduction_to_numerical_programming_using_Python_and_CPP_Beu/Ch09/Python/P09-LinFit.py | ad49008506665cc9671c465c648ff7ed9fc70fb7 | [] | no_license | burakbayramli/books | 9fe7ba0cabf06e113eb125d62fe16d4946f4a4f0 | 5e9a0e03aa7ddf5e5ddf89943ccc68d94b539e95 | refs/heads/master | 2023-08-17T05:31:08.885134 | 2023-08-14T10:05:37 | 2023-08-14T10:05:37 | 72,460,321 | 223 | 174 | null | 2022-10-24T12:15:06 | 2016-10-31T17:24:00 | Jupyter Notebook | UTF-8 | Python | false | false | 2,416 | py | # Linear fit of a model to observed data points
from modfunc import *
from graphlib import *
# main
nn = [0]*4 # end-indexes of the 3 plots
col = [""]*4 # colors of plots
sty = [0]*4 # styles of plots
n = 5 # number of observed data
nfit = 2 # number of points plotted from model
n1 = n + nfit; n2 = n + 2*nfit # end indexes
x = [0]*(n2+1); y = [0]*(n2+1) # observed data
sigmy = [0]*(n+1) # standard deviations of observed data
x[1] = 1e0; y[1] = 0.8e0 # data points
x[2] = 2e0; y[2] = 2.1e0
x[3] = 3e0; y[3] = 2.8e0
x[4] = 4e0; y[4] = 4.0e0
x[5] = 5e0; y[5] = 4.4e0
iopt = 0 # least squares fit: equal errors sigmy
(a, b, sigma, sigmb, chi2) = LinFit(x,y,sigmy,n,iopt)
print("Least squares fit:")
print("a = {0:8.4f} +/- {1:8.4f}".format(a,sigma))
print("b = {0:8.4f} +/- {1:8.4f}".format(b,sigmb))
print("Chi^2 = {0:8.4f}".format(chi2))
h = (x[n]-x[1])/(nfit-1)
for i in range(1,nfit+1): # append model points
x[n+i] = x[1] + (i-1)*h
y[n+i] = a*x[n+i] + b # regression line
for i in range(1,n+1): sigmy[i] = 0.15*y[i] # generate standard deviations
iopt = 1 # Chi-square fit: different errors sigmy
(a, b, sigma, sigmb, chi2) = LinFit(x,y,sigmy,n,iopt)
print("\nChi-square fit:")
print("a = {0:8.4f} +/- {1:8.4f}".format(a,sigma))
print("b = {0:8.4f} +/- {1:8.4f}".format(b,sigmb))
print("Chi^2 = {0:8.4f}".format(chi2))
for i in range(1,nfit+1): # append model points
x[n1+i] = x[n+i]
y[n1+i] = a*x[n+i] + b # Chi-square regression line
GraphInit(800,600)
nn[1] = n ; col[1] = "black"; sty[1] = 4 # data points
nn[2] = n1; col[2] = "red" ; sty[2] = -1 # least squares fit
nn[3] = n2; col[3] = "blue" ; sty[3] = 1 # Chi-square fit
MultiPlot(x,y,sigmy,nn,col,sty,3,10,0.5e0,5.5e0,1,0e0,0e0,0,
0.15,0.95,0.15,0.85,"x","y","Linear fit")
MainLoop()
| [
"me@yomama.com"
] | me@yomama.com |
6176f6f471d91cdedce7764c2d165f4fab302ff0 | 9d89530e784922173aa1c032dcfaf772a26cf99e | /vulnerabilities/tests/test_suse.py | a4695d8265175e094b60103b31d56577c44d9269 | [
"Python-2.0",
"Apache-2.0"
] | permissive | nileshprasad137/vulnerablecode | 7da3d9c15e436919bedb29d3bfeb574a233f3a5b | 4677f70c654a15da529a80d19d7de1ca013ef8eb | refs/heads/main | 2023-08-22T23:51:30.806190 | 2021-10-08T13:13:32 | 2021-10-08T13:13:32 | 418,471,773 | 1 | 0 | Apache-2.0 | 2021-10-18T11:33:24 | 2021-10-18T11:33:24 | null | UTF-8 | Python | false | false | 7,207 | py | import os
import unittest
import xml.etree.ElementTree as ET
from vulnerabilities.oval_parser import OvalParser
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEST_DATA = os.path.join(BASE_DIR, "test_data/")
class TestSUSEOvalParser(unittest.TestCase):
@classmethod
def setUpClass(cls):
xml_doc = ET.parse(os.path.join(TEST_DATA, "suse_oval_data.xml"))
translator = {"less than": "<"}
# all the elements which require "equals" are ignored(because they are not useful)
cls.parsed_oval = OvalParser(translator, xml_doc)
def setUp(self):
self.definition_1 = self.parsed_oval.all_definitions[0]
self.definition_2 = self.parsed_oval.all_definitions[1]
def test_get_definitions(self):
assert len(self.parsed_oval.all_definitions) == 2
assert (
self.parsed_oval.all_definitions[0].getId() == "oval:org.opensuse.security:def:20094112"
)
assert (
self.parsed_oval.all_definitions[1].getId() == "oval:org.opensuse.security:def:20112767"
)
def test_get_tests_of_definition(self):
definition_1_test_ids = {
"oval:org.opensuse.security:tst:2009281999",
"oval:org.opensuse.security:tst:2009282000",
}
definition_2_test_ids = {
"oval:org.opensuse.security:tst:2009271113",
"oval:org.opensuse.security:tst:2009271114",
}
assert definition_1_test_ids == {
i.getId() for i in self.parsed_oval.get_tests_of_definition(self.definition_1)
}
assert definition_2_test_ids == {
i.getId() for i in self.parsed_oval.get_tests_of_definition(self.definition_2)
}
def test_get_vuln_id_from_definition(self):
vuln_id_1 = "CVE-2009-4112"
vuln_id_2 = "CVE-2011-2767"
assert vuln_id_1 == self.parsed_oval.get_vuln_id_from_definition(self.definition_1)
assert vuln_id_2 == self.parsed_oval.get_vuln_id_from_definition(self.definition_2)
def test_get_object_state_of_test(self):
# This method is inherited as it is from UbuntuOvalParser
# this test ensures that the method works with suse OVAL documents
assert len(self.parsed_oval.oval_document.getTests()) == 9
test_1 = self.parsed_oval.oval_document.getTests()[0]
test_2 = self.parsed_oval.oval_document.getTests()[1]
obj_t1, state_t1 = self.parsed_oval.get_object_state_of_test(test_1)
obj_t2, state_t2 = self.parsed_oval.get_object_state_of_test(test_2)
assert state_t1.getId() == "oval:org.opensuse.security:ste:2009068342"
assert state_t2.getId() == "oval:org.opensuse.security:ste:2009072069"
assert obj_t2.getId() == "oval:org.opensuse.security:obj:2009031297"
assert obj_t1.getId() == "oval:org.opensuse.security:obj:2009031246"
def test_get_pkgs_from_obj(self):
assert len(self.parsed_oval.oval_document.getObjects()) == 5
obj_t1 = self.parsed_oval.oval_document.getObjects()[0]
obj_t2 = self.parsed_oval.oval_document.getObjects()[1]
pkg_set1 = set(self.parsed_oval.get_pkgs_from_obj(obj_t1))
pkg_set2 = set(self.parsed_oval.get_pkgs_from_obj(obj_t2))
assert pkg_set1 == {"openSUSE-release"}
# In a full run we wont get pkg_set1 because we won't obtain
# it's object due to filters to avoid such tests in the first place
assert pkg_set2 == {"cacti"}
def test_get_version_range_from_state(self):
assert len(self.parsed_oval.oval_document.getStates()) == 4
state_1 = self.parsed_oval.oval_document.getStates()[0]
state_2 = self.parsed_oval.oval_document.getStates()[1]
exp_range_1 = None
exp_range_2 = "<1.2.11-lp151.3.6"
# In a full run we wont get exp_range1 because we won't obtain
# it's state due to filters to avoid such tests in the first place
assert self.parsed_oval.get_version_range_from_state(state_1) == exp_range_1
assert self.parsed_oval.get_version_range_from_state(state_2) == exp_range_2
def test_get_urls_from_definition(self):
def1_urls = {
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4112",
"https://www.suse.com/security/cve/CVE-2009-4112.html",
"https://bugzilla.suse.com/1122535",
"https://bugzilla.suse.com/558664",
}
assert def1_urls == self.parsed_oval.get_urls_from_definition(self.definition_1)
def2_urls = {
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-2767",
"https://bugzilla.suse.com/1156944",
"https://www.suse.com/security/cve/CVE-2011-2767.html",
}
assert def2_urls == self.parsed_oval.get_urls_from_definition(self.definition_2)
def test_get_data(self):
expected_data = [
{
"test_data": [
{
"package_list": ["cacti"],
"version_ranges": "<1.2.11-lp151.3.6",
},
{
"package_list": ["cacti-spine"],
"version_ranges": "<1.2.11-lp151.3.6",
},
],
"description": '\n Cacti 0.8.7e and earlier allows remote authenticated administrators to gain privileges by modifying the "Data Input Method" for the "Linux - Get Memory Usage" setting to contain arbitrary commands.\n ',
"vuln_id": "CVE-2009-4112",
"reference_urls": {
"https://bugzilla.suse.com/1122535",
"https://bugzilla.suse.com/558664",
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2009-4112",
"https://www.suse.com/security/cve/CVE-2009-4112.html",
},
},
{
"test_data": [
{
"package_list": ["apache2-mod_perl"],
"version_ranges": "<2.0.11-lp151.3.3",
},
{
"package_list": ["apache2-mod_perl-devel"],
"version_ranges": "<2.0.11-lp151.3.3",
},
],
"description": "\n mod_perl 2.0 through 2.0.10 allows attackers to execute arbitrary Perl code by placing it in a user-owned .htaccess file, because (contrary to the documentation) there is no configuration option that permits Perl code for the administrator's control of HTTP request processing without also permitting unprivileged users to run Perl code in the context of the user account that runs Apache HTTP Server processes.\n ",
"vuln_id": "CVE-2011-2767",
"reference_urls": {
"https://bugzilla.suse.com/1156944",
"https://www.suse.com/security/cve/CVE-2011-2767.html",
"http://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2011-2767",
},
},
]
assert expected_data == self.parsed_oval.get_data()
| [
"shivam.sandbhor@gmail.com"
] | shivam.sandbhor@gmail.com |
1be8a4ba8920c53045cf4597fe0649599d965eeb | cd9f819b968def4f9b57448bdd926dc5ffa06671 | /B_Python程式設計大數據資料分析_蔡明志_碁峰_2018/ch12/shape.py | 6abc3387132728cee32128b7b8749b663d682949 | [] | no_license | AaronCHH/jb_pyoop | 06c67f3c17e722cf18147be4ae0fac81726e4cbc | 356baf0963cf216db5db7e11fb67234ff9b31b68 | refs/heads/main | 2023-04-02T05:55:27.477763 | 2021-04-07T01:48:04 | 2021-04-07T01:48:13 | 344,676,005 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 421 | py | import math
class Shape:
    """A shape anchored at an (x, y) reference point."""

    def __init__(self, xPoint=0, yPoint=0):
        # Reference point of the shape (private via name mangling).
        self.__xPoint = xPoint
        self.__yPoint = yPoint

    def getPoint(self):
        """Return the reference point as an (x, y) tuple."""
        return self.__xPoint, self.__yPoint

    def setPoint(self, xPoint, yPoint):
        """Move the reference point to (xPoint, yPoint)."""
        self.__xPoint = xPoint
        self.__yPoint = yPoint

    def __str__(self):
        # Bug fix: __str__ must *return* a string.  The original printed the
        # text and implicitly returned None, so str(shape) raised TypeError.
        return 'xPoint = %d, yPoint = %d' % (self.__xPoint, self.__yPoint)
"aaronhsu219@gmail.com"
] | aaronhsu219@gmail.com |
a84a792cbc0b3a11189f3108b5bb171958a14f1f | 0ced6dc4f7c30cd58475bc5a13a7a8ad00081bab | /AndroidCase/test_023_mydingdan.py | a69a7eb2a0f03ec5b805b1325343d92ee87a1967 | [] | no_license | duanbibo/app-autotes-python | 1ef7bc635a7dcd9e82a61441ac7a567bba1c1e25 | 951fcb2a138a75e7722a7714cde62990f33b0f3e | refs/heads/master | 2022-04-25T03:39:20.675426 | 2020-04-27T15:08:47 | 2020-04-27T15:08:47 | 259,368,112 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,798 | py | import unittest
from time import sleep
from util.driver import Driver
from util.isElement import iselement
class Login(unittest.TestCase):
    # "My orders" (我的订单) screen regression test.
    def setUp(self):
        # Reuse the shared Appium driver created by the test harness.
        self.driver = Driver.DRIVER
    def test_mydingdan(self):
        """Open the order list, check each tab's content, then trigger a refund."""
        driver = self.driver
        sleep(2)
        driver.find_element_by_xpath(
            "//*[@resource-id='cn.xinzhili.core:id/tl_home_tabs']/android.widget.LinearLayout[4]").click()
        sleep(2)
        driver.find_element_by_xpath("//*[@text='我的订单']").click()
        sleep(3)
        self.assertIn("订单编号", driver.page_source)
        driver.find_element_by_xpath("//*[@text='未完成']").click()
        sleep(2)
        # The unfinished tab must show either an in-progress or an
        # awaiting-payment order; anything else is a failure.
        if "进行中" in driver.page_source:
            self.assertIn("进行中", driver.page_source)
        elif "待支付" in driver.page_source:
            self.assertIn("待支付", driver.page_source)
        else:
            # Deliberately failing assertion to flag the unexpected state.
            self.assertEqual(1,2)
        sleep(3)
        driver.find_element_by_xpath("//*[@text='已完成']").click()
        sleep(3)
        self.assertIn("已取消", driver.page_source)
        sleep(2)
        driver.find_element_by_xpath("//*[@text='退款']").click()
        sleep(2)
        self.assertIn("退款成功", driver.page_source)
    def tearDown(self):
        # Navigate back to the home screen via whichever back button exists.
        self.driver = Driver.DRIVER
        exit = iselement()  # NOTE(review): shadows the builtin `exit`
        back = exit.findelementid("cn.xinzhili.core:id/iv_title_back")
        back2 = exit.findelementid("cn.xinzhili.core:id/iv_title_left")
        if back is True:
            self.driver.find_element_by_id("cn.xinzhili.core:id/iv_title_back").click()
        else:
            sleep(2)
        if back2 is True:
            self.driver.find_element_by_id("cn.xinzhili.core:id/iv_title_left").click()
        else:
            sleep(2)
| [
"820355197@qq.com"
] | 820355197@qq.com |
4b32656a28a2c68bceb7c7b718403512b88994f1 | 6bfda75657070e177fa620a43c917096cbd3c550 | /kubernetes/test/test_autoscaling_v2alpha1_api.py | 6f7571908e7305b29558bf7d6d8a9a4487042303 | [
"Apache-2.0"
] | permissive | don41382/client-python | 8e7e747a62f9f4fc0402eea1a877eab1bb80ab36 | e69d4fe204b98f7d7ee3ada3996b4f5fbceae5fe | refs/heads/master | 2021-01-19T23:15:50.172933 | 2017-04-18T18:00:48 | 2017-04-18T18:00:48 | 88,943,866 | 0 | 0 | null | 2017-04-21T05:19:52 | 2017-04-21T05:19:52 | null | UTF-8 | Python | false | false | 2,946 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.apis.autoscaling_v2alpha1_api import AutoscalingV2alpha1Api
class TestAutoscalingV2alpha1Api(unittest.TestCase):
""" AutoscalingV2alpha1Api unit test stubs """
def setUp(self):
self.api = kubernetes.client.apis.autoscaling_v2alpha1_api.AutoscalingV2alpha1Api()
def tearDown(self):
pass
def test_create_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for create_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_collection_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_collection_namespaced_horizontal_pod_autoscaler
"""
pass
def test_delete_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for delete_namespaced_horizontal_pod_autoscaler
"""
pass
def test_get_api_resources(self):
"""
Test case for get_api_resources
"""
pass
def test_list_horizontal_pod_autoscaler_for_all_namespaces(self):
"""
Test case for list_horizontal_pod_autoscaler_for_all_namespaces
"""
pass
def test_list_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for list_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler
"""
pass
def test_patch_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for patch_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler
"""
pass
def test_read_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for read_namespaced_horizontal_pod_autoscaler_status
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler
"""
pass
def test_replace_namespaced_horizontal_pod_autoscaler_status(self):
"""
Test case for replace_namespaced_horizontal_pod_autoscaler_status
"""
pass
if __name__ == '__main__':
unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
e1a3999957bf6d49d0e8a8693b74c61ff9a7ae7d | 4de2bfe570af0ae03db661223dc36524642a4016 | /libermatic_customization/libermatic_customization/doctype/libermatic_settings/libermatic_settings.py | ea23f8e293f4e03572f53c1e63b16d67b32d51f8 | [
"MIT"
] | permissive | libermatic/libermatic_customization | 6b645c6424987cee39653499bfa7b2683da65f42 | 38ac7abd691ee289ee85cb3926cafb3989af24e1 | refs/heads/master | 2020-03-31T18:29:28.152141 | 2019-08-27T11:16:14 | 2019-08-27T11:16:14 | 152,460,666 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2018, Libermatic and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
from frappe.model.document import Document
class LibermaticSettings(Document):
    # Frappe settings DocType; no behavior beyond the Document base class.
    pass
| [
"sun@libermatic.com"
] | sun@libermatic.com |
129e7bf0bd902cefe5098246834c9b2b435b12ac | 67117fb75f765d3426b8d0b06567b9a0d446e25b | /src/gtk/toga_gtk/libs/gtk.py | 95678bd2d52085e07397acbccbe29539cb31503f | [
"BSD-3-Clause"
] | permissive | maks232/toga | 2e61723aea004c97f97f6ac4e8f2e0e6de193b8f | 47b8961ded119bc147961c0a7054d354e3f3222f | refs/heads/master | 2022-12-10T14:09:25.963436 | 2020-09-03T11:53:53 | 2020-09-03T11:53:53 | 292,660,054 | 0 | 0 | NOASSERTION | 2020-09-03T19:20:57 | 2020-09-03T19:20:56 | null | UTF-8 | Python | false | false | 771 | py | import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk, GdkPixbuf, Gio, GLib # noqa: F401, E402
# The following import will fail if WebKit or its API wrappers aren't
# installed; handle failure gracefully
# (see https://github.com/beeware/toga/issues/26)
# Accept any API version greater than 3.0
WebKit2 = None
# Try the newest available WebKit2 API first, falling back to 3.0.
for version in ['4.0', '3.0']:
    try:
        gi.require_version('WebKit2', version)
        from gi.repository import WebKit2  # noqa: F401, E402
        break
    except (ImportError, ValueError):
        pass
# Pango (text layout) is optional; expose None when it is unavailable.
try:
    gi.require_version("Pango", "1.0")
    from gi.repository import Pango  # noqa: F401, E402
except ImportError:
    Pango = None
# cairo (2D drawing) is optional; expose None when it is unavailable.
try:
    import cairo  # noqa: F401, E402
except ImportError:
    cairo = None
| [
"russell@keith-magee.com"
] | russell@keith-magee.com |
539d0113207a5c499e7c828ef9766dbe5e11d854 | 48832d27da16256ee62c364add45f21b968ee669 | /res/scripts/client/gui/shared/utils/methodsrules.py | 660a7b0a547bb320ff1113ef3462da88efbd7c12 | [] | no_license | webiumsk/WOT-0.9.15.1 | 0752d5bbd7c6fafdd7f714af939ae7bcf654faf7 | 17ca3550fef25e430534d079876a14fbbcccb9b4 | refs/heads/master | 2021-01-20T18:24:10.349144 | 2016-08-04T18:08:34 | 2016-08-04T18:08:34 | 64,955,694 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 3,585 | py | # 2016.08.04 19:53:25 Střední Evropa (letní čas)
# Embedded file name: scripts/client/gui/shared/utils/MethodsRules.py
from collections import defaultdict
from types import MethodType
from debug_utils import LOG_DEBUG
class MethodsRules(object):
    """Mixin that lets decorated listener methods be skipped or delayed.

    ``skipable`` listeners can be dropped once per ``skipListenerNotification``
    call; ``delayable`` listeners are queued until the delayer they depend on
    has been processed (see ``processDelayer``).
    """
    __slots__ = ('__listenersToSkip', '__notificationToDelay', '__delayersProcessed')

    class skipable(object):
        """Decorator: the wrapped listener can be skipped once on demand."""

        def __init__(self, func):
            self.__listerner = func

        def __call__(self, *args, **kwargs):
            instance = args[0]
            if not isinstance(instance, MethodsRules):
                raise AssertionError('Wrong inheritance.')
            # Bug fix: the decompiled original fell through to an unconditional
            # `return`, leaving the listener call unreachable.  Intended flow:
            # consume one pending skip, otherwise invoke the listener.
            if instance.skip(self.__listerner):
                LOG_DEBUG('Notification skipped: ', instance, self.__listerner)
                return
            self.__listerner(*args, **kwargs)

        def __get__(self, obj, objtype = None):
            return MethodType(self, obj, objtype)

    class delayable(object):
        """Decorator factory: delay the listener until *delayerName* is processed."""

        def __init__(self, delayerName = None):
            self.__delayerName = delayerName

        def __call__(self, listener):
            def wrapper(*args, **kwargs):
                instance = args[0]
                if not isinstance(instance, MethodsRules):
                    raise AssertionError('Wrong inheritance.')
                # Bug fix: same decompiler artifact as in `skipable` -- the
                # original returned before ever invoking the listener.
                if instance.delay(self.__delayerName, listener, *args, **kwargs):
                    LOG_DEBUG('Notification delayed: ', listener, *args, **kwargs)
                    return
                result = listener(*args, **kwargs)
                # The listener itself acts as a delayer for anything queued on it.
                instance.processDelayer(listener.__name__)
                return result

            return wrapper

        def __get__(self, obj, objtype = None):
            return MethodType(self, obj, objtype)

    def __init__(self):
        super(MethodsRules, self).__init__()
        self.__listenersToSkip = []
        self.__notificationToDelay = defaultdict(list)
        self.__delayersProcessed = set()

    def clear(self):
        """Forget pending skips, delayed notifications and processed delayers."""
        self.__listenersToSkip = []
        self.__notificationToDelay.clear()
        self.__delayersProcessed.clear()

    def skipListenerNotification(self, wrapper):
        """Arrange for *wrapper*'s listener to be skipped on its next call."""
        self.__listenersToSkip.append(wrapper.listener)

    def isSkipable(self, listener):
        return listener in self.__listenersToSkip

    def isDelayerProcessed(self, delayerName):
        return delayerName in self.__delayersProcessed

    def skip(self, listener):
        """Consume one pending skip for *listener*; True when it was skipped."""
        if self.isSkipable(listener):
            self.__listenersToSkip.remove(listener)
            return True
        return False

    def delay(self, delayerName, notification, *args, **kwargs):
        """Queue *notification* behind *delayerName*; True when it was queued."""
        if delayerName is not None and not self.isDelayerProcessed(delayerName):
            self.__notificationToDelay[delayerName].append((notification, args, kwargs))
            return True
        else:
            return False

    def processDelayer(self, delayerName):
        """Mark *delayerName* processed and flush notifications queued on it."""
        LOG_DEBUG('Delayer processed: ', delayerName)
        self.__delayersProcessed.add(delayerName)
        pending = self.__notificationToDelay.pop(delayerName, ())
        delayers = set()
        for notification, args, kwargs in pending:
            LOG_DEBUG('Notification processed: ', notification, args, kwargs)
            notification(*args, **kwargs)
            delayers.add(notification.__name__)
        # Every flushed notification is itself a delayer; process recursively.
        for delayerName in delayers:
            self.processDelayer(delayerName)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\scripts\client\gui\shared\utils\methodsrules.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2016.08.04 19:53:26 Střední Evropa (letní čas)
| [
"info@webium.sk"
] | info@webium.sk |
4a651167e8552a4ce397906eb3f85051e3281757 | 1065ec75d9ee668ffd7aafc6a8de912d7c2cee6f | /addons/script.icechannel.extn.extra.uk/plugins/livetv_uk/sky_news_ltvi.py | 0533ee893da846b0f640a240561d8b1b06333b69 | [] | no_license | bopopescu/kodiprofile | 64c067ee766e8a40e5c148b8e8ea367b4879ffc7 | 7e78640a569a7f212a771aab6a4a4d9cb0eecfbe | refs/heads/master | 2021-06-11T17:16:15.498281 | 2016-04-03T06:37:30 | 2016-04-03T06:37:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,004 | py | '''
Ice Channel
'''
from entertainment.plugnplay.interfaces import LiveTVIndexer
from entertainment.plugnplay import Plugin
from entertainment import common
class sky_news(LiveTVIndexer):
    """Declarative Ice Channel plugin describing the "Sky News" live-TV channel.

    All metadata is exposed as class attributes that the plugnplay framework
    reads through the LiveTVIndexer interface; the class defines no behaviour
    of its own.
    """
    # Register this class as an implementation of the LiveTVIndexer interface.
    implements = [LiveTVIndexer]
    display_name = "Sky News"
    name = "sky_news"
    other_names = "sky_news,Sky News"
    # NOTE(review): these imports execute at class-creation time (class-body
    # scope), so the Kodi 'xbmcaddon' API must be importable when this module
    # is first loaded.
    import xbmcaddon
    import os
    # Parent add-on that ships this plugin; its Addon handle is used below to
    # resolve artwork paths.
    addon_id = 'script.icechannel.extn.extra.uk'
    addon = xbmcaddon.Addon(addon_id)
    # Channel thumbnail: resources/images/sky_news.png inside the add-on.
    img = os.path.join( addon.getAddonInfo('path'), 'resources', 'images', name + '.png' )
    regions = [
        {
        'name':'United Kingdom',
        'img':addon.getAddonInfo('icon'),
        'fanart':addon.getAddonInfo('fanart')
        },
    ]
    languages = [
        {'name':'English', 'img':'', 'fanart':''},
    ]
    genres = [
        {'name':'News', 'img':'', 'fanart':''}
    ]
    # Drop the Addon handle so it is not kept alive on the class object after
    # the attributes above have been computed.
    addon = None
| [
"sokasoka@hotmail.com"
] | sokasoka@hotmail.com |
b4ee0e774735f7e22ec95c1c42dc59f30949f18c | e2b5f9c5ccc51be2f3c0b55f580b882f2adb4875 | /docs/conf.py | 37ab66664e95edd05101d56e93214e099d7deeac | [
"MIT"
] | permissive | bfontaine/firapria | 5e0930db20689dd5d5a5bfa4511f6781b1521c21 | a2eeeab6f6d1db50337950cfbd6f835272306ff0 | refs/heads/master | 2021-01-25T06:36:39.101391 | 2014-09-30T22:09:38 | 2014-09-30T22:09:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,173 | py | # -*- coding: utf-8 -*-
#
# Firapria documentation build configuration file, created by
# sphinx-quickstart on Fri Mar 28 23:02:11 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Firapria'
copyright = u'2014, Baptiste Fontaine'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.1'
# The full version, including alpha/beta/rc tags.
release = '0.1.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Firapriadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Firapria.tex', u'Firapria Documentation',
u'Baptiste Fontaine', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'firapria', u'Firapria Documentation',
[u'Baptiste Fontaine'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Firapria', u'Firapria Documentation',
u'Baptiste Fontaine', 'Firapria', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| [
"batifon@yahoo.fr"
] | batifon@yahoo.fr |
07c23570e03becd7f5bb4afa422a232f230fa5d1 | f60eb7d15ce3ca06e2db1dc0af8b3b87bed08c37 | /home/migrations/0012_auto_20170609_2252.py | 848d2b5ca42484916880c1fe1c4dcfbfde12ea15 | [] | no_license | wlminimal/epc | 96136f0c5f2b4ddc04fbc7e7b76d6a41c631ea26 | 2127a4e273a69a3ca0d5711fd1452c1bc5ab7590 | refs/heads/master | 2022-12-12T11:33:57.711869 | 2019-04-12T16:33:58 | 2019-04-12T16:33:58 | 92,700,181 | 0 | 0 | null | 2022-12-07T23:58:05 | 2017-05-29T02:20:33 | Python | UTF-8 | Python | false | false | 879 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-09 22:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Re-create the SermonDay model and link SermonVideo back to it.

    Follows 0011_delete_sermonday (which removed the model): this migration
    adds SermonDay again with a single ``sermon_day`` CharField, then attaches
    a nullable ForeignKey from SermonVideo. ``on_delete=SET_NULL`` keeps the
    videos when a SermonDay row is deleted. Auto-generated by Django; do not
    edit by hand.
    """

    dependencies = [
        ('home', '0011_delete_sermonday'),
    ]

    operations = [
        migrations.CreateModel(
            name='SermonDay',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sermon_day', models.CharField(default="Lord's Day", max_length=50)),
            ],
        ),
        migrations.AddField(
            model_name='sermonvideo',
            name='sermon_day',
            # related_name='+' disables the reverse accessor on SermonDay.
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='home.SermonDay'),
        ),
    ]
| [
"wlminimal@gmail.com"
] | wlminimal@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.