blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e9b5cf2445399642b2b7c925cbf7645c8e7e2f58
|
5864e86954a221d52d4fa83a607c71bacf201c5a
|
/trinity/renderjobs.py
|
394ddcb28608b85b76dfb5fc0412e2471051f7de
|
[] |
no_license
|
connoryang/1v1dec
|
e9a2303a01e5a26bf14159112b112be81a6560fd
|
404f2cebf13b311e754d45206008918881496370
|
refs/heads/master
| 2021-05-04T02:34:59.627529
| 2016-10-19T08:56:26
| 2016-10-19T08:56:26
| 71,334,417
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,474
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\packages\trinity\renderjobs.py
import decometaclass
from . import _trinity as trinity
class RenderJobs(object):
    """Python-side view of the engine's recurring render-job list.

    Bound to the C++ object ``trinity.Tr2RenderJobs`` through
    ``decometaclass.BlueWrappedMetaclass`` (Python 2 ``__metaclass__``
    protocol); ``self.recurring`` is supplied by the wrapped engine object,
    not by ``__init__``.
    """
    __cid__ = 'trinity.Tr2RenderJobs'
    __metaclass__ = decometaclass.BlueWrappedMetaclass

    def __init__(self):
        # All real state lives on the wrapped engine object.
        pass

    def UnscheduleByName(self, name):
        """Remove the first recurring job named *name*; return True if removed."""
        for rj in self.recurring:
            if rj.name == name:
                # Mutating while iterating is tolerable only because we
                # return immediately after the first removal.
                self.recurring.remove(rj)
                return True
        return False

    def FindByName(self, name):
        """Return the first recurring job named *name*, or None."""
        for rj in self.recurring:
            if rj.name == name:
                return rj

    def FindStepByName(self, name):
        """Return the first step named *name* across all recurring jobs, or None."""
        def FindInJob(rj):
            # Returns None implicitly when no step in this job matches.
            for step in rj.steps:
                if step.name == name:
                    return step
        for rj in self.recurring:
            ret = FindInJob(rj)
            if ret is not None:
                return ret

    def FindScenes(self, sceneType, filter = lambda x: True):
        """Collect ``step.object`` instances that are exactly *sceneType*
        (subclasses excluded by the ``type(...) is`` check) and satisfy
        *filter*, recursing into nested TriStepRunJob jobs.

        NOTE(review): the ``return`` after the first match abandons the rest
        of that job's steps — presumably at most one scene per job; confirm
        before restructuring.
        """
        results = set({})
        def RecursiveSearch(job):
            for step in job.steps:
                if hasattr(step, 'object') and type(step.object) is sceneType and filter(step.object):
                    results.add(step.object)
                    return
                if type(step) is trinity.TriStepRunJob:
                    RecursiveSearch(step.job)
        for job in self.recurring:
            RecursiveSearch(job)
        return results
|
[
"le02005@163.com"
] |
le02005@163.com
|
d4a4f7cad1ae98a307e8097d46ba07924f6a4adb
|
1f85142263a08d2e20080f18756059f581d524df
|
/chromium_extension/branches/timeline/src/build/common.gypi
|
764680d3d99285d67d4b0c6a767afa03d918f377
|
[] |
no_license
|
songlibo/page-speed
|
60edce572136a4b35f4d939fd11cc4d3cfd04567
|
8776e0441abd3f061da969644a9db6655fe01855
|
refs/heads/master
| 2021-01-22T08:27:40.145133
| 2016-02-03T15:34:40
| 2016-02-03T15:34:40
| 43,261,473
| 0
| 0
| null | 2015-09-27T19:32:17
| 2015-09-27T19:32:17
| null |
UTF-8
|
Python
| false
| false
| 1,260
|
gypi
|
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# Make sure we link statically so everything gets linked into a
# single shared object.
'library': 'static_library',
# The nacl toolchain fails to build valid nexes when we enable gc
# sections, at least on 64 bit builds. TODO: revisit this to see
# if a newer nacl toolchain supports it.
'no_gc_sections': 1,
# We're building a shared library, so everything needs to be built
# with Position-Independent Code.
'linux_fpic': 1,
},
'includes': [
'../third_party/libpagespeed/src/build/common.gypi',
],
# 'target_defaults': {
# 'include_dirs': [
# '<(DEPTH)/build/nacl_header_stubs',
# ],
# },
}
|
[
"bmcquade@google.com"
] |
bmcquade@google.com
|
499899b07cb558bc9dd599794ace8b8746cee9ba
|
06c9edb02884ced68c62b5527d2be0e1a2e65bf1
|
/9012.py
|
3ce6c980e1d0a4c7ae29f246559b2957d47c7fc6
|
[] |
no_license
|
0x232/BOJ
|
3c5d3973b62036bfe9b761c88c822cf7fe909bce
|
5f135ac51b1c304eff4630798fb5c516b666a5c6
|
refs/heads/master
| 2021-07-03T02:56:00.132987
| 2020-10-31T02:18:19
| 2020-10-31T02:18:19
| 191,161,517
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 381
|
py
|
n = int(input())
for _ in range(n):
paren = input()
counter = 0
answer = True
for p in paren:
if counter < 0:
answer = False
break
if p == '(':
counter += 1
if p == ')':
counter -= 1
if counter != 0:
answer = False
if answer:
print('YES')
else:
print('NO')
|
[
"51640066+0x232@users.noreply.github.com"
] |
51640066+0x232@users.noreply.github.com
|
158391a0ca82c0639608e6f98dede3195bd12b40
|
9d862dd68f8b4ea4e7de9397fef8592824c77449
|
/app/top/api/rest/FenxiaoDiscountsGetRequest.py
|
2989652b695920224f032670cc2c84c122f36215
|
[] |
no_license
|
hi-noikiy/tmall-sku-outer_id
|
ffaca630dfb288ca33d962b8a050932d1047b9c8
|
1bcf29386a513bcb210bf5d91016e0dcb1ebc1ad
|
refs/heads/master
| 2021-05-09T18:20:27.150316
| 2017-03-08T06:43:57
| 2017-03-08T06:43:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 333
|
py
|
'''
Created by auto_sdk on 2016.03.05
'''
from app.top.api.base import RestApi
class FenxiaoDiscountsGetRequest(RestApi):
    """Auto-generated Taobao TOP request for ``taobao.fenxiao.discounts.get``."""
    def __init__(self,domain='gw.api.taobao.com',port=80):
        RestApi.__init__(self,domain, port)
        # Request parameters; the caller fills these in before sending.
        self.discount_id = None
        self.ext_fields = None
    def getapiname(self):
        # API method name the gateway dispatches on.
        return 'taobao.fenxiao.discounts.get'
|
[
"1037096435@qq.com"
] |
1037096435@qq.com
|
e925cae9746d4510a5277d88ffa5e8a07c3c90e6
|
4eaab9327d25f851f9e9b2cf4e9687d5e16833f7
|
/problems/critical_connections_in_a_network/solution.py
|
7ddf628a29bc4208c9823e84011f61a218c0010c
|
[] |
no_license
|
kadhirash/leetcode
|
42e372d5e77d7b3281e287189dcc1cd7ba820bc0
|
72aea7d43471e529ee757ff912b0267ca0ce015d
|
refs/heads/master
| 2023-01-21T19:05:15.123012
| 2020-11-28T13:53:11
| 2020-11-28T13:53:11
| 250,115,603
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 949
|
py
|
class Solution:
    def criticalConnections(self, n: int, connections: List[List[int]]) -> List[List[int]]:
        """Return the bridges ("critical connections") of an undirected graph.

        Depth-based variant of Tarjan's bridge finding: an edge (u, v) is a
        bridge when the DFS subtree rooted at v cannot reach back to u's
        depth or above.  Edges are appended to ``output`` as tuples.

        NOTE(review): ``List`` comes from ``typing`` and is supplied by the
        LeetCode harness, not imported here.  Assumes the graph is connected
        (single DFS from node 0) — TODO confirm against the problem statement.
        """
        def dfs(previous = -1, current = 0, depth = 1):
            nonlocal depths, output
            temp_depth = depth  # lowest depth reachable from this subtree
            depths[current] = depth
            for neighbor in graph[current]:
                if neighbor == previous:
                    # Skip the tree edge we just came down.
                    continue
                # Roots start at depth 1, so 0 means "unvisited": the `or`
                # only recurses into unvisited neighbors, otherwise it uses
                # the already-recorded depth (a back edge).
                neighbor_depth = depths[neighbor] or dfs(current, neighbor, depth + 1)
                if depth < neighbor_depth:
                    # Neighbor's subtree cannot climb above us: bridge.
                    output.append((current, neighbor))
                elif neighbor_depth < temp_depth:
                    temp_depth = neighbor_depth
            depths[current] = temp_depth
            return temp_depth
        graph = [[] for _ in range(n)]  # adjacency lists
        depths = [0] * n                # 0 = unvisited
        output = []
        for u, v in connections:
            graph[u].append(v)
            graph[v].append(u)
        dfs()
        return output
|
[
"kadhirash@gmail.com"
] |
kadhirash@gmail.com
|
8bc175401c234330dcca0e841f43babb1b91a34e
|
e831c22c8834030c22c54b63034e655e395d4efe
|
/Strings/409-LongestPalindrome.py
|
a7c78ae605311f965fabd78f56853df5f5a2ed97
|
[] |
no_license
|
szhmery/leetcode
|
a5eb1a393422b21f9fd4304b3bdc4a9db557858c
|
9fcd1ec0686db45d24e2c52a7987d58c6ef545a0
|
refs/heads/master
| 2023-08-16T00:27:56.866626
| 2021-10-23T07:35:37
| 2021-10-23T07:35:37
| 331,875,151
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 593
|
py
|
from collections import Counter
class Solution:
    #https://leetcode.com/problems/longest-palindrome/solution/
    def longestPalindrome(self, s: str) -> int:
        """Return the length of the longest palindrome buildable from s."""
        counts = Counter(s)
        # Each character contributes its largest even count as mirrored pairs.
        length = sum(count // 2 * 2 for count in counts.values())
        # A single leftover odd-count character can occupy the center.
        if any(count % 2 for count in counts.values()):
            length += 1
        return length
# Quick manual checks mirroring the examples above.
solution = Solution()
for sample in ('abccb', 'ccc', 'cccaaadde'):
    result = solution.longestPalindrome(sample)
    print(result)
|
[
"szhmery@gmail.com"
] |
szhmery@gmail.com
|
eadd064afcb20f96f92a1dd01fffdcfba42712a5
|
24dd3c272457110b2b51bb783715d1245afcd9ce
|
/eth_dev/infura.py
|
73181d7325cfb92aa1ccb3a2719e9daa434c82ab
|
[] |
no_license
|
fubuloubu/eth-dev
|
81761da7942927a97830c426cccf650046e6db74
|
383e51bba0b4471ef1c7a5d6ee2d1ff6a0562f8a
|
refs/heads/master
| 2020-04-30T04:24:29.606074
| 2019-03-19T23:15:56
| 2019-03-19T23:15:56
| 176,610,133
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
import os
import sys
from importlib import import_module
def get_web3(network: str, project_id: str):
    """Return the auto-configured web3 instance for *network* via Infura.

    Exports the project id through the environment, then imports the
    matching ``web3.auto.infura.<network>`` autoloader module.
    """
    # Infura websocket API requires Project ID token as of March 23rd
    print("Setting Infura Project ID to", project_id, file=sys.stderr)
    os.environ['WEB3_INFURA_PROJECT_ID'] = project_id
    # Dynamically load the correct autoloader (based on network)
    print("Connecting to the", network, "network (using Infura)", file=sys.stderr)
    module_name = "web3.auto.infura.%s" % network
    autoloader = import_module(module_name)
    # The autoloader module exposes the connected instance as ``w3``.
    return getattr(autoloader, 'w3')
|
[
"fubuloubu@gmail.com"
] |
fubuloubu@gmail.com
|
b28a6e9427e27b1ccb8fa350686110b8a21e74e3
|
68c4805ad01edd612fa714b1e0d210115e28bb7d
|
/venv/Lib/site-packages/numba/cuda/tests/cudapy/test_print.py
|
59513d127a2aebd9b1461428f48b504cac50b75b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Happy-Egg/redesigned-happiness
|
ac17a11aecc7459f4ebf0afd7d43de16fb37ae2c
|
08b705e3569f3daf31e44254ebd11dd8b4e6fbb3
|
refs/heads/master
| 2022-12-28T02:40:21.713456
| 2020-03-03T09:04:30
| 2020-03-03T09:04:30
| 204,904,444
| 2
| 1
|
Apache-2.0
| 2022-12-08T06:19:04
| 2019-08-28T10:18:05
|
Python
|
UTF-8
|
Python
| false
| false
| 1,919
|
py
|
from __future__ import print_function
import numpy as np
from numba import cuda
from numba import unittest_support as unittest
from numba.cuda.testing import captured_cuda_stdout, SerialMixin
def cuhello():
    # CUDA kernel body: each thread prints its 1-D grid index with 999,
    # then the constant -42.
    i = cuda.grid(1)
    print(i, 999)
    print(-42)
def printfloat():
    # CUDA kernel body: mixed int/float print from each thread.
    i = cuda.grid(1)
    print(i, 23, 34.75, 321)
def printstring():
    # CUDA kernel body: string argument mixed with ints.
    i = cuda.grid(1)
    print(i, "hop!", 999)
def printempty():
    # CUDA kernel body: bare print() should emit a single newline.
    print()
class TestPrint(SerialMixin, unittest.TestCase):
    """Verify that print() inside CUDA kernels reaches the captured stdout."""

    def test_cuhello(self):
        jcuhello = cuda.jit('void()', debug=False)(cuhello)
        with captured_cuda_stdout() as stdout:
            jcuhello[2, 3]()  # 6 threads total, per the expected list below
        # The output of GPU threads is intermingled, but each print()
        # call is still atomic
        out = stdout.getvalue()
        lines = sorted(out.splitlines(True))
        expected = ['-42\n'] * 6 + ['%d 999\n' % i for i in range(6)]
        self.assertEqual(lines, expected)

    def test_printfloat(self):
        jprintfloat = cuda.jit('void()', debug=False)(printfloat)
        with captured_cuda_stdout() as stdout:
            jprintfloat()
        # CUDA and the simulator use different formats for float formatting
        self.assertIn(stdout.getvalue(), ["0 23 34.750000 321\n",
                                          "0 23 34.75 321\n"])

    def test_printempty(self):
        cufunc = cuda.jit('void()', debug=False)(printempty)
        with captured_cuda_stdout() as stdout:
            cufunc()
        self.assertEqual(stdout.getvalue(), "\n")

    def test_string(self):
        cufunc = cuda.jit('void()', debug=False)(printstring)
        with captured_cuda_stdout() as stdout:
            cufunc[1, 3]()  # 3 threads, one line each
        out = stdout.getvalue()
        lines = sorted(out.splitlines(True))
        expected = ['%d hop! 999\n' % i for i in range(3)]
        self.assertEqual(lines, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
[
"yangyang4910709@163.com"
] |
yangyang4910709@163.com
|
4ad18edeba3a472fa88ee13931a6c5ad42d6a3dc
|
d7779408c44502a0cb8da4e3923e1b68492b1610
|
/apps/organization/forms.py
|
ccfe66e97ba094e2b0233dc63e529b03fbcc07b3
|
[
"MIT"
] |
permissive
|
codelieche/moocweb
|
5c4429d3ebee43452d42db63fdd364935e2d6eee
|
0e25efa597a79a38066ec41559334be604388f30
|
refs/heads/master
| 2021-01-13T11:49:08.444658
| 2017-02-26T16:36:08
| 2017-02-26T16:36:08
| 81,343,579
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 684
|
py
|
# _*_ coding:utf-8 _*_
import re
from django import forms
from operation.models import UserAsk
class UserAskForm(forms.ModelForm):
    """User course-enquiry form (name, mobile number, course name)."""

    class Meta:
        model = UserAsk
        fields = ['name', 'mobile', 'course_name']

    def clean_mobile(self):
        """Validate that the mobile number is a legal CN mobile number.

        Bug fixes vs. the original:
        - Django only invokes ``clean_<fieldname>`` hooks, so the method must
          be named ``clean_mobile`` (the old ``clean_mobil`` never ran).
        - Invalid input must raise ValidationError; the old ``return False``
          silently stored False as the cleaned value and made the final
          ``raise`` unreachable.
        """
        mobile = self.cleaned_data['mobile']
        # Raw string so the \d escapes are explicit regex, not string escapes.
        REGEX_MOBILE = r'^1[358]\d{9}$|^147\d{8}$|^176\d{8}$'
        p = re.compile(REGEX_MOBILE)
        if p.match(mobile):
            return mobile
        raise forms.ValidationError("手机号码非法", code="mobile_invalid")
|
[
"codelieche@gmail.com"
] |
codelieche@gmail.com
|
bf6f236aa05ce0ae841dd0b933b1930625d39351
|
a75d4e8ff5e2d0641e539af3980768c10298dfb9
|
/main.py
|
d8058f25dc7e3b8e940ce79bf28746f3235b9492
|
[] |
no_license
|
vitvara/tk-space-1
|
c70942af4c235ebabc8648d7d49efc9c31feb961
|
57f668f3137ce893d576f03c8f7c6ffc0cb794c3
|
refs/heads/main
| 2023-03-23T23:35:07.711907
| 2021-03-24T23:17:02
| 2021-03-24T23:17:02
| 351,682,625
| 1
| 0
| null | 2021-03-26T06:20:18
| 2021-03-26T06:20:17
| null |
UTF-8
|
Python
| false
| false
| 5,424
|
py
|
import math
from random import randint, random
import tkinter as tk
from gamelib import Sprite, GameApp, Text
from consts import *
from elements import Ship, Bullet, Enemy
from utils import random_edge_position, normalize_vector, direction_to_dxdy, vector_len, distance
class SpaceGame(GameApp):
    """Main game: steer a ship, shoot and bomb waves of incoming enemies."""

    def init_game(self):
        """Create the ship, HUD text elements, and empty enemy/bullet lists."""
        self.ship = Ship(self, CANVAS_WIDTH // 2, CANVAS_HEIGHT // 2)
        self.level = 1
        self.level_text = Text(self, '', 100, 580)
        self.update_level_text()
        self.score = 0
        self.score_wait = 0  # frames accumulated toward the next score tick
        self.score_text = Text(self, '', 100, 20)
        self.update_score_text()
        self.bomb_power = BOMB_FULL_POWER
        self.bomb_wait = 0   # frames accumulated toward the next recharge tick
        self.bomb_power_text = Text(self, '', 700, 20)
        self.update_bomb_power_text()
        self.elements.append(self.ship)
        self.enemies = []
        self.bullets = []

    def add_enemy(self, enemy):
        self.enemies.append(enemy)

    def add_bullet(self, bullet):
        self.bullets.append(bullet)

    def bullet_count(self):
        return len(self.bullets)

    def bomb(self):
        """If power is full, flash a circle for 200 ms and mark every enemy
        within BOMB_RADIUS of the ship for deletion; resets power to 0."""
        if self.bomb_power == BOMB_FULL_POWER:
            self.bomb_power = 0
            self.bomb_canvas_id = self.canvas.create_oval(
                self.ship.x - BOMB_RADIUS,
                self.ship.y - BOMB_RADIUS,
                self.ship.x + BOMB_RADIUS,
                self.ship.y + BOMB_RADIUS
            )
            self.after(200, lambda: self.canvas.delete(self.bomb_canvas_id))
            for e in self.enemies:
                if self.ship.distance_to(e) <= BOMB_RADIUS:
                    e.to_be_deleted = True
            self.update_bomb_power_text()

    def update_score_text(self):
        self.score_text.set_text('Score: %d' % self.score)

    def update_bomb_power_text(self):
        self.bomb_power_text.set_text('Power: %d%%' % self.bomb_power)

    def update_level_text(self):
        self.level_text.set_text('Level: %d' % self.level)

    def update_score(self):
        # Score rises by 1 every SCORE_WAIT frames of survival.
        self.score_wait += 1
        if self.score_wait >= SCORE_WAIT:
            self.score += 1
            self.score_wait = 0
            self.update_score_text()

    def update_bomb_power(self):
        # Bomb power recharges by 1 every BOMB_WAIT frames until full.
        self.bomb_wait += 1
        if (self.bomb_wait >= BOMB_WAIT) and (self.bomb_power != BOMB_FULL_POWER):
            self.bomb_power += 1
            self.bomb_wait = 0
            self.update_bomb_power_text()

    def create_enemy_star(self):
        """Spawn 18 enemies radiating from a random point (one every 20
        degrees) chosen at least 200 px away from the ship."""
        enemies = []
        x = randint(100, CANVAS_WIDTH - 100)
        y = randint(100, CANVAS_HEIGHT - 100)
        while vector_len(x - self.ship.x, y - self.ship.y) < 200:
            x = randint(100, CANVAS_WIDTH - 100)
            y = randint(100, CANVAS_HEIGHT - 100)
        for d in range(18):
            dx, dy = direction_to_dxdy(d * 20)
            enemy = Enemy(self, x, y, dx * ENEMY_BASE_SPEED, dy * ENEMY_BASE_SPEED)
            enemies.append(enemy)
        return enemies

    def create_enemy_from_edges(self):
        """Spawn one enemy at a random screen edge heading toward the ship."""
        x, y = random_edge_position()
        vx, vy = normalize_vector(self.ship.x - x, self.ship.y - y)
        vx *= ENEMY_BASE_SPEED
        vy *= ENEMY_BASE_SPEED
        enemy = Enemy(self, x, y, vx, vy)
        return [enemy]

    def create_enemies(self):
        # 20% chance of a star burst, otherwise a single edge spawner.
        if random() < 0.2:
            enemies = self.create_enemy_star()
        else:
            enemies = self.create_enemy_from_edges()
        for e in enemies:
            self.add_enemy(e)

    def pre_update(self):
        # Roughly one spawn decision every 10 frames.
        if random() < 0.1:
            self.create_enemies()

    def process_bullet_enemy_collisions(self):
        # Flag both bullet and enemy; actual removal happens in post_update.
        for b in self.bullets:
            for e in self.enemies:
                if b.is_colliding_with_enemy(e):
                    b.to_be_deleted = True
                    e.to_be_deleted = True

    def process_ship_enemy_collision(self):
        # Any ship/enemy contact ends the game.
        for e in self.enemies:
            if self.ship.is_colliding_with_enemy(e):
                self.stop_animation()

    def process_collisions(self):
        self.process_bullet_enemy_collisions()
        self.process_ship_enemy_collision()

    def update_and_filter_deleted(self, elements):
        """Tick and redraw *elements*; delete the flagged ones and return
        the survivors as a new list."""
        new_list = []
        for e in elements:
            e.update()
            e.render()
            if e.to_be_deleted:
                e.delete()
            else:
                new_list.append(e)
        return new_list

    def post_update(self):
        self.process_collisions()
        self.bullets = self.update_and_filter_deleted(self.bullets)
        self.enemies = self.update_and_filter_deleted(self.enemies)
        self.update_score()
        self.update_bomb_power()

    def on_key_pressed(self, event):
        """Left/Right steer, Space fires a bullet, Z triggers the bomb."""
        if event.keysym == 'Left':
            self.ship.start_turn('LEFT')
        elif event.keysym == 'Right':
            self.ship.start_turn('RIGHT')
        elif event.char == ' ':
            self.ship.fire()
        elif event.char.upper() == 'Z':
            self.bomb()

    def on_key_released(self, event):
        if event.keysym == 'Left':
            self.ship.stop_turn('LEFT')
        elif event.keysym == 'Right':
            self.ship.stop_turn('RIGHT')
# Build the window, start the game loop, and block in tk's mainloop.
if __name__ == "__main__":
    root = tk.Tk()
    root.title("Space Fighter")
    # do not allow window resizing
    root.resizable(False, False)
    app = SpaceGame(root, CANVAS_WIDTH, CANVAS_HEIGHT, UPDATE_DELAY)
    app.start()
    root.mainloop()
|
[
"jittat@gmail.com"
] |
jittat@gmail.com
|
4f12b2cc59d6c1796f624bc5b10d8d35fa779390
|
22749c6a569661b2637233cc0aebdc1701033b26
|
/src/python/pants/backend/codegen/protobuf/python/python_protobuf_module_mapper_test.py
|
d1a882ed3ab9459719226cada03e3667f28f2afd
|
[
"Apache-2.0"
] |
permissive
|
akk5597/pants
|
2eceb226c39b8ef7f603dfa96684b7522e1a9065
|
7ad295f71d2990eebbbe9c778bbf70f7d9e66584
|
refs/heads/main
| 2023-08-27T02:40:54.753545
| 2021-11-10T03:42:18
| 2021-11-10T03:42:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,124
|
py
|
# Copyright 2020 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.backend.codegen.protobuf.python import additional_fields, python_protobuf_module_mapper
from pants.backend.codegen.protobuf.python.python_protobuf_module_mapper import (
PythonProtobufMappingMarker,
)
from pants.backend.codegen.protobuf.target_types import ProtobufSourcesGeneratorTarget
from pants.backend.codegen.protobuf.target_types import rules as python_protobuf_target_types_rules
from pants.backend.python.dependency_inference.module_mapper import FirstPartyPythonMappingImpl
from pants.core.util_rules import stripped_source_files
from pants.engine.addresses import Address
from pants.testutil.rule_runner import QueryRule, RuleRunner
from pants.util.frozendict import FrozenDict
@pytest.fixture
def rule_runner() -> RuleRunner:
    """RuleRunner wired with every rule needed for protobuf->python module mapping."""
    return RuleRunner(
        rules=[
            *additional_fields.rules(),
            *stripped_source_files.rules(),
            *python_protobuf_module_mapper.rules(),
            *python_protobuf_target_types_rules(),
            QueryRule(FirstPartyPythonMappingImpl, [PythonProtobufMappingMarker]),
        ],
        target_types=[ProtobufSourcesGeneratorTarget],
    )
def test_map_first_party_modules_to_addresses(rule_runner: RuleRunner) -> None:
    """Generated *_pb2 modules map to their .proto addresses; ambiguous
    modules (same name under two source roots) are reported separately."""
    rule_runner.set_options(["--source-root-patterns=['root1', 'root2', 'root3']"])
    rule_runner.write_files(
        {
            "root1/protos/f1.proto": "",
            "root1/protos/f2.proto": "",
            "root1/protos/BUILD": "protobuf_sources()",
            # These protos would result in the same module name, so neither should be used.
            "root1/two_owners/f.proto": "",
            "root1/two_owners/BUILD": "protobuf_sources()",
            "root2/two_owners/f.proto": "",
            "root2/two_owners/BUILD": "protobuf_sources()",
            # A file with grpc. This also uses the `python_source_root` mechanism, which should be
            # irrelevant to the module mapping because we strip source roots.
            "root1/tests/f.proto": "",
            "root1/tests/BUILD": "protobuf_sources(grpc=True, python_source_root='root3')",
        }
    )
    result = rule_runner.request(FirstPartyPythonMappingImpl, [PythonProtobufMappingMarker()])
    assert result == FirstPartyPythonMappingImpl(
        mapping=FrozenDict(
            {
                "protos.f1_pb2": (Address("root1/protos", relative_file_path="f1.proto"),),
                "protos.f2_pb2": (Address("root1/protos", relative_file_path="f2.proto"),),
                # grpc=True adds a second generated module per proto file.
                "tests.f_pb2": (Address("root1/tests", relative_file_path="f.proto"),),
                "tests.f_pb2_grpc": (Address("root1/tests", relative_file_path="f.proto"),),
            }
        ),
        ambiguous_modules=FrozenDict(
            {
                "two_owners.f_pb2": (
                    Address("root1/two_owners", relative_file_path="f.proto"),
                    Address("root2/two_owners", relative_file_path="f.proto"),
                )
            }
        ),
    )
|
[
"noreply@github.com"
] |
akk5597.noreply@github.com
|
7b512c468b007c8b2f336f735e4eb125dfc4082e
|
a03eba726a432d8ef133f2dc55894ba85cdc4a08
|
/config/hostsconf/views.py
|
f6ec260509c9276a8b978a80bd4cf61bb22bcbaf
|
[
"MIT"
] |
permissive
|
mansonul/events
|
2546c9cfe076eb59fbfdb7b4ec8bcd708817d59b
|
4f6ca37bc600dcba3f74400d299826882d53b7d2
|
refs/heads/master
| 2021-01-15T08:53:22.442929
| 2018-01-30T16:14:20
| 2018-01-30T16:14:20
| 99,572,230
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
from django.http import HttpResponseRedirect
from django_hosts.resolvers import reverse as host_reverse
def www_root_redirect(request, path=None):
    """Redirect a request to the same *path* on the canonical ``www`` host.

    Bug fix: the original built the path-qualified URL in ``url_`` and then
    discarded it, always redirecting to the bare home page.
    """
    url_ = host_reverse("home", host='www')
    if path is not None:
        url_ = url_ + path
    return HttpResponseRedirect(url_)
|
[
"contact@dragosnicu.com"
] |
contact@dragosnicu.com
|
90e783ea257a3f30cbf5ecd45264e3e1bfb0f5e5
|
dc221edce0ad617aac3b9ad8f4f347ff84f56bf9
|
/.history/client_20200807180109.py
|
54c6da2132910d7f0425fdabfa0c1da205eccabc
|
[] |
no_license
|
zlm05170/cacontroller
|
310014c83ecf130643230eba87990e635fe1575f
|
e76d2eb5d58d6adfe7823e0dcd0059027c52b6bc
|
refs/heads/master
| 2022-12-21T08:05:58.315017
| 2020-09-23T11:45:07
| 2020-09-23T11:45:07
| 284,527,141
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,720
|
py
|
import asyncio
import websockets
import time
import json
import traceback
def view_actor_data(actor, port_type, port_name):
    # Unimplemented placeholder — presumably meant to display one port's
    # data for an actor; confirm intent before filling in.
    pass
def get_port_value_by_name(port_list, name):
    """Return the 'value' of the first entry whose port name equals *name*.

    Returns None when no entry matches.
    """
    matches = (entry['value'] for entry in port_list
               if entry['port']['name'] == name)
    return next(matches, None)
def find_port_index_by_name(actor, port_type, port_name):
    """Return the index of the first port named *port_name* inside
    ``actor[port_type]``, or None when it is absent."""
    for index, entry in enumerate(actor[port_type]):
        if entry['port']['name'] == port_name:
            return index
def print_port_data_by_index(actor, port_type, index):
    """Print "name: value" for the indexed port entry of *actor*.

    Both name and value must already be strings (plain ``+`` concatenation).
    """
    entry = actor[port_type][index]['port']
    print(entry['name'] + ': ' + entry['value'])
async def start():
    """Connect to the controller websocket and consume actor updates forever.

    NOTE(review): this file is an editor auto-save snapshot and is not
    self-consistent — see the inline notes on unreachable/broken calls.
    """
    uri = "ws://192.168.114.18:8887"
    # Template describing one tracked actor; copied once per actor below.
    actor_info = {
        'clazz' : '',
        'name' : '',
        'uuid' : None,
        'parent_uuid' : None
    }
    gps_gunnerus = actor_info.copy()
    gps_gunnerus['clazz'] = 'GPSController'
    gps_gunnerus['name'] = 'GPS1'
    gps_target_ship_1 = actor_info.copy()
    gps_target_ship_1['clazz'] = 'GPSController'
    gps_target_ship_1['name'] = 'Target Ship 1'
    gps_target_ship_2 = actor_info.copy()
    gps_target_ship_2['clazz'] = 'GPSController'
    gps_target_ship_2['name'] = 'Target Ship 2'
    gunnerus_thruster_port = actor_info.copy()
    gunnerus_thruster_port['clazz'] = 'ThrusterActor'
    gunnerus_thruster_port['name'] = 'Port'
    gunnerus_thruster_starboard = actor_info.copy()
    gunnerus_thruster_starboard['clazz'] = 'ThrusterActor'
    gunnerus_thruster_starboard['name'] = 'Starboard'
    actor_info_list = [gps_gunnerus, gps_target_ship_1, gps_target_ship_2, gunnerus_thruster_port, gunnerus_thruster_starboard]
    actor_list = [None for i in range(5)]
    async with websockets.connect(uri, ping_timeout=None) as websocket:
        while True:
            # name = f"luman!"
            # await websocket.send(name)
            # #print(f"> {name}")
            #await sendmessage(websocket)
            gunnerus = None
            ts1 = None
            ts2 = None
            if not websocket.open:
                print('reconnecting')
                websocket = await websockets.connect(uri)
            else:
                resp = await websocket.recv()
                try:
                    # Payload may carry a text prefix before the JSON object.
                    data_dic = json.loads(resp[resp.index('{'):])
                    # NOTE(review): evaluate() is defined with three required
                    # parameters — this one-argument call raises TypeError,
                    # which the bare except below swallows.
                    evaluate(data_dic)
                except:
                    traceback.print_exc()
                # NOTE(review): sendmessage is not defined anywhere in this
                # snapshot (only the commented stub below) — NameError here.
                await sendmessage()
# async def sendmessage():
# name = f"luman"
# return websocket.send(name)
async def evaluate(data_dic, clazz, name):
    """Return *data_dic* when its clazz/name match, else None.

    NOTE(review): broken history snapshot — the loop below references
    ``actor_list``, ``actor_info_list`` and ``resp``, which are locals of
    start(), not globals; the recursive ``await evaluate(resp, ...)`` passes a
    raw string where a dict is expected; and print_port_data_by_index() is
    called with one argument although it takes three.  None of this can run
    as written.
    """
    x = False if data_dic['clazz'].find(clazz) == -1 else True
    y = (data_dic['name'] == name)
    for i in range(len(actor_list)):
        actor_info = actor_info_list[i]
        actor = await evaluate(resp, actor_info['clazz'], actor_info['name'])
        if actor != None:
            actor_info['uuid'] = actor['uuid']
            actor_info['parent_uuid'] = get_port_value_by_name(actor['output'],'PARENT')
    print_port_data_by_index(find_port_index_by_name(actor_list[0], 'output', 'longitude'.upper()))
    #print(print_port_data_by_index)
    if x and y:
        return data_dic
def clazz_ls(data_dic):
    """Collect navigation/propulsion readings from *data_dic* and print them.

    Builds [lon, lat, east, north, course, speed, rpm, alpha]; unmatched
    ports are ignored, missing scalars stay 0.0 and missing lists stay [].
    """
    # Map scalar port names to their slot in all_data.
    scalar_slots = {
        'LONGITUDE': 0,
        'LATITUDE': 1,
        'EASTING': 2,
        'NORTHING': 3,
        'BEARING': 4,   # course
        'ACTUAL_RPM': 6,
        'ANGLE': 7,     # alpha
    }
    all_data = [0.0, 0.0, 0.0, 0.0, 0.0, [], [], []]
    for message in data_dic['output']:
        port = message['port']['name']
        if port == 'WORLD_VELOCITY':
            # Speed is a vector: gather each component's value.
            for component in message['value']['valueObjects']:
                all_data[5].append(component['value'])
        elif port in scalar_slots:
            all_data[scalar_slots[port]] = message['value']['value']
    #return all_data
    print(all_data)
async def savefile(receivedata):
    """Overwrite serverdata.json in the working directory with *receivedata*."""
    with open('serverdata.json', 'w') as out_file:
        out_file.writelines(receivedata)
if __name__=='__main__':
    #rospy.init_node("simulator_drl")
    # start() loops forever; run_forever keeps the loop alive afterwards.
    asyncio.get_event_loop().run_until_complete(start())
    asyncio.get_event_loop().run_forever()
|
[
"angelxx05170@gmail.com"
] |
angelxx05170@gmail.com
|
769a920462f74093deebe33e6db9ca5f4ce57734
|
bc6e2056500afdd5d11a28a613d6d73f5dd05447
|
/moneycash/produccion/admin.py
|
28741262c044e84e45e8db2088d83ef264941422
|
[] |
no_license
|
xangcastle/respaldo
|
d0540fabc089f947f052019431d55a9c3c85f131
|
48c5f53b2a2bce0bfa79b1fcc657aa40268e702b
|
refs/heads/master
| 2021-01-10T01:52:48.102689
| 2015-12-16T15:42:02
| 2015-12-16T15:42:02
| 48,118,925
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,700
|
py
|
from django.contrib import admin
from .models import *
from moneycash.entidad import entidad_admin
from moneycash.documento import documento_admin
from django.template.context import RequestContext
from django.shortcuts import render_to_response
class contadores_tabular(admin.TabularInline):
    # Inline of per-period equipment counter rows (grappelli collapse styling).
    model = equipo_periodo
    extra = 0
    classes = ('grp-collapse grp-open',)
    #fields = ('equipo', 'contador_inicial', 'contador_final')
class detalle_recibo_tabular(admin.TabularInline):
    # Inline of receipt detail lines shown inside the receipt admin.
    model = recibo_detalle
    extra = 0
    classes = ('grp-collapse grp-open',)
class recibo_admin(documento_admin):
    """Admin for production receipts, with print/invoice bulk actions."""
    list_display = ('numero', 'fecha', 'area', 'copias', 'importe')
    inlines = [detalle_recibo_tabular]
    fieldsets = (('Datos del Recibo',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('numero', 'fecha'), 'area'), }),
                 ("Detalle Inlines", {"classes":
                  ("placeholder recibo_detalle_set-group",), "fields": ()}),
                 ('Datos de Facturacion',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('copias', 'importe', 'tc'),), }),)
    actions = ['generar_imprimir', 'facturar']
    list_filter = ('periodo', 'area')

    def generar_imprimir(self, request, queryset):
        """Mark the selected receipts as printed and render the print template."""
        for r in queryset:
            r.impreso = True
            r.save()
        id_unico = False
        if queryset.count() == 1:
            id_unico = True
        ctx = {'queryset': queryset, 'id_unico': id_unico}
        return render_to_response('moneycash/produccion/recibo.html', ctx,
                                  context_instance=RequestContext(request))
    generar_imprimir.short_description = "Imprimir recibos selecionados"

    def facturar(self, request, queryset):
        # NOTE(review): the call resolves to a module-level ``facturar``
        # (expected via ``from .models import *``), not to this method —
        # confirm the helper exists in models.
        facturar(queryset)
class periodo_admin(admin.ModelAdmin):
    """Admin for billing periods, with bulk actions driving the monthly
    production workflow (helper functions come from ``from .models import *``)."""
    list_display = ('short_name', 'inicio_produccion', 'fin_produccion',
                    'copias_equipos', 'copias_areas', 'importe_produccion', 'cerrado')
    inlines = [contadores_tabular]
    fieldsets = (('Datos del Periodo', {'classes': ('grp-collapse grp-open',),
                  'fields': (('fecha_inicial', 'fecha_final'),
                             ('inicio_produccion', 'fin_produccion'),)}),)

    def generar_recibos(self, request, queryset):
        # Create the receipts for each selected period (module-level helper).
        for p in queryset:
            crear_recibos(p)
    generar_recibos.short_description = \
        'generar recibos de los periodos seleccionados'

    def cargar_copias(self, request, queryset):
        # Load the copy counters for each selected period (module-level helper;
        # the inner call is a global lookup, so it does not recurse).
        for p in queryset:
            cargar_copias(p)
    cargar_copias.short_description = \
        'cargar copias de los periodos seleccionados'

    def activar_equipos(self, request, queryset):
        # Re-activate the equipment attached to each selected period.
        for p in queryset:
            activar_equipos(p)
    activar_equipos.short_description = \
        'activar equipos de los periodos seleccionados'

    def cerrar_(self, request, queryset):
        # Close each selected period; the trailing underscore avoids shadowing
        # the module-level ``cerrar`` helper called inside.
        for p in queryset:
            cerrar(p)
    # Bug fix: the description must be set on the action callable itself
    # (``cerrar_``); the original set it on the module-level ``cerrar``
    # helper, so the admin UI fell back to the raw function name.
    cerrar_.short_description = \
        'cerrar periodos seleccionados'

    actions = [generar_recibos, cargar_copias, activar_equipos, cerrar_]
class equipo_admin(entidad_admin):
    """Admin for copier equipment: counters, depreciation and pricing."""
    list_display = ('code', 'modelo', 'serie', 'marca', 'contador_inicial',
                    'contador_actual', 'vida_util', 'costo_compra', 'depreciacion_copia',
                    'valor_depreciado', 'precio_venta', 'activo',)
    search_fields = ('code', 'name', 'modelo', 'serie')
    list_filter = ('activo', 'marca', 'ubicacion')
    fieldsets = (('Datos Generales',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('code', 'modelo'), ('serie', 'marca'),
                              ('velocidad', 'ubicacion')), }),
                 ('Datos de Facturacion',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('contador_inicial', 'contador_actual', 'vida_util'),
                              ('costo_compra', 'depreciacion_copia', 'valor_depreciado'),
                              ('precio_venta', 'activo'), ('costo_copia',
                               'precio_copia')), }),)
    ordering = ['code']
class cliente_admin(entidad_admin):
    """Admin for clients (contact and identification data)."""
    list_display = ('code', 'name', 'identificacion', 'telefono', 'direccion',
                    'activo')
    search_fields = ('code', 'name', 'telefono')
    list_filter = ('activo', )
    fieldsets = (('Datos Generales',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('code', 'name'), ('identificacion', 'telefono'),
                              ('direccion',), ('contacto', 'nombre_area'), 'activo'), }),)
class area_admin(entidad_admin):
    """Admin for client areas (where equipment is deployed)."""
    list_display = ('code', 'name', 'encargado', 'unidad_ejecutora',
                    'ubicacion', 'activo')
    search_fields = ('code', 'name', 'encargado')
    list_filter = ('activo', 'cliente', 'ubicacion')
    fieldsets = (('Datos del Area',
                  {'classes': ('grp-collapse grp-open',),
                   'fields': (('code', 'name'), ('encargado', 'unidad_ejecutora'),
                              ('equipos', 'activo'), ('ubicacion', 'cliente'), 'item'), }),)
class factura_detalle_admin(admin.TabularInline):
    # Inline of invoice detail lines shown inside the invoice admin.
    model = factura_detalle
    extra = 0
    classes = ('grp-collapse grp-open',)
class factura_admin(documento_admin):
    """Admin for invoices, with a bulk print action."""
    list_display = ('numero', 'fecha', 'cliente', 'subtotal', 'descuento',
                    'iva', 'total', 'total', 'tc', 'ir', 'al', 'impreso')
    fieldsets = (
        ('Datos de la Factura',
         {'classes': ('grp-collapse grp-open',),
          'fields': (('numero', 'fecha'), 'cliente',
                     ('exento_iva', 'exento_ir', 'exento_al')), }),
        ("Detalle Inlines",
         {"classes": ("placeholder factura_detalle_set-group",),
          'fields': ()}),
        ('Totales de la Factura',
         {'classes': ('grp-collapse grp-open',),
          'fields': (('subtotal', 'descuento'),
                     ('iva', 'total'), ('ir', 'al'), 'tc'), }),
    )
    inlines = [factura_detalle_admin]
    actions = ['generar_imprimir']

    def generar_imprimir(self, request, queryset):
        """Mark the selected invoices as printed and render the print template."""
        id_unico = False
        if queryset.count() == 1:
            id_unico = True
        ctx = {'queryset': queryset, 'id_unico': id_unico}
        queryset.update(impreso=True)
        return render_to_response('moneycash/produccion/factura.html', ctx,
                                  context_instance=RequestContext(request))
    generar_imprimir.short_description = "Imprimir Facturas Selecionadas"
# Wire every model to its admin class; simple catalogs share entidad_admin.
admin.site.register(Marca, entidad_admin)
admin.site.register(Equipo, equipo_admin)
admin.site.register(Area, area_admin)
admin.site.register(Ubicacion, entidad_admin)
admin.site.register(Cliente, cliente_admin)
admin.site.register(Periodo, periodo_admin)
admin.site.register(Recibo, recibo_admin)
admin.site.register(Factura, factura_admin)
admin.site.register(Item, entidad_admin)
admin.site.register(Categoria, entidad_admin)
|
[
"cesarabel@johnmay.com.ni"
] |
cesarabel@johnmay.com.ni
|
762c5f01dc26bf85b36b2cda337b1e05fd67f44e
|
22f96e07b22e3ca89ee757badd1f35ed9efcc034
|
/docs/conf.py
|
d4b5fe98e2c13e6412c9c4feeec2f5eaf200fdf8
|
[
"MIT"
] |
permissive
|
Duc98f/MangAdventure
|
83e341ecbdb6592c947f77e32848346dcc23e861
|
fe69c850f6adce1d9a8755e5aa63db358a6084f6
|
refs/heads/master
| 2023-06-09T23:08:25.595545
| 2021-06-13T10:55:16
| 2021-06-13T11:16:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,077
|
py
|
# -- Setup Django --
# Make the project root and the local Sphinx extension dir importable,
# then boot Django so autodoc can import modules that touch the ORM.
from os import environ as env
from os.path import dirname, join
from sys import path
path.insert(0, dirname(dirname(__file__)))
path.insert(1, join(dirname(__file__), '_ext'))
env['DJANGO_SETTINGS_MODULE'] = 'MangAdventure.tests.settings'
__import__('django').setup()

# -- Project information --
import MangAdventure as MA  # noqa: E402
project = 'MangAdventure'
author = MA.__author__
release = MA.__version__
copyright = f'2018-2021, {project}, {MA.__license__} license'

# -- General configuration --
extensions = [
    'sphinx.ext.autodoc',
    'mangadventure_patches',  # local extension under docs/_ext
    'sphinx_autodoc_typehints',
    'sphinx.ext.intersphinx',
    'sphinx.ext.extlinks',
    'sphinx.ext.viewcode',
]
templates_path = ['_templates']
source_suffix = '.rst'
master_doc = 'index'
language = 'en'
pygments_style = 'manni'
needs_sphinx = '3.3'

# -- InterSphinx & extlinks configuration --
_django = 'https://docs.djangoproject.com/en/3.2/'
_mdn = 'https://developer.mozilla.org/en-US/docs/Web/'
intersphinx_mapping = {
    'django': (_django, f'{_django}_objects/'),
    'python': ('https://docs.python.org/3.6/', None),
}
# Shorthand roles for commonly linked external documentation.
extlinks = {
    'setting': (f'{_django}ref/settings/#std:setting-%s', ''),
    'tag': (f'{_django}ref/templates/builtins/#%s', ''),
    'auth': ('https://django-allauth.rtfd.io/en/latest/%s', ''),
    'csp': (f'{_mdn}HTTP/Headers/Content-Security-Policy/%s', ''),
    'status': (f'{_mdn}HTTP/Status/%s', ''),
    'header': (f'{_mdn}HTTP/Headers/%s', ''),
    'schema': ('https://schema.org/%s', ''),
}

# -- Autodoc configuration --
autodoc_default_options = {
    'member-order': 'bysource',
    'special-members': True,
    'undoc-members': True,
    # Dunders that add noise rather than signal are excluded.
    'exclude-members': ','.join((
        '__new__',
        '__dict__',
        '__repr__',
        '__init__',
        '__slots__',
        '__module__',
        '__weakref__',
        '__slotnames__',
        '__annotations__',
    ))
}
autodoc_mock_imports = ['pytest']  # not installed in the docs build
autodoc_inherit_docstrings = True
always_document_param_types = True
set_type_checking_flag = True
typehints_fully_qualified = False
typehints_document_rtype = True
# disable sphinx.ext.autodoc.typehints
autodoc_typehints = 'none'

# -- Options for HTML output --
html_theme = 'sphinx_rtd_theme'
html_theme_path = [__import__(html_theme).get_html_theme_path()]
html_theme_options = {
    'logo_only': True,
    'display_version': False,
    'collapse_navigation': True,
}
html_static_path = ['_static']
html_logo = '_static/logo.png'
# html_sidebars = {}

# -- Options for HTMLHelp output --
htmlhelp_basename = f'{project}Doc'

# -- Options for LaTeX output --
latex_elements = {}
latex_documents = [(
    master_doc, f'{project}.tex',
    f'{project} Documentation', author, 'manual'
)]

# -- Options for manual page output --
man_pages = [(
    master_doc, project.lower(),
    f'{project} Documentation', author.split(', '), 7
)]

# -- Options for Texinfo output --
texinfo_documents = [(
    master_doc, project, f'{project} Documentation',
    author, project, MA.__doc__, 'Miscellaneous'
)]
|
[
"chronobserver@disroot.org"
] |
chronobserver@disroot.org
|
f3a43ef0015900475f2c2da760ba827c2fe933df
|
923f1c7bd149d37c23c5b2f067baab3f5b95a4cf
|
/setup.py
|
309502c9f88be647a041ae202762971497a89441
|
[
"BSD-2-Clause"
] |
permissive
|
Lokeshburade007/python-mammoth
|
7467d08ad906e932fbdba720557ee5fd8d862c28
|
f8eb2e1214b7ef1749f2cf73a91b09c9f3adf6a8
|
refs/heads/master
| 2023-08-21T00:35:06.783844
| 2021-10-12T18:52:31
| 2021-10-12T18:52:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,514
|
py
|
#!/usr/bin/env python
import os
import sys
from setuptools import setup
def read(fname):
    """Return the contents of *fname*, resolved relative to this setup.py.

    Uses a context manager so the file handle is always closed (the
    original left it to garbage collection). Encoding is intentionally
    left at the default to stay compatible with Python 2.7, which this
    package still supports.
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
# Package metadata; the long description is pulled from the README file.
setup(
    name='mammoth',
    version='1.4.17',
    description='Convert Word documents from docx to simple and clean HTML and Markdown',
    long_description=read("README"),
    author='Michael Williamson',
    author_email='mike@zwobble.org',
    url='http://github.com/mwilliamson/python-mammoth',
    packages=['mammoth', 'mammoth.docx', 'mammoth.html', 'mammoth.styles', 'mammoth.styles.parser', 'mammoth.writers'],
    entry_points={
        # Installs the `mammoth` command-line tool.
        "console_scripts": [
            "mammoth=mammoth.cli:main"
        ]
    },
    keywords="docx word office clean html markdown md",
    install_requires=[
        "cobble>=0.1.3,<0.2",
    ],
    python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*',
    license="BSD-2-Clause",
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
    ],
)
|
[
"mike@zwobble.org"
] |
mike@zwobble.org
|
452d6a1116be732f045e520d350dc705407e2c81
|
8fcc27160f8700be46296568260fa0017a0b3004
|
/client/eve/client/script/ui/shared/fitting/panels/offensePanel.py
|
2f426e8e743c0e2dd09191bd7a22606f6464d826
|
[] |
no_license
|
connoryang/dec-eve-serenity
|
5d867f4eedfa896a4ef60f92556356cafd632c96
|
b670aec7c8b4514fc47cd52e186d7ccf3aabb69e
|
refs/heads/master
| 2021-01-22T06:33:16.303760
| 2016-03-16T15:15:32
| 2016-03-16T15:15:32
| 56,389,750
| 1
| 0
| null | 2016-04-16T15:05:24
| 2016-04-16T15:05:24
| null |
UTF-8
|
Python
| false
| false
| 3,079
|
py
|
#Embedded file name: e:\jenkins\workspace\client_SERENITY\branches\release\SERENITY\eve\client\script\ui\shared\fitting\panels\offensePanel.py
from carbonui import const as uiconst
from carbonui.primitives.container import Container
from carbonui.primitives.sprite import Sprite
from eve.client.script.ui.control.eveLabel import EveLabelMedium
from eve.client.script.ui.station.fitting.fittingTooltipUtils import SetFittingTooltipInfo
from eve.client.script.ui.shared.fitting.panels.basePanel import BaseMenuPanel
from localization import GetByLabel
import uthread
class OffensePanel(BaseMenuPanel):
    """Fitting-window panel showing turret, drone and missile DPS stats."""

    # One entry per stat: (identifier, icon texture path,
    # tooltip-hint label path, fitting tooltip name).
    damageStats = (('turretDps', 'res:/UI/Texture/Icons/26_64_1.png', 'UI/Fitting/FittingWindow/TurretDpsTooltip', 'DamagePerSecondTurrets'), ('droneDps', 'res:/UI/Texture/Icons/drones.png', 'UI/Fitting/FittingWindow/DroneDpsTooltip', 'DamagePerSecondDrones'), ('missileDps', 'res:/UI/Texture/Icons/81_64_16.png', 'UI/Fitting/FittowWindow/MissileDpsTooltip'.replace('FittowWindow', 'FittingWindow'), 'DamagePerSecondMissiles'))
    iconSize = 26  # icon width/height in pixels

    def ApplyAttributes(self, attributes):
        BaseMenuPanel.ApplyAttributes(self, attributes)

    def LoadPanel(self, initialLoad = False):
        """Rebuild the icon/label grid, one cell per entry in damageStats."""
        self.Flush()
        self.ResetStatsDicts()
        self.display = True
        parentGrid = self.GetValueParentGrid(columns=len(self.damageStats))
        for dps, texturePath, hintPath, tooltipName in self.damageStats:
            hint = GetByLabel(hintPath)
            c = self.GetValueCont(self.iconSize)
            parentGrid.AddCell(cellObject=c)
            icon = Sprite(texturePath=texturePath, parent=c, align=uiconst.CENTERLEFT, pos=(0,
             0,
             self.iconSize,
             self.iconSize), state=uiconst.UI_DISABLED)
            SetFittingTooltipInfo(targetObject=c, tooltipName=tooltipName)
            c.hint = hint
            label = EveLabelMedium(text='', parent=c, state=uiconst.UI_DISABLED, align=uiconst.CENTERLEFT)
            # Cache the widgets so _UpdateOffenseStats can refresh them later.
            self.statsLabelsByIdentifier[dps] = label
            self.statsIconsByIdentifier[dps] = icon
            self.statsContsByIdentifier[dps] = c
        BaseMenuPanel.FinalizePanelLoading(self, initialLoad)

    def UpdateOffenseStats(self):
        # Refresh asynchronously on a tasklet so the UI thread is not blocked.
        uthread.new(self._UpdateOffenseStats)

    def _UpdateOffenseStats(self):
        """Recompute per-source DPS for the fitted ship and update the labels."""
        itemID = self.controller.GetItemID()
        turretDps, missileDps = self.dogmaLocation.GetTurretAndMissileDps(itemID)
        dpsText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=turretDps)
        self.SetLabel('turretDps', dpsText)
        missileText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=missileDps)
        self.SetLabel('missileDps', missileText)
        droneDps, drones = self.dogmaLocation.GetOptimalDroneDamage(itemID)
        droneText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=droneDps)
        self.SetLabel('droneDps', droneText)
        # The panel's status line shows the combined total of all three.
        totalDps = turretDps + missileDps + droneDps
        totalDpsText = GetByLabel('UI/Fitting/FittingWindow/DpsLabel', dps=totalDps)
        self.SetStatusText(totalDpsText)
|
[
"masaho.shiro@gmail.com"
] |
masaho.shiro@gmail.com
|
90d61a45791a4c4fca451ce3958912b1271ff667
|
f71d67025b732e66e1a37c02c05392c3dd116d65
|
/Lessons/ITP1/08_Character/d.py
|
81a5619fd4674529f96b237cb3fef6f221b7ee12
|
[] |
no_license
|
clarinet758/aoj
|
2829f92137dd1a93734445e1e92513f8e3e0b5c0
|
21787ffee1a6dd60c717d7b880b63107187e4710
|
refs/heads/main
| 2023-06-25T12:04:40.127040
| 2023-06-19T16:36:38
| 2023-06-19T16:36:38
| 34,978,951
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
# Python 2 solution: answer whether the second input line occurs in the
# "ring" formed by the first line (i.e. is a substring of some rotation).
import time
import sys
import io
import re
import math
import itertools
#sys.stdin=file('input.txt')
#sys.stdout=file('output.txt','w')
#10**9+7
mod=1000000007
#mod=1777777777
pi=3.141592653589
xy=[(1,0),(-1,0),(0,1),(0,-1)]
bs=[(-1,-1),(-1,1),(1,1),(1,-1)]
#start = time.clock()
# Doubling the ring text makes every rotation appear as a contiguous
# substring, so a plain "in" membership test suffices.
n=raw_input()*2
print'Yes' if raw_input() in n else'No'
ans=chk=0
#end = time.clock()
#print end - start
|
[
"clarinet758@gmail.com"
] |
clarinet758@gmail.com
|
262a46b28e0f81a173486d6faa14c8be88a61e79
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2738/60598/281309.py
|
b37b2756d6e576c77a144d36933054d39da07823
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 850
|
py
|
input()
matrix = []
while 1:
s = input().replace(" ","")
if s == "]":
break
if s[-1] == ',':
matrix.append(s[1:-2].split(","))
else:
matrix.append(s[1:-1].split(","))
row = len(matrix)
col = len(matrix[0])
result = 0
are = []
for i in range(row):
for j in range(col):
if matrix[i][j] == "\"1\"":
high = 0
wides = []
for h in range(i, row):
high += 1
wide = 0
for s in range(j, col):
if matrix[h][s] == "\"1\"":
wide += 1
else:
break
wides.append(wide)
tempAre = high * min(wides)
if tempAre == 0:
break
are.append(tempAre)
print(max(are))
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
1160fe2c4176a9a8392411959eb0d17929231848
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-gaussdbforopengauss/huaweicloudsdkgaussdbforopengauss/v3/model/gauss_d_bfor_open_gauss_user_for_list.py
|
ffed016aea17b6dc6eec320863498e523cbc368d
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 4,944
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class GaussDBforOpenGaussUserForList:
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attributes whose values are masked as "****" in to_dict().
    sensitive_list = []

    openapi_types = {
        'name': 'str',
        'attributes': 'GaussDBforOpenGaussUserForListAttributes',
        'memberof': 'str'
    }

    attribute_map = {
        'name': 'name',
        'attributes': 'attributes',
        'memberof': 'memberof'
    }

    def __init__(self, name=None, attributes=None, memberof=None):
        """GaussDBforOpenGaussUserForList

        The model defined in huaweicloud sdk

        :param name: Account name.
        :type name: str
        :param attributes:
        :type attributes: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        :param memberof: The user's default permissions.
        :type memberof: str
        """
        self._name = None
        self._attributes = None
        self._memberof = None
        self.discriminator = None

        self.name = name
        if attributes is not None:
            self.attributes = attributes
        if memberof is not None:
            self.memberof = memberof

    @property
    def name(self):
        """Gets the name of this GaussDBforOpenGaussUserForList.

        Account name.

        :return: The name of this GaussDBforOpenGaussUserForList.
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this GaussDBforOpenGaussUserForList.

        Account name.

        :param name: The name of this GaussDBforOpenGaussUserForList.
        :type name: str
        """
        self._name = name

    @property
    def attributes(self):
        """Gets the attributes of this GaussDBforOpenGaussUserForList.

        :return: The attributes of this GaussDBforOpenGaussUserForList.
        :rtype: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        """
        return self._attributes

    @attributes.setter
    def attributes(self, attributes):
        """Sets the attributes of this GaussDBforOpenGaussUserForList.

        :param attributes: The attributes of this GaussDBforOpenGaussUserForList.
        :type attributes: :class:`huaweicloudsdkgaussdbforopengauss.v3.GaussDBforOpenGaussUserForListAttributes`
        """
        self._attributes = attributes

    @property
    def memberof(self):
        """Gets the memberof of this GaussDBforOpenGaussUserForList.

        The user's default permissions.

        :return: The memberof of this GaussDBforOpenGaussUserForList.
        :rtype: str
        """
        return self._memberof

    @memberof.setter
    def memberof(self, memberof):
        """Sets the memberof of this GaussDBforOpenGaussUserForList.

        The user's default permissions.

        :param memberof: The memberof of this GaussDBforOpenGaussUserForList.
        :type memberof: str
        """
        self._memberof = memberof

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Sensitive attributes are masked rather than exported.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, GaussDBforOpenGaussUserForList):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
af85091132f201033888c47fc10b43a4b7e8d32d
|
a8b37bd399dd0bad27d3abd386ace85a6b70ef28
|
/airbyte-integrations/connectors/source-aircall/setup.py
|
25b830a1e3cce6526bed07734eb77ef89e7f7d8b
|
[
"MIT",
"LicenseRef-scancode-free-unknown",
"Elastic-2.0"
] |
permissive
|
thomas-vl/airbyte
|
5da2ba9d189ba0b202feb952cadfb550c5050871
|
258a8eb683634a9f9b7821c9a92d1b70c5389a10
|
refs/heads/master
| 2023-09-01T17:49:23.761569
| 2023-08-25T13:13:11
| 2023-08-25T13:13:11
| 327,604,451
| 1
| 0
|
MIT
| 2021-01-07T12:24:20
| 2021-01-07T12:24:19
| null |
UTF-8
|
Python
| false
| false
| 628
|
py
|
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
# Runtime dependencies of the connector.
MAIN_REQUIREMENTS = [
    "airbyte-cdk~=0.1",
]

# Extra dependencies pulled in when installing with the "tests" extra.
TEST_REQUIREMENTS = [
    "requests-mock~=1.9.3",
    "pytest~=6.2",
    "pytest-mock~=3.6.1",
]

setup(
    name="source_aircall",
    description="Source implementation for Aircall.",
    author="Airbyte",
    author_email="contact@airbyte.io",
    packages=find_packages(),
    install_requires=MAIN_REQUIREMENTS,
    # Ship the JSON/YAML schema and spec files alongside the code.
    package_data={"": ["*.json", "*.yaml", "schemas/*.json", "schemas/shared/*.json"]},
    extras_require={
        "tests": TEST_REQUIREMENTS,
    },
)
|
[
"noreply@github.com"
] |
thomas-vl.noreply@github.com
|
f060ef31d43c3220db23ba2d5f5b9638358bec69
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_nitpicked.py
|
c24415023c45b6fa685872e33916c3f83b705177
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 228
|
py
|
# class header
class _NITPICKED():
    """Word-form record for "nitpicked" (past form of "nitpick")."""

    def __init__(self):
        self.name = "NITPICKED"
        # BUG FIX: the original assigned the bare name ``nitpick`` (an
        # undefined identifier), which raised NameError on instantiation.
        # The definition list now holds the base form as a string, matching
        # the ``basic`` attribute below.
        self.definitions = ['nitpick']
        self.parents = []
        self.childen = []  # sic: attribute name kept as-is for external readers
        self.properties = []
        self.jsondata = {}
        self.basic = ['nitpick']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
98491800978764c42bde1d1d36a77b8dc13c9ef3
|
1e249067ab2dabc17cb7ebda46f9f23a5cfad552
|
/tests/test_processor.py
|
149ae513f1fdcfc1b09bbec275c63aac1d55b556
|
[
"BSD-2-Clause"
] |
permissive
|
STIRLIN6/indra_cogex
|
6e4cba84ee1ce82a404154e7370f88fc340400cb
|
552cefd71431b08b8118b2cc0428fd8681e6fc83
|
refs/heads/main
| 2023-08-14T01:28:14.852108
| 2021-09-14T04:18:13
| 2021-09-14T04:18:13
| 377,100,238
| 0
| 0
|
BSD-2-Clause
| 2021-06-15T09:01:23
| 2021-06-15T09:01:23
| null |
UTF-8
|
Python
| false
| false
| 184
|
py
|
from indra_cogex.representation import norm_id
def test_norm_id():
    """norm_id maps a namespace to its registry prefix and does not
    duplicate the prefix when the identifier already carries it (CHEBI)."""
    assert norm_id("UP", "P12345") == "uniprot:P12345"
    assert norm_id("CHEBI", "CHEBI:12345") == "chebi:12345"
|
[
"ben.gyori@gmail.com"
] |
ben.gyori@gmail.com
|
29689d82e65139fffd325b2517ea32a511041d38
|
9734c93c86c982b1ce046340bac9e53645b261b8
|
/tests/formatters/yaml_formatters_file.py
|
f69655c1080aa94b6d70e50bbc0002921e12694b
|
[
"Apache-2.0"
] |
permissive
|
log2timeline/plaso
|
cd72dd407d6c5627506c14f58cb8f6a6926aa808
|
d6022f8cfebfddf2d08ab2d300a41b61f3349933
|
refs/heads/main
| 2023-09-02T08:43:48.241198
| 2023-08-19T07:28:12
| 2023-08-19T07:28:12
| 23,812,315
| 1,506
| 421
|
Apache-2.0
| 2023-09-04T08:24:53
| 2014-09-08T23:29:28
|
Python
|
UTF-8
|
Python
| false
| false
| 3,363
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the YAML-based formatters file."""
import io
import unittest
from plaso.formatters import yaml_formatters_file
from plaso.lib import errors
from tests import test_lib as shared_test_lib
class YAMLFormattersFileTest(shared_test_lib.BaseTestCase):
  """Tests for the YAML-based formatters file."""

  # pylint: disable=protected-access

  # A minimal but complete conditional formatter definition used as the
  # valid baseline in the tests below.
  _FORMATTERS_YAML = {
      'type': 'conditional',
      'data_type': 'test:fs:stat',
      'message': [
          '{display_name}',
          'Type: {file_entry_type}',
          '({unallocated})'],
      'short_message': [
          '(unknown)'],
      'short_source': 'SOURCE',
      'source': 'My Custom Log Source'}

  def testReadFormatterDefinition(self):
    """Tests the _ReadFormatterDefinition function."""
    test_formatters_file = yaml_formatters_file.YAMLFormattersFile()

    formatter = test_formatters_file._ReadFormatterDefinition(
        self._FORMATTERS_YAML)

    self.assertIsNotNone(formatter)
    self.assertEqual(formatter.data_type, 'test:fs:stat')

    # Each progressively-less-incomplete definition below must be rejected
    # with ParseError: empty, bad type, missing data_type, missing message,
    # missing short_message, and an unknown top-level key.
    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({'type': 'bogus'})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({'type': 'conditional'})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({
          'type': 'conditional',
          'data_type': 'test:fs:stat'})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({
          'type': 'conditional',
          'data_type': 'test:fs:stat',
          'message': [
              '{display_name}',
              'Type: {file_entry_type}',
              '({unallocated})']})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({
          'type': 'conditional',
          'data_type': 'test:fs:stat',
          'message': [
              '{display_name}',
              'Type: {file_entry_type}',
              '({unallocated})']})

    with self.assertRaises(errors.ParseError):
      test_formatters_file._ReadFormatterDefinition({'bogus': 'error'})

  def testReadFromFileObject(self):
    """Tests the _ReadFromFileObject function."""
    test_file_path = self._GetTestFilePath(['formatters', 'format_test.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_formatters_file = yaml_formatters_file.YAMLFormattersFile()

    with io.open(test_file_path, 'r', encoding='utf-8') as file_object:
      formatters = list(test_formatters_file._ReadFromFileObject(file_object))

    self.assertEqual(len(formatters), 2)

  def testReadFromFile(self):
    """Tests the ReadFromFile function."""
    test_file_path = self._GetTestFilePath(['formatters', 'format_test.yaml'])
    self._SkipIfPathNotExists(test_file_path)

    test_formatters_file = yaml_formatters_file.YAMLFormattersFile()

    formatters = list(test_formatters_file.ReadFromFile(test_file_path))

    self.assertEqual(len(formatters), 2)

    # The fixture is expected to define the formatters in this order.
    self.assertEqual(formatters[0].data_type, 'test:event')
    self.assertEqual(formatters[1].data_type, 'test:fs:stat')
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
|
[
"noreply@github.com"
] |
log2timeline.noreply@github.com
|
0e1cdca49f5eeb7315a63e0091ae55029d0eece7
|
32c56293475f49c6dd1b0f1334756b5ad8763da9
|
/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_watch_event.py
|
eeac0514753ca0d2cfe0c9ba717f53e73fabf2aa
|
[
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0",
"MIT"
] |
permissive
|
bopopescu/socialliteapp
|
b9041f17f8724ee86f2ecc6e2e45b8ff6a44b494
|
85bb264e273568b5a0408f733b403c56373e2508
|
refs/heads/master
| 2022-11-20T03:01:47.654498
| 2020-02-01T20:29:43
| 2020-02-01T20:29:43
| 282,403,750
| 0
| 0
|
MIT
| 2020-07-25T08:31:59
| 2020-07-25T08:31:59
| null |
UTF-8
|
Python
| false
| false
| 3,880
|
py
|
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen
https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.14.4
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1WatchEvent(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name and the value is attribute
        type.
      attribute_map (dict): The key is attribute name and the value is json key
        in definition.
    """
    swagger_types = {'object': 'RuntimeRawExtension', 'type': 'str'}

    attribute_map = {'object': 'object', 'type': 'type'}

    def __init__(self, object=None, type=None):
        """
        V1WatchEvent - a model defined in Swagger
        """
        self._object = None
        self._type = None
        self.discriminator = None
        # Both fields are required; the setters raise ValueError on None.
        self.object = object
        self.type = type

    @property
    def object(self):
        """
        Gets the object of this V1WatchEvent.
        Object is: * If Type is Added or Modified: the new state of the object.
        * If Type is Deleted: the state of the object immediately before
        deletion. * If Type is Error: *Status is recommended; other types may
        make sense depending on context.

        :return: The object of this V1WatchEvent.
        :rtype: RuntimeRawExtension
        """
        return self._object

    @object.setter
    def object(self, object):
        """
        Sets the object of this V1WatchEvent.
        Object is: * If Type is Added or Modified: the new state of the object.
        * If Type is Deleted: the state of the object immediately before
        deletion. * If Type is Error: *Status is recommended; other types may
        make sense depending on context.

        :param object: The object of this V1WatchEvent.
        :type: RuntimeRawExtension
        """
        if object is None:
            raise ValueError('Invalid value for `object`, must not be `None`')
        self._object = object

    @property
    def type(self):
        """
        Gets the type of this V1WatchEvent.

        :return: The type of this V1WatchEvent.
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """
        Sets the type of this V1WatchEvent.

        :param type: The type of this V1WatchEvent.
        :type: str
        """
        if type is None:
            raise ValueError('Invalid value for `type`, must not be `None`')
        self._type = type

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(
                    map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(
                    map(
                        lambda item: (item[0], item[1].to_dict())
                        if hasattr(item[1], 'to_dict') else item, value.items()))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1WatchEvent):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"jonathang132298@gmail.com"
] |
jonathang132298@gmail.com
|
541be20181d90c2f788955ed7c94c8e307b6d08e
|
a7da58ad91b007b3650003708eb91928f1e3684a
|
/bt5/erp5_banking_cash/WorkflowTemplateItem/portal_workflow/internal_money_payment_workflow/scripts/validateCounter.py
|
1259c1c7a0143dad30f158e310e8328d81adaa3d
|
[] |
no_license
|
jgpjuniorj/j
|
042d1bd7710fa2830355d4312a6b76103e29639d
|
dc02bfa887ffab9841abebc3f5c16d874388cef5
|
refs/heads/master
| 2021-01-01T09:26:36.121339
| 2020-01-31T10:34:17
| 2020-02-07T04:39:18
| 239,214,398
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,447
|
py
|
from Products.DCWorkflow.DCWorkflow import ValidationFailed
from Products.ERP5Type.Message import Message
# Workflow validation script (Python 2 / Zope restricted code): validates an
# internal money payment transaction before it advances in the workflow.
transaction = state_change['object']
date = transaction.getStartDate()
source = transaction.getSource(None)
# check we are in an opened accounting day
transaction.Baobab_checkCounterDateOpen(site=source, date=transaction.getStartDate())
# check again that the counter is open
context.Baobab_checkCounterOpened(source)
if transaction.getPaymentType() in (None, ""):
  msg = Message(domain="ui", message="No payment type defined.")
  raise ValidationFailed, (msg,)
#test if the source or the destination is correct
transaction.Base_checkBaobabSourceAndDestination()
# Get price and total_price.
amount = transaction.getSourceTotalAssetPrice()
total_price = transaction.getTotalPrice(portal_type=('Cash Delivery Line','Cash Delivery Cell'), fast=0)
# The declared asset price must match the sum of the delivery lines/cells.
if amount != total_price:
  msg = Message(domain="ui", message="Amount differ from total price.")
  raise ValidationFailed, (msg,)
if source is None:
  msg = Message(domain='ui', message='No counter defined.')
  raise ValidationFailed, (msg,)
site = transaction.getSourceValue()
vault = transaction.getBaobabSource()
# NOTE(review): CashDelivery_checkCounterInventory appears to return 2 when
# the counter lacks the resource — confirm against the type definition.
resource = transaction.CashDelivery_checkCounterInventory(source=vault, portal_type='Cash Delivery Line',same_source=1)
#context.log('resource',resource)
if resource == 2:
  msg = Message(domain="ui", message="No Resource.")
  raise ValidationFailed, (msg,)
|
[
"georgios.dagkakis@nexedi.com"
] |
georgios.dagkakis@nexedi.com
|
c4167281b5e6283bb6cd67dd447b40152c61100c
|
f36fc94a1ac5ffbfb6d2a78807992347a7e9f6e2
|
/assignment1/cs231n/classifiers/linear_classifier.py
|
844826318d20b5e2114d43a0cfb20aa6ca31046a
|
[] |
no_license
|
Dipeshtamboli/CS231n-Assignments
|
d2f60504410499aed96da9f988fc69c239096abe
|
146b3ce885867c81dd609abdbaedabeafa23f7b7
|
refs/heads/master
| 2020-04-11T09:10:45.563002
| 2019-01-01T20:56:18
| 2019-01-01T20:56:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,966
|
py
|
from __future__ import print_function
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
class LinearClassifier(object):
def __init__(self):
self.W = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
if self.W is None:
# lazily initialize W
self.W = 0.001 * np.random.randn(dim, num_classes)
# Run stochastic gradient descent to optimize W
loss_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (dim, batch_size) # @@ X_batch should have shape (batch_size,dim)
# and y_batch should have shape (batch_size,) # @@ instead of (dim,batch_size)
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
#######
#CODE
#######
ids=np.arange(batch_size)
ids=np.random.choice(ids,batch_size,replace=True)
X_batch=X[ids]
y_batch=y[ids]
#######
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
# evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
#######
#CODE
#######
self.W-=learning_rate*grad
#######
pass
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
return loss_history
def predict(self, X):
    """
    Use the trained weights of this linear classifier to predict labels for
    data points.

    Inputs:
    - X: A numpy array of shape (N, D) containing N samples, each of
      dimension D.

    Returns:
    - y_pred: 1-dimensional array of length N; each element is the integer
      class index with the highest score for that sample.
    """
    # Scores have shape (N, C); the prediction is the argmax over classes.
    # (Removed the leftover scaffolding banners and dead `pass`.)
    scores = X.dot(self.W)
    return np.argmax(scores, axis=1)
def loss(self, X_batch, y_batch, reg):
    """
    Compute the loss function and its derivative.

    This is a hook: subclasses provide the real implementation; the base
    version intentionally does nothing (returns None).

    Inputs:
    - X_batch: numpy array of shape (N, D); a minibatch of N points of
      dimension D.
    - y_batch: numpy array of shape (N,); labels for the minibatch.
    - reg: (float) regularization strength.

    Returns: a tuple of
    - the loss as a single float
    - the gradient with respect to self.W, same shape as W
    """
    pass
class LinearSVM(LinearClassifier):
    """Linear classifier trained with the multiclass SVM (hinge) loss."""

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized hinge-loss implementation.
        return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
    """Linear classifier trained with the softmax + cross-entropy loss."""

    def loss(self, X_batch, y_batch, reg):
        # Delegate to the vectorized softmax-loss implementation.
        return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
|
[
"dipeshtamboli@gmail.com"
] |
dipeshtamboli@gmail.com
|
d243c506f63f7cc1780806923f5d78de5943116b
|
08ee36e0bb1c250f7f2dfda12c1a73d1984cd2bc
|
/src/mnistk/networks/linearrelu_5.py
|
efcfbec738c0a5f4fad45d439a1de52528caf7c2
|
[] |
no_license
|
ahgamut/mnistk
|
58dadffad204602d425b18549e9b3d245dbf5486
|
19a661185e6d82996624fc6fcc03de7ad9213eb0
|
refs/heads/master
| 2021-11-04T07:36:07.394100
| 2021-10-27T18:37:12
| 2021-10-27T18:37:12
| 227,103,881
| 2
| 1
| null | 2020-02-19T22:07:24
| 2019-12-10T11:33:09
|
Python
|
UTF-8
|
Python
| false
| false
| 675
|
py
|
# -*- coding: utf-8 -*-
"""
linearrelu_5.py
:copyright: (c) 2019 by Gautham Venkatasubramanian.
:license: MIT
"""
import torch
from torch import nn
class LinearReLU_5(nn.Module):
    """MNIST classifier: 784 -> Linear(70) -> ReLU -> Linear(10) -> log-softmax."""

    def __init__(self):
        super().__init__()
        # Attribute names f0..f3 are kept so saved state_dicts stay loadable.
        self.f0 = nn.Linear(in_features=784, out_features=70, bias=True)
        self.f1 = nn.ReLU(inplace=False)
        self.f2 = nn.Linear(in_features=70, out_features=10, bias=False)
        self.f3 = nn.LogSoftmax(dim=1)

    def forward(self, *inputs):
        # Flatten each sample to a 784-vector, then apply the layers in order.
        out = inputs[0].view(inputs[0].shape[0], 784)
        for layer in (self.f0, self.f1, self.f2, self.f3):
            out = layer(out)
        return out
|
[
"41098605+ahgamut@users.noreply.github.com"
] |
41098605+ahgamut@users.noreply.github.com
|
3603592a43f6cb57493b90f261bc46ecb00ef171
|
1f936103af336af6bbd335f45d6baa55c426922b
|
/monatbx/generate_random_image_list.py
|
8a9ca4dabc8f05852c8bdb56a7c99cb54b3732fe
|
[] |
no_license
|
monarin/monatbx
|
2ec342d67f1fbccb82656218ffd136f2eb7d96ab
|
43f56974f811e5b2b0dcc428d4f9b36043ed9d04
|
refs/heads/master
| 2020-06-18T13:08:58.893701
| 2016-11-30T00:58:18
| 2016-11-30T00:58:18
| 75,136,381
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 510
|
py
|
import os
import sys
import random
# Usage: generate_random_image_list.py <pickle_dir> <n_images>
# Picks n_images random .pickle frames and writes their paths to a .lst file.
pickle_dir = sys.argv[1]
n_images = int(sys.argv[2])

# Collect every .pickle file in the given directory.
frame_files = []
if os.path.isdir(pickle_dir):
    frame_files = [pickle_dir + '/' + name
                   for name in os.listdir(pickle_dir)
                   if name.endswith('.pickle')]

# random.sample draws without replacement, so the selection has no duplicates.
chosen_indices = random.sample(range(len(frame_files)), n_images)
selected_frames = [frame_files[i] for i in chosen_indices]

with open('frame_rand_' + str(n_images) + '.lst', 'w') as out:
    out.write(''.join(frame + '\n' for frame in selected_frames))
|
[
"monarin@gmail.com"
] |
monarin@gmail.com
|
947dced367bd8dde73a91f39443a0f7b80bda3a8
|
86319aad3690906f614ac1af28b8843529e9e0da
|
/thwackbin/data/__init__.py
|
a156485bd18d0e80a766cdfa5aabbee5f290dab9
|
[] |
no_license
|
sohgoh/thwackbin
|
b5828783a6179e96784bed0bdb894b179e3bea07
|
ba9fedc4bcec598f367aa6d4f2567d1840c65c51
|
refs/heads/master
| 2021-01-21T03:14:08.261732
| 2014-04-16T03:53:51
| 2014-04-16T04:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 410
|
py
|
"""
thwackbin.data
~~~~~~~~~~~~~~
Package which contains mock results data stored on the file system.
"""
__author__ = 'Andrew Hawker <andrew@appthwack.com>'
import json
import os
RESULTS = None
ROOT = os.path.dirname(__file__)
def init():
    """
    Load and cache our results.json data on startup.
    """
    global RESULTS
    # Use a context manager so the file handle is closed promptly;
    # the original `json.load(open(...))` left it to the garbage collector.
    with open(os.path.join(ROOT, 'results.json')) as f:
        RESULTS = json.load(f)
|
[
"andrew.r.hawker@gmail.com"
] |
andrew.r.hawker@gmail.com
|
6f4f86844af5579493a6f13c1a0dcd95fafe0bd1
|
c79e7e691c9fa5cc05bd227209762f735e6263e7
|
/pyy1/.pycharm_helpers/python_stubs/-1550516950/apt_pkg/Hashes.py
|
92a3d2de462fdebf1fdaf1414141fd87e81bd746
|
[
"Apache-2.0"
] |
permissive
|
pyy1988/pyy_test1
|
27fd5fbd41935ba907e26f4f4d2546ca502f29a6
|
6bea878409e658aa87441384419be51aaab061e7
|
refs/heads/master
| 2020-04-05T07:01:58.745653
| 2018-11-08T12:51:00
| 2018-11-08T12:51:00
| 156,660,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,555
|
py
|
# encoding: utf-8
# module apt_pkg
# from /usr/lib/python3/dist-packages/apt_pkg.cpython-35m-x86_64-linux-gnu.so
# by generator 1.145
"""
Classes and functions wrapping the apt-pkg library.
The apt_pkg module provides several classes and functions for accessing
the functionality provided by the apt-pkg library. Typical uses might
include reading APT index files and configuration files and installing
or removing packages.
"""
# no imports
from .object import object
# NOTE: machine-generated API stub (restored by a stub generator from the
# compiled apt_pkg extension); signatures and property bodies are placeholders,
# not real implementations. Keep edits to comments only.
class Hashes(object):
    """
    Hashes([object: (bytes, file)])
    Calculate hashes for the given object. It can be used to create all
    supported hashes for a file.
    The parameter 'object' can be a bytestring, an object providing the
    fileno() method, or an integer describing a file descriptor.
    """
    def __init__(self, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        pass
    @staticmethod # known case of __new__
    def __new__(*args, **kwargs): # real signature unknown
        """ Create and return a new object. See help(type) for accurate signature. """
        pass
    # The lambda-based properties below are stub placeholders emitted by the
    # generator; the real attributes live in the C extension.
    md5 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The MD5Sum of the file as a string."""
    sha1 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The SHA1Sum of the file as a string."""
    sha256 = property(lambda self: object(), lambda self, v: None, lambda self: None) # default
    """The SHA256Sum of the file as a string."""
|
[
"347003917@qq.com"
] |
347003917@qq.com
|
57b977f2ae53db87282b285aa878effd453face0
|
2a46ad4e83dcd903451fb5fba8d04da266dbd49e
|
/Algorithm/Leetcode/Codes/ConstructBinaryTreeFromInorderAndPostorderTraversal.py
|
769cc7114912f93e7e81cf26965025c23ac1cdbd
|
[] |
no_license
|
chloeeekim/TIL
|
e248801508340cb2eb9f3cfddc486b7dd7250386
|
c5a94e81aa2f2dfcc626820205ca9feaad069fad
|
refs/heads/master
| 2022-03-02T04:05:24.439271
| 2022-02-22T01:25:14
| 2022-02-22T01:25:14
| 190,150,063
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,522
|
py
|
"""
106. Construct Binary Tree from Inorder and Postorder Traversal : https://leetcode.com/problems/construct-binary-tree-from-inorder-and-postorder-traversal/
어떤 트리의 inorder, postorder traversal 결과가 리스트로 주어졌을 때, 트리를 복원하는 문제
- 트리 내에 중복된 값은 없다고 가정한다
Example:
- Input : inorder = [9,3,15,20,7], postorder = [9,15,7,20,3]
- Output : [3,9,20,null,null,15,7]
Note:
recursive하게 해결
inorder와 preorder로 트리를 복원하는 문제에서 약간만 변형
postorder 리스트의 마지막 값이 root가 되고, inorder 리스트에서 root 값을 기준으로 left children과 right children으로 구분된다
위 조건이 모든 subtree에 대해서도 만족
preorder에서는 left children을 먼저 구하고, right children을 구하는 순서였으나,
postorder에서는 반대로 right children을 먼저 구하고, left children을 구하는 순서
"""
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def buildTree(self, inorder: List[int], postorder: List[int]) -> TreeNode:
        """Rebuild the tree from its inorder + postorder traversals.

        The last postorder element is the root; it splits the inorder list
        into left and right subtrees. `postorder` is consumed from the back,
        so the right subtree must be rebuilt before the left one.
        """
        if not inorder:
            return None
        root_val = postorder.pop()
        node = TreeNode(root_val)
        split = inorder.index(root_val)
        node.right = self.buildTree(inorder[split + 1:], postorder)
        node.left = self.buildTree(inorder[:split], postorder)
        return node
|
[
"hiyaku0317@gmail.com"
] |
hiyaku0317@gmail.com
|
fdba2e38a7275b27bf739668f77984e9aad554b6
|
d5fd936e7346844a1b7c5ea81dfa9adf5bb647d0
|
/datasets/load_data.py
|
c547ebd91f699327cac78ca35d0dbe0f0094489e
|
[] |
no_license
|
isaachenrion/graphs
|
098e7098a894a3d1d9d18cf0ce1054e5910afa15
|
2ba6d50a7f61233fa8cc92ba03256691abb889de
|
refs/heads/master
| 2021-01-02T09:10:49.686240
| 2017-09-11T19:52:48
| 2017-09-11T19:52:48
| 99,154,954
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,013
|
py
|
import os
import pickle
from .datasets import BatchedFixedOrderGraphDataset, FixedOrderGraphDataset, GraphDataset, BatchedGraphDataset
from .add_virtual_node import add_virtual_node, add_target_nodes
from .path import DATA_DIR
def load_from_path(data_path, args):
    """Unpickle a dataset, wrap it in the matching batched class, optionally
    add target nodes for the 'vcn' model, and return the preprocessed result."""
    with open(data_path, 'rb') as fh:
        dataset = pickle.load(fh)
    # Fixed-order datasets get their own batched wrapper; check it first
    # (it may be a subclass of GraphDataset).
    if isinstance(dataset, FixedOrderGraphDataset):
        dataset = BatchedFixedOrderGraphDataset(dataset, args.batch_size)
    elif isinstance(dataset, GraphDataset):
        dataset = BatchedGraphDataset(dataset, args.batch_size)
    if args.model == 'vcn':
        add_target_nodes(dataset)
    return dataset.preprocess()
def load_data(args):
    """Load the train and eval splits for args.problem from DATA_DIR."""
    train_path = os.path.join(DATA_DIR, args.problem + '-train.pkl')
    eval_path = os.path.join(DATA_DIR, args.problem + '-eval.pkl')
    # Load train first, then eval, matching the original call order.
    return load_from_path(train_path, args), load_from_path(eval_path, args)
|
[
"isaachenrion@gmail.com"
] |
isaachenrion@gmail.com
|
02af9acedfd8eb63a76f63c93c109e539acb1fa4
|
0f9f8e8478017da7c8d408058f78853d69ac0171
|
/python2/l0064_minimum_path_sum.py
|
e5eed8adafa9b21abd66ed0af9541fba57e42edd
|
[] |
no_license
|
sprax/1337
|
dc38f1776959ec7965c33f060f4d43d939f19302
|
33b6b68a8136109d2aaa26bb8bf9e873f995d5ab
|
refs/heads/master
| 2022-09-06T18:43:54.850467
| 2020-06-04T17:19:51
| 2020-06-04T17:19:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 748
|
py
|
class Solution(object):
    def minPathSum(self, grid):
        """
        :type grid: List[List[int]]
        :rtype: int

        Bottom-up DP: best[i][j] is the cheapest path cost from cell (i, j)
        to the bottom-right corner, moving only right or down.
        """
        if not grid:
            return 0
        rows, cols = len(grid), len(grid[0])
        best = [[0] * cols for _ in range(rows)]
        for i in range(rows - 1, -1, -1):
            for j in range(cols - 1, -1, -1):
                if i == rows - 1 and j == cols - 1:
                    # Destination cell: cost is just its own value.
                    best[i][j] = grid[i][j]
                elif i == rows - 1:
                    best[i][j] = grid[i][j] + best[i][j + 1]
                elif j == cols - 1:
                    best[i][j] = grid[i][j] + best[i + 1][j]
                else:
                    best[i][j] = grid[i][j] + min(best[i + 1][j], best[i][j + 1])
        return best[0][0]
|
[
"zhoulu312@gmail.com"
] |
zhoulu312@gmail.com
|
ed777a2b20b0c94e0469882347bedeaacedfd55e
|
876a1b7b7c898c826b94ff34f3d9a1d22ee5459b
|
/QUANTAXIS/QAUtil/QAWebutil.py
|
8a2a75459233fd85e3744b092b8ba3babacb56ca
|
[
"MIT"
] |
permissive
|
pm58/QUANTAXIS
|
6db63c461d18f13f7340f7d46e42cde3bc3f40cb
|
03c526f640f48f4a153e9c4e0e27f74ccd18a345
|
refs/heads/master
| 2020-04-27T08:17:42.227150
| 2019-03-09T05:56:05
| 2019-03-09T05:56:05
| 174,165,118
| 5
| 0
|
MIT
| 2019-03-09T05:56:06
| 2019-03-06T14:55:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,967
|
py
|
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2018 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
from subprocess import PIPE, Popen
def QA_util_web_ping(url):
    """Ping ``url`` once with the system ``ping`` command and return the last
    latency figure (in ms) parsed from its output, or 9999999 on failure.
    """
    ms_list = []
    # Pass the argument list with shell=False: the original used shell=True
    # with a list, which on POSIX hands the extra items to the shell instead
    # of to ping, silently dropping the host argument.
    p = Popen(["ping", url], stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, _ = p.communicate()  # read all output and reap the child process
    # Windows-style output looks like "time=23ms"; split on '=' and pick the
    # numbers before 'ms'.  NOTE(review): Linux prints "time=23.4 ms", which
    # int() rejects — same limitation as the original parser; confirm target OS.
    for item in str(out).split('='):
        if 'ms' in item:
            ms_list.append(int(item.split('ms')[0]))
    if len(ms_list) < 1:
        # Bad Request: no latency figures could be parsed from the output.
        ms_list.append(9999999)
    return ms_list[-1]
class QA_Util_web_pool():
    """Placeholder for a pool of web endpoints; all methods are unimplemented."""

    def __init__(self):
        pass

    def hot_update(self):
        # TODO: implement hot update of the endpoint pool.
        pass

    def dynamic_optimics(self):
        # TODO: implement dynamic optimisation of endpoint selection.
        pass

    def task_queue(self):
        # TODO: implement the task queue.
        pass
if __name__ == "__main__":
    # Ad-hoc smoke test: timestamp before/after a single ping round-trip
    # to a well-known host.
    print(datetime.datetime.now())
    print(QA_util_web_ping('www.baidu.com'))
    print(datetime.datetime.now())
|
[
"yutiansut@qq.com"
] |
yutiansut@qq.com
|
a41ee74e0d74a2f619205675cb265d0c888b3d01
|
9645bdfbb15742e0d94e3327f94471663f32061a
|
/Python/235 - Lowest Common Ancestor of a Binary Search Tree/235_lowest-common-ancestor-of-a-binary-search-tree.py
|
863b29d2d3d70572b919bc045ab5e6b412efb394
|
[] |
no_license
|
aptend/leetcode-rua
|
f81c080b2260adb2da677612e5c437eda256781d
|
80e44f4e9d3a5b592fdebe0bf16d1df54e99991e
|
refs/heads/master
| 2023-06-22T00:40:05.533424
| 2021-03-17T13:51:28
| 2021-03-17T13:51:28
| 186,434,133
| 2
| 0
| null | 2023-06-21T22:12:51
| 2019-05-13T14:17:27
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,554
|
py
|
from leezy import Solution, solution
from leezy.assists import TreeContext
class Q235(Solution):
@solution
def lowestCommonAncestor(self, root, p, q):
# 68ms
if p < root.val > q:
return self.lowestCommonAncestor(root.left, p, q)
if p > root.val < q:
return self.lowestCommonAncestor(root.right, p, q)
return root
@solution
def lca_iter(self, root, p, q):
# 76ms 40.62%
while root:
if root.val > p and root.val > q:
root = root.left
elif root.val < p and root.val < q:
root = root.right
else:
return root
def lca_dumb(self, root, p, q):
ppath, qpath = [], []
self.search(root, p, ppath)
self.search(root, q, qpath)
prev = x = y = None
for x, y in zip(ppath, qpath):
if x.val != y.val:
return prev
prev = x
return x
def search(self, node, v, path):
if node is None:
path.clear()
return
if v == node.val:
path.append(node)
return
path.append(node)
if v > node.val:
self.search(node.right, v, path)
else:
self.search(node.left, v, path)
def main():
q = Q235()
q.set_context(TreeContext)
t1 = [6, 2, 8, 0, 4, 7, 9, None, None, 3, 5]
q.add_args(t1, 2, 8)
q.add_args(t1, 2, 4)
q.add_args(t1, 3, 7)
q.run()
if __name__ == "__main__":
main()
|
[
"crescentwhale@hotmail.com"
] |
crescentwhale@hotmail.com
|
8e990b308f624c1525603f9ab92945fda7fb8ce2
|
5167f77d96d1dc5412a8a0a91c95e3086acd05dc
|
/test/functional/wallet_implicitsegwit.py
|
553ce7367502b4851bea035523dbb7026ed2072f
|
[
"MIT"
] |
permissive
|
ocvcoin/ocvcoin
|
04fb0cea7c11bf52e07ea06ddf9df89631eced5f
|
79c3803e330f32ed50c02ae657ff9aded6297b9d
|
refs/heads/master
| 2023-04-30T10:42:05.457630
| 2023-04-15T11:49:40
| 2023-04-15T11:49:40
| 406,011,904
| 3
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,424
|
py
|
#!/usr/bin/env python3
# Copyright (c) 2019 The Ocvcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet implicit segwit feature."""
import test_framework.address as address
from test_framework.test_framework import OcvcoinTestFramework
# TODO: Might be nice to test p2pk here too
address_types = ('legacy', 'bech32', 'p2sh-segwit')
def key_to_address(key, address_type):
    """Derive the address of `key` (a pubkey) for the given address type.

    Raises ValueError on an unknown address_type instead of silently
    falling through and returning None, which hid typos in callers.
    """
    if address_type == 'legacy':
        return address.key_to_p2pkh(key)
    elif address_type == 'p2sh-segwit':
        return address.key_to_p2sh_p2wpkh(key)
    elif address_type == 'bech32':
        return address.key_to_p2wpkh(key)
    raise ValueError("unknown address type %r" % (address_type,))
def send_a_to_b(receive_node, send_node):
    """For every address type, create a receive address on receive_node and
    have send_node send 1 coin to every address variant derivable from its
    pubkey. Returns a dict mapping address type -> pubkey."""
    keys = {}
    for recv_type in address_types:
        addr = receive_node.getnewaddress(address_type=recv_type)
        pubkey = receive_node.getaddressinfo(addr)['pubkey']
        keys[recv_type] = pubkey
        for send_type in address_types:
            target = key_to_address(pubkey, send_type)
            send_node.sendtoaddress(address=target, amount=1)
    return keys
def check_implicit_transactions(implicit_keys, implicit_node):
    """Assert the node recorded a 'receive' entry for every address variant
    of every pubkey — i.e. implicit segwit conversion works all ways."""
    txs = implicit_node.listtransactions(None, 99999)
    # Build the (category, address) pairs once instead of per assertion.
    seen = tuple((tx['category'], tx['address']) for tx in txs)
    for recv_type in address_types:
        pubkey = implicit_keys[recv_type]
        for send_type in address_types:
            assert(('receive', key_to_address(pubkey, send_type)) in seen)
class ImplicitSegwitTest(OcvcoinTestFramework):
    """Functional test: transactions to all address variants of a wallet key
    are detected as 'receive', both before and after a node restart."""

    def set_test_params(self):
        # Two nodes: one receives (node 0), one funds and sends (node 1).
        self.num_nodes = 2
        self.supports_cli = False

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.log.info("Manipulating addresses and sending transactions to all variations")
        implicit_keys = send_a_to_b(self.nodes[0], self.nodes[1])
        self.sync_all()
        self.log.info("Checking that transactions show up correctly without a restart")
        check_implicit_transactions(implicit_keys, self.nodes[0])
        self.log.info("Checking that transactions still show up correctly after a restart")
        # Restart both nodes to prove detection survives a wallet reload.
        self.restart_node(0)
        self.restart_node(1)
        check_implicit_transactions(implicit_keys, self.nodes[0])

if __name__ == '__main__':
    ImplicitSegwitTest().main()
|
[
"contact@ocvcoin.com"
] |
contact@ocvcoin.com
|
bbc89e8e7645a694b405dccb4acd25b4f0cc9544
|
84cfe9b0ca7209487231e0725f7ad0d233f09544
|
/smv/views.py
|
e0abea56ca1a13c1798a6cffabfed45f0991342d
|
[] |
no_license
|
archit-dwevedi/M4Plan
|
3eefc12ea447d624bae6f758c3648d7caf825c1a
|
d162592748ea37bc070b6217365e8601a6ccdd9a
|
refs/heads/master
| 2021-10-26T23:22:04.456014
| 2019-04-14T20:02:17
| 2019-04-14T20:02:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,397
|
py
|
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib import messages
from absenteeism.models import *
from skill_matrix.models import *
from leave_calendar.models import *
from .forms import *
from .models import *
import datetime
def smv(request):
    """Handle the SMV entry form: save on a valid POST, re-render otherwise."""
    if request.method == 'POST':
        form = Smv(request.POST)
        if form.is_valid():
            form.save()
            return HttpResponse("<h1>SMV is submitted</h1>")
        messages.error(request, "Error")
    else:
        form = Smv()
    return render(request, 'smv/smv.html', {'form': form})
def dashsmv(request):
    """Render the SMV dashboard listing every SMV record."""
    records = SMV.objects.all()
    return render(request, 'smv/dash_smv.html', {'a': records})
def dashpfm(request):
    """Render the PFM dashboard: per-SMV SAM values and machine counts.

    NOTE(review): 'sam' presumably means Standard Allowed Minutes and 'mcs'
    a machine-count estimate — confirm with the domain owner; the magic
    constants (60, 12, 20, 0.02, 0.9, 560, 480, 0.85) are undocumented.
    """
    a=SMV.objects.all()
    sam=[]
    mcs=[]
    for i in a:
        #time=((i.pick_in_sec+i.main_Process_in_sec+i.turn_in_sec+i.dispose_in_sec)*i.s_P_I.s_p_i)/12
        # SAM formula: element times scaled by SPI, stitch length, complexity
        # and allowances (left exactly as written).
        sam.append((((((((i.pick_in_sec+i.main_Process_in_sec+i.turn_in_sec+i.dispose_in_sec)/60)/12)*i.s_P_I.s_p_i)/20)*i.stitch_Length.stitch_length)*int(i.complexity.complx))*(1+int(i.personal_Allowance+i.fatigue_Allowance+i.delay_Allowance))*0.02*0.9)
    print(sam)
    for i in sam:
        # Machines needed per operation at 85% efficiency.
        mcs.append(560/(480/(i*0.85)))
    print(mcs)
    return render(request,'smv/dash_pfm.html',{'a':a,'sam':sam,'mcs':mcs})
def newdashpfm(request):
    """Render the new PFM dashboard listing every PFM record."""
    records = PFM.objects.all()
    return render(request, 'smv/new_dash_pfm.html', {'a': records})
def ob(request):
    """Select a section and stash its PFM operations for the /newob step.

    NOTE(review): state is passed between views via module-level globals
    (s, a, d), which is not safe for concurrent requests — consider the
    session instead.
    """
    if(request.method=='POST'):
        form=Pfm(request.POST)
        if(form.is_valid()):
            global a
            global d
            global s
            s=request.POST.get('section')          # selected section name
            a=PFM.objects.filter(sec__name=s)      # operations in that section
            d=a                                    # alias consumed by newob()
            return redirect('/newob')
        else:
            messages.error(request,"Error")
    else:
        form=Pfm()
    return render(request,'smv/ob.html',{'form':form})
def newob(request):
    """Collect the chosen operations for the section selected in ob().

    NOTE(review): on an invalid POST, `form` is never assigned (only
    `myself` is), so the final render raises NameError — bug to fix.
    Also relies on module-level globals (a, d, s, get); see ob().
    """
    if(request.method=='POST'):
        global d
        myself=Ob(request.POST,operation=d)
        if(myself.is_valid()):
            global get
            cat=myself.cleaned_data['category']
            sub=myself.cleaned_data['subcategory']
            # 'get' holds the selected operations, consumed by dashob().
            get=myself.cleaned_data['Add Neccessary Operation']
            print(get)
            print(cat,sub)
            return redirect('/dashob')
        else:
            messages.error(request,"Error")
    else:
        global a
        global s
        form = Ob(operation=a)
    return render(request,'smv/ob.html',{'form':form,'s':s})
def dashob(request):
    """Render the operation-bulletin dashboard for the operations picked in
    newob(), computing a SAM value per operation.

    NOTE(review): same undocumented SAM formula as dashpfm(); globals
    `get` (selected operation names) and `q` (their SMV rows) are shared
    with later views.
    """
    global get
    global q
    q=[]
    sam=[]
    for i in get:
        q.append(SMV.objects.get(operation=i))
    for i in q:
        print(i.operation)
        print(i.s_P_I)
        sam.append((((((((i.pick_in_sec + i.main_Process_in_sec + i.turn_in_sec + i.dispose_in_sec) / 60) / 12) * i.s_P_I.s_p_i) / 20) * i.stitch_Length.stitch_length) * int(i.complexity.complx)) * ( 1 + int(i.personal_Allowance + i.fatigue_Allowance + i.delay_Allowance)) * 0.02 * 0.9)
    return render(request,'smv/dashob.html',{'a':q,'sam':sam})
def layout(request):
    """Render the layout page for the section/operations chosen earlier."""
    global s, q
    return render(request, 'smv/layout.html', {'a': s, 'q': q})
def dashboard(request):
    """Render the main dashboard: available (non-absent) operators with the
    required skill scales for the selected operations.

    NOTE(review): depends on globals set by earlier views (s, q, get);
    the local name `list` shadows the builtin on the second-to-last line.
    """
    global s
    global q
    global get
    ab=[]
    d=datetime.datetime.now().date()
    # Users on an approved leave that has not yet ended.
    a=LeaveApplication.objects.all()
    for i in a:
        if(d<=i.end_date):
            ab.append(i.key.user)
    print(ab)
    # Users marked absent/on-leave (or unmarked) in today's attendance.
    b=Person.objects.all()
    ab2=[]
    for j in b:
        if(j.date==d):
            if(j.status=='Absent' or j.status=='Leave' or j.status==None):
                ab2.append(User.objects.get(username=j.name))
    print(ab2)
    c=Scale.objects.all()
    #e=Employee.objects.all()
    # Drop skill-scale rows belonging to unavailable users.
    ss=ab+ab2
    for m in ss:
        for n in c:
            if(m==n.use):
                c=c.exclude(use=m)
    print(c)
    print(get)
    # Debug dump: who can do each selected operation, and at what level.
    for i in get:
        for j in c:
            if(str(j.operation)==i):
                print(j.use,j.operation,j.level)
##    m=lambda x:x==y
##    for i in c:
##        y=str(i.operation)
##        print(list(map(m,get)))
    list=zip(c,q)
    return render(request,'smv/dashboard.html',{'a':s,'q':q,'c':c,'get':get,'list':list})
def desc(request):
    """Render the static description page."""
    return render(request, 'smv/desc.html')
|
[
"dwevediar@gmail.com"
] |
dwevediar@gmail.com
|
45b49213838540d4cfa9b40c36aa8caf8d58558d
|
38445323b49947266d72645ec973b02e96879eed
|
/harshad number.py
|
8a99c9850563a0ad3ee51f2ed7074159f804f964
|
[] |
no_license
|
pooja-pichad/loop
|
2d9989b472a2fbacf0a85da06d869016b2d74083
|
47dafba1253da98f98c8fa389e13283ce1e14dee
|
refs/heads/main
| 2023-04-22T02:58:49.274211
| 2021-05-22T07:13:39
| 2021-05-22T07:13:39
| 369,741,349
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 717
|
py
|
# Harshad number: a number that is divisible by the sum of its digits.
# Example: 18 -> 1 + 8 = 9, and 18 % 9 == 0, so 18 is a Harshad number.
#
# Bug fix: the original incremented i BEFORE testing divisibility, so each
# number was checked against the PREVIOUS number's digit sum (e.g. it tested
# 2 % digit_sum(1)) and printed the wrong classification for every i.


def _digit_sum(n):
    """Sum of the (up to three) decimal digits of n, for 1 <= n <= 999."""
    return n % 10 + (n // 10) % 10 + (n // 10) // 10


def is_harshad(n):
    """Return True if n is divisible by the sum of its own digits."""
    return n % _digit_sum(n) == 0


i = 1
while i < 1000:
    if is_harshad(i):
        print("harshad number", i)
    else:
        print("not harshad number", i)
    i = i + 1
|
[
"noreply@github.com"
] |
pooja-pichad.noreply@github.com
|
0d3b899d072571d9b6f47263ee86838fd0b208a6
|
6ecc1d05bbd9ca2c1d21322faef076c1f28454db
|
/chrome/browser/ui/webui/chromeos/login/DEPS
|
52acfb6a38a1c062632e3dbccf09ecbcc162ff4b
|
[
"BSD-3-Clause"
] |
permissive
|
pandareen/chromium
|
0e3a9fb92bb9ad027d5b3482a6b03d0bb51c16a1
|
3ea799335afb5178c519f9e12db8b31390375736
|
refs/heads/master
| 2023-03-14T05:47:29.433132
| 2018-06-27T07:21:08
| 2018-06-27T07:21:08
| 138,843,522
| 0
| 0
| null | 2018-06-27T07:09:52
| 2018-06-27T07:09:52
| null |
UTF-8
|
Python
| false
| false
| 863
|
# Per-file include exceptions for this directory's DEPS checker: each key is
# a filename regex, each value the extra include paths that file may use.
specific_include_rules = {
  # TODO(mash): Fix. https://crbug.com/770866
  "core_oobe_handler\.cc": [
    "+ash/shell.h",
  ],
  "oobe_display_chooser\.cc": [
    "+ash/display/window_tree_host_manager.h",
    "+ash/shell.h",
  ],
  # TODO(mash): Fix. https://crbug.com/678990
  "signin_screen_handler\.cc": [
    "+ash/detachable_base",
    "+ash/shell.h",
  ],
  "signin_screen_handler\.h": [
    "+ash/detachable_base/detachable_base_observer.h",
  ],
  # Tests.
  "oobe_display_chooser_browsertest\.cc": [
    "+ash/shell.h",
  ],
  "oobe_display_chooser_unittest.cc": [
    "+ash/display/display_configuration_controller.h",
    "+ash/shell.h",
    "+ash/test/ash_test_base.h",
    # TODO(mash): Remove. http://crbug.com/720917.
    "+ui/events/devices/device_data_manager.h",
  ],
  "signin_userlist_unittest\.cc": [
    "+ash/test/ash_test_base.h"
  ],
}
|
[
"commit-bot@chromium.org"
] |
commit-bot@chromium.org
|
|
1984950eeeabd376b7d534bbc788f09949c9ea71
|
f3416956f9bfc7af870867e2fe8644f08d513b23
|
/combine/contest_20150310a/data_prep/prepare_pgmodel.py
|
18a14ff2cbcfdb41cfe5e56133323bb4b304d6ed
|
[] |
no_license
|
dsjoerg/blundercheck
|
a71012c0d3ded929599d191d4f73dcb14f94030a
|
04fb39ba0dd1591b387f573f767973518b688822
|
refs/heads/master
| 2021-01-18T18:35:21.992359
| 2015-03-24T18:11:11
| 2015-03-24T18:11:11
| 27,928,453
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 8,108
|
py
|
#!/usr/bin/env python
from pandas import *
from numpy import *
from djeval import *
import csv, code
import cPickle as pickle
from sklearn.externals import joblib
GAMELIMIT=60000
NUM_GAMES=100000
def shell():
    """Drop into an interactive console sharing this script's global state.

    NOTE: `vars` shadows the builtin, and globals() returns the live module
    dict, so the update mutates real module globals — kept as-is.
    """
    vars = globals()
    vars.update(locals())
    shell = code.InteractiveConsole(vars)
    shell.interact()
# ---------------------------------------------------------------------------
# Load and merge all per-game / per-side feature sources into one DataFrame.
# (Python 2 script; paths under /data are produced by earlier pipeline steps.)
# ---------------------------------------------------------------------------
msg("Hi! Reading eheaders")
eheaders_filename = '/data/eheaders.p'
eheaders_file = open(eheaders_filename, 'r')
eheaders = pickle.load(eheaders_file)
# Per-game header lookups keyed by (gamenum, side) or gamenum.
elos = eheaders['elos']
result = eheaders['result']
checkmate = eheaders['checkmate']
openings = eheaders['openings']
ocount = eheaders['opening_count']
msg("Hi! Reading crunched movescores from %s" % sys.argv[1])
crunched_path = sys.argv[1]
crunched_df = read_csv(crunched_path, sep=',', engine='c', index_col=['gamenum', 'side'])
msg("Hi! Reading GB scores from %s" % sys.argv[2])
gb_path = sys.argv[2]
gb_df = read_csv(gb_path, sep=',', engine='c', index_col=['gamenum'])
msg("Hi! Reading depthstats")
depthstats_path = '/data/depthstats.csv'
columns = [
    'gamenum',
    'side',
    'mean_depth',
    'mean_seldepth',
    'mean_depths_agreeing_ratio',
    'mean_deepest_agree_ratio',
    'pct_sanemoves',
    'gamelength',
    'mean_num_bestmoves',
    'mean_num_bestmove_changes',
    'mean_bestmove_depths_agreeing',
    'mean_deepest_change',
    'mean_deepest_change_ratio',
]
depthstats_df = read_csv(depthstats_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
depthstats_df = depthstats_df.set_index(['gamenum', 'side'])
# we have the gamelength column in another df, drop it here to avoid conflicts
depthstats_df.drop('gamelength', axis=1, inplace=True)
msg("Hi! Reading material")
material_path = '/data/material.csv'
columns = [
    'gamenum',
    'material_break_0',
    'material_break_1',
    'material_break_2',
    'material_break_3',
    'material_break_4',
    'opening_length',
    'midgame_length',
    'endgame_length',
    'mean_acwsa',
    'mean_acwsa_0',
    'mean_acwsa_1',
    'mean_acwsa_2',
    'mean_acwsa_3',
    'mean_acwsa_4',
    'mean_acwsa_5',
    'mean_acwsa_6',
    'mean_acwsa_7',
    'mean_acwsa_8',
    'mean_acwsa_9',
]
material_df = read_csv(material_path, sep=' ', engine='c', header=None, names=columns, index_col=False)
material_df = material_df.set_index(['gamenum'])
# Pad to the full game range and mean-impute games with no material row.
material_df = material_df.reindex(range(1, NUM_GAMES+1))
material_df = material_df.fillna(material_df.mean())
msg("Reading ELOscored data")
eloscored_cols = [
    'gamenum',
    'final_elo',
    'final_ply',
    'final_num_games',
    'final_elo_stdev',
    'elopath_min',
    'elopath_max',
]
eloscored_df = read_csv('/data/data.pgn.eloscored21', sep=',', engine='c', header=None, names=eloscored_cols, index_col=False)
eloscored_df = eloscored_df.set_index(['gamenum'])
msg("Reading ELOscored data 4")
eloscored4_cols = [
    'gamenum',
    'final_elo',
    'final_ply',
    'final_num_games',
    'final_elo_stdev',
]
# Suffix the value columns so the three eloscored variants can coexist.
eloscored4_cols[1:] = [x + '_elo4' for x in eloscored4_cols[1:]]
eloscored4_df = read_csv('/data/data.pgn.eloscored4', sep=',', engine='c', header=None, names=eloscored4_cols, index_col=False)
eloscored4_df = eloscored4_df.set_index(['gamenum'])
msg("Reading ELOscored data 10")
eloscored10_cols = [
    'gamenum',
    'final_elo',
    'final_ply',
    'final_num_games',
    'final_elo_stdev',
]
eloscored10_cols[1:] = [x + '_elo10' for x in eloscored10_cols[1:]]
eloscored10_df = read_csv('/data/data.pgn.eloscored10', sep=',', engine='c', header=None, names=eloscored10_cols, index_col=False)
eloscored10_df = eloscored10_df.set_index(['gamenum'])
msg("Hi! Reading moveaggs")
move_aggs = joblib.load('/data/move_aggs.p')
move_aggs.fillna(move_aggs.mean(), inplace=True)
msg("Hi! Reading wmoveaggs")
wmove_aggs = joblib.load('/data/wmove_aggs.p')
wmove_aggs.fillna(wmove_aggs.mean(), inplace=True)
wmove_aggs.rename(columns={'elo_pred': 'moveelo_weighted'}, inplace=True)
do_elochunk = True
if do_elochunk:
    ch_agg_df = joblib.load('/data/chunk_aggs.p')
    ch_agg_df.index = ch_agg_df.index.droplevel('elo')
    ch_agg_df.columns = ['elochunk_' + x for x in ch_agg_df.columns]
msg("Hi! Setting up playergame rows")
if do_elochunk:
    elorange_cols = list(ch_agg_df.columns.values)
    msg("elorange cols are %s" % elorange_cols)
msg('Preparing ELO df')
elo_rows = [[x[0][0], x[0][1], x[1]] for x in elos.items()]
elo_df = DataFrame(elo_rows, columns=['gamenum','side','elo'])
elo_df.set_index(['gamenum','side'], inplace=True)
msg('Joining DFs')
# Column-wise concat of the per-(game, side) sources, then outer-join the
# per-game sources so no games are dropped.
supplemental_dfs = [move_aggs[['mean', 'median', '25', '10', 'min', 'max', 'stdev']], wmove_aggs['moveelo_weighted'], depthstats_df, elo_df, crunched_df]
if do_elochunk:
    supplemental_dfs.append(ch_agg_df)
mega_df = concat(supplemental_dfs, axis=1)
mega_df = mega_df.join(material_df, how='outer')
mega_df = mega_df.join(eloscored_df, how='outer')
mega_df = mega_df.join(eloscored4_df, how='outer')
mega_df = mega_df.join(eloscored10_df, how='outer')
mega_df = mega_df.join(gb_df, how='outer')
yy_df = mega_df
msg("hi, columns are %s" % yy_df.columns)
# TODO confirm that all columns are there
def opening_feature(opening):
    """Bucket an opening by popularity: rare (<20 games), uncommon (<200),
    otherwise keep the opening name itself as the feature value."""
    games_seen = ocount[opening]
    if games_seen < 20:
        return 'rare'
    if games_seen < 200:
        return 'uncommon'
    return opening
# ---------------------------------------------------------------------------
# Derive model features, join opponent features, dummy-encode categoricals,
# impute missing values, and persist the final frame.
# ---------------------------------------------------------------------------
msg("Hi! Computing additional features")
yy_df['opening_feature'] = [opening_feature(openings[x]) for x in yy_df.index.get_level_values('gamenum')]
yy_df['opening_count'] = [ocount[openings[x]] for x in yy_df.index.get_level_values('gamenum')]
yy_df['any_grit'] = (yy_df['grit'] > 0)
yy_df['major_grit'] = (yy_df['grit'] > 5)
# Normalized log of mean error, clipped to tame outliers.
yy_df['nmerror'] = log((-1 * yy_df['meanerror']).clip(1,60)).clip(1,4) - 2.53
yy_df['premature_quit'] = (yy_df['gameoutcome'] == -1) & (yy_df['my_final_equity'] > -100)
yy_df['drawn_game'] = (yy_df['gameoutcome'] == 0)
yy_df['ended_by_checkmate'] = yy_df['won_by_checkmate'] | yy_df['lost_by_checkmate']
yy_df['noblunders'] = (yy_df['blunderrate'] == 0)
yy_df['final_equity'] = yy_df['my_final_equity'].abs().clip(0,300)
yy_df['early_lead'] = yy_df['early_lead'].clip(0,100)
yy_df['mean_depth_clipped'] = yy_df['mean_depth'].clip(0,25)
yy_df['gamelength_clipped'] = yy_df['gamelength'].clip(20,200)
# prepare opponent_df with selected info about opponent
opponent_columns = ['meanerror', 'blunderrate', 'perfectrate', 'grit', 'meanecho', 'mate_created', 'mate_destroyed', 'q_error_one', 'q_error_two', 'stdeverror', 'elo', 'any_grit', 'noblunders', 'nmerror', 'mean_depths_agreeing_ratio', 'mean_deepest_agree_ratio']
if do_elochunk:
    opponent_columns.extend(elorange_cols)
opponent_df = yy_df[opponent_columns]
opponent_df = opponent_df.reset_index()
# Flip side so each row joins against its opponent's features.
opponent_df['side'] = opponent_df['side'] * -1
opponent_df.set_index(['gamenum', 'side'], inplace=True)
opponent_df.columns = ['opponent_' + x for x in opponent_df.columns]
yy_df = concat([yy_df, opponent_df], axis=1)
# more derived columns that use opponent comparisons
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo']).clip(-500, 500)
yy_df['max_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].max(axis=1)
yy_df['min_nmerror'] = yy_df[['nmerror', 'opponent_nmerror']].min(axis=1)
yy_df['max_meanecho'] = yy_df[['meanecho', 'opponent_meanecho']].max(axis=1)
yy_df['elo_avg'] = (yy_df['elo'] + yy_df['opponent_elo'])/2.0
# NOTE(review): this unclipped assignment overwrites the clipped
# 'elo_advantage' computed a few lines above — confirm intent.
yy_df['elo_advantage'] = (yy_df['elo'] - yy_df['opponent_elo'])
yy_df['winner_elo_advantage'] = yy_df['elo_advantage'] * yy_df['gameoutcome']
msg("Hi! Computing dummy variables")
categorical_features = ['opening_feature']
dummies = get_dummies(yy_df[categorical_features]).astype(np.int8)
yy_df = yy_df.join(dummies)
# fill in missing values
msg("Hi! Filling in missing values")
full_index = pandas.MultiIndex.from_product([range(1,NUM_GAMES + 1), [1,-1]], names=['gamenum', 'side'])
yy_df = yy_df.reindex(full_index)
# Preserve the real elo column (including NaNs) through the mean-imputation.
yy_elo = yy_df['elo'].copy(True)
yy_df.fillna(yy_df.mean(numeric_only=True), inplace=True)
yy_df.fillna(False, inplace=True)
yy_df['elo'] = yy_elo
# stupid patch for some stupid opening feature that got assigned to False by fillna ?!!?!?!?
yy_df.loc[yy_df['opening_feature'] == False,'opening_feature'] = 'rare'
msg("Hi! Writing yy_df to disk")
yy_df.to_pickle(sys.argv[3])
msg("Column counts are:")
counts = yy_df.count(axis=0)
print counts
|
[
"dsjoerg@gmail.com"
] |
dsjoerg@gmail.com
|
307b39b476091ab984dde86e503be570839f4667
|
77a7508c3a647711191b924959db80fb6d2bd146
|
/src/gamesbyexample/countingquiz.py
|
8b3131533dd87c5a56493d1814d27b3cca90f27e
|
[
"MIT"
] |
permissive
|
surlydev/PythonStdioGames
|
ff7edb4c8c57a5eb6e2036e2b6ebc7e23ec994e0
|
d54c2509c12a5b1858eda275fd07d0edd456f23f
|
refs/heads/master
| 2021-05-22T21:01:15.529159
| 2020-03-26T07:34:10
| 2020-03-26T07:34:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
"""Counting Quiz, by Al Sweigart al@inventwithpython.com
Use multiplication and subtraction to count the number of stars shown
as fast as possible.
Tags: short, math"""
import math, random, time
def main():
print('''Counting Quiz, by Al Sweigart al@inventwithpython.com
Use multiplication and subtraction to count the number of stars shown
as fast as possible. The quiz is 60 seconds long. For example:
* * * * * *
* * * * *
* * * * *
This is a 6 x 3 star field with 2 missing stars.
The answer is 6 x 3 - 2 = 16
''')
while True:
input('Press Enter to begin...')
runQuiz()
print('Would you like to play again? Y/N')
response = input().upper()
if not response.startswith('Y'):
print('Thanks for playing!')
break
def runQuiz():
correct = 0
startTime = time.time()
while time.time() < startTime + 60:
print('\n' * 40) # Clear the screen by printing several newlines.
# Generate the problem and the star field to display:
width = random.randint(1, 10)
height = random.randint(1, 10)
canvas = {}
for x in range(width):
for y in range(height):
canvas[(x, y)] = '*'
numMissing = random.randint(0, math.sqrt(width * height) // 2)
for i in range(numMissing):
while True:
x = random.randint(0, width - 1)
y = random.randint(0, height - 1)
if canvas[(x, y)] == '*':
break
canvas[(x, y)] = ' '
answer = width * height - numMissing
# Display the star field:
for y in range(height):
for x in range(width):
print(canvas[(x, y)] + ' ', end='')
print() # Print a newline.
# Let the player answer and determine if they're right or wrong.
response = input('Enter the number of stars. > ')
if response.isdecimal() and int(response) == answer:
correct += 1
else:
print('Wrong:', answer)
time.sleep(1)
print('Time\'s up!')
print('You were able to count', correct, 'star fields correctly.')
print()
# If the program is run (instead of imported), run the game:
if __name__ == '__main__':
main()
|
[
"asweigart@gmail.com"
] |
asweigart@gmail.com
|
8fd5e717b4d06d2f26535413e07fae832635769d
|
72e463c26daf79b7d380db59a58849e3cd095a7e
|
/week7/day1_api.py
|
f7bcb6d95489339333501141914115cb6d9975ba
|
[] |
no_license
|
tdhuynh/tiy_class_notes
|
dcc5454af63ca888cfdb99e85f4370cabce88f88
|
a254d77f52cc438476d80ff58bfa9759de7826fa
|
refs/heads/master
| 2020-04-15T12:19:30.045552
| 2016-11-09T14:30:06
| 2016-11-09T14:30:06
| 68,213,599
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 973
|
py
|
import requests
# result = requests.get("http://swapi.co/api/people/")
# # print(result.text)
# json_result = result.json()
# # print(json_result)
# # print(json_result["name"])
# for person in json_result["results"]:
# print(person["name"])
# result = requests.get(json_result["next"])
# json_result = result.json()
#
# for person in json_result["results"]:
# print(person["name"])
###################
def get_data(endpoint, lookup="name"):
url = "http://swapi.co/api/{}/".format(endpoint)
while url:
result = requests.get(url)
json_result = result.json()
for person in json_result["results"]:
print(person[lookup])
if input("Press Enter to keep going, type 'n' to stop " ):
break
url = json_result["next"]
while True:
value = input("What do you want to search for? (films) or (people)? ")
if value == "films":
get_data(value, lookup="title")
get_data(value)
|
[
"tommyhuynh93@gmail.com"
] |
tommyhuynh93@gmail.com
|
40be0ddf55f39cfcc4482a4bd777e333af9190e2
|
8ef8e6818c977c26d937d09b46be0d748022ea09
|
/cv/distiller/CWD/pytorch/mmrazor/tests/test_models/test_losses/test_distillation_losses.py
|
77233b81fce0cffa1c0bce23a3ba60bdeed31133
|
[
"Apache-2.0"
] |
permissive
|
Deep-Spark/DeepSparkHub
|
eb5996607e63ccd2c706789f64b3cc0070e7f8ef
|
9d643e88946fc4a24f2d4d073c08b05ea693f4c5
|
refs/heads/master
| 2023-09-01T11:26:49.648759
| 2023-08-25T01:50:18
| 2023-08-25T01:50:18
| 534,133,249
| 7
| 6
|
Apache-2.0
| 2023-03-28T02:54:59
| 2022-09-08T09:07:01
|
Python
|
UTF-8
|
Python
| false
| false
| 8,154
|
py
|
# Copyright (c) OpenMMLab. All rights reserved.
from unittest import TestCase
import torch
from mmengine.structures import BaseDataElement
from mmrazor import digit_version
from mmrazor.models import (ABLoss, ActivationLoss, ATLoss, CRDLoss, DKDLoss,
FBKDLoss, FTLoss, InformationEntropyLoss,
KDSoftCELoss, MGDLoss, OFDLoss, OnehotLikeLoss,
PKDLoss)
class TestLosses(TestCase):
@classmethod
def setUpClass(cls):
cls.feats_1d = torch.randn(5, 6)
cls.feats_2d = torch.randn(5, 2, 3)
cls.feats_3d = torch.randn(5, 2, 3, 3)
num_classes = 6
cls.labels = torch.randint(0, num_classes, [5])
def test_ofd_loss(self):
ofd_loss = OFDLoss()
self.normal_test_1d(ofd_loss)
self.normal_test_3d(ofd_loss)
# test the calculation
s_feat_0 = torch.Tensor([[1, 1], [2, 2], [3, 3]])
t_feat_0 = torch.Tensor([[0, 0], [1, 1], [2, 2]])
ofd_loss_num_0 = ofd_loss.forward(s_feat_0, t_feat_0)
assert ofd_loss_num_0 != torch.tensor(0.0)
s_feat_1 = torch.Tensor([[1, 1], [2, 2], [3, 3]])
t_feat_1 = torch.Tensor([[2, 2], [3, 3], [4, 4]])
ofd_loss_num_1 = ofd_loss.forward(s_feat_1, t_feat_1)
assert ofd_loss_num_1 != torch.tensor(0.0)
s_feat_2 = torch.Tensor([[-3, -3], [-2, -2], [-1, -1]])
t_feat_2 = torch.Tensor([[-2, -2], [-1, -1], [0, 0]])
ofd_loss_num_2 = ofd_loss.forward(s_feat_2, t_feat_2)
assert ofd_loss_num_2 == torch.tensor(0.0)
def normal_test_1d(self, loss_instance, labels=False):
args = tuple([self.feats_1d, self.feats_1d])
if labels:
args += (self.labels, )
loss_1d = loss_instance.forward(*args)
self.assertTrue(loss_1d.numel() == 1)
def normal_test_2d(self, loss_instance, labels=False):
args = tuple([self.feats_2d, self.feats_2d])
if labels:
args += (self.labels, )
loss_2d = loss_instance.forward(*args)
self.assertTrue(loss_2d.numel() == 1)
def normal_test_3d(self, loss_instance, labels=False):
args = tuple([self.feats_3d, self.feats_3d])
if labels:
args += (self.labels, )
loss_3d = loss_instance.forward(*args)
self.assertTrue(loss_3d.numel() == 1)
def test_ab_loss(self):
ab_loss_cfg = dict(loss_weight=1.0, margin=1.0)
ab_loss = ABLoss(**ab_loss_cfg)
self.normal_test_1d(ab_loss)
self.normal_test_2d(ab_loss)
self.normal_test_3d(ab_loss)
def _mock_crd_data_sample(self, sample_idx_list):
data_samples = []
for _idx in sample_idx_list:
data_sample = BaseDataElement()
data_sample.set_data(dict(sample_idx=_idx))
data_samples.append(data_sample)
return data_samples
def test_crd_loss(self):
crd_loss = CRDLoss(**dict(neg_num=5, sample_n=10, dim_out=6))
sample_idx_list = torch.tensor(list(range(5)))
data_samples = self._mock_crd_data_sample(sample_idx_list)
loss = crd_loss.forward(self.feats_1d, self.feats_1d, data_samples)
self.assertTrue(loss.numel() == 1)
# test the calculation
s_feat_0 = torch.randn((5, 6))
t_feat_0 = torch.randn((5, 6))
crd_loss_num_0 = crd_loss.forward(s_feat_0, t_feat_0, data_samples)
assert crd_loss_num_0 != torch.tensor(0.0)
s_feat_1 = torch.randn((5, 6))
t_feat_1 = torch.rand((5, 6))
sample_idx_list_1 = torch.tensor(list(range(5)))
data_samples_1 = self._mock_crd_data_sample(sample_idx_list_1)
crd_loss_num_1 = crd_loss.forward(s_feat_1, t_feat_1, data_samples_1)
assert crd_loss_num_1 != torch.tensor(0.0)
def test_dkd_loss(self):
dkd_loss_cfg = dict(loss_weight=1.0)
dkd_loss = DKDLoss(**dkd_loss_cfg)
# dkd requires label logits
self.normal_test_1d(dkd_loss, labels=True)
def test_ft_loss(self):
ft_loss_cfg = dict(loss_weight=1.0)
ft_loss = FTLoss(**ft_loss_cfg)
assert ft_loss.loss_weight == 1.0
self.normal_test_1d(ft_loss)
self.normal_test_2d(ft_loss)
self.normal_test_3d(ft_loss)
def test_dafl_loss(self):
dafl_loss_cfg = dict(loss_weight=1.0)
ac_loss = ActivationLoss(**dafl_loss_cfg, norm_type='abs')
oh_loss = OnehotLikeLoss(**dafl_loss_cfg)
ie_loss = InformationEntropyLoss(**dafl_loss_cfg, gather=False)
# normal test with only one input
loss_ac = ac_loss.forward(self.feats_1d)
self.assertTrue(loss_ac.numel() == 1)
loss_oh = oh_loss.forward(self.feats_1d)
self.assertTrue(loss_oh.numel() == 1)
loss_ie = ie_loss.forward(self.feats_1d)
self.assertTrue(loss_ie.numel() == 1)
with self.assertRaisesRegex(AssertionError,
'"norm_type" must be "norm" or "abs"'):
_ = ActivationLoss(**dafl_loss_cfg, norm_type='random')
# test gather_tensors
ie_loss = InformationEntropyLoss(**dafl_loss_cfg, gather=True)
ie_loss.world_size = 2
if digit_version(torch.__version__) >= digit_version('1.8.0'):
with self.assertRaisesRegex(
RuntimeError,
'Default process group has not been initialized'):
loss_ie = ie_loss.forward(self.feats_1d)
else:
with self.assertRaisesRegex(
AssertionError,
'Default process group is not initialized'):
loss_ie = ie_loss.forward(self.feats_1d)
def test_kdSoftce_loss(self):
kdSoftce_loss_cfg = dict(loss_weight=1.0)
kdSoftce_loss = KDSoftCELoss(**kdSoftce_loss_cfg)
# kd soft ce loss requires label logits
self.normal_test_1d(kdSoftce_loss, labels=True)
def test_at_loss(self):
at_loss_cfg = dict(loss_weight=1.0)
at_loss = ATLoss(**at_loss_cfg)
assert at_loss.loss_weight == 1.0
self.normal_test_1d(at_loss)
self.normal_test_2d(at_loss)
self.normal_test_3d(at_loss)
def test_fbkdloss(self):
fbkdloss_cfg = dict(loss_weight=1.0)
fbkdloss = FBKDLoss(**fbkdloss_cfg)
spatial_mask = torch.randn(1, 1, 3, 3)
channel_mask = torch.randn(1, 4, 1, 1)
channel_pool_adapt = torch.randn(1, 4)
relation_adpt = torch.randn(1, 4, 3, 3)
s_input = (spatial_mask, channel_mask, channel_pool_adapt,
spatial_mask, channel_mask, relation_adpt)
t_input = (spatial_mask, channel_mask, spatial_mask, channel_mask,
relation_adpt)
fbkd_loss = fbkdloss(s_input, t_input)
self.assertTrue(fbkd_loss.numel() == 1)
def test_pkdloss(self):
pkd_loss = PKDLoss(loss_weight=1.0)
feats_S, feats_T = torch.rand(2, 256, 4, 4), torch.rand(2, 256, 4, 4)
loss = pkd_loss(feats_S, feats_T)
self.assertTrue(loss.numel() == 1)
self.assertTrue(0. <= loss <= 1.)
num_stages = 4
feats_S = (torch.rand(2, 256, 4, 4) for _ in range(num_stages))
feats_T = (torch.rand(2, 256, 4, 4) for _ in range(num_stages))
loss = pkd_loss(feats_S, feats_T)
self.assertTrue(loss.numel() == 1)
self.assertTrue(0. <= loss <= num_stages * 1.)
feats_S, feats_T = torch.rand(2, 256, 2, 2), torch.rand(2, 256, 4, 4)
loss = pkd_loss(feats_S, feats_T)
self.assertTrue(loss.numel() == 1)
self.assertTrue(0. <= loss <= 1.)
pkd_loss = PKDLoss(loss_weight=1.0, resize_stu=False)
feats_S, feats_T = torch.rand(2, 256, 2, 2), torch.rand(2, 256, 4, 4)
loss = pkd_loss(feats_S, feats_T)
self.assertTrue(loss.numel() == 1)
self.assertTrue(0. <= loss <= 1.)
def test_mgd_loss(self):
mgd_loss = MGDLoss(alpha_mgd=0.00002)
feats_S, feats_T = torch.rand(2, 256, 4, 4), torch.rand(2, 256, 4, 4)
loss = mgd_loss(feats_S, feats_T)
self.assertTrue(loss.numel() == 1)
|
[
"mingjiang.li@iluvatar.ai"
] |
mingjiang.li@iluvatar.ai
|
8a8b8ba79006a28316ef9aa505f79d5b02b4b33a
|
2481cde6506743565dff2b405a2396daf208ab3e
|
/src/ranking/management/modules/algorithm_yandex.py
|
704ac764e959da19a6154d23d666eb6a1279c8cb
|
[
"Apache-2.0"
] |
permissive
|
aropan/clist
|
4819a3036d179595e4df8c646aff2ed593b9dad3
|
5c805b2af71acee97f993f19d8d4e229f7f5b411
|
refs/heads/master
| 2023-08-31T11:15:17.987776
| 2023-08-27T21:51:14
| 2023-08-27T21:52:16
| 187,111,853
| 276
| 35
|
Apache-2.0
| 2023-09-06T18:42:53
| 2019-05-16T22:57:03
|
Python
|
UTF-8
|
Python
| false
| false
| 586
|
py
|
# -*- coding: utf-8 -*-
import re
from ranking.management.modules import yandex
class Statistic(yandex.Statistic):
def get_standings(self, *args, **kwargs):
standings = super().get_standings(*args, **kwargs)
if re.search(r'\bfinals?\b', self.name, re.I):
if 'medals' not in standings.get('options', {}) and 'medals' not in self.info.get('standings', {}):
options = standings.setdefault('options', {})
options['medals'] = [{'name': name, 'count': 1} for name in ('gold', 'silver', 'bronze')]
return standings
|
[
"nap0rbl4@gmail.com"
] |
nap0rbl4@gmail.com
|
1bf2158bc437ca181fbc66a1c3e55214a6f792ff
|
7ed9b1d87012cd11ecc1625cadbea861223e82c5
|
/plugins/devices/FakeCamera.py
|
cfad8329be69fc9640b4f45f25b6f0e84cc7df71
|
[] |
no_license
|
dsblank/pyrobot
|
577bdcb1cd68777b76aaada11ff3d3c3c5231c38
|
d9c19947767a97980ec31d2096ec157bafa55f0d
|
refs/heads/master
| 2021-01-21T21:19:48.788998
| 2019-04-01T03:55:06
| 2019-04-01T03:55:06
| 94,819,207
| 2
| 2
| null | 2019-03-30T17:03:32
| 2017-06-19T20:43:18
|
Python
|
UTF-8
|
Python
| false
| false
| 174
|
py
|
from pyrobot.camera.fake import FakeCamera
from pyrobot.vision.cvision import VisionSystem
def INIT(robot):
return {"camera": FakeCamera(visionSystem = VisionSystem())}
|
[
"doug.blank@gmail.com"
] |
doug.blank@gmail.com
|
aee96f6de4e6fd2ecd47ec453c188c2895fc41c9
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/173/usersdata/268/81652/submittedfiles/moedas.py
|
b3b9d67c5c88d783592b6b36e092be5012cfea8b
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 380
|
py
|
# -*- coding: utf-8 -*-
a=int(input('Digite o valor de a: '))
b=int(input('Digite o valor de b: '))
c=int(input('Digite o valor da cédula: '))
w=0
x9=0
while(w<c):
duvida= a*w
comprovacao= (n-a*w)
if (comprovacao%b)==0:
print(duvida)
print(b*(comprovacao/b)
if ((comprovacao%b)!=0) :
x9=x9 +1
w=w+1
if(x9==n):
print('N')
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
ff00a04615743bcd931d99ee7f9a21cade5d3410
|
3ea3f46bd4d7231c5eb5c1e1c02625f5290cac76
|
/heart/migrations/0003_auto_20170317_1846.py
|
0573637431da4546fd61ccdbdfb05f3edf19ea1b
|
[] |
no_license
|
moonclearner/simpleDjangoProject
|
0340b0a744651bcc9dbd7a52b12c4827d40a7a5f
|
51fc70d4c499aa64e82a6f02c913f44c45cad323
|
refs/heads/master
| 2021-01-23T01:41:25.481027
| 2017-04-11T14:29:09
| 2017-04-11T14:29:09
| 85,927,020
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,117
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-17 10:46
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('heart', '0002_auto_20170317_1841'),
]
operations = [
migrations.AlterField(
model_name='hbeat',
name='Created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='hpluse',
name='Created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='hpres',
name='Created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='hrelax',
name='Created_at',
field=models.DateTimeField(auto_now_add=True),
),
migrations.AlterField(
model_name='htem',
name='Created_at',
field=models.DateTimeField(auto_now_add=True),
),
]
|
[
"718857460@qq.com"
] |
718857460@qq.com
|
b5097dc639ce1b85de30e5898a505721e3bb28f1
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Deal Capture Examples/FPythonCode/MiniFutureInsDef.py
|
f25b228e0539ca28ceaaee4e44dc3bd4a628ca06
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,795
|
py
|
from __future__ import print_function
import acm, ael
def SetUpMiniFuture(definitionSetUp):
from DealCaptureSetup import AddInfoSetUp, CustomMethodSetUp
definitionSetUp.AddSetupItems(
AddInfoSetUp( recordType='Instrument',
fieldName='MiniFuture',
dataType='Boolean',
description='CustomInsdef',
dataTypeGroup='Standard',
subTypes=['Warrant'],
defaultValue='',
mandatory=False),
AddInfoSetUp( recordType='Instrument',
fieldName='RateMargin',
dataType='Double',
description='CustomInsdef',
dataTypeGroup='Standard',
subTypes=['Warrant'],
defaultValue='',
mandatory=False)
)
definitionSetUp.AddSetupItems(
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFuture',
methodName='MiniFuture'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureFinancingLevel',
methodName='MiniFutureFinancingLevel'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureFinancingSpread',
methodName='MiniFutureFinancingSpread'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureInterestRateMargin',
methodName='MiniFutureInterestRateMargin'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureStopLoss',
methodName='MiniFutureStopLoss'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureUnderlyingType',
methodName='MiniFutureUnderlyingType'),
CustomMethodSetUp( className='FWarrant',
customMethodName='GetMiniFutureUnderlyingType',
methodName='MiniFutureUnderlyingType'),
CustomMethodSetUp( className='FWarrant',
customMethodName='SetMiniFuture',
methodName='SetMiniFuture'),
CustomMethodSetUp( className='FWarrant',
customMethodName='SetMiniFutureFinancingLevel',
methodName='MiniFutureFinancingLevel'),
CustomMethodSetUp( className='FWarrant',
customMethodName='SetMiniFutureInterestRateMargin',
methodName='MiniFutureInterestRateMargin'),
CustomMethodSetUp( className='FWarrant',
customMethodName='SetMiniFutureStopLoss',
methodName='MiniFutureStopLoss'),
CustomMethodSetUp( className='FWarrant',
customMethodName='SetMiniFutureUnderlyingType',
methodName='MiniFutureUnderlyingType')
)
def SetUnderlyingType(instrument, underlyingType):
instrument.UnderlyingType(underlyingType)
return
def GetUnderlyingType(instrument):
return instrument.UnderlyingType()
def GetMiniFuture(instrument):
isMiniFuture = None
try:
isMiniFuture = instrument.AdditionalInfo().MiniFuture()
except Exception as e:
print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.")
return isMiniFuture
def GetFinancingSpread(instrument):
if instrument.StrikePrice():
premium=instrument.Barrier()-instrument.StrikePrice()
premiumPercent=premium/instrument.StrikePrice()*100
return premiumPercent
else:
return 0
def SetStopLoss(instrument, stopLoss):
instrument.Barrier(stopLoss)
if instrument.StrikePrice():
premium=instrument.Barrier()-instrument.StrikePrice()
if premium < 0:
instrument.SuggestOptionType(False)
else:
instrument.SuggestOptionType(True)
return
def GetStopLoss(instrument):
return instrument.Barrier()
def SetFinancingLevel(instrument, financingLevel):
instrument.StrikePrice(financingLevel)
if instrument.StrikePrice():
premium=instrument.Barrier()-instrument.StrikePrice()
if premium < 0:
instrument.SuggestOptionType(False)
else:
instrument.SuggestOptionType(True)
return
def GetFinancingLevel(instrument):
return instrument.StrikePrice()
def SetMiniFuture(instrument, miniFuture):
try:
instrument.AdditionalInfo().MiniFuture(miniFuture)
except:
print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.")
return
def SetRateMargin(instrument, rateMargin):
try:
instrument.AdditionalInfo().RateMargin(rateMargin)
except:
print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type double called RateMargin and restart system.")
def GetRateMargin(instrument):
try:
if instrument.AdditionalInfo().RateMargin():
return instrument.AdditionalInfo().RateMargin()
else:
return 0.0
except:
print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type double called RateMargin and restart system.")
def UpdateDefaultInstrument(ins):
# Not possible to set AddInfo fields on default instrument. Set Mini Future field to true.
try:
ins.AdditionalInfo().MiniFuture(True)
except:
print ("Additional Info field missing. Please create an Additional Info field on Instrument (Warrant) of type boolean called MiniFuture and restart system.")
if not ins.Exotic():
# This code will set up the Barrier if no default barrier instrument exists
ins.ExoticType('Other')
e=acm.FExotic()
ins.Exotics().Add(e)
e.RegisterInStorage()
e.BarrierOptionType("Up & In")
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
b506ffdba484cd5f343426bd1f96c751004ba9fa
|
eaeb685d13ef6c58364c5497c911f3e2f8c49a43
|
/Solution/922_Sort_Array_By_Parity_II.py
|
09d03afec572e8518220ff839a03cf91f98b8253
|
[] |
no_license
|
raririn/LeetCodePractice
|
8b3a18e34a2e3524ec9ae8163e4be242c2ab6d64
|
48cf4f7d63f2ba5802c41afc2a0f75cc71b58f03
|
refs/heads/master
| 2023-01-09T06:09:02.017324
| 2020-09-10T02:34:46
| 2020-09-10T02:34:46
| 123,109,055
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 557
|
py
|
class Solution:
def sortArrayByParityII(self, A: List[int]) -> List[int]:
ret = [0] * len(A)
pos_e = 0
pos_o = 1
for i in A:
if i % 2 == 0:
ret[pos_e] = i
pos_e += 2
else:
ret[pos_o] = i
pos_o += 2
return ret
'''
Runtime: 244 ms, faster than 81.55% of Python3 online submissions for Sort Array By Parity II.
Memory Usage: 16 MB, less than 8.70% of Python3 online submissions for Sort Array By Parity II.
'''
|
[
"raririn.sandbag@gmail.com"
] |
raririn.sandbag@gmail.com
|
8c4e0732907c0a50c71b4fd46d7db075c8ad46a5
|
760fbdca58de7e2fb146ec60905ded7497b1812b
|
/ibm_whcs_sdk/insights_for_medical_literature/tests/integration/test_search_typeahead.py
|
ee752832dabdf1753e9150dd069ef924defa1b65
|
[
"Apache-2.0"
] |
permissive
|
dmansjur/whcs-python-sdk
|
c5d28742cefc65e19a7eb5de0027fe9f59b1e689
|
110a847c91d5779df91c6562394bde557ee132e5
|
refs/heads/master
| 2021-05-26T21:49:44.515561
| 2020-04-07T17:17:36
| 2020-04-07T17:17:36
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,587
|
py
|
# coding: utf-8
# Copyright 2018 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is an example of invoking the /v1/corpora/{corpus}/search/{corpus}/typeahead REST API
# of Insights for Medical Literature.
import configparser
import ibm_whcs_sdk.insights_for_medical_literature as wh
# To access a secure environment additional parameters are needed on the constructor which are listed below
CONFIG = configparser.RawConfigParser()
CONFIG.read('./ibm_whcs_sdk/insights_for_medical_literature/tests/config.ini')
BASE_URL = CONFIG.get('settings', 'base_url')
APIKEY = CONFIG.get('settings', 'key')
IAMURL = CONFIG.get('settings', 'iam_URL')
LEVEL = CONFIG.get('settings', 'logging_level')
VERSION = CONFIG.get('settings', 'version')
DISABLE_SSL = CONFIG.get('settings', 'disable_ssl')
VERSION = CONFIG.get('settings', 'version')
CORPUS = CONFIG.get('settings', 'corpus')
ONTOLGOY = CONFIG.get('search', 'umls')
QUERY = CONFIG.get('search', 'typeahead_query')
TYPE = CONFIG.get('search', 'typeahead_type')
IML_TEST = wh.InsightsForMedicalLiteratureServiceV1(BASE_URL, APIKEY, IAMURL, VERSION, LEVEL, DISABLE_SSL)
# test can only be successful against a custom plan intance
def test_search_typeahead():
types = [TYPE]
ontologies = [ONTOLGOY]
response = IML_TEST.typeahead(corpus=CORPUS, query=QUERY, types=types, category='disorders', verbose=False,
limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies)
concept_list = wh.ConceptListModel._from_dict(response.get_result())
assert concept_list is not None
concepts = concept_list.concepts
for concept in concepts:
assert concept.cui is not None
assert concept.ontology is not None
def test_search_typeahead_verbose():
types = [TYPE]
ontologies = [ONTOLGOY]
response = IML_TEST.typeahead(corpus=CORPUS, query=QUERY, types=types, category='disorders', verbose=True,
limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies)
concept_list = wh.ConceptListModel._from_dict(response.get_result())
assert concept_list is not None
concepts = concept_list.concepts
for concept in concepts:
assert concept.cui is not None
assert concept.ontology is not None
def test_search_typeahead_no_corpus():
types = [TYPE]
ontologies = [ONTOLGOY]
try:
response = IML_TEST.typeahead(corpus=None, query=QUERY, types=types, category='disorders', verbose=True,
limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies)
except ValueError as imle:
assert imle is not None
def test_search_typeahead_verbose_no_query():
types = [TYPE]
ontologies = [ONTOLGOY]
try:
response = IML_TEST.typeahead(corpus=CORPUS, query=None, types=types, category='disorders', verbose=True,
limit=10, max_hit_count=1000, no_duplicates=True, ontologies=ontologies)
except ValueError as imle:
assert imle is not None
|
[
"dcweber@us.ibm.com"
] |
dcweber@us.ibm.com
|
6dbabae65ed075a12913a12e8af2019751371a24
|
fdd2ed32e45ca3dcc978cf7e5af76d2afd8cb9f9
|
/87.py
|
ecdbdd1ab3aee85a6e5da158c4511eb5ef0c7440
|
[] |
no_license
|
Narendon123/python
|
e5295e5b71867fd6a90d080c01e2db6930659f95
|
cf0b3dd4ff4eb4d6d44f061b45d00baa25de5a38
|
refs/heads/master
| 2020-05-31T06:06:19.230781
| 2019-07-11T12:51:25
| 2019-07-11T12:51:25
| 190,134,515
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 138
|
py
|
w=input()
w=w.split()
a=int(w[0])
b=int(w[1])
i=1
while(i<=a and i<=b):
if(a%i==0 and b%i==0):
gcd=i
i=i+1
print(gcd)
|
[
"noreply@github.com"
] |
Narendon123.noreply@github.com
|
8a33374b9c01ded55865a5c9464ca843e32074d6
|
37220d7b60d682eb1abf40326d061485581aab36
|
/ajax/urls.py
|
b06af246996089bc8452ee5a25eabcdc705623a1
|
[
"BSD-3-Clause"
] |
permissive
|
lautarianoo/LautAvito
|
547fba9a0bb3a65aac6132e00382a8876bca4a28
|
106dcb6f04230af2540bd3883c85713828cd051c
|
refs/heads/master
| 2023-06-24T11:08:17.889875
| 2021-07-26T17:30:08
| 2021-07-26T17:30:08
| 377,897,865
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 137
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path('get_districts', views.get_districts, name='get_districts')
]
|
[
"neonchick1"
] |
neonchick1
|
49c119a4c7491a7b5b8bcf0c18b0dbbd7e0c9b34
|
19ac8aa8ee916cef99ddc85b6565c4d6fbe40749
|
/FunctionsAndFunctionalProgramming/functionalFizzing.py
|
953c863c737f9d99921591c2b75d1cc537db621e
|
[] |
no_license
|
Darrenrodricks/IntermediatePythonNanodegree
|
53570bb1b97d9d10d6e6bd19d3a1f8f654a1cfe9
|
5e597fbe147c23b694fc9b354797e443f0a87a67
|
refs/heads/main
| 2023-06-25T18:54:38.962503
| 2021-07-28T17:15:44
| 2021-07-28T17:15:44
| 389,006,852
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
import helper
"""Generate an infinite stream of successively larger random lists."""
def generate_cases():
a = 0
while True:
yield helper.random_list(a)
a += 1
if __name__ == '__main__':
for case in generate_cases():
if len(case) > 10:
break
print(case)
|
[
"noreply@github.com"
] |
Darrenrodricks.noreply@github.com
|
d5f638b16f492c7594a3fcea541c45e8aae9fab2
|
2435099201902a12689621baba62f7799a260ae3
|
/backend/red_frost_25038/urls.py
|
990009da86771f717c4f523d324c2d500dcb88a5
|
[] |
no_license
|
crowdbotics-apps/red-frost-25038
|
eab0bada99927f8f7d76f4866bbcf042be762a0d
|
cfb48c84f707a558d0cf6405f5057371bdcb2778
|
refs/heads/master
| 2023-03-30T10:07:45.116090
| 2021-03-15T15:46:25
| 2021-03-15T15:46:25
| 348,029,901
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,217
|
py
|
"""red_frost_25038 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include, re_path
from django.views.generic.base import TemplateView
from allauth.account.views import confirm_email
from rest_framework import permissions
from drf_yasg.views import get_schema_view
from drf_yasg import openapi
urlpatterns = [
path("", include("home.urls")),
path("accounts/", include("allauth.urls")),
path("modules/", include("modules.urls")),
path("api/v1/", include("home.api.v1.urls")),
path("admin/", admin.site.urls),
path("users/", include("users.urls", namespace="users")),
path("rest-auth/", include("rest_auth.urls")),
# Override email confirm to use allauth's HTML view instead of rest_auth's API view
path("rest-auth/registration/account-confirm-email/<str:key>/", confirm_email),
path("rest-auth/registration/", include("rest_auth.registration.urls")),
]
admin.site.site_header = "Red Frost"
admin.site.site_title = "Red Frost Admin Portal"
admin.site.index_title = "Red Frost Admin"
# swagger
api_info = openapi.Info(
title="Red Frost API",
default_version="v1",
description="API documentation for Red Frost App",
)
schema_view = get_schema_view(
api_info,
public=True,
permission_classes=(permissions.IsAuthenticated,),
)
urlpatterns += [
path("api-docs/", schema_view.with_ui("swagger", cache_timeout=0), name="api_docs")
]
urlpatterns += [path("", TemplateView.as_view(template_name='index.html'))]
urlpatterns += [re_path(r"^(?:.*)/?$",
TemplateView.as_view(template_name='index.html'))]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
195624fb20c54ced15a65be4c1af7cb329cc3b1c
|
31f9333012fd7dad7b8b12c1568f59f33420b0a5
|
/Alessandria/env/lib/python3.8/site-packages/django/contrib/staticfiles/testing.py
|
754bd296574e9e20066c857e41043e1bb11bfcc3
|
[] |
no_license
|
jcmloiacono/Django
|
0c69131fae569ef8cb72b135ab81c8e957d2a640
|
20b9a4a1b655ae4b8ff2a66d50314ed9732b5110
|
refs/heads/master
| 2022-11-15T22:18:57.610642
| 2020-07-14T14:43:16
| 2020-07-14T14:43:16
| 255,125,001
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 464
|
py
|
from django.contrib.staticfiles.handlers import StaticFilesHandler
from django.test import LiveServerTestCase
class StaticLiveServerTestCase(LiveServerTestCase):
"""
Extend django.test.LiveServerTestCase to transparently overlay at test
execution-time the assets provided by the staticfiles app2 finders. This
means you don't need to run collectstatic before or as a part of your tests
setup.
"""
static_handler = StaticFilesHandler
|
[
"jcmloiacono@gmail.com"
] |
jcmloiacono@gmail.com
|
e02f4a0c5b78cca43171902e5b8212d0c9bf443a
|
2fe18f4babd857381c2251f1c2437ccdae234dd8
|
/bookmarks/bookmarks/settings.py
|
273af7e947825b97a57cf7f7558397f12874a3f2
|
[] |
no_license
|
Akhtyrtsev/bookmarks
|
62f23d87c9442aaa2f56c73dd52ddbf8e456f7e1
|
c8c52f1a9d4674a7187ad2408af7c090424a9738
|
refs/heads/master
| 2020-07-03T23:17:44.547699
| 2019-08-15T12:27:04
| 2019-08-15T12:27:04
| 202,083,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,627
|
py
|
"""
Django settings for bookmarks project.
Generated by 'django-admin startproject' using Django 2.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control; rotate it and load
# from an environment variable before any production deployment.
SECRET_KEY = '71wl&ele@0v_^508xm(cy)z!%6is^_sb1k_k4b$2=1gzupra-r'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'account.apps.AccountConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bookmarks.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bookmarks.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
LOGIN_REDIRECT_URL = 'dashboard'
LOGIN_URL = 'login'
LOGOUT_URL = 'logout'
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
AUTHENTICATION_BACKENDS = [
'django.contrib.auth.backends.ModelBackend',
'account.authentication.EmailAuthBackend',
]
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media/')
# SMTP configuration for outgoing mail (Gmail over STARTTLS on port 587).
# SECURITY WARNING: real account credentials are hardcoded and committed to
# version control — move them to environment variables and rotate the password.
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_HOST_USER = 'mytestmail842@gmail.com'
EMAIL_HOST_PASSWORD = 'mytestmail842mytestmail842'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
|
[
"akhtyrtsev@gmail.com"
] |
akhtyrtsev@gmail.com
|
440d156989c7d14212ee7acec2a615fa1d0d34cc
|
f75f9c0e7192170a5846c0b726b10e645d5812b7
|
/tests/test_models.py
|
845a6eaf73b1e3765e21211184bc835c50c73de7
|
[
"MIT"
] |
permissive
|
mzbotr/betfair.py
|
6feff7250fec38c31ef9c89fc15a057c935d7274
|
dca804a4eaf999af54c53589e9559409fae26d6f
|
refs/heads/master
| 2021-01-21T06:02:35.902807
| 2015-06-15T04:05:51
| 2015-06-15T04:05:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,563
|
py
|
# -*- coding: utf-8 -*-
import pytest
from enum import Enum
from schematics.types import StringType
from betfair.meta.types import EnumType
from betfair.meta.types import ModelType
from betfair.meta.models import BetfairModel
def test_field_inflection():
    """BetfairModel maps snake_case attributes to camelCase wire fields.

    Construction accepts the camelCase name, the attribute is readable in
    snake_case, and serialization emits camelCase again.
    """
    class FakeModel(BetfairModel):
        underscore_separated_field = StringType()
    record = FakeModel(underscoreSeparatedField='test')
    assert record.underscore_separated_field == 'test'
    serialized = record.serialize()
    assert 'underscoreSeparatedField' in serialized
    assert serialized['underscoreSeparatedField'] == 'test'
FakeEnum = Enum(
'TestEnum', [
'val1',
'val2',
]
)
@pytest.mark.parametrize(['input', 'expected'], [
    ('val1', 'val1'),
    (FakeEnum.val1, 'val1'),
])
def test_enum_type(input, expected):
    """EnumType serializes both a raw value and an Enum member to its name.

    NOTE: the parameter name ``input`` shadows the builtin; kept to match the
    parametrize ids.
    """
    class FakeModel(BetfairModel):
        enum_field = EnumType(FakeEnum)
    datum = FakeModel(enum_field=input)
    datum.validate()
    serialized = datum.serialize()
    assert serialized['enumField'] == expected
class Child(BetfairModel):
child_name = StringType()
class Parent(BetfairModel):
parent_name = StringType()
child = ModelType(Child)
def test_nested_model():
parent = Parent(parent_name='mom', child=dict(child_name='kid'))
expected = {
'parentName': 'mom',
'child': {
'childName': 'kid',
},
}
assert parent.serialize() == expected
def test_nested_model_unserialize_rogue():
Parent(parent_name='dad', child=dict(child_name='kid', rogue='rogue'))
|
[
"jm.carp@gmail.com"
] |
jm.carp@gmail.com
|
61511f49964ca71e6a0f6d8c8c5023828b810084
|
55909fd5282ea210f2221fc467f71f9ed41b0bef
|
/Aula 13/ex056.py
|
5a65082bf9d2352ec7ab655f7557494215f5ccf6
|
[
"MIT"
] |
permissive
|
alaanlimaa/Python_CVM1-2-3
|
163ecd8c9145f2d332e6574d8923373b87a2e1f5
|
6d9a9bd693580fd1679a1d0b23afd26841b962a6
|
refs/heads/main
| 2023-06-18T16:07:59.930804
| 2021-07-20T16:22:01
| 2021-07-20T16:22:01
| 387,841,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 671
|
py
|
# Reads 4 people (name, age, sex) and reports: average age, the oldest man's
# age and name, and how many women are under 20.
# midade: running sum of ages; Hmaior: oldest man's age; nomevelho: his name;
# contM20: count of women younger than 20.
midade = Hmaior = nomevelho = contM20 = 0
for p in range(1, 5):
    print('-=-' * 10)
    print(f'{p}º pessoa ')
    nome = str(input('Nome: ')).strip()
    idade = int(input('Idade: '))
    sexo = str(input('Sexo [F/M]: ')).strip()[0]
    midade += idade
    # Seed the "oldest man" with the first person if male, so a later
    # strictly-greater comparison works even if ages could be <= 0.
    if p == 1 and sexo in 'Mm':
        Hmaior = idade
        nomevelho = nome
    if sexo in 'Mm' and idade > Hmaior:
        Hmaior = idade
        nomevelho = nome
    if sexo in 'Ff' and idade < 20:
        contM20 += 1
# After the loop p == 4 (the number of people read), so midade / p is the mean.
print(f'A média de idade do grupo é {midade / p:.2f} anos')
print(f'O homem mais velho tem {Hmaior} anos eo seu nome é {nomevelho}')
print(f'São {contM20} mulheres menores de 20 anos')
|
[
"alanlimabusiness@outlook.com"
] |
alanlimabusiness@outlook.com
|
169d1b34052601f7372457060040c76fbb71fe6b
|
498d65615aeba1f7399344a32a23514e057fb30e
|
/decode_verify_jwt.py
|
224caf0f4e6b9ae7531dc23017880f0ac6b66eee
|
[] |
no_license
|
gautamamber/Blog-Serverless-chalice
|
54fd128f76a3e918a170225bb49ded0874089a61
|
e1735c5bb617bdb9720b5ecf847ea32833d7e5bc
|
refs/heads/master
| 2020-08-04T15:17:44.405145
| 2019-10-02T14:33:32
| 2019-10-02T14:33:32
| 212,181,532
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,896
|
py
|
from urllib.request import urlopen
import json
import os
import time
from constant import Constants
from jose import jwk, jwt
from jose.utils import base64url_decode
region = 'us-west-2'
userpool_id = Constants.COGNITO_POOL_ID
app_client_id = Constants.COGNITO_CLIENT
keys_url = 'https://cognito-idp.{}.amazonaws.com/{}/.well-known/jwks.json'.format(region, userpool_id)
# instead of re-downloading the public keys every time
# we download them only on cold start
# https://aws.amazon.com/blogs/compute/container-reuse-in-lambda/
response = urlopen(keys_url)
keys = json.loads(response.read())['keys']
def token_verification(token):
    """Verify a Cognito-issued JWT against the pool's downloaded JWKS.

    Checks, in order: the token's ``kid`` exists in the cached ``keys``,
    the RSA signature is valid, the token is not expired, and the ``aud``
    claim matches this app client.

    :param token: raw JWT string (header.payload.signature)
    :return: the (unverified-claims) dict on success, ``False`` on any failure
    """
    # get the kid from the headers prior to verification
    headers = jwt.get_unverified_headers(token)
    kid = headers['kid']
    # search for the kid in the downloaded public keys
    key_index = -1
    for i in range(len(keys)):
        if kid == keys[i]['kid']:
            key_index = i
            break
    if key_index == -1:
        return False
    # construct the public key
    public_key = jwk.construct(keys[key_index])
    # get the last two sections of the token,
    # message and signature (encoded in base64)
    message, encoded_signature = str(token).rsplit('.', 1)
    # decode the signature
    decoded_signature = base64url_decode(encoded_signature.encode('utf-8'))
    # verify the signature
    if not public_key.verify(message.encode("utf8"), decoded_signature):
        return False
    # since we passed the verification, we can now safely
    # use the unverified claims
    claims = jwt.get_unverified_claims(token)
    # additionally we can verify the token expiration
    if time.time() > claims['exp']:
        return False
    # and the Audience (use claims['client_id'] if verifying an access token)
    if claims['aud'] != app_client_id:
        return False
    # now we can use the claims
    return claims
|
[
"ambergautam1@gmail.com"
] |
ambergautam1@gmail.com
|
f135349869cce6877593dc177603adef88a8dd07
|
8eb2e7d0b82e26b8999c1e2f14b4fe0f7dfeab65
|
/scripts/run_slim_bpr_cython_baesyan.py
|
8262e9aefd632f8690b346aca92562dd0f270d73
|
[
"Apache-2.0"
] |
permissive
|
edervishaj/spotify-recsys-challenge
|
c8d66cec51495bef85809dbbff183705e53a7bd4
|
4077201ac7e4ed9da433bd10a92c183614182437
|
refs/heads/master
| 2021-06-28T14:59:02.619439
| 2020-10-03T09:53:50
| 2020-10-03T09:53:50
| 150,008,507
| 0
| 0
|
Apache-2.0
| 2020-10-03T09:53:51
| 2018-09-23T17:31:20
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 7,642
|
py
|
from personal.MaurizioFramework.ParameterTuning.BayesianSearch import BayesianSearch
from personal.MaurizioFramework.ParameterTuning.AbstractClassSearch import DictionaryKeys
from utils.definitions import ROOT_DIR
import pickle
from personal.MaurizioFramework.SLIM_BPR.Cython.SLIM_BPR_Cython import SLIM_BPR_Cython
from recommenders.similarity.dot_product import dot_product
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.bot import Bot_v1
from tqdm import tqdm
import scipy.sparse as sps
import numpy as np
import sys
def run_SLIM_bananesyan_search(URM_train, URM_validation, logFilePath = ROOT_DIR+"/results/logs_baysian/"):
recommender_class = SLIM_BPR_Cython
bananesyan_search = BayesianSearch(recommender_class, URM_validation=URM_validation,
evaluation_function=evaluateRecommendationsSpotify_BAYSIAN)
hyperparamethers_range_dictionary = {}
hyperparamethers_range_dictionary["topK"] = [100, 150, 200, 250, 300, 350, 400, 500]
hyperparamethers_range_dictionary["lambda_i"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["lambda_j"] = [1e-7,1e-6,1e-5,1e-4,1e-3,0.001,0.01,0.05,0.1]
hyperparamethers_range_dictionary["learning_rate"] = [0.1,0.01,0.001,0.0001,0.00005,0.000001, 0.0000001]
hyperparamethers_range_dictionary["minRatingsPerUser"] = [0, 5, 50, 100]
logFile = open(logFilePath + recommender_class.RECOMMENDER_NAME + "_BayesianSearch Results.txt", "a")
recommenderDictionary = {DictionaryKeys.CONSTRUCTOR_POSITIONAL_ARGS: [],
DictionaryKeys.CONSTRUCTOR_KEYWORD_ARGS: {
"URM_train":URM_train,
"positive_threshold":0,
"URM_validation":URM_validation,
"final_model_sparse_weights":True,
"train_with_sparse_weights":True,
"symmetric" : True},
DictionaryKeys.FIT_POSITIONAL_ARGS: dict(),
DictionaryKeys.FIT_KEYWORD_ARGS: {
"epochs" : 5,
"beta_1" : 0.9,
"beta_2" : 0.999,
"validation_function": evaluateRecommendationsSpotify_RECOMMENDER,
"stop_on_validation":True ,
"sgd_mode" : 'adam',
"validation_metric" : "ndcg_t",
"lower_validatons_allowed":3,
"validation_every_n":1},
DictionaryKeys.FIT_RANGE_KEYWORD_ARGS: hyperparamethers_range_dictionary}
best_parameters = bananesyan_search.search(recommenderDictionary,
metric="ndcg_t",
n_cases=200,
output_root_path=""+logFilePath + recommender_class.RECOMMENDER_NAME,
parallelPoolSize=4)
logFile.write("best_parameters: {}".format(best_parameters))
logFile.flush()
logFile.close()
pickle.dump(best_parameters, open(logFilePath + recommender_class.RECOMMENDER_NAME + "_best_parameters", "wb"),
protocol=pickle.HIGHEST_PROTOCOL)
def evaluateRecommendationsSpotify_RECOMMENDER(recommender):
"""
THIS FUNCTION WORKS INSIDE THE RECOMMENDER
:param self:
:return:
"""
user_profile_batch = recommender.URM_train[pids_converted]
eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr()
recommendation_list = np.zeros((10000, 500))
for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"):
val = eurm[row].data
ind = val.argsort()[-500:][::-1]
ind = eurm[row].indices[ind]
recommendation_list[row, 0:len(ind)] = ind
prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list,
name=recommender.configuration+"epoca"+
str(recommender.currentEpoch),
return_overall_mean=True, verbose = False,
show_plot=False, do_plot=True)
results_run = {}
results_run["prec_t"] = prec_t
results_run["ndcg_t"] = ndcg_t
results_run["clicks_t"] = clicks_t
results_run["prec_a"] = prec_a
results_run["ndcg_a"] = ndcg_a
results_run["clicks_a"] = clicks_a
return (results_run)
def evaluateRecommendationsSpotify_BAYSIAN(recommender, URM_validation, paramether_dictionary) :
"""
THIS FUNCTION WORKS INSIDE THE BAYSIAN-GRID SEARCH
:param self:
:return:
"""
user_profile_batch = recommender.URM_train[pids_converted]
eurm = dot_product(user_profile_batch, recommender.W_sparse, k=500).tocsr()
recommendation_list = np.zeros((10000, 500))
for row in tqdm(range(eurm.shape[0]), desc="spotify rec list"):
val = eurm[row].data
ind = val.argsort()[-500:][::-1]
ind = eurm[row].indices[ind]
recommendation_list[row, 0:len(ind)] = ind
prec_t, ndcg_t, clicks_t, prec_a, ndcg_a, clicks_a = ev.evaluate(recommendation_list=recommendation_list,
name=recommender.configuration+"epoca"+str(recommender.currentEpoch),
return_overall_mean=True, verbose= False,
show_plot=False, do_plot=True)
results_run = {}
results_run["prec_t"] = prec_t
results_run["ndcg_t"] = ndcg_t
results_run["clicks_t"] = clicks_t
results_run["prec_a"] = prec_a
results_run["ndcg_a"] = ndcg_a
results_run["clicks_a"] = clicks_a
return (results_run)
if __name__ == '__main__':
bot = Bot_v1("keplero bananesyan slim")
try:
######################SHRINKED
dr = Datareader(mode="offline", train_format="100k", only_load=True)
ev = Evaluator(dr)
pids = dr.get_test_pids()
urm, dictns, dict2 = dr.get_urm_shrinked()
urm_evaluation = dr.get_evaluation_urm()[pids]
pids_converted = np.array([dictns[x] for x in pids], dtype=np.int32)
run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation)
# dr = Datareader(mode="offline", only_load=True)
# ev = Evaluator(dr)
# pids = dr.get_test_pids()
#
# urm = dr.get_urm()
# urm_evaluation = dr.get_evaluation_urm()[pids]
# pids_converted = pids
#
# run_SLIM_bananesyan_search(URM_train=urm, URM_validation=urm_evaluation)
except Exception as e:
bot.error("Exception "+str(e))
bot.end()
|
[
"scarlattitommaso@gmail.com"
] |
scarlattitommaso@gmail.com
|
26d7c06f88ff8b77fb6eb704335b28197ac7b3ac
|
49c2492d91789b3c2def7d654a7396e8c6ce6d9f
|
/ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/generate_cached_setup.py
|
063f8efb2e5b20ed1335dd677a45fae2675a3513
|
[] |
no_license
|
DavidHan008/lockdpwn
|
edd571165f9188e0ee93da7222c0155abb427927
|
5078a1b08916b84c5c3723fc61a1964d7fb9ae20
|
refs/heads/master
| 2021-01-23T14:10:53.209406
| 2017-09-02T18:02:50
| 2017-09-02T18:02:50
| 102,670,531
| 0
| 2
| null | 2017-09-07T00:11:33
| 2017-09-07T00:11:33
| null |
UTF-8
|
Python
| false
| false
| 1,508
|
py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import argparse
import os
import stat
import sys
# find the import for catkin's python package - either from source space or from an installed underlay
if os.path.exists(os.path.join('/opt/ros/indigo/share/catkin/cmake', 'catkinConfig.cmake.in')):
sys.path.insert(0, os.path.join('/opt/ros/indigo/share/catkin/cmake', '..', 'python'))
try:
from catkin.environment_cache import generate_environment_script
except ImportError:
# search for catkin package in all workspaces and prepend to path
for workspace in "/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel;/home/dyros-vehicle/gitrepo/lockdpwn/ROS/catkin_ws/devel;/opt/ros/indigo".split(';'):
python_path = os.path.join(workspace, 'lib/python2.7/dist-packages')
if os.path.isdir(os.path.join(python_path, 'catkin')):
sys.path.insert(0, python_path)
break
from catkin.environment_cache import generate_environment_script
code = generate_environment_script('/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/devel/.private/vrep_skeleton_msg_and_srv/env.sh')
output_filename = '/home/dyros-vehicle/gitrepo/lockdpwn/ROS/vrep_ros_ws/build/vrep_skeleton_msg_and_srv/catkin_generated/setup_cached.sh'
with open(output_filename, 'w') as f:
#print('Generate script for cached setup "%s"' % output_filename)
f.write('\n'.join(code))
mode = os.stat(output_filename).st_mode
os.chmod(output_filename, mode | stat.S_IXUSR)
|
[
"gyurse@gmail.com"
] |
gyurse@gmail.com
|
9121aa7623fa31fd8cad9ac6cd3485cb1656a44d
|
a36501f44a09ca03dd1167e1d7965f782e159097
|
/app/modules/auth/params.py
|
c7dd1d359b51eb056962e44c9b871c1d299d8c4b
|
[
"Apache-2.0"
] |
permissive
|
ssfdust/full-stack-flask-smorest
|
9429a2cdcaa3ff3538875cc74cff802765678d4b
|
4f866b2264e224389c99bbbdb4521f4b0799b2a3
|
refs/heads/master
| 2023-08-05T08:48:03.474042
| 2023-05-07T01:08:20
| 2023-05-07T01:08:20
| 205,528,296
| 39
| 10
|
Apache-2.0
| 2023-08-31T00:18:42
| 2019-08-31T10:12:25
|
Python
|
UTF-8
|
Python
| false
| false
| 2,162
|
py
|
# Copyright 2019 RedLotus <ssfdust@gmail.com>
# Author: RedLotus <ssfdust@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
app.modules.auth.params
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
用户验证参数模块
"""
from app.extensions import ma
from marshmallow import fields
class LoginParams(ma.Schema):
    """
    Login request parameters.

    :attr email: str user email
    :attr password: str password
    :attr captcha: str captcha answer
    :attr token: str captcha token
    """
    email = fields.Str(required=True, allow_none=False, description="用户邮箱")
    password = fields.Str(required=True, allow_none=False, description="密码")
    captcha = fields.Str(required=True, allow_none=False, description="验证码")
    token = fields.Str(required=True, allow_none=False, description="验证token")
class JwtParam(ma.Schema):
    """
    JWT token parameter.

    :attr token: str JWT token
    """
    token = fields.Str(required=False, allow_none=False, description="token")
class PasswdParam(ma.Schema):
    """
    Password confirmation parameters.

    :attr password: str new password
    :attr confirm_password: str confirmation of the new password
    """
    password = fields.Str(required=True, allow_none=False, description="密码")
    confirm_password = fields.Str(required=True, allow_none=False, description="确认密码")
class EmailParam(ma.Schema):
    """
    Email parameter.

    :attr email: str email address
    """
    email = fields.Str(required=True, description="邮箱")
class CaptchaParam(ma.Schema):
    """
    Captcha image token parameter.

    :attr token: str captcha token
    """
    token = fields.Str(required=True, description="随机token")
|
[
"ssfdust@gmail.com"
] |
ssfdust@gmail.com
|
a42609d3d57b7e0f3298e6dee88c7531e8b4df7b
|
32c915adc51bdb5d2deab2a592d9f3ca7b7dc375
|
/Chapter_11_programming_tasks/task_2.py
|
57291c7a43269738ae347bef625ced59459b1aa2
|
[] |
no_license
|
nervig/Starting_Out_With_Python
|
603c2b8c9686edcf92c1a90596d552b873fe6229
|
d617ee479c7c77038331b5f262e00f59e8e90070
|
refs/heads/master
| 2023-02-25T07:14:12.685417
| 2021-02-02T18:45:00
| 2021-02-02T18:45:00
| 335,391,362
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 725
|
py
|
import employee
def main():
    """Prompt for shift-supervisor data and echo it back.

    NOTE: salary and bonus are kept as raw input strings (no numeric
    conversion) — the getters below are concatenated with '+', which only
    works because of that.
    """
    name_of_employee = input('Enter a name of employee: ')
    number_of_employee = input('Enter a number of employee: ')
    annual_salary = input('Enter an annual salary: ')
    annual_bonus = input('Enter an annual bonus: ')
    # ShiftSupervisor is defined in the project-local `employee` module.
    data_of_shift_supervisor = employee.ShiftSupervisor(name_of_employee, number_of_employee, annual_salary, annual_bonus)
    print('The data of shift supervisor: ')
    print('Name: ' + data_of_shift_supervisor.get_name_of_employee())
    print('ID: ' + data_of_shift_supervisor.get_number_of_employee())
    print('Annual salary: ' + data_of_shift_supervisor.get_annual_salary())
    print('Annual bonus: ' + data_of_shift_supervisor.get_annual_bonus())
main()
|
[
"solide@yandex.ru"
] |
solide@yandex.ru
|
23e77f8d02e5d307347f08baca5d033626e01412
|
51b7b81cce1e8943926c531ad8763af8fd4074dc
|
/1260.py
|
8280b9c478f211dddcdc27f39f47b057c9ca1dae
|
[] |
no_license
|
goodsosbva/BOJ_Graph
|
f65598591b07ea2f637cba2644bdc81386afb36e
|
34fe8bfec0543d9884869fe5ebbb536c6fcc3fbf
|
refs/heads/main
| 2023-03-22T08:14:53.735351
| 2021-03-07T09:22:39
| 2021-03-07T09:22:39
| 338,587,428
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 790
|
py
|
N, M, V = map(int, input().split())
matrix = [[0] * (N + 1) for i in range(N + 1)]
for i in range(M):
a, b = map(int, input().split())
matrix[a][b] = matrix[b][a] = 1
visit_list = [0] * (N + 1)
def dfs(V):
    """Recursive depth-first traversal from V; prints vertices in visit order.

    Uses module globals: `matrix` (adjacency), `visit_list`, `N`.
    Marks visited vertices with 1.
    """
    visit_list[V] = 1  # mark vertex V as visited
    print(V, end=' ')
    for i in range(1, N + 1):
        if (visit_list[i] == 0 and matrix[V][i] == 1):
            dfs(i)
def bfs(V):
    """Breadth-first traversal from V; prints vertices in visit order.

    NOTE: deliberately reuses `visit_list` as left by dfs(), where reachable
    vertices are marked 1 — here 1 means "not yet visited by bfs" and a
    vertex is flipped back to 0 once enqueued. Must run after dfs().
    """
    queue = [V]  # vertices waiting to be processed
    visit_list[V] = 0  # mark start as visited (0 = visited in this pass)
    while queue:
        V = queue.pop(0)
        print(V, end=' ')
        for i in range(1, N + 1):
            if (visit_list[i] == 1 and matrix[V][i] == 1):
                queue.append(i)
                visit_list[i] = 0
dfs(V)
print()
bfs(V)
|
[
"noreply@github.com"
] |
goodsosbva.noreply@github.com
|
ed1a80133b79485d1c7d0125da7309754e321eea
|
d922b02070c11c19ba6104daa3a1544e27a06e40
|
/DSA_Project/venv/Scripts/easy_install-3.8-script.py
|
d71594c04a3b333adb75b4777054c951680c802e
|
[] |
no_license
|
viharivnv/DSA
|
2ca393a8e304ee7b4d540ff435e832d94ee4b2a7
|
777c7281999ad99a0359c44291dddaa868a2525c
|
refs/heads/master
| 2022-10-15T15:26:59.045698
| 2020-06-17T15:55:33
| 2020-06-17T15:55:33
| 273,020,116
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 462
|
py
|
#!C:\Users\vihar\PycharmProjects\DSA_Project\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==40.8.0','console_scripts','easy_install-3.8'
# Auto-generated setuptools console-script shim: loads and invokes the
# 'easy_install-3.8' entry point from the pinned setuptools distribution.
__requires__ = 'setuptools==40.8.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)'/'.exe' so argv[0] matches the command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==40.8.0', 'console_scripts', 'easy_install-3.8')()
    )
|
[
"52350934+viharivnv@users.noreply.github.com"
] |
52350934+viharivnv@users.noreply.github.com
|
ef40ec48bf2a0cb2ff75da74ffa77734efd92e46
|
7e34f45c4c046f01764583b6317f85200ddf2bcf
|
/tests/settings.py
|
6b203371768fca78fd5a3bcd4c863f83fbb1ae04
|
[
"BSD-3-Clause"
] |
permissive
|
MarkusH/django-jellyglass
|
3953de9fb840320db23a8b748df089da2aeb1013
|
2b7c8fcaac76f8833f2880b10f687552530a3ccb
|
refs/heads/master
| 2021-01-18T15:09:08.904899
| 2018-12-03T16:41:06
| 2018-12-03T16:41:06
| 49,637,243
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,540
|
py
|
ALLOWED_HOSTS = []
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ":memory:",
}
}
DEBUG = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'jellyglass.apps.JellyGlassConfig',
]
LANGUAGE_CODE = 'en-us'
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'tests.urls'
SECRET_KEY = 'test-secret-key'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
|
[
"info@markusholtermann.eu"
] |
info@markusholtermann.eu
|
c6866ffcb6663df60970fd0041ee61d604f921a5
|
930c207e245c320b108e9699bbbb036260a36d6a
|
/BRICK-RDFAlchemy/generatedCode/brick/brickschema/org/schema/_1_0_2/Brick/Differential_Pressure_Load_Shed_Status.py
|
4f44b20dc9c3108fb3505cc8f10804105b148f22
|
[] |
no_license
|
InnovationSE/BRICK-Generated-By-OLGA
|
24d278f543471e1ce622f5f45d9e305790181fff
|
7874dfa450a8a2b6a6f9927c0f91f9c7d2abd4d2
|
refs/heads/master
| 2021-07-01T14:13:11.302860
| 2017-09-21T12:44:17
| 2017-09-21T12:44:17
| 104,251,784
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 428
|
py
|
from rdflib import Namespace, Graph, Literal, RDF, URIRef
from rdfalchemy.rdfSubject import rdfSubject
from rdfalchemy import rdfSingle, rdfMultiple, rdfList
from brick.brickschema.org.schema._1_0_2.Brick.Load_Shed_Status import Load_Shed_Status
class Differential_Pressure_Load_Shed_Status(Load_Shed_Status):
    # Binds this RDFAlchemy class to its Brick 1.0.2 ontology term.
    rdf_type = Namespace('https://brickschema.org/schema/1.0.2/Brick#').Differential_Pressure_Load_Shed_Status
|
[
"Andre.Ponnouradjane@non.schneider-electric.com"
] |
Andre.Ponnouradjane@non.schneider-electric.com
|
032be5db944974a1f32618e9395669e88e00c17e
|
5dfbfa153f22b3f58f8138f62edaeef30bad46d3
|
/old_ws/build/catkin_generated/order_packages.py
|
e6748be72199deb25e033f5d33a964cf1bf10700
|
[] |
no_license
|
adubredu/rascapp_robot
|
f09e67626bd5a617a569c9a049504285cecdee98
|
29ace46657dd3a0a6736e086ff09daa29e9cf10f
|
refs/heads/master
| 2022-01-19T07:52:58.511741
| 2019-04-01T19:22:48
| 2019-04-01T19:22:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 360
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/bill/ros_ws/src"


def _split_semicolon_list(raw):
    """Split a catkin ';'-separated template value; empty input yields []."""
    return raw.split(';') if raw else []


whitelisted_packages = _split_semicolon_list("")
blacklisted_packages = _split_semicolon_list("")
underlay_workspaces = _split_semicolon_list("/home/bill/ros_ws/devel;/opt/ros/kinetic")
|
[
"alphonsusbq436@gmail.com"
] |
alphonsusbq436@gmail.com
|
d57bafa6b041e14b363221f5424fcc938e2a081a
|
4d21da5a3d07f4d05b997e80119cd79692ac0d25
|
/Leetcode/201-300/259. 3Sum Smaller.py
|
fc6828a904244248c20e44b9f93c23460bea2b66
|
[] |
no_license
|
ErinC123/Algorithm
|
92b2789ec3b36c49f9e65f2e7a702bb4b732e8ba
|
4544fee91e811a6625000921c32ad054df550f1e
|
refs/heads/master
| 2021-06-17T14:03:33.955233
| 2017-06-18T21:20:55
| 2017-06-18T21:20:55
| 75,894,724
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 479
|
py
|
class Solution(object):
    def threeSumSmaller(self, nums, target):
        """Count index triplets i < j < k with nums[i] + nums[j] + nums[k] < target.

        Sorts *nums* in place, then anchors each index in turn and sweeps two
        pointers over the remainder: when the smallest pair sum qualifies,
        every index between the pointers also qualifies, so a whole run is
        counted at once (O(n^2) overall).

        :type nums: List[int]
        :type target: int
        :rtype: int
        """
        nums.sort()
        count = 0
        n = len(nums)
        for anchor in range(n):
            lo, hi = anchor + 1, n - 1
            while lo < hi:
                if nums[anchor] + nums[lo] + nums[hi] >= target:
                    # Sum too big: shrink from the right.
                    hi -= 1
                else:
                    # All pairs (lo, lo+1..hi) with this anchor work.
                    count += hi - lo
                    lo += 1
        return count
|
[
"zhencao93@gmail.com"
] |
zhencao93@gmail.com
|
7935285b6302c1b7277f4c9d4939535c9636fe0d
|
159da3fc63ccf20b80dc17bb44b53e9a5578bcfd
|
/arkav_is_api/arkavauth/migrations/0005_refactor_auth.py
|
f92058d0d724b71e6dab65b111c6f5e8e2a4a7d7
|
[
"MIT"
] |
permissive
|
arkavidia5/arkav-is
|
4338829e7c0a9446393545316e46395e9df111fd
|
6c6e8d091ead5bfff664d86f7903c62209800031
|
refs/heads/master
| 2021-07-16T03:49:15.900812
| 2019-02-08T18:08:32
| 2019-02-08T18:08:32
| 149,406,261
| 3
| 2
|
MIT
| 2018-11-09T16:49:17
| 2018-09-19T06:58:16
|
Python
|
UTF-8
|
Python
| false
| false
| 2,458
|
py
|
import arkav_is_api.arkavauth.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('arkavauth', '0004_user_email_confirmed'),
]
operations = [
migrations.RenameModel('EmailConfirmationAttempt', 'RegistrationConfirmationAttempt'),
migrations.RenameModel('PasswordResetAttempt', 'PasswordResetConfirmationAttempt'),
migrations.RenameField(
model_name='user',
old_name='email_confirmed',
new_name='is_email_confirmed',
),
migrations.RenameField(
model_name='passwordresetconfirmationattempt',
old_name='used',
new_name='is_confirmed',
),
migrations.RenameField(
model_name='registrationconfirmationattempt',
old_name='confirmed',
new_name='is_confirmed',
),
migrations.AddField(
model_name='passwordresetconfirmationattempt',
name='email_last_sent_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='registrationconfirmationattempt',
name='email_last_sent_at',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='passwordresetconfirmationattempt',
name='token',
field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
),
migrations.AlterField(
model_name='passwordresetconfirmationattempt',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='password_reset_confirmation_attempt', to=settings.AUTH_USER_MODEL),
),
migrations.AlterField(
model_name='registrationconfirmationattempt',
name='token',
field=models.CharField(default=arkav_is_api.arkavauth.models.generate_email_confirmation_token, max_length=30),
),
migrations.AlterField(
model_name='registrationconfirmationattempt',
name='user',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='registration_confirmation_attempt', to=settings.AUTH_USER_MODEL),
),
]
|
[
"jonathan.christopher@outlook.com"
] |
jonathan.christopher@outlook.com
|
32deaed41a4e6581445f42876563cf802299ebe7
|
da7149f3182d2421e046828d30fc1c91c14d496d
|
/chapter16/coro_exc_demo.py
|
c9aa826582f4e4609e85504e132a7eb87f93559b
|
[] |
no_license
|
tangkaiyang/fluent_python
|
6db2825cfadccb70a886cb822026d69be4b03cc9
|
5f07072d8db5ddf43bfe913b3262b325a8f1ad35
|
refs/heads/master
| 2020-05-02T20:21:00.404872
| 2019-04-18T02:35:55
| 2019-04-18T02:35:55
| 178,188,495
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,724
|
py
|
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 2019/4/12 16:18
# @Author : tangky
# @Site :
# @File : coro_exc_demo.py
# @Software : PyCharm
# 示例16-8 coro_exc_demo.py:学习在协程中处理异常的测试代码
class DemoException(Exception):
    """Exception type defined for this demonstration."""
def demo_exc_handling():
    """Coroutine that echoes received values and survives DemoException.

    Any other exception thrown in propagates and terminates the generator.
    """
    print('-> coroutine started')
    while True:
        try:
            x = yield
        except DemoException:  # handle DemoException specially and keep going
            print('*** DemoExceptiion handled. Continuing...')
        else:  # no exception: display the received value
            print('-> coroutine received: {!r}'.format(x))
    raise RuntimeError('This line should never run.')  # unreachable: the loop never exits normally
# The last line of Example 16-8 never runs: only an unhandled exception can
# break the infinite loop, and an unhandled exception terminates the coroutine
# immediately.
# Example 16-9: activating and closing demo_exc_handling without an exception.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.send(22)
exc_coro.close()
from inspect import getgeneratorstate
print(getgeneratorstate(exc_coro))
# Example 16-10: throwing DemoException into demo_exc_handling does not
# terminate the coroutine.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
exc_coro.throw(DemoException)
print(getgeneratorstate(exc_coro))
# If an exception thrown into the coroutine is not handled, the coroutine
# stops, i.e. its state becomes 'GEN_CLOSED'.
# Example 16-11: the coroutine terminates when it cannot handle the thrown
# exception.
exc_coro = demo_exc_handling()
next(exc_coro)
exc_coro.send(11)
try:
    exc_coro.throw(ZeroDivisionError)
except Exception:
    print(ZeroDivisionError)
print(getgeneratorstate(exc_coro))
|
[
"945541696@qq.com"
] |
945541696@qq.com
|
05f1381ac472766b4cd06fbd8153c99cc502c0e1
|
84d891b6cb6e1e0d8c5f3e285933bf390e808946
|
/Demo/PO_V6/TestCases/test_login_pytest.py
|
174dcfab88ddcee38ccb2eb6b5ea51c6f4e0d99d
|
[] |
no_license
|
zzlzy1989/web_auto_test
|
4df71a274eb781e609de1067664264402c49737e
|
3e20a55836144e806496e99870f5e8e13a85bb93
|
refs/heads/master
| 2020-05-24T10:37:29.709375
| 2019-10-28T06:14:31
| 2019-10-28T06:14:31
| 187,230,775
| 2
| 0
| null | 2019-06-20T11:06:32
| 2019-05-17T14:29:11
| null |
UTF-8
|
Python
| false
| false
| 2,400
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
# Name: test_login_pytest
# Author: 简
# Time: 2019/6/20
from Demo.PO_V6.PageObjects.login_page import LoginPage
from Demo.PO_V6.PageObjects.index_page import IndexPage
from Demo.PO_V6.TestDatas import login_datas as ld
from Demo.PO_V6.TestDatas import Comm_Datas as cd
import pytest
# pytestmark = pytest.mark.model # 模块级别的标签名
@pytest.mark.demo
@pytest.mark.usefixtures("session_action")
def test_demo():
    # Smoke test: relies on the session_action fixture for setup/teardown.
    print("111111111111111")
@pytest.mark.parametrize("a,b,c",[(1,3,4),(10,35,45),(22.22,22.22,44.44)])
def test_add(a,b,c):
    # Parametrized addition check: a + b must equal the expected value c.
    res = a + b
    assert res == c
# 用例三步曲:前置 、步骤 、 断言
# @ddt.ddt
# @pytest.mark.login # 整个TestLogin类里面,所有测试用例都有login标签。
@pytest.mark.usefixtures("open_url") # 使用函数名称为open_url的fixture
@pytest.mark.usefixtures("refresh_page")
class TestLogin:
pytestmark=pytest.mark.login # 整个TestLogin类里面,所有测试用例都有
# 异常用例 -....
@pytest.mark.parametrize("data", ld.wrong_datas)
def test_login_0_failed_by_wrong_datas(self, data):
# 步骤 - 登陆操作 - 登陆页面 - 密码为空 18684720553
LoginPage(self.driver).login(data["user"], data["passwd"])
# 断言 - 页面的提示内容为:请输入密码
self.assertEqual(data["check"], LoginPage(self.driver).get_error_msg_from_loginForm())
# 正常用例 - 登陆+首页
@pytest.mark.smoke
def test_login_2_success(self,open_url): # open_url = driver
# logging.info("用例1-正常场景-登陆成功-使用到测试数据-")
# 步骤 - 登陆操作 - 登陆页面 - 18684720553、python
LoginPage(open_url).login(ld.success_data["user"],ld.success_data["passwd"]) # 测试对象+测试数据
# 断言 - 页面是否存在 我的帐户 元素 元素定位+元素操作
assert IndexPage(open_url).check_nick_name_exists() == True # 测试对象+测试数据
# url跳转
assert open_url.current_url == ld.success_data["check"] # 测试对象+测试数据 # # 正常用例 - 登陆+首页
class TestTT:
    """Demo class showing class-level pytest markers."""
    # Class-level marker: every test in TestTT carries the demo marker.
    pytestmark = pytest.mark.demo
    # pytestmark = [pytest.mark.demo,pytest.mark.demo2]
    def test_add(self):
        # Trivial arithmetic check.
        c = 100 +200
        assert c == 300
    def test_demo(self):
        # No assertion: only prints to show the test ran.
        print("demo!!!")
|
[
"394845369@qq.com"
] |
394845369@qq.com
|
cdd5a31a1454daea675c492521e6a22eed8d06bc
|
8997a0bf1e3b6efe5dd9d5f307e1459f15501f5a
|
/unistream_examples/get_agent_point_info.py
|
0278d0f98ae5b4b9aca91d42c4b6f9d5fe4f01f8
|
[
"CC-BY-4.0"
] |
permissive
|
stepik/SimplePyScripts
|
01092eb1b2c1c33756427abb2debbd0c0abf533f
|
3259d88cb58b650549080d6f63b15910ae7e4779
|
refs/heads/master
| 2023-05-15T17:35:55.743164
| 2021-06-11T22:59:07
| 2021-06-11T22:59:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,039
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'
"""
Получение информации о точке предоставления услуги.
GET https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}
"""
if __name__ == '__main__':
    from utils import get_today_RFC1123_date, get_authorization_header
    from config import APPLICATION_ID, SECRET
    # Partner (agent) identifier -- placeholder, must be filled in before use.
    AGENT_ID = -1
    # Identifier of the point of service -- placeholder, must be filled in.
    POS_ID = -1
    params = {
        'agentId': AGENT_ID,
        'posId': POS_ID,
    }
    URL = 'https://test.api.unistream.com/v1/agents/{agentId}/poses/{posId}'.format(**params)
    TODAY_DATE = get_today_RFC1123_date()
    # The Date header value is also part of the signature computed below.
    headers = dict()
    headers['Date'] = TODAY_DATE
    headers['Authorization'] = get_authorization_header(APPLICATION_ID, SECRET, TODAY_DATE, URL, headers)
    import requests
    rs = requests.get(URL, headers=headers)
    print(rs)
    print(rs.text)
|
[
"gil9red@gmail.com"
] |
gil9red@gmail.com
|
fcdcbeba752542d4e128ddebf54c68d5df123be8
|
385c01f7337cf5031093147f6731251bfbf17430
|
/lms/level/containers/get_by_id.py
|
d7e05c88e2f38349997208a7d052eb38bf54862a
|
[] |
no_license
|
lucassimon/lmswebaula
|
23a73d6d2d43c78a2f9e3b552113cf50a11a3587
|
671276426685968458f240faa93b313427fa32d9
|
refs/heads/master
| 2021-01-19T13:26:12.352308
| 2017-08-16T21:07:43
| 2017-08-16T21:07:43
| 88,088,474
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,009
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import six
class GetByIdRQ(object):
    """Request object for fetching a level by id.

    Holds the LMS-side numeric id (``lms_level_id``), which must be an
    integer, and the local id (``level_id``), which is not validated.
    """

    _level_id = None
    _lms_level_id = None

    def __init__(self, lms_level_id=None, level_id=None):
        # Route through the property setters so the integer validation lives
        # in one place (it was previously duplicated here with a slightly
        # different error message: 'nivel' vs 'nível').
        if lms_level_id:
            self.lms_level_id = lms_level_id
        if level_id:
            self.level_id = level_id

    @property
    def lms_level_id(self):
        return self._lms_level_id

    @lms_level_id.setter
    def lms_level_id(self, value):
        # six.integer_types covers int (and long on Python 2).
        if not isinstance(value, six.integer_types):
            raise ValueError(
                'O lms id do nível precisa ser um inteiro'
            )
        self._lms_level_id = value

    @property
    def level_id(self):
        return self._level_id

    @level_id.setter
    def level_id(self, value):
        # No validation: any value is accepted for the local id.
        self._level_id = value
|
[
"lucassrod@gmail.com"
] |
lucassrod@gmail.com
|
f68e80676e72be2e4597cabb98f6e8312c69fc60
|
d9cd697f76565e8230a98909204a5c516437f977
|
/tutorial/tutorial/settings.py
|
7b7807db8743db25310455fe110dcac0eed68dba
|
[] |
no_license
|
huazhicai/webspider
|
be20d0d3a248ef8cbfaab8e3d1fd0e8ac7551352
|
a1defa3778956accbb7617c9a3798d02e0b175f6
|
refs/heads/master
| 2020-03-22T09:00:23.518744
| 2019-07-11T14:53:37
| 2019-07-11T14:53:37
| 139,807,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,207
|
py
|
# -*- coding: utf-8 -*-
# Scrapy settings for tutorial project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'tutorial'
SPIDER_MODULES = ['tutorial.spiders']
NEWSPIDER_MODULE = 'tutorial.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'tutorial (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
# COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False
# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
# 'Accept-Language': 'en',
# }
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialSpiderMiddleware': 543,
# }
# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
# 'tutorial.middlewares.TutorialDownloaderMiddleware': 543,
# }
# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
# }
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
'tutorial.pipelines.TextPipeline': 300,
'tutorial.pipelines.MongoPipeline': 400,
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
# MongoDB connection settings, presumably consumed by
# tutorial.pipelines.MongoPipeline (see ITEM_PIPELINES above) -- verify.
# NOTE(review): hardcoded internal host; consider reading from an
# environment variable for portability.
MONGO_URI = '192.168.11.138'
MONGO_DB = 'tutorial'
|
[
"936844218@qq.com"
] |
936844218@qq.com
|
01d3691dce55255c364bb881f05bb97a3c770ca9
|
5982cd8db693927e83cd99f8ea1acf4fc90b8b9b
|
/Configurations/ControlRegions/WgS/torqueBatch/configuration1.py
|
7d9ce4b429ce76b6bac43226395ae897e4ab9636
|
[] |
no_license
|
cedricpri/PlotsConfigurations
|
61fc78ce9f081fd910a25f8101ea8150a7312f25
|
5cb0a87a17f89ea89003508a87487f91736e06f4
|
refs/heads/master
| 2021-01-17T09:46:55.026779
| 2016-09-01T09:30:09
| 2016-09-01T09:30:09
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 817
|
py
|
# example of configuration file
tag = 'WgS'
# used by mkShape to define output directory for root files
outputDir = 'rootFile1'
# file with list of variables
variablesFile = '../variables.py'
# file with list of cuts
cutsFile = '../cuts.py'
# file with list of samples
samplesFile = 'samples1.py'
# file with list of samples
plotFile = 'plot.py'
# luminosity to normalize to (in 1/fb)
# lumi = 2.264
#lumi = 2.318
lumi = 2.6
# used by mkPlot to define output directory for plots
# different from "outputDir" to do things more tidy
outputDirPlots = 'plotWgS'
# used by mkDatacards to define output directory for datacards
outputDirDatacard = 'datacards'
# structure file for datacard
structureFile = 'structure.py'
# nuisances file for mkDatacards and for mkShape
nuisancesFile = 'nuisances.py'
|
[
"d4space@gmail.com"
] |
d4space@gmail.com
|
ae734d529bcbe273e29551f3ccd8c250513c04ad
|
f0aba1aa9949cc6a8d3678c0b3ecb5503b470c17
|
/dtc/__init__.py
|
d9f20e7ca90c628c2cccd70a6d6bceb53e2bd4f4
|
[] |
no_license
|
hugosenari/dtc
|
788eafc1a92701332ae54e2f2d74491566d635dd
|
9bb2e6f4f9180b7291a5daf6a35903e5c59e3fc4
|
refs/heads/master
| 2020-12-24T17:44:45.474422
| 2012-08-03T20:02:28
| 2012-08-03T20:02:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,477
|
py
|
'''
Created on Jun 30, 2012
@author: hugosenari
'''
import plugnplay
import logging
from os import path
from dtc.core import interfaces
from dtc.core.interfaces.module import _CoreModule
from dtc.core.interfaces.mainloop import MainLoop
from dtc import modules
class Dtc(object):
    """Plugin loader/runner built on plugnplay.

    Collects plugin directories from the bundled ``interfaces`` and
    ``modules`` packages plus any caller-supplied ``dirs``, registers them
    with plugnplay; ``run()`` then loads the plugins, executes every core
    module, and optionally enters a main loop.
    """
    def __init__(self, dirs = []):
        # NOTE(review): mutable default argument; harmless here only because
        # ``dirs`` is iterated, never mutated.
        self.loaded_dirs = []
        def set_plugin_dirs(arg, dirpath, files):
            # os.path.walk-style visitor: record every directory visited.
            logging.debug('add dir: %s to path', dirpath)
            self.loaded_dirs.append(dirpath)
        # NOTE(review): os.path.walk was removed in Python 3; this code
        # appears to target Python 2 (os.walk is the modern equivalent).
        path.walk(interfaces.__path__[0], set_plugin_dirs, None)
        path.walk(modules.__path__[0], set_plugin_dirs, None)
        for directory in dirs:
            path.walk(directory, set_plugin_dirs, None)
        plugnplay.set_plugin_dirs(*self.loaded_dirs)
        logging.debug('Set up plugnplay')
    def run(self, logger=logging, *args, **vargs):
        """Load plugins, execute all core modules, then start the main loop.

        If ``vargs`` carries no 'mainloop', the first MainLoop implementor
        (if any) is used and stored back into ``vargs`` for the modules.
        """
        logging.debug('load modules')
        plugnplay.load_plugins(logger)
        #get mainloop implementation
        mainloop = vargs.get('mainloop', None)
        if not mainloop:
            loops = MainLoop.implementors()
            if len(loops) > 0:
                mainloop = loops[0]
            vargs['mainloop'] = mainloop
        for module in _CoreModule.implementors():
            logging.debug('execute core module: %s', module)
            module.execute_modules(*args, **vargs)
        if mainloop:
            mainloop.run()
|
[
"hugosenari@gmail.com"
] |
hugosenari@gmail.com
|
6da75afc662601dd4bc0b2aaf0413dede6a4ac94
|
6df18031547b1fde808944b4c8f83d2766c95251
|
/UoM_databases_with_python/assignment_2/tracks.py
|
094f7bf6650220142fd1d777f5317ba3710277e3
|
[] |
no_license
|
skreynolds/UoM_data_science
|
6edce9b3d3bf03b6dab6471346e40965464d6adb
|
9636c0a784079445f585b830a1d093acea608d6a
|
refs/heads/master
| 2020-05-20T23:06:36.560299
| 2019-06-01T02:30:09
| 2019-06-01T02:30:09
| 185,794,803
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,719
|
py
|
import xml.etree.ElementTree as ET
import sqlite3
conn = sqlite3.connect('../databases/sqldb_5.sqlite')
cur = conn.cursor()
# Make some fresh tables using executescript()
cur.executescript('''
DROP TABLE IF EXISTS Artist;
DROP TABLE IF EXISTS Genre;
DROP TABLE IF EXISTS Album;
DROP TABLE IF EXISTS Track;
CREATE TABLE Artist (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Genre (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
name TEXT UNIQUE
);
CREATE TABLE Album (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
artist_id INTEGER,
title TEXT UNIQUE
);
CREATE TABLE Track (
id INTEGER NOT NULL PRIMARY KEY
AUTOINCREMENT UNIQUE,
title TEXT UNIQUE,
album_id INTEGER,
genre_id INTEGER,
len INTEGER, rating INTEGER, count INTEGER
);
''')
fname = input('Enter file name: ')
if ( len(fname) < 1 ) : fname = '../raw_data/Library.xml'
# <key>Track ID</key><integer>369</integer>
# <key>Name</key><string>Another One Bites The Dust</string>
# <key>Artist</key><string>Queen</string>
def lookup(d, key):
    """Return the text of the element immediately following the <key> child
    whose text equals *key*, or None when no such pair exists.

    Intended for flat <key>/<value> sibling layouts, where the value for a
    key is simply the next sibling element.
    """
    children = list(d)
    for pos, child in enumerate(children[:-1]):
        if child.tag == 'key' and child.text == key:
            return children[pos + 1].text
    return None
stuff = ET.parse(fname)
all = stuff.findall('dict/dict/dict')
print('Dict count:', len(all))
for entry in all:
if ( lookup(entry, 'Track ID') is None ) : continue
name = lookup(entry, 'Name')
artist = lookup(entry, 'Artist')
album = lookup(entry, 'Album')
genre = lookup(entry, 'Genre')
count = lookup(entry, 'Play Count')
rating = lookup(entry, 'Rating')
length = lookup(entry, 'Total Time')
if name is None or artist is None or album is None or genre is None :
continue
print(name, artist, album, genre, count, rating, length)
cur.execute('''INSERT OR IGNORE INTO Artist (name)
VALUES ( ? )''', ( artist, ) )
cur.execute('SELECT id FROM Artist WHERE name = ? ', (artist, ))
artist_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Genre (name)
VALUES ( ? )''', (genre, ) )
cur.execute('SELECT id FROM Genre WHERE name = ?', (genre, ))
genre_id = cur.fetchone()[0]
cur.execute('''INSERT OR IGNORE INTO Album (title, artist_id)
VALUES ( ?, ? )''', ( album, artist_id ) )
cur.execute('SELECT id FROM Album WHERE title = ? ', (album, ))
album_id = cur.fetchone()[0]
cur.execute('''INSERT OR REPLACE INTO Track
(title, album_id, genre_id, len, rating, count)
VALUES ( ?, ?, ?, ?, ?, ? )''',
( name, album_id, genre_id, length, rating, count ) )
conn.commit()
|
[
"shane.k.reynolds@gmail.com"
] |
shane.k.reynolds@gmail.com
|
3e108d215330ee3c14ab7f7957e3cbc55dfcb5f9
|
aecad2b0e89d72aca6c80bf63c424ee7904257ce
|
/pending_deletes/named_entity_recognition/NER_co_occurrence.py
|
deeeaa377fae611e679786374f32a94ecd4dcd2c
|
[] |
no_license
|
humlab/text_analytic_tools
|
fdf4ba814263672b05ec188aac9a059b55d085b6
|
32fc444ed11649a948a7bf59653ec792396f06e3
|
refs/heads/master
| 2022-03-02T06:56:29.223039
| 2019-10-28T13:06:49
| 2019-10-28T13:06:49
| 74,679,680
| 2
| 1
| null | 2019-10-26T21:33:23
| 2016-11-24T14:19:46
|
Python
|
UTF-8
|
Python
| false
| false
| 2,705
|
py
|
import pandas as pd
# %%
writer = pd.ExcelWriter('C:\\TEMP\\papacy.xlsx')
# %%
#pope = 'benedict-xvi'
#pope = 'francesco'
pope = 'john-paul-ii'
#df = pd.read_excel('./Data/' + pope + '.xlsx', 'Data')
df = pd.read_excel('C:\\Users\\roma0050\\Documents\\Projects\\papacy_scraper\\data\\' + pope + '.xlsx', 'Data', dtype={'Concept': 'str'})
# %%
df_locations = df.loc[(df.Classifier=='LOCATION')]
# %%
df_place_occurrences_counts = df_locations.groupby(['Document', 'Year', 'Genre', 'Concept'])[['Count']].sum().reset_index()
df_place_occurrences_counts.columns = ['Document', 'Year', 'Genre', 'Concept', 'PlaceOccurenceCount']
df_place_distinct_counts = df_locations.groupby(['Document', 'Year', 'Genre'])[['Count']].sum().reset_index()
df_place_distinct_counts.columns = ['Document', 'Year', 'Genre', 'PlaceCount']
# %%
df_place_counts = pd.merge(df_place_distinct_counts, df_place_occurrences_counts, left_on="Document", right_on="Document")[['Document', 'Year_x', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']]
df_place_counts.columns = ['Document', 'Year', 'Concept', 'PlaceOccurenceCount', 'PlaceCount']
df_place_counts['Weight'] = df_place_counts['PlaceOccurenceCount'] / df_place_counts['PlaceCount']
# %%
#df_place_counts.loc[(df_place_counts.Document=='benedict-xvi_en_travels_2008_trav-ben-xvi-usa-program-20080415')]
df_place_cooccurrence_document = pd.merge(df_place_counts,
df_place_counts,
left_on=["Document", "Year"],
right_on=["Document", "Year"])[[ 'Document', 'Year', 'Concept_x', 'Concept_y', 'Weight_x', 'Weight_y' ]]
# %%
df_place_cooccurrence_document['Weight'] = df_place_cooccurrence_document['Weight_x'] * df_place_cooccurrence_document['Weight_y']
# Note: Concept had set as string to allow for comparison below, i.e. to use '<'
df_place_cooccurrence_document = df_place_cooccurrence_document.loc[(df_place_cooccurrence_document.Concept_x < df_place_cooccurrence_document.Concept_y)]
df_place_cooccurrence_document = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
# %%
df_place_cooccurrence_document.to_excel(writer, pope + '_cooc_doc')
# %%
df_place_cooccurrence_document = df_place_cooccurrence_document.set_index(['Concept_x', 'Concept_y'])
# %%
df_place_cooccurrence_corpus = df_place_cooccurrence_document.groupby(['Concept_x', 'Concept_y'])[['Weight']].sum().reset_index()
# %%
#df_place_cooccurrence_corpus = df_place_cooccurrence_document [['Document', 'Year', 'Concept_x', 'Concept_y', 'Weight']]
df_place_cooccurrence_corpus.to_excel(writer, pope + '_cooc_corpus')
#%%
writer.save()
|
[
"roger.mahler@hotmail.com"
] |
roger.mahler@hotmail.com
|
f1b9d705860e3e5f69e290b188025d10c52789f1
|
c4c159a21d2f1ea0d7dfaa965aeff01c8ef70dce
|
/flask/flaskenv/Lib/site-packages/tensorflow/python/autograph/core/unsupported_features_checker.py
|
9ecab32e6c5f01db35c77233cc55c757a9f80212
|
[] |
no_license
|
AhsonAslam/webapi
|
54cf7466aac4685da1105f9fb84c686e38f92121
|
1b2bfa4614e7afdc57c9210b0674506ea70b20b5
|
refs/heads/master
| 2020-07-27T06:05:36.057953
| 2019-09-17T06:35:33
| 2019-09-17T06:35:33
| 208,895,450
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 129
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:d0644b74e1e6d4d41084b1c1d32d62fc2a1adb15cd7c6141bd2a62448c182854
size 1815
|
[
"github@cuba12345"
] |
github@cuba12345
|
f954afca286ead0f30eadda260fb7ed77017edd1
|
d3efc82dfa61fb82e47c82d52c838b38b076084c
|
/Autocase_Result/FJZJJMM/YW_FJZJJMM_SZSJ_258.py
|
e1df1921dc57e77398bc293709c609467ced5724
|
[] |
no_license
|
nantongzyg/xtp_test
|
58ce9f328f62a3ea5904e6ed907a169ef2df9258
|
ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f
|
refs/heads/master
| 2022-11-30T08:57:45.345460
| 2020-07-30T01:43:30
| 2020-07-30T01:43:30
| 280,388,441
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,338
|
py
|
#!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
from SqlData_Transfer import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from env_restart import *
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_FJZJJMM_SZSJ_258(xtp_test_case):
def setUp(self):
sql_transfer = SqlData_Transfer()
sql_transfer.transfer_fund_asset('YW_FJZJJMM_SZSJ_258')
clear_data_and_restart_sz()
Api.trade.Logout()
Api.trade.Login()
def test_YW_FJZJJMM_SZSJ_258(self):
title = '可用资金正好-深A本方最优买(可用资金=下单金额+费用)'
# 定义当前测试用例的期待值
# 期望状态:初始、未成交、部成、全成、部撤已报、部撤、已报待撤、已撤、废单、撤废、内部撤单
# xtp_ID和cancel_xtpID默认为0,不需要变动
case_goal = {
'期望状态': ['未成交','全成','部成'][trade_type],
'errorID': 0,
'errorMSG': '',
'是否生成报单': '是',
'是否是撤废': '否',
'xtp_ID': 0,
'cancel_xtpID': 0,
}
logger.warning(title)
# 定义委托参数信息------------------------------------------
# 参数:证券代码、市场、证券类型、证券状态、交易状态、买卖方向(B买S卖)、期望状态、Api
stkparm = QueryStkPriceQty('151133', '2', '24', '2', '0', 'B', case_goal['期望状态'], Api)
# 如果下单参数获取失败,则用例失败
if stkparm['返回结果'] is False:
rs = {
'用例测试结果': stkparm['返回结果'],
'测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
}
self.assertEqual(rs['用例测试结果'], True)
else:
wt_reqs = {
'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
'order_client_id':trade_type + 1,
'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
'ticker': stkparm['证券代码'],
'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_BUY'],
'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
'price': stkparm['涨停价'],
'quantity': 200,
'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
}
ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
CaseParmInsertMysql(case_goal, wt_reqs)
rs = serviceTest(Api, case_goal, wt_reqs)
logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
+ str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
self.assertEqual(rs['用例测试结果'], True) # 5
if __name__ == '__main__':
unittest.main()
|
[
"418033945@qq.com"
] |
418033945@qq.com
|
7933e38da4d9057e66aacf8c9acc9ba0b3e8b4e3
|
af61c369e3550643d47fba2445d9f279e412e15c
|
/basicSprite.py
|
2f0bfb2df06b261977e9f782873c230385348b8d
|
[] |
no_license
|
Rabidza/pygame_learningpython
|
45e900b5a8458a14e7df317de16a9e7cd18737fa
|
ef58d9ca977e2ea1406200ce04c3a32a440be66a
|
refs/heads/master
| 2020-06-03T15:13:38.419015
| 2015-02-18T16:16:30
| 2015-02-18T16:16:30
| 30,924,548
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 738
|
py
|
import pygame
from helpers import *
class Sprite(pygame.sprite.Sprite):
    """A static sprite positioned by its center point."""
    def __init__(self, centerPoint, image):
        pygame.sprite.Sprite.__init__(self)
        # Set the image and the rect
        self.image = image
        self.rect = image.get_rect()
        # Move the rect into the correct position
        self.rect.center = centerPoint
class Pellet(pygame.sprite.Sprite):
    """A pellet sprite anchored by its top-left corner.

    Loads a default image ('pellet.png' via helpers.load_image, with
    colorkey transparency) when no image is supplied.
    """
    def __init__(self, top_left, image = None):
        pygame.sprite.Sprite.__init__(self)
        if image == None:
            # Default appearance: load_image returns (surface, rect).
            self.image, self.rect = load_image('pellet.png',-1)
        else:
            self.image = image
            self.rect = image.get_rect()
        self.rect.topleft = top_left
|
[
"neillhenning@gmail.com"
] |
neillhenning@gmail.com
|
a0b40c1e4cfc595d8bc11fa49ffb5e77e2d600c3
|
238ebc43c3d54d2842de75fd8ddf0b0b0261906e
|
/SimulateData.py
|
eb704550b17512faa02f5b718ec6ed67b6f373b5
|
[] |
no_license
|
johndowen/CrossMgr
|
17c114ab80382b24ce0cdd228782bd000f513ea8
|
fc9eaf8ae5d4919cef3f1a3680c169be70cf356b
|
refs/heads/master
| 2021-06-28T03:14:41.682880
| 2017-09-17T00:35:26
| 2017-09-17T00:35:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,686
|
py
|
import random
import bisect
from Names import GetNameTeam
def SimulateData( riders=200 ):
# Generate random rider events.
random.seed( 10101021 )
raceMinutes = 8
mean = 8*60.0 / 8 # Average lap time.
var = mean/20.0 # Variance between riders.
lapsTotal = int(raceMinutes * 60 / mean + 3)
raceTime = mean * lapsTotal
errorPercent = 1.0/25.0
for nMid in (10,100,200,500,1000,2000,5000,10000,20000,50000):
if nMid >= riders:
break
numStart = nMid - riders//2
startOffset = 10
lapTimes = []
riderInfo = []
for num in xrange(numStart,numStart+riders+1):
t = 0
if num < numStart + riders // 2:
mu = random.normalvariate( mean, mean/20.0 ) # Rider's random average lap time.
riderInfo.append( [num] + list(GetNameTeam(True)) )
else:
mu = random.normalvariate( mean * 1.15, mean/20.0 ) # These riders are slower, on average.
riderInfo.append( [num] + list(GetNameTeam(False)) )
t += startOffset # Account for offset start.
for laps in xrange(lapsTotal):
t += random.normalvariate( mu, var/2.0 ) # Rider's lap time.
if random.random() > errorPercent: # Respect error rate.
lapTimes.append( (t, num) )
lapTimes.sort()
# Get the times and leaders for each lap.
leaderTimes = [lapTimes[0][0]]
leaderNums = [lapTimes[0][1]]
numSeen = set()
for t, n in lapTimes:
if n in numSeen:
leaderTimes.append( t )
leaderNums.append( n )
numSeen.clear()
numSeen.add( n )
# Find the leader's time after the end of the race.
iLast = bisect.bisect_left( leaderTimes, raceMinutes * 60.0, hi = len(leaderTimes) - 1 )
if leaderTimes[iLast] < raceMinutes * 60.0:
iLast += 1
# Trim out everything except next arrivals after the finish time.
tLeaderLast = leaderTimes[iLast]
numSeen = set()
afterLeaderFinishEvents = [evt for evt in lapTimes if evt[0] >= tLeaderLast]
lapTimes = [evt for evt in lapTimes if evt[0] < tLeaderLast]
# Find the next unique arrival of all finishers.
lastLapFinishers = []
tStop = raceMinutes * 60.0
numSeen = set()
for t, n in afterLeaderFinishEvents:
if n not in numSeen:
numSeen.add( n )
lastLapFinishers.append( (t, n) )
lapTimes.extend( lastLapFinishers )
categories = [
{'name':'Junior', 'catStr':'{}-{}'.format(nMid-riders//2,nMid-1), 'startOffset':'00:00', 'distance':0.5, 'gender':'Men', 'numLaps':5},
{'name':'Senior', 'catStr':'{}-{}'.format(nMid,nMid+riders//2), 'startOffset':'00:{:02d}'.format(startOffset), 'distance':0.5, 'gender':'Women', 'numLaps':4}
]
return {
'raceMinutes': raceMinutes,
'lapTimes': lapTimes,
'categories': categories,
'riderInfo': riderInfo,
}
if __name__ == '__main__':
print SimulateData()['riderInfo']
|
[
"edward.sitarski@gmail.com"
] |
edward.sitarski@gmail.com
|
2ef45d0901e7aa9952c147ec2d1daccaef373028
|
e5d130e183b5dea1b7aad23a047c703fa0d2b3bf
|
/lightbus/transports/pool.py
|
b680ac6858890624f59a6c14e96bddbe072a9cae
|
[
"Apache-2.0"
] |
permissive
|
adamcharnock/lightbus
|
4a86428b8203bfe98f77a32375ac961ef398ce16
|
cf892779a9a9a8f69c789ffa83c24acfb7f9a336
|
refs/heads/master
| 2023-08-26T04:19:39.395735
| 2023-08-23T11:07:44
| 2023-08-23T11:07:44
| 94,617,214
| 193
| 22
|
Apache-2.0
| 2023-08-10T21:21:51
| 2017-06-17T10:39:23
|
Python
|
UTF-8
|
Python
| false
| false
| 7,438
|
py
|
import threading
from inspect import iscoroutinefunction, isasyncgenfunction
from typing import NamedTuple, List, TypeVar, Type, Generic, TYPE_CHECKING
from lightbus.exceptions import (
TransportPoolIsClosed,
CannotShrinkEmptyPool,
CannotProxySynchronousMethod,
CannotProxyPrivateMethod,
CannotProxyProperty,
)
if TYPE_CHECKING:
# pylint: disable=unused-import,cyclic-import
from lightbus.config import Config
from lightbus.transports.base import Transport
VT = TypeVar("VT", bound=Transport)
else:
VT = TypeVar("VT")
class TransportPool(Generic[VT]):
"""Pool for managing access to transports
This pool with function as a transparent proxy to the underlying transports.
In most cases you shouldn't need to access the underlying transports. If you
do you can use the context manage as follows:
async with transport_pool as transport:
transport.send_event(...)
Note that this pool will only perform pooling within the thread in which the
pool was created. If another thread uses the pool then the pool will be bypassed.
In this case, a new transport will always be created on checkout, and this
transport will then be immediately closed when checked back in.
This is because the pool will normally be closed sometime after the thread has
completed, at which point each transport in the pool will be closed. However, closing
the transport requires access to the event loop for the specific transport, but that
loop would have been closed when the thread shutdown. It therefore becomes impossible to
the transport cleanly. Therefore, in the case of threads, we create new transports on
checkout, and close and discard the transport on checkin.
This will have some performance impact for non-async user-provided-callables which need to
access the bus. These callables area run in a thread, and so will need fresh connections.
"""
def __init__(self, transport_class: Type[VT], transport_config: NamedTuple, config: "Config"):
self.transport_class = transport_class
self.transport_config = transport_config
self.config = config
self.closed = False
self.lock = threading.RLock()
self.pool: List[VT] = []
self.checked_out = set()
self.context_stack: List[VT] = []
self.home_thread = threading.current_thread()
def __repr__(self):
return f"<Pool of {self.transport_class.__name__} at 0x{id(self):02x} to {self}>"
def __hash__(self):
return hash((self.transport_class, self.transport_config))
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
# Here we create an un-opened transport and stringify it.
# This means we can display nice redis URLs when displaying the pool
# for debugging output.
transport = self._instantiate_transport()
return str(transport)
async def grow(self):
with self.lock:
new_transport = await self._create_transport()
self.pool.append(new_transport)
async def shrink(self):
with self.lock:
try:
old_transport = self.pool.pop(0)
except IndexError:
raise CannotShrinkEmptyPool(
"Transport pool is already empty, cannot shrink it further"
)
await self._close_transport(old_transport)
async def checkout(self) -> VT:
if self.closed:
raise TransportPoolIsClosed("Cannot get a connection, transport pool is closed")
if threading.current_thread() != self.home_thread:
return await self._create_transport()
else:
with self.lock:
if not self.pool:
await self.grow()
transport = self.pool.pop(0)
self.checked_out.add(transport)
return transport
async def checkin(self, transport: VT):
if threading.current_thread() != self.home_thread:
return await self._close_transport(transport)
else:
with self.lock:
self.checked_out.discard(transport)
self.pool.append(transport)
if self.closed:
await self._close_all()
@property
def free(self) -> int:
return len(self.pool)
@property
def in_use(self) -> int:
return len(self.checked_out)
@property
def total(self) -> int:
return self.free + self.in_use
async def __aenter__(self) -> VT:
transport = await self.checkout()
self.context_stack.append(transport)
return transport
async def __aexit__(self, exc_type, exc_val, exc_tb):
transport = self.context_stack.pop()
await self.checkin(transport)
async def close(self):
with self.lock:
self.closed = True
await self._close_all()
async def _close_all(self):
with self.lock:
while self.pool:
await self._close_transport(self.pool.pop())
def _instantiate_transport(self) -> VT:
"""Instantiate a transport without opening it"""
return self.transport_class.from_config(
config=self.config, **self.transport_config._asdict()
)
async def _create_transport(self) -> VT:
"""Return an opened transport"""
new_transport = self._instantiate_transport()
await new_transport.open()
return new_transport
async def _close_transport(self, transport: VT):
"""Close a specific transport"""
await transport.close()
def __getattr__(self, item):
async def fn_pool_wrapper(*args, **kwargs):
async with self as transport:
return await getattr(transport, item)(*args, **kwargs)
async def gen_pool_wrapper(*args, **kwargs):
async with self as transport:
async for value in getattr(transport, item)(*args, **kwargs):
yield value
attr = getattr(self.transport_class, item, None)
if not attr:
raise AttributeError(
f"Neither the transport pool {repr(self)} nor the transport with class "
f"{repr(self.transport_class)} has an attribute named {item}"
)
elif item[0] == "_":
raise CannotProxyPrivateMethod(
f"Cannot proxy private method calls to transport. Use the pool's async context or "
f"checkout() method if you really need to access private methods. (Private methods "
f"are ones whose name starts with an underscore)"
)
elif not callable(attr):
raise CannotProxyProperty(
f"Cannot proxy property access on transports. Use the pool's async context or "
f"checkout() method to get access to a transport directly."
)
else:
if iscoroutinefunction(attr):
return fn_pool_wrapper
elif isasyncgenfunction(attr):
return gen_pool_wrapper
else:
raise CannotProxySynchronousMethod(
f"{self.transport_class.__name__}.{item}() is synchronous "
"and must be accessed directly and not via the pool"
)
|
[
"adam@adamcharnock.com"
] |
adam@adamcharnock.com
|
563dfccd2fd271a2ae0edc1613952e7947965a62
|
58afefdde86346760bea40690b1675c6639c8b84
|
/leetcode/global-and-local-inversions/288943653.py
|
0f1c892c5fa61990ec2ad92c40c0f4af8ae7abd2
|
[] |
no_license
|
ausaki/data_structures_and_algorithms
|
aaa563f713cbab3c34a9465039d52b853f95548e
|
4f5f5124534bd4423356a5f5572b8a39b7828d80
|
refs/heads/master
| 2021-06-21T10:44:44.549601
| 2021-04-06T11:30:21
| 2021-04-06T11:30:21
| 201,942,771
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 712
|
py
|
# title: global-and-local-inversions
# detail: https://leetcode.com/submissions/detail/288943653/
# datetime: Fri Dec 27 19:09:22 2019
# runtime: 388 ms
# memory: 13.4 MB
class Solution:
    def isIdealPermutation(self, A: List[int]) -> bool:
        """Return True iff the number of global inversions equals the
        number of local inversions in A, a permutation of 0..len(A)-1.

        Every local inversion (A[i] > A[i+1]) is also a global inversion,
        so the two counts are equal exactly when every global inversion is
        local — which holds iff no element sits more than one slot away
        from its sorted position. The previous index-scanning version
        tracked this with fragile bookkeeping (`k`, nested while loops);
        this displacement check is equivalent, O(n) time, O(1) space.
        """
        return all(abs(value - index) <= 1 for index, value in enumerate(A))
|
[
"ljm51689@gmail.com"
] |
ljm51689@gmail.com
|
3cb9259d4f4214fc9346777f14b80e8f08b66957
|
e34dfe70b30e584d8b1992377b1b4f8a08235824
|
/cloudmesh/common/console.py
|
7042af40082ed1d6fcf2d07ae6ca9ec0509d795b
|
[
"Python-2.0",
"Apache-2.0"
] |
permissive
|
juaco77/cloudmesh-common
|
09efd91310f1d6fc5d34f60f4c34e63e8c6fc9ae
|
0bb330da363b8edb9e509a8138a3054978a8a390
|
refs/heads/master
| 2020-06-08T05:04:18.070674
| 2019-05-17T10:33:13
| 2019-05-17T10:33:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,233
|
py
|
"""
Printing messages in a console
"""
from __future__ import print_function
import textwrap
import traceback
import colorama
from colorama import Fore, Back, Style
colorama.init()
def indent(text, indent=2, width=128):
    """Wrap *text* to *width* columns, indenting every line.

    :param text: the text to format
    :param indent: number of leading spaces applied to each wrapped line
    :param width: maximum width of each line, indentation included
    :return: the wrapped, indented text as a single newline-joined string
    """
    pad = " " * indent
    wrapped = textwrap.wrap(text,
                            width=width,
                            initial_indent=pad,
                            subsequent_indent=pad)
    return "\n".join(wrapped)
class Console(object):
    """
    A simple way to print in a console terminal in color. Instead of using
    simply the print statement you can use special methods to indicate
    warnings, errors, ok and regular messages.

    Example Usage::

        Console.warning("Warning")
        Console.error("Error")
        Console.info("Info")
        Console.msg("msg")
        Console.ok("Success")

    One can switch the color mode off with::

        Console.color = False
        Console.error("Error")

    The color will be switched on by default.
    """
    color = True  # when False, messages are printed without ANSI colors
    debug = True  # when False, error() suppresses stack traces

    # color names -> colorama / ANSI escape sequences
    theme_color = {
        'HEADER': Fore.MAGENTA,
        'BLACK': Fore.BLACK,
        'CYAN': Fore.CYAN,
        'WHITE': Fore.WHITE,
        'BLUE': Fore.BLUE,
        'OKBLUE': Fore.BLUE,
        'OKGREEN': Fore.GREEN,
        'GREEN': Fore.GREEN,
        'FAIL': Fore.RED,
        'WARNING': Fore.MAGENTA,
        'RED': Fore.RED,
        'ENDC': '\033[0m',
        'BOLD': "\033[1m",
    }
    # same keys with empty values: used when color output is disabled
    theme_bw = {
        'HEADER': '',
        'BLACK': '',
        'CYAN': '',
        'WHITE': '',
        'BLUE': '',
        'OKBLUE': '',
        'OKGREEN': '',
        'GREEN': '',
        'FAIL': '',
        'WARNING': '',
        'RED': '',
        'ENDC': '',
        'BOLD': "",
    }
    theme = theme_color

    @classmethod
    def set_debug(cls, on=True):
        """
        sets debugging on or off

        :param on: if True debugging is enabled
        :return:
        """
        cls.debug = on

    @staticmethod
    def set_theme(color=True):
        """
        defines if the console messages are printed in color

        :param color: if True it is printed in color
        :return:
        """
        if color:
            Console.theme = Console.theme_color
        else:
            Console.theme = Console.theme_bw
        Console.color = color

    @staticmethod
    def get(name):
        """
        returns the escape sequence for the named theme color

        :param name: the name of the theme color
        :return: the escape sequence; falls back to 'BLACK' if unknown
        """
        if name in Console.theme:
            return Console.theme[name]
        else:
            return Console.theme['BLACK']

    @staticmethod
    def txt_msg(message, width=79):
        """
        wraps a message to the given width without printing it

        :param message: the message to wrap
        :param width: the width of the line
        :return: the wrapped message
        """
        return textwrap.fill(message, width=width)

    @staticmethod
    def msg(*message):
        """
        prints all arguments joined with single spaces

        :param message: the message parts to print
        :return:
        """
        # local renamed from ``str`` so the builtin is not shadowed
        text = " ".join(message)
        print(text)

    @classmethod
    def error(cls, message, prefix=True, traceflag=False):
        """
        prints an error message

        :param message: the message
        :param prefix: when True the message is prefixed with "ERROR: "
        :param traceflag: if true the stack trace is retrieved and printed
        :return:
        """
        message = message or ""
        if prefix:
            text = "ERROR: "
        else:
            text = ""
        if cls.color:
            cls.cprint('FAIL', text, str(message))
        else:
            print(cls.txt_msg(text + str(message)))
        if traceflag and cls.debug:
            trace = traceback.format_exc().strip()
            if trace:
                print()
                print("Trace:")
                print("\n ".join(str(trace).splitlines()))
                print()

    @staticmethod
    def TODO(message, prefix=True, traceflag=True):
        """
        prints a TODO message

        :param message: the message
        :param prefix: if set to true it prints TODO: as prefix
        :param traceflag: if true the stack trace is retrieved and printed
        :return:
        """
        message = message or ""
        if prefix:
            text = "TODO: "
        else:
            text = ""
        if Console.color:
            Console.cprint('FAIL', text, str(message))
        else:
            # BUG FIX: msg() prints and returns None, so the previous
            # print(Console.msg(...)) emitted a stray "None" line.
            Console.msg(text + str(message))
        trace = traceback.format_exc().strip()
        if traceflag and trace != "None":
            print()
            print("\n".join(str(trace).splitlines()))
            print()

    @staticmethod
    def debug_msg(message):
        """
        prints a debug message

        :param message: the message
        :return:
        """
        message = message or ""
        if Console.color:
            Console.cprint('RED', 'DEBUG: ', message)
        else:
            # BUG FIX: was print(Console.msg(...)), which printed "None"
            Console.msg('DEBUG: ' + message)

    @staticmethod
    def info(message):
        """
        prints an informational message

        :param message: the message
        :return:
        """
        message = message or ""
        if Console.color:
            Console.cprint('OKBLUE', "INFO: ", message)
        else:
            # BUG FIX: was print(Console.msg(...)), which printed "None"
            Console.msg("INFO: " + message)

    @staticmethod
    def warning(message):
        """
        prints a warning

        :param message: the message
        :return:
        """
        message = message or ""
        if Console.color:
            Console.cprint('WARNING', "WARNING: ", message)
        else:
            # BUG FIX: was print(Console.msg(...)), which printed "None"
            Console.msg("WARNING: " + message)

    @staticmethod
    def ok(message):
        """
        prints an ok message

        :param message: the message
        :return:
        """
        message = message or ""
        if Console.color:
            Console.cprint('OKGREEN', "", message)
        else:
            # BUG FIX: was print(Console.msg(...)), which printed "None"
            Console.msg(message)

    @staticmethod
    def cprint(color, prefix, message):
        """
        prints a message in a given color

        :param color: the color as defined in the theme
        :param prefix: the prefix (a string)
        :param message: the message
        :return:
        """
        message = message or ""
        prefix = prefix or ""
        print((Console.theme[color] +
               prefix +
               message +
               Console.theme['ENDC']))
#
# Example
#
if __name__ == "__main__":
    # Demonstrate each message type, first in color, then in black & white.
    print(Console.color)
    print(Console.theme)
    for emit, text in [(Console.warning, "Warning"),
                       (Console.error, "Error"),
                       (Console.info, "Info"),
                       (Console.msg, "msg"),
                       (Console.ok, "Ok")]:
        emit(text)
    Console.color = False
    print(Console.color)
    Console.error("Error")
    # Raw colorama usage, followed by a full terminal reset.
    print(Fore.RED + 'some red text')
    print(Back.GREEN + 'and with a green background')
    print(Style.DIM + 'and in dim text')
    print(Fore.RESET + Back.RESET + Style.RESET_ALL)
    print('back to normal now')
|
[
"laszewski@gmail.com"
] |
laszewski@gmail.com
|
6f9cd1e5b7498d442628bca6592c84f90f1d02c0
|
82f993631da2871933edf83f7648deb6c59fd7e4
|
/w1/L1/12.py
|
4e40656a6ec9bba93b7855da255ff4c9ddd100ee
|
[] |
no_license
|
bobur554396/PPII2021Summer
|
298f26ea0e74c199af7b57a5d40f65e20049ecdd
|
7ef38fb4ad4f606940d2ba3daaa47cbd9ca8bcd2
|
refs/heads/master
| 2023-06-26T05:42:08.523345
| 2021-07-24T12:40:05
| 2021-07-24T12:40:05
| 380,511,125
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 369
|
py
|
# Sample input:
#   4
#   4 10 -1 100
n = int(input())  # declared element count (the split below reads them all anyway)

# Parse one whitespace-separated line of integers.
numbers = [int(token) for token in input().split()]
print(numbers)

# Manual accumulation of the positive values (kept for reference with the
# comprehension-based sum printed below — both compute the same total).
positive_total = 0
for value in numbers:
    if value > 0:
        positive_total += value

print(sum([value for value in numbers if value > 0]))
|
[
"bobur.muhsimbaev@gmail.com"
] |
bobur.muhsimbaev@gmail.com
|
9304946f7f5ed9562d7a3dbb6c52486fd296a7a1
|
9ef502b92bd218e919c65513e835c15c32667e8f
|
/samsung_load_0113.py
|
75e8319216cf77777569806bc31afb952c0b80c3
|
[] |
no_license
|
YoungriKIM/samsung_stock
|
034bc586440ab04531bb8d0b951747377c340966
|
f15b6a3ebc3db76f960fc8f138dba7e43e345ef4
|
refs/heads/main
| 2023-04-14T03:20:51.169497
| 2021-03-25T08:35:48
| 2021-03-25T08:35:48
| 351,362,762
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 794
|
py
|
import numpy as np

# Pull in the arrays produced by the preprocessing step.
_split_names = ('x_train', 'y_train', 'x_val', 'y_val',
                'x_test', 'y_test', 'x_pred')
x_train, y_train, x_val, y_val, x_test, y_test, x_pred = (
    np.load('../data/npy/samsung_%s.npy' % name) for name in _split_names
)

from tensorflow.keras.models import load_model

# Restore the best checkpoint saved during training.
model = load_model('../data/modelcheckpoint/samsung_14-891193.4375.hdf5')

# 4. Evaluate and predict
result = model.evaluate(x_test, y_test, batch_size=1)
print('mse: ', result[0])
print('mae: ', result[1])

y_pred = model.predict(x_pred)
print('1/14일 삼성주식 종가: ', y_pred)

# Recorded results:
# mse:  1286656.875
# mae:  825.32763671875
# closing price for 1/14: [[90572.59]]
|
[
"lemontleo0311@gmail.com"
] |
lemontleo0311@gmail.com
|
e6ba2e66f4df8af86c5e31215b5c3d8973ecf055
|
81302ee42c1b3c25ce1566d70a782ab5525c7892
|
/nr/nr_band_matching/autocorrelation_full_chain.py
|
aba89bd8971c7b2b106fb1a5a0ea7d38951568ae
|
[] |
no_license
|
mdanthony17/neriX
|
5dd8ce673cd340888d3d5e4d992f7296702c6407
|
2c4ddbb0b64e7ca54f30333ba4fb8f601bbcc32e
|
refs/heads/master
| 2020-04-04T06:01:25.200835
| 2018-06-05T00:37:08
| 2018-06-05T00:46:11
| 49,095,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,812
|
py
|
#!/usr/bin/python
# NOTE(review): Python 2 script (print statements, cPickle) — do not run under Python 3.
# Loads pickled emcee samplers for an NR-band yields fit and prints the
# integrated autocorrelation time of the walker-averaged chain.
import sys, array, os
#sys.path.insert(0, '..')
import ROOT as root
from rootpy.plotting import Hist, Hist2D, Canvas, Legend
import nr_band_config
import numpy as np
import corner
import cPickle as pickle
import time, emcee
# Command line: <filename> <anode kV> <cathode kV> <num walkers>
if len(sys.argv) != 5:
    print 'Use is python perform_full_matching.py <filename> <anode setting> <cathode setting> <num walkers> [<deviation_from_nest(efficiency fit only!!!)>]'
    sys.exit()
filename = sys.argv[1]
anode_setting = float(sys.argv[2])
cathode_setting = float(sys.argv[3])
num_walkers = int(sys.argv[4])
nameOfResultsDirectory = nr_band_config.results_directory_name
l_plots = ['plots', filename]
# Results live under <results>/yields_fit/<cathode>kV_<anode>kV/<filename>/
dir_specifier_name = '%.3fkV_%.1fkV' % (cathode_setting, anode_setting)
nameOfResultsDirectory += '/yields_fit'
sPathToFile = '%s/%s/%s/sampler_dictionary.p' % (nameOfResultsDirectory, dir_specifier_name, filename)
if os.path.exists(sPathToFile):
    dSampler = pickle.load(open(sPathToFile, 'r'))
    l_chains = []
    for sampler in dSampler[num_walkers]:
        l_chains.append(sampler['_chain'])
    # Concatenate the chains of every sampler run along the step axis.
    a_full_chain = np.concatenate(l_chains, axis=1)
    #print a_full_chain.shape
    l_chains = dSampler[num_walkers][-1]['_chain'] # look at last sampler only (can change)
    print 'Successfully loaded sampler!'
else:
    print sPathToFile
    print 'Could not find file!'
    sys.exit()
# Integrated autocorrelation time, averaged over walkers first.
print emcee.autocorr.integrated_time(np.mean(a_full_chain, axis=0), axis=0,
                                     low=10, high=None, step=1, c=2,
                                     fast=False)
"""
# need to figure this out
if not fit_efficiency:
    numDim = 36
else:
    numDim = 3
lLabelsForCorner = ['py_0', 'py_1', 'py_2', 'py_3', 'py_4', 'py_5', 'py_6', 'py_7', 'qy_0', 'qy_1', 'qy_2', 'qy_3', 'qy_4', 'qy_5', 'qy_6', 'qy_7', 'intrinsic_res_s1', 'intrinsic_res_s2', 'g1_value', 'spe_res_rv', 'g2_value', 'gas_gain_rv', 'gas_gain_width_rv', 'pf_eff_par0', 'pf_eff_par1', 's1_eff_par0', 's1_eff_par1', 's2_eff_par0', 's2_eff_par1', 'pf_stdev_par0', 'pf_stdev_par1', 'pf_stdev_par2', 'exciton_to_ion_par0_rv', 'exciton_to_ion_par1_rv', 'exciton_to_ion_par2_rv', 'scale_par']
if fit_efficiency:
    lLabelsForCorner = ['scale', 's2_eff_par0', 's2_eff_par1']
samples = aSampler[:, -5:, :].reshape((-1, numDim))
start_time = time.time()
print 'Starting corner plot...\n'
fig = corner.corner(samples, labels=lLabelsForCorner, quantiles=[0.16, 0.5, 0.84], show_titles=True, title_kwargs={"fontsize": 12})
print 'Corner plot took %.3f minutes.\n\n' % ((time.time()-start_time)/60.)
# path for save
sPathForSave = './'
for directory in l_plots:
    sPathForSave += directory + '/'
if not os.path.exists(sPathForSave):
    os.makedirs(sPathForSave)
plot_name = 'nr_band_corner_%s' % (filename)
plot_name = 'yields_fit_%s' % (plot_name)
fig.savefig('%s%s.png' % (sPathForSave, plot_name))
"""
|
[
"mda2149@columbia.edu"
] |
mda2149@columbia.edu
|
27f1d1e42412bfb3574bdec543ba0703469f2fce
|
82f6a6c50a1fef2d7522a43cc4f60e5ff80b37a8
|
/solutions/Missing Number/solution.py
|
0bf89957ba6d64c0deea0d059f647ac75434429a
|
[
"MIT"
] |
permissive
|
nilax97/leetcode-solutions
|
ca0f9545ce70975617738f053e0935fac00b04d4
|
d3c12f2b289662d199510e0431e177bbf3cda121
|
refs/heads/master
| 2023-05-14T02:21:48.893716
| 2021-06-08T13:16:53
| 2021-06-08T13:16:53
| 374,466,870
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Return the one value of 0..len(nums) absent from nums.

        The full range 0..n sums to a known total; subtracting the actual
        sum leaves exactly the missing value.
        """
        expected_total = sum(range(len(nums) + 1))
        return expected_total - sum(nums)
|
[
"agarwal.nilaksh@gmail.com"
] |
agarwal.nilaksh@gmail.com
|
173de47073bcfee2292415ce0e9b944d48e315cb
|
d912423117d96cd67d23bab87c0773a07d962cc1
|
/backend/socket_chat/consumers/main.py
|
a923f06cb91d42b37282f3545803320df8b675de
|
[] |
no_license
|
modekano/ChatApp
|
b98f9081235c976642d024d56d1531b5120a04cf
|
22cca9f3d4c25a93ca255d6616f61773da757d18
|
refs/heads/master
| 2020-08-19T06:03:45.010063
| 2019-10-17T11:17:07
| 2019-10-17T11:17:07
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,557
|
py
|
from backend.socket_chat.consumers.base import BaseConsumer
from channels.db import database_sync_to_async
from backend.profiles.models import Profile
from backend.socket_chat.consumers.dialog import DialogConsumer
class MainConsumer(DialogConsumer, BaseConsumer):
    """Main websocket consumer: relays channel-layer group events to the
    connected user and joins that user's dialog/group channels once
    authentication succeeds."""
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # ids of the dialogs / groups this user participates in;
        # populated by get_user_channels() after authentication
        self.dialogs = []
        self._groups = []
    async def channels_message(self, message):
        """ Redirect Group messages to each person """
        await self._send_message(message['data'], event=message['event'])
    async def connect_users(self, message):
        """ Connect user to rooms """
        users = message['data']['users']
        room = message['data']['room']
        room_data = message['data']['room_data']
        event = message['event']
        # only act when this consumer's user is among the invitees
        if self.user.id in users:
            # NOTE(review): debug prints left in; consider logging instead
            print('connecting %s to %s' % (self.user.id, room))
            print(room_data, room)
            await self.channel_layer.group_add(room, self.channel_name)
            # each user receives only their own slice of room_data
            await self._send_message(room_data[self.user.id], event=event)
    async def on_authenticate_success(self):
        """ Execute after user authenticate """
        await self.get_user_channels(self.user)
        await self.channel_layer.group_add('general', self.channel_name)
        # connect to channel for all groups
        if self.dialogs:
            for dialog in self.dialogs:
                await self.channel_layer.group_add(f'dialog_{dialog}', self.channel_name)
        if self._groups:
            for group in self._groups:
                await self.channel_layer.group_add(f'group_{group}', self.channel_name)
    async def disconnect(self, *args, **kwargs):
        """ Discard from all channels """
        if self.dialogs:
            for dialog in self.dialogs:
                await self.channel_layer.group_discard(
                    f'dialog_{dialog}',
                    self.channel_name
                )
        if self._groups:
            for group in self._groups:
                await self.channel_layer.group_discard(
                    f'group_{group}',
                    self.channel_name
                )
    @database_sync_to_async
    def get_user_channels(self, user):
        """ Get all user's dialogs & groups id """
        # runs in a thread: ORM access is synchronous
        profile = Profile.objects.get(user=user)
        for dialog in profile.dialogs.values():
            self.dialogs.append(dialog.get('id'))
        for group in profile.groups.values():
            self._groups.append(group.get('id'))
|
[
"kostya.nik.3854@gmail.com"
] |
kostya.nik.3854@gmail.com
|
4d9b59df5f0fe4ca4796d0121a12dc0208a93d3e
|
f5b7b87d0de1459c284b6ebf3aa21c6a96e52207
|
/broadgauge/views/auth.py
|
8d91aca9aa0d2097097fb9062d97b809ab2611b1
|
[] |
no_license
|
iambibhas/broadgauge
|
cfbce9bbebdc5337918df7b378810a53c9a68f8b
|
381816cb9c288b071b44f189d662611cdc57e58b
|
refs/heads/master
| 2021-01-18T09:01:32.155941
| 2014-08-15T11:42:58
| 2014-08-15T11:42:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,323
|
py
|
import web
import json
from .. import account
from .. import oauth
from .. import forms
from ..sendmail import sendmail
from ..flash import flash
from ..models import User, Organization
from ..template import render_template
# web.py URL dispatch table: alternating (path regex, handler class name).
urls = (
    "/login", "login",
    "/logout", "logout",
    "/oauth/(github|google|facebook)", "oauth_callback",
    "(/trainers/signup|/orgs/signup|/login)/reset", "signup_reset",
    "(/trainers/signup|/orgs/signup|/login)/(github|google|facebook)", "signup_redirect",
    "/trainers/signup", "trainer_signup",
    "/orgs/signup", "org_signup",
)
def get_oauth_redirect_url(provider):
    """Build the OAuth callback URL for *provider*.

    Some providers are picky about which loopback spelling they accept,
    so the host part is swapped for the spelling each one tolerates.
    """
    base = web.ctx.home
    if provider == 'google' and base == 'http://0.0.0.0:8080':
        # google doesn't like 0.0.0.0
        base = 'http://127.0.0.1:8080'
    elif provider == 'facebook' and base == 'http://127.0.0.1:8080':
        # facebook doesn't like 127.0.0.1
        base = 'http://0.0.0.0:8080'
    return "{home}/oauth/{provider}".format(home=base, provider=provider)
def get_oauth_data():
    """Return the OAuth userdata stored in the 'oauth' cookie.

    Returns None when the cookie is absent or holds invalid JSON.
    """
    raw = web.cookies().get('oauth')
    if not raw:
        return None
    try:
        return json.loads(raw)
    except ValueError:
        return None
class login:
    """Login page: completes sign-in when an OAuth cookie identifies a
    known user, otherwise renders the login form."""
    def GET(self):
        userdata = get_oauth_data()
        if not userdata:
            # no OAuth cookie yet: plain login page
            return render_template("login.html", userdata=None)
        user = User.find(email=userdata['email'])
        if not user:
            # authenticated with the provider, but unknown to us
            return render_template("login.html", userdata=userdata,
                                   error=True)
        account.set_login_cookie(user.email)
        raise web.seeother("/dashboard")
class logout:
    """Log the user out and bounce back to the referring page."""
    def POST(self):
        account.logout()
        raise web.seeother(web.ctx.env.get('HTTP_REFERER', '/'))
class oauth_callback:
    """Handle the provider's OAuth redirect: log known users in, stash the
    provider payload in a cookie for new users, or flash a failure."""
    def GET(self, service):
        i = web.input(code=None, state="/")
        if i.code:
            redirect_uri = get_oauth_redirect_url(service)
            client = oauth.oauth_service(service, redirect_uri)
            userdata = client.get_userdata(i.code)
            if userdata:
                # login or signup
                t = User.find(email=userdata['email'])
                if t:
                    account.set_login_cookie(t.email)
                    raise web.seeother("/dashboard")
                else:
                    # unknown user: remember the OAuth payload and resume
                    # whatever flow was encoded in ``state``
                    web.setcookie("oauth", json.dumps(userdata))
                    raise web.seeother(i.state)
        # reached only when no code was given or userdata lookup failed
        flash("Authorization failed, please try again.", category="error")
        raise web.seeother(i.state)
class signup_redirect:
    """Send the user to the provider's authorize page; *base* rides along
    as OAuth state so the callback can resume the right flow."""
    def GET(self, base, provider):
        client = oauth.oauth_service(provider, get_oauth_redirect_url(provider))
        raise web.seeother(client.get_authorize_url(state=base))
class signup_reset:
    """Drop the stored OAuth cookie so the user can restart the flow."""
    def GET(self, base):
        # TODO: This should be a POST request, not GET
        # expires=-1 deletes the cookie immediately
        web.setcookie("oauth", "", expires=-1)
        raise web.seeother(base)
class trainer_signup:
    """Trainer signup flow: OAuth data arrives via cookie, the form
    collects the rest, and a trainer User record is created."""
    FORM = forms.TrainerSignupForm
    TEMPLATE = "trainers/signup.html"
    def GET(self):
        userdata = get_oauth_data()
        if userdata:
            # if already logged in, send him to dashboard
            user = self.find_user(email=userdata['email'])
            if user:
                if not user.is_trainer():
                    user.make_trainer()
                account.set_login_cookie(user.email)
                raise web.seeother("/dashboard")
        form = self.FORM(userdata)
        return render_template(self.TEMPLATE, form=form, userdata=userdata)
    def POST(self):
        userdata = get_oauth_data()
        if not userdata:
            # no OAuth cookie: fall back to rendering the signup page
            return self.GET()
        i = web.input()
        form = self.FORM(i)
        if not form.validate():
            return render_template(self.TEMPLATE, form=form)
        return self.signup(i, userdata)
    def signup(self, i, userdata):
        # create the trainer account from form input + OAuth profile
        user = User.new(
            name=i.name,
            email=userdata['email'],
            username=i.username,
            phone=i.phone,
            city=i.city,
            github=userdata.get('github'),
            is_trainer=True)
        account.set_login_cookie(user.email)
        flash("Thank you for signing up as a trainer!")
        sendmail("emails/trainers/welcome.html",
                 subject="Welcome to Python Express",
                 to=user.email,
                 trainer=user)
        raise web.seeother("/dashboard")
    def find_user(self, email):
        # hook point: org_signup overrides this to allow repeat signups
        return User.find(email=email)
class org_signup(trainer_signup):
    """Organization signup: reuses the trainer flow with its own form,
    template and signup() step."""
    FORM = forms.OrganizationSignupForm
    TEMPLATE = "orgs/signup.html"
    def find_user(self, email):
        # We don't limit number of org signups per person
        return None
    def signup(self, i, userdata):
        # ensure a User exists for the OAuth identity, then create the
        # organization and attach the user with the chosen role
        user = User.find(email=userdata['email'])
        if not user:
            user = User.new(name=userdata['name'], email=userdata['email'])
        org = Organization.new(name=i.name,
                               city=i.city)
        org.add_member(user, i.role)
        account.set_login_cookie(user.email)
        flash("Thank you for registering your organization with Python Express!")
        raise web.seeother("/orgs/{}".format(org.id))
|
[
"anandology@gmail.com"
] |
anandology@gmail.com
|
630c9af1fd5f87769d2cd87621e901ba2e383c7c
|
99c4d4a6592fded0e8e59652484ab226ac0bd38c
|
/code/batch-2/dn13 - objektni minobot/M-17021-1547.py
|
e7ab6dbf8eecabc84d9990edc02404615aaba381
|
[] |
no_license
|
benquick123/code-profiling
|
23e9aa5aecb91753e2f1fecdc3f6d62049a990d5
|
0d496d649247776d121683d10019ec2a7cba574c
|
refs/heads/master
| 2021-10-08T02:53:50.107036
| 2018-12-06T22:56:38
| 2018-12-06T22:56:38
| 126,011,752
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,849
|
py
|
class Minobot:
def __init__(self):
self.x=0
self.y=0
self.direction=90
self.tab=[]
def koordinate(self):
return self.x,self.y
def naprej(self, d):
self.tab.append(['naprej',d])
if self.direction == 0 or self.direction == 360:
self.y+=d
elif self.direction == 90 or self.direction == -90:
self.x+=d
elif self.direction == 180 or self.direction == -180:
self.y-=d
elif self.direction == 270 or self.direction == -270:
self.x-=d
def desno(self):
self.tab.append(['desno',90])
self.direction += 90
if self.direction >= 360:
self.direction = 0
def levo(self):
self.tab.append(['levo',-90])
self.direction -= 90
if self.direction <= 0:
self.direction = 360
def razdalja(self):
return abs(self.x)+abs(self.y)
def razveljavi(self):
print(self.tab)
if self.tab:
if self.tab[len(self.tab)-1][0] == 'naprej':
self.naprej(-(self.tab[len(self.tab)-1][1]))
elif self.tab[len(self.tab)-1][0] == 'desno':
self.levo()
elif self.tab[len(self.tab)-1][0] == 'levo':
self.desno()
self.tab.pop()
self.tab.pop()
import unittest
class TestObvezna(unittest.TestCase):
    """Mandatory part: movement, turning, and Manhattan distance."""
    def test_minobot(self):
        a = Minobot()
        b = Minobot()
        self.assertEqual(a.koordinate(), (0, 0))
        self.assertEqual(b.koordinate(), (0, 0))
        self.assertEqual(a.razdalja(), 0)
        self.assertEqual(b.razdalja(), 0)
        a.naprej(1)
        self.assertEqual(a.koordinate(), (1, 0))
        self.assertEqual(b.koordinate(), (0, 0))
        self.assertEqual(a.razdalja(), 1)
        self.assertEqual(b.razdalja(), 0)
        a.naprej(2)
        self.assertEqual(a.koordinate(), (3, 0))
        self.assertEqual(b.koordinate(), (0, 0))
        self.assertEqual(a.razdalja(), 3)
        self.assertEqual(b.razdalja(), 0)
        b.naprej(2)
        self.assertEqual(a.koordinate(), (3, 0))
        self.assertEqual(b.koordinate(), (2, 0))
        self.assertEqual(a.razdalja(), 3)
        self.assertEqual(b.razdalja(), 2)
        a.desno()  # now facing down
        a.naprej(4)
        self.assertEqual(a.koordinate(), (3, -4))
        self.assertEqual(b.koordinate(), (2, 0))
        self.assertEqual(a.razdalja(), 7)
        self.assertEqual(b.razdalja(), 2)
        a.desno()  # facing left
        a.naprej(1)
        self.assertEqual(a.koordinate(), (2, -4))
        self.assertEqual(b.koordinate(), (2, 0))
        self.assertEqual(a.razdalja(), 6)
        self.assertEqual(b.razdalja(), 2)
        a.desno()  # facing up
        a.naprej(1)
        self.assertEqual(a.koordinate(), (2, -3))
        self.assertEqual(b.koordinate(), (2, 0))
        self.assertEqual(a.razdalja(), 5)
        self.assertEqual(b.razdalja(), 2)
        a.desno()  # facing right
        a.naprej(3)
        self.assertEqual(a.koordinate(), (5, -3))
        self.assertEqual(b.koordinate(), (2, 0))
        self.assertEqual(a.razdalja(), 8)
        self.assertEqual(b.razdalja(), 2)
        b.levo()  # facing up
        b.naprej(3)
        self.assertEqual(b.koordinate(), (2, 3))
        self.assertEqual(b.razdalja(), 5)
        b.levo()  # facing left
        b.naprej(3)
        self.assertEqual(b.koordinate(), (-1, 3))
        self.assertEqual(b.razdalja(), 4)
        a.naprej(5)
        self.assertEqual(a.koordinate(), (10, -3))
        self.assertEqual(a.razdalja(), 13)
class TestDodatna(unittest.TestCase):
    """Extra part: razveljavi() must undo moves and turns and be a no-op
    once the history is empty."""
    def test_undo(self):
        a = Minobot()
        a.desno()  # facing down
        a.naprej(4)
        a.levo()  # facing right
        a.naprej(1)
        a.naprej(2)
        self.assertEqual(a.koordinate(), (3, -4))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (1, -4))
        a.naprej(1)
        self.assertEqual(a.koordinate(), (2, -4))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (1, -4))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, -4))
        a.naprej(1)
        self.assertEqual(a.koordinate(), (1, -4))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, -4))
        a.razveljavi()  # facing down again
        self.assertEqual(a.koordinate(), (0, -4))
        a.naprej(2)
        self.assertEqual(a.koordinate(), (0, -6))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, -4))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, 0))
        a.naprej(3)
        self.assertEqual(a.koordinate(), (0, -3))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # facing right again
        self.assertEqual(a.koordinate(), (0, 0))
        a.naprej(3)
        self.assertEqual(a.koordinate(), (3, 0))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # must not crash
        self.assertEqual(a.koordinate(), (0, 0))
        a.naprej(2)
        self.assertEqual(a.koordinate(), (2, 0))
        a.razveljavi()
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # must not crash
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # must not crash
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # must not crash
        self.assertEqual(a.koordinate(), (0, 0))
        a.razveljavi()  # must not crash
        self.assertEqual(a.koordinate(), (0, 0))
if __name__ == "__main__":
    # run the Minobot test suites above when executed directly
    unittest.main()
|
[
"benjamin.fele@gmail.com"
] |
benjamin.fele@gmail.com
|
d4b371038a871ea6c4c51c8868534d2b5ff67817
|
c333b3cfb05f4bc08a682ca5f4d70b212e9624ff
|
/punyty/objects.py
|
45d95c22a7c1a12f50b8844fd42352e55fd3d51a
|
[
"MIT"
] |
permissive
|
jsheedy/punyty
|
a450f7daaf9e8b2acf5d861ac258e07e762c46c6
|
34d5bffc4cf85985537e199567c5ba2aa9105a05
|
refs/heads/master
| 2020-05-09T19:58:37.665508
| 2019-12-25T18:22:00
| 2019-12-25T18:22:00
| 181,391,798
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,551
|
py
|
from math import sqrt
import numpy as np
from .object3d import Object3D
class Tetrahedron(Object3D):
    """Regular tetrahedron centred on the origin: 4 vertices (alternating
    corners of a side-2 cube), 6 edges, 4 triangular faces."""
    vertices = np.array([
        [1, 1, 1],
        [-1, -1, 1],
        [1, -1, -1],
        [-1, 1, -1],
    ], dtype=np.float64)
    # every pair of vertices is connected
    edges = (
        (0, 1),
        (1, 2),
        (2, 3),
        (1, 3),
        (0, 2),
        (0, 3),
    )
    # triangles given as vertex-index triples
    polys = (
        (0, 1, 2),
        (0, 2, 3),
        (0, 3, 1),
        (3, 2, 1)
    )
class Cube(Object3D):
    """Axis-aligned cube of side 2 centred on the origin: vertices 0-3 on
    the z=-1 face, 4-7 on the z=+1 face."""
    vertices = np.array([
        [1, 1, -1],
        [-1, 1, -1],
        [-1, -1, -1],
        [1, -1, -1],
        [1, 1, 1],
        [-1, 1, 1],
        [-1, -1, 1],
        [1, -1, 1]
    ], dtype=np.float64)
    # 4 edges per face ring plus the 4 verticals joining the rings
    edges = (
        (0, 1),
        (1, 2),
        (2, 3),
        (3, 0),
        (4, 5),
        (5, 6),
        (6, 7),
        (7, 4),
        (0, 4),
        (1, 5),
        (2, 6),
        (3, 7),
    )
    # each square face split into two triangles (12 triangles total)
    polys = (
        (0, 1, 2),
        (2, 3, 0),
        (4, 7, 6),
        (6, 5, 4),
        (1, 5, 6),
        (6, 2, 1),
        (0, 3, 7),
        (7, 4, 0),
        (3, 2, 6),
        (6, 7, 3),
        (5, 1, 0),
        (0, 4, 5),
    )
class Octahedron(Object3D):
    """Regular octahedron: one unit vertex on each half-axis, 12 edges,
    8 triangular faces."""
    vertices = np.array([
        [1, 0, 0],
        [-1, 0, 0],
        [0, 1, 0],
        [0, -1, 0],
        [0, 0, 1],
        [0, 0, -1],
    ], dtype=np.float64)
    edges = (
        (0, 2),
        (0, 3),
        (0, 4),
        (0, 5),
        (1, 2),
        (1, 3),
        (1, 4),
        (1, 5),
        (2, 4),
        (2, 5),
        (3, 4),
        (3, 5),
    )
    # four faces around +y (vertex 2), four around -y (vertex 3)
    polys = (
        (2, 4, 0),
        (2, 0, 5),
        (2, 5, 1),
        (2, 1, 4),
        (3, 0, 4),
        (3, 5, 0),
        (3, 1, 5),
        (3, 4, 1),
    )
class Dodecahedron(Object3D):
    """Regular dodecahedron: a cube's 8 corners plus three mutually
    orthogonal golden-ratio rectangles (20 vertices, 30 edges).
    NOTE(review): no ``polys`` are defined — wireframe rendering only;
    confirm against Object3D's expectations."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # lay out as cube + 3 rects as on
        # https://en.wikipedia.org/wiki/Regular_dodecahedron?oldformat=true#Cartesian_coordinates
        phi = (1 + sqrt(5)) / 2  # golden ratio
        vertices = np.array([
            # cube
            [1, 1, 1],
            [1, -1, 1],
            [-1, -1, 1],
            [-1, 1, 1],
            [1, 1, -1],
            [1, -1, -1],
            [-1, -1, -1],
            [-1, 1, -1],
            # rectangle in the xy plane ("r"), indices 8-11
            [phi, 1/phi, 0],
            [phi, -1/phi, 0],
            [-phi, -1/phi, 0],
            [-phi, 1/phi, 0],
            # rectangle in the yz plane ("g"), indices 12-15
            [0, phi, 1/phi],
            [0, phi, -1/phi],
            [0, -phi, -1/phi],
            [0, -phi, 1/phi],
            # rectangle in the xz plane ("b"), indices 16-19
            [1/phi, 0, phi],
            [1/phi, 0, -phi],
            [-1/phi, 0, -phi],
            [-1/phi, 0, phi]
        ], dtype=np.float64)
        self.edges = (
            # one r/g/b vertex for each cube corner vertex
            (0, 8),
            (0, 12),
            (0, 16),
            (1, 9),
            (1, 15),
            (1, 16),
            (2, 10),
            (2, 15),
            (2, 19),
            (3, 11),
            (3, 12),
            (3, 19),
            (4, 8),
            (4, 13),
            (4, 17),
            (5, 9),
            (5, 14),
            (5, 17),
            (6, 10),
            (6, 14),
            (6, 18),
            (7, 11),
            (7, 13),
            (7, 18),
            # lace up the rects exterior edges
            # r
            (8, 9),
            (10, 11),
            # g
            (12, 13),
            (14, 15),
            # b
            (17, 18),
            (19, 16)
        )
        # scale down by 2*phi before converting; to_homogenous_coords is
        # inherited from Object3D
        self.vertices = self.to_homogenous_coords(vertices / (2*phi))
|
[
"joseph.sheedy@gmail.com"
] |
joseph.sheedy@gmail.com
|
8c958e900b806f0503625aae951c03d030a5cea1
|
ebd6f68d47e192da7f81c528312358cfe8052c8d
|
/swig/Examples/test-suite/python/template_typedef_cplx4_runme.py
|
25ac851fbff3855719300e610179db627047c152
|
[
"Apache-2.0",
"LicenseRef-scancode-swig",
"GPL-3.0-or-later",
"LicenseRef-scancode-unknown-license-reference",
"GPL-3.0-only"
] |
permissive
|
inishchith/DeepSpeech
|
965ad34d69eb4d150ddf996d30d02a1b29c97d25
|
dcb7c716bc794d7690d96ed40179ed1996968a41
|
refs/heads/master
| 2021-01-16T16:16:05.282278
| 2020-05-19T08:00:33
| 2020-05-19T08:00:33
| 243,180,319
| 1
| 0
|
Apache-2.0
| 2020-02-26T05:54:51
| 2020-02-26T05:54:50
| null |
UTF-8
|
Python
| false
| false
| 431
|
py
|
# SWIG test-suite runner: exercises the wrapped template_typedef_cplx4
# module for both the real (Sin) and complex (CSin) instantiations.
import string  # NOTE(review): appears unused
from template_typedef_cplx4 import *
#
# this is OK
#
s = Sin()
s.get_base_value()
s.get_value()
s.get_arith_value()
my_func_r(s)
make_Multiplies_double_double_double_double(s, s)
z = CSin()
z.get_base_value()
z.get_value()
z.get_arith_value()
my_func_c(z)
make_Multiplies_complex_complex_complex_complex(z, z)
#
# Here we fail
#
d = make_Identity_double()
my_func_r(d)
c = make_Identity_complex()
my_func_c(c)
|
[
"inishchith@gmail.com"
] |
inishchith@gmail.com
|
27ccdbea81862874e0b78a77232a7d471e5f184a
|
6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4
|
/Av2u6FKvzFvrtGEKS_18.py
|
4e2d4cf4fdcb9ad90f1ec69e7cba9c1c762d567b
|
[] |
no_license
|
daniel-reich/ubiquitous-fiesta
|
26e80f0082f8589e51d359ce7953117a3da7d38c
|
9af2700dbe59284f5697e612491499841a6c126f
|
refs/heads/master
| 2023-04-05T06:40:37.328213
| 2021-04-06T20:17:44
| 2021-04-06T20:17:44
| 355,318,759
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 846
|
py
|
# Do not touch this starter code but implement the reverse function at the
# end of the LinkedList class
class Node(object):
    """A single linked-list node: a value plus a pointer to the next node."""
    def __init__(self, data):
        self.data = data
        self.next = None
class LinkedList(object):
    """Singly linked list with O(1) tail insertion.

    ``reverse`` rebuilds the list from its traversal, matching the
    original starter-code contract.
    """

    def __init__(self):
        self.head = None
        self.tail = None

    def insert(self, data):
        """Append *data* at the tail of the list."""
        node = Node(data)
        if self.head == None:
            # first element: head and tail coincide
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node

    def traverse(self):
        """Return the node values from head to tail as a list."""
        values = []
        current = self.head
        while current != None:
            values.append(current.data)
            current = current.next
        return values

    def reverse(self):
        """Reverse the list by re-inserting its values back to front."""
        values = self.traverse()
        self.head = self.tail = None
        for value in reversed(values):
            self.insert(value)
|
[
"daniel.reich@danielreichs-MacBook-Pro.local"
] |
daniel.reich@danielreichs-MacBook-Pro.local
|
366e5b6c1921361a7577480414955fd30e18ee39
|
0547c3ebab814e3fdf2616ae63f8f6c87a0ff6c5
|
/846.hand-of-straights.py
|
1efee8792025199a30b3260fd14120bab6d55e5d
|
[] |
no_license
|
livepo/lc
|
b8792d2b999780af5d5ef3b6050d71170a272ca6
|
605d19be15ece90aaf09b994098716f3dd84eb6a
|
refs/heads/master
| 2020-05-15T03:57:15.367240
| 2019-07-30T03:11:46
| 2019-07-30T03:11:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
class Solution(object):
    def isNStraightHand(self, hand, W):
        """
        :type hand: List[int]
        :type W: int
        :rtype: bool

        Implements the previously-empty stub (it returned None).
        Greedy strategy: repeatedly take the smallest remaining card and
        consume a full run of W consecutive values starting there; if any
        needed card is missing the hand cannot be partitioned into
        straights of length W.
        """
        # local import keeps the module's top-level dependencies unchanged
        from collections import Counter
        if W <= 0 or len(hand) % W != 0:
            return False
        counts = Counter(hand)
        for card in sorted(counts):
            run = counts[card]
            if run <= 0:
                # already consumed as part of an earlier straight
                continue
            for offset in range(W):
                counts[card + offset] -= run
                if counts[card + offset] < 0:
                    # a card needed to complete the straight is missing
                    return False
        return True
[
"qgmfky@gmail.com"
] |
qgmfky@gmail.com
|
b67b5e6d66ad477d22a129a6bb6faf2a37a69867
|
ad846a63f010b808a72568c00de016fbe86d6c35
|
/algotradingenv/lib/python3.8/site-packages/IPython/external/decorators/_numpy_testing_noseclasses.py
|
9f8f382391de958a20ccb9a35664f5c7c66ba463
|
[] |
no_license
|
krishansinghal29/algotrade
|
74ee8b1c9113812b1c7c00ded95d966791cf76f5
|
756bc2e3909558e9ae8b2243bb4dabc530f12dde
|
refs/heads/master
| 2023-06-02T01:53:24.924672
| 2021-06-10T09:17:55
| 2021-06-10T09:17:55
| 375,641,074
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,417
|
py
|
# IPython: modified copy of numpy.testing.noseclasses, so
# IPython.external._decorators works without numpy being installed.
# These classes implement a "known failure" error class.
import os
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
class KnownFailureTest(Exception):
    """Exception used to flag a test as an expected ("known") failure."""
class KnownFailure(ErrorClassPlugin):
    """Plugin that installs a KNOWNFAIL error class for the
    KnownFailureClass exception. When KnownFailureTest is raised,
    the exception will be logged in the knownfail attribute of the
    result, 'K' or 'KNOWNFAIL' (verbose) will be output, and the
    exception will not be counted as an error or failure."""
    enabled = True
    # ErrorClass hooks KnownFailureTest into nose's result reporting
    knownfail = ErrorClass(KnownFailureTest, label="KNOWNFAIL", isfailure=False)
    def options(self, parser, env=os.environ):
        # register --no-knownfail; NOSE_WITHOUT_KNOWNFAIL supplies the default
        env_opt = "NOSE_WITHOUT_KNOWNFAIL"
        parser.add_option(
            "--no-knownfail",
            action="store_true",
            dest="noKnownFail",
            default=env.get(env_opt, False),
            help="Disable special handling of KnownFailureTest " "exceptions",
        )
    def configure(self, options, conf):
        # honour --no-knownfail once nose finishes parsing options
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, "noKnownFail", False)
        if disable:
            self.enabled = False
|
[
"krishansinghal29@gmail.com"
] |
krishansinghal29@gmail.com
|
9a91b60c24903f61054fed747c3be85c66cb2793
|
256f817910dd698970fab89871c6ce66a3c416e7
|
/1. solvedProblems/340. Longest Substring with At Most K Distinct Characters/340.py
|
e1fd7e173bc2c9b114189909699c70c7543f9303
|
[] |
no_license
|
tgaochn/leetcode
|
5926c71c1555d2659f7db4eff9e8cb9054ea9b60
|
29f1bd681ae823ec6fe755c8f91bfe1ca80b6367
|
refs/heads/master
| 2023-02-25T16:12:42.724889
| 2021-02-04T21:05:34
| 2021-02-04T21:05:34
| 319,225,860
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,982
|
py
|
# !/usr/bin/env python
# coding: utf-8
"""
Author:
Tian Gao (tgaochn@gmail.com)
CreationDate:
Sat, 11/28/2020, 20:48
# !! Description:
"""
import sys
from typing import List
sys.path.append('..')
from utils import binaryTree, nTree, singleLinkedList
from utils.utils import (
printMatrix,
printDict,
printList,
isMatrix,
)
ListNode = singleLinkedList.ListNode
TreeNode = binaryTree.TreeNode
Node = nTree.Node
null = None
testCaseCnt = 6
# maxFuncInputParaCnt = 8
# !! step1: replace these two lines with the given code
class Solution:
def lengthOfLongestSubstringKDistinct(self, s: str, k: int) -> int:
if not k or not s: return 0
from collections import deque
n = len(s)
l, r = 0, 0
win = deque()
self.freqHash = {}
# maxStr = ''
maxLen = -float('inf')
def isValidRlt():
return len(self.freqHash) <= k
def removeEle(ele):
if self.freqHash[ele] == 1:
del self.freqHash[ele]
else:
self.freqHash[ele] -= 1
def addEle(ele):
self.freqHash.setdefault(ele, 0)
self.freqHash[ele] += 1
while r < n:
if not isValidRlt():
eleL = win.popleft()
removeEle(eleL)
l += 1
else:
if len(win) > maxLen:
maxLen = len(win)
# maxStr = ''.join(list(win))
eleR = s[r]
win.append(eleR)
addEle(eleR)
r += 1
# while not maxStr and l < n:
while maxLen >= 0 and l < n:
if isValidRlt():
if len(win) > maxLen:
maxLen = len(win)
eleL = win.popleft()
removeEle(eleL)
l += 1
return maxLen
# endFunc
# endClass
def func():
# !! step2: change function name
s = Solution()
myFuncLis = [
s.lengthOfLongestSubstringKDistinct,
# optional: add another function for comparison
]
onlyDisplayError = True
enableInput = [True] * testCaseCnt
input = [None] * testCaseCnt
expectedRlt = [None] * testCaseCnt
# enableInput[0] = False
# enableInput[1] = False
# enableInput[2] = False
# enableInput[3] = False
# enableInput[4] = False
# enableInput[5] = False
# !! step3: change input para, input para can be found in "run code" - "test case"
# ! para1
input[0] = (
"eceba",
2,
# binaryTree.buildTree(None)
# singleLinkedList.buildSingleList(None)
# nTree.buildTree(None)
)
expectedRlt[0] = 3
# ! para2
input[1] = (
None
# binaryTree.buildTree(None),
# singleLinkedList.buildSingleList(None),
# nTree.buildTree(None),
)
expectedRlt[1] = None
# ! para3
input[2] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[2] = None
# ! para4
input[3] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[3] = None
# ! para5
input[4] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[4] = None
# ! para6
input[5] = (
None
# singleLinkedList.buildSingleList(None),
# binaryTree.buildTree(None),
# nTree.buildTree(None),
)
expectedRlt[5] = None
# !! ====================================
# function and parameters count
allInput = [(input[i], enableInput[i], expectedRlt[i]) for i in range(testCaseCnt)]
if not input[0]:
print("ERROR: please assign at least one input for input[0]!")
exit()
funcParaCnt = 1 if not isinstance(input[0], tuple) else len(input[0])
funcCnt = len(myFuncLis)
# for each test case
for inputPara, enableInput, expectedRlt in allInput:
if not enableInput or not inputPara: continue
inputParaList = [None] * funcParaCnt
if not isinstance(inputPara, tuple):
inputPara = [inputPara]
for j in range(funcParaCnt):
inputParaList[j] = inputPara[j]
# for each function
for j in range(funcCnt):
print('==' * 20)
myFunc = myFuncLis[j]
# ! manually call function, max para count: 8
rlt = None
if funcParaCnt == 1:
rlt = myFunc(inputPara[0])
if funcParaCnt == 2:
rlt = myFunc(inputPara[0], inputPara[1])
if funcParaCnt == 3:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2])
if funcParaCnt == 4:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3])
if funcParaCnt == 5:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4])
if funcParaCnt == 6:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5])
if funcParaCnt == 7:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6])
if funcParaCnt == 8:
rlt = myFunc(inputPara[0], inputPara[1], inputPara[2], inputPara[3], inputPara[4], inputPara[5], inputPara[6], inputPara[7])
# only output when the result is not expected
if onlyDisplayError and expectedRlt is not None and expectedRlt == rlt: continue
# output function name
if funcCnt > 1:
print('func: \t%s' % myFunc.__name__)
# output para
for k in range(funcParaCnt):
para = inputParaList[k]
formatPrint('input %s:' % (k + 1), para)
# output result
print()
if not rlt:
print('rlt:\t', rlt)
else:
formatPrint('rlt:', rlt)
if expectedRlt is not None:
if not expectedRlt:
print('expRlt:\t', expectedRlt)
else:
formatPrint('expRlt:', expectedRlt)
print('==' * 20)
# endFunc
def isSpecialInstance(myInstance):
for curType in [TreeNode, Node]:
if isinstance(myInstance, curType):
return True
return False
# endFunc
def formatPrint(prefix, data):
if isMatrix(data):
print('%s' % prefix)
printMatrix(data)
else:
splitter = '\n' if isSpecialInstance(data) else '\t'
print('%s%s%s' % (prefix, splitter, data))
# endFunc
def main():
func()
# endMain
if __name__ == "__main__":
main()
# endIf
|
[
"tgaochn@gmail.com"
] |
tgaochn@gmail.com
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.