| blob_id (string, 40) | directory_id (string, 40) | path (string, 3–288) | content_id (string, 40) | detected_licenses (list, 0–112 items) | license_type (2 classes) | repo_name (string, 5–115) | snapshot_id (string, 40) | revision_id (string, 40) | branch_name (684 classes) | visit_date (timestamp[us], 2015-08-06 10:31:46 – 2023-09-06 10:44:38) | revision_date (timestamp[us], 1970-01-01 02:38:32 – 2037-05-03 13:00:00) | committer_date (timestamp[us], 1970-01-01 02:38:32 – 2023-09-06 01:08:06) | github_id (int64, 4.92k – 681M, nullable) | star_events_count (int64, 0 – 209k) | fork_events_count (int64, 0 – 110k) | gha_license_id (22 classes) | gha_event_created_at (timestamp[us], 2012-06-04 01:52:49 – 2023-09-14 21:59:50, nullable) | gha_created_at (timestamp[us], 2008-05-22 07:58:19 – 2023-08-21 12:35:19, nullable) | gha_language (147 classes) | src_encoding (25 classes) | language (1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 128 – 12.7k) | extension (142 classes) | content (string, 128 – 8.19k) | authors (list, 1 item) | author_id (string, 1–132) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
751621fe4393f2b0e5ae91137618498f9a96c992
|
525c4a4a65b9b87a2acd027164381d4be8e2d03a
|
/autotf/tuner/priors/default_priors.py
|
a725781d2d1bc099191bb22d28cc75a620c8016a
|
[
"BSD-3-Clause"
] |
permissive
|
DMALab/autotf
|
da1aecae7b9e51d3e27ccd7ee610dc9b3d6cf491
|
3f82d858f49c27d5ecb624cee555fb8fd47bf067
|
refs/heads/master
| 2021-10-25T06:24:38.243496
| 2019-04-02T07:41:42
| 2019-04-02T07:41:42
| 123,559,497
| 8
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,627
|
py
|
import numpy as np
from tuner.priors.base_prior import BasePrior, TophatPrior, \
    LognormalPrior, HorseshoePrior


class DefaultPrior(BasePrior):
    def __init__(self, n_dims, rng=None):
        if rng is None:
            self.rng = np.random.RandomState(np.random.randint(0, 10000))
        else:
            self.rng = rng
        # The number of hyperparameters
        self.n_dims = n_dims
        # Prior for the Matern52 lengthscales
        self.tophat = TophatPrior(-10, 2, rng=self.rng)
        # Prior for the covariance amplitude
        self.ln_prior = LognormalPrior(mean=0.0, sigma=1.0, rng=self.rng)
        # Prior for the noise
        self.horseshoe = HorseshoePrior(scale=0.1, rng=self.rng)

    def lnprob(self, theta):
        lp = 0
        # Covariance amplitude
        lp += self.ln_prior.lnprob(theta[0])
        # Lengthscales
        lp += self.tophat.lnprob(theta[1:-1])
        # Noise
        lp += self.horseshoe.lnprob(theta[-1])
        return lp

    def sample_from_prior(self, n_samples):
        p0 = np.zeros([n_samples, self.n_dims])
        # Covariance amplitude
        p0[:, 0] = self.ln_prior.sample_from_prior(n_samples)[:, 0]
        # Lengthscales
        ls_sample = np.array([self.tophat.sample_from_prior(n_samples)[:, 0]
                              for _ in range(1, (self.n_dims - 1))]).T
        p0[:, 1:(self.n_dims - 1)] = ls_sample
        # Noise
        p0[:, -1] = self.horseshoe.sample_from_prior(n_samples)[:, 0]
        return p0

    def gradient(self, theta):
        # TODO: Implement real gradient here
        return np.zeros([theta.shape[0]])
|
[
"1225646303@qq.com"
] |
1225646303@qq.com
|
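A minimal usage sketch for the DefaultPrior class above, assuming the `tuner.priors` package is importable as suggested by the row's path (the import path is an assumption); theta packs [covariance amplitude, lengthscales..., noise], matching the slicing in lnprob():

import numpy as np
from tuner.priors.default_priors import DefaultPrior  # assumed module path

prior = DefaultPrior(n_dims=4, rng=np.random.RandomState(0))
samples = prior.sample_from_prior(10)  # array of shape (10, 4)
print(prior.lnprob(samples[0]))        # joint log-density of one sample
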
2f0aed1419a0bebcacecf1a22b33d367a5260d73
|
f295b56e9af284092233a724af041a91b35a9f6a
|
/insert-into-a-binary-search-tree/insert-into-a-binary-search-tree.py
|
b797e9509a60d9727d912eddfcc852375834be7c
|
[] |
no_license
|
saviaga/Coding_E
|
7ebdf03b5eca775903ee4b863b56e26190b40029
|
dd21bb3b9d8905263416b206877f1a3d9416ee3f
|
refs/heads/main
| 2023-05-02T19:42:07.267054
| 2021-05-21T17:41:52
| 2021-05-21T17:41:52
| 334,220,320
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
# Definition for a binary tree node.
# class TreeNode(object):
#     def __init__(self, val=0, left=None, right=None):
#         self.val = val
#         self.left = left
#         self.right = right


class Solution(object):
    def insertIntoBST(self, root, val):
        """
        :type root: TreeNode
        :type val: int
        :rtype: TreeNode
        """
        node = root
        while node:
            if val > node.val:
                if not node.right:
                    node.right = TreeNode(val)
                    return root
                else:
                    node = node.right
            elif val <= node.val:
                if not node.left:
                    node.left = TreeNode(val)
                    return root
                else:
                    node = node.left
        return TreeNode(val)
|
[
"saviaga@gmail.com"
] |
saviaga@gmail.com
|
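A quick driver for the insertIntoBST solution above; the TreeNode class is only sketched in the snippet's comment, so a minimal stand-in is defined here for illustration:

class TreeNode(object):
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right

root = TreeNode(4, TreeNode(2), TreeNode(7))
root = Solution().insertIntoBST(root, 5)
print(root.right.left.val)  # 5 becomes the left child of 7
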
784d8ea7bf5c599bdbf456370cc6a1361af7936b
|
e33dbdee28b7452ecd3b051d04af871edd901968
|
/tools/transcode_tool.py
|
1561d961bb4466e8b3858456413509a8551e7fbf
|
[] |
no_license
|
gm19900510/py_examples
|
07f2b9c35362e95989e4c3e8f0f786056dd828af
|
48431167458e6528bc6d44b2f51a39d338dd1df1
|
refs/heads/master
| 2020-05-30T03:33:53.976482
| 2019-05-31T02:58:54
| 2019-05-31T02:58:54
| 189,517,979
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,184
|
py
|
# -*- coding:utf-8 -*-
import redis
import subprocess
import time
import sys


def handle(ffmpeg_path, input_path, out_path):
    command = [ffmpeg_path, '-i', input_path, '-vcodec', 'copy', '-f', 'flv', '-an', out_path]
    p = subprocess.Popen(command)
    return p


class TranscodeTool():
    def __init__(self, args):
        self.pool = redis.ConnectionPool(host=args.redis_host)  # set up a connection pool
        self.r = redis.Redis(connection_pool=self.pool)
        self.args = args

    def trans(self, device):
        device_id = 'device_id_' + device
        process_id = 'process_id_' + device
        print('device_id:', device_id)
        print('process_id:', process_id)
        # If the child-process PID key is absent, no transcode is running, so start one
        if not self.r.exists(process_id):
            print('Play requested; no transcode process found, starting one')
            # Fetch the RTSP path for the device
            rtsp_path = str(self.r.get(device_id), encoding="utf-8")
            print('Device: ' + device_id + ', RTSP path:', rtsp_path)
            p = handle(self.args.ffmpeg_path, rtsp_path, self.args.rtmp_path + device)
            print('Started transcode process:', p.pid)
            print('Saving transcode-process key: ' + process_id, 1)
            self.r.setex(process_id, 60 * 60 * 1, 1)
            while True:
                time.sleep(20)
                if not self.r.exists(process_id):
                    p.kill()
                    print('Transcode-process key is gone; killing the transcode process')
                    break
            sys.exit(0)
        else:  # the PID key exists, so a transcode is already running; bump the viewer count
            process_num = int(str(self.r.get(process_id), encoding="utf-8"))
            print('Play requested; transcode already running, current viewer count:', process_num)
            self.r.setex(process_id, 60 * 60 * 1, (process_num + 1))
            print('Updating transcode-process key: ' + process_id, (process_num + 1))
            sys.exit(0)
|
[
"Administrator@PC-20170308PKRS"
] |
Administrator@PC-20170308PKRS
|
4e1dd335864e7af9b9c0fa24be2426897758e16f
|
04a89d6cbc02af00db57af791eb07f90ddf43944
|
/Final/junk.py
|
a0a8b222f27ef60865326eaf40c74a9395d3af52
|
[] |
no_license
|
jclarkrichards/PacmanRedo
|
000518456c3de9a29af65176d83e4f6694a697e6
|
343b661631716ea6f2286c5c8f597f7f600b1e89
|
refs/heads/main
| 2023-07-16T09:33:38.891573
| 2021-08-29T22:12:25
| 2021-08-29T22:12:25
| 390,863,357
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 302
|
py
|
def startFreight(self):
    self.mode.setFreightMode()
    if self.mode.current == FREIGHT:
        self.setSpeed(50)
        self.directionMethod = self.randomDirection

def normalMode(self):
    self.setSpeed(100)
    self.directionMethod = self.goalDirection
|
[
"jclarkrichards@gmail.com"
] |
jclarkrichards@gmail.com
|
090eda276fb550f3ed332283b7330f88c4a215ac
|
27da1c772decb031eeabdee94530c6c0d53d82d7
|
/DataStructures/DataStructureAndAlgorithmicThinkingWithPython-master/chapter21miscconcepts/NumberPlusone.py
|
22e11b09ac5829b47aedb942d7753bb821f14943
|
[
"MIT"
] |
permissive
|
ManasveeMittal/dropbox
|
09e400c1cf0286051b115d81b509eabba0159c91
|
58d893b14119d1a4e87a122ab37aeaa523fa0a3c
|
refs/heads/master
| 2021-05-11T19:35:27.104757
| 2018-01-31T13:46:07
| 2018-01-31T13:46:07
| 117,876,352
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,035
|
py
|
# Copyright (c) Dec 22, 2014 CareerMonk Publications and others.
# E-Mail : info@careermonk.com
# Creation Date : 2014-01-10 06:15:46
# Last modification : 2008-10-31
#                by : Narasimha Karumanchi
# Book Title : Data Structures And Algorithmic Thinking With Python
# Warranty : This software is provided "as is" without any
#            warranty; without even the implied warranty of
#            merchantability or fitness for a particular purpose.

#!/usr/bin/env python

'''
Given a number represented as an array of digits, plus one to the number.
'''

from __future__ import division
import random


def plus_one(digits):
    print digits, '+ 1 =',
    carry = 1
    for i in reversed(xrange(len(digits))):
        x = digits[i]
        carry, x = divmod(x + carry, 10)
        digits[i] = x
    if carry > 0: digits.insert(0, carry)
    print digits
    return digits

if __name__ == '__main__':
    plus_one([1, 2, 3, 4])
    plus_one([1, 9, 9])
    plus_one([9, 9, 9])
    plus_one([0])
|
[
"manasvee.k@outlook.com"
] |
manasvee.k@outlook.com
|
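The plus_one snippet above is Python 2 (print statements, xrange). For reference, a hedged Python 3 port of the same carry-propagation idea:

def plus_one_py3(digits):
    carry = 1
    for i in reversed(range(len(digits))):
        # divmod propagates the carry one digit at a time
        carry, digits[i] = divmod(digits[i] + carry, 10)
    if carry:
        digits.insert(0, carry)
    return digits

print(plus_one_py3([9, 9, 9]))  # [1, 0, 0, 0]
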
010012fa069264721194c28e9de89102e43737dc
|
704393fd5ee87339623e343e493071c8139f1750
|
/examples/structured_heatmap.py
|
5fe55b42bb4025e9201be903cf95257a3a40fd29
|
[
"BSD-3-Clause"
] |
permissive
|
seanzhou1023/seaborn
|
18af2d3fa82242899bcd0363ea3810fd521c1c5c
|
30b4cd8b75e7c80e9edad2b19aa28394cc592455
|
refs/heads/master
| 2020-12-02T21:08:54.956829
| 2017-07-03T19:36:45
| 2017-07-03T19:36:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,206
|
py
|
"""
Discovering structure in heatmap data
=====================================
_thumb: .4, .2
"""
import pandas as pd
import seaborn as sns
sns.set(font="monospace")
# Load the brain networks example dataset
df = sns.load_dataset("brain_networks", header=[0, 1, 2], index_col=0)
# Select a subset of the networks
used_networks = [1, 5, 6, 7, 8, 11, 12, 13, 16, 17]
used_columns = (df.columns.get_level_values("network")
.astype(int)
.isin(used_networks))
df = df.loc[:, used_columns]
# Create a custom palette to identify the networks
network_pal = sns.cubehelix_palette(len(used_networks),
light=.9, dark=.1, reverse=True,
start=1, rot=-2)
network_lut = dict(zip(map(str, used_networks), network_pal))
# Convert the palette to vectors that will be drawn on the side of the matrix
networks = df.columns.get_level_values("network")
network_colors = pd.Series(networks, index=df.columns).map(network_lut)
# Draw the full plot
sns.clustermap(df.corr(), center=0, cmap="vlag",
row_colors=network_colors, col_colors=network_colors,
figsize=(13, 13))
|
[
"mwaskom@stanford.edu"
] |
mwaskom@stanford.edu
|
0e5abbc5b9435a3e0799123bc67bc74ebf4e32df
|
473035074bd546694d5e3dbe6decb900ba79e034
|
/traffic fluid simulator/backend/env_4_6/model/Memory.py
|
151d10fed341470aef177d167c464d139f4f46c9
|
[] |
no_license
|
johny1614/magazyn
|
35424203036191fb255c410412c195c8f41f0ba5
|
a170fea3aceb20f59716a7b5088ccdcb6eea472f
|
refs/heads/master
| 2022-03-26T01:10:04.472374
| 2019-09-19T16:34:22
| 2019-09-19T16:34:22
| 171,033,407
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 279
|
py
|
import attr
from model.Action import ActionInt
from model.LearningState import LearningState


@attr.s(auto_attribs=True)
class Memory:
    state: LearningState
    action: ActionInt
    reward: float
    new_state: LearningState
    times: any
    reshapedReward: bool = False
|
[
"johny1614@gmail.com"
] |
johny1614@gmail.com
|
492cdf65fcd139d8447f856e9245891e042dcb48
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_bummed.py
|
6448a6af4a3d357aec3ad853a9879cbba2677024
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 231
|
py
|
from xai.brain.wordbase.adjectives._bum import _BUM


# class header
class _BUMMED(_BUM, ):
    def __init__(self,):
        _BUM.__init__(self)
        self.name = "BUMMED"
        self.specie = 'adjectives'
        self.basic = "bum"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
1ebe3832f7d3e6e2cad8cb7b057128d7445ae88f
|
5e84763c16bd6e6ef06cf7a129bb4bd29dd61ec5
|
/blimgui/dist/sdl2/test/sdl2ext_test.py
|
634c91a13c8617bde9751ff7777b72eacbae1f42
|
[
"MIT"
] |
permissive
|
juso40/bl2sdk_Mods
|
8422a37ca9c2c2bbf231a2399cbcb84379b7e848
|
29f79c41cfb49ea5b1dd1bec559795727e868558
|
refs/heads/master
| 2023-08-15T02:28:38.142874
| 2023-07-22T21:48:01
| 2023-07-22T21:48:01
| 188,486,371
| 42
| 110
|
MIT
| 2022-11-20T09:47:56
| 2019-05-24T20:55:10
|
Python
|
UTF-8
|
Python
| false
| false
| 3,023
|
py
|
import sys
import pytest
import sdl2
from sdl2 import ext as sdl2ext
from sdl2 import SDL_Quit, SDL_WasInit, SDL_FlushEvent, SDL_USEREVENT, \
    SDL_FIRSTEVENT, SDL_LASTEVENT, SDL_Event, SDL_UserEvent, SDL_PushEvent


@pytest.fixture(scope="module")
def with_sdl_ext():
    if SDL_WasInit(0) != 0:
        SDL_Quit()
    sdl2ext.init()
    yield
    sdl2ext.quit()


def test_init_quit():
    # NOTE: Currently init only inits the video subsystem, but quit shuts down
    # SDL2 and ttf/image/mixer libraries. This latter function should be tested.
    try:
        sdl2ext.init()
    except sdl2ext.SDLError:
        raise pytest.skip('Video subsystem not supported')
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
    assert SDL_WasInit(sdl2.SDL_INIT_EVENTS) == sdl2.SDL_INIT_EVENTS
    sdl2ext.quit()
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
    sdl2ext.init()
    sdl2ext.init()
    sdl2ext.init()
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
    sdl2ext.quit()
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
    # Test initializing other subsystems
    sdl2ext.init(video=False, events=True)
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) != sdl2.SDL_INIT_VIDEO
    assert SDL_WasInit(sdl2.SDL_INIT_EVENTS) == sdl2.SDL_INIT_EVENTS
    sdl2ext.init(video=True, audio=True, timer=True)
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
    assert SDL_WasInit(sdl2.SDL_INIT_AUDIO) == sdl2.SDL_INIT_AUDIO
    assert SDL_WasInit(sdl2.SDL_INIT_TIMER) == sdl2.SDL_INIT_TIMER
    sdl2ext.init(joystick=True, haptic=True)
    assert SDL_WasInit(sdl2.SDL_INIT_VIDEO) == sdl2.SDL_INIT_VIDEO
    assert SDL_WasInit(sdl2.SDL_INIT_JOYSTICK) == sdl2.SDL_INIT_JOYSTICK
    assert SDL_WasInit(sdl2.SDL_INIT_HAPTIC) == sdl2.SDL_INIT_HAPTIC
    assert SDL_WasInit(sdl2.SDL_INIT_GAMECONTROLLER) != sdl2.SDL_INIT_GAMECONTROLLER
    sdl2ext.init(controller=True)
    assert SDL_WasInit(sdl2.SDL_INIT_GAMECONTROLLER) == sdl2.SDL_INIT_GAMECONTROLLER
    if sdl2.dll.version < 2009:
        with pytest.raises(RuntimeError):
            sdl2ext.init(sensor=True)
    else:
        sdl2ext.init(sensor=True)
        assert SDL_WasInit(sdl2.SDL_INIT_SENSOR) == sdl2.SDL_INIT_SENSOR
    sdl2ext.quit()


def test_get_events(with_sdl_ext):
    SDL_FlushEvent(SDL_FIRSTEVENT, SDL_LASTEVENT)
    for x in range(12):
        event = SDL_Event()
        event.type = SDL_USEREVENT + x
        event.user = SDL_UserEvent(
            type=event.type, timestamp=0, windowID=0, code=0
        )
        SDL_PushEvent(event)
    results = sdl2ext.get_events()
    assert len(results) == 12
    for idx, r in enumerate(results):
        assert idx == r.type - SDL_USEREVENT


def test_TestEventProcessor(with_sdl_ext):
    # NOTE: This doesn't really test functionality, but since I don't think
    # it's terribly useful I'm not going to bother expanding it
    proc = sdl2ext.TestEventProcessor()
    assert isinstance(proc, sdl2ext.TestEventProcessor)
|
[
"justin.sostmann@googlemail.com"
] |
justin.sostmann@googlemail.com
|
a0ece73e9e41e6c549bf663092a130f37be60564
|
2d0b7c568de35671c54bbe0a32a59bf72983273e
|
/src/token_generator.py
|
cbf968be44d0975bbe24f871d1e9461588fe1862
|
[] |
no_license
|
pantsbuild/rbe-token-server
|
7ae2a4668990cb69e193f4c529987fe544ae831e
|
3bfc6410365e415c40b7c4b1879195ba533d8804
|
refs/heads/master
| 2021-06-19T05:39:02.068921
| 2020-02-23T21:23:49
| 2020-02-23T21:23:49
| 195,114,143
| 0
| 3
| null | 2021-03-20T01:38:52
| 2019-07-03T19:20:04
|
Python
|
UTF-8
|
Python
| false
| false
| 861
|
py
|
from __future__ import annotations

from google.cloud import iam_credentials_v1

credentials_client = iam_credentials_v1.IAMCredentialsClient()

# NB: The project name must be a wildcard `-`, per
# https://cloud.google.com/iam/credentials/reference/rest/v1/projects.serviceAccounts/generateAccessToken.
resource_name = credentials_client.service_account_path(
    project="-", service_account="travis-ci-rbe@pants-remoting-beta.iam.gserviceaccount.com"
)

# NB: This may either be `auth/cloud-platform` or `auth/iam`, per
# https://cloud.google.com/iam/docs/creating-short-lived-service-account-credentials#sa-credentials-oauth
scope = ["https://www.googleapis.com/auth/cloud-platform"]


def generate() -> str:
    access_token: str = credentials_client.generate_access_token(
        name=resource_name, scope=scope
    ).access_token
    return access_token
|
[
"noreply@github.com"
] |
pantsbuild.noreply@github.com
|
d083ca1556dd430f3d6201dd69d2bae797c40620
|
ea7d2090ba1d66fc5bf91b255742ae07e1f74c3d
|
/2020/insomnihack_teaser/welcome/pow.py
|
7fe66a371a9a4572dc10a0584c49d9cb52517718
|
[] |
no_license
|
arty-hlr/CTF-writeups
|
1a3e29b9a3c3b80e33df0c9489cacd6ec09e46fe
|
64bcda1d1d8893c2ece308f82348755a2c62ca9e
|
refs/heads/master
| 2022-08-04T20:26:07.428393
| 2022-07-30T11:11:34
| 2022-07-30T11:11:34
| 167,851,059
| 4
| 3
| null | 2022-07-30T10:55:11
| 2019-01-27T19:47:03
|
Python
|
UTF-8
|
Python
| false
| false
| 232
|
py
|
import hashX
from pwn import *
s = remote('welcome.insomnihack.ch',1337)
s.recvuntil('with "')
h = s.recv(6).decode()
found = hashX.main(h,'md5')
log.info(f"found string: {found}")
s.sendline(found)
log.info(s.recvall().decode())
|
[
"flrn.pjd@protonmail.com"
] |
flrn.pjd@protonmail.com
|
d584d7c8d4764221eb0ba568444c06c5b4f675d2
|
4bc048ebbf5d28b399d3ab89e717f3e7496abc38
|
/periods/tests/test_email_sender.py
|
e6b3eb6a2683c922dfbbdc6f65b6fe9db9609a2b
|
[
"MIT"
] |
permissive
|
jessamynsmith/eggtimer-server
|
3feff03057148f7ab54c0df8c863f1543be886a9
|
e1b1d9d848893b9e6e56e985da74d6b378c07744
|
refs/heads/master
| 2023-02-05T05:38:51.194914
| 2023-02-04T04:27:02
| 2023-02-04T04:27:02
| 8,124,406
| 50
| 18
|
MIT
| 2023-01-11T12:45:53
| 2013-02-10T15:57:26
|
Python
|
UTF-8
|
Python
| false
| false
| 871
|
py
|
from django.contrib.auth import get_user_model
from django.test import TestCase
from mock import patch

from periods import email_sender


class TestEmailSender(TestCase):
    def setUp(self):
        self.user = get_user_model().objects.create_user(
            password='bogus', email='jessamyn@example.com', first_name=u'Jessamyn')

    @patch('django.core.mail.EmailMultiAlternatives.send')
    def test_send_text_only(self, mock_send):
        result = email_sender.send(self.user, 'Hi!', 'good day', None)
        self.assertEqual(True, result)
        mock_send.assert_called_once_with()

    @patch('django.core.mail.EmailMultiAlternatives.send')
    def test_send_with_html(self, mock_send):
        result = email_sender.send(self.user, 'Hi!', 'good day', '<p>good day</p>')
        self.assertEqual(True, result)
        mock_send.assert_called_once_with()
|
[
"geekchick77@gmail.com"
] |
geekchick77@gmail.com
|
d4fcfeef7b63063c5f5d8f152d236ed85efe6a52
|
41f4415409901876ac153459b4f6fe28a5a934a7
|
/src/lambda_reduce2.py
|
8775db4ef45cb5721860c3a3f4f8ed9297b3fde1
|
[] |
no_license
|
prasertcbs/python_tutorial
|
4062a413df6192a71eb56f211501d710ddc26b90
|
2302ea030f6984e6ac3f77a366369e9c47502f5a
|
refs/heads/master
| 2023-03-18T16:47:27.564924
| 2023-03-09T14:33:59
| 2023-03-09T14:33:59
| 320,748,324
| 1
| 3
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,199
|
py
|
from functools import reduce


def demo_reduce_logic():
    age = 15
    gender = 'F'
    height = 165
    weight = 48
    crit = [age > 18, gender == 'F', height > 160, weight > 45]
    ok = reduce(lambda cv, v: cv and v, crit)
    # ok = crit[0] and crit[1] and crit[2] and crit[3]
    print(ok)


def demo_join_list():
    sep = '|'
    names = ['Peter', 'Jenny', 'Linda', 'Bruce', 'Ann']
    print(sep.join(names))
    x = reduce(lambda cv, v: f'{cv}{sep}{v}', names)
    print(x)


def demo_if_reduce():
    numbers = [3, 6, 4, 1, 7, 8]
    # sum even numbers
    x = reduce(lambda cv, v: cv + (v if v % 2 == 0 else 0), numbers, 0)
    # x = reduce(lambda cv, v: cv + (v if v % 2 == 0 else 0), numbers)
    print(x)


def demo_list_dict():
    names = [
        {'name': 'Peter', 'score': 5},
        {'name': 'Ann', 'score': 8},
        {'name': 'Jenny', 'score': 7},
        {'name': 'Bruce', 'score': 10}
    ]
    x = reduce(lambda cv, v: cv + v['score'], names, 0)
    # x = reduce(lambda cv, v: cv + v['score'], names)
    print(x)
    print(sum([v['score'] for v in names]))


if __name__ == "__main__":
    # demo_reduce_logic()
    # demo_join_list()
    # demo_if_reduce()
    demo_list_dict()
|
[
"noreply@github.com"
] |
prasertcbs.noreply@github.com
|
b7b671cd460bf84fd46bd1bcb8cbca90d74ec439
|
344e2956b4e2a30a8ef7532d951f96d995d1dd1e
|
/16_mmdet/lib/mmdet/models/dense_heads/rpn_test_mixin.py
|
a59422fd69ed7e61f628b90f4528d0cec11c2370
|
[
"Apache-2.0",
"LGPL-3.0-only",
"MIT",
"LicenseRef-scancode-proprietary-license",
"BSD-3-Clause",
"GPL-3.0-only"
] |
permissive
|
karndeepsingh/Monk_Object_Detection
|
e64199705326e4cd65e4b29946cae210a4ef9649
|
425fa50a3236cb9097389646275da06bf9185f6b
|
refs/heads/master
| 2022-12-22T18:26:53.933397
| 2020-09-28T12:49:50
| 2020-09-28T12:49:50
| 299,307,843
| 1
| 1
|
Apache-2.0
| 2020-09-28T12:52:18
| 2020-09-28T12:52:17
| null |
UTF-8
|
Python
| false
| false
| 2,151
|
py
|
import sys

from mmdet.core import merge_aug_proposals

if sys.version_info >= (3, 7):
    from mmdet.utils.contextmanagers import completed


class RPNTestMixin(object):
    """Test methods of RPN."""

    if sys.version_info >= (3, 7):

        async def async_simple_test_rpn(self, x, img_metas):
            sleep_interval = self.rpn_head.test_cfg.pop(
                'async_sleep_interval', 0.025)
            async with completed(
                    __name__, 'rpn_head_forward',
                    sleep_interval=sleep_interval):
                rpn_outs = self(x)
            proposal_list = self.get_bboxes(*rpn_outs, img_metas)
            return proposal_list

    def simple_test_rpn(self, x, img_metas):
        """Test without augmentation.

        Args:
            x (tuple[Tensor]): Features from the upstream network, each is
                a 4D-tensor.
            img_metas (list[dict]): Meta info of each image.

        Returns:
            list[Tensor]: Proposals of each image.
        """
        rpn_outs = self(x)
        proposal_list = self.get_bboxes(*rpn_outs, img_metas)
        return proposal_list

    def aug_test_rpn(self, feats, img_metas):
        samples_per_gpu = len(img_metas[0])
        aug_proposals = [[] for _ in range(samples_per_gpu)]
        for x, img_meta in zip(feats, img_metas):
            proposal_list = self.simple_test_rpn(x, img_meta)
            for i, proposals in enumerate(proposal_list):
                aug_proposals[i].append(proposals)
        # reorganize the order of 'img_metas' to match the dimensions
        # of 'aug_proposals'
        aug_img_metas = []
        for i in range(samples_per_gpu):
            aug_img_meta = []
            for j in range(len(img_metas)):
                aug_img_meta.append(img_metas[j][i])
            aug_img_metas.append(aug_img_meta)
        # after merging, proposals will be rescaled to the original image size
        merged_proposals = [
            merge_aug_proposals(proposals, aug_img_meta, self.test_cfg)
            for proposals, aug_img_meta in zip(aug_proposals, aug_img_metas)
        ]
        return merged_proposals
|
[
"abhishek4273@gmail.com"
] |
abhishek4273@gmail.com
|
ba069b17cac491de9884a946e82b583bcaab68e4
|
2713827b2f68aae65d4be516f024d51a7e762a97
|
/doc/user/SConscript
|
9e039dac7b1b08fbd7c55e9e8439769e0b70f962
|
[
"MIT"
] |
permissive
|
ptomulik/scons-tool-gnuplot
|
bd83fd6c4d85fb4391288d90d7e26f67ec4f1f29
|
2b92500feed48267d1bfdcaaae542a65dcc42b60
|
refs/heads/master
| 2021-05-21T12:03:14.776722
| 2020-04-16T22:36:33
| 2020-04-16T22:36:33
| 9,398,514
| 1
| 0
| null | 2020-04-16T22:36:35
| 2013-04-12T16:21:39
|
Python
|
UTF-8
|
Python
| false
| false
| 1,493
|
#
# Copyright (c) 2013-2020 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
Import(['env'])

if 'user-doc' in COMMAND_LINE_TARGETS:
    env.Tool('docbook')
    pdf = env.DocbookPdf('manual')
    html = env.DocbookHtml('manual')
    env.Ignore('.', pdf + html)
    env.Alias('user-doc', pdf + html)
    env.AlwaysBuild('user-doc')
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=scons expandtab tabstop=4 shiftwidth=4:
|
[
"ptomulik@meil.pw.edu.pl"
] |
ptomulik@meil.pw.edu.pl
|
|
6a3716a612b8ebb22cb755b1aaab49a349259463
|
fc38005e1474ce803a272387d401da6cd0a8c0ef
|
/lter_pasta/src/pasta_gmn_adapter/app/restrict_to_verb.py
|
b5ab1b462e5d53d91264fe160c26b0abe8ce0482
|
[
"Apache-2.0"
] |
permissive
|
DataONEorg/SlenderNodes
|
4a3876e12d46c031b99821717533e2f4f39a57c8
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
refs/heads/master
| 2023-02-18T08:39:24.072662
| 2022-01-07T13:12:18
| 2022-01-07T13:12:18
| 53,552,615
| 1
| 3
|
Apache-2.0
| 2023-02-08T02:36:49
| 2016-03-10T03:38:03
|
HTML
|
UTF-8
|
Python
| false
| false
| 1,243
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""":mod:`restrict_to_verb`
==========================

:Synopsis: Limit views to be called only with specific verb.
:Author: Roger Dahl
"""

import django.http


def _allow_only_verbs(f, verbs):
    def wrap(request, *args, **kwargs):
        if request.method not in verbs:
            return django.http.HttpResponseNotAllowed(verbs)
        return f(request, *args, **kwargs)
    wrap.__doc__ = f.__doc__
    wrap.__name__ = f.__name__
    return wrap


def get(f):
    return _allow_only_verbs(f, ['GET'])


def put(f):
    return _allow_only_verbs(f, ['PUT'])


def post(f):
    return _allow_only_verbs(f, ['POST'])


def delete(f):
    return _allow_only_verbs(f, ['DELETE'])
|
[
"git@dahlsys.com"
] |
git@dahlsys.com
|
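A small illustration of how the verb decorators above are meant to be applied to a Django view; the import path mirrors the file's location in this row and is an assumption:

import django.http
from pasta_gmn_adapter.app import restrict_to_verb  # assumed import path

@restrict_to_verb.get
def status(request):
    return django.http.HttpResponse('ok')
# Any non-GET request to this view now gets 405 Method Not Allowed with an Allow: GET header.
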
b968ff45bd84273d0e43339d72915b1cd40cf9af
|
0bff4e342d15d90dde7ed0b8a8a479b2c82d17d7
|
/home/check_images.py
|
7a787f6253d0666f6a04e6ff8730a63e9d128ce2
|
[] |
no_license
|
AyodejiOkusanya/Dog_classifier_project
|
e272070d21646d11c5724724c0abdc691c4b8226
|
13a8f29d9506332bd0bc23415501918565147624
|
refs/heads/master
| 2020-07-26T23:41:14.535123
| 2019-09-16T13:31:38
| 2019-09-16T13:31:38
| 208,800,039
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,398
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# */AIPND-revision/intropyproject-classify-pet-images/check_images.py
#
# TODO 0: Add your information below for Programmer & Date Created.
# PROGRAMMER: Ayodeji Okusanya
# DATE CREATED: 11th September 2019
# REVISED DATE:
# PURPOSE: Classifies pet images using a pretrained CNN model, compares these
#          classifications to the true identity of the pets in the images, and
#          summarizes how well the CNN performed on the image classification task.
#          Note that the true identity of the pet (or object) in the image is
#          indicated by the filename of the image. Therefore, your program must
#          first extract the pet image label from the filename before
#          classifying the images using the pretrained CNN model. With this
#          program we will be comparing the performance of 3 different CNN model
#          architectures to determine which provides the 'best' classification.
#
# Use argparse Expected Call with <> indicating expected user input:
#      python check_images.py --dir <directory with images> --arch <model>
#             --dogfile <file that contains dognames>
#   Example call:
#    python check_images.py --dir pet_images/ --arch vgg --dogfile dognames.txt
##

# Imports python modules
from time import time, sleep

# Imports print functions that check the lab
from print_functions_for_lab_checks import *

# Imports functions created for this program
from get_input_args import get_input_args
from get_pet_labels import get_pet_labels
from classify_images import classify_images
from adjust_results4_isadog import adjust_results4_isadog
from calculates_results_stats import calculates_results_stats
from print_results import print_results


# Main program function defined below
def main():
    # TODO 0: Measures total program runtime by collecting start time
    start_time = time()

    # TODO 1: Define get_input_args function within the file get_input_args.py
    # This function retrieves 3 Command Line Arguments as input from
    # the user running the program from a terminal window. This function returns
    # the collection of these command line arguments from the function call as
    # the variable in_arg
    in_arg = get_input_args()

    # Function that checks command line arguments using in_arg
    check_command_line_arguments(in_arg)

    # TODO 2: Define get_pet_labels function within the file get_pet_labels.py
    # Once the get_pet_labels function has been defined replace 'None'
    # in the function call with in_arg.dir Once you have done the replacements
    # your function call should look like this:
    #             get_pet_labels(in_arg.dir)
    # This function creates the results dictionary that contains the results,
    # this dictionary is returned from the function call as the variable results
    results = get_pet_labels(in_arg.dir)

    # Function that checks Pet Images in the results Dictionary using results
    check_creating_pet_image_labels(results)

    # TODO 3: Define classify_images function within the file classify_images.py
    # Once the classify_images function has been defined replace first 'None'
    # in the function call with in_arg.dir and replace the last 'None' in the
    # function call with in_arg.arch Once you have done the replacements your
    # function call should look like this:
    #             classify_images(in_arg.dir, results, in_arg.arch)
    # Creates Classifier Labels with classifier function, Compares Labels,
    # and adds these results to the results dictionary - results
    classify_images(in_arg.dir, results, in_arg.arch)

    # Function that checks Results Dictionary using results
    check_classifying_images(results)

    # TODO 4: Define adjust_results4_isadog function within the file adjust_results4_isadog.py
    # Once the adjust_results4_isadog function has been defined replace 'None'
    # in the function call with in_arg.dogfile Once you have done the
    # replacements your function call should look like this:
    #          adjust_results4_isadog(results, in_arg.dogfile)
    # Adjusts the results dictionary to determine if classifier correctly
    # classified images as 'a dog' or 'not a dog'. This demonstrates if
    # model can correctly classify dog images as dogs (regardless of breed)
    adjust_results4_isadog(results, in_arg.dogfile)

    # Function that checks Results Dictionary for is-a-dog adjustment using results
    check_classifying_labels_as_dogs(results)

    # TODO 5: Define calculates_results_stats function within the file calculates_results_stats.py
    # This function creates the results statistics dictionary that contains a
    # summary of the results statistics (this includes counts & percentages). This
    # dictionary is returned from the function call as the variable results_stats
    # Calculates results of run and puts statistics in the Results Statistics
    # Dictionary - called results_stats
    results_stats = calculates_results_stats(results)

    # Function that checks Results Statistics Dictionary using results_stats
    check_calculating_results(results, results_stats)

    # TODO 6: Define print_results function within the file print_results.py
    # Once the print_results function has been defined replace 'None'
    # in the function call with in_arg.arch Once you have done the
    # replacements your function call should look like this:
    #      print_results(results, results_stats, in_arg.arch, True, True)
    # Prints summary results, incorrect classifications of dogs (if requested)
    # and incorrectly classified breeds (if requested)
    print_results(results, results_stats, in_arg.arch, True, True)

    # TODO 0: Measure total program runtime by collecting end time
    end_time = time()

    # TODO 0: Computes overall runtime in seconds & prints it in hh:mm:ss format
    tot_time = end_time - start_time  # calculate difference between end time and start time
    print("\n** Total Elapsed Runtime:",
          str(int((tot_time / 3600))) + ":" + str(int((tot_time % 3600) / 60)) + ":"
          + str(int((tot_time % 3600) % 60)))


# Call to main function to run the program
if __name__ == "__main__":
    main()
|
[
"github email address"
] |
github email address
|
c3c2c3822b57e06c8b713582230fd8a9950bcfcf
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/pirates/inventory/InventoryUIGoldItem.py
|
1a3afd314852cc484b7e5797f1275e913e7e2160
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,541
|
py
|
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from pirates.piratesgui import GuiPanel, PiratesGuiGlobals
from pirates.piratesbase import PiratesGlobals
from pirates.piratesbase import PLocalizer
from otp.otpbase import OTPLocalizer
from pirates.inventory.InventoryUIGlobals import *
from pirates.inventory.InventoryGlobals import *
from pirates.inventory import InventoryUIStackItem


class InventoryUIGoldItem(InventoryUIStackItem.InventoryUIStackItem):

    def __init__(self, manager, itemTuple, imageScaleFactor = 1.0, update = False):
        InventoryUIStackItem.InventoryUIStackItem.__init__(self, manager, itemTuple, imageScaleFactor = imageScaleFactor, showMax = 0, update = False)
        self.initialiseoptions(InventoryUIGoldItem)
        gui = loader.loadModel('models/gui/toplevel_gui')
        self['image'] = gui.find('**/treasure_w_coin*')
        self['image_scale'] = 0.1 * imageScaleFactor
        self.imageScale = 3.0
        self.textScale = 1.1
        if update:
            self.accept(getCategoryChangeMsg(localAvatar.getInventoryId(), InventoryType.ItemTypeMoney), self.updateAmount)

    def destroy(self):
        self.ignoreAll()
        InventoryUIStackItem.InventoryUIStackItem.destroy(self)

    def getName(self):
        return PLocalizer.GoldName

    def updateAmount(self, caller = None):
        inventory = localAvatar.getInventory()
        if inventory:
            amount = inventory.getGoldInPocket()
            self.amount = amount
            self.updateAmountText()
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
05464dd5b1fdbd853afe7496ba59a07cb777199b
|
ce26ae9315d7814f6dbfa1918c7f5c5a6293e49b
|
/Lammps/Pore/qsub/Launcher.py.bak
|
145233501f45b556ca2fb8eec925aebc7b2a894c
|
[] |
no_license
|
sramirezh/Utilities
|
25982a28cc40c6bea47c8ccbd95870addd2e826d
|
a86e72787059e511983cd047f3027aa10eba7090
|
refs/heads/master
| 2023-02-09T10:16:28.571756
| 2023-01-31T00:14:28
| 2023-01-31T00:14:28
| 89,708,819
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,576
|
bak
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue May 28 09:24:03 2019

"Creates replicas of simulations starting from configurations during the equilibration"

@author: sr802
"""

import glob
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '../../../'))  # This falls into Utilities path
import Lammps.core_functions as cf
import shutil
from simulation_utilities import simulation

cwd = os.getcwd()  # current working directory
dir_path = os.path.dirname(os.path.realpath(__file__))  # Path of this python script

# Main

# Getting the path to all the restart files
files = glob.glob('./Try/particle/*')
template = cwd + '/Template'
home = cwd + '/mu_force'

times = cf.extract_digits(files)
times = [str(int(time)) for time in times]

# Taking the last N configurations
n_conf = 4
conf_times = times[-n_conf:]
files_analysis = cf.parameter_finder(files, conf_times)

shutil.rmtree(home, ignore_errors=True)

for i in files_analysis:
    # The extraction of the parameters for the simulation comes here
    path = "%s/%s" % (home, times[i])
    time = int(cf.extract_digits(files[i])[-1])
    name = str(time)
    restart = files[i]

    # Creating the simulation instance
    sim = simulation(home, template, name, restart)
    sim.create_folder()
    sim.create_qsub('test', 1, 1, 1, 'input.lmp',)

    # Modifications to the files here
    file_name = "input.lmp"
    file_path = sim.folder + '/' + file_name
    value_modify = sim.initial_conf.split('/')[-1]
    cf.modify_file(file_path, 'read_restart', 'read_restart\t%s\n' % value_modify)
|
[
"sramirez.hinestrosa@gmail.com"
] |
sramirez.hinestrosa@gmail.com
|
46bc65733acedf9596954169791412496a1c48f4
|
8b2e795c3040a2ef1d3f0c21752bec57a0614bd6
|
/venv/Scripts/enhancer.py
|
294eaaa77d0cdd6cb0de824a7d27a60fe56e0e2b
|
[] |
no_license
|
harshit8858/NHDO
|
c75e244dfdc91817b3047d65c7be610f3e18aba3
|
6a5ea2de4ba607c20c0b9bd241e6b1c82090eba9
|
refs/heads/master
| 2023-01-06T20:18:33.795898
| 2018-01-03T07:39:04
| 2018-01-03T07:39:04
| 105,629,451
| 1
| 3
| null | 2022-12-20T22:32:34
| 2017-10-03T08:26:57
|
Python
|
UTF-8
|
Python
| false
| false
| 1,603
|
py
|
#!c:\users\harshi~1\nhdo\venv\scripts\python.exe
#
# The Python Imaging Library
# $Id$
#
# this demo script creates four windows containing an image and a slider.
# drag the slider to modify the image.
#

import sys

if sys.version_info[0] > 2:
    import tkinter
else:
    import Tkinter as tkinter

from PIL import Image, ImageTk, ImageEnhance

#
# enhancer widget


class Enhance(tkinter.Frame):
    def __init__(self, master, image, name, enhancer, lo, hi):
        tkinter.Frame.__init__(self, master)

        # set up the image
        self.tkim = ImageTk.PhotoImage(image.mode, image.size)
        self.enhancer = enhancer(image)
        self.update("1.0")  # normalize

        # image window
        tkinter.Label(self, image=self.tkim).pack()

        # scale
        s = tkinter.Scale(self, label=name, orient=tkinter.HORIZONTAL,
                          from_=lo, to=hi, resolution=0.01,
                          command=self.update)
        s.set(self.value)
        s.pack()

    def update(self, value):
        self.value = float(value)
        self.tkim.paste(self.enhancer.enhance(self.value))

#
# main

if len(sys.argv) != 2:
    print("Usage: enhancer file")
    sys.exit(1)

root = tkinter.Tk()

im = Image.open(sys.argv[1])
im.thumbnail((200, 200))

Enhance(root, im, "Color", ImageEnhance.Color, 0.0, 4.0).pack()
Enhance(tkinter.Toplevel(), im, "Sharpness", ImageEnhance.Sharpness, -2.0, 2.0).pack()
Enhance(tkinter.Toplevel(), im, "Brightness", ImageEnhance.Brightness, -1.0, 3.0).pack()
Enhance(tkinter.Toplevel(), im, "Contrast", ImageEnhance.Contrast, -1.0, 3.0).pack()

root.mainloop()
|
[
"harshit8858@gmail.com"
] |
harshit8858@gmail.com
|
48a4d700a45b7466b60ce18dc1bbe99043d5a7ed
|
42a7abc31b447d1bfa5db19d5e047c475a00ca81
|
/leetcode/contest/2017/mar4/531.py
|
15c29bfc748d3be08dcc2797495bcce045c9fc0b
|
[] |
no_license
|
jonathantsang/CompetitiveProgramming
|
f01f3727e49e03038a981871f29234fccfac0e7c
|
05d49ca91ac2a4d414dbb38b01266962ce68f34a
|
refs/heads/master
| 2022-12-12T11:52:13.327425
| 2022-12-07T20:37:37
| 2022-12-07T20:37:37
| 121,400,994
| 2
| 0
| null | 2020-10-08T19:24:10
| 2018-02-13T15:43:07
|
Python
|
UTF-8
|
Python
| false
| false
| 1,229
|
py
|
class Solution(object):
    def findLonelyPixel(self, picture):
        """
        :type picture: List[List[str]]
        :rtype: int
        """
        lonely = 0
        picleng = len(picture)
        row = []
        column = []
        for w in range(0, 501):
            row.append(0)
        for w in range(0, 501):
            column.append(0)
        ## Find the number of B in the rows
        for j in range(0, picleng):
            leng = len(picture[j])
            for i in range(0, leng):
                if(picture[j][i] == 'B'):
                    row[j] += 1
        ## Find the number of B in the columns
        for j in range(0, picleng):
            leng = len(picture[j])
            for i in range(0, leng):
                if(picture[j][i] == 'B'):
                    column[i] += 1
        ## Go through all the cells and count the lonely pixels
        for j in range(0, picleng):
            leng = len(picture[j])
            for i in range(0, leng):
                ## If it is a B
                if('B' == picture[j][i]):
                    if(row[j] == 1 and column[i] == 1):
                        lonely += 1
        return lonely
|
[
"j26tsang@gmail.com"
] |
j26tsang@gmail.com
|
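A worked example for the findLonelyPixel solution above (LeetCode 531). Each 'B' below is the only one in both its row and its column, so the expected answer is 3:

picture = [["W", "W", "B"],
           ["W", "B", "W"],
           ["B", "W", "W"]]
print(Solution().findLonelyPixel(picture))  # 3
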
f19f8a0455cc9545e32288d7e8eefcc1050952ce
|
88eb24f0890457b994e867b68e1b2d3a34a3b900
|
/rv/uniform.py
|
ee15e496062954df619b976f1ae08b5fc1370b41
|
[] |
no_license
|
jiye-ML/Probalistic-Graphical-Model-21-Sample-Method
|
9cf4168ee5cf2fb33d92236997fc03ff84a2243b
|
a17351de817dd340a189696592dee9ec77e49edd
|
refs/heads/master
| 2020-05-17T08:44:24.075793
| 2019-04-26T12:56:22
| 2019-04-26T12:56:22
| 183,615,211
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,699
|
py
|
import numpy as np
from rv.rv import RandomVariable


class Uniform(RandomVariable):
    """
    Uniform distribution
    p(x|a, b)
    = 1 / ((b_0 - a_0) * (b_1 - a_1)) if a <= x <= b else 0
    """

    def __init__(self, low, high):
        """
        construct uniform distribution

        Parameters
        ----------
        low : int, float, or np.ndarray
            lower boundary
        high : int, float, or np.ndarray
            higher boundary
        """
        super().__init__()
        low = np.asarray(low)
        high = np.asarray(high)
        assert low.shape == high.shape
        assert (low <= high).all()
        self.low = low
        self.high = high
        self.value = 1 / np.prod(high - low)

    @property
    def low(self):
        return self.parameter["low"]

    @low.setter
    def low(self, low):
        self.parameter["low"] = low

    @property
    def high(self):
        return self.parameter["high"]

    @high.setter
    def high(self, high):
        self.parameter["high"] = high

    @property
    def ndim(self):
        return self.low.ndim

    @property
    def size(self):
        return self.low.size

    @property
    def shape(self):
        return self.low.shape

    @property
    def mean(self):
        return 0.5 * (self.low + self.high)

    def _pdf(self, X):
        higher = np.logical_and.reduce(X >= self.low, 1)
        lower = np.logical_and.reduce(X <= self.high, 1)
        return self.value * np.logical_and(higher, lower)

    def _draw(self, sample_size=1):
        u01 = np.random.uniform(size=(sample_size,) + self.shape)
        return u01 * (self.high - self.low) + self.low
|
[
"woxinxie1234@163.com"
] |
woxinxie1234@163.com
|
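A small sketch of the Uniform class above, calling its _pdf/_draw hooks directly since the RandomVariable base class from rv.rv is not shown in this row (it presumably exposes public wrappers):

import numpy as np
u = Uniform(low=[0.0, 0.0], high=[2.0, 4.0])
X = u._draw(5)    # 5 samples, array of shape (5, 2)
print(u._pdf(X))  # each in-support sample scores 1 / ((2-0) * (4-0)) = 0.125
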
a4a582cb04903022b261b7f8fc8ea362601afe49
|
ac52ddddf672216998a33d5e6905a1a1e4d97a55
|
/pipe/scripts/filter_misaligned_shared_indels.py
|
549616ebf8ce5e5022ab69d39483766500b714b8
|
[
"MIT"
] |
permissive
|
EddieKHHo/megadaph
|
62e29e72896a5969b21d531a20a95fbce1589c3c
|
23010e7ce9ee6cceedaa3d4ba3e990e9af34aae0
|
refs/heads/master
| 2021-09-22T00:53:15.881183
| 2018-09-04T02:14:21
| 2018-09-04T02:14:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,362
|
py
|
#!/usr/bin/env python3
"""Remove indels which are falsely aligned as runs of single base mismatches in
other samples."""
# This script is awful
import sys

import click
from pandas import read_csv
from plumbum.cmd import rg

BASE_IDX = {
    "A": [4, 8],
    "T": [5, 9],
    "C": [6, 10],
    "G": [7, 11]
}


def is_insertion(indel):
    return len(indel["REF"]) < len(indel["ALT"])


def get_ref_base(chrom, pos, pileup):
    exit_code, stdout, stderr = rg.run(
        [r"\s".join([chrom, str(pos)]), pileup],
        retcode=None
    )
    if exit_code != 0:
        return 0
    return stdout.split()[2]


def check_indel(indel, pileups):
    if is_insertion(indel):
        bad_bases = indel["ALT"][1:]
        indel_length = len(indel["ALT"])
    else:
        indel_length = len(indel["REF"])
        bad_base_start = indel["POS"] + indel_length
        bad_base_pos = range(bad_base_start, bad_base_start + indel_length)
        bad_bases = [
            get_ref_base(indel["CHROM"], pos, pileups[0])
            for pos in bad_base_pos
        ]
    if any([base == 0 for base in bad_bases]):
        return True
    adj_base = int(indel["POS"] + 1)
    adj_base_pos = range(adj_base, adj_base + indel_length - 1)
    for bad, adj in zip(bad_bases, adj_base_pos):
        if get_ref_base(indel["CHROM"], adj, pileups[0]) != bad:
            for pileup in pileups:
                counts = rg.run(
                    [r"\s".join([indel["CHROM"], str(adj)]),
                     pileup], retcode=None
                )[1].split()
                if counts:
                    target_base_counts = (
                        int(counts[BASE_IDX[bad][0]]) +
                        int(counts[BASE_IDX[bad][1]])
                    )
                    if target_base_counts > 0:
                        return False
    return True


def filter_indels(pileups, indels):
    if len(indels):
        passing = indels.apply(check_indel, axis=1, pileups=pileups)
        filtered_indels = indels[passing]
    else:
        filtered_indels = indels
    filtered_indels.to_csv(sys.stdout, sep="\t", index=False)


@click.command()
@click.option("--indels", help="TSV file containing indels.")
@click.argument("pileups", nargs=-1)
def cli(pileups, indels):
    filter_indels(pileups, read_csv(indels, sep="\t"))


if __name__ == "__main__":
    cli()
|
[
"13209544+fennerm@users.noreply.github.com"
] |
13209544+fennerm@users.noreply.github.com
|
13a2d878d94dd5ce7ae75e793523a256cbb3845e
|
4fc21c3f8dca563ce8fe0975b5d60f68d882768d
|
/Ekeopara_Praise/Phase 1/Python Basic 1/Day4 Tasks/Task 4.py
|
859dab1a442dd79e17c8a15f153c9475f280c367
|
[
"MIT"
] |
permissive
|
Uche-Clare/python-challenge-solutions
|
17e53dbedbff2f33e242cf8011696b3059cd96e9
|
49ede6204ee0a82d5507a19fbc7590a1ae10f058
|
refs/heads/master
| 2022-11-13T15:06:52.846937
| 2020-07-10T20:59:37
| 2020-07-10T20:59:37
| 266,404,840
| 1
| 0
|
MIT
| 2020-05-23T19:24:56
| 2020-05-23T19:24:55
| null |
UTF-8
|
Python
| false
| false
| 299
|
py
|
'''4. Write a Python program to test whether a passed letter is a vowel or not.'''

letr = str(input("Enter any letter: "))
vowel = ["A", 'a', "E", 'e', "I", 'i', "O", 'o', "U", 'u']
if letr in vowel:
    print("The letter entered is a vowel!")
else:
    print("The letter entered is not a vowel!")
|
[
"60721962+Ekeopara-Praise@users.noreply.github.com"
] |
60721962+Ekeopara-Praise@users.noreply.github.com
|
0ea60aa86c763bb8a7b07ee39bbe0bdd0cbcfddd
|
50725a9ada0fe57fa2b49af36863eb1ce9d8c134
|
/lists/forms.py
|
319463e501e13dc9c5482844c30dda02536e6d03
|
[] |
no_license
|
dungnguyen1991/superlists
|
1381537a2b168b6d5ea5bac23608f8f425ce642f
|
e3bcca1876275414d5dba1f83c482eadaff381d3
|
refs/heads/master
| 2023-08-28T07:09:12.855120
| 2021-10-12T09:24:21
| 2021-10-12T09:24:21
| 396,571,092
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,324
|
py
|
from django import forms
from django.core.exceptions import ValidationError

from lists.models import Item, List

EMPTY_ITEM_ERROR = "You can't have an empty list item"
DUPLICATE_ITEM_ERROR = "You've already got this in your list"


class ItemForm(forms.models.ModelForm):

    class Meta:
        model = Item
        fields = ('text', )
        widgets = {
            'text': forms.TextInput(attrs={
                'placeholder': 'Enter a to-do item',
                'class': 'form-control input-lg',
            })
        }
        error_messages = {
            'text': {
                'required': EMPTY_ITEM_ERROR
            }
        }


class ExistingListItemForm(ItemForm):
    def __init__(self, for_list, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.instance.list = for_list

    def validate_unique(self):
        try:
            self.instance.validate_unique()
        except ValidationError as e:
            e.error_dict = {'text': [DUPLICATE_ITEM_ERROR]}
            self._update_errors(e)


class NewListForm(ItemForm):
    def save(self, owner):
        if owner.is_authenticated:
            return List.create_new(first_item_text=self.cleaned_data['text'], owner=owner)
        else:
            return List.create_new(first_item_text=self.cleaned_data['text'])
|
[
"dungnguyendeveloper1991@gmail.com"
] |
dungnguyendeveloper1991@gmail.com
|
014ee15478ee543d6b6c2633912f2e2076087098
|
f576f0ea3725d54bd2551883901b25b863fe6688
|
/sdk/redis/azure-mgmt-redis/generated_samples/redis_cache_linked_server_get.py
|
b448a35a6f903c774db566a3add94321ac068413
|
[
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] |
permissive
|
Azure/azure-sdk-for-python
|
02e3838e53a33d8ba27e9bcc22bd84e790e4ca7c
|
c2ca191e736bb06bfbbbc9493e8325763ba990bb
|
refs/heads/main
| 2023-09-06T09:30:13.135012
| 2023-09-06T01:08:06
| 2023-09-06T01:08:06
| 4,127,088
| 4,046
| 2,755
|
MIT
| 2023-09-14T21:48:49
| 2012-04-24T16:46:12
|
Python
|
UTF-8
|
Python
| false
| false
| 1,543
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

from azure.identity import DefaultAzureCredential
from azure.mgmt.redis import RedisManagementClient

"""
# PREREQUISITES
    pip install azure-identity
    pip install azure-mgmt-redis
# USAGE
    python redis_cache_linked_server_get.py

    Before running the sample, set the client ID, tenant ID, and client secret
    of the AAD application as environment variables: AZURE_CLIENT_ID, AZURE_TENANT_ID,
    AZURE_CLIENT_SECRET. For more info about how to get the value, please see:
    https://docs.microsoft.com/azure/active-directory/develop/howto-create-service-principal-portal
"""


def main():
    client = RedisManagementClient(
        credential=DefaultAzureCredential(),
        subscription_id="subid",
    )

    response = client.linked_server.get(
        resource_group_name="rg1",
        name="cache1",
        linked_server_name="cache2",
    )
    print(response)


# x-ms-original-file: specification/redis/resource-manager/Microsoft.Cache/stable/2023-04-01/examples/RedisCacheLinkedServer_Get.json
if __name__ == "__main__":
    main()
|
[
"noreply@github.com"
] |
Azure.noreply@github.com
|
439f9f249c544c5523729e5e44bf9a0b2f7f1174
|
954c112d4805da5b1c8ba6460ae137935d85fe69
|
/advanced/methods.py
|
972de67fc2b7a1924eb22c1ebfe1fed93ab18985
|
[] |
no_license
|
ProsenjitKumar/DataHeadOffice
|
b329b96b9efa59976031483dc32beb5e09082528
|
89dc2b4fe73bc952252b190d3c64186908a026f1
|
refs/heads/master
| 2020-04-24T10:23:25.974884
| 2019-04-29T17:10:36
| 2019-04-29T17:10:36
| 171,892,722
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,167
|
py
|
class Employee:
    def __init__(self, first_name, last_name, pay):
        self.first_name = first_name
        self.last_name = last_name
        self.pay = pay

    @classmethod
    def another_stuff(cls, full_name):
        cls.full_name = full_name

    @classmethod
    def from_string(cls, emp_str):
        first, last, pay = emp_str.split('-')
        return cls(first, last, pay)

    @staticmethod
    def is_workday(day):
        if day.weekday() == 5 or day.weekday() == 6:  # saturday or sunday
            return False
        return True


import datetime

my_date = datetime.date(2019, 2, 22)

print(Employee.is_workday(my_date))
print(my_date.weekday())

# emp_obj = Employee('Prosenjit', 'Das', 26980)
# emp_obj1 = Employee('Samoli', 'Das', 5688)
# emp_obj.another_stuff('Prosenjit Das')
# print(Employee.full_name)
# print(emp_obj.full_name)
# print(emp_obj1.full_name)

# emp_str_1 = 'Prosenjit-Das-85200'
# emp_str_2 = 'Jalil-Khan-56870'
# emp_str_3 = 'Suvo-Roy-87452'

# first, last, pay = emp_str_1.split('-')
# new_emp_1 = Employee(first, last, pay)

# new_emp_1 = Employee.from_string(emp_str_1)
# print(new_emp_1.first_name)
# print(new_emp_1.pay)
|
[
"prosenjitearnkuar@gmail.com"
] |
prosenjitearnkuar@gmail.com
|
eb4caeb850832163e9a1b127ded1f4c34520b942
|
6710c52d04e17facbc9fb35a7df313f7a2a7bd53
|
/Templates/0133. Clone Graph.py
|
c68d1c458509885aef75e70880935fb3dbd2c431
|
[] |
no_license
|
pwang867/LeetCode-Solutions-Python
|
535088fbe747a453360457728cc22cf336020bd2
|
188befbfb7080ba1053ee1f7187b177b64cf42d2
|
refs/heads/master
| 2022-11-13T16:20:28.211707
| 2020-06-28T06:01:14
| 2020-06-28T06:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,897
|
py
|
"""
# Definition for a Node.
class Node(object):
def __init__(self, val, neighbors):
self.val = val
self.neighbors = neighbors
"""
# method 2, combine node creation and relationship copy
# make sure that: 1. a node is only copied once, 2. a node's relationship is only processed once
class Solution(object):
def cloneGraph(self, node):
"""
:type node: Node
:rtype: Node
"""
if not node:
return None
d = {node: Node(node.val, [])}
visited = set() # visited means the nodes whose copies' neighbors has been copied
stack = [node]
while stack:
p = stack.pop()
if p in visited:
continue
visited.add(p)
for nei in p.neighbors:
if nei not in d:
d[nei] = Node(nei.val, [])
d[p].neighbors.append(d[nei])
stack.append(nei)
return d[node]
# method 1: separate creating node and copying relation
class Solution1(object):
def cloneGraph(self, node):
"""
:type node: Node
:rtype: Node
"""
d = {}
stack = [node]
while stack:
p = stack.pop()
if p in d:
continue
else:
d[p] = Node(p.val, [])
for nei in p.neighbors:
stack.append(nei)
stack = [node]
visited = set() # save nodes whose neighbor relationship has been cloned
while stack:
p = stack.pop()
if p in visited:
continue
else:
visited.add(p)
for nei in p.neighbors:
d[p].neighbors.append(d[nei])
stack.append(nei)
return d[node]
"""
Given a reference of a node in a connected undirected graph, return a deep copy (clone) of the graph.
Each node in the graph contains a val (int) and a list (List[Node]) of its neighbors.
Example:
Input:
{"$id":"1","neighbors":[{"$id":"2","neighbors":[{"$ref":"1"},{"$id":"3","neighbors":[{"$ref":"2"},
{"$id":"4","neighbors":[{"$ref":"3"},{"$ref":"1"}],"val":4}],"val":3}],"val":2},{"$ref":"4"}],"val":1}
Explanation:
Node 1's value is 1, and it has two neighbors: Node 2 and 4.
Node 2's value is 2, and it has two neighbors: Node 1 and 3.
Node 3's value is 3, and it has two neighbors: Node 2 and 4.
Node 4's value is 4, and it has two neighbors: Node 1 and 3.
Note:
The number of nodes will be between 1 and 100.
The undirected graph is a simple graph, which means no repeated edges and no self-loops in the graph.
Since the graph is undirected, if node p has node q as neighbor, then node q must have node p as neighbor too.
You must return the copy of the given node as a reference to the cloned graph.
"""
|
[
"gemingshangweichenggong@gmail.com"
] |
gemingshangweichenggong@gmail.com
|
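A tiny driver for the cloneGraph solutions above; the Node class is only described in the snippet's docstring, so a minimal stand-in is defined here:

class Node(object):
    def __init__(self, val, neighbors):
        self.val = val
        self.neighbors = neighbors

a = Node(1, [])
b = Node(2, [])
a.neighbors.append(b)
b.neighbors.append(a)
copy = Solution().cloneGraph(a)
print(copy.val, copy.neighbors[0].val, copy is a)  # 1 2 False
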
240ba5faf2e6c26a2582db83afe8123094b20c04
|
1515be3015ad988278d5a095416c0a0066a02757
|
/src/users/models/componentsschemasmicrosoft_graph_detectedappallof1.py
|
5b14ba5d58afbceee708dcfb5a655fe2b76c8442
|
[
"MIT"
] |
permissive
|
peombwa/Sample-Graph-Python-Client
|
2ad494cc5b5fe026edd6ed7fee8cac2dd96aaa60
|
3396f531fbe6bb40a740767c4e31aee95a3b932e
|
refs/heads/master
| 2020-12-29T09:50:38.941350
| 2020-02-05T22:45:28
| 2020-02-05T22:45:28
| 238,561,578
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,975
|
py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from msrest.serialization import Model


class ComponentsschemasmicrosoftGraphDetectedappallof1(Model):
    """detectedApp.

    A managed or unmanaged app that is installed on a managed device. Unmanaged
    apps will only appear for devices marked as corporate owned.

    :param display_name: Name of the discovered application. Read-only
    :type display_name: str
    :param version: Version of the discovered application. Read-only
    :type version: str
    :param size_in_byte: Discovered application size in bytes. Read-only
    :type size_in_byte: long
    :param device_count: The number of devices that have installed this
     application
    :type device_count: int
    :param managed_devices:
    :type managed_devices: list[~users.models.MicrosoftgraphmanagedDevice]
    """

    _validation = {
        'device_count': {'maximum': 2147483647, 'minimum': -2147483648},
    }

    _attribute_map = {
        'display_name': {'key': 'displayName', 'type': 'str'},
        'version': {'key': 'version', 'type': 'str'},
        'size_in_byte': {'key': 'sizeInByte', 'type': 'long'},
        'device_count': {'key': 'deviceCount', 'type': 'int'},
        'managed_devices': {'key': 'managedDevices', 'type': '[MicrosoftgraphmanagedDevice]'},
    }

    def __init__(self, display_name=None, version=None, size_in_byte=None, device_count=None, managed_devices=None):
        super(ComponentsschemasmicrosoftGraphDetectedappallof1, self).__init__()
        self.display_name = display_name
        self.version = version
        self.size_in_byte = size_in_byte
        self.device_count = device_count
        self.managed_devices = managed_devices
|
[
"peombwa@microsoft.com"
] |
peombwa@microsoft.com
|
3d5ebcfd0cf48f5f261c0ae6530b42549b161e95
|
33c7a8d150f0f95f5240c1ad8b458284e4db7ae0
|
/musicdl/modules/utils/logger.py
|
35a600ed1b3cd771a681fbbdaeb69ddeb9c8bedf
|
[
"MIT"
] |
permissive
|
Yellowhxc/musicdl
|
55a81d75923f7d3cf9917aa6ef635d4ddabdd4ef
|
97d6254c9427046fef5d2ef1e65297cf04397728
|
refs/heads/master
| 2023-01-07T04:56:26.223829
| 2020-11-03T17:37:22
| 2020-11-03T17:37:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,549
|
py
|
'''
Function:
一些终端打印工具
Author:
Charles
微信公众号:
Charles的皮卡丘
'''
import logging
from prettytable import PrettyTable
'''打印日志类'''
class Logger():
def __init__(self, logfilepath, **kwargs):
setattr(self, 'logfilepath', logfilepath)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
handlers=[logging.FileHandler(logfilepath),
logging.StreamHandler()])
@staticmethod
def log(level, message):
logging.log(level, message)
    def debug(self, message, disable_print=False):
        if disable_print:
            with open(self.logfilepath, 'a') as fp:
                fp.write(message + '\n')
        else:
            Logger.log(logging.DEBUG, message)
    def info(self, message, disable_print=False):
        if disable_print:
            with open(self.logfilepath, 'a') as fp:
                fp.write(message + '\n')
        else:
            Logger.log(logging.INFO, message)
    def warning(self, message, disable_print=False):
        if disable_print:
            with open(self.logfilepath, 'a') as fp:
                fp.write(message + '\n')
        else:
            Logger.log(logging.WARNING, message)
    def error(self, message, disable_print=False):
        if disable_print:
            with open(self.logfilepath, 'a') as fp:
                fp.write(message + '\n')
        else:
            Logger.log(logging.ERROR, message)
'''Print a table'''
def printTable(title, items):
assert isinstance(title, list) and isinstance(items, list), 'title and items should be list in function printTable'
table = PrettyTable(title)
for item in items: table.add_row(item)
print(table)
return table
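# Example usage (illustrative values, not from the original project):
#   printTable(['id', 'title'], [[1, 'song A'], [2, 'song B']])
# prints an aligned ASCII table with headers 'id' and 'title' and returns it.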
|
[
"1159254961@qq.com"
] |
1159254961@qq.com
|
589872846ac6ea51c041f6cd2f35f8715f7aa528
|
d04c79e5ed09d47f306eeee2bd9ef9a1a67ef693
|
/20200316/118. Pascal's Triangle.py
|
16df0c80adff5ce4749bdf955b38fab3aeb9f641
|
[] |
no_license
|
mycomax0416/LeetCode
|
fe1d345d9b9355b37d9aa33b2633597de65a3838
|
b706a57a64313ca48df9eb61cb2e08d16ddf35b1
|
refs/heads/master
| 2021-03-09T20:22:24.356206
| 2020-04-04T12:27:39
| 2020-04-04T12:27:39
| 246,377,186
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 465
|
py
|
from typing import List

class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if numRows == 0:
return []
elif numRows == 1:
return [[1]]
else:
ans = [[1]]
for n in range(numRows-1):
row = [1]
for idx in range(len(ans)-1):
row.append(ans[-1][idx]+ans[-1][idx+1])
row.append(1)
ans.append(row)
return ans
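# Example: Solution().generate(4) -> [[1], [1, 1], [1, 2, 1], [1, 3, 3, 1]]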
|
[
"mycomax0416@gmail.com"
] |
mycomax0416@gmail.com
|
4d77d33b162d01b0729e4f0492e7ad90b02aa416
|
4ca8df3a127e9b15cbfecea6505928741f685a63
|
/gongfei/month04/spider/爬虫滑块验证.py
|
7bce8222aa6cd7fdcb4220a8bd736da8e4350889
|
[] |
no_license
|
gongfei6644/gongfei
|
2beb082c56197bc23ca20a6927ff6c10d8beaa83
|
bfdd5e6a3a8d76ad1e43cf54df186b944cad29e4
|
refs/heads/master
| 2022-11-30T20:49:22.213040
| 2020-08-16T12:52:28
| 2020-08-16T12:52:28
| 286,283,597
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,385
|
py
|
# Python crawler: solving a slider captcha
# Slider captcha demo page: http://www.cnbaowen.net/api/geetest/
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait  # wait for elements to load
from selenium.webdriver.common.action_chains import ActionChains  # drag and drop
from selenium.webdriver.support import expected_conditions as EC  # wait-and-find conditions
from selenium.common.exceptions import TimeoutException, NoSuchElementException  # errors
from selenium.webdriver.common.by import By  # element locators
from PIL import Image  # image processing
import requests  # HTTP requests
import time
import re  # regular expressions
import random  # random numbers
from io import BytesIO
import os
def merge_image(image_file,location_list):
"""
拼接图片
:param image_file:
:param location_list:
:return:
"""
im = Image.open(image_file) #打开图片二进制文件
im.save('code.jpg') #保存到code.jpg
new_im = Image.new('RGB',(260,116)) #空白图片长260,宽116的实例
# 把无序的图片 切成52张小图片
im_list_upper = [] #上边边
im_list_down = [] #下半边
# print(location_list)
for location in location_list:
# print(location['y'])
if location['y'] == -58: # 上半边
#im.crop(图片的x左坐标,图片的y上坐标,图片的x右坐标,图片的y下坐标)左、上、右和下像素的4元组
im_list_upper.append(im.crop((abs(location['x']),58,abs(location['x'])+10,116)))
if location['y'] == 0: # 下半边
#同理如上,返回一个对象的对象PIL.Image.Image的object
im_list_down.append(im.crop((abs(location['x']),0,abs(location['x'])+10,58)))
x_offset = 0
for im in im_list_upper: #拼接上半部分
new_im.paste(im,(x_offset,0)) # 把小图片放到 新的空白图片上,im为无序图片,(x_offset,0)用的是二元组,可以为四元(左上右下),二元或不填,默认为左上方填充
x_offset += im.size[0] #每一次一定图片的长度
x_offset = 0 #重置为零,下面同样的拼接下半部分
for im in im_list_down:
new_im.paste(im,(x_offset,58))
x_offset += im.size[0]
# new_im.show() #显示生成的图片
return new_im #返回这张图片
def get_image(driver,div_path):
    '''
    Download the shuffled strips, then stitch them into the complete image.
    :param driver: the selenium WebDriver
    :param div_path: XPath of the strip <div> elements
    :return: the stitched PIL image
    '''
    time.sleep(2)
    background_images = driver.find_elements_by_xpath(div_path)
    location_list = []
    image_url = ""
    for background_image in background_images:
        location = {}
        result = re.findall('background-image: url\("(.*?)"\); background-position: (.*?)px (.*?)px;', background_image.get_attribute('style'))
        # print(result)
        location['x'] = int(result[0][1])  # x offset of the shuffled strip
        location['y'] = int(result[0][2])  # y offset of the shuffled strip
        image_url = result[0][0].replace('webp', 'jpg')  # image URL
        location_list.append(location)  # collect the offset dicts, e.g. {"x": -157, "y": -58}
    print('==================================')
    # replaces a url such as http://static.geetest.com/pictures/gt/579066de6/579066de6.webp
    # .content is the response body, in bytes
    image_result = requests.get(image_url).content  # raw bytes, e.g. b'\xff...'
    # BytesIO wraps the bytes in a file-like object, equivalent to:
    # with open('1.jpg','wb') as f:
    #     f.write(image_result)
    image_file = BytesIO(image_result)  # the scrambled image, as a file-like object
    image = merge_image(image_file, location_list)  # stitch into a complete PIL image
    return image
return image
def get_track(distance):
    '''
    Build a movement track that mimics a human slide: uniform acceleration
    first, then uniform deceleration.
    Basic formulas of uniformly accelerated motion:
    (1) v = v0 + a*t
    (2) s = v0*t + (1/2)*a*t^2
    (3) v^2 - v0^2 = 2*a*s
    :param distance: the distance that needs to be covered
    :return: list of distances moved in each 0.2 s interval
    '''
    # initial velocity
    v = 0
    # sample the track every 0.2 s; each track entry is the displacement within that interval
    t = 0.2
    # displacement/track list; each element is the displacement of one 0.2 s step
    tracks = []
    # displacement so far
    current = 0
    accuracy_distance = distance
    # start decelerating after three fifths of the target distance
    mid = distance * 3/5
    # distance += 20  # overshoot a little first, then slide back at the end
    # a = random.randint(1,3)
    while current < distance:
        if current < mid:
            # a smaller acceleration means a smaller per-step displacement, so the
            # simulated track gets more, finer-grained points
            a = random.randint(2, 4)  # acceleration while speeding up
        else:
            a = -random.randint(2, 4)  # acceleration while slowing down
        # initial velocity of this step
        v0 = v
        # displacement within this 0.2 s interval
        s = v0*t + 0.5*a*(t**2)  # s = v0*t + (1/2)*a*t^2
        # current position
        current += s
        # append to the track list
        print(a)
        tracks.append(round(s))  # record this step's x displacement
        # the end velocity of this 0.2 s step becomes the initial velocity of the
        # next one, chaining the slide motions together
        v = v0 + a*t
    # slide back to roughly the exact position
    if abs(current - distance) > 1:
        s = -(current - distance - 1)
        tracks.append(round(s))  # record the correction step
    print(current, "<><><>", distance)
    # for i in range(4):
    #     tracks.append(-random.randint(1,3))
    return tracks  # return the list of per-step displacements
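# Worked example of the kinematics above (illustrative numbers): with v0 = 0,
# t = 0.2 and a = 3, the first step is s = 0*0.2 + 0.5*3*0.2**2 = 0.06 px and
# the end velocity is v = 0 + 3*0.2 = 0.6, so early steps round to 0 and the
# per-step displacement ramps up as v grows, then shrinks once a turns negative.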
def get_distance(image1,image2):
    '''
    Compute the distance the slider captcha needs to move.
    :param image1: image object without the gap
    :param image2: image object with the gap
    :return: the distance to move
    '''
    # print('size', image1.size)
    threshold = 50  # RGB difference threshold
    for i in range(0, image1.size[0]):  # x coordinates, 0 to 260
        for j in range(0, image1.size[1]):  # y coordinates, 0 to 116
            pixel1 = image1.getpixel((i, j))  # returns a pixel tuple, e.g. (255, 101, 86)
            pixel2 = image2.getpixel((i, j))
            res_R = abs(pixel1[0] - pixel2[0])  # red channel difference
            res_G = abs(pixel1[1] - pixel2[1])  # green channel difference
            res_B = abs(pixel1[2] - pixel2[2])  # blue channel difference
            if res_R > threshold and res_G > threshold and res_B > threshold:
                # a colour difference above the threshold in all three channels
                # locates the gap; since the slider always starts at 0, the x
                # coordinate i is exactly the distance to move along x
                return i  # the distance to move
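# Illustrative example: if the gap begins at x = 182, every column to its left
# matches within the threshold, so get_distance returns 182 -- the number of
# pixels the slider knob must travel along x.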
def main_check_code(driver, element):
"""
拖动识别验证码
:param driver:
:param element:
:return:
"""
image1 = get_image(driver, '//div[@class="gt_cut_bg gt_show"]/div') #拼接无序缺口图片1
image2 = get_image(driver, '//div[@class="gt_cut_fullbg gt_show"]/div') # 拼接无序完整图片2
# 图片上 缺口的位置的x坐标
# 2 对比两张图片的所有RBG像素点,得到不一样像素点的x值,即要移动的距离
l = get_distance(image1, image2) #像素值 182
print('l=',l)
# 3 获得移动轨迹
track_list = get_track(l) #模拟人行为滑动,即匀加速在匀速
print('第一步,点击滑动按钮')
#ActionChains执行用户操作的WebDriver实例,按住元素上的鼠标左键。on_element:鼠标向下移动的元素。perform() 执行操作
ActionChains(driver).click_and_hold(on_element=element).perform() # 点击鼠标左键,按住不放
time.sleep(0.3)
print('第二步,拖动元素')
for track in track_list:
#move_by_offset将鼠标移动到当前鼠标位置的偏移量。xoffset为x轴,yoffset为y轴
ActionChains(driver).move_by_offset(xoffset=track, yoffset=0).perform() # 鼠标移动到距离当前位置(x,y)
time.sleep(0.003)
# if l>100:
ActionChains(driver).move_by_offset(xoffset=-random.randint(2,5), yoffset=0).perform()
time.sleep(0.3)
print('第三步,释放鼠标')
#释放元素上的已按住的鼠标按钮。 on_element:鼠标向上移动的元素。
ActionChains(driver).release(on_element=element).perform()
time.sleep(5)
def main_check_slider(driver):
"""
检查滑动按钮是否加载
:param driver:
:return:
"""
while True:
try :
driver.get('http://www.cnbaowen.net/api/geetest/')
element = WebDriverWait(driver, 30, 0.5).until(EC.element_to_be_clickable((By.CLASS_NAME, 'gt_slider_knob')))
if element:
return element
except TimeoutException as e:
print('超时错误,继续')
time.sleep(5)
if __name__ == '__main__':
    count = 6  # at most 6 recognition attempts
    chrome_path = os.path.join(os.path.dirname(__file__), "chromedriver.exe")  # build the chromedriver path
    driver = webdriver.Chrome(executable_path=chrome_path)  # instantiate Chrome
    try:
        # wait for the slider button to finish loading
        element = main_check_slider(driver)  # returns the slider element once the page has loaded
        while count > 0:
            main_check_code(driver, element)  # run the slider verification
            time.sleep(2)
            try:
                success_element = (By.CSS_SELECTOR, '.gt_holder .gt_ajax_tip.gt_success')
                # locate the success marker
                print('suc=', driver.find_element_by_css_selector('.gt_holder .gt_ajax_tip.gt_success'))
                # wait up to 20 s until the success element is present
                success_images = WebDriverWait(driver, 20).until(EC.presence_of_element_located(success_element))
                if success_images:  # present, not empty
                    print('Captcha recognized successfully!!!!!!')
                    count = 0
                    # after verification the page redirects automatically (or add a click to
                    # navigate); hook your own crawling/parsing code in after this point
                    break
            except NoSuchElementException as e:
                print('Recognition failed, retrying')
                count -= 1
                time.sleep(2)
        else:
            print('too many attempts to check the code')
            exit('exiting program')
finally:
driver.close()
|
[
"1"
] |
1
|
2eb9bf182ce1419f171b1e57e534ce9d199b59c2
|
f138cfdc2f488100074d946a059f0967d76f4a70
|
/tests/example/settings.py
|
5331be406d20d662be7b18c66763d0753f8bd900
|
[
"MIT"
] |
permissive
|
davecap/django-subdomains
|
7677a5a31ac6cf8d22391997288821af83f4d4eb
|
d595959a8bce8ff9605c42f367c02a91340e9a05
|
refs/heads/master
| 2021-01-16T20:28:55.738879
| 2012-07-04T19:56:43
| 2012-07-04T19:56:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 651
|
py
|
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
SITE_ID = 1
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'subdomains.middleware.SubdomainURLRoutingMiddleware',
)
ROOT_URLCONF = 'example.urls.application'
SUBDOMAIN_URLCONFS = {
None: 'example.urls.marketing',
'api': 'example.urls.api',
'www': 'example.urls.marketing',
}
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'example.wsgi.application'
INSTALLED_APPS = (
'django.contrib.sites',
'example',
'subdomains',
)
|
[
"ted@kaemming.com"
] |
ted@kaemming.com
|
07ffc5e871f981299be97b62551c7b294f59e64a
|
a2e638cd0c124254e67963bda62c21351881ee75
|
/Extensions/Default/FPythonCode/interestRateSpreadDeltaCurveBucketsShift.py
|
5440ba998dea1a913984998dd3f6f72075eb13f5
|
[] |
no_license
|
webclinic017/fa-absa-py3
|
1ffa98f2bd72d541166fdaac421d3c84147a4e01
|
5e7cc7de3495145501ca53deb9efee2233ab7e1c
|
refs/heads/main
| 2023-04-19T10:41:21.273030
| 2021-05-10T08:50:05
| 2021-05-10T08:50:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,483
|
py
|
import acm
def CreateBuckets(validSpreadCurve):
buckets = acm.FArray()
for point in validSpreadCurve.Points():
bucketDef = acm.FFixedDateTimeBucketDefinition()
date = point.ActualDate()
bucketDef.FixedDate( date )
bucketDef.DiscardIfExpired( True )
if(point.Instrument()):
bucketDef.Name(point.Name())
elif(not point.Date()):
period = point.DatePeriod()
bucketDef.Name(acm.Time().DatePeriodToString(period))
buckets.Add(bucketDef)
if buckets.Size():
        return buckets
else:
return None
yieldCurveTypeEnum = acm.GetDomain('enum(IrType)')
q = acm.CreateFASQLQuery(acm.FYieldCurve, 'AND')
yieldCurveType = q.AddOpNode('OR')
yieldCurveType.AddAttrNode('Type', 'EQUAL', yieldCurveTypeEnum.Enumeration('Spread'))
yieldCurveType.AddAttrNode('Type', 'EQUAL', yieldCurveTypeEnum.Enumeration('Attribute Spread'))
validSpreadCurves = q.Select()
ael_variables = [
['Base Value', 'Base Value', 'string', acm.GetDomain('EnumRiskBaseCalculation').Enumerators(), None, 1, 0, 'Determines if Theoretical TPL or Theoretical Value (default) is used as the base for curve shifts. Different results can be arrived at if the ThTPL column includes Cash values sensitive to curves (for example via Exact FX conversions).', None, 1],
['Yield Curve', 'Yield Curve', 'FYieldCurve', validSpreadCurves, None, 1, 0, 'The attribute spread curve that will be shifted in buckets.', None, 1]
]
def ael_custom_label( parameters, dictExtra ):
label = parameters.At('Yield Curve').Name()
if parameters.At('Base Value'):
label += ", Including Cash"
return label
def ael_main_ex(parameters, dictExtra):
validSpreadCurve = parameters['Yield Curve']
baseValue = parameters['Base Value']
buckets = CreateBuckets(validSpreadCurve)
if not buckets:
return 0
resultVector = []
timeBuckets = acm.Time.CreateTimeBucketsFromDefinitions(0, buckets, None, 0, True, False, False, False, False)
for idx, bucket in enumerate(timeBuckets):
params = acm.FNamedParameters()
if idx == 0:
params.AddParameter('baseValueChoice', baseValue)
params.AddParameter('buckets', timeBuckets)
params.AddParameter('yieldCurve', validSpreadCurve)
params.Name(bucket.Name())
params.UniqueTag(bucket.Spec())
resultVector.append(params)
return resultVector
|
[
"nencho.georogiev@absa.africa"
] |
nencho.georogiev@absa.africa
|
02976528d092c165749236583b260d77148e8d5c
|
6cbc44e497be77774c62b0d894bec03218b3b9c1
|
/utils/custom_context_processors.py
|
02a15c775eb2b8683d6969e39dbb424730aa982f
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
usnistgov/InternationalMetrologyResourceRegistry
|
416263c8775bd70f27d8d7892f6342a7c66f7adf
|
d1eaee864727466c0e62f7ed2fafa034ce17ddee
|
refs/heads/master
| 2021-01-17T19:21:07.832355
| 2016-09-23T21:33:50
| 2016-09-23T21:33:50
| 60,211,427
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 953
|
py
|
from django.conf import settings
def domain_context_processor(request):
    # Expose each optional CUSTOM_* setting to templates, defaulting to ''.
    keys = (
        'CUSTOM_TITLE', 'CUSTOM_ORGANIZATION', 'CUSTOM_NAME', 'CUSTOM_SUBTITLE',
        'CUSTOM_DATA', 'CUSTOM_CURATE', 'CUSTOM_EXPLORE', 'CUSTOM_COMPOSE',
        'CUSTOM_URL',
    )
    return {key: getattr(settings, key, '') for key in keys}
|
[
"guillaume.sousa@nist.gov"
] |
guillaume.sousa@nist.gov
|
aee8ed2b1303f7ec53448b742aac1467ec30e201
|
eed3d7d9dcf5804d602a1acb32d535e2f49d3324
|
/2018-07-01_Valid-Parentheses/solution.py
|
8aece5a21d9de72a16fb7a524bcc5bdcba662466
|
[] |
no_license
|
ansonmiu0214/algorithms
|
e928b8a932ca3050a3c6fd5d07fae6df3fd7c5c4
|
f35efb3536186dcd672c9aa91856d8d9213b1b82
|
refs/heads/master
| 2020-03-21T05:18:12.768425
| 2019-03-24T12:30:24
| 2019-03-24T12:30:24
| 138,154,059
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 782
|
py
|
#!/bin/python3
from collections import deque
"""
Stack-based approach to keep track of bracket depth in LIFO.
Running time complexity of O(n).
"""
def isValid(s):
brackets = { '(' : ')', '[': ']', '{': '}' }
stack = deque()
count = 0
for letter in s:
if letter in brackets:
stack.append(letter) # keep track of open brackets
count += 1
else:
if count == 0:
return False # not expecting closing
open_bracket = stack.pop()
count -= 1
if brackets[open_bracket] != letter:
return False # not the closing expected
return count == 0 # stack should be empty now
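# Examples: isValid("([]{})") -> True, isValid("(]") -> False, isValid(")") -> False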
if __name__ == "__main__":
print("Enter bracket pattern: ", end="")
s = input().strip()
print("Pattern '{}' is {}.".format(s, "valid" if isValid(s) else "not valid"))
|
[
"ansonmiu0214@gmail.com"
] |
ansonmiu0214@gmail.com
|
a40e9ab681bab08ec937fc638b267119a9bf37bc
|
350d6b7246d6ef8161bdfccfb565b8671cc4d701
|
/Last Stone Weight.py
|
ba9f5c8adb1c5b151887f749b6ed3c70aee94f06
|
[] |
no_license
|
YihaoGuo2018/leetcode_python_2
|
145d5fbe7711c51752b2ab47a057b37071d2fbf7
|
2065355198fd882ab90bac6041c1d92d1aff5c65
|
refs/heads/main
| 2023-02-14T14:25:58.457991
| 2021-01-14T15:57:10
| 2021-01-14T15:57:10
| 329,661,893
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 273
|
py
|
import heapq

class Solution(object):
    def lastStoneWeight(self, A):
        # Max-heap via negated values: repeatedly smash the two heaviest stones.
        pq = [-x for x in A]
        heapq.heapify(pq)
        for i in range(len(A) - 1):
            x, y = -heapq.heappop(pq), -heapq.heappop(pq)
            heapq.heappush(pq, -abs(x - y))
        return -pq[0]
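# Example: Solution().lastStoneWeight([2, 7, 4, 1, 8, 1]) -> 1
# (8 vs 7 -> 1, 4 vs 2 -> 2, 2 vs 1 -> 1, 1 vs 1 -> 0, leaving the final 1)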
|
[
"yihao_guo@gwmail.gwu.edu"
] |
yihao_guo@gwmail.gwu.edu
|
56ff8e02d953cdf69263eb9a3ecb20990afd092d
|
2b3ea7bb0df4be7f55d2ac188e23d801e497df8d
|
/fcsm_eos_api_client/models/vmware_availability_zone.py
|
ff4e4ce3c2bc04d8cce251739b49b4d377799d7d
|
[] |
no_license
|
mikespub/fcsm-eos-api-client
|
12b663b4e79ac5d86c2162dec168bfa240a85f0c
|
107a3a7733c55ae6a750e32497268300c6be590e
|
refs/heads/master
| 2020-08-01T18:13:17.229375
| 2019-10-29T14:30:56
| 2019-10-29T14:30:56
| 211,071,995
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,713
|
py
|
# coding: utf-8
"""
Combined FCSM EOS API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: 1.2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
class VmwareAvailabilityZone(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'available': 'bool',
'id': 'str',
'name': 'str'
}
attribute_map = {
'available': 'available',
'id': 'id',
'name': 'name'
}
def __init__(self, available=None, id=None, name=None): # noqa: E501
"""VmwareAvailabilityZone - a model defined in OpenAPI""" # noqa: E501
self._available = None
self._id = None
self._name = None
self.discriminator = None
self.available = available
self.id = id
self.name = name
@property
def available(self):
"""Gets the available of this VmwareAvailabilityZone. # noqa: E501
Determines whether the availability zone is available for use # noqa: E501
:return: The available of this VmwareAvailabilityZone. # noqa: E501
:rtype: bool
"""
return self._available
@available.setter
def available(self, available):
"""Sets the available of this VmwareAvailabilityZone.
Determines whether the availability zone is available for use # noqa: E501
:param available: The available of this VmwareAvailabilityZone. # noqa: E501
:type: bool
"""
if available is None:
raise ValueError("Invalid value for `available`, must not be `None`") # noqa: E501
self._available = available
@property
def id(self):
"""Gets the id of this VmwareAvailabilityZone. # noqa: E501
:return: The id of this VmwareAvailabilityZone. # noqa: E501
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this VmwareAvailabilityZone.
:param id: The id of this VmwareAvailabilityZone. # noqa: E501
:type: str
"""
if id is None:
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def name(self):
"""Gets the name of this VmwareAvailabilityZone. # noqa: E501
:return: The name of this VmwareAvailabilityZone. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this VmwareAvailabilityZone.
:param name: The name of this VmwareAvailabilityZone. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, VmwareAvailabilityZone):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"github@mikespub.net"
] |
github@mikespub.net
|
adbebf98c026159ef9c04143343d4151516385e5
|
60a6ba6e5f3faca2b1e17c1e90917efc3cfc561a
|
/aoc2015/day1/day1_part1.py
|
d9bbe04649059dc1e7251f5840c3bd4ee320b3d9
|
[
"MIT"
] |
permissive
|
GetPastTheMonkey/advent-of-code
|
f462f5e2b72d913e39484446ce92a043d455091c
|
7a5ee30dbafaf8ef6f9bf9936e484efd024aa308
|
refs/heads/master
| 2023-01-14T09:45:00.553575
| 2022-12-25T10:59:19
| 2022-12-25T13:00:44
| 160,684,715
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 307
|
py
|
from os.path import join, dirname, realpath
with open(join(dirname(realpath(__file__)), "input.txt")) as f:
c = 0
for char in f.read():
if char == '(':
c += 1
elif char == ')':
c -= 1
else:
raise ValueError("Invalid character")
print(c)
|
[
"sven.gruebel@gmx.ch"
] |
sven.gruebel@gmx.ch
|
883c46163f5400da29155668fbcf6818585325c3
|
09e5cfe06e437989a2ccf2aeecb9c73eb998a36c
|
/modules/cctbx_project/iotbx/examples/recalculate_phenix_refine_r_factors.py
|
55f7cb79f2b307b7a1d57a9bb37a58306055e6c0
|
[
"BSD-3-Clause-LBNL",
"BSD-3-Clause"
] |
permissive
|
jorgediazjr/dials-dev20191018
|
b81b19653624cee39207b7cefb8dfcb2e99b79eb
|
77d66c719b5746f37af51ad593e2941ed6fbba17
|
refs/heads/master
| 2020-08-21T02:48:54.719532
| 2020-01-25T01:41:37
| 2020-01-25T01:41:37
| 216,089,955
| 0
| 1
|
BSD-3-Clause
| 2020-01-25T01:41:39
| 2019-10-18T19:03:17
|
Python
|
UTF-8
|
Python
| false
| false
| 2,864
|
py
|
"""
Simple script to read in an MTZ file produced by phenix.refine, extract the
F-obs-filtered, F-model, and R-free-flags arrays, and calculate R-factors both
for the entire dataset and for resolution shells. This serves as an example
both for processing MTZ files, and for cctbx.miller functionality.
"""
from __future__ import absolute_import, division, print_function
from iotbx.reflection_file_utils import get_r_free_flags_scores
from iotbx.file_reader import any_file
import sys
def compute_r_factors(fobs, fmodel, flags):
fmodel, fobs = fmodel.common_sets(other=fobs)
fmodel, flags = fmodel.common_sets(other=flags)
fc_work = fmodel.select(~(flags.data()))
fo_work = fobs.select(~(flags.data()))
fc_test = fmodel.select(flags.data())
fo_test = fobs.select(flags.data())
r_work = fo_work.r1_factor(fc_work)
r_free = fo_test.r1_factor(fc_test)
print("r_work = %.4f" % r_work)
print("r_free = %.4f" % r_free)
print("")
flags.setup_binner(n_bins=20)
fo_work.use_binning_of(flags)
fc_work.use_binner_of(fo_work)
fo_test.use_binning_of(fo_work)
fc_test.use_binning_of(fo_work)
for i_bin in fo_work.binner().range_all():
sel_work = fo_work.binner().selection(i_bin)
sel_test = fo_test.binner().selection(i_bin)
fo_work_bin = fo_work.select(sel_work)
fc_work_bin = fc_work.select(sel_work)
fo_test_bin = fo_test.select(sel_test)
fc_test_bin = fc_test.select(sel_test)
if fc_test_bin.size() == 0 : continue
r_work_bin = fo_work_bin.r1_factor(other=fc_work_bin,
assume_index_matching=True)
r_free_bin = fo_test_bin.r1_factor(other=fc_test_bin,
assume_index_matching=True)
cc_work_bin = fo_work_bin.correlation(fc_work_bin).coefficient()
cc_free_bin = fo_test_bin.correlation(fc_test_bin).coefficient()
legend = flags.binner().bin_legend(i_bin, show_counts=False)
print("%s %8d %8d %.4f %.4f %.3f %.3f" % (legend, fo_work_bin.size(),
fo_test_bin.size(), r_work_bin, r_free_bin, cc_work_bin, cc_free_bin))
def run(args):
mtz_in = any_file(args[0])
ma = mtz_in.file_server.miller_arrays
flags = fmodel = fobs = None
# select the output arrays from phenix.refine. This could easily be modified
# to handle MTZ files from other programs.
for array in ma :
labels = array.info().label_string()
if labels.startswith("R-free-flags"):
flags = array
elif labels.startswith("F-model"):
fmodel = abs(array)
elif labels.startswith("F-obs-filtered"):
fobs = array
if (None in [flags, fobs, fmodel]):
raise RuntimeError("Not a valid phenix.refine output file")
scores = get_r_free_flags_scores([flags], None)
test_flag_value = scores.test_flag_values[0]
flags = flags.customized_copy(data=flags.data()==test_flag_value)
compute_r_factors(fobs, fmodel, flags)
if (__name__ == "__main__"):
run(sys.argv[1:])
|
[
"jorge7soccer@gmail.com"
] |
jorge7soccer@gmail.com
|
eb68af81c19aa405345e4b717d86a0032583c9e8
|
eec267b544295bccb2ab88b13b221ff4fd3d2985
|
/test_new_edi.py
|
d82f2d0cbabccbb7fac6c5dbe44dfe81e85531d0
|
[] |
no_license
|
ralfcam/sandbox_scripts
|
dda368dcf8b8d01147660dedc6d0fcae2d15f80c
|
6fa53a63152c4a00396b38fb92ae7dc6f72d6b90
|
refs/heads/master
| 2022-05-29T02:02:24.849913
| 2020-05-01T02:23:57
| 2020-05-01T02:23:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed May 25 10:48:11 2016
@author: jpeacock
"""
import mtpy.core.z as mtz
import numpy as np
f1 = r"c:\Users\jpeacock\Documents\ShanesBugs\Sev_MT_Final_ga\MT001.edi"
with open(f1, 'r') as fid:
data_lines = fid.readlines()[102:]
data_dict = {}
data_find = False
for line in data_lines:
if line.find('>') >= 0 and line.find('!') == -1:
line_list = line[1:].strip().split()
key = line_list[0].lower()
if key[0] == 'z' or key[0] == 't' or key == 'freq':
data_find = True
data_dict[key] = []
else:
data_find = False
elif data_find == True and line.find('>') == -1 and line.find('!') == -1:
d_lines = line.strip().split()
for ii, dd in enumerate(d_lines):
# check for empty values and set them to 0, check for any
# other characters sometimes there are ****** for a null
# component
try:
d_lines[ii] = float(dd)
if d_lines[ii] == 1.0e32:
d_lines[ii] = 0.0
except ValueError:
d_lines[ii] = 0.0
data_dict[key] += d_lines
## fill useful arrays
freq_arr = np.array(data_dict['freq'], dtype=float)  # np.float was removed from NumPy
## fill impedance tensor
z_obj = mtz.Z()
z_obj.freq = freq_arr.copy()
z_obj.z = np.zeros((freq_arr.size, 2, 2), dtype=complex)
z_obj.z_err = np.zeros((freq_arr.size, 2, 2), dtype=float)
try:
z_obj.rotation_angle = data_dict['zrot']
except KeyError:
z_obj.rotation_angle = np.zeros_like(freq_arr)
z_obj.z[:, 0, 0] = np.array(data_dict['zxxr'])+\
np.array(data_dict['zxxi'])*1j
z_obj.z[:, 0, 1] = np.array(data_dict['zxyr'])+\
np.array(data_dict['zxyi'])*1j
z_obj.z[:, 1, 0] = np.array(data_dict['zyxr'])+\
np.array(data_dict['zyxi'])*1j
z_obj.z[:, 1, 1] = np.array(data_dict['zyyr'])+\
np.array(data_dict['zyyi'])*1j
z_obj.z_err[:, 0, 0] = np.array(data_dict['zxx.var'])
z_obj.z_err[:, 0, 1] = np.array(data_dict['zxy.var'])
z_obj.z_err[:, 1, 0] = np.array(data_dict['zyx.var'])
z_obj.z_err[:, 1, 1] = np.array(data_dict['zyy.var'])
|
[
"peacock.jared@gmail.com"
] |
peacock.jared@gmail.com
|
41dcc0f46adda32291cfcb69a957b1b3ffce535f
|
8e311f8f94c9d218bd37f81c0badc906d78d6b33
|
/env/Lib/site-packages/reversion/__init__.py
|
e3b63c498750aeee9563ccdc26d3ceff9e27f228
|
[
"MIT"
] |
permissive
|
htwenhe/DJOA
|
d76307ff8752c1e2a89101de1f74094b94bf9b18
|
3c2d384a983e42dedfd72561353ecf9370a02115
|
refs/heads/master
| 2021-09-03T21:49:28.267986
| 2018-01-12T08:12:55
| 2018-01-12T08:12:55
| 108,937,324
| 0
| 1
|
MIT
| 2018-01-12T08:06:50
| 2017-10-31T02:59:26
|
Python
|
UTF-8
|
Python
| false
| false
| 923
|
py
|
"""
An extension to the Django web framework that provides version control for model instances.
Developed by Dave Hall.
<http://www.etianen.com/>
"""
try:
import django # noqa
except ImportError: # pragma: no cover
# The top-level API requires Django, which might not be present if setup.py
# is importing reversion to get __version__.
pass
else:
from reversion.errors import ( # noqa
RevertError,
RevisionManagementError,
RegistrationError,
)
from reversion.revisions import ( # noqa
is_active,
is_manage_manually,
get_user,
set_user,
get_comment,
set_comment,
get_date_created,
set_date_created,
add_meta,
add_to_revision,
create_revision,
register,
is_registered,
unregister,
get_registered_models,
)
__version__ = VERSION = (2, 0, 10)
|
[
"htwenhe@hotmail.com"
] |
htwenhe@hotmail.com
|
bd94c819b0a4ffdedd7fe7221a210a1d599e191d
|
8f3336bbf7cd12485a4c52daa831b5d39749cf9b
|
/Python/course-schedule-ii.py
|
584064f35bc6f961716bc46e8c017dda5ee86015
|
[] |
no_license
|
black-shadows/LeetCode-Topicwise-Solutions
|
9487de1f9a1da79558287b2bc2c6b28d3d27db07
|
b1692583f7b710943ffb19b392b8bf64845b5d7a
|
refs/heads/master
| 2022-05-30T22:16:38.536678
| 2022-05-18T09:18:32
| 2022-05-18T09:18:32
| 188,701,704
| 240
| 110
| null | 2020-05-08T13:04:36
| 2019-05-26T15:41:03
|
C++
|
UTF-8
|
Python
| false
| false
| 1,129
|
py
|
from collections import defaultdict, deque
class Solution(object):
def findOrder(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: List[int]
"""
res, zero_in_degree_queue = [], deque()
in_degree, out_degree = defaultdict(set), defaultdict(set)
for i, j in prerequisites:
in_degree[i].add(j)
out_degree[j].add(i)
        for i in range(numCourses):
if i not in in_degree:
zero_in_degree_queue.append(i)
while zero_in_degree_queue:
prerequisite = zero_in_degree_queue.popleft()
res.append(prerequisite)
if prerequisite in out_degree:
for course in out_degree[prerequisite]:
in_degree[course].discard(prerequisite)
if not in_degree[course]:
zero_in_degree_queue.append(course)
del out_degree[prerequisite]
if out_degree:
return []
return res
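# Example: Solution().findOrder(2, [[1, 0]]) -> [0, 1]
# (course 1 requires course 0, so 0 is scheduled first)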
|
[
"noreply@github.com"
] |
black-shadows.noreply@github.com
|
f9d2a50793292a53ca569289d3130dccc8a80386
|
e10a6d844a286db26ef56469e31dc8488a8c6f0e
|
/gift/experiment/cifar10_classification/multicifar10_wide_resnet_gradualmixup_1000_config.py
|
ebe2db0f88122e4e8cd4430b15c0185d117daf04
|
[
"Apache-2.0",
"CC-BY-4.0"
] |
permissive
|
Jimmy-INL/google-research
|
54ad5551f97977f01297abddbfc8a99a7900b791
|
5573d9c5822f4e866b6692769963ae819cb3f10d
|
refs/heads/master
| 2023-04-07T19:43:54.483068
| 2023-03-24T16:27:28
| 2023-03-24T16:32:17
| 282,682,170
| 1
| 0
|
Apache-2.0
| 2020-07-26T15:50:32
| 2020-07-26T15:50:31
| null |
UTF-8
|
Python
| false
| false
| 4,803
|
py
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Config for Cifar10 classification with end2end training."""
import ml_collections
def get_config():
"""Returns the experiment configuration for WideResnet on Cifar10."""
config = ml_collections.ConfigDict()
config.experiment_name = 'wide_resnet_cifar10_cls_gradual_mixup'
# Train mode
config.train_mode = 'self_adaptive_gradual_mixup'
config.pretrained = ml_collections.ConfigDict()
config.pretrained.only_backbone_pretrained = False
config.pretrained.checkpoint_path = ''
# Task
config.task_name = 'multi_env_identity_dm_cls'
config.dataset_name = 'multi_cifar10'
config.data_augmentations = ['random_flip']
config.train_environments = ['cifar', 'translated']
config.eval_environments = ['cifar', 'translated']
config.labeled_environments = ['cifar']
config.unlabeled_environments = ['translated']
# Model and data dtype
config.model_dtype_str = 'float32'
config.data_dtype_str = 'float32'
config.model_name = 'wide_resnet'
config.blocks_per_group = 4
config.channel_multiplier = 10
config.num_outputs = 10
config.dropout_rate = 0.0
config.output_dim = 10
# Training
config.optimizer = 'adam'
config.opt_hparams = {'weight_decay': 0.001}
config.l2_decay_factor = .0
config.max_grad_norm = 5.0
config.label_smoothing = None
config.num_training_steps = 1000
config.num_training_epochs = None
config.eval_frequency = 100
config.batch_size = 512
config.eval_batch_size = 512
config.rng_seed = 0
# Learning rate
config.steps_per_epoch = 50000 // config.batch_size
config.total_steps = config.num_training_steps
config.base_lr = 0.000002 * (config.batch_size / 256)
config.lr_hparams = {
'learning_rate_schedule': 'compound',
'factors': 'constant * decay_every',
'initial_learning_rate': config.base_lr,
'steps_per_decay': 100,
'decay_factor': 0.99,
}
# Pipeline params
config.confidence_quantile_threshold = 0.3
config.self_supervised_label_transformation = 'sharp'
config.label_temp = 0.5
config.self_training_iterations = 5
config.reinitialize_optimizer_at_each_step = False
config.restart_learning_rate = False
config.pseudo_labels_train_mode = False
config.stop_gradient_for_interpolations = True
config.ground_truth_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.inter_env_interpolation = False
config.intra_env_interpolation = False
config.unlabeled_interpolation = True
config.mixup_layer_set = [0, 1, 2, 3]
config.interpolation_method = 'plain_convex_combination'
config.intra_interpolation_method = 'plain_convex_combination'
config.interpolation_mode = 'hard'
config.ot_label_cost = 0.1
config.ot_l2_cost = 0.0000000
config.ot_noise_cost = 0.0
config.intra_mixup_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.beta_schedule_params = {'mode': 'constant', 'initial_value': 1.0}
config.alpha_schedule_param = {'mode': 'constant', 'initial_value': 1.0}
config.inter_mixup_factor_params = {'mode': 'constant', 'initial_value': 0.0}
config.inter_beta_schedule_params = {'mode': 'constant', 'initial_value': 1.0}
config.inter_alpha_schedule_param = {'mode': 'constant', 'initial_value': 1.0}
config.unlabeled_mixup_factor_params = {
'mode': 'constant',
'initial_value': 1.0
}
config.unlabeled_beta_params = {
'mode': 'linear_decay',
'initial_value': 10,
'min_value': 0,
'total_steps': config.total_steps,
'num_steps': config.self_training_iterations
}
config.unlabeled_alpha_params = {
'mode': 'linear_grow',
'initial_value': 1,
'max_value': 10,
'total_steps': config.total_steps,
'num_steps': config.self_training_iterations
}
# IRM related
config.penalty_weight = 0.0
config.penalty_anneal_iters = 0
# Continual learning related:
config.gift_factor = 0.001
# Domain Mapper related:
config.aux_weight = 0
config.aux_l2 = 0
# logging
config.write_summary = True # write TB and XM summary
config.checkpoint = True # do checkpointing
config.keep_ckpts = 3
config.keep_env_ckpts = False
config.write_xm_measurements = True
config.trial = 0
return config
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
bc3feca74db58c5140fd26ee1ae452b49921fbcf
|
178109bccf5014a97d74054d8f40511a241625fe
|
/signature/signature.py
|
258257f75fa208b6755eb6441b82953b60aaf0a3
|
[] |
no_license
|
rajeshwarg/signature-disambiguation
|
1b6d46a144feb71ca7efe7e7f4105cc3d93b548a
|
2b10c4f1ec4ca7785d812ea8de679ab681d0d98f
|
refs/heads/master
| 2021-01-19T11:54:39.793623
| 2014-04-28T21:28:43
| 2014-04-28T21:28:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,050
|
py
|
import matplotlib.pyplot as plt
import numpy as np
from skimage.data import imread
from skimage import io
from skimage.color import rgb2gray
from skimage.measure import find_contours
from skimage.transform import probabilistic_hough_line
from itertools import cycle
from pylab import *
from PIL import Image
import os
import shutil
import sys
import pandas as pd
from skimage.morphology import square, erosion
from skimage.filter import hsobel
from scipy.spatial.distance import euclidean
from scipy.signal import convolve2d, convolve
## CONSTANTS ##
CONTOUR_MINLENGTH = 200
def compute_contours(img, length=300, value=0.1):
"""
Given an Image object, finds the contours. Filters
the contours by how long they are (this is the optional length
argument)
Returns:
ret_contours (list of contours),
ret_lengths (list of lengths of each contour in ret_contours)
"""
length = CONTOUR_MINLENGTH
contours = find_contours(img, value)
contour_lengths = [len(x[:, 1]) for x in contours]
ret_contours = []
ret_lengths = []
for contour in contours:
if (contour.shape[0] >= length):
ret_contours.append(contour)
ret_lengths.append(contour.shape[0])
return ret_contours, ret_lengths
def get_boundingboxes(contours, plot=False):
"""
Given a list of contours, computes the bounding box
for each and returns the list
"""
boxes = []
for contour in contours:
# compute bounding box coordinates
minx = miny = float('inf')
maxx = maxy = float('-inf')
minx = min(minx, min(contour, key=lambda x: x[1])[1])
miny = min(miny, min(contour, key=lambda x: x[0])[0])
maxx = max(maxx, max(contour, key=lambda x: x[1])[1])
maxy = max(maxy, max(contour, key=lambda x: x[0])[0])
if plot:
x = (minx, maxx, maxx, minx, minx)
y = (miny, miny, maxy, maxy, miny)
plt.plot(x,y,'-b',linewidth=2)
        boxes.append(list(map(int, (minx, miny, maxx, maxy))))  # list() so the box is subscriptable in Python 3
return boxes
def boundingbox(contour):
"""
Given a list of contours, computes the bounding box
for each and returns the list
"""
# compute bounding box coordinates
minx = miny = float('inf')
maxx = maxy = float('-inf')
minx = int(min(minx, min(contour, key=lambda x: x[1])[1]))
miny = int(min(miny, min(contour, key=lambda x: x[0])[0]))
maxx = int(max(maxx, max(contour, key=lambda x: x[1])[1]))
maxy = int(max(maxy, max(contour, key=lambda x: x[0])[0]))
return (minx,miny,maxx,maxy)
def boundingboxcorners(box):
minx,miny,maxx,maxy = box
corners = []
for x in (minx,maxx):
for y in (miny,maxy):
corners.append((x,y))
return corners
def mergeboxes(box1,box2):
minx1,miny1,maxx1,maxy1 = box1
minx2,miny2,maxx2,maxy2 = box2
minx = min(minx1,minx2)
maxx = max(maxx1,maxx2)
miny = min(miny1,miny2)
maxy = max(maxy1,maxy2)
return (minx,miny,maxx,maxy)
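# Example: mergeboxes((0, 0, 10, 10), (5, 5, 20, 20)) -> (0, 0, 20, 20)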
def is_box_in_box(corners1, corners2):
"""
returns True if corners1 is in-part contained
inside corners2
"""
min_x = min(map(lambda x: x[0], corners2))
max_x = max(map(lambda x: x[0], corners2))
min_y = min(map(lambda x: x[1], corners2))
max_y = max(map(lambda x: x[1], corners2))
width = max_x - min_x
height = max_y - min_y
for p in corners1:
if p[0] >= min_x and p[1] >= min_y and \
p[0] < min_x+width and p[1] < min_y+height:
return True
return False
def do_merge(corners1, corners2):
for corner1 in corners1:
for corner2 in corners2:
if euclidean(corner1,corner2) < 100:
return True
if is_box_in_box(corners1, corners2) or is_box_in_box(corners2, corners1):
return True
return False
def link_contours(contours):
# check overlaps
# remove flat lines
merged = True
    boxes = list(map(boundingbox, contours))  # list() so we can pop/append below
iterations_left = len(boxes)
old_boxes = None
for i in range(10*len(boxes)):
if iterations_left == 0:
            print('none', i, boxes)
break
box1 = boxes.pop(0)
iterations_left -= 1
corners1 = boundingboxcorners(box1)
for index,box2 in enumerate(boxes):
corners2 = boundingboxcorners(box2)
if do_merge(corners1, corners2):
boxes.pop(index)
boxes.append(mergeboxes(box1,box2))
iterations_left += 1
merged=True
break
else:
if box1 not in boxes:
boxes.append(box1)
merged = False
return boxes
def process(filename):
imagepath = os.path.join(os.getcwd(), filename)
orig_img = io.imread(filename,True,'pil')
img = orig_img > 0.9 # binary threshold
lines = probabilistic_hough_line(hsobel(img),line_length=200)
for l in lines:
x0, x1 = l[0][0],l[1][0]
y = l[0][1]
for x in range(x0,x1):
img[y+1,x] = 1
img[y,x] = 1
img[y-1,x] = 1
erode_img = erosion(img, square(2))
contours, lengths = compute_contours(erode_img,0.8)
lengths = pd.Series(lengths)
lengths = lengths[lengths > 400]
for i in lengths.index:
contour = contours[i]
box = get_boundingboxes([contour])[0]
x_sum = sum(map(abs, np.gradient(contour[:,1])))
y_sum = sum(map(abs, np.gradient(contour[:,0])))
area = (box[2] - box[0]) * (box[3] - box[1])
plt.plot(contour[:,1],contour[:,0])
contours = [contours[i] for i in lengths.index]
newboxes = set(link_contours(contours))
retboxes = []
for box in newboxes:
minx,miny,maxx,maxy = box
x = (minx, maxx, maxx, minx, minx)
y = (miny, miny, maxy, maxy, miny)
area = (maxx-minx) * (maxy-miny)
if area > 10000:
retboxes.append(box)
plt.plot(x, y, '-b', linewidth=2)
imshow(erode_img)
return retboxes, contours
def output(contours,shape=(126,126),outputfile='signatures.csv'):
"""
Take the set of all contours that we have identified as possible signatures
and resize them all into a canonical shape (the best shape and the best
method for doing so have yet to be determined) so we can train a classifier
on the pixels. We want to do unsupervised clustering to separate the
signatures from non-signatures
"""
    # scipy no longer re-exports resize; numpy provides the same function
    with open(outputfile, 'a') as f:
        for c in contours:
            newc = list(map(int, np.resize(c, shape).flatten()))
f.write('\t'.join(map(str, newc))+'\n')
if __name__=='__main__':
plt.gray()
f = plt.figure(figsize=(16,12))
filename = sys.argv[1]
basename = ''.join(filename.split('/')[-1].split('.')[:-1])
boxes, contours = process(filename)
output(contours)
plt.savefig(basename+'-signature.png')
if len(sys.argv) > 2:
shutil.move(basename+'-signature.png',sys.argv[2])
|
[
"gtfierro225@gmail.com"
] |
gtfierro225@gmail.com
|
0c3b7cd7d2c76d0d5aaee1d1c5f1f0d14ecccbb4
|
f47bfd6d1f6e2040c070086a6c0b7f279dfebb6a
|
/brick/check_point_subclass_dict.py
|
d198eff7693fb0a1dd818062f964ad95e1f006b8
|
[
"BSD-3-Clause"
] |
permissive
|
metehangelgi/scrabble
|
b34844cbd17a5588f69af22eb04dfe8d89d14bf3
|
6d64be2e9c7d0392332592c804eb15c20a3e2516
|
refs/heads/master
| 2022-03-26T12:17:06.133378
| 2018-11-25T03:42:56
| 2018-11-25T03:42:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
import json
with open('point_subclass_dict.json', 'r') as fp:
d = json.load(fp)
found_tagsets = set()
redundant_tagsets = set()
for superclass, tagsets in d.items():
    # set.union returns a new set; use update so the accumulators actually grow
    redundant_tagsets.update(tagset for tagset in tagsets
                             if tagset in found_tagsets)
    found_tagsets.update(tagsets)
print(redundant_tagsets)
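# Illustrative example: if two superclasses in point_subclass_dict.json both
# listed a tagset such as "zone_temperature", its second occurrence would be
# collected in redundant_tagsets and printed above.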
|
[
"bk7749@gmail.com"
] |
bk7749@gmail.com
|
2eff6562ec8a043de5548599076490f426fda71e
|
eedea7d2f6ad0f497f1469ab78ea00c3c33bd57a
|
/hamon_shu/materials/score_structure/segment_07/pitch_material_pattern.py
|
c24b023291d9b5b7db9bb259abe6928e367405ee
|
[] |
no_license
|
GregoryREvans/hamon_shu
|
750927aec941f60bf0b90ee2196a886c19c611ad
|
8081ee57fce8db07c3492e67e7a634e3b08f3bb3
|
refs/heads/master
| 2022-02-27T06:22:44.449635
| 2022-02-10T13:48:23
| 2022-02-10T13:48:23
| 144,753,533
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,713
|
py
|
from hamon_shu.materials.pitch import pitches_VII
key_list_1 = [
"violin_1_pitch_handler_three",
"violin_1_pitch_handler_one",
"violin_1_pitch_handler_two",
"violin_1_pitch_handler_four",
"violin_1_pitch_handler_three",
"violin_1_pitch_handler_one",
"violin_1_pitch_handler_four",
"violin_1_pitch_handler_two",
]
key_list_2 = [
"violin_2_pitch_handler_three",
"violin_2_pitch_handler_one",
"violin_2_pitch_handler_two",
"violin_2_pitch_handler_four",
"violin_2_pitch_handler_three",
"violin_2_pitch_handler_one",
"violin_2_pitch_handler_four",
"violin_2_pitch_handler_two",
]
key_list_3 = [
"viola_pitch_handler_three",
"viola_pitch_handler_one",
"viola_pitch_handler_two",
"viola_pitch_handler_four",
"viola_pitch_handler_three",
"viola_pitch_handler_one",
"viola_pitch_handler_four",
"viola_pitch_handler_two",
]
key_list_4 = [
"cello_pitch_handler_three",
"cello_pitch_handler_one",
"cello_pitch_handler_two",
"cello_pitch_handler_four",
"cello_pitch_handler_three",
"cello_pitch_handler_one",
"cello_pitch_handler_four",
"cello_pitch_handler_two",
]
handler_dict = {
"violin_1_pitch_handler_one": pitches_VII.violin_1_pitch_handler_one,
"violin_1_pitch_handler_two": pitches_VII.violin_1_pitch_handler_two,
"violin_1_pitch_handler_three": pitches_VII.violin_1_pitch_handler_three,
"violin_1_pitch_handler_four": pitches_VII.violin_1_pitch_handler_four,
"violin_2_pitch_handler_one": pitches_VII.violin_2_pitch_handler_one,
"violin_2_pitch_handler_two": pitches_VII.violin_2_pitch_handler_two,
"violin_2_pitch_handler_three": pitches_VII.violin_2_pitch_handler_three,
"violin_2_pitch_handler_four": pitches_VII.violin_2_pitch_handler_four,
"viola_pitch_handler_one": pitches_VII.viola_pitch_handler_one,
"viola_pitch_handler_two": pitches_VII.viola_pitch_handler_two,
"viola_pitch_handler_three": pitches_VII.viola_pitch_handler_three,
"viola_pitch_handler_four": pitches_VII.viola_pitch_handler_four,
"cello_pitch_handler_one": pitches_VII.cello_pitch_handler_one,
"cello_pitch_handler_two": pitches_VII.cello_pitch_handler_two,
"cello_pitch_handler_three": pitches_VII.cello_pitch_handler_three,
"cello_pitch_handler_four": pitches_VII.cello_pitch_handler_four,
}
material_list_1 = [handler_dict[x] for x in key_list_1]
material_list_2 = [handler_dict[x] for x in key_list_2]
material_list_3 = [handler_dict[x] for x in key_list_3]
material_list_4 = [handler_dict[x] for x in key_list_4]
materials = [material_list_1, material_list_2, material_list_3, material_list_4]
pitch_material_list = []
for x in materials:
pitch_material_list.extend(x)
|
[
"gregoryrowlandevans@gmail.com"
] |
gregoryrowlandevans@gmail.com
|
55844d8aed6b61cd815f0b6c616ce85c2dce5750
|
5900bc2615f456512b73455203fa90c4a016230f
|
/mimic.py
|
77ba93da838bbfae6e99cae87a3ee91c38346f59
|
[] |
no_license
|
RamiroAlvaro/google-python-exercises
|
615a2a4aa6c02d7a7b74eed42119dc8402eccd4c
|
50b711ca3fbcd008f28e60c53b7d5136573a44ad
|
refs/heads/master
| 2020-12-25T15:08:36.125816
| 2017-01-27T20:24:47
| 2017-01-27T20:24:47
| 66,118,312
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,568
|
py
|
#!/usr/bin/python -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
"""Mimic pyquick exercise -- optional extra exercise.
Google's Python Class
Read in the file specified on the command line.
Do a simple split() on whitespace to obtain all the words in the file.
Rather than read the file line by line, it's easier to read
it into one giant string and split it once.
Build a "mimic" dict that maps each word that appears in the file
to a list of all the words that immediately follow that word in the file.
The list of words can be be in any order and should include
duplicates. So for example the key "and" might have the list
["then", "best", "then", "after", ...] listing
all the words which came after "and" in the text.
We'll say that the empty string is what comes before
the first word in the file.
With the mimic dict, it's fairly easy to emit random
text that mimics the original. Print a word, then look
up what words might come next and pick one at random as
the next word.
Use the empty string as the first word to prime things.
If we ever get stuck with a word that is not in the dict,
go back to the empty string to keep things moving.
Note: the standard python module 'random' includes a
random.choice(list) method which picks a random element
from a non-empty list.
For fun, feed your program to itself as input.
Could work on getting it to put in linebreaks around 70
columns, so the output looks better.
"""
import random
import sys
import textwrap
def mimic_dict(filename):
    """Returns mimic dict mapping each word to list of words which follow it."""
    with open(filename, 'rt') as f:
        text = f.read().split()
    d = {}
    prev = ''
    for word in text:
        # The empty string precedes the first word; every word is recorded as a
        # follower of the word before it, duplicates included.
        d.setdefault(prev, []).append(word)
        prev = word
    return d
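# Example: for the text "a b a c", mimic_dict returns
# {'': ['a'], 'a': ['b', 'c'], 'b': ['a']}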
def print_mimic(mimic_dict, word):
"""Given mimic dict and start word, prints 200 random words."""
words = []
for item in range(200):
if word not in mimic_dict:
word = ''
word = random.choice(mimic_dict[word])
words.append(word)
s = ' '.join(words)
print(textwrap.fill(s, 70))
# Provided main(), calls mimic_dict() and mimic()
def main():
if len(sys.argv) != 2:
print('usage: ./mimic.py file-to-read')
sys.exit(1)
dict = mimic_dict(sys.argv[1])
print_mimic(dict, '')
if __name__ == '__main__':
main()
|
[
"ramiroalvaro@hotmail.com"
] |
ramiroalvaro@hotmail.com
|
ce53004e1ff1be30445cc2cf0fb824a75c34b070
|
f2e8afed063bef9292a2ac8d3a84943ebcef9b11
|
/09_INSTAGRAM/feeds/views.py
|
b5126502c96408a53875d8d8ebf6f954d57c4287
|
[] |
no_license
|
seunggue/django
|
e8a4f376bda247b8780d4f838365b7621cde6101
|
377b73652b723be1d7bd83abd9bc9e14203799be
|
refs/heads/master
| 2022-12-15T13:59:31.218873
| 2019-10-28T08:42:33
| 2019-10-28T08:42:33
| 200,620,105
| 0
| 0
| null | 2022-12-08T06:14:59
| 2019-08-05T08:59:41
|
Python
|
UTF-8
|
Python
| false
| false
| 573
|
py
|
from django.shortcuts import render, redirect
from .models import Feed
# from IPython import embed
# Create your views here.
def index(request):
feeds = Feed.objects.all()
context = {
'feeds':feeds
}
return render(request, 'index.html', context)
def create(request):
if request.method == 'POST':
content = request.POST.get('content')
image = request.FILES.get('image')
feed = Feed.objects.create(content=content, image=image)
return redirect('feeds:index')
else:
return render(request, 'form.html')
|
[
"seungue1687@gmail.com"
] |
seungue1687@gmail.com
|
20d57108762761a55044954f0a80ae5d2bc49f5d
|
50f0d33b12778f911fe16a4e18d0659936b9086b
|
/0x04-python-more_data_structures/9-multiply_by_2.py
|
ba5f3c37cd1dca83e69eaeffc1980d63f74de8c8
|
[] |
no_license
|
monicajoa/holbertonschool-higher_level_programming
|
4f4eaa7aa2cad1642e7aed54663cb30eb92e1b4f
|
451d20174144ad96fa726a4389c7aae72abf2495
|
refs/heads/master
| 2022-12-18T00:35:00.682624
| 2020-09-25T05:14:57
| 2020-09-25T05:14:57
| 259,479,680
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 148
|
py
|
#!/usr/bin/python3
def multiply_by_2(a_dictionary):
new_dictionary = {n: a_dictionary[n] * 2 for n in a_dictionary}
return (new_dictionary)
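# Example: multiply_by_2({'a': 1, 'b': 2}) -> {'a': 2, 'b': 4}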
|
[
"mnortiz.ortiz@gmail.com"
] |
mnortiz.ortiz@gmail.com
|
76e6b924ecfda63a2b4613e3f47eb9b30e9f9c31
|
595b7157cdf72060c88b8f5b0807b984fa3e63a4
|
/python/scrap_wiki.py
|
3e8c74c56d0aaaaf94851c796dbe90ce65c9027f
|
[] |
no_license
|
HomingYuan/data_science_way
|
248d15710004eedc1f0fe70ab67318cbdc6e42aa
|
dd3153f44d3b4cc90b589ae0dc1d4d4f0f671da4
|
refs/heads/master
| 2021-01-20T14:34:54.943011
| 2018-03-06T02:00:29
| 2018-03-06T02:00:29
| 90,630,422
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 822
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: Homing
@software: PyCharm Community Edition
@file: scrap_wiki.py
@time: 2017/6/7 20:46
"""
import csv
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://en.wikipedia.org/wiki/Comparison_of_text_editors")
bsObj = BeautifulSoup(html, 'html.parser')  # explicit parser avoids a bs4 warning
#The main comparison table is currently the first table on the page
table = bsObj.findAll("table",{"class":"wikitable"})[0]
rows = table.findAll("tr")
csvFile = open(r"D:\Big_data\scrap_download\editors.csv", 'wt', encoding='utf-8') # need add encoding
writer = csv.writer(csvFile)
try:
for row in rows:
csvRow = []
for cell in row.findAll(['td', 'th']):
csvRow.append(cell.get_text())
writer.writerow(csvRow)
finally:
csvFile.close()
|
[
"470034235@qq.com"
] |
470034235@qq.com
|
b494c0f5bfd4f8fe4d82481a52b55d9096e1edc9
|
93f47ba04fc18c4e537f0a48fe6232e2a89a4d30
|
/examples/adspygoogle/dfp/v201408/creative_service/create_creative_from_template.py
|
c1a1728c927822b20cd10193664e4ff7cbe1cbf5
|
[
"Apache-2.0"
] |
permissive
|
jasonshih/googleads-python-legacy-lib
|
c56dc52a1dab28b9de461fd5db0fcd6020b84a04
|
510fad41ecf986fe15258af64b90f99a96dc5548
|
refs/heads/master
| 2021-04-30T22:12:12.900275
| 2015-03-06T15:35:21
| 2015-03-06T15:35:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,585
|
py
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example creates a new template creative for a given advertiser.
To determine which companies are advertisers, run get_advertisers.py.
To determine which creative templates exist, run
get_all_creative_templates.py.
Tags: CreativeService.createCreative
"""
__author__ = 'Nicholas Chen'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import base64
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
from adspygoogle.common import Utils
# Set id of the advertiser (company) that the creative will be assigned to.
ADVERTISER_ID = 'INSERT_ADVERTISER_COMPANY_ID_HERE'
def main(client, advertiser_id):
# Initialize appropriate service.
creative_service = client.GetService('CreativeService', version='v201408')
# Use the image banner with optional third party tracking template.
creative_template_id = '10000680'
  image_data = open(os.path.join(__file__[:__file__.rfind('/')], '..', 'data',
                                 'medium_rectangle.jpg'), 'rb').read()
image_data = base64.encodestring(image_data)
# Create creative from templates.
creative = {
'type': 'TemplateCreative',
'name': 'Template Creative #%s' % Utils.GetUniqueName(),
'advertiserId': advertiser_id,
'size': {'width': '300', 'height': '250'},
'creativeTemplateId': creative_template_id,
'creativeTemplateVariableValues': [
{
'type': 'AssetCreativeTemplateVariableValue',
'uniqueName': 'Imagefile',
'assetByteArray': image_data,
'fileName': 'image%s.jpg' % Utils.GetUniqueName()
},
{
'type': 'LongCreativeTemplateVariableValue',
'uniqueName': 'Imagewidth',
'value': '300'
},
{
'type': 'LongCreativeTemplateVariableValue',
'uniqueName': 'Imageheight',
'value': '250'
},
{
'type': 'UrlCreativeTemplateVariableValue',
'uniqueName': 'ClickthroughURL',
'value': 'www.google.com'
},
{
'type': 'StringCreativeTemplateVariableValue',
'uniqueName': 'Targetwindow',
'value': '_blank'
}
]
}
# Call service to create the creative.
creative = creative_service.CreateCreative(creative)[0]
# Display results.
print ('Template creative with id \'%s\', name \'%s\', and type \'%s\' was '
'created and can be previewed at %s.'
% (creative['id'], creative['name'], creative['Creative_Type'],
creative['previewUrl']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, ADVERTISER_ID)
|
[
"nicholaschen@google.com"
] |
nicholaschen@google.com
|
b147294d2e555d356a3bddf7311e9d144a4dc147
|
4f825250d1f3b00d4dff1601001bc72f9666f6b6
|
/app/request.py
|
d75359b292c50cad0f63226d1a6931451ae18be7
|
[] |
no_license
|
MigotSharon/Watch-list
|
ee7c33fb8cc50fd6eedeaa76ee853fcd3389aadd
|
139687ccbea73b7029a956a18fa7fe9b0d0458ba
|
refs/heads/main
| 2023-01-02T00:36:08.861473
| 2020-10-24T16:54:31
| 2020-10-24T16:54:31
| 303,660,887
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,972
|
py
|
import urllib.request,json
from .models import Movie
# Getting api key
api_key = None
# Getting the movie base url
base_url = None
def configure_request(app):
global api_key,base_url
api_key = app.config['MOVIE_API_KEY']
base_url = app.config['MOVIE_API_BASE_URL']
def get_movies(category):
'''
    Function that gets the json response to our url request
'''
get_movies_url = base_url.format(category,api_key)
with urllib.request.urlopen(get_movies_url) as url:
get_movies_data = url.read()
get_movies_response = json.loads(get_movies_data)
movie_results = None
if get_movies_response['results']:
movie_results_list = get_movies_response['results']
movie_results = process_results(movie_results_list)
return movie_results
def get_movie(id):
get_movie_details_url = base_url.format(id,api_key)
with urllib.request.urlopen(get_movie_details_url) as url:
movie_details_data = url.read()
movie_details_response = json.loads(movie_details_data)
movie_object = None
if movie_details_response:
id = movie_details_response.get('id')
title = movie_details_response.get('original_title')
overview = movie_details_response.get('overview')
poster = movie_details_response.get('poster_path')
vote_average = movie_details_response.get('vote_average')
vote_count = movie_details_response.get('vote_count')
movie_object = Movie(id,title,overview,poster,vote_average,vote_count)
return movie_object
def search_movie(movie_name):
search_movie_url = 'https://api.themoviedb.org/3/search/movie?api_key={}&query={}'.format(api_key,movie_name)
with urllib.request.urlopen(search_movie_url) as url:
search_movie_data = url.read()
search_movie_response = json.loads(search_movie_data)
search_movie_results = None
if search_movie_response['results']:
search_movie_list = search_movie_response['results']
search_movie_results = process_results(search_movie_list)
return search_movie_results
def process_results(movie_list):
'''
Function that processes the movie result and transform them to a list of Objects
Args:
movie_list: A list of dictionaries that contain movie details
Returns :
movie_results: A list of movie objects
'''
movie_results = []
for movie_item in movie_list:
id = movie_item.get('id')
title = movie_item.get('original_title')
overview = movie_item.get('overview')
poster = movie_item.get('poster_path')
vote_average = movie_item.get('vote_average')
vote_count = movie_item.get('vote_count')
if poster:
movie_object = Movie(id,title,overview,poster,vote_average,vote_count)
movie_results.append(movie_object)
return movie_results
|
[
"sam@example.com"
] |
sam@example.com
|
254aeee6b83580bec0d8ee4fc5f8fd2ea24fd3b8
|
f6cc50c14759c0e865528125b896a47b464c834d
|
/tests/test_radials.py
|
9b44072b9ef542765d017a72ddccc1a0bb6c44b7
|
[
"MIT"
] |
permissive
|
kwilcox/codar_processing
|
243ddea6eea909c3c44d3f02478e5f024f04e2d8
|
3a327f5378a6a9d78d263c8e7b317088823245c1
|
refs/heads/master
| 2020-08-27T11:55:12.440346
| 2020-01-09T20:05:13
| 2020-01-09T20:05:13
| 217,358,645
| 0
| 0
|
MIT
| 2019-10-24T17:44:10
| 2019-10-24T17:44:10
| null |
UTF-8
|
Python
| false
| false
| 6,077
|
py
|
from pathlib import Path
import numpy as np
import xarray as xr
from codar_processing.src.radials import Radial, concatenate_radials
data_path = (Path(__file__).parent.with_name('codar_processing') / 'data').resolve()
output_path = (Path(__file__).parent.with_name('output')).resolve()
def test_codar_radial_to_netcdf():
radial_file = data_path / 'radials' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.ruv'
nc_file = output_path / 'radials_nc' / 'SEAB' / 'RDLi_SEAB_2019_01_01_0000.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf')
# Convert it to an xarray Dataset with no variable
# or attribte enhancements
xds2 = rad1.to_xarray(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_wera_radial_to_netcdf():
radial_file = data_path / 'radials' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials_nc' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
# Converts the underlying .data (natively a pandas DataFrame)
# to an xarray object when `create_netcdf` is called.
# This automatically 'enhances' the netCDF file
# with better variable names and attributes.
rad1 = Radial(radial_file)
rad1.export(str(nc_file), file_type='netcdf')
# Convert it to an xarray Dataset with no variable
# or attribte enhancements
xds2 = rad1.to_xarray(enhance=False)
# Convert it to xarray Dataset with increased usability
# by changing variables names, adding attributes,
# and decoding the CF standards like scale_factor
xds3 = rad1.to_xarray(enhance=True)
with xr.open_dataset(nc_file) as xds1:
# The two enhanced files should be identical
assert xds1.identical(xds3)
# Enhanced and non-enhanced files should not
# be equal
assert not xds1.identical(xds2)
def test_wera_mask():
radial_file = data_path / 'radials' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
# Total points before masking
assert len(rad1.data) == 6327
rad1.mask_over_land()
# Make sure we subset the land points
assert len(rad1.data) == 5745
def test_wera_qc():
radial_file = data_path / 'radials' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
assert len(rad1.data) == 6327
rad1.mask_over_land()
rad1.qc_qartod_radial_count()
rad1.qc_qartod_valid_location()
rad1.qc_qartod_maximum_velocity()
rad1.qc_qartod_spatial_median()
assert len(rad1.data) == 5745
assert 'QC07' in rad1.data
assert 'QC08' not in rad1.data # no VFLG column so we can't run it
assert 'QC09' in rad1.data
assert 'QC10' in rad1.data
def test_wera_raw_to_quality_nc():
radial_file = data_path / 'radials' / 'WERA' / 'RDL_csw_2019_10_24_162300.ruv'
nc_file = output_path / 'radials_qc_nc' / 'WERA' / 'RDL_csw_2019_10_24_162300.nc'
rad1 = Radial(radial_file, mask_over_land=False, replace_invalid=False)
rad1.mask_over_land()
rad1.qc_qartod_radial_count()
rad1.qc_qartod_valid_location()
rad1.qc_qartod_maximum_velocity()
rad1.qc_qartod_spatial_median()
rad1.export(str(nc_file), file_type='netcdf')
xds2 = rad1.to_xarray(enhance=True)
with xr.open_dataset(nc_file) as xds1:
assert len(xds1.QCTest) == 3 # no VFLG column so one test not run
# The two enhanced files should be identical
assert xds1.identical(xds2)
class TestCombineRadials:
file_paths = list(
(data_path / 'radials' / 'SEAB').glob('*.ruv')
)
radial_files = [
str(r) for r in file_paths
]
radial_objects = [
Radial(str(r)) for r in radial_files
]
# Select even indexed file_paths and odd indexed radial objects
# into one array of mixed content types for concating
radial_mixed = radial_files[::2] + radial_objects[1:][::2]
def test_concat_radial_objects(self):
combined = concatenate_radials(self.radial_objects)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_radial_files(self):
combined = concatenate_radials(self.radial_files)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_mixed_radials(self):
combined = concatenate_radials(self.radial_mixed)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
def test_concat_mixed_radials_enhance(self):
# Select even indexed file_paths and odd indexed radial objects
# into one array of mixed content types for concating
combined = concatenate_radials(self.radial_mixed, enhance=True)
assert combined.time.size == len(self.file_paths)
# Make sure the dataset was sorted by time
assert np.array_equal(
combined.time.values,
np.sort(combined.time.values)
)
|
[
"kyle@axiomdatascience.com"
] |
kyle@axiomdatascience.com
|
1660e21b6c4bd243001029dc05cf5eea57eddffc
|
f24edb38dd4f7de8a7683afbbc9ab2a4237a361e
|
/venv/lib/python3.6/site-packages/pip/_internal/commands/uninstall.py
|
5e5b3553b2a53d1bbe5ecf496384cd9ec36ecdc9
|
[] |
no_license
|
ngecu/automate_django_data_filling
|
882220f84a6b4af5484d4b136c740a803ccccfd2
|
d6b7095904878f06e4aae6beb2156113a6145c21
|
refs/heads/main
| 2023-02-26T02:36:26.582387
| 2021-01-31T15:50:22
| 2021-01-31T15:50:22
| 317,846,258
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,271
|
py
|
from __future__ import absolute_import
from pip._vendor.packaging.utils import canonicalize_name
from pip._internal.cli.base_command import Command
from pip._internal.cli.req_command import SessionCommandMixin
from pip._internal.cli.status_codes import SUCCESS
from pip._internal.exceptions import InstallationError
from pip._internal.req import parse_requirements
from pip._internal.req.constructors import (
install_req_from_line,
install_req_from_parsed_requirement,
)
from pip._internal.utils.misc import protect_pip_from_modification_on_windows
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from optparse import Values
from typing import List
class UninstallCommand(Command, SessionCommandMixin):
"""
Uninstall packages.
pip is able to uninstall most installed packages. Known exceptions are:
- Pure distutils packages installed with ``python setup.py install``, which
leave behind no metadata to determine what files were installed.
- Script wrappers installed by ``python setup.py develop``.
"""
usage = """
%prog [options] <package> ...
%prog [options] -r <requirements file> ..."""
def add_options(self):
# type: () -> None
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements '
                 'file. This option can be used multiple times.',
        )
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
        self.parser.insert_option_group(0, self.cmd_opts)
def run(self, options, args):
# type: (Values, List[str]) -> int
        session = self.get_default_session(options)
reqs_to_uninstall = {}
for name in args:
req = install_req_from_line(
name, isolated=options.isolated_mode,
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
for filename in options.requirements:
for parsed_req in parse_requirements(
filename,
options=options,
session=session):
req = install_req_from_parsed_requirement(
parsed_req,
isolated=options.isolated_mode
)
if req.name:
reqs_to_uninstall[canonicalize_name(req.name)] = req
if not reqs_to_uninstall:
raise InstallationError(
'You must give at least one requirement to {name} (see '
'"pip help {name}")'.format(**locals())
)
protect_pip_from_modification_on_windows(
modifying_pip="pip" in reqs_to_uninstall
)
for req in reqs_to_uninstall.values():
uninstall_pathset = req.uninstall(
                auto_confirm=options.yes, verbose=self.verbosity > 0,
)
if uninstall_pathset:
uninstall_pathset.commit()
return SUCCESS
|
[
"devngecu@gmail.com"
] |
devngecu@gmail.com
|
a9dc210095439b4c997744c603e4dc3dd0542810
|
048f2002ed13503d50428c8949c95a2e4f9bd532
|
/contest/weekcontest179/timeInformAllEmployees.py
|
a91f058cbbb49cce0012cd60c5c105810753fc70
|
[] |
no_license
|
ZhengLiangliang1996/Leetcode_ML_Daily
|
9c9330bd2c7bab5964fbd3827a27eeff5bd2c502
|
8cdb97bc7588b96b91b1c550afd84e976c1926e0
|
refs/heads/master
| 2023-04-06T19:52:23.524186
| 2023-03-30T21:08:57
| 2023-03-30T21:08:57
| 213,055,072
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 932
|
py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2022 liangliang <liangliang@Liangliangs-MacBook-Air.local>
#
# Distributed under terms of the MIT license.
import collections


class Solution(object):
def numOfMinutes(self, n, headID, manager, informTime):
"""
:type n: int
:type headID: int
:type manager: List[int]
:type informTime: List[int]
:rtype: int
"""
if n == 0: return 0
d = collections.defaultdict(list)
for i in range(len(manager)):
d[manager[i]].append(i)
        # BFS over the whole reporting tree, tracking the accumulated
        # inform time along each root-to-node path.
        q, res = [(headID, 0)], 0
while q:
newQ = []
for (idx, time) in q:
res = max(res, time)
for k in d[idx]:
newQ += [(k, time + informTime[idx])]
q = newQ
return res
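# A quick sanity check (hypothetical, using the well-known LeetCode 1376
# sample): Solution().numOfMinutes(6, 2, [2, 2, -1, 2, 2, 2],
# [0, 0, 1, 0, 0, 0]) should return 1 -- the head (id 2) takes 1 minute to
# inform all of its direct reports.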
|
[
"zhengliangliang1996@gmail.com"
] |
zhengliangliang1996@gmail.com
|
0cae35ebd5a07c6f2ddafda22aafcab6985493dd
|
786027545626c24486753351d6e19093b261cd7d
|
/ghidra9.2.1_pyi/ghidra/app/util/bin/format/coff/CoffSymbolAuxFunction.pyi
|
b98a12b2316fa9a794264f75a09d1c63d0c82264
|
[
"MIT"
] |
permissive
|
kohnakagawa/ghidra_scripts
|
51cede1874ef2b1fed901b802316449b4bf25661
|
5afed1234a7266c0624ec445133280993077c376
|
refs/heads/main
| 2023-03-25T08:25:16.842142
| 2021-03-18T13:31:40
| 2021-03-18T13:31:40
| 338,577,905
| 14
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,795
|
pyi
|
from typing import List, overload
import ghidra.app.util.bin.format.coff
import ghidra.program.model.data
import java.lang
class CoffSymbolAuxFunction(object, ghidra.app.util.bin.format.coff.CoffSymbolAux):
ASCII: ghidra.program.model.data.DataType = char
BYTE: ghidra.program.model.data.DataType = byte
DWORD: ghidra.program.model.data.DataType = dword
IBO32: ghidra.program.model.data.DataType = ImageBaseOffset32
POINTER: ghidra.program.model.data.DataType = pointer
QWORD: ghidra.program.model.data.DataType = qword
STRING: ghidra.program.model.data.DataType = string
UTF16: ghidra.program.model.data.DataType = unicode
UTF8: ghidra.program.model.data.DataType = string-utf8
VOID: ghidra.program.model.data.DataType = void
WORD: ghidra.program.model.data.DataType = word
def equals(self, __a0: object) -> bool: ...
def getClass(self) -> java.lang.Class: ...
def getFilePointerToLineNumber(self) -> int: ...
def getNextEntryIndex(self) -> int: ...
def getSize(self) -> int: ...
def getTagIndex(self) -> int: ...
def getUnused(self) -> List[int]: ...
def hashCode(self) -> int: ...
def notify(self) -> None: ...
def notifyAll(self) -> None: ...
def toDataType(self) -> ghidra.program.model.data.DataType: ...
def toString(self) -> unicode: ...
@overload
def wait(self) -> None: ...
@overload
def wait(self, __a0: long) -> None: ...
@overload
def wait(self, __a0: long, __a1: int) -> None: ...
@property
def filePointerToLineNumber(self) -> int: ...
@property
def nextEntryIndex(self) -> int: ...
@property
def size(self) -> int: ...
@property
def tagIndex(self) -> int: ...
@property
def unused(self) -> List[int]: ...
|
[
"tsunekou1019@gmail.com"
] |
tsunekou1019@gmail.com
|
dccefe6f3232c64d27f4537dcf77df25ac6eabc3
|
62343cc4b4c44baef354f4552b449a9f53ca799e
|
/Model/engine/val_engine.py
|
6d99465c5ef81d8810d06639485fbe399fba81eb
|
[] |
no_license
|
xwjBupt/simpleval
|
7c71d178657ae12ac1a5ac6f1275940023573884
|
87234e630d7801479575015b8c5bdd3588a3ceed
|
refs/heads/master
| 2023-02-03T13:42:07.013196
| 2020-12-25T09:08:01
| 2020-12-25T09:08:01
| 324,154,886
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 582
|
py
|
from util import registry
from .infer_engine import InferEngine
@registry.register_module('engine')
class ValEngine(InferEngine):
def __init__(self, model, meshgrid, converter, num_classes, use_sigmoid,
test_cfg, eval_metric):
super().__init__(model, meshgrid, converter, num_classes, use_sigmoid,
test_cfg)
self.eval_metric = eval_metric
def forward(self, data):
return self.forward_impl(**data)
def forward_impl(self, img, img_metas):
dets = self.infer(img, img_metas)
return dets
|
[
"xwj_bupt@163.com"
] |
xwj_bupt@163.com
|
1a87dc748de7261e1f06a57f98bf07ce9f709d73
|
b6fc54cff7037f5e4ef26cb4a645d5ea5a6fecdf
|
/001146StepikPyBegin/Stepik001146PyBeginсh07p05st06С03_my__20200422.py
|
30ca61a2153e84595a3e1fc8625d4159136926a7
|
[
"Apache-2.0"
] |
permissive
|
SafonovMikhail/python_000577
|
5483eaf2f7c73bc619ce1f5de67d8d689d2e7dd4
|
f2dccac82a37df430c4eb7425b5d084d83520409
|
refs/heads/master
| 2022-12-08T10:53:57.202746
| 2022-12-07T09:09:51
| 2022-12-07T09:09:51
| 204,713,341
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,110
|
py
|
'''
Given a natural number n, write a program that determines its largest and
smallest digits.

Input format:
A single natural number is given as input.

Output format:
The program must print the largest and the smallest digit of the entered
number (with an explanatory caption, which the grader expects in Russian).

Sample Input 1:
26670

Sample Output 1:
Максимальная цифра равна 7
Минимальная цифра равна 0
'''
# num1 = int(input())
num1 = 123456
max1 = -1
min1 = 10
# print(num1)
while num1 != 0:
num2 = num1 % 10
# print(num2)
if min1 > num2:
min1 = num2
# print("min1: ", min1)
if max1 < num2:
max1 = num2
# print("max1: ", max1)
num1 = num1 // 10
print("Максимальная цифра равна", max1)
print("Минимальная цифра равна", min1)
|
[
"ms33@inbox.ru"
] |
ms33@inbox.ru
|
e70a8301be7918577b324e3ba6cd05445470022b
|
b3742c32903fa8fd6489033a3be3b4a597b734e2
|
/venv/Scripts/pip3.7-script.py
|
398ffa63ec58c5c25822257ce1bb9aef7d50de75
|
[] |
no_license
|
lokeshrapala/onlne6amproj1
|
fceef41e6482c4f627c53207ba60efe1db24c16f
|
9836c85b4a984ad6275080ab0c32a106c095bbfe
|
refs/heads/master
| 2020-04-30T13:01:13.298204
| 2019-03-21T01:12:42
| 2019-03-21T01:12:42
| 176,843,064
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 421
|
py
|
#!C:\Users\LOKESH\PycharmProjects\onlne6amproj1\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==9.0.1','console_scripts','pip3.7'
__requires__ = 'pip==9.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==9.0.1', 'console_scripts', 'pip3.7')()
)
|
[
"lokesh.rapala"
] |
lokesh.rapala
|
4bf5515c69160386a2bb612f15f2de43aaf77d3b
|
6ff1c4a6ba35d1775d4aa2ec72462331fd09b4c4
|
/course1/week4/points_and_segments.py
|
8c354c31151ce1ed575cb72fbc6cfc24fc2ed24c
|
[] |
no_license
|
MohamedFawzy/Data-Structures-And-Algorithms-Specialization
|
eb9bfd66e94a95b3b357f77df8863eb585d1cf13
|
a57953b831e80cdb15ffd0984f3f3a3e7b80d657
|
refs/heads/master
| 2020-03-15T05:49:36.261276
| 2019-01-20T12:33:48
| 2019-01-20T12:33:48
| 131,995,132
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,195
|
py
|
# Uses python2
import sys
from itertools import chain
def fast_count_segments(starts, ends, points):
cnt = [0] * len(points)
a = zip(starts, [float('-inf')]*len(starts))
b = zip(ends, [float('inf')]*len(ends))
c = zip(points, range(len(points)))
sortedlist = sorted(chain(a,b,c), key=lambda a : (a[0], a[1]))
stack = []
for i, j in sortedlist:
if j == float('-inf'):
stack.append(j)
elif j == float('inf'):
stack.pop()
else:
cnt[j] = len(stack)
return cnt
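# A brief sketch of why the sweep above works (comment added for clarity, not
# part of the original submission): starts, ends and query points are merged
# and sorted by coordinate, with ties broken so starts (-inf tag) come first
# and ends (+inf tag) come last. While scanning left to right, the stack depth
# at the moment a point is visited equals the number of currently open
# segments, i.e. the number of segments containing that point.
# e.g. starts=[0, 7], ends=[5, 10], points=[1, 6, 11] -> cnt=[1, 0, 0]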
def naive_count_segments(starts, ends, points):
cnt = [0] * len(points)
for i in range(len(points)):
for j in range(len(starts)):
if starts[j] <= points[i] <= ends[j]:
cnt[i] += 1
return cnt
if __name__ == '__main__':
data = list(map(int, raw_input().split()))
n = data[0]
m = data[1]
starts = [0] * n
ends = [0] * n
for i in range(n):
starts[i], ends[i] = map(int, raw_input().split())
points = list(map(int, raw_input().split()))
#use fast_count_segments
cnt = fast_count_segments(starts, ends, points)
for x in cnt:
print str(x)+ ' ',
|
[
"mfawzy22@gmail.com"
] |
mfawzy22@gmail.com
|
9220c0779107ae7990c29f78fa1145f303124228
|
9fcd6a91132fd12731d259fe7d709cdf222381bb
|
/2022/24/foo.py
|
e15c500fc99027160b86dad1e257bfe3bc844f1d
|
[] |
no_license
|
protocol7/advent-of-code
|
f5bdb541d21414ba833760958a1b9d05fc26f84a
|
fa110cef83510d86e82cb5d02f6af5bb7016f2c7
|
refs/heads/master
| 2023-04-05T15:33:26.146031
| 2023-03-18T14:22:43
| 2023-03-18T14:22:43
| 159,989,507
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,424
|
py
|
import sys
from util import *
def parse(line):
return line.strip()
xs = list(map(parse, sys.stdin))
dir = {
">": RIGHT,
"<": LEFT,
"^": UP,
"v": DOWN,
}
ps = set()
winds = list()
g = Grid(xs)
for p, v in g.points():
if v == "." or v in dir:
ps.add(p)
if v in dir:
winds.append((v, p))
w = len(xs[0])
h = len(xs)
for x, v in enumerate(xs[0]):
if v == ".":
start = Point(x, 0)
for x, v in enumerate(xs[h-1]):
if v == ".":
end = Point(x, h-1)
def move_winds(winds):
nw = []
for wind in winds:
d, p = wind
np = p + dir[d]
if np not in ps:
if d == ">":
np = (1, p.y)
elif d == "<":
np = (w-2, p.y)
elif d == "^":
np = (p.x, h-2)
elif d == "v":
np = (p.x, 1)
nw.append((d, Point(np)))
return nw
turn = 0
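# Part 2 of this puzzle (AoC 2022 day 24) asks for start -> goal, back to
# start, then to goal again, so the BFS below runs three legs with the
# endpoints swapped, carrying the turn counter across legs.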
for s, g in ((start, end), (end, start), (start, end)):
pp = {s}
while True:
ws = set(w for _, w in winds)
npp = set()
for p in pp:
if p not in ws:
npp.add(p)
for d in ORTHOGONAL:
np = p + d
if np not in ws and np in ps:
npp.add(np)
pp = npp
if g in pp:
break
turn += 1
winds = move_winds(winds)
print(turn)
|
[
"niklas@protocol7.com"
] |
niklas@protocol7.com
|
2f7a7a464cfa554501ff53105e96d444dbdb9d6e
|
163bbb4e0920dedd5941e3edfb2d8706ba75627d
|
/Code/CodeRecords/2247/60716/266118.py
|
b3cfb9af57e92d07680b4ef8e087e07bdcf75d44
|
[] |
no_license
|
AdamZhouSE/pythonHomework
|
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
|
ffc5606817a666aa6241cfab27364326f5c066ff
|
refs/heads/master
| 2022-11-24T08:05:22.122011
| 2020-07-28T16:21:24
| 2020-07-28T16:21:24
| 259,576,640
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 329
|
py
|
strs = input().split(',')
lists = [int(i) for i in strs]
alex = 0
lee = 0
index=0
while len(lists)>0:
temp = 0
if lists[0]>=lists[len(lists)-1]:
temp = lists.pop(0)
else:
temp = lists.pop()
if index%2==0:
alex += temp
else:
lee += temp
print(True) if alex>lee else print(False)
|
[
"1069583789@qq.com"
] |
1069583789@qq.com
|
23c08622e46b36e8e88eeb8217c48ebaf5a30c2a
|
22cec5da2b1fb83dcc9cf7c888f1e2078b05b62e
|
/flora/wallet/settings/settings_objects.py
|
e89c586d10d8514e9123368dd15457e76179d5de
|
[
"Apache-2.0"
] |
permissive
|
JuEnPeHa/flora-blockchain
|
649d351e096e73222ab79759c71e191e42da5d34
|
656b5346752d43edb89d7f58aaf35b1cacc9a366
|
refs/heads/main
| 2023-07-18T08:52:51.353754
| 2021-09-07T08:13:35
| 2021-09-07T08:13:35
| 399,297,784
| 0
| 0
|
Apache-2.0
| 2021-08-24T01:30:45
| 2021-08-24T01:30:44
| null |
UTF-8
|
Python
| false
| false
| 577
|
py
|
from dataclasses import dataclass
from flora.util.streamable import Streamable, streamable
@dataclass(frozen=True)
@streamable
class BackupInitialized(Streamable):
"""
Stores user decision regarding import of backup info
"""
user_initialized: bool # Stores if user made a selection in UI. (Skip vs Import backup)
user_skipped: bool # Stores if user decided to skip import of backup info
backup_info_imported: bool # Stores if backup info has been imported
new_wallet: bool # Stores if this wallet is newly created / not restored from backup
|
[
"github@floracoin.farm"
] |
github@floracoin.farm
|
50595f67b3fda0a90dc73fa51951fbdfaf4170a5
|
5462142b5e72cb39bea5b802dd46f55357c4ea84
|
/homework_zero_class/lesson8/函数的简介-times_1.py
|
659bc9d42506df5e7f6a26bed1013354b9ef3d7a
|
[] |
no_license
|
qqmadeinchina/myhomeocde
|
a0996ba195020da9af32613d6d2822b049e515a0
|
291a30fac236feb75b47610c4d554392d7b30139
|
refs/heads/master
| 2023-03-23T05:28:53.076041
| 2020-08-24T08:39:00
| 2020-08-24T08:39:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 530
|
py
|
#!D:\Program Files\Anaconda3
# -*- coding: utf-8 -*-
# @Time : 2020/7/21 23:38
# @Author : 老萝卜
# @File : 函数的简介-times_1.py
# @Software: PyCharm Community Edition
print('nihao')
print('黑猫警长')
print('hahaha')
# Code stored in a function body is not executed immediately;
# it only runs when the function is called.
def fn():
    print('This is the first function')
print('nihao')
print('黑猫警长')
print('hahaha')
print(fn)
fn()
fn()
fn()
fn()
# fn is the function object; fn() calls it
# print is the function object; print() calls it
|
[
"newwxm@126.com"
] |
newwxm@126.com
|
c476976ac6e209478e0a9c8cddd2f916c3550b64
|
ada9e04c44e9cb577acc1301915490adbb06edf5
|
/test.py
|
9d91270e57e3aa010ce6f5d7ce3911b7d3051506
|
[
"MIT"
] |
permissive
|
iiharu/pyoni
|
9356b68a4ab52fa60d39cec01bf8145d7219ad03
|
3e62cedc1ed7dc726e421858ff143f7b7a713403
|
refs/heads/master
| 2021-01-28T00:24:07.078465
| 2020-03-03T08:22:15
| 2020-03-03T08:22:15
| 243,489,976
| 0
| 0
|
MIT
| 2020-02-27T10:16:06
| 2020-02-27T10:16:06
| null |
UTF-8
|
Python
| false
| false
| 1,237
|
py
|
# cap_openni2.cpp uses: status = device.open(filename);
# cap_openni.cpp uses: status = context.OpenFileRecording(filename, productionNode);
import numpy as np
import cv2,sys
import cv2.cv as cv
cap = cv2.VideoCapture(sys.argv[1])
if True:
print("Depth generator output mode:")
print("FRAME_WIDTH " + str(cap.get(cv.CV_CAP_PROP_FRAME_WIDTH)))
print("FRAME_HEIGHT " + str(cap.get(cv.CV_CAP_PROP_FRAME_HEIGHT)))
print("FRAME_MAX_DEPTH " + str(cap.get(cv.CV_CAP_PROP_OPENNI_FRAME_MAX_DEPTH)) + "mm")
print("FPS " + str(cap.get(cv.CV_CAP_PROP_FPS)))
print("REGISTRATION " + str(cap.get(cv.CV_CAP_PROP_OPENNI_REGISTRATION)) + "\n")
while(True):
# Capture frame-by-frame
cap.grab();
if True:
frame = cap.retrieve( cv.CV_CAP_OPENNI_BGR_IMAGE )[1]
        depthMap = cap.retrieve(cv.CV_CAP_OPENNI_DEPTH_MAP)[1]
else:
frame = cap.retrieve()[1]
print frame
# Our operations on the frame come here
#gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
|
[
"emanuele.ruffaldi@gmail.com"
] |
emanuele.ruffaldi@gmail.com
|
7b28bb9e2f21cd5136e3513b6e6a30200795dc3e
|
8a9b8fc7cbb0e24893f130d49edaf6b23501292d
|
/Python/find_angle.py
|
2350449185b18e8345177efed19841d3a3fb0342
|
[] |
no_license
|
Rubabesabanova/HackerRank
|
9ca6517944ac7253463e734d2fd5fd3e56e19a0a
|
41a6c92f55c72bd20dcb86e32f6f5b792730148c
|
refs/heads/master
| 2022-12-16T11:36:05.059682
| 2020-09-21T09:13:30
| 2020-09-21T09:13:30
| 291,142,383
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 206
|
py
|
# Difficulty : Medium
# Link : https://www.hackerrank.com/challenges/find-angle/problem
# Language : Python 3
from math import *
a=int(input())
b=int(input())
x=round(degrees(atan(a/b)))
print(str(x)+'°')
|
[
"60547132+Rubabe@users.noreply.github.com"
] |
60547132+Rubabe@users.noreply.github.com
|
76bc65a6a3966eaf28d60e029208b8b96b2010f2
|
90a2d0bed5d9eeb6b56c7ac96cc5fbee79dc4c5e
|
/.history/string_format_20220425174809.py
|
485acedc7e0afa5c104cd58b4822e854bba33fb0
|
[] |
no_license
|
KustomApe/dev
|
2d495e22363707b15a22860a773dac6c463903ee
|
a936f5c3b0928eaa2efaf28c6be8cacc17c3ecb3
|
refs/heads/master
| 2023-04-28T11:20:03.056953
| 2023-04-07T17:43:40
| 2023-04-07T17:43:40
| 138,429,111
| 1
| 0
| null | 2023-04-25T19:26:09
| 2018-06-23T19:47:23
|
Python
|
UTF-8
|
Python
| false
| false
| 254
|
py
|
age = 20
year = 2022
next_age = 21
print('My age is {} years old.'.format(age))
print('The year is {0}. I am {1} years old and will be {2} next year.'.format(year, age, next_age))
print('egg')
print('{0}'.format('egg'))
|
[
"kustomape@gmail.com"
] |
kustomape@gmail.com
|
2800bc4bde551bf0062ded594820b2eccc44f3d9
|
5b492bf5a906141be9557e654f7f4e7181f4d8eb
|
/backend/placeNewOrder/serializers.py
|
62a05d50a47a85685e7c7bf3fc9dfda57048e9a8
|
[] |
no_license
|
KaziMotiour/fast-courier
|
ac5dece7e7432e80614623dd3795e82d6e253cfa
|
17c3cc26e0aa28467fccafa0cb2019aa0066ff63
|
refs/heads/main
| 2023-06-05T15:36:39.338321
| 2021-06-29T16:40:22
| 2021-06-29T16:40:22
| 361,289,665
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,122
|
py
|
from rest_framework import serializers
from .models import Marchant, PlaceNewOrder
from django.contrib.auth.models import User
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ['id', 'username', 'is_superuser', 'marchant']
class MarchentSerializer(serializers.ModelSerializer):
user=UserSerializer()
class Meta:
model = Marchant
fields = ['id', 'user', 'first_name', 'last_name']
class PlaceOrderSerializer(serializers.ModelSerializer):
class Meta:
model = PlaceNewOrder
fields = ['marchant', 'percel_name', 'percel_type', 'newInvoiceID', 'weight', 'weight_unit', 'cost', 'cod_charge', 'return_charge', 'total_cost', 'Location', 'return_cost', 'timestamp']
class PlaceOrderListSerializer(serializers.ModelSerializer):
marchant = MarchentSerializer()
class Meta:
model = PlaceNewOrder
fields = ['marchant', 'percel_name', 'percel_type', 'newInvoiceID', 'weight', 'weight_unit', 'cost', 'cod_charge', 'return_charge', 'total_cost', 'return_cost', 'Location', 'timestamp']
|
[
"kmatiour30@gmail.com"
] |
kmatiour30@gmail.com
|
be5831beb31cdbef71082dd0850cefd69de4a4f8
|
7473d931134c444de3dfe61875e5245e9a4ba319
|
/anchore_engine/util/users.py
|
b03e892b462844d4bcd0710ce2e93da2d4f8e4c2
|
[
"Apache-2.0"
] |
permissive
|
anniyanvr/anchore-engine
|
7eb693a8761e11f9d1f1f40b998d36c7cb76a36d
|
f5ffac25aea536016dd08734b4a2aa2746be1f1d
|
refs/heads/master
| 2023-03-08T23:46:30.663379
| 2023-01-26T23:58:10
| 2023-01-26T23:58:10
| 193,227,499
| 0
| 0
|
Apache-2.0
| 2023-02-26T10:35:37
| 2019-06-22T11:42:50
|
Python
|
UTF-8
|
Python
| false
| false
| 736
|
py
|
SYSTEM_USER_ID = "admin" # The system user is always user '0'.
def is_system_user(user_id):
return user_id == SYSTEM_USER_ID
def user_ids_to_search(obj):
"""
Returns an ordered list of user_ids to search for finding related resources for the given object (typically an image or package).
By strength of match, first element is the same user_id as the given object if the given object has a user_id and the second element of
the result is the system user id.
:param obj:
:return:
"""
user_ids = []
if hasattr(obj, "user_id"):
user_ids.append(obj.user_id)
if is_system_user(obj.user_id):
return user_ids
user_ids.append(SYSTEM_USER_ID)
return user_ids
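# A minimal usage sketch (`Image` here is a hypothetical stand-in for any
# catalog object carrying a user_id attribute, not part of this module):
#
#   class Image:
#       def __init__(self, user_id):
#           self.user_id = user_id
#
#   user_ids_to_search(Image("alice"))  # -> ["alice", "admin"]
#   user_ids_to_search(Image("admin"))  # -> ["admin"]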
|
[
"nurmi@anchore.com"
] |
nurmi@anchore.com
|
ed042fa10383d90f3442befe36848687e127f1ec
|
f445450ac693b466ca20b42f1ac82071d32dd991
|
/generated_tempdir_2019_09_15_163300/generated_part009094.py
|
fb9ecacb3753ce85b5c502758f6a73e94447253c
|
[] |
no_license
|
Upabjojr/rubi_generated
|
76e43cbafe70b4e1516fb761cabd9e5257691374
|
cd35e9e51722b04fb159ada3d5811d62a423e429
|
refs/heads/master
| 2020-07-25T17:26:19.227918
| 2019-09-15T15:41:48
| 2019-09-15T15:41:48
| 208,357,412
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,304
|
py
|
from sympy.abc import *
from matchpy.matching.many_to_one import CommutativeMatcher
from matchpy import *
from matchpy.utils import VariableWithCount
from collections import deque
from multiset import Multiset
from sympy.integrals.rubi.constraints import *
from sympy.integrals.rubi.utility_function import *
from sympy.integrals.rubi.rules.miscellaneous_integration import *
from sympy import *
class CommutativeMatcher19127(CommutativeMatcher):
_instance = None
patterns = {
0: (0, Multiset({}), [
(VariableWithCount('i3.1.2.2.2.1.0', 1, 1, None), Mul),
(VariableWithCount('i3.1.2.2.2.1.0_1', 1, 1, S(1)), Mul)
])
}
subjects = {}
subjects_by_id = {}
bipartite = BipartiteGraph()
associative = Mul
max_optional_count = 1
anonymous_patterns = set()
def __init__(self):
self.add_subject(None)
@staticmethod
def get():
if CommutativeMatcher19127._instance is None:
CommutativeMatcher19127._instance = CommutativeMatcher19127()
return CommutativeMatcher19127._instance
@staticmethod
def get_match_iter(subject):
subjects = deque([subject]) if subject is not None else deque()
subst0 = Substitution()
# State 19126
return
yield
from collections import deque
|
[
"franz.bonazzi@gmail.com"
] |
franz.bonazzi@gmail.com
|
739c09cc81197c1da29e787e71d299a75eafd6d3
|
f931249f3766bd871eede76a950484701915c32d
|
/collective_decision/urls.py
|
d0ae17e7860ea982b2753b39b340e54169a7963a
|
[] |
no_license
|
cleliofavoccia/Share
|
aa509c9cfa1aa3789237b411b2b94d952d848322
|
cf0b982a6df2b8b4318d12d344ef0827394eedfd
|
refs/heads/main
| 2023-07-11T08:29:59.016252
| 2021-08-09T10:13:53
| 2021-08-09T10:13:53
| 373,621,787
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 860
|
py
|
"""Manage collective_decision app urls"""
from django.urls import path
from . import views
app_name = 'collective_decision'
urlpatterns = [
path('delete_vote_group/',
views.GroupMemberDeleteVoteGroup.as_view(),
name='delete_vote_group'),
path('against_delete_vote_group/',
views.GroupMemberAgainstDeleteVoteGroup.as_view(),
name='against_delete_vote_group'),
path('modify_vote_group/',
views.GroupMemberModifyVoteGroup.as_view(),
name='modify_vote_group'),
path('against_modify_vote_group/',
views.GroupMemberAgainstModifyVoteGroup.as_view(),
name='against_modify_vote_group'),
path('vote/<int:pk>', views.GroupVoteView.as_view(), name='vote'),
path('estimation/<int:pk>/',
views.CostEstimationView.as_view(),
name='estimation'
),
]
|
[
"favoccia.c@live.fr"
] |
favoccia.c@live.fr
|
e740f200c941b20745ea18ccdaf3784e917aeaa5
|
197420c1f28ccb98059888dff214c9fd7226e743
|
/happy_pythoning_cource/Part_11/11.1.2.List_sums_and_multiply/11.1.2.List_sums_and_multiply.py
|
81079913b5e30bc778983e0b1cbae3246c890f63
|
[] |
no_license
|
Vovanuch/python-basics-1
|
fc10b6f745defff31364b66c65a704a9cf05d076
|
a29affec12e8b80a1d3beda3a50cde4867b1dee2
|
refs/heads/master
| 2023-07-06T17:10:46.341121
| 2021-08-06T05:38:19
| 2021-08-06T05:38:19
| 267,504,364
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
'''
Complete the given code, using the concatenation operator (+) and the
list-times-number multiplication operator (*), so that it prints the list:
[1, 2, 3, 1, 2, 3, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 8, 9, 10, 11, 12, 13].
'''
numbers1 = [1, 2, 3]
numbers2 = [6]
numbers3 = [7, 8, 9, 10, 11, 12, 13]
num4 = numbers1 * 2 + numbers2 * 9 + numbers3
print(num4)
|
[
"vetohin.vladimir@gmail.com"
] |
vetohin.vladimir@gmail.com
|
4db3cc4e9548add764c3db6f8713931066ec69c5
|
6b201605227f11880c1d32c9cad300f6e29ff4ae
|
/Python/Buch_Python3_Das_umfassende_Praxisbuch/Kapitel_07_Sequenzen_Mengen_und_Generatoren/05_quicksort_algorithm.py
|
6697b4b4650a83138f8341e1252339a9c8352bea
|
[
"MIT"
] |
permissive
|
Apop85/Scripts
|
e2e8e6ed0c0da08a4d7c895aa366c9305197137b
|
467c34e59f2708f2d2f8bb369c36fd782d365e8b
|
refs/heads/master
| 2022-12-08T08:11:04.566376
| 2022-05-13T13:17:04
| 2022-05-13T13:17:04
| 164,251,836
| 0
| 0
|
MIT
| 2022-12-08T01:50:22
| 2019-01-05T21:16:45
|
Python
|
UTF-8
|
Python
| false
| false
| 1,223
|
py
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
###
# File: 05_quicksort_algorithm.py
# Project: Kapitel_07_Sequenzen_Mengen_und_Generatoren
# Created Date: Sunday 03.03.2019, 20:10
# Author: Apop85
# -----
# Last Modified: Monday 04.03.2019, 12:15
# -----
# Copyright (c) 2019 Apop85
# This software is published under the MIT license.
# Check http://www.opensource.org/licenses/MIT for further informations
# -----
# Description: Chapter 7. Page 215. Quicksort-algorithm using recursive function.
###
def quick_sort(item_list):
if len(item_list) > 0:
print('Sorting...', item_list)
if len(item_list) <= 1:
return item_list
else:
        # Split the list into 3 parts: values smaller than the pivot (the first
        # element), the pivot itself, and values greater than or equal to it.
        # Recurse until at most one item remains, then reassemble as:
        # smaller values + pivot + greater-or-equal values. Using >= keeps
        # duplicate values instead of silently dropping them.
        return quick_sort([x for x in item_list[1:] if x < item_list[0]]) + [item_list[0]] + quick_sort([y for y in item_list[1:] if y >= item_list[0]])
unsorted_list=['m','g','w','h','l','z','b','c','y']
sorted_list=quick_sort(unsorted_list)
print(sorted_list)
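# Sanity check with duplicate values (not in the book's listing): thanks to
# the >= comparison above, repeated items survive the sort, e.g.
# quick_sort(['b', 'a', 'c', 'a']) -> ['a', 'a', 'b', 'c']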
|
[
"39341618+Apop85@users.noreply.github.com"
] |
39341618+Apop85@users.noreply.github.com
|
9d1fb7b4ce24f9fcb9f66e6f268e7d001aeb308e
|
2d9cedf0ed36dadca1ca2f696290c8261ef7851f
|
/000010/DataJoint/DJ-NWB-Li-2015b/scripts/ingest.py
|
4975eba64deab64af06445f9c39c8092f1871b39
|
[
"Apache-2.0"
] |
permissive
|
dandi/example-notebooks
|
4365285697d41fd383110b5af5c30860d72fad22
|
be3a8b345dfa9c0145692a30087647bc47f865e8
|
refs/heads/master
| 2023-08-30T20:41:41.323355
| 2023-08-16T21:21:12
| 2023-08-16T21:21:12
| 231,629,025
| 5
| 8
|
Apache-2.0
| 2023-09-12T19:53:10
| 2020-01-03T16:55:02
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 220
|
py
|
from pipeline.ingest import ingest_lookup, ingest_meta_data, ingest_data
from pipeline import imaging
ingest_meta_data.main()
ingest_data.main()
imaging.RoiAnalyses.populate(suppress_errors=True, display_progress=True)
|
[
"ben.dichter@gmail.com"
] |
ben.dichter@gmail.com
|
0d511d8f1acf5ab27e7f677565d0a4424a91fcd7
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_209/287.py
|
dce3bd9df0ba24f4960a08f60546dcce19c18657
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,198
|
py
|
import sys
import math
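# Note on the greedy scoring below (comment added for clarity): a pancake's
# lateral surface always adds to the total, while its top face only adds the
# portion that exceeds the largest top area already counted (max_top).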
def pancake_value(top, side, max_top):
if top > max_top:
return top - max_top + side
else:
return side
for i in range(0, int(sys.stdin.readline())):
inputs = sys.stdin.readline()[:-1].split(" ")
n = int(inputs[0])
k = int(inputs[1])
pancakes = []
r = []
h = []
for j in range(0, n):
inputs = sys.stdin.readline()[:-1].split(" ")
r.append(int(inputs[0]))
h.append(int(inputs[1]))
pancakes.append( (r[j], h[j]) )
pancakes.sort(reverse=True)
sa_side = []
sa_top = []
for p in pancakes:
sa_side.append(2 * math.pi * p[0] * p[1])
sa_top.append(math.pi * p[0] ** 2)
sa = 0
used = []
max_top_used = 0
while len(used) < k:
best_value = -1
for j in range(0, n):
if j in used:
continue
if best_value == -1:
best_value = j
continue
if pancake_value(sa_top[j], sa_side[j], max_top_used) > pancake_value(sa_top[best_value], sa_side[best_value], max_top_used):
best_value = j
sa += pancake_value(sa_top[best_value], sa_side[best_value], max_top_used)
if sa_top[best_value] > max_top_used:
max_top_used = sa_top[best_value]
used.append(best_value)
print("Case #" + str(i + 1) + ": " + "{0:.15f}".format(sa))
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
bd1a740bf8f775851890759a7db88b5fd0cf0bba
|
958d87cc3b77bb3308d0aa04b92fdef5f97d63ae
|
/AdvancedPythonObjectsAndDataStructures/AdvancedNumbers.py
|
8ef05979c4197fe19a9dff75665c1dc35639738e
|
[] |
no_license
|
parihar08/PythonJosePortilla
|
6dec83519af78451c46e323928aedf19dbd908f1
|
6f47291908ad05daf5a505ba0e13687c46651bc2
|
refs/heads/master
| 2022-12-19T07:30:39.603468
| 2020-09-19T14:56:36
| 2020-09-19T14:56:36
| 292,650,778
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 568
|
py
|
print('********Hexadecimal*************','\n')
#Hexadecimal
print(hex(246)) #0xf6
print(hex(512)) #0x200
print('********Binary*************','\n')
#Binary
print(bin(128)) #0b10000000
print(bin(512)) #0b1000000000
print('*********Power************','\n')
#Power
print(pow(2,4)) #16
print(pow(2,4,3)) #1 (2**4)%3
print('*********Absolute************','\n')
#Absolute
print(abs(2)) #2
print('*********Round************','\n')
#Round
print(round(3.1)) #3.0
print(round(3.9)) #4.0
print(round(3.141592,2)) #3.14
|
[
"sparihar08@yahoo.com"
] |
sparihar08@yahoo.com
|
cbfd75a923ee6ced4ac597da44b7dce3d0a8c350
|
a4c04117685c3d28dd60bdfc45654cb2c935f746
|
/read_gedi_l2b.py
|
c5897d51ba3ac9e0fc2d517a732a03472d101b74
|
[] |
no_license
|
DKnapp64/General_Python_Codes
|
1ca40779bb381d526d61c5d5fedcc76ae797c590
|
8d4669c82c17455640a0a3123f92760cd65cc26a
|
refs/heads/main
| 2023-02-28T05:55:46.018482
| 2021-02-01T21:55:16
| 2021-02-01T21:55:16
| 335,077,354
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,098
|
py
|
#!/usr/bin/env python3
import h5py
import pandas as pd
import numpy as np
from shapely.geometry import Point
import geopandas as gpd
import os, sys
fname = sys.argv[1]
f = h5py.File(fname, 'r')
## haydi_bre(f)
## print(list(f.keys()))
groups = [t for t in f.keys()]
print("File: %s" % (fname))
## read in GeoJSON of Peru, Ecuador, Colombia envelope
pec_env = gpd.read_file('pec_env2.geojson')
for group in groups:
if (group == 'METADATA' or group == 'BEAM0000' or group == 'BEAM0001' \
or group == 'BEAM0010' or group == 'BEAM0011'):
continue
df = pd.DataFrame()
## quality flags
algorun = f[group]['algorithmrun_flag']
l2aqual = f[group]['l2a_quality_flag']
l2bqual = f[group]['l2b_quality_flag']
degrade = f[group]['geolocation']['degrade_flag']
surface_flag = f[group]['surface_flag']
## other data
elev_high = f[group]['geolocation']['elev_highestreturn']
elev_low = f[group]['geolocation']['elev_lowestmode']
elev_bin0 = f[group]['geolocation']['elevation_bin0']
elev_bin0_error = f[group]['geolocation']['elevation_bin0_error']
elev_last = f[group]['geolocation']['elevation_lastbin']
elev_last_error = f[group]['geolocation']['elevation_lastbin_error']
hgt_bin0 = f[group]['geolocation']['height_bin0']
hgt_lastbin = f[group]['geolocation']['height_lastbin']
pgap_theta = f[group]['pgap_theta']
pgap_theta_error = f[group]['pgap_theta_error']
rh100 = f[group]['rh100']
beam = f[group]['beam']
lons = f[group]['geolocation']['longitude_bin0']
lats = f[group]['geolocation']['latitude_bin0']
shotnum = f[group]['geolocation']['shot_number']
sensi = f[group]['sensitivity']
solarelev = f[group]['geolocation']['solar_elevation']
cover = f[group]['cover']
landsattreecov = f[group]['land_cover_data']['landsat_treecover']
modistreecov = f[group]['land_cover_data']['modis_treecover']
pai = f[group]['pai']
    inside = np.zeros(pai.shape[0], dtype=bool)
## good = np.all(goodstack, axis=0)
for j in range(pai.shape[0]):
if (pec_env.geometry[0].contains(Point(lons[j], lats[j])) \
and (algorun[j] > 0) and (l2aqual[j] > 0) and (l2bqual[j] > 0) \
and (solarelev[j] < 0.0)):
inside[j] = True
if (np.sum(inside) == 0):
print("Shapefile for %s with %d surviving shots" % (group, np.sum(inside)))
continue
algorun = algorun[inside]
l2aqual = l2aqual[inside]
l2bqual = l2bqual[inside]
degrade = degrade[inside]
surface_flag = surface_flag[inside]
elev_high = elev_high[inside]
elev_low = elev_low[inside]
elev_last = elev_last[inside]
elev_last_error = elev_last_error[inside]
elev_bin0 = elev_bin0[inside]
elev_bin0_error = elev_bin0_error[inside]
hgt_bin0 = hgt_bin0[inside]
hgt_lastbin = hgt_lastbin[inside]
beam = beam[inside]
lons = lons[inside]
lats = lats[inside]
shotnum = shotnum[inside]
cover = cover[inside]
landsattreecov = landsattreecov[inside]
modistreecov = modistreecov[inside]
sensi = sensi[inside]
solarelev = solarelev[inside]
pai = pai[inside]
pgap_theta = pgap_theta[inside]
pgap_theta_error = pgap_theta_error[inside]
rh100 = rh100[inside]
df['beam'] = beam
## put them in the data frame
df['shot_number'] = shotnum
df['elev_high'] = elev_high
df['elev_low'] = elev_low
df['height'] = elev_high - elev_low
df['elev_last'] = elev_last
df['elev_last_error'] = elev_last_error
df['elev_bin0'] = elev_bin0
df['elev_bin0_error'] = elev_bin0_error
df['height2'] = elev_bin0 - elev_last
df['hgt_bin0'] = hgt_bin0
df['hgt_lastbin'] = hgt_lastbin
df['height3'] = hgt_bin0 - hgt_lastbin
df['cover'] = cover
df['pai'] = pai
df['pgap_theta'] = pgap_theta
df['pgap_theta_error'] = pgap_theta_error
df['rh100'] = rh100/100.0
df['lstreecov'] = landsattreecov
df['modtreecov'] = modistreecov
df['l2a_qual'] = l2aqual
df['l2b_qual'] = l2bqual
df['degrade'] = degrade
df['sensi'] = sensi
df['solarelev'] = solarelev
df['algorun'] = algorun
    df = df.astype({'beam':'int32', 'shot_number':'uint64', 'elev_high':'float32', 'elev_low':'float32', \
'height':'float32', 'elev_last':'float32', 'elev_last_error':'float32', 'elev_bin0':'float32',\
'elev_bin0_error':'float32', 'height2':'float32', 'hgt_bin0':'float32', \
'hgt_lastbin':'float32', 'height3':'float32', 'cover':'float32', 'pai':'float32', \
'pgap_theta':'float32', 'pgap_theta_error':'float32', \
'rh100':'float32', 'lstreecov':'float32', \
'modtreecov':'float32', 'l2a_qual':'uint8', 'l2b_qual':'uint8', 'degrade':'uint8', \
'sensi':'float32', 'solarelev':'float32', 'algorun':'uint8'})
geometries = gpd.points_from_xy(lons, lats)
gdf = gpd.GeoDataFrame(df, geometry=geometries)
gdf.crs = '+init=epsg:4326' # WGS84
gdf.to_file(os.path.splitext(fname)[0]+'_PEC_'+group+'.shp')
print("Finished Shapefile for %s with %d shots" % (group, np.sum(inside)))
f.close()
|
[
"dknapp4@asu.edu"
] |
dknapp4@asu.edu
|
090048756a8aeb5e5d027527c1844d1ed1266ff8
|
a3d6556180e74af7b555f8d47d3fea55b94bcbda
|
/testing/run_pytype.py
|
fdb0b0cebeda89bac4c61d94946153c6f53a4443
|
[
"BSD-3-Clause"
] |
permissive
|
chromium/chromium
|
aaa9eda10115b50b0616d2f1aed5ef35d1d779d6
|
a401d6cf4f7bf0e2d2e964c512ebb923c3d8832c
|
refs/heads/main
| 2023-08-24T00:35:12.585945
| 2023-08-23T22:01:11
| 2023-08-23T22:01:11
| 120,360,765
| 17,408
| 7,102
|
BSD-3-Clause
| 2023-09-10T23:44:27
| 2018-02-05T20:55:32
| null |
UTF-8
|
Python
| false
| false
| 1,165
|
py
|
#!/usr/bin/env vpython3
# Copyright 2022 The Chromium Authors
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Simple helper script to run pytype on //testing code."""
import os
import sys
from pytype_common import pytype_runner
TESTING_DIR = os.path.abspath(os.path.dirname(__file__))
CHROMIUM_SRC_DIR = os.path.realpath(os.path.join(TESTING_DIR, '..'))
EXTRA_PATHS_COMPONENTS = [
('third_party', 'catapult', 'third_party', 'typ'),
]
EXTRA_PATHS = [
os.path.join(CHROMIUM_SRC_DIR, *p) for p in EXTRA_PATHS_COMPONENTS
]
EXTRA_PATHS.append(TESTING_DIR)
FILES_AND_DIRECTORIES_TO_CHECK = [
'unexpected_passes_common',
'flake_suppressor_common',
]
FILES_AND_DIRECTORIES_TO_CHECK = [
os.path.join(TESTING_DIR, f) for f in FILES_AND_DIRECTORIES_TO_CHECK
]
TEST_NAME = 'testing_pytype'
TEST_LOCATION = "//testing/run_pytype.py"
def main() -> int:
return pytype_runner.run_pytype(TEST_NAME, TEST_LOCATION,
FILES_AND_DIRECTORIES_TO_CHECK,
EXTRA_PATHS, TESTING_DIR)
if __name__ == '__main__':
sys.exit(main())
|
[
"chromium-scoped@luci-project-accounts.iam.gserviceaccount.com"
] |
chromium-scoped@luci-project-accounts.iam.gserviceaccount.com
|
0445697b424a8cf920dd80be998d7abdf21f8014
|
7f1a316ad1b19481e378953d7ffdd27fa435b1a4
|
/00-lecture/python2/week6/day1/dojodesk/apps/comments/migrations/0001_initial.py
|
829b4a81631d6fc3cca20f93d50036ba2c3c1743
|
[] |
no_license
|
Python-November-2018/wes_harper
|
03eaed6d840038473339102ab9b8b85e31084555
|
c5bcef58bb322d5487a7595f3f4f4fa7ae498a69
|
refs/heads/master
| 2020-04-03T18:12:44.212347
| 2018-12-20T03:38:38
| 2018-12-20T03:38:38
| 155,474,954
| 1
| 4
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,056
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2018-12-04 01:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('tickets', '0001_initial'),
('users', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content', models.TextField()),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('ticket', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='tickets.Ticket')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='users.User')),
],
),
]
|
[
"wes@tao.team"
] |
wes@tao.team
|
2b75e38eaab3622916e1d4a48df9fc6747581892
|
ff6248be9573caec94bea0fa2b1e4b6bf0aa682b
|
/log-20190927/132.230.102.123-10.21.11.38/1569575015.py
|
103d484cea235b66a481b6ba8b712737d192faa4
|
[] |
no_license
|
LennartElbe/codeEvo
|
0e41b1a7705204e934ef71a5a28c047366c10f71
|
e89b329bc9edd37d5d9986f07ca8a63d50686882
|
refs/heads/master
| 2020-12-21T17:28:25.150352
| 2020-03-26T10:22:35
| 2020-03-26T10:22:35
| 236,498,032
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,635
|
py
|
import functools
import typing
import string
import random
import pytest
def leap(year: int) -> bool:
    """This function checks whether a year is a leap year.

    args: year, a year number
    returns: True if year is a leap year, False if it is not"""
    if year % 4 != 0:
        return False
    elif year % 100 != 0:
        return True
    else:
        return year % 400 == 0
######################################################################
## hidden code
def mk_coverage():
covered = set()
target = set(range(4))
count = 0
def coverage(func):
nonlocal covered, target, count
def wrapper(year):
nonlocal covered, count
if year % 4 != 0:
covered.add(0)
elif year % 100 != 0:
covered.add(1)
elif year % 400 != 0:
covered.add(2)
else:
covered.add(3)
r = func (year)
count += 1
return r
if func == "achieved": return len(covered)
if func == "required": return len(target)
if func == "count" : return count
functools.update_wrapper(wrapper, func)
return wrapper
return coverage
coverage = mk_coverage ()
try:
leap = coverage(leap)
except:
pass
## Lösung Teil 2 (Tests)
def test_leap():
"""this function tests leap()"""
assert leap(2000) == True
assert leap(2001) == False
######################################################################
## hidden tests
pytest.main (["-v", "--assert=plain", "-p", "no:cacheprovider"])
from inspect import getfullargspec
class TestNames:
def test_leap (self):
assert leap
assert 'year' in getfullargspec(leap).args
class TestGrades:
def test_docstring_present(self):
assert leap.__doc__ is not None
def test_typing_present(self):
        assert typing.get_type_hints(leap) == typing.get_type_hints(self.leap_oracle)
def test_coverage(self):
assert coverage("achieved") == coverage("required")
def leap_oracle(self, year :int) -> bool:
if year % 4 != 0:
return False
elif year % 100 != 0:
return True
elif year % 400 == 0:
return True
else:
return False
def check_leap (self, year):
assert leap (year) == self.leap_oracle (year)
def test_correctness(self):
for i in range (100):
year = random.randrange (1582,2500)
self.check_leap (year)
for i in range (100):
year = random.randrange (1600,3000, 100)
self.check_leap (year)
|
[
"lenni.elbe@gmail.com"
] |
lenni.elbe@gmail.com
|
d593bba8d01a2062a54ce89cd99e5c1ad0533d5a
|
006b7c62cc6682bfb0c69bede62145f4c5cd0eb1
|
/art_bms/bacnet/api_urls.py
|
338cd242284c628708606eb4cc2218fae34e254d
|
[] |
no_license
|
ArtInfrastructure/art-bms
|
eb2961207c827b5fa1c0af8cdde08dcf5bc7fed2
|
2b384f371160feb0005b39af8cbaca9855c5c235
|
refs/heads/master
| 2016-09-06T02:35:27.920647
| 2010-03-03T21:51:16
| 2010-03-03T21:51:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 873
|
py
|
# Copyright 2009 GORBET + BANERJEE (http://www.gorbetbanerjee.com/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from django.conf.urls.defaults import *
from django.conf import settings
from models import *
urlpatterns = patterns('',
(r'^device/(?P<device_id>[\d]+)/$', 'bacnet.api_views.device'),
(r'^device/(?P<device_id>[\d]+)/(?P<property_id>[\d]+)/$', 'bacnet.api_views.device_property'),
)
|
[
"trevor@trevor.smith.name"
] |
trevor@trevor.smith.name
|
d1d613fc7dc70dec5d314468c4b3a8102ab32086
|
d3006a069f12a9c7a3cb49e412a9e679930bc94a
|
/backend/apps/shop/migrations/0006_product_image.py
|
e03a19444a740073bae96a37811c92f765aebdd3
|
[] |
no_license
|
alexmon1989/keiko
|
906e60328bc86f58cae9d9a0f9266869ed29f99a
|
45cc0d770ddd93129b8c51727a683cd15d0afad4
|
refs/heads/master
| 2022-12-12T08:00:31.843565
| 2019-08-13T11:50:17
| 2019-08-13T11:50:17
| 168,360,214
| 0
| 0
| null | 2022-12-08T11:58:54
| 2019-01-30T14:53:11
|
CSS
|
UTF-8
|
Python
| false
| false
| 484
|
py
|
# Generated by Django 2.1.5 on 2019-02-11 14:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('shop', '0005_auto_20190209_2110'),
]
operations = [
migrations.AddField(
model_name='product',
name='image',
field=models.ImageField(blank=True, help_text='Размер: 450px * 450px', null=True, upload_to='', verbose_name='Изображение'),
),
]
|
[
"alex.mon1989@gmail.com"
] |
alex.mon1989@gmail.com
|
b1b1772431492fcab55c92ae1d4fd03704fa9afe
|
2442d073434d463cede4a79ae8f9fd31c62174f8
|
/procedural-programming/io/without-readlines.py
|
d9a356fabba9a323ba50dd514ed32d2a93cd8438
|
[] |
no_license
|
grbalmeida/hello-python
|
3630d75cfdde15223dc1c3a714fd562f6cda0505
|
4d9ddf2f7d104fdbc3aed2c88e50af19a39c1b63
|
refs/heads/master
| 2020-07-10T10:04:38.982256
| 2020-02-26T00:37:36
| 2020-02-26T00:37:36
| 204,237,527
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
from FILE import FILE
file = open(FILE, 'r')
for index, line in enumerate(file):
print(f'{index + 1} {line}', end='')
file.close()
|
[
"g.r.almeida@live.com"
] |
g.r.almeida@live.com
|
9fd8d20f8d484b4a863e15f52bf93ca314cbe5c9
|
c0bf1f7ca6d9d7562f72b4a668e97a2d5ffe7c88
|
/tests/extension/types_/axi_/write_lite/test_types_axi_write_lite.py
|
1c98a783d3f415821c00d1bca7f8f49b3bfd4486
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
00mjk/veriloggen
|
cee0da16182c3c9bd95340a966d6a3febc0e7ad1
|
9d0af9638470b3b85cbf9cb53f16b853932571c8
|
refs/heads/master
| 2023-06-23T07:10:20.645734
| 2021-07-18T14:53:13
| 2021-07-18T14:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 488
|
py
|
from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import types_axi_write_lite
def test(request):
veriloggen.reset()
simtype = request.config.getoption('--sim')
rslt = types_axi_write_lite.run(filename=None, simtype=simtype,
outputfile=os.path.splitext(os.path.basename(__file__))[0] + '.out')
verify_rslt = rslt.splitlines()[-1]
assert(verify_rslt == '# verify: PASSED')
|
[
"shta.ky1018@gmail.com"
] |
shta.ky1018@gmail.com
|
67b77c60ffcd0ca9797a34eb6b56830ac5b0b355
|
ef4e046b3521c97345b1b1bcf58a1f16a2eaf603
|
/mysql/demo.py
|
ccd8cbce22b045f34eb4b706a7049ffdb8a6794b
|
[
"MIT"
] |
permissive
|
wasit7/tutorials
|
0e23b6ffc55519df80fa47473f13baf55e1573ef
|
83499821266c8debac05cb5d6d5f6da0f0abd68f
|
refs/heads/master
| 2020-12-11T01:06:35.532592
| 2017-04-29T15:13:01
| 2017-04-29T15:13:01
| 37,713,995
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 986
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 30 22:09:44 2015
@author: Nun
edited by Wasit
"""
from datetime import datetime
import pytz
import random
# if using an additional connector
#import mysql.connector
#conn = mysql.connector.Connect(host='146.148.37.209 ',user='root',password='',database='testdb')
#if using >>conda install mysql-python
import MySQLdb
#db = MySQLdb.connect(host= "146.148.37.209",user="root",passwd="",db="weather")
db = MySQLdb.connect(host= "173.194.246.163",user="root",passwd="",db="weather")
c = db.cursor()
#c.execute("""drop table if exists weather""")
#conn.commit()
c.execute("""create table if not exists weather (
time DATETIME NOT NULL PRIMARY KEY,
temp real NOT NULL,
humi real NOT NULL,
israin BOOLEAN NOT NULL)""")
cmd="insert into weather values ('%s', %.1f, %.1f, 0)"%(
datetime.now(pytz.timezone('Asia/Bangkok')).isoformat(),
random.randint(30,40),
random.randint(70,100))
print cmd
c.execute(cmd)
db.commit()
|
[
"wasit7@gmail.com"
] |
wasit7@gmail.com
|
1f2f6f6b6e2ce1d00611571c0f7bb6e894ea97ef
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_078/ch20_2020_04_11_20_17_50_548987.py
|
a96fc9803b658c8c63e6829c6f9b06960b007e43
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 251
|
py
|
distancia = float(input('What distance do you want to travel? '))
if distancia <= 200:
    print('The ticket price is R${0}'.format(distancia * 0.50))
else:
    print('The ticket price is R${0}'.format((distancia * 0.50) + ((distancia - 200) * 0.45)))
|
[
"you@example.com"
] |
you@example.com
|
6ac8b762b137bfd7afa381c029c405f96348233c
|
3679daa10ea95e90889e07e96e6c98c98f3751ea
|
/ipu/ipu/celery.py
|
e0092f1239df3677d4f604a596110f5cdb4e2224
|
[] |
no_license
|
rmn5124/ggsipu-placement-cell-portal
|
0a8fef69c75ea444588046fcc7b38d7cf5c8e8e5
|
11876c2171bb07308719b205a69cd8330eb08052
|
refs/heads/master
| 2023-09-01T12:01:47.475984
| 2019-09-02T21:49:01
| 2019-09-02T21:49:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 553
|
py
|
from __future__ import absolute_import
import os
from celery import Celery
# set the default Django settings module for the 'celery' program.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ipu.settings')
app = Celery('ipu')
from django.conf import settings
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
@app.task(bind=True)
def debug_task(self):
print('Request: {0!r}'.format(self.request))
|
[
"pratulyabubna@outlook.com"
] |
pratulyabubna@outlook.com
|
68e09f36ad8392a64d4ae77f98aa3cda11a6d3ec
|
32dda10669e459cf37c31f426fa709001d2c75b0
|
/leetcode_cn/solved/pg_760.py
|
95eb11fb22234cfa2dd1887f120267b93b1fdfbb
|
[] |
no_license
|
fastso/learning-python
|
3300f50d06871245d0bfcbe9d201224580f70852
|
d21dbd1b9f31017cdb1ed9b9ffd1e53ffe326572
|
refs/heads/master
| 2023-02-10T14:43:53.726247
| 2023-01-26T10:14:59
| 2023-01-26T10:14:59
| 193,454,718
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 145
|
py
|
from typing import List
class Solution:
    def anagramMappings(self, A: List[int], B: List[int]) -> List[int]:
        # Build a value -> index map once so each lookup is O(1); returning a
        # list (not a lazy map object) matches the declared return type.
        index_of = {value: i for i, value in enumerate(B)}
        return [index_of[a] for a in A]
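# A quick check (hypothetical values, not from the original file):
# Solution().anagramMappings([12, 28, 46, 32, 50], [50, 12, 32, 46, 28])
# -> [1, 4, 3, 2, 0], since e.g. A[0] == 12 sits at index 1 of B.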
|
[
"fastso.biko@gmail.com"
] |
fastso.biko@gmail.com
|
52e2e83c0b62474742d839853d740707a0b7d12e
|
951e433b25a25afeea4d9b45994a57e0a6044144
|
/LeetCode/动态规划_416_分割等和子集_01背包.py
|
133d61bbb2b69d6c32fa2c0755d0dd8b5988b897
|
[] |
no_license
|
EricaEmmm/CodePython
|
7c401073e0a9b7cd15f9f4a553f0aa3db1a951a3
|
d52aa2a0bf71b5e7934ee7bff70d593a41b7e644
|
refs/heads/master
| 2020-05-31T14:00:34.266117
| 2019-09-22T09:48:23
| 2019-09-22T09:48:23
| 190,318,878
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,174
|
py
|
'''
Given a non-empty array containing only positive integers, determine whether it
can be partitioned into two subsets whose element sums are equal.
Note: every element is at most 100 and the array holds at most 200 numbers.
Example 1:
Input: [1, 5, 11, 5]
Output: true
Explanation: the array can be split into [1, 5, 5] and [11].
Example 2:
Input: [1, 2, 3, 5]
Output: false
Explanation: the array cannot be split into two subsets with equal sums.
'''
'''
Reformulation: decide whether some of the positive integers in the array can be
picked so that their sum equals half of the array's total, i.e. a subset-sum
(0/1 knapsack) problem with capacity sum(nums) // 2.
'''
class Solution(object):
def canPartition1(self, nums):
"""
dp[i][j]表示前i个数中部分数的和是否等于j
状态转移:dp[i][j] = dp[i-1][j] | dp[i-1][j-nums[i]]
时间复杂度:O(NC),空间复杂度:O(NC)
"""
Sum = sum(nums)
if Sum % 2 == 1:
return False
mid = Sum // 2
nums.insert(0, 0)
dp = [[False for _ in range(mid+1)] for j in range(len(nums))]
for i in range(len(nums)):
dp[i][0] = True
for i in range(1, len(nums)):
for j in range(mid+1):
if j >= nums[i]:
dp[i][j] = dp[i-1][j] | dp[i-1][j-nums[i]]
else:
dp[i][j] = dp[i-1][j]
return dp[-1][-1]
def canPartition2(self, nums):
"""
dp[j]表示部分数的和是否等于j
状态转移:dp[j] = dp[j] | dp[j-nums[i]]
时间复杂度:O(NC),空间复杂度:O(C)
"""
Sum = sum(nums)
if Sum % 2 == 1:
return False
mid = Sum // 2
nums.insert(0, 0)
dp = [False for _ in range(mid+1)]
dp[0] = True
for i in range(1, len(nums)):
for j in range(mid, nums[i]-1, -1):
dp[j] = dp[j] | dp[j-nums[i]]
return dp[-1]
def canPartition(self, nums):
return self.canPartition2(nums)
if __name__ == '__main__':
s = Solution()
nums = [1, 5, 11, 5] #[1, 2, 3, 5] #
print(s.canPartition(nums))
|
[
"1016920795@qq.com"
] |
1016920795@qq.com
|
d5ec5a773a92e6f187149f90d6b37bbd36997335
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/otherforms/_physiotherapists.py
|
28647da34786753adc621542f6a82cc0400f34bb
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 258
|
py
|
# class header
class _PHYSIOTHERAPISTS():
    def __init__(self,):
        self.name = "PHYSIOTHERAPISTS"
        self.definitions = 'physiotherapist'  # quoted: the bare name raised a NameError
        self.parents = []
        self.childen = []
        self.properties = []
        self.jsondata = {}
        self.basic = ['physiotherapist']
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
ca9f1453462fb30398cb816eb03aee011bc9590f
|
9b20161b91400238b0c6e6ee3282a328d42935e2
|
/tensorflow_datasets/text/goemotions.py
|
ba592ec175c2b47c840ccc53603110ff9efa364a
|
[
"Apache-2.0"
] |
permissive
|
okyanusoz/datasets
|
61c0ced07c420d7e900080e851890def74a37d94
|
8997c4140cd4fc145f0693787b1da78691930459
|
refs/heads/master
| 2023-05-31T23:19:30.153499
| 2021-05-06T19:56:49
| 2021-05-06T19:58:56
| 365,308,067
| 1
| 1
|
Apache-2.0
| 2021-07-04T11:15:13
| 2021-05-07T17:32:53
| null |
UTF-8
|
Python
| false
| false
| 5,173
|
py
|
# coding=utf-8
# Copyright 2021 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""goemotions dataset."""
import csv
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
_CITATION = """
@inproceedings{demszky-2020-goemotions,
title = "{G}o{E}motions: A Dataset of Fine-Grained Emotions",
author = "Demszky, Dorottya and
Movshovitz-Attias, Dana and
Ko, Jeongwoo and
Cowen, Alan and
Nemade, Gaurav and
Ravi, Sujith",
booktitle = "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics",
month = jul,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.acl-main.372",
pages = "4040--4054",
}
"""
_DESCRIPTION = """
The GoEmotions dataset contains 58k carefully curated Reddit comments labeled
for 27 emotion categories or Neutral. The emotion categories are admiration,
amusement, anger, annoyance, approval, caring, confusion, curiosity, desire,
disappointment, disapproval, disgust, embarrassment, excitement, fear,
gratitude, grief, joy, love, nervousness, optimism, pride, realization, relief,
remorse, sadness, surprise.
"""
_URL_TRAIN = 'https://github.com/google-research/google-research/raw/master/goemotions/data/train.tsv'
_URL_DEV = 'https://github.com/google-research/google-research/raw/master/goemotions/data/dev.tsv'
_URL_TEST = 'https://github.com/google-research/google-research/raw/master/goemotions/data/test.tsv'
_TEXT_LABEL = 'comment_text'
_EMOTION_LABELS = [
'admiration', 'amusement', 'anger', 'annoyance', 'approval', 'caring',
'confusion', 'curiosity', 'desire', 'disappointment', 'disapproval',
'disgust', 'embarrassment', 'excitement', 'fear', 'gratitude', 'grief',
'joy', 'love', 'nervousness', 'optimism', 'pride', 'realization', 'relief',
'remorse', 'sadness', 'surprise', 'neutral'
]
class Goemotions(tfds.core.GeneratorBasedBuilder):
"""Dataset of Reddit comments with one or more emotion labels."""
VERSION = tfds.core.Version('0.1.0')
def _info(self):
"""Returns information on the GoEmotions dataset."""
features = {_TEXT_LABEL: tfds.features.Text()}
for label in _EMOTION_LABELS:
features[label] = tf.bool
return tfds.core.DatasetInfo(
builder=self,
description=_DESCRIPTION,
features=tfds.features.FeaturesDict(features),
# Each emotion can be used for single-label classification.
supervised_keys=None,
homepage='https://github.com/google-research/google-research/tree/master/goemotions',
citation=_CITATION,
)
def _split_generators(self, dl_manager):
"""Returns SplitGenerators."""
# Download the data.
dl_paths = dl_manager.download({
'train': _URL_TRAIN,
'test': _URL_TEST,
'dev': _URL_DEV,
})
# Specify the splits.
return [
tfds.core.SplitGenerator(
name=tfds.Split.TRAIN,
gen_kwargs={
'filename': dl_paths['train'],
},
),
tfds.core.SplitGenerator(
name=tfds.Split.VALIDATION,
gen_kwargs={
'filename': dl_paths['dev'],
},
),
tfds.core.SplitGenerator(
name=tfds.Split.TEST,
gen_kwargs={
'filename': dl_paths['test'],
},
),
]
def _parse_row_as_example(self, row):
example = {}
if len(row) != 3:
return example
example[_TEXT_LABEL] = row['comment_text']
emotion_ids = row['emotion_ids'].split(',')
for emotion_id in emotion_ids:
emotion_id = int(emotion_id)
example[_EMOTION_LABELS[emotion_id]] = True
for i in range(len(_EMOTION_LABELS)):
if _EMOTION_LABELS[i] not in example.keys():
example[_EMOTION_LABELS[i]] = False
return example
def _generate_examples(self, filename):
"""Yields examples.
Each example contains a text input with the relevant emotion labels.
Args:
filename: the path of the file to be read for this split.
Yields:
      A dictionary of features, containing the comment text and, for each
      emotion label, True/False depending on whether it is a label for the input.
"""
fieldnames = ['comment_text', 'emotion_ids', 'comment_id']
with tf.io.gfile.GFile(filename) as f:
reader = csv.DictReader(f, fieldnames=fieldnames, delimiter='\t')
for row in reader:
example = self._parse_row_as_example(row)
if example:
yield row['comment_id'], example
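# A typical consumption sketch (standard TFDS API, once the dataset is
# registered and built): ds = tfds.load('goemotions', split='train')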
|
[
"copybara-worker@google.com"
] |
copybara-worker@google.com
|
73d1e09672f5b4289a29a9587a67c4a445a4ee17
|
15f321878face2af9317363c5f6de1e5ddd9b749
|
/solutions_python/Problem_206/622.py
|
1beb0d17b042c1152df8dc35b3110f891c66ab3e
|
[] |
no_license
|
dr-dos-ok/Code_Jam_Webscraper
|
c06fd59870842664cd79c41eb460a09553e1c80a
|
26a35bf114a3aa30fc4c677ef069d95f41665cc0
|
refs/heads/master
| 2020-04-06T08:17:40.938460
| 2018-10-14T10:12:47
| 2018-10-14T10:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 326
|
py
|
t = int(input())
for i in range(t):
    d, n = map(int, input().strip().split())
    ls = list()
    for j in range(n):
        k, s = map(int, input().strip().split())
        # Time for the horse starting k km ahead, moving at speed s, to reach d.
        time_needed = float(d - k) / float(s)
        ls.append(time_needed)
    spd = float(d) / float(max(ls))
    print("Case #%d:" % (i + 1), spd)
|
[
"miliar1732@gmail.com"
] |
miliar1732@gmail.com
|
c688fdb0b0df6f828c608726a6a2344e9ea59346
|
3d4cd68400eb5429282b23bf6a1b7226851b731a
|
/spddo/micro/func/format_date.py
|
c89fe79b54b1ef99441575344c516189548b9b64
|
[] |
no_license
|
blueshed/spddo-chat
|
2dee16478e9a30ed0196d76d450e0772147aa208
|
d7ba492162ba95c0d2b8ed78370366eb96e39c3a
|
refs/heads/master
| 2021-01-17T11:29:48.959086
| 2016-05-21T18:33:08
| 2016-05-21T18:33:08
| 43,005,167
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 301
|
py
|
from blueshed.micro.utils.date_utils import parse_date
def format_date(date: str, date_format: str='%b, %d %Y'):
'''
Expects date to be in the format: %Y-%m-%dT%H:%M:%S.%fZ
or just: "%Y-%m-%d"
'''
date_value = parse_date(date)
return date_value.strftime(date_format)
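# A quick usage sketch (output under the default format, assuming parse_date
# accepts the two string shapes its docstring describes):
# format_date('2016-05-21T18:33:08.000Z')   -> 'May, 21 2016'
# format_date('2016-05-21', '%d/%m/%Y')     -> '21/05/2016'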
|
[
"pete@blueshed.co.uk"
] |
pete@blueshed.co.uk
|
fbcf6403ced9e1507e9f51fb874d1f4e9f3b6e13
|
7d4d6dc3c897ec7c297bb67f30c3f4e39509b250
|
/Python/DailyFlash/28feb2020/MySolutions/program1.py
|
ab5f70d98b97f7e7edad5810c80f531aef298d58
|
[] |
no_license
|
kumbharswativ/Core2Web
|
48a6ec0275466f4179c502097b1314d04a29e63e
|
60949e5461ef103a4ad2c7c39ee9be0be101ec11
|
refs/heads/master
| 2022-12-24T06:11:45.096063
| 2020-08-09T12:04:07
| 2020-08-09T12:09:13
| 286,219,590
| 0
| 1
| null | 2022-12-11T10:57:50
| 2020-08-09T11:02:18
|
Python
|
UTF-8
|
Python
| false
| false
| 686
|
py
|
'''
Write a program that prints whether a number entered by the user is a
Disarium Number or not.
{Note: a number is a Disarium Number if the sum of its digits, each raised
to the power of its position in the number, equals the number itself.
E.g. 135: 1 is at position 1, 3 at position 2 and 5 at position 3, and
1^1 + 3^2 + 5^3 = 1 + 9 + 125 = 135, so 135 is a Disarium Number.}
Input: 89
Output: 89 is a Disarium Number.
'''
list1 = []
z = int(input("Input: "))
num = z
# Collect the digits of z (least significant first), then restore their order.
while z > 0:
    list1.append(z % 10)
    z = z // 10
list1.reverse()
total = 0  # renamed from `sum`, which shadowed the built-in
for n, digit in enumerate(list1, start=1):
    total = total + digit ** n
if num == total:
    print(num, "is a Disarium Number.")
else:
    print(num, "is not a Disarium Number.")
|
[
"“kumbharswativ@gmail.com”"
] |
“kumbharswativ@gmail.com”
|
dfa30e0fd9fbef00668d7f2b5968d50393b8aa2a
|
df6ec5d6f5c6beb30c1f4bb6a4c2969e2ef25c31
|
/ngo_npo_profile/migrations/0001_initial.py
|
c086b0111ba7c3edcb1d71d260e8259dcead1fcd
|
[] |
no_license
|
heritiermwalila/wecanchangetheworld
|
0f820f89878107b002c10fa724a39d025e6a5cfc
|
30c48a02d78d366afe6739606b342f6bcefcd576
|
refs/heads/master
| 2023-05-01T20:24:29.925947
| 2019-06-05T09:21:41
| 2019-06-05T09:21:41
| 190,364,316
| 0
| 0
| null | 2023-04-21T20:33:04
| 2019-06-05T09:16:51
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,141
|
py
|
# Generated by Django 2.2.1 on 2019-06-04 19:06
import ckeditor_uploader.fields
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Organisation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Organisation name', max_length=100)),
('slug', models.SlugField(unique=True)),
('email', models.EmailField(blank=True, help_text='Organisation Email', max_length=254)),
('phone', models.CharField(blank=True, max_length=50)),
('ceo', models.CharField(blank=True, max_length=50)),
('logo', models.FileField(default='static/images/noprofile.png', upload_to='ngo-npo/', verbose_name='Profile logo')),
('expect', models.TextField(blank=True, help_text='Expect text', max_length=255)),
('website_url', models.URLField(blank=True, help_text='website Address')),
('background_image', models.FileField(default='static/images/defaultbg.jpg', upload_to='ngo-npo/')),
('date_posted', models.DateTimeField(default=django.utils.timezone.now)),
],
options={
'verbose_name': 'Organisation',
'verbose_name_plural': 'Organisations',
},
),
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('slug', models.SlugField()),
('content', ckeditor_uploader.fields.RichTextUploadingField(blank=True)),
('organisation', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ngo_npo_profile.Organisation')),
],
),
]
|
[
"admin@admins-MacBook-Pro.local"
] |
admin@admins-MacBook-Pro.local
|
d89bf8a620d9998a3cd783110496cbf721c4ee6f
|
6da19be45ff986768eb820f11691977cb3c84772
|
/Python/3_DB_interactions/303_Book_store_project_with_json_format_file_for_storage/main_logic.py
|
5b9f54629f33cf31d9cd4b915529561295178d45
|
[] |
no_license
|
alexp01/trainings
|
9e72f3a571292b79d2b1518f564d2dc0a774ef41
|
9d8daee16f15e0d7851fab12ab3d2505386a686c
|
refs/heads/master
| 2023-05-04T23:37:13.243691
| 2023-05-02T08:02:53
| 2023-05-02T08:02:53
| 272,425,687
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,058
|
py
|
"""
API with logic for file storage in JSON format
"""
import json
import os
file_to_store = 'database.json'
print (file_to_store)
def check_if_file_exists():
    # Create the file with an empty list only if it is missing; opening with
    # 'w' unconditionally (as before) truncated an existing database.
    if not os.path.exists(file_to_store):
        with open(file_to_store, 'w') as file:
            json.dump([], file)
def read_from_file():
with open(file_to_store, 'r') as file:
books = json.load(file)
return books
def add_a_book(name,author):
books = read_from_file()
books.append(dict([('name', name), ('author', author), ('read', '0')]))
    # We can skip the dict() call and use a literal: {'name': name, 'author': author, 'read': '0'}
write_to_file(books)
def mark_as_read(name):
books = read_from_file()
for book in books:
if book['name'] == name:
book['read'] = '1'
write_to_file(books)
def delete_a_book(name):
books = read_from_file()
books = [book for book in books if book['name'] != name]
write_to_file(books)
def write_to_file(books):
with open(file_to_store, 'w') as file:
json.dump(books,file)
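# A minimal usage sketch (book data is hypothetical; assumes this module is
# imported as `store`):
# store.check_if_file_exists()
# store.add_a_book('Dune', 'Frank Herbert')
# store.mark_as_read('Dune')
# store.read_from_file()  # [{'name': 'Dune', 'author': 'Frank Herbert', 'read': '1'}]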
|
[
"34630182+alexp01@users.noreply.github.com"
] |
34630182+alexp01@users.noreply.github.com
|
47901753475b364c0fa61c68da06ac7dd0ce77d0
|
4178f2916d2da72cbb45454fbed941dcfe8f6460
|
/POM_test/TestCase/Predict/TC_007.py
|
d31487df7537765b6187688a9253b66573b8e426
|
[] |
no_license
|
maxcrup007/Selenium_Webdriver_Python
|
15196cb04ba5cafdc5b776c26d167f0b48fb0e14
|
6be7f0b9f53df1ba592957029e8a4d22e409d1c4
|
refs/heads/main
| 2023-03-24T21:04:31.976451
| 2021-03-22T09:16:04
| 2021-03-22T09:16:04
| 349,379,454
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,189
|
py
|
# Test the "Predict" (assessment) flow when the assessed quantity is entered as zero
import time
import unittest
import sys
from selenium import webdriver
from selenium.webdriver import ActionChains
from POM_test.login import *
from POM_test.predictPage import *
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", ".."))  # was "...", which is not a parent-directory reference
class TestPredict_7(unittest.TestCase):
@classmethod
def setUpClass(self):
self.driver = webdriver.Chrome(executable_path="C:/Users/voraw/Downloads/Compressed/webdriver/chromedriver/chromedriver")
self.driver.implicitly_wait(10)
self.driver.maximize_window()
def test_login_valid(self):
driver = self.driver
self.driver.get("https://top-upstream-client.mulberrysoft.com/#/older/activity")
login = LoginPage(driver)
login.enter_username("demo005")
login.enter_password("123456")
login.click_login()
time.sleep(2)
predict = PredictPage(driver)
predict.into_predictPage()
time.sleep(2)
predict.upload_image()
time.sleep(2)
predict.predict_plant()
time.sleep(2)
predict.predict_select()
time.sleep(2)
predict.predict_value_selected("0")
        # enter the assessed quantity as zero
time.sleep(2)
scroll = driver.find_element_by_xpath("//ion-item[2]/ion-select")
action = ActionChains(driver)
action.move_to_element(scroll).perform()
        # Scroll the unit selector into view before interacting with it.
predict.predict_unit_selected()
time.sleep(2)
predict.predict_submit_value()
time.sleep(2)
@classmethod
def tearDownClass(cls):
cls.driver.close()
cls.driver.quit()
print("Test Completed")
if __name__ == '__main__':
unittest.main()
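# Run directly (the __main__ guard above invokes unittest.main()):
#   python TC_007.py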
|
[
"36732487+maxcrup007@users.noreply.github.com"
] |
36732487+maxcrup007@users.noreply.github.com
|
f8b9076be3b1fda65f70ba1b970c0b313998e494
|
d554b1aa8b70fddf81da8988b4aaa43788fede88
|
/5 - Notebooks e Data/1 - Análises numéricas/Arquivos David/Atualizados/logDicas-master/data/2019-1/222/users/4057/codes/1642_2728.py
|
a35b596d3befd8180e3f47a678f1a0b4d5b6c69c
|
[] |
no_license
|
JosephLevinthal/Research-projects
|
a3bc3ca3b09faad16f5cce5949a2279cf14742ba
|
60d5fd6eb864a5181f4321e7a992812f3c2139f9
|
refs/heads/master
| 2022-07-31T06:43:02.686109
| 2020-05-23T00:24:26
| 2020-05-23T00:24:26
| 266,199,309
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 247
|
py
|
percurso = float(input("Trip distance (in kilometers): "))
tipo = input("Car type (A or B): ")
if tipo.upper() == "A":
    consumo = percurso / 8
    print(round(consumo, 2))
else:
    consumo = percurso / 12
    print(round(consumo, 2))
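# Worked example (hypothetical input): percurso = 120, tipo = "A"
# -> consumo = 120 / 8 = 15.0 (reading 8 and 12 as km per litre)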
|
[
"jvlo@icomp.ufam.edu.br"
] |
jvlo@icomp.ufam.edu.br
|
3884eb4248bc7e8244797a0c63713adc83ee57e0
|
8e24e8bba2dd476f9fe612226d24891ef81429b7
|
/geeksforgeeks/python/python_all/103_14.py
|
f3f8bf7d10d83beb51333e3d691467e855b5fbdb
|
[] |
no_license
|
qmnguyenw/python_py4e
|
fb56c6dc91c49149031a11ca52c9037dc80d5dcf
|
84f37412bd43a3b357a17df9ff8811eba16bba6e
|
refs/heads/master
| 2023-06-01T07:58:13.996965
| 2021-06-15T08:39:26
| 2021-06-15T08:39:26
| 349,059,725
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,703
|
py
|
Python – Custom dictionary initialization in list
While working with Python, we may need to initialize a list of a particular
size with custom dictionaries. This task has its utility in web development
for storing records. Let's discuss ways in which it can be performed.
**Method #1: Using {dict} + "*" operator**
This task can be performed using the "*" operator: create a list containing a
single custom dictionary and multiply it by the desired size. The drawback is
that every slot holds a reference to the same dictionary object, so mutating
one entry mutates all of them.
# Python3 code to demonstrate working of
# Custom dictionary initialization in list
# using {dict} + "*" operator
# Initialize dict
test_dict = {'gfg' : 1, 'is' : 2, 'best' : 3}
# Custom dictionary initialization in list
# using {dict} + "*" operator
res = [test_dict] * 6
print("The list of custom dictionaries is : " + str(res))
**Output:**
The list of custom dictionaries is : [{'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}]
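To see the shared-reference drawback concretely (a sketch, not part of the
original article):
# Mutating one slot of the "*"-built list changes every slot.
shared = [{'gfg': 1}] * 3
shared[0]['gfg'] = 99
print(shared) # [{'gfg': 99}, {'gfg': 99}, {'gfg': 99}]
print(all(d is shared[0] for d in shared)) # True: every slot is the same object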
**Method #2: Using {dict} + list comprehension**
This is perhaps the better and correct way to perform this task. We initialize
each index of the list with its own copy of the dictionary, so the entries are
independent dictionaries rather than references to a single object.
# Python3 code to demonstrate working of
# Custom dictionary initialization in list
# using {dict} + list comprehension
# Initialize dict
test_dict = {'gfg' : 1, 'is' : 2, 'best' : 3}
# Custom dictionary initialization in list
# using {dict} + list comprehension; dict(test_dict) makes an independent
# copy per slot (a bare test_dict here would alias one object, as in Method #1)
res = [dict(test_dict) for sub in range(6)]
print("The list of custom dictionaries is : " + str(res))
**Output:**
The list of custom dictionaries is : [{'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}, {'gfg': 1, 'best': 3, 'is': 2}]
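Verifying independence (a sketch): with dict(test_dict), each slot is its own
copy, so mutating one entry leaves the others untouched.
res[0]['gfg'] = 99
print(res[1]['gfg']) # still 1
print(res[0] is res[1]) # False: independent objects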
|
[
"qmnguyenw@gmail.com"
] |
qmnguyenw@gmail.com
|